id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
3305767 | <reponame>m1yag1/zenhub_charts<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 16:06
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``boards`` app.

    Creates Issue, Pipeline, PipelineNameMapping, Repo and Transfer, then
    wires their foreign keys to Repo/Pipeline/Issue and adds uniqueness
    constraints on the natural keys.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.PositiveIntegerField()),
                ('title', models.CharField(max_length=255)),
                ('durations', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('latest_pipeline_name', models.CharField(max_length=255)),
                ('latest_transfer_date', models.DateTimeField()),
                # Nested array of char arrays (a list of string lists per issue).
                ('labels', django.contrib.postgres.fields.ArrayField(
                    base_field=django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=100), size=None), default=list,
                    size=None)),
            ],
        ),
        migrations.CreateModel(
            name='Pipeline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('pipeline_id', models.CharField(max_length=255)),
                ('order', models.PositiveSmallIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='PipelineNameMapping',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('old_name', models.CharField(max_length=255)),
                ('new_name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Repo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('repo_id', models.PositiveIntegerField(unique=True)),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Transfer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('transfered_at', models.DateTimeField()),
                # from/to pipelines are nullable: an issue's first/last transfer
                # has no source/destination pipeline respectively.
                ('from_pipeline', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='from_transfers', to='boards.Pipeline')),
                ('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transfers', to='boards.Issue')),
                ('to_pipeline', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='to_transfers', to='boards.Pipeline')),
            ],
        ),
        migrations.AddField(
            model_name='pipelinenamemapping',
            name='repo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boards.Repo'),
        ),
        migrations.AddField(
            model_name='pipeline',
            name='repo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boards.Repo'),
        ),
        migrations.AddField(
            model_name='issue',
            name='repo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='boards.Repo'),
        ),
        migrations.AlterUniqueTogether(
            name='transfer',
            unique_together=set([('issue', 'from_pipeline', 'to_pipeline', 'transfered_at')]),
        ),
        migrations.AlterUniqueTogether(
            name='pipeline',
            unique_together=set([('pipeline_id', 'repo')]),
        ),
        migrations.AlterUniqueTogether(
            name='issue',
            unique_together=set([('repo', 'number')]),
        ),
    ]
| StarcoderdataPython |
1807754 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
class App():
    """Minimal ROS node that listens on ``sensor/value`` and echoes each reading."""

    def __init__(self):
        # General node setup; anonymous=False keeps the fixed node name "app_node".
        rospy.init_node("app_node", anonymous=False)
        # Subscribe with a callback that stores the latest message.
        rospy.Subscriber("sensor/value", Int32, self.update)
        self.value = Int32()

    def run(self):
        """Block the program, servicing subscriber callbacks until shutdown."""
        rospy.spin()

    def update(self, msg):
        """Callback: remember the incoming message and log its payload."""
        self.value = msg
        print(f'Receiving message: value={self.value.data}')
if __name__ == "__main__":
try:
app = App()
app.run()
except rospy.ROSInterruptException:
pass | StarcoderdataPython |
9741421 | from .base_options import BaseOptions
import os
class TestOptions(BaseOptions):
    """This class includes test-time options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        """Extend the shared parser with test-only dataset-name arguments."""
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--frame_name', type=str, default='all_frames_5m6b_norm')
        parser.add_argument('--mask_name', type=str, default='all_masks_5m6b')
        self.isTrain = False  # mark this option set as test-time, not training
        return parser

    def parse(self):
        """Parse options via the base class and cache the result on ``self.opt``."""
        opt = super().parse()
        # opt.frame_path = os.path.join(opt.dataroot, opt.frame_name)
        # opt.mask_path = os.path.join(opt.dataroot, opt.mask_name)
        self.opt = opt
        return self.opt
| StarcoderdataPython |
1641432 | <gh_stars>0
# Generated by Django 3.0 on 2020-01-20 16:33
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ShoppingCart model introduced in shop migration 0009."""

    dependencies = [
        ('shop', '0009_shoppingcart'),
    ]

    operations = [
        migrations.DeleteModel(
            name='ShoppingCart',
        ),
    ]
| StarcoderdataPython |
1979125 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import random
from django.db import models, migrations
def default_email_keys(apps, schema_editor):
    """Backfill ``email_confirm_key`` for every existing user.

    The key is sha1(salt + email) where salt is the first five hex chars of a
    sha1 over a random float — matching how new keys are generated elsewhere.
    """
    user_model = apps.get_model("gatekeeper", "User")
    for user in user_model.objects.all():
        seed = str(random.random()).encode("utf-8")
        salt = hashlib.sha1(seed).hexdigest()[:5]
        digest = hashlib.sha1(salt.encode("utf-8") + user.email.encode("utf-8"))
        user.email_confirm_key = digest.hexdigest()
        user.save()
class Migration(migrations.Migration):
    """Data migration: populate the email_confirm_key column added in 0020."""

    dependencies = [
        ('gatekeeper', '0020_user_email_confirm_key'),
    ]

    operations = [
        # NOTE: no reverse function is provided, so this migration is irreversible.
        migrations.RunPython(default_email_keys),
    ]
| StarcoderdataPython |
3509331 | <gh_stars>0
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object('config')

# Initialize extensions BEFORE the (blocking) dev server can start.  The
# original file created db/migrate after app.run(), so they were never set up
# when the module ran as a script.
db = SQLAlchemy(app)
migrate = Migrate(app, db)

if __name__ == "__main__":
    # BUG fix: `port` was computed but never passed to app.run(), so the
    # PORT environment variable was silently ignored.
    # NOTE(review): ideally this guard should sit at the bottom of the file,
    # after the controller/model imports, so routes are registered first.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
from app.models import tables | StarcoderdataPython |
3566921 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Check the schema parity between Django and SQLAlchemy."""
def test_columns(backend, data_regression):
    """Test parity of table columns."""
    data = {}
    for tbl_name, col_name, data_type, is_nullable, column_default, char_max_length in get_table_fields(backend):
        col_info = {
            'data_type': data_type,
            'is_nullable': is_nullable,
            'default': column_default,
        }
        # Only character columns carry a maximum length.
        if char_max_length:
            col_info['max_length'] = char_max_length
        data.setdefault(tbl_name, {})[col_name] = col_info
    data_regression.check(data)
def test_primary_keys(backend, data_regression):
    """Test parity of primary key constraints."""
    data = {}
    for tbl_name, name, col_names in sorted(get_constraints(backend, 'p')):
        table_entry = data.setdefault(tbl_name, {})
        table_entry[name] = col_names
    data_regression.check(data)
def test_unique_constraints(backend, data_regression):
    """Test parity of unique constraints."""
    data = {}
    for tbl_name, name, col_names in sorted(get_constraints(backend, 'u')):
        table_entry = data.setdefault(tbl_name, {})
        # Column order within a unique constraint is irrelevant; sort for stability.
        table_entry[name] = sorted(col_names)
    data_regression.check(data)
def test_indexes(backend, data_regression):
    """Test parity of indexes."""
    data = {}
    for tbl_name, name, definition in sorted(get_indexes(backend)):
        table_entry = data.setdefault(tbl_name, {})
        table_entry[name] = definition
    data_regression.check(data)
def get_table_fields(backend):
    """Get the fields of all AiiDA tables."""
    # see https://www.postgresql.org/docs/9.1/infoschema-columns.html
    query = (
        'SELECT table_name,column_name,data_type,is_nullable,column_default,character_maximum_length '
        'FROM information_schema.columns '
        "WHERE table_schema = 'public' AND table_name LIKE 'db_%';"
    )
    result = []
    for row in backend.execute_raw(query):
        fields = list(row)
        # Normalise the is_nullable column from 'YES'/'NO' text to a bool.
        fields[3] = fields[3].upper() == 'YES'
        result.append(fields)
    return result
def get_constraints(backend, ctype):
    """Get the constraints of all AiiDA tables, for a particular constraint type."""
    # see https://www.postgresql.org/docs/9.1/catalog-pg-constraint.html
    query = (
        'SELECT tbl.relname,c.conname,ARRAY_AGG(a.attname) FROM pg_constraint AS c '
        'INNER JOIN pg_class AS tbl ON tbl.oid = c.conrelid '
        'INNER JOIN pg_attribute AS a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey) '
        f"WHERE c.contype='{ctype}' AND tbl.relname LIKE 'db_%' "
        'GROUP BY tbl.relname,c.conname;'
    )
    return [list(row) for row in backend.execute_raw(query)]
def get_indexes(backend):
    """Get the indexes of all AiiDA tables."""
    # see https://www.postgresql.org/docs/9.1/view-pg-indexes.html
    query = (
        'SELECT tablename,indexname,indexdef FROM pg_indexes '
        "WHERE tablename LIKE 'db_%' "
        'ORDER BY tablename,indexname;'
    )
    return [list(row) for row in backend.execute_raw(query)]
| StarcoderdataPython |
362560 | import utool as ut
from fixtex import latex_parser
def testdata_fpaths():
    """Collect the LaTeX source files that make up the document.

    The result may be overridden from the command line via ``--fpaths``.
    """
    dpath = '.'
    patterns = [
        'chapter*.tex',
        'sec-*.tex',
        'figdef*.tex',
        'def.tex',
        'pairwise-classifier.tex',
        'graph-id.tex',
        'appendix.tex',
        'main.tex',
        'graph_id.tex',
    ]
    found = ut.glob(dpath, patterns, recursive=True, exclude_dirs=['guts'])
    tex_fpath_list = sorted(found)
    return ut.get_argval('--fpaths', type_=list, default=tex_fpath_list)
# Scan the LaTeX sources for figure-defining \newcommand macros, map each
# macro to the image files it includes, then report images on disk that no
# macro references (fully-unused directories and partially-used ones).
fpaths = testdata_fpaths()
fpath = 'main.tex'
text = ut.readfrom(fpath)
root = latex_parser.LatexDocPart.parse_text(text, debug=None)

# Figure commands are \newcommand definitions outside the shared def files.
commands = list(root.find_descendant_types('newcommand'))
figcommands = []
for node in commands:
    if node.fpath_root() in {'colordef.tex', 'def.tex', 'CrallDef.tex'}:
        continue
    figcommands.append(node)

# Map each command name to every image path its figures include.
cmd_to_fpaths = ut.ddict(list)
for node in figcommands:
    keys = [tup[0] for tup in node.parse_command_def()]
    if not keys:
        print(node)
        continue
    assert len(keys) <= 1
    cmd = keys[0]
    # BUG fix: the original loop re-bound ``fig = figures[0]`` on every
    # iteration, so only the first figure's images were (repeatedly) collected.
    for fig in node.find_descendant_types('figure'):
        img_fpaths = [info['fpath'] for info in fig.parse_includegraphics()]
        if img_fpaths:
            cmd_to_fpaths[cmd].extend(img_fpaths)

# Report figure commands that are defined but never used in the document.
for key in cmd_to_fpaths.keys():
    cmd = key.lstrip('\\')
    if not root.find_descendant_type(cmd):
        print(key)

from os.path import abspath, dirname

used_fpaths = set(ut.emap(abspath, ut.flatten(cmd_to_fpaths.values())))
all_fpaths = set(ut.emap(abspath, ut.glob('.', ['*.png', '*.jpg'], recursive=True)))
unused = list(all_fpaths - used_fpaths)
unuse_dirs = ut.group_items(unused, ut.emap(dirname, unused))

semi_used = {}
for dpath, dir_fpaths in unuse_dirs.items():
    used_in_dpath = set(ut.ls(dpath)) - set(dir_fpaths)
    if not used_in_dpath:
        # completely unused directories
        print(dpath)
    else:
        semi_used[dpath] = dir_fpaths
print(ut.repr4(list(semi_used.keys())))
| StarcoderdataPython |
120716 | from .views import token_bp
| StarcoderdataPython |
1875754 | # Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""On Device Decisioning Artifact Provider"""
import json
from threading import Timer
import urllib3
from urllib3 import Retry
from target_decisioning_engine.constants import LOG_PREFIX
from target_decisioning_engine.constants import FORBIDDEN
from target_decisioning_engine.constants import HTTP_GET
from target_decisioning_engine.constants import NOT_MODIFIED
from target_decisioning_engine.constants import OK
from target_decisioning_engine.constants import MINIMUM_POLLING_INTERVAL
from target_decisioning_engine.constants import DEFAULT_POLLING_INTERVAL
from target_decisioning_engine.constants import NUM_FETCH_RETRIES
from target_decisioning_engine.events import ARTIFACT_DOWNLOAD_SUCCEEDED
from target_decisioning_engine.events import GEO_LOCATION_UPDATED
from target_decisioning_engine.events import ARTIFACT_DOWNLOAD_FAILED
from target_decisioning_engine.messages import MESSAGES
from target_decisioning_engine.timings import TIMING_ARTIFACT_READ_JSON
from target_decisioning_engine.timings import TIMING_ARTIFACT_DOWNLOADED_TOTAL
from target_decisioning_engine.timings import TIMING_ARTIFACT_DOWNLOADED_FETCH
from target_decisioning_engine.timings import TIMING_ARTIFACT_GET_INITIAL
from target_decisioning_engine.trace_provider import ArtifactTracer
from target_decisioning_engine.utils import determine_artifact_location
from target_decisioning_engine.utils import get_http_codes_to_retry
from target_decisioning_engine.geo_provider import create_or_update_geo_object
from target_tools.utils import is_int
from target_tools.utils import to_dict
from target_tools.utils import is_string
from target_tools.utils import is_dict
from target_tools.logger import get_logger
from target_tools.perf_tool import get_perf_tool_instance
from target_tools.utils import noop
LOG_TAG = "{}.ArtifactProvider".format(LOG_PREFIX)
BACKOFF_FACTOR = 0.1
CODES_TO_RETRY = get_http_codes_to_retry()
def get_min_polling_interval():
    """Minimum allowed amount of time between polling - in seconds.

    Wrapped in a function (rather than used directly) so tests can patch it.
    """
    return MINIMUM_POLLING_INTERVAL
class ArtifactProvider:
    """Downloads the on-device decisioning artifact and keeps it fresh by polling.

    Subscribers registered via :meth:`subscribe` are notified whenever a new
    artifact payload is fetched.
    """

    def __init__(self, config):
        """
        :param config: (target_decisioning_engine.types.decisioning_config.DecisioningConfig)
            Decisioning engine configuration
        """
        self.pool_manager = urllib3.PoolManager()
        self.http_retry = Retry(total=NUM_FETCH_RETRIES, backoff_factor=BACKOFF_FACTOR,
                                status_forcelist=CODES_TO_RETRY)
        self.config = config
        self.logger = get_logger()
        self.event_emitter = config.event_emitter or noop
        self.polling_interval = None
        self.artifact_location = None
        self.polling_halted = False
        self.polling_timer = None
        self.artifact = None
        self.subscriptions = {}
        # Monotonically increasing id source for subscriptions (never reused;
        # see unsubscribe()).
        self.subscription_count = 0
        self.last_response_etag = None
        self.last_response_data = None
        self.artifact_tracer = None
        self.perf_tool = get_perf_tool_instance()

    def _get_polling_interval(self):
        """Get artifact polling interval in seconds (0 disables polling)."""
        if self.config.polling_interval == 0:
            return 0
        return max(
            get_min_polling_interval(),
            self.config.polling_interval if is_int(self.config.polling_interval) else DEFAULT_POLLING_INTERVAL
        )

    def initialize(self):
        """Initialize ArtifactProvider and fetch the initial artifact.

        Polling is scheduled even if the initial fetch raises, so a transient
        failure can be recovered on a later poll.
        """
        self.polling_interval = self._get_polling_interval()
        self.artifact_location = self.config.artifact_location if is_string(self.config.artifact_location) else \
            determine_artifact_location(self.config)
        try:
            self.artifact = self._get_initial_artifact()
            self.artifact_tracer = ArtifactTracer(
                self.artifact_location,
                self.config.artifact_payload,
                self.polling_interval,
                self.polling_halted,
                self.artifact
            )
            self.subscribe(self._artifact_tracer_update)
        finally:
            self._schedule_next_update()

    def _emit_new_artifact(self, artifact_payload, geo_context=None):
        """Send events and notify subscribers of new artifact

        :param artifact_payload: (dict) artifact payload in dict format
        :param geo_context: (dict) geo object in dict format
        :return: None
        """
        if not geo_context:
            geo_context = {}
        self.event_emitter(ARTIFACT_DOWNLOAD_SUCCEEDED, {
            "artifact_location": self.artifact_location,
            "artifact_payload": artifact_payload
        })
        self.event_emitter(GEO_LOCATION_UPDATED, {
            "geo_context": geo_context
        })
        for subscription_func in self.subscriptions.values():
            subscription_func(artifact_payload)

    def subscribe(self, callback_func):
        """Register *callback_func* for new-artifact notifications.

        :return: (int) a subscription id usable with unsubscribe()
        """
        self.subscription_count += 1
        self.subscriptions[self.subscription_count] = callback_func
        return self.subscription_count

    def unsubscribe(self, _id):
        """Remove the subscription registered under ``_id`` (no-op if unknown).

        BUG fix: the previous implementation also decremented
        ``subscription_count``, which allowed a later subscribe() to hand out
        an id that was still in use and silently overwrite that subscription.
        The counter is now monotonic, so ids are never reused.
        """
        self.subscriptions.pop(_id, None)

    def _fetch_and_schedule(self):
        """Fetch artifact and schedule next polling"""
        self.artifact = self._fetch_artifact(self.artifact_location)
        self._schedule_next_update()

    def _schedule_next_update(self):
        """Schedule next artifact polling based on configured interval (in seconds)"""
        if self.polling_interval == 0 or self.polling_halted:
            return
        self.polling_timer = Timer(self.polling_interval, self._fetch_and_schedule)
        self.polling_timer.start()

    def stop_polling(self):
        """Disable artifact polling"""
        if self.polling_timer:
            self.polling_timer.cancel()
            self.polling_timer = None
        self.polling_halted = True

    def resume_polling(self):
        """Enable artifact polling"""
        self.polling_halted = False
        self._schedule_next_update()

    def get_artifact(self):
        """Return current artifact"""
        return self.artifact

    def _get_initial_artifact(self):
        """Fetch initial artifact, preferring a payload supplied via config."""
        self.perf_tool.time_start(TIMING_ARTIFACT_GET_INITIAL)
        artifact = self.config.artifact_payload if is_dict(self.config.artifact_payload) else \
            self._fetch_artifact(self.artifact_location)
        self.perf_tool.time_end(TIMING_ARTIFACT_GET_INITIAL)
        return artifact

    def _artifact_tracer_update(self, artifact):
        """Update ArtifactTracer with latest artifact"""
        self.artifact_tracer.provide_new_artifact(artifact)

    def get_trace(self):
        """Returns ArtifactTracer in dict format"""
        return self.artifact_tracer.to_dict()

    def _fetch_artifact(self, artifact_url):
        """Fetch artifact from server.

        Uses ETag-based conditional requests to avoid re-downloading an
        unchanged artifact.  Returns the parsed payload dict, or None if the
        request fails (the failure is logged and emitted as an event).
        """
        self.perf_tool.time_start(TIMING_ARTIFACT_DOWNLOADED_TOTAL)
        headers = {}
        self.logger.debug("{} fetching artifact - {}".format(LOG_TAG, artifact_url))
        if self.last_response_etag:
            headers["If-None-Match"] = self.last_response_etag
        try:
            self.perf_tool.time_start(TIMING_ARTIFACT_DOWNLOADED_FETCH)
            res = self.pool_manager.request(HTTP_GET, artifact_url, headers=headers, retries=self.http_retry)
            self.perf_tool.time_end(TIMING_ARTIFACT_DOWNLOADED_FETCH)
            self.logger.debug("{} artifact received - status={}".format(LOG_TAG, res.status))
            if res.status == NOT_MODIFIED and self.last_response_data:
                # Server confirmed our cached copy is still current.
                return self.last_response_data
            if res.status == FORBIDDEN:
                raise Exception("Artifact request is not authorized. This is likely due to On-Device-Decisioning "
                                "being disabled in Admin settings. Please enable and try again.")
            if res.status != OK:
                raise Exception("Non-200 status code response from artifact request: {}".format(res.status))
            self.perf_tool.time_start(TIMING_ARTIFACT_READ_JSON)
            response_data = json.loads(res.data)
            self.perf_tool.time_end(TIMING_ARTIFACT_READ_JSON)
            etag = res.headers.get("Etag")
            if etag:
                # Cache the payload so a future 304 can short-circuit.
                self.last_response_data = response_data
                self.last_response_etag = etag
            geo = create_or_update_geo_object(geo_data=res.headers)
            self._emit_new_artifact(response_data, to_dict(geo))
            self.perf_tool.time_end(TIMING_ARTIFACT_DOWNLOADED_TOTAL)
            return response_data
        except Exception as err:  # deliberate best-effort: log, emit, return None
            self.logger.error(MESSAGES.get("ARTIFACT_FETCH_ERROR")(str(err)))
            failure_event = {
                "artifact_location": artifact_url,
                "error": err
            }
            self.event_emitter(ARTIFACT_DOWNLOAD_FAILED, failure_event)
            return None
| StarcoderdataPython |
1647917 | <gh_stars>0
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import unittest
from data_aggregator.models import Job
from data_aggregator.management.commands._mixins import RunJobMixin
from django.test import TestCase
from mock import MagicMock, patch
class TestRunJobMixin(TestCase):
    """Unit tests for RunJobMixin.work() and RunJobMixin.run_job()."""

    @patch("data_aggregator.management.commands._mixins.JobDAO")
    def test_work(self, mock_job_dao):
        # work() should delegate straight to JobDAO().run_job(job).
        mixin = RunJobMixin()
        job = Job()
        mock_job_dao_inst = mock_job_dao()
        mixin.work(job)
        mock_job_dao_inst.run_job.assert_called_once_with(job)

    @patch("data_aggregator.management.commands._mixins.traceback")
    def test_run_job(self, mock_traceback):
        mixin = RunJobMixin()
        mixin.work = MagicMock()
        mock_job = MagicMock()
        # Happy path: start -> work -> end, and the job is returned.
        completed_job = mixin.run_job(mock_job)
        mock_job.start_job.assert_called_once()
        mixin.work.assert_called_once()
        mock_job.end_job.assert_called_once()
        self.assertEqual(completed_job, mock_job)
        # Failure path: work() raises, so end_job must NOT be called and the
        # traceback text is stored on the job before saving.
        mixin.work.reset_mock()
        mock_job.reset_mock()
        mixin.work.side_effect = Exception
        mock_tb = "MOCK TRACEBACK"
        mock_traceback.format_exc.return_value = mock_tb
        completed_job = mixin.run_job(mock_job)
        mock_job.start_job.assert_called_once()
        mixin.work.assert_called_once()
        mock_job.end_job.assert_not_called()
        self.assertEqual(mock_job.message, mock_tb)
        mock_job.save.assert_called_once()
        self.assertEqual(completed_job, mock_job)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1846226 | <gh_stars>0
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# Load seaborn's bundled "flights" example dataset and draw a categorical plot
# of passengers per month.
database = sb.load_dataset("flights")
print(database)
# Default kind is "strip"
# sb.catplot(x="month", y="passengers", data=database, kind='violin')
sb.catplot(x="month", y="passengers", data=database, kind='box')
plt.show() | StarcoderdataPython |
4838617 | import re
from generators.common.Helper import Helper, AttributeKind
class PythonHelper(Helper):
    """Python-specific code-generation helper: import collection, naming
    conventions, template selection and type mapping for generated builders."""

    @staticmethod
    def add_required_import(required_import: set, import_type, class_name, base_class_name):
        """Accumulate into *required_import* the import lines needed for
        *import_type* when generating *class_name*, and return the set.

        Typenames are extracted by splitting on '[' / ']' so generic forms
        like ``List[Foo]`` contribute both ``List`` and ``Foo``.
        """
        for typename in re.split('[\\[\\]]', import_type):
            if typename:
                if typename in ['List']:
                    required_import.add('from typing import ' + typename)
                elif 'TransactionHeaderBuilder' in typename:
                    # Header builders are only imported when they are the base class.
                    if typename == base_class_name:
                        required_import.add('from .' + typename + ' import ' + typename)
                elif typename != class_name and str(typename)[0].isupper():
                    # Capitalised names are assumed to be sibling generated classes.
                    required_import.add('from .' + typename + ' import ' + typename)
        # Transaction builders also need hexlify/re for their serialization code.
        if class_name.endswith('TransactionBuilder') or class_name.endswith('TransactionBodyBuilder') or class_name == 'CosignatureBuilder':
            required_import.add('from binascii import hexlify')
        if class_name.endswith('TransactionBuilder') or class_name.endswith('TransactionBodyBuilder'):
            required_import.add('import re')
        if class_name == 'AggregateTransactionBodyBuilder':
            required_import.add('from .EmbeddedTransactionBuilderFactory import EmbeddedTransactionBuilderFactory')
        return required_import

    @staticmethod
    def camel_to_snake(name):
        """Convert a CamelCase identifier to snake_case."""
        name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()

    @staticmethod
    def get_class_template_path(template_path, name):
        """Pick the Mako template: transactions get their own class template."""
        if name.endswith('Transaction') or name.endswith('TransactionBody'):
            return template_path + 'TransactionClass.mako'
        return template_path + 'Class.mako'

    @staticmethod
    def get_all_constructor_params(attributes):
        """Return attributes that become constructor parameters (size fields
        are computed, so they are excluded)."""
        return [a for a in attributes if not a.kind == AttributeKind.SIZE_FIELD]

    @staticmethod
    def get_body_class_name(name):
        """Map a transaction class name to its '...Body' companion name."""
        body_name = name if not name.startswith('Embedded') else name[8:]
        if name.startswith('Aggregate') and name.endswith('Transaction'):
            body_name = 'AggregateTransaction'
        return '{0}Body'.format(body_name)

    def get_builtin_type(self, size):
        """All fixed-size scalar fields map to Python's unbounded int."""
        return 'int'

    @staticmethod
    def get_condition_operation_text(op):
        """Return a format string for a condition check ('has' -> membership)."""
        if op == 'has':
            return '{1} in {0}'
        return '{0} == {1}'

    def get_generated_type(self, schema, attribute, attribute_kind):
        """Resolve the generated Python type annotation for *attribute*."""
        typename = attribute['type']
        if attribute_kind in (AttributeKind.SIMPLE, AttributeKind.SIZE_FIELD):
            return self.get_builtin_type(self.get_attribute_size(schema, attribute))
        if attribute_kind == AttributeKind.BUFFER:
            return 'bytes'
        if not self.is_byte_type(typename):
            typename = self.get_generated_class_name(typename, attribute, schema)
        if self.is_any_array_kind(attribute_kind):
            return 'List[{0}]'.format(typename)
        if attribute_kind == AttributeKind.FLAGS:
            return 'List[{0}]'.format(typename)
        return typename
| StarcoderdataPython |
6522059 | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule implementations for creating watchOS applications and bundles.
DO NOT load this file directly; use the macro in
@build_bazel_rules_apple//apple:watchos.bzl instead. Bazel rules receive their name at
*definition* time based on the name of the global to which they are assigned.
We want the user to call macros that have the same name, to get automatic
binary creation, entitlements support, and other features--which requires a
wrapping macro because rules cannot invoke other rules.
"""
load("@build_bazel_rules_apple//apple/bundling:binary_support.bzl", "binary_support")
load("@build_bazel_rules_apple//apple/bundling:bundler.bzl", "bundler")
load(
"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/bundling:rule_factory.bzl",
"rule_factory",
)
load("@build_bazel_rules_apple//apple/bundling:run_actions.bzl", "run_actions")
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleInfo",
"AppleResourceSet",
"WatchosApplicationBundleInfo",
"WatchosExtensionBundleInfo",
)
def _watchos_application_impl(ctx):
    """Implementation of the watchos_application Skylark rule."""
    app_icons = ctx.files.app_icons
    if app_icons:
        # All icons must come from a single .appiconset asset catalog.
        bundling_support.ensure_single_xcassets_type(
            "app_icons",
            app_icons,
            "appiconset",
        )

    # Collect asset catalogs and storyboards, if any are present.
    additional_resource_sets = []
    additional_resources = depset(app_icons + ctx.files.storyboards)
    if additional_resources:
        additional_resource_sets.append(AppleResourceSet(
            resources = additional_resources,
        ))

    # The (mandatory) watch extension is embedded under PlugIns, and its
    # Info.plist must reference this app's bundle id.
    embedded_bundles = []
    ext = ctx.attr.extension
    if ext:
        embedded_bundles.append(bundling_support.embedded_bundle(
            "PlugIns",
            ext,
            verify_has_child_plist = True,
            parent_bundle_id_reference = [
                "NSExtension",
                "NSExtensionAttributes",
                "WKAppBundleIdentifier",
            ],
        ))

    # watchOS 2 applications use an Apple-provided stub binary.
    binary_artifact = binary_support.create_stub_binary(ctx)

    additional_providers, legacy_providers = bundler.run(
        ctx,
        "WatchosApplicationArchive",
        "watchOS application",
        ctx.attr.bundle_id,
        additional_resource_sets = additional_resource_sets,
        binary_artifact = binary_artifact,
        embedded_bundles = embedded_bundles,
    )

    # TODO(b/36513412): Support 'bazel run'.
    return struct(
        providers = [
            WatchosApplicationBundleInfo(),
        ] + additional_providers,
        **legacy_providers
    )
# Public watchos_application rule: bundles a watch app (stub binary, icons,
# storyboards) with a mandatory watchos_extension embedded under PlugIns.
watchos_application = rule_factory.make_bundling_rule(
    _watchos_application_impl,
    additional_attrs = {
        "app_icons": attr.label_list(allow_files = True),
        "extension": attr.label(
            providers = [[AppleBundleInfo, WatchosExtensionBundleInfo]],
            mandatory = True,
        ),
        "storyboards": attr.label_list(
            allow_files = [".storyboard"],
        ),
    },
    archive_extension = ".zip",
    bundles_frameworks = True,
    code_signing = rule_factory.code_signing(".mobileprovision"),
    device_families = rule_factory.device_families(allowed = ["watch"]),
    needs_pkginfo = True,
    path_formats = rule_factory.simple_path_formats(
        path_in_archive_format = "%s",
    ),
    platform_type = apple_common.platform_type.watchos,
    product_type = rule_factory.product_type(
        apple_product_type.watch2_application,
        private = True,
    ),
    # No binary rule: the app wraps an Apple-provided stub, not user code.
    use_binary_rule = False,
)
def _watchos_extension_impl(ctx):
    """Implementation of the watchos_extension Skylark rule."""
    app_icons = ctx.files.app_icons
    if app_icons:
        # All icons must come from a single .appiconset asset catalog.
        bundling_support.ensure_single_xcassets_type(
            "app_icons",
            app_icons,
            "appiconset",
        )

    # Collect asset catalogs and storyboards, if any are present.
    additional_resource_sets = []
    additional_resources = depset(app_icons)
    if additional_resources:
        additional_resource_sets.append(AppleResourceSet(
            resources = additional_resources,
        ))

    # Unlike the application, the extension carries the user's compiled code.
    binary_provider = binary_support.get_binary_provider(
        ctx.attr.deps,
        apple_common.AppleExecutableBinary,
    )
    binary_artifact = binary_provider.binary
    deps_objc_provider = binary_provider.objc
    additional_providers, legacy_providers = bundler.run(
        ctx,
        "WatchosExtensionArchive",
        "watchOS extension",
        ctx.attr.bundle_id,
        additional_resource_sets = additional_resource_sets,
        binary_artifact = binary_artifact,
        deps_objc_providers = [deps_objc_provider],
    )

    return struct(
        providers = [
            WatchosExtensionBundleInfo(),
            binary_provider,
        ] + additional_providers,
        **legacy_providers
    )
# Public watchos_extension rule: bundles the WatchKit extension containing the
# user's compiled binary; embedded by watchos_application.
watchos_extension = rule_factory.make_bundling_rule(
    _watchos_extension_impl,
    additional_attrs = {
        "app_icons": attr.label_list(allow_files = True),
    },
    archive_extension = ".zip",
    code_signing = rule_factory.code_signing(".mobileprovision"),
    device_families = rule_factory.device_families(allowed = ["watch"]),
    path_formats = rule_factory.simple_path_formats(path_in_archive_format = "%s"),
    platform_type = apple_common.platform_type.watchos,
    product_type = rule_factory.product_type(
        apple_product_type.watch2_extension,
        private = True,
    ),
)
| StarcoderdataPython |
4851927 | <reponame>jstremme/classifier-recall-over-time
import dash
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from utilities.color_palette import ColorPalette
from utilities.load_data import get_performance_values
def create_callbacks(app):
    """Register the Dash callbacks on *app* and return it."""

    @app.callback(dash.dependencies.Output('graph', 'figure'),
                  [dash.dependencies.Input('threshold', 'value')])
    def update_graph(threshold):
        """Rebuild the stacked bar chart when the threshold slider moves."""
        # The slider supplies a percentage; the data layer expects a 0-1 fraction.
        float_threshold = int(threshold) * 0.01
        months, num_recalled, num_diagnosed, recall, precision = get_performance_values(threshold=float_threshold)
        print('Months: {}'.format(months))
        print('Num Recalled: {}'.format(num_recalled))
        print('Num Diagnosed: {}'.format(num_diagnosed))
        print('Recall: {}'.format(recall))
        print('Precision: {}'.format(precision))
        colors = ColorPalette()
        # One bar series per outcome, stacked per month.
        trace1 = go.Bar(x=months,
                        y=num_recalled,
                        name='Recalled',
                        marker=dict(color=colors.recalled))
        trace2 = go.Bar(x=months,
                        y=num_diagnosed,
                        name='Diagnosed',
                        marker=dict(color=colors.diagnosed))
        return {
            'data':
                [trace1, trace2],
            'layout':
                go.Layout(
                    barmode='stack',
                    plot_bgcolor=colors.background,
                    paper_bgcolor=colors.background,
                    font={'color': colors.text},
                    title='Total Recall: {}\n'.format(recall)
                          + ' -- ' +
                          'Total Precision: {}'.format(precision),
                    xaxis={'title': 'Month (M)'},
                    yaxis={'title': 'Cases'})
        }
    return app
| StarcoderdataPython |
6470533 | <filename>mininet/topo.py<gh_stars>1-10
#!/usr/bin/python
# Copyright 2019-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.topo import Topo
from bmv2 import ONOSStratumSwitch
from host6 import IPv6Host
CPU_PORT = 255
class TutorialTopo(Topo):
    """
    /--------\ /----\ /----\ /----\ /----\
    | Site A |---| R1 |---| R4 |---| R5 |---| R8 |
    \________/ \____/ \____/ \____/ \____/
                  |      |      |      |
                  |      |      |      |
    /--------\ /----\ /----\ /----\ /----\
    | Site B |---| R2 |---| R3 |---| R6 |---| R7 |
    \________/ \____/ \____/ \____/ \____/
    """

    def __init__(self, *args, **kwargs):
        Topo.__init__(self, *args, **kwargs)
        # End routers (the ones hosts attach to); each switch gets its own
        # gRPC port (5000N) and shares the packet-in/out CPU port.
        r1 = self.addSwitch('r1', cls=ONOSStratumSwitch, grpcport=50001,
                            cpuport=CPU_PORT)
        r2 = self.addSwitch('r2', cls=ONOSStratumSwitch, grpcport=50002,
                            cpuport=CPU_PORT)
        # Transit routers
        r3 = self.addSwitch('r3', cls=ONOSStratumSwitch, grpcport=50003,
                            cpuport=CPU_PORT)
        r4 = self.addSwitch('r4', cls=ONOSStratumSwitch, grpcport=50004,
                            cpuport=CPU_PORT)
        r5 = self.addSwitch('r5', cls=ONOSStratumSwitch, grpcport=50005,
                            cpuport=CPU_PORT)
        r6 = self.addSwitch('r6', cls=ONOSStratumSwitch, grpcport=50006,
                            cpuport=CPU_PORT)
        r7 = self.addSwitch('r7', cls=ONOSStratumSwitch, grpcport=50007,
                            cpuport=CPU_PORT)
        r8 = self.addSwitch('r8', cls=ONOSStratumSwitch, grpcport=50008,
                            cpuport=CPU_PORT)
        # Switch Links (see the diagram in the class docstring)
        self.addLink(r1, r2)
        self.addLink(r1, r4)
        self.addLink(r2, r3)
        self.addLink(r4, r5)
        self.addLink(r4, r3)
        self.addLink(r3, r6)
        self.addLink(r5, r8)
        self.addLink(r5, r6)
        self.addLink(r6, r7)
        self.addLink(r7, r8)
        # IPv6 hosts attached to the end routers (h1 -> r1, h2 -> r2)
        h1 = self.addHost('h1', cls=IPv6Host, mac="00:00:00:00:00:10",
                          ipv6='2001:1:1::1/64', ipv6_gw='2001:1:1::ff')
        h2 = self.addHost('h2', cls=IPv6Host, mac="00:00:00:00:00:20",
                          ipv6='2001:1:2::1/64', ipv6_gw='2001:1:2::ff')
        self.addLink(h1, r1)
        self.addLink(h2, r2)
def main(argz):
    """Build the tutorial topology and drop into the Mininet CLI.

    :param argz: parsed argparse namespace providing ``onos_ip``.
    """
    net = Mininet(topo=TutorialTopo(), controller=None)
    net.addController(RemoteController('c0', ip=argz.onos_ip))
    net.start()
    CLI(net)  # blocks until the user exits the CLI
    net.stop()
if __name__ == "__main__":
    # Command-line entry point: requires the ONOS controller address.
    arg_parser = argparse.ArgumentParser(
        description='Mininet script for cisco-srv6 topology')
    arg_parser.add_argument('--onos-ip', help='ONOS controller IP address',
                            type=str, action="store", required=True)
    cli_args = arg_parser.parse_args()
    setLogLevel('info')
    main(cli_args)
| StarcoderdataPython |
3308144 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class GitCommitRef(Model):
    """GitCommitRef.

    Generated REST model describing a reference to a Git commit
    (identities, change data, statuses and linked work items).

    :param _links:
    :type _links: ReferenceLinks
    :param author:
    :type author: :class:`GitUserDate <microsoft.-team-foundation.-source-control.-web-api.v4_0.models.GitUserDate>`
    :param change_counts:
    :type change_counts: dict
    :param changes:
    :type changes: list of :class:`object <microsoft.-team-foundation.-source-control.-web-api.v4_0.models.object>`
    :param comment:
    :type comment: str
    :param comment_truncated:
    :type comment_truncated: bool
    :param commit_id:
    :type commit_id: str
    :param committer:
    :type committer: :class:`GitUserDate <microsoft.-team-foundation.-source-control.-web-api.v4_0.models.GitUserDate>`
    :param parents:
    :type parents: list of str
    :param remote_url:
    :type remote_url: str
    :param statuses:
    :type statuses: list of :class:`GitStatus <microsoft.-team-foundation.-source-control.-web-api.v4_0.models.GitStatus>`
    :param url:
    :type url: str
    :param work_items:
    :type work_items: list of ResourceRef
    """

    # Maps python attribute names to their camelCase wire names and msrest
    # (de)serialization types.
    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'author': {'key': 'author', 'type': 'GitUserDate'},
        'change_counts': {'key': 'changeCounts', 'type': '{int}'},
        'changes': {'key': 'changes', 'type': '[object]'},
        'comment': {'key': 'comment', 'type': 'str'},
        'comment_truncated': {'key': 'commentTruncated', 'type': 'bool'},
        'commit_id': {'key': 'commitId', 'type': 'str'},
        'committer': {'key': 'committer', 'type': 'GitUserDate'},
        'parents': {'key': 'parents', 'type': '[str]'},
        'remote_url': {'key': 'remoteUrl', 'type': 'str'},
        'statuses': {'key': 'statuses', 'type': '[GitStatus]'},
        'url': {'key': 'url', 'type': 'str'},
        'work_items': {'key': 'workItems', 'type': '[ResourceRef]'}
    }

    def __init__(self, _links=None, author=None, change_counts=None, changes=None, comment=None, comment_truncated=None, commit_id=None, committer=None, parents=None, remote_url=None, statuses=None, url=None, work_items=None):
        """Initialize all model fields; every field is optional."""
        super(GitCommitRef, self).__init__()
        self._links = _links
        self.author = author
        self.change_counts = change_counts
        self.changes = changes
        self.comment = comment
        self.comment_truncated = comment_truncated
        self.commit_id = commit_id
        self.committer = committer
        self.parents = parents
        self.remote_url = remote_url
        self.statuses = statuses
        self.url = url
        self.work_items = work_items
| StarcoderdataPython |
6512977 | <reponame>Mediotaku/Kanji-Recognition
# importamos los modulos necesarios de keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.utils import np_utils
from keras.models import model_from_json
import matplotlib.pyplot as plt
import numpy as np
# Load the Kuzushiji-MNIST dataset (comments translated from Spanish).
#(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Data is shipped as numpy .npz archives.
X_train = np.load('kmnist-train-imgs.npz')['arr_0']
y_train = np.load('kmnist-train-labels.npz')['arr_0']
X_test = np.load('kmnist-test-imgs.npz')['arr_0']
y_test = np.load('kmnist-test-labels.npz')['arr_0']
# num_pixels = flattened image size (28*28 = 784), used by baseline_model;
# the images themselves are kept as (28, 28, 1) tensors for the CNNs.
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1)).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
print(num_pixels,num_classes)
learning_rate = 1e-3
epochs = 50
# NOTE(review): learning_rate/epochs/decay_rate are only referenced by the
# commented-out SGD optimizer inside baseline_model - confirm before removing.
decay_rate = learning_rate / epochs
def baseline_model():
    """Build the fully-connected baseline classifier.

    Maps a flattened 784-pixel input through two hidden layers
    (512 and 256 units, ReLU) to a 10-way softmax head.

    NOTE: the rest of the module feeds (28, 28, 1) tensors to the active
    model, so this baseline expects pre-flattened input; it is kept for
    reference experiments.

    :return: a compiled tf.keras Sequential model.
    """
    # BUG FIX: the original referenced `Sequential`, `Dense` and `Dropout`
    # without importing them (only `keras` and `layers` are imported at the
    # top of the file), raising NameError when called. Use the imported
    # names, consistent with CNN_model()/CNN_deep_model().
    model = keras.Sequential()
    model.add(layers.Dense(num_pixels, input_dim=num_pixels,
                           kernel_initializer='normal', activation='relu'))
    model.add(layers.Dense(512, kernel_initializer='normal', activation='relu'))
    model.add(layers.Dense(256, kernel_initializer='normal', activation='relu'))
    # Output layer: one unit per character class.
    model.add(layers.Dense(num_classes, kernel_initializer='normal',
                           activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer="adam",
                  metrics=['accuracy'])
    return model
def CNN_model():
    """Build a small two-block CNN: (conv, pool) x2, dropout, softmax head.

    :return: a compiled tf.keras Sequential model for (28, 28, 1) inputs.
    """
    model = keras.Sequential([
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(32, (3, 3), activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def CNN_deep_model():
    """Build the deeper CNN: four (conv, pool, dropout) blocks plus a wide
    dense layer before the softmax head.

    :return: a compiled tf.keras Sequential model for (28, 28, 1) inputs.
    """
    model = keras.Sequential()
    model.add(keras.Input(shape=(28, 28, 1)))
    # Filter count doubles per block; only the last block pads with 'same'
    # (the feature map is too small by then for 'valid' convolutions).
    conv_blocks = ((32, 'valid'), (64, 'valid'), (128, 'valid'), (256, 'same'))
    for filters, pad in conv_blocks:
        model.add(layers.Conv2D(filters, (3, 3), activation='relu', padding=pad))
        model.add(layers.MaxPooling2D(pool_size=(2, 2), padding=pad))
        model.add(layers.Dropout(0.25))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096))
    model.add(layers.Activation('relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def predict():
    """Classify the first training image with the module-level `model`
    and display it together with the predicted class.

    Debug helper: relies on the globals `model` and `X_train`.
    """
    img = X_train[0]
    # BUG FIX: Sequential.predict_classes() was removed in TF 2.6; take the
    # argmax over the softmax output instead. Also reshape to the
    # (1, 28, 28, 1) batch the CNN expects - the old flat (1, 784) shape
    # only matched the dense baseline model.
    probs = model.predict(img.reshape((1, 28, 28, 1)))
    classname = int(np.argmax(probs, axis=1)[0])
    print("Class: ", classname)
    plt.imshow(img.reshape((28, 28)))
    plt.title(classname)
    plt.show()
def show_history(history, scores):
    """Plot train/validation accuracy per epoch; the title shows the
    baseline error computed from the final test accuracy.

    :param history: the History object returned by model.fit()
    :param scores: [loss, accuracy] list returned by model.evaluate()
    """
    for series in ('accuracy', 'val_accuracy'):
        plt.plot(history.history[series])
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train_accuracy', 'validation_accuracy'], loc='best')
    plt.title("Baseline Error: %.2f%%" % (100-scores[1]*100))
    plt.show()
# build the model (switch the commented lines to try the other architectures)
#model = baseline_model()
model = CNN_model()
#model = CNN_deep_model()
# Fit the model
result = model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=16, epochs=20, verbose=2)
# Final evaluation of the model on the held-out test set
scores = model.evaluate(X_test, y_test, verbose=2)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
show_history(result, scores)
# serialize the architecture to JSON ...
model_json = model.to_json()
with open("model_hiraganaadam.json", "w") as json_file:
    json_file.write(model_json)
# ... and the weights to HDF5, for later reloading with model_from_json
model.save_weights("model_hiraganaadam.h5")
print("Saved model to disk")
| StarcoderdataPython |
8185026 | """
http://community.topcoder.com/stat?c=problem_statement&pm=1692
Single Round Match 146 Round 1 - Division II, Level One
Single Round Match 212 Round 1 - Division II, Level One
"""
class Yahtzee:
    def maxPoints(self, toss):
        """Return the best "upper section" score for a toss: the face value
        (1-6) times the number of times it was rolled, maximized over faces.

        :param toss: sequence of die values in 1..6
        :return: the maximum achievable score (0 for an empty toss)
        """
        # Scoring face f yields f * count(f); evaluate every face.
        return max(toss.count(face) * face for face in range(1, 7))
| StarcoderdataPython |
8193554 | # SPDX-FileCopyrightText: 2014 <NAME> for Adafruit Industries
# SPDX-License-Identifier: MIT
# This example is for use on (Linux) computers that are using CPython with
# Adafruit Blinka to support CircuitPython libraries. CircuitPython does
# not support PIL/pillow (python imaging library)!
from board import SCL, SDA
import busio
from PIL import Image
import adafruit_ssd1306
# Create the I2C interface.
i2c = busio.I2C(SCL, SDA)
# Create the SSD1306 OLED class.
# The first two parameters are the pixel width and pixel height. Change these
# to the right size for your display!
disp = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c)
# Note you can change the I2C address, or add a reset pin:
# disp = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, addr=0x3c, reset=reset_pin)
# Clear display (fill with black pixels, then push the buffer).
disp.fill(0)
disp.show()
# Load image based on OLED display height. Note that image is converted to 1 bit color.
if disp.height == 64:
    image = Image.open("happycat_oled_64.ppm").convert("1")
else:
    image = Image.open("happycat_oled_32.ppm").convert("1")
# Alternatively load a different format image, resize it, and convert to 1 bit color.
# image = Image.open('happycat.png').resize((disp.width, disp.height), Image.ANTIALIAS).convert('1')
# Display image (copy into the framebuffer, then push to the panel).
disp.image(image)
disp.show()
| StarcoderdataPython |
6641838 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\conta\Documents\script\Wizard\App\ui_files\error_handler.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    # pyuic5-generated UI class for the "error handler" dialog: an icon +
    # title header, a read-only text area for the traceback, and
    # Close / Submit buttons. Do not hand-edit the widget setup below -
    # it is regenerated from the .ui file.
    def setupUi(self, Form):
        """Create and lay out all widgets on the given top-level Form."""
        Form.setObjectName("Form")
        Form.resize(501, 670)
        self.verticalLayout = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout.setContentsMargins(1, 1, 1, 1)
        self.verticalLayout.setSpacing(1)
        self.verticalLayout.setObjectName("verticalLayout")
        # Header frame: fixed-height strip with the error icon and title.
        self.frame = QtWidgets.QFrame(Form)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
        self.frame.setSizePolicy(sizePolicy)
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.error_handler_icon_label = QtWidgets.QLabel(self.frame)
        self.error_handler_icon_label.setMinimumSize(QtCore.QSize(40, 40))
        self.error_handler_icon_label.setMaximumSize(QtCore.QSize(40, 40))
        self.error_handler_icon_label.setText("")
        self.error_handler_icon_label.setObjectName("error_handler_icon_label")
        self.horizontalLayout_2.addWidget(self.error_handler_icon_label)
        self.label = QtWidgets.QLabel(self.frame)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        self.verticalLayout.addWidget(self.frame)
        # Body frame: read-only text widget holding the error details.
        self.frame_3 = QtWidgets.QFrame(Form)
        self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_3.setObjectName("frame_3")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame_3)
        self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_4.setSpacing(1)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.handler_textEdit = QtWidgets.QTextEdit(self.frame_3)
        self.handler_textEdit.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
        self.handler_textEdit.setObjectName("handler_textEdit")
        self.verticalLayout_4.addWidget(self.handler_textEdit)
        self.verticalLayout.addWidget(self.frame_3)
        # Bottom row: Close and Submit buttons side by side.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(1)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.cancel_pushButton = QtWidgets.QPushButton(Form)
        self.cancel_pushButton.setMinimumSize(QtCore.QSize(0, 40))
        self.cancel_pushButton.setObjectName("cancel_pushButton")
        self.horizontalLayout.addWidget(self.cancel_pushButton)
        self.submit_error_pushButton = QtWidgets.QPushButton(Form)
        self.submit_error_pushButton.setMinimumSize(QtCore.QSize(0, 40))
        self.submit_error_pushButton.setObjectName("submit_error_pushButton")
        self.horizontalLayout.addWidget(self.submit_error_pushButton)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible strings (kept separate for translation)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "Error handler"))
        self.cancel_pushButton.setText(_translate("Form", "Close"))
        self.submit_error_pushButton.setText(_translate("Form", "Submit error to support"))
if __name__ == "__main__":
    # Standalone preview: build the dialog on a bare QWidget and run the
    # Qt event loop until the window is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1959753 | <filename>release-assistant/test/test_start/test_start_cli.py<gh_stars>1-10
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
"""
TestStart
"""
import datetime
import os
from pathlib import Path
from test.base.basetest import TestMixin
from requests.exceptions import RequestException
from javcra.cli.commands.startpart import StartCommand
import pandas as pd
# Directory (next to this test module) holding the canned fixture files
# used by the mocks below.
MOCK_DATA_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), "mock_data")
class TestStart(TestMixin):
    """Tests for the `javcra start` command (StartCommand).

    Each test sets ``self.command_params`` / ``self.expect_str``, mocks the
    outbound HTTP, OBS-cloud and pandas calls, then relies on
    ``TestMixin.assert_result`` to run the command and compare its output.

    Refactor note: the CLI-argument and OBS-mock boilerplate that was
    duplicated across the tests now lives in the private helpers
    ``_command_params`` and ``_mock_obs_environment``.
    """
    cmd_class = StartCommand

    @staticmethod
    def _command_params(giteeid='Mary', token='<PASSWORD>', issue='I40321'):
        """Build the CLI argument list for the start command.

        Tests override ``giteeid``/``token``/``issue`` to trigger specific
        validation or permission branches.
        """
        return ['--giteeid={}'.format(giteeid), '--token={}'.format(token),
                '--useremail=<EMAIL>', '--ak=forexample',
                '--sk=forexample', issue]

    def _mock_obs_environment(self, get_objects_response=None):
        """Mock the OBS listing/download calls and the CVE excel read.

        Object keys in the canned listing are rewritten under today's date
        because the production code derives the remote path from the
        current day. Pass ``get_objects_response`` to simulate a failing
        download; by default the canned listing response is reused.
        """
        con = self.read_file_content('mock_obs_data.json', folder=MOCK_DATA_FILE)
        today = datetime.date.today().strftime('%Y-%m-%d')
        for con_key in con['contents']:
            con_key["key"] = "cve-manager-updateinfo/{}/{}".format(today, con_key["key"])
        mock_r = self.make_obs_cloud_data(200, con)
        self.mock_obs_cloud_list_objects(return_value=mock_r)
        self.mock_obs_cloud_get_objects(
            return_value=get_objects_response if get_objects_response is not None else mock_r)
        read_excel = pd.read_excel(Path(MOCK_DATA_FILE, "mock_cve_data.xlsx"),
                                   sheet_name="cve_list")
        self.mock_pandas_read_excel(return_value=read_excel)

    def test_success(self):
        """Happy path: every remote call succeeds and start reports success."""
        self.expect_str = """
[INFO] start update successfully.
"""
        resp = self.make_expect_data(200, 'startpart.txt')
        mock_init_r = self.make_need_content('init_success_data.txt', MOCK_DATA_FILE)
        self.command_params = self._command_params()
        self._mock_obs_environment()
        self.mock_request(side_effect=[resp, resp, resp, mock_init_r])
        mock_get_r = self.make_object_data(200, "The number of requests is too frequent, "
                                                "please try again later, there is currently a task being processed")
        self.mock_requests_get(side_effect=[mock_get_r])
        self.assert_result()

    def test_failed(self):
        """A RequestException during the final request makes start fail."""
        self.expect_str = """
[ERROR] failed to start update.
"""
        resp = self.make_expect_data(200, 'startpart.txt')
        self.command_params = self._command_params()
        self._mock_obs_environment()
        self.mock_request(side_effect=[resp, resp, resp, RequestException])
        self.mock_requests_get(side_effect=[self.make_object_data(200)])
        self.assert_result()

    def test_validate_failed(self):
        """An empty issue id fails parameter validation up front."""
        self.expect_str = """
Parameter validation failed
"""
        self.command_params = self._command_params(token='example', issue='')
        self.assert_result()

    def test_request_status_408(self):
        """A 408 from the permission service aborts the command."""
        self.expect_str = """
[ERROR] Failed to get the list of personnel permissions
"""
        self.command_params = self._command_params(token='example')
        self.mock_request(return_value=self.make_object_data(408))
        self.assert_result()

    def test_request_raise_requestexception(self):
        """A network error while fetching permissions aborts the command."""
        self.command_params = self._command_params(token='example')
        self.expect_str = """
[ERROR] Failed to get the list of personnel permissions
"""
        self.mock_request(side_effect=[RequestException])
        self.assert_result()

    def test_no_permission(self):
        """A gitee id missing from the permission list is rejected."""
        self.expect_str = """
[ERROR] The current user does not have relevant operation permissions
"""
        self.command_params = self._command_params(giteeid='onetwothree')
        resp = self.make_expect_data(200, 'startpart.txt')
        self.mock_request(side_effect=[resp])
        self.assert_result()

    def test_no_personnel_authority(self):
        """A malformed issue body means no permission list can be read."""
        self.expect_str = """
[ERROR] Failed to get the list of personnel permissions
"""
        self.command_params = self._command_params()
        resp = self.make_expect_data(200, 'mock_incorrect_issue.txt')
        self.mock_request(side_effect=[resp])
        self.assert_result()

    def test_resp_body_is_none(self):
        """An empty issue body on the second request makes start fail."""
        self.expect_str = """
[ERROR] failed to start update.
"""
        self.command_params = self._command_params()
        resp = self.make_expect_data(200, 'startpart.txt')
        issue_body_is_none_data = self.read_file_content('mock_issue_is_none.txt', folder=MOCK_DATA_FILE,
                                                         is_json=False)
        self.mock_request(side_effect=[resp, self.make_object_data(200, issue_body_is_none_data)])
        self.assert_result()

    def test_already_operated(self):
        """An issue that was already started cannot be started again."""
        self.expect_str = """
[ERROR] failed to start update.
"""
        self.command_params = self._command_params()
        resp = self.make_expect_data(200, 'startpart.txt')
        already_operated_data = self.read_file_content('already_operated.txt', folder=MOCK_DATA_FILE,
                                                       is_json=False)
        self.mock_request(side_effect=[resp, self.make_object_data(200, already_operated_data)])
        self.assert_result()

    def test_download_file_failed(self):
        """A 400 while downloading the CVE excel file makes start fail."""
        self.expect_str = """
[ERROR] failed to start update.
"""
        resp = self.make_expect_data(200, 'startpart.txt')
        self.command_params = self._command_params()
        self._mock_obs_environment(get_objects_response=self.make_object_data(400))
        self.mock_request(side_effect=[resp, resp, resp, resp, resp, resp, resp])
        self.mock_requests_get(side_effect=[self.make_object_data(200)])
        self.assert_result()
| StarcoderdataPython |
5180974 | <reponame>blockchain-etl/iotex-etl<filename>airflow/dags/mainnet_export_dag.py
from __future__ import print_function
from iotexetl_airflow.build_export_dag import build_export_dag
from iotexetl_airflow.variables import read_export_dag_vars
# Airflow DAG: daily export of IoTeX mainnet data at 01:00 UTC.
# The keyword arguments below are passed through read_export_dag_vars,
# which resolves settings using the "mainnet_" variable prefix.
DAG = build_export_dag(
    dag_id='mainnet_export_dag',
    **read_export_dag_vars(
        var_prefix='mainnet_',
        export_schedule_interval='0 1 * * *',
        export_start_date='2019-04-22',
        export_max_active_runs=3,
        export_max_workers=10,
    )
)
| StarcoderdataPython |
3560320 | <filename>app.py
from flask import Flask, request, jsonify
from NostaleVersionGet import NostaleVersionGet
import json
import time
import NostaleServerStatusBot
import os
app = Flask(__name__)
@app.route('/')
def index():
    """Full status endpoint: version info plus login-server reachability.

    Results are cached in ``cache.json`` for two minutes to avoid
    hammering the upstream services on every request.
    """
    try:
        # Serve from the on-disk cache when it is fresh enough.
        # FIX: the file handle was previously opened without ever being
        # closed; use a context manager.
        with open("cache.json") as f:
            d = json.load(f)
        if d['lastUpdateTime'] + 120 < int(time.time()):
            raise Exception("Too old data, refresh")
        d['cache'] = True
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Exception still covers the deliberate
    # cache-miss raise above plus missing/corrupt cache files.
    except Exception:
        d = json.loads(version().data)
        # Probe the login server only if its address is configured.
        if os.environ.get("LOGIN_SERVER_IP", default=None) is not None:
            status = NostaleServerStatusBot.CanConnect(nostale_version_json=d)
            if status == True:
                d['canConnectLoginServer'] = True
                d['canConnectLoginServerMsg'] = "OK"
            else:
                # CanConnect returned an error description instead of True.
                d['canConnectLoginServer'] = False
                d['canConnectLoginServerMsg'] = status
        d['lastUpdateTime'] = int(time.time())
        d['cache'] = False
        with open("cache.json", "w") as f:
            json.dump(d, f)
    d['_comment'] = "Cache is every 2min. Try dont spam this api (heroku can close it)"
    return jsonify(d)
@app.route('/version')
def version():
    """Version-only endpoint, cached in ``cache_version.json`` for 2 min."""
    try:
        # FIX: the handle was never closed; use a context manager.
        with open("cache_version.json") as f:
            d = json.load(f)
        if d['lastUpdateTime'] + 120 < int(time.time()):
            raise Exception("Too old data, refresh")
        d['cache'] = True
    # FIX: was a bare `except:` (also caught SystemExit/KeyboardInterrupt).
    except Exception:
        d = NostaleVersionGet()
        d['lastUpdateTime'] = int(time.time())
        d['cache'] = False
        with open("cache_version.json", "w") as f:
            json.dump(d, f)
    d['_comment'] = "Cache is every 2min. Try dont spam this api (heroku can close it)"
    return jsonify(d)
if __name__ == '__main__':
    # Development server; threaded=True allows concurrent request handling.
    app.run(threaded=True, port=5000)
1897829 | <filename>v1/pi/CamCalibration.py
import sched, time, threading
import serial
from tendo import singleton
from SimpleCV import *
me = singleton.SingleInstance() # will sys.exit(-1) if another instance is running
# Define some colors (RGB tuples).
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
# NOTE(review): BLACK/WHITE (and the sched/threading imports above) appear
# unused in this chunk - possibly leftovers; confirm before removing.
# Init serial (robot controller on USB at 57600 baud; send a stop command).
ser = serial.Serial('/dev/ttyUSB0', 57600)
ser.write("A,0,000,0,000,S")
# Init SimpleCV: camera plus an MJPEG stream served on port 8989.
cam = SimpleCV.Camera()
js = SimpleCV.JpegStreamer(8989)
img = cam.getImage().rotate(180)
img.save(js, "jpeg")
# Continuously push ~10 frames/second (camera mounted upside down,
# hence the 180-degree rotation).
while (1):
    img = cam.getImage().rotate(180)
    img.save(js, "jpeg")
    time.sleep(0.1)
| StarcoderdataPython |
6516618 | <reponame>etheleon/tools
#!/usr/bin/env python
import os
import re
import sqlite3
import argparse
from Bio import SeqIO
def storeInSQL(sqlite3File, koFolder, debug=False):
    '''
    Create (if needed) the `kotable` table in a sqlite3 database and load
    every KO fasta file from `koFolder` into it.

    :param sqlite3File: path to the sqlite3 database file (created if absent)
    :param koFolder: directory containing fasta files named like "ko:K00001"
    :param debug: when True, only the first matching KO file is processed
    '''
    # Raw string avoids the invalid "\:" / "\d" escapes of the original
    # (SyntaxWarning on modern Python); the regex itself is unchanged.
    kos = [ko for ko in os.listdir(koFolder) if re.search(r'^ko:K\d{5}$', ko)]
    if debug:
        # FIX: the old debug branch hard-coded kos[1], which raised
        # IndexError whenever fewer than two KO files were present, and
        # duplicated the whole parsing loop. Sample one file instead and
        # share the loop below.
        kos = kos[:1]
    conn = sqlite3.connect(sqlite3File)
    c = conn.cursor()
    c.execute('CREATE TABLE IF NOT EXISTS kotable (GI INT PRIMARY KEY, REFSEQ TEXT,KO TEXT, AASEQ TEXT, RECORDID TEXT, RECORDDESCRIPTION TEXT);')
    processed = 0
    for ko in kos:
        processed += 1
        koname = re.sub(r'ko:', '', ko)  # "ko:K00001" -> "K00001"
        with open("%s/%s" % (koFolder, ko)) as fh:
            for record in SeqIO.parse(fh, "fasta"):
                # Record ids look like "gi|<gi>|ref|<refseq>|"; pull out
                # the GI number and the RefSeq accession.
                _, gi, _, refseqID, _ = record.id.split('|')
                c.execute("INSERT INTO kotable (GI, REFSEQ, KO, AASEQ,RECORDID,RECORDDESCRIPTION) VALUES (?, ?, ?, ?, ?, ?)",
                          (gi, refseqID, koname, str(record.seq), record.id, record.description))
    print("Processed %s KOs" % processed)
    conn.commit()
    conn.close()
    print("Finished")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Positional arguments: KO fasta directory first, database path second.
    parser.add_argument("koNR", help="Relative path to the konr directory")
    parser.add_argument("SQL", help="SQL file NOTE must be full path")
    args = parser.parse_args()
    # storeInSQL(sqlite3File, koFolder): database path first, folder second.
    storeInSQL(args.SQL, args.koNR)
| StarcoderdataPython |
3449873 | from konverter.utils.model_attributes import Activations, Layers, watermark
from konverter.utils.konverter_support import KonverterSupport
import numpy as np
# Module-level helper shared by Konverter for layer/activation introspection.
support = KonverterSupport()
class Konverter:
def __init__(self, model, output_file, indent_spaces, verbose=True, use_watermark=True):
"""
:param model: A preloaded Sequential Keras model
:param output_file: The desired path and name of the output model files
:param indent_spaces: The number of spaces to use for indentation
:param use_watermark: To prepend a watermark comment to model wrapper
"""
self.model = model
self.output_file = output_file
self.indent = ' ' * indent_spaces
self.verbose = verbose
self.use_watermark = use_watermark
self.layers = []
self.start()
def start(self):
self.check_model()
self.get_layers()
if self.verbose:
self.print_model_architecture()
self.remove_unused_layers()
self.parse_output_file()
self.build_konverted_model()
def build_konverted_model(self):
self.print('\nNow building pure Python + NumPy model...')
model_builder = {'imports': ['import numpy as np'],
'functions': [],
'load_weights': [],
'model': ['def predict(x):']}
# add section to load model weights and biases
model_builder['load_weights'].append(f'wb = np.load(\'{self.output_file}_weights.npz\', allow_pickle=True)')
model_builder['load_weights'].append('w, b = wb[\'wb\']')
# builds the model and adds needed activation functions
for idx, layer in enumerate(self.layers):
prev_output = 'x' if idx == 0 else f'l{idx - 1}'
# work on predict function
if layer.name == Layers.Dense.name:
model_line = f'l{idx} = {layer.string.format(prev_output, idx, idx)}'
model_builder['model'].append(model_line)
if layer.info.has_activation:
if layer.info.activation.needs_function:
activation = f'l{idx} = {layer.info.activation.alias.lower()}(l{idx})'
else: # eg. tanh or relu
activation = f'l{idx} = {layer.info.activation.string.lower().format(f"l{idx}")}'
model_builder['model'].append(activation)
elif layer.info.is_recurrent:
rnn_function = f'l{idx} = {layer.alias.lower()}({prev_output}, {idx})'
if not layer.info.returns_sequences:
rnn_function += '[-1]'
model_builder['model'].append(rnn_function)
# work on functions: activations/simplernn
if layer.info.activation.string is not None:
if support.is_function(layer.info.activation.string): # don't add tanh as a function
model_builder['functions'].append(layer.info.activation.string)
if layer.info.is_recurrent:
model_builder['functions'].append(layer.string)
model_builder['functions'] = set(model_builder['functions']) # remove duplicates
model_builder['model'].append(f'return l{len(self.layers) - 1}')
self.save_model(model_builder)
self.output_file = self.output_file.replace('\\', '/')
self.print('\nSaved Konverted model!')
self.print(f'Model wrapper: {self.output_file}.py\nWeights and biases file: {self.output_file}_weights.npz')
self.print('\nMake sure to change the path inside the wrapper file to your weights if you move the file elsewhere.')
if Activations.Softmax.name in support.model_activations(self.layers):
self.print('Important: Since you are using Softmax, make sure that predictions are working correctly!')
def save_model(self, model_builder):
wb = list(zip(*[[np.array(layer.info.weights), np.array(layer.info.biases)] for layer in self.layers]))
np.savez_compressed('{}_weights'.format(self.output_file), wb=wb)
output = ['\n'.join(model_builder['imports']), # eg. import numpy as np
'\n'.join(model_builder['load_weights']), # loads weights and biases for predict()
'\n\n'.join(model_builder['functions']), # houses the model helper functions
'\n\t'.join(model_builder['model'])] # builds the predict function
output = '\n\n'.join(output) + '\n' # now combine all sections
if self.use_watermark:
output = watermark + output
with open(f'{self.output_file}.py', 'w') as f:
f.write(output.replace('\t', self.indent))
def remove_unused_layers(self):
self.layers = [layer for layer in self.layers if layer.name not in support.attrs_without_activations]
def parse_output_file(self):
if self.output_file[-3:] == '.py':
self.output_file = self.output_file[:-3]
def print_model_architecture(self):
print('\nSuccessfully got model architecture!\n')
print('Layers:\n-----')
to_print = [[f'name: {layer.alias}'] for layer in self.layers]
for idx, layer in enumerate(self.layers):
if not layer.info.is_ignored:
if layer.info.has_activation:
to_print[idx].append(f'activation: {layer.info.activation.alias}')
if layer.info.is_recurrent:
to_print[idx].append(f'shape: {layer.info.weights[0].shape}')
else:
to_print[idx].append(f'shape: {layer.info.weights.shape}')
to_print[idx] = ' ' + '\n '.join(to_print[idx])
print('\n-----\n'.join(to_print))
    def get_layers(self):
        """Wrap every Keras layer with its support info, aborting on the first
        unsupported layer/activation combination."""
        for layer in self.model.layers:
            # NOTE: rebinds `layer` to the wrapped info object returned by support.
            layer = support.get_layer_info(layer)
            if layer.info.supported:
                self.layers.append(layer)
            else:
                raise Exception('Layer `{}` with activation `{}` not currently supported (check type or activation)'.format(layer.name, layer.info.activation.name))
    def check_model(self):
        """Validate that the input is a supported Sequential tf.keras model.

        NOTE(review): comparing ``str(type(...))`` to a hard-coded class path
        is brittle (breaks for standalone keras or internal TF module moves);
        consider isinstance once the supported TF versions are pinned.
        """
        if str(type(self.model)) != "<class 'tensorflow.python.keras.engine.sequential.Sequential'>":
            raise Exception('Input for `model` must be a Sequential tf.keras model, not {}'.format(type(self.model)))
        elif not support.in_models(self.model.name):
            raise Exception('Model is `{}`, must be in {}'.format(self.model.name, [mdl.name for mdl in support.models]))
    def print(self, msg):
        # Verbosity-gated print; deliberately shadows the builtin inside this class.
        if self.verbose:
            print(msg)
| StarcoderdataPython |
3349935 | import os
import socket
import sys
from StringIO import StringIO
from multiprocessing.pool import ThreadPool
from threading import Thread
import time
from paramiko import SSHClient, AutoAddPolicy, RSAKey
from paramiko.ssh_exception import NoValidConnectionsError
from scpclient import Write, SCPError
from cloudshell.cm.customscript.domain.cancellation_sampler import CancellationSampler
from cloudshell.cm.customscript.domain.reservation_output_writer import ReservationOutputWriter
from cloudshell.cm.customscript.domain.script_configuration import HostConfiguration
from cloudshell.cm.customscript.domain.script_executor import IScriptExecutor, ErrorMsg, ExcutorConnectionError
from cloudshell.cm.customscript.domain.script_file import ScriptFile
class LinuxScriptExecutor(IScriptExecutor):
PasswordEnvVarName = 'cs_machine_pass'
class ExecutionResult(object):
def __init__(self, exit_code, std_out, std_err):
self.std_err = std_err
self.std_out = std_out
self.success = exit_code == 0
def __init__(self, logger, target_host, cancel_sampler):
"""
:type logger: Logger
:type target_host: HostConfiguration
:type cancel_sampler: CancellationSampler
"""
self.logger = logger
self.cancel_sampler = cancel_sampler
self.pool = ThreadPool(processes=1)
self.session = SSHClient()
self.session.set_missing_host_key_policy(AutoAddPolicy())
self.target_host = target_host
def connect(self):
try:
if self.target_host.password:
self.session.connect(self.target_host.ip, username=self.target_host.username, password=self.target_host.password)
elif self.target_host.access_key:
key_stream = StringIO(self.target_host.access_key)
key_obj = RSAKey.from_private_key(key_stream)
self.session.connect(self.target_host.ip, username=self.target_host.username, pkey=key_obj)
elif self.target_host.username:
raise Exception('Both password and access key are empty.')
else:
raise Exception('Machine credentials are empty.')
except NoValidConnectionsError as e:
error_code = next(e.errors.itervalues(), type('e', (object,), {'errno': 0})).errno
raise ExcutorConnectionError(error_code, e)
except socket.error as e:
raise ExcutorConnectionError(e.errno, e)
except Exception as e:
raise ExcutorConnectionError(0, e)
def get_expected_file_extensions(self):
"""
:rtype list[str]
"""
return ['.sh', '.bash']
# file_name, file_ext = os.path.splitext(script_file.name)
# if file_ext and file_ext != '.sh' and file_ext != '.bash':
# output_writer.write_warning('Trying to run "%s" file via ssh on host %s' % (file_ext, self.target_host.ip))
def execute(self, script_file, env_vars, output_writer, print_output=True):
"""
:type script_file: ScriptFile
:type output_writer: ReservationOutputWriter
:type print_output: bool
"""
self.logger.info('Creating temp folder on target machine ...')
tmp_folder = self.create_temp_folder()
self.logger.info('Done (%s).' % tmp_folder)
try:
self.logger.info('Copying "%s" (%s chars) to "%s" target machine ...' % (script_file.name, len(script_file.text), tmp_folder))
self.copy_script(tmp_folder, script_file)
self.logger.info('Done.')
self.logger.info('Running "%s" on target machine ...' % script_file.name)
self.run_script(tmp_folder, script_file, env_vars, output_writer, print_output)
self.logger.info('Done.')
finally:
self.logger.info('Deleting "%s" folder from target machine ...' % tmp_folder)
self.delete_temp_folder(tmp_folder)
self.logger.info('Done.')
def create_temp_folder(self):
"""
:rtype str
"""
result = self._run_cancelable('mktemp -d')
if not result.success:
raise Exception(ErrorMsg.CREATE_TEMP_FOLDER % result.std_err)
return result.std_out.rstrip('\n')
def copy_script(self, tmp_folder, script_file):
"""
:type tmp_folder: str
:type script_file: ScriptFile
"""
file_stream = StringIO(script_file.text)
file_size = len(file_stream.getvalue())
scp = None
try:
scp = Write(self.session.get_transport(), tmp_folder)
scp.send(file_stream, script_file.name, '0601', file_size)
except SCPError as e:
raise Exception,ErrorMsg.COPY_SCRIPT % str(e),sys.exc_info()[2]
finally:
if scp:
scp.close()
def run_script(self, tmp_folder, script_file, env_vars, output_writer, print_output=True):
"""
:type tmp_folder: str
:type script_file: ScriptFile
:type env_vars: dict
:type output_writer: ReservationOutputWriter
:type print_output: bool
"""
code = ''
for key, value in (env_vars or {}).iteritems():
code += 'export %s=%s;' % (key,self._escape(value))
if self.target_host.password:
code += 'export %s=%s;' % (self.PasswordEnvVarName, self._escape(self.target_host.password))
code += 'sh '+tmp_folder+'/'+script_file.name
result = self._run_cancelable(code)
if print_output:
output_writer.write(result.std_out)
output_writer.write(result.std_err)
if not result.success:
raise Exception(ErrorMsg.RUN_SCRIPT % result.std_err)
def delete_temp_folder(self, tmp_folder):
"""
:type tmp_folder: str
"""
result = self._run_cancelable('rm -rf '+tmp_folder)
if not result.success:
raise Exception(ErrorMsg.DELETE_TEMP_FOLDER % result.std_err)
def _run(self, code):
self.logger.debug('BashScript:' + code)
#stdin, stdout, stderr = self._run_cancelable(code)
stdin, stdout, stderr = self.session.exec_command(code)
exit_code = stdout.channel.recv_exit_status()
stdout_txt = ''.join(stdout.readlines())
stderr_txt = ''.join(stderr.readlines())
self.logger.debug('ReturnedCode:' + str(exit_code))
self.logger.debug('Stdout:' + stdout_txt)
self.logger.debug('Stderr:' + stderr_txt)
return LinuxScriptExecutor.ExecutionResult(exit_code, stdout_txt, stderr_txt)
def _run_cancelable(self, txt, *args):
async_result = self.pool.apply_async(self._run, kwds={'code': txt % args})
while not async_result.ready():
if self.cancel_sampler.is_cancelled():
self.session.close()
self.cancel_sampler.throw()
time.sleep(1)
return async_result.get()
def _escape(self, value):
escaped_str = "$'" + '\\x' + '\\x'.join([x.encode("hex") for x in str(value).encode("utf-8")]) + "'"
return escaped_str | StarcoderdataPython |
11358070 | from ex108 import moeda
# Read a price and show its half, double, and +/-10% variants as currency.
preco = float(input('Digite o preço: R$ '))
print(f'A metade de R$ {moeda.moeda(preco)} é R$ {moeda.moeda(moeda.metade(preco))}')
print(f'O dobro de R$ {moeda.moeda(preco)} é R$ {moeda.moeda(moeda.dobro(preco))}')
print(f'Aumentando 10%, temos {moeda.moeda(moeda.aumentar(preco, 10))}')
print(f'Diminuindo 10%, temos {moeda.moeda(moeda.diminuir(preco, 10))}')
| StarcoderdataPython |
def testing_image(input_image):
    """Diagnose the colour classification of one RGB traffic-light image.

    Masks out low-saturation pixels (HSV), then dark and near-white pixels
    (RGB), counts the remaining red/green/yellow pixels, prints the counts
    and the winning colour with a confidence ratio, and plots intermediates.

    NOTE(review): relies on module-level ``cv2``, ``np``, ``plt`` and an
    ``in_range(image, lower, upper)`` helper defined elsewhere in this file.
    (Fix: removed dataset-ID junk fused onto the original def line.)
    """
    # Convert to HSV
    hsv = cv2.cvtColor(input_image, cv2.COLOR_RGB2HSV)
    # HSV channels
    h = hsv[:, :, 0]
    s = hsv[:, :, 1]
    v = hsv[:, :, 2]
    # HSV mask: zero out low-saturation (greyish) pixels
    hsv_lower = np.array([0, 0, 0])
    hsv_upper = np.array([255, 35, 255])
    hsv_mask = cv2.inRange(hsv, hsv_lower, hsv_upper)
    masked_image = np.copy(input_image)
    masked_image[hsv_mask != 0] = [0, 0, 0]
    # RGB mask - black: find dark pixels in the saturation-masked image...
    black_max = 150
    rgb_lower = np.array([0, 0, 0])
    rgb_upper = np.array([black_max, black_max, black_max])
    rgb_mask = cv2.inRange(masked_image, rgb_lower, rgb_upper)
    # ...but apply the mask to a fresh copy of the ORIGINAL image.
    # NOTE(review): this discards the saturation masking itself — confirm intended.
    rgb_masked_image = np.copy(input_image)
    rgb_masked_image[rgb_mask != 0] = [0, 0, 0]
    # RGB mask - white: additionally drop near-white pixels
    white_min = 200
    rgb_lower = np.array([white_min, white_min, white_min])
    rgb_upper = np.array([255, 255, 255])
    rgb_mask = cv2.inRange(rgb_masked_image, rgb_lower, rgb_upper)
    rgb_masked_image[rgb_mask != 0] = [0, 0, 0]
    # Count pixels inside each colour band.
    # NOTE(review): the green/yellow bounds look like HSV-style ranges applied
    # to an RGB image — confirm the colour space expected by in_range().
    red = in_range(rgb_masked_image, [200, 0, 0], [255, 200, 200])
    green = in_range(rgb_masked_image, [36, 0, 0], [86, 255, 255])
    yellow = in_range(rgb_masked_image, [153, 139, 0], [255, 255, 153])
    stats = {
        'green': green,
        'red': red,
        'yellow': yellow
    }
    # Winning colour = the band with the highest pixel count.
    classification = max(stats, key=stats.get)
    # Count pixels
    total = in_range(rgb_masked_image, [1, 1, 1], [255, 255, 255])
    print("Total pixels", total)
    print("Red pixels", red)
    print("Green pixels", green)
    print("Yellow pixels", yellow)
    print("Color => ", classification)
    colors_found = red + green + yellow
    confidence = 0 if colors_found == 0 else stats[classification] / colors_found
    print("confidence", confidence)
    # Plot
    # NOTE(review): titles and contents disagree for ax2 (shows masked_image,
    # not the H channel); `h` is computed but never displayed.
    f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20, 10))
    ax1.set_title('Standardized image')
    ax1.imshow(rgb_masked_image, cmap='gray')
    ax2.set_title('H channel')
    ax2.imshow(masked_image, cmap='gray')
    ax3.set_title('S channel')
    ax3.imshow(s, cmap='gray')
    ax4.set_title('V channel')
    ax4.imshow(v, cmap='gray')
# Re-run the diagnostic on one misclassified example.
image_num = 1
example = MISCLASSIFIED[image_num]
input_image = example[0]
test_label = example[1]
# Red, yellow, green
print("Expected", test_label)
testing_image(input_image)
| StarcoderdataPython |
20179 | from django.test import TestCase
from django.urls import reverse_lazy
from ..models import PHOTO_MODEL, UploadedPhotoModel, IMAGE_SIZES
from .model_factories import get_image_file, get_zip_file
import time
from uuid import uuid4
class UploadPhotoApiViewTest(TestCase):
    """Integration tests for the 'image_upload' endpoint: single-image and
    zip uploads must each create Photo/UploadedPhotoModel rows and write
    every resized rendition to storage."""
    def check_photo_ok_and_delete(self, photo):
        # The original file plus every configured size must exist in storage.
        self.assertTrue(photo.image.storage.exists(photo.image.name))
        for size in IMAGE_SIZES.values():
            self.assertTrue(photo.image.storage.exists(photo.get_filepath_for_size(size)))
        photo.delete()
    def test_upload_photo(self):
        # One image upload -> one photo row linked from one upload record.
        self.client.post(reverse_lazy('image_upload'), {'file': get_image_file(), 'upload_id': str(uuid4())})
        time.sleep(1) # Different process implementations might need a little bit longer
        self.assertEqual(1, PHOTO_MODEL.objects.count())
        self.assertEqual(1, UploadedPhotoModel.objects.count())
        self.assertEqual(PHOTO_MODEL.objects.first(), UploadedPhotoModel.objects.first().photo)
        photo = PHOTO_MODEL.objects.first()
        self.check_photo_ok_and_delete(photo)
        UploadedPhotoModel.objects.all().delete()
    def test_upload_zip(self):
        # A zip of two images -> two photos, each fully processed.
        zip_file = get_zip_file(images=[get_image_file(name='img1.png'), get_image_file(name='img2.png')])
        self.client.post(reverse_lazy('image_upload'), {'file': zip_file, 'upload_id': str(uuid4())})
        time.sleep(1) # Different process implementations might need a little bit longer
        self.assertEqual(2, PHOTO_MODEL.objects.count())
        self.assertEqual(2, UploadedPhotoModel.objects.count())
        for photo in PHOTO_MODEL.objects.all():
            self.check_photo_ok_and_delete(photo)
        UploadedPhotoModel.objects.all().delete()
| StarcoderdataPython |
6524758 | <reponame>NREL/VirtualEngineering
# Init file for vebio package | StarcoderdataPython |
6583144 | <gh_stars>0
import functools
import json
import itertools
from couchdbkit.exceptions import DocTypeError
from corehq import Domain
from corehq.apps.app_manager.const import CT_REQUISITION_MODE_3, CT_LEDGER_STOCK, CT_LEDGER_REQUESTED, CT_REQUISITION_MODE_4, CT_LEDGER_APPROVED, CT_LEDGER_PREFIX
from corehq.apps.app_manager.xform import XForm, XFormError, parse_xml
import re
from dimagi.utils.decorators.memoized import memoized
from django.core.cache import cache
def get_app_id(form):
    """
    Given an XForm instance, try to grab the app id, returning
    None if not available. This is just a shortcut since the app_id
    might not always be set.
    """
    try:
        return form.app_id
    except AttributeError:
        return None
def split_path(path):
    """Split 'dir/name' into ('dir', 'name'); inputs without '/' yield ('', input)."""
    head, _sep, name = path.rpartition('/')
    return head, name
def save_xform(app, form, xml):
    """Attach xform ``xml`` to ``form``, first rewriting its xmlns when it
    collides with another form in ``app`` or is missing/generic."""
    def change_xmlns(xform, replacing):
        # Rebuild the instance data node with a fresh form-unique xmlns.
        data = xform.data_node.render()
        xmlns = "http://openrosa.org/formdesigner/%s" % form.get_unique_id()
        data = data.replace(replacing, xmlns, 1)
        xform.instance_node.remove(xform.data_node.xml)
        xform.instance_node.append(parse_xml(data))
        xml = xform.render()
        return xform, xml
    try:
        xform = XForm(xml)
    except XFormError:
        # Not parseable as an xform; fall through and save the source as-is.
        pass
    else:
        # If any OTHER form already uses this xmlns, assign a new one.
        duplicates = app.get_xmlns_map()[xform.data_node.tag_xmlns]
        for duplicate in duplicates:
            if form == duplicate:
                continue
            else:
                xform, xml = change_xmlns(xform, xform.data_node.tag_xmlns)
                break
        GENERIC_XMLNS = "http://www.w3.org/2002/xforms"
        if not xform.data_node.tag_xmlns or xform.data_node.tag_xmlns == GENERIC_XMLNS: #no xmlns
            xform, xml = change_xmlns(xform, GENERIC_XMLNS)
    form.source = xml
# A valid case type is one or more word characters or hyphens (no spaces).
CASE_TYPE_REGEX = r'^[\w-]+$'
_case_type_regex = re.compile(CASE_TYPE_REGEX)
def is_valid_case_type(case_type):
    """
    Whether ``case_type`` is a non-empty string of word chars and hyphens.

    >>> is_valid_case_type('foo')
    True
    >>> is_valid_case_type('foo-bar')
    True
    >>> is_valid_case_type('foo bar')
    False
    >>> is_valid_case_type('')
    False
    >>> is_valid_case_type(None)
    False
    """
    match = _case_type_regex.match(case_type or '')
    return match is not None
class ParentCasePropertyBuilder(object):
    """Computes the set of case properties per case type for an app,
    following parent-case relationships recursively and, when the app is
    case-sharing, merging properties from sibling apps in the domain."""
    def __init__(self, app, defaults=()):
        self.app = app
        # Property names that every case type is assumed to have (e.g. 'name').
        self.defaults = defaults
    @property
    @memoized
    def forms_info(self):
        # unfortunate, but biggest speed issue is accessing couchdbkit properties
        # so compute them once
        forms_info = []
        if self.app.doc_type == 'RemoteApp':
            return forms_info
        for module in self.app.get_modules():
            for form in module.get_forms():
                forms_info.append((module.case_type, form))
        return forms_info
    @memoized
    def get_parent_types_and_contributed_properties(self, case_type):
        # Aggregate, across all forms, the parent case types of `case_type`
        # and the properties those forms contribute to it.
        parent_types = set()
        case_properties = set()
        for m_case_type, form in self.forms_info:
            p_types, c_props = form.get_parent_types_and_contributed_properties(m_case_type, case_type)
            parent_types.update(p_types)
            case_properties.update(c_props)
        return parent_types, case_properties
    def get_parent_types(self, case_type):
        # Just the parent case-type names (drop the relationship labels).
        parent_types, _ = \
            self.get_parent_types_and_contributed_properties(case_type)
        return set(p[0] for p in parent_types)
    @memoized
    def get_other_case_sharing_apps_in_domain(self):
        from corehq.apps.app_manager.models import get_apps_in_domain
        apps = get_apps_in_domain(self.app.domain, include_remote=False)
        return [a for a in apps if a.case_sharing and a.id != self.app.id]
    @memoized
    def get_properties(self, case_type, already_visited=(),
                       include_shared_properties=True):
        # `already_visited` guards against cycles in parent relationships.
        if case_type in already_visited:
            return ()
        get_properties_recursive = functools.partial(
            self.get_properties,
            already_visited=already_visited + (case_type,),
            include_shared_properties=include_shared_properties
        )
        case_properties = set(self.defaults)
        for m_case_type, form in self.forms_info:
            case_properties.update(self.get_case_updates(form, case_type))
        parent_types, contributed_properties = \
            self.get_parent_types_and_contributed_properties(case_type)
        case_properties.update(contributed_properties)
        # Parent properties are exposed as '<relationship>/<property>'.
        for parent_type in parent_types:
            for property in get_properties_recursive(parent_type[0]):
                case_properties.add('%s/%s' % (parent_type[1], property))
        # include_shared_properties=False on the recursive call prevents
        # infinite mutual recursion between case-sharing apps.
        if self.app.case_sharing and include_shared_properties:
            from corehq.apps.app_manager.models import get_apps_in_domain
            for app in self.get_other_case_sharing_apps_in_domain():
                case_properties.update(
                    get_case_properties(
                        app, [case_type], include_shared_properties=False
                    ).get(case_type, [])
                )
        return case_properties
    @memoized
    def get_case_updates(self, form, case_type):
        return form.get_case_updates(case_type)
    def get_case_property_map(self, case_types,
                              include_shared_properties=True):
        # Sorted for deterministic output.
        case_types = sorted(case_types)
        return {
            case_type: sorted(self.get_properties(
                case_type, include_shared_properties=include_shared_properties
            ))
            for case_type in case_types
        }
def get_case_properties(app, case_types, defaults=(),
                        include_shared_properties=True):
    """Map each case type to its sorted list of case properties for ``app``."""
    return ParentCasePropertyBuilder(app, defaults).get_case_property_map(
        case_types,
        include_shared_properties=include_shared_properties,
    )
def get_all_case_properties(app):
    """Case-property map covering every case type used by any module of ``app``."""
    all_case_types = {case_type
                      for module in app.modules
                      for case_type in module.get_case_types()}
    return get_case_properties(app, all_case_types, defaults=('name',))
def get_settings_values(app):
    """Collect an app's profile/feature/HQ settings into a dict for the UI."""
    try:
        profile = app.profile
    except AttributeError:
        profile = {}
    # Only scalar properties: anything with .pop (dict/list-like) is skipped.
    hq_settings = {
        attr: app[attr]
        for attr in app.properties() if not hasattr(app[attr], 'pop')
    }
    if getattr(app, 'use_custom_suite', False):
        hq_settings['custom_suite'] = getattr(app, 'custom_suite', None)
    hq_settings['build_spec'] = app.build_spec.to_string()
    # the admin_password hash shouldn't be sent to the client
    hq_settings.pop('admin_password', None)
    domain = Domain.get_by_name(app.domain)
    return {
        'properties': profile.get('properties', {}),
        'features': profile.get('features', {}),
        'hq': hq_settings,
        '$parent': {
            'doc_type': app.get_doc_type(),
            '_id': app.get_id,
            'domain': app.domain,
            'commtrack_enabled': domain.commtrack_enabled,
        },
    }
def add_odk_profile_after_build(app_build):
    """Attach an ODK profile to a freshly built app. Caller must save."""
    odk_profile = app_build.create_profile(is_odk=True)
    app_build.lazy_put_attachment(odk_profile, 'files/profile.ccpr')
    # hack this in for records
    app_build.odk_profile_created_after_build = True
def create_temp_sort_column(field, index):
    """
    Used to create a column for the sort only properties to
    add the field to the list of properties and app strings but
    not persist anything to the detail data.

    NOTE(review): ``index`` is accepted but never used; kept only for the
    existing caller signature — confirm before removing.
    """
    from corehq.apps.app_manager.models import SortOnlyDetailColumn
    return SortOnlyDetailColumn(
        model='case',
        field=field,
        format='invisible',
        header=None,
    )
def is_sort_only_column(column):
    """True if ``column`` was produced by create_temp_sort_column."""
    from corehq.apps.app_manager.models import SortOnlyDetailColumn
    return isinstance(column, SortOnlyDetailColumn)
def get_correct_app_class(doc):
    """Map a couch document's doc_type to its Application/RemoteApp class.

    Raises DocTypeError when 'doc_type' is missing or unrecognised.
    """
    from corehq.apps.app_manager.models import Application, RemoteApp
    class_by_doc_type = {
        'Application': Application,
        'Application-Deleted': Application,
        "RemoteApp": RemoteApp,
        "RemoteApp-Deleted": RemoteApp,
    }
    try:
        return class_by_doc_type[doc['doc_type']]
    except KeyError:
        raise DocTypeError()
def all_apps_by_domain(domain):
    """Yield every wrapped Application/RemoteApp saved under ``domain``."""
    from corehq.apps.app_manager.models import ApplicationBase
    view_result = ApplicationBase.get_db().view(
        'app_manager/applications',
        startkey=[domain, None],
        endkey=[domain, None, {}],
        include_docs=True,
    )
    for row in view_result.all():
        doc = row['doc']
        app_class = get_correct_app_class(doc)
        yield app_class.wrap(doc)
def new_careplan_module(app, name, lang, target_module):
    """Add a care-plan module (goal and task forms, each in create and
    update modes) to ``app``, bound to ``target_module``'s case type."""
    from corehq.apps.app_manager.models import CareplanModule, CareplanGoalForm, CareplanTaskForm
    module = app.add_module(CareplanModule.new_module(
        app,
        name,
        lang,
        target_module.unique_id,
        target_module.case_type)
    )
    # new_form returns a (form, source-xml) pair per form.
    forms = [form_class.new_form(lang, name, mode)
             for form_class in [CareplanGoalForm, CareplanTaskForm]
             for mode in ['create', 'update']]
    for form, source in forms:
        module.forms.append(form)
        # re-fetch so the form object is the one attached to the module
        # before assigning its source
        form = module.get_form(-1)
        form.source = source
    return module
def languages_mapping():
    """Return {two-letter language code: [names]}, cached for 12 hours."""
    mapping = cache.get('__languages_mapping')
    if not mapping:
        with open('submodules/langcodes/langs.json') as langs_file:
            lang_data = json.load(langs_file)
        mapping = {lang["two"]: lang["names"] for lang in lang_data}
        mapping["default"] = ["Default Language"]
        cache.set('__languages_mapping', mapping, 12 * 60 * 60)
    return mapping
def commtrack_ledger_sections(mode):
    """Return the prefixed ledger section ids active for the requisition mode."""
    sections = [CT_LEDGER_STOCK]
    if mode == CT_REQUISITION_MODE_3:
        sections.append(CT_LEDGER_REQUESTED)
    elif mode == CT_REQUISITION_MODE_4:
        sections.extend([CT_LEDGER_REQUESTED, CT_LEDGER_APPROVED])
    return ['{}{}'.format(CT_LEDGER_PREFIX, section) for section in sections]
| StarcoderdataPython |
3217748 | from unittest import TestCase
from .base_type_rxt import SingleField, SingleField_deserialize, SingleField_serialize,MultiFields
class BaseTypeSerializationTestCase(TestCase):
    """Round-trip tests for the generated base-type (de)serializers."""

    def test_create_SingleField(self):
        """Constructor stores the single int field."""
        item = SingleField(42)
        self.assertEqual(42, item.foo32)

    def test_create_MultiFields(self):
        """Constructor stores every field in declaration order."""
        item = MultiFields(1, 2, 3, 4, True, 1.2, "baz")
        self.assertEqual(1, item.foo32)
        self.assertEqual(2, item.bar32)
        self.assertEqual(3, item.foo64)
        self.assertEqual(4, item.bar64)
        self.assertEqual(True, item.biz)
        self.assertEqual(1.2, item.buz)
        self.assertEqual('baz', item.name)

    def test_deserialize_SingleField(self):
        """Valid JSON yields a SingleField instance with the field populated."""
        item_json = '{"foo32": 42}'
        item = SingleField_deserialize(item_json)
        self.assertIs(type(item), SingleField)
        self.assertEqual(42, item.foo32)

    def test_deserialize_SingleField_failure(self):
        """Missing required field: the deserializer signals failure with None."""
        item_json = '{}'
        item = SingleField_deserialize(item_json)
        self.assertIsNone(item)

    def test_serialize_SingleField(self):
        """Serialization produces the expected JSON string."""
        expected_json = '{"foo32": 42}'
        item = SingleField(42)
        item_json = SingleField_serialize(item)
        self.assertEqual(expected_json, item_json)
| StarcoderdataPython |
6696404 | <reponame>bogdan824/LeetCode-Problems
def thousandSep(n):
    """Return integer ``n`` formatted with '.' as the thousands separator.

    Uses the format-spec thousands grouping, which also handles negative
    numbers correctly (the original manual insertion produced strings like
    '-.123' for -123).
    """
    return '{:,}'.format(n).replace(',', '.')
# Demo: format a very large integer with dot thousands separators.
n = 12345678912345678
print(thousandSep(n))
4915184 | import os
from setuptools import setup, find_packages
from setuptools.command.test import test
from unittest import TestLoader
# Semantic version components; combined with the CI build number
# by version_number() below.
MAJOR_VERSION = 4
MINOR_VERSION = 3
PATCH_VERSION = 9
# Environment variable into which CI places the build ID
# https://docs.gitlab.com/ce/ci/variables/
CI_BUILD_ID = 'BUILD_NUMBER'
class TestRunner(test):
    """`python setup.py test` command that emits JUnit-style XML reports."""
    def run_tests(self):
        # Imported lazily: importing at module top would fail while this file
        # is being loaded to discover dependencies, before they are installed.
        from xmlrunner import XMLTestRunner
        suite = TestLoader().discover('tests', pattern='test_*.py')
        outcome = XMLTestRunner(output='reports').run(suite)
        exit(0 if outcome.wasSuccessful() else 1)
def set_build_number_from_ci_environment():
    """Read the CI-provided build number from the environment as an int.

    Raises KeyError when the variable is unset.
    """
    raw_build = os.environ[CI_BUILD_ID]
    return int(raw_build)
def version_number():
    """Build the full 'major.minor.patch.build' version string (build 0 outside CI)."""
    build = set_build_number_from_ci_environment() if CI_BUILD_ID in os.environ else 0
    return '%d.%d.%d.%d' % (MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION, build)
# PyPI long description comes straight from the README.
with open('README.md', 'r') as f:
    long_description = f.read()
setup(name='twin_sister',
      version=version_number(),
      description='Unit test toolkit',
      url='https://github.com/CyberGRX/twin-sister',
      author='<NAME>',
      author_email='<EMAIL>',
      license='',
      long_description=long_description,
      long_description_content_type='text/markdown',
      packages=find_packages(),
      include_package_data=True,
      exclude_package_data={'': ['tests']},
      install_requires=[
          'expects>=0.8.0',
          'pyfakefs>=3.4.3',
          'twine>=1.9.1',
          'unittest-xml-reporting>=2.1.1',
          'wheel>=0.30.0'
      ],
      # Wire `python setup.py test` to the XML-reporting runner above.
      cmdclass={'test': TestRunner}
      )
| StarcoderdataPython |
9618610 | <gh_stars>10-100
import tensorflow as tf
import cv2 as cv
import random
import os
import math
import numpy as np
from tensorflow.contrib.framework.python.ops import add_arg_scope
def random_interpolates(x, y, alpha=None):
    """
    x: first dimension as batch_size
    y: first dimension as batch_size
    alpha: [BATCH_SIZE, 1]

    Returns points interpolated elementwise between x and y, one random
    interpolation factor per sample (used for the WGAN-GP penalty); the
    result is reshaped back to x's original shape.
    """
    shape = x.get_shape().as_list()
    # Flatten everything but the batch dim so a [batch, 1] alpha broadcasts
    # across all features of each sample.
    x = tf.reshape(x, [shape[0], -1])
    y = tf.reshape(y, [shape[0], -1])
    if alpha is None:
        alpha = tf.random_uniform(shape=[shape[0], 1])
    interpolates = x + alpha*(y - x)
    return tf.reshape(interpolates, shape)
def gradients_penalty(x, y, mask=None, norm=1.):
    """Improved Training of Wasserstein GANs
    - https://arxiv.org/abs/1704.00028

    Penalizes deviation of the (optionally masked) gradient magnitude of
    ``y`` w.r.t. ``x`` from ``norm``. The axis=[1, 2, 3] reduction assumes
    4-D (NHWC) inputs.
    """
    gradients = tf.gradients(y, x)[0]
    if mask is None:
        mask = tf.ones_like(gradients)
    slopes = tf.sqrt(tf.reduce_mean(tf.square(gradients) * mask, axis=[1, 2, 3]))
    return tf.reduce_mean(tf.square(slopes - norm))
def standard_conv(x,mask,cnum,ksize=3,stride=1,rate=1,name='conv',padding='SAME'):
    '''
    define convolution for generator
    Args:
        x: input image
        mask: unused here (kept for signature parity with gated variants)
        cnum: channel number
        ksize: kernel size
        stride: convolution stride
        rate: rate for dilated conv
        name: name of layers
        padding: effectively ignored -- the input is reflect-padded manually
            and the conv always runs with VALID padding (the argument is
            overwritten below)
    '''
    # Manual REFLECT padding sized so output spatial dims match 'SAME'.
    p = int(rate*(ksize-1)/2)
    x = tf.pad(x, [[0,0], [p, p], [p, p], [0,0]],'REFLECT')
    padding = 'VALID'
    x = tf.layers.conv2d(x,cnum,ksize,stride,dilation_rate=rate,activation=tf.nn.elu
    ,padding=padding,name=name+'_1')
    return x
def standard_dconv(x,mask,cnum,name='deconv',padding='VALID'):
    '''
    define upsample convolution for generator: 2x nearest-neighbour resize
    followed by a reflect-padded 3x3 convolution
    Args:
        x: input image
        mask: unused here (kept for signature parity)
        cnum: channel number
        name: name of layers
        padding: passed to the conv; with the manual reflect padding this is
            expected to stay 'VALID'
    '''
    rate = 1
    ksize = 3
    stride = 1
    shape = x.get_shape().as_list()
    # Nearest-neighbour upsample to double the spatial resolution.
    x = tf.image.resize_nearest_neighbor(x,[shape[1]*2,shape[2]*2])
    p3 = int(1 * (3 - 1) / 2)
    x = tf.pad(x, [[0,0], [p3,p3], [p3,p3], [0,0]], 'REFLECT')
    x = tf.layers.conv2d(x,cnum,ksize,stride,dilation_rate=rate,activation=tf.nn.elu
    ,padding=padding,name=name+'_1')
    return x
def l2_norm(v, eps=1e-12):
    """Normalize ``v`` by its L2 norm, with ``eps`` for numerical stability."""
    norm = tf.reduce_sum(v ** 2) ** 0.5
    return v / (norm + eps)
def spectral_norm(w, iteration=1,name='dasd'):
    """Spectrally normalize weight tensor ``w`` via power iteration.

    ``u`` is a persistent, non-trainable variable holding the running
    singular-vector estimate; its update is tied to the returned tensor via
    a control dependency so it advances each time the graph is run.
    """
    w_shape = w.shape.as_list()
    # Flatten to a 2-D matrix with the output channels as columns.
    w = tf.reshape(w, [-1, w_shape[-1]])
    u = tf.get_variable(name+"u", [1, w_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)
    u_hat = u
    v_hat = None
    # Power iteration: alternately refine the two singular-vector estimates.
    for i in range(iteration):
        v_ = tf.matmul(u_hat, tf.transpose(w))
        v_hat = l2_norm(v_)
        u_ = tf.matmul(v_hat, w)
        u_hat = l2_norm(u_)
    # Estimated largest singular value; divide it out of w.
    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    w_norm = w / sigma
    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = tf.reshape(w_norm, w_shape)
    return w_norm
def LeakyRelu(x, leak=0.2, name="LeakyRelu"):
    """Leaky ReLU via the identity 0.5*(1+leak)*x + 0.5*(1-leak)*|x|.

    ``name`` is accepted for API symmetry but not used.
    """
    pos_coef = 0.5 * (1 + leak)
    neg_coef = 0.5 * (1 - leak)
    return pos_coef * x + neg_coef * tf.abs(x)
def dis_conv(x, cnum, ksize=5, stride=2, activation = 'leak_relu', name='conv'):
    """
    convolution for discriminator.
    Args:
        x: input image
        cnum: channel number.
        ksize: kernel size.
        stride: convolution stride.
        activation: any value other than None applies LeakyRelu; the string
            itself is not interpreted further.
        name: name of layers.
    """
    x_shape = x.get_shape().as_list()
    w = tf.get_variable(name = name+'_w',shape = [ksize, ksize, x_shape[-1]] + [cnum])
    # Spectrally normalize the kernel (Lipschitz-constrained discriminator).
    w = spectral_norm(w, name = name)
    x = tf.nn.conv2d(x, w, strides = [1, stride, stride, 1], padding = 'SAME')
    bias = tf.get_variable(name=name+'_bias',shape=[cnum])
    if activation != None:
        return LeakyRelu(x + bias, name = name)
    else:
        return x + bias
| StarcoderdataPython |
1729606 | <filename>bitwise.py
#!/Applications/anaconda/envs/Python3/bin
def main():
    '''Bitwise Operators and Examples'''
    x, y, allOn = 0x55, 0xaa, 0xff
    # (label, value) pairs, printed below in order.
    demos = [
        ("x is: ", x),
        ("y is: ", y),
        ("allOn is: ", allOn),
        ("x | y: ", x | y),                            # Bitwise OR
        ("x & y: ", x & y),                            # Bitwise AND
        ("x ^ allOn: ", x ^ allOn),                    # Bitwise XOR
        ("allOn << 4: ", allOn << 4),                  # Left shift
        ("allOn >> 4: ", allOn >> 4),                  # Right shift
        ("One's complement of x (~x): ", ~x),          # One's complement
    ]
    for label, value in demos:
        print(label, end="")
        bitPrint(value)
    return 0
def bitPrint(n):
    '''Prints a given number n in binary format to 8 places'''
    print(format(n, '08b'))
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4869989 | <reponame>cloudsoft/brooklyn-marklogic<gh_stars>0
import sys
try:
import requests
except ImportError:
print "Couldn't import requests. Have you run `sudo pip install requests`?"
sys.exit(1)
def loadApps(entities=()):
    """Fetch info for the given entity ids from the local Brooklyn REST API.

    :param entities: iterable of entity id strings; joined comma-separated
        into the 'items' query parameter. Default changed from a mutable
        ``[]`` to an immutable tuple (same behavior, avoids the shared
        mutable-default pitfall).
    """
    payload = {'items': ','.join(entities)}
    return requests.get('http://localhost:8081/v1/applications/fetch', params=payload).json()
def loadEntitySensors(entity):
    """Fetch the current sensor values of one entity from the Brooklyn REST API."""
    appId = entity['applicationId']
    entId = entity['id']
    url = 'http://localhost:8081/v1/applications/' + appId + '/entities/' + entId + '/sensors/current-state'
    response = requests.get(url)
    return response.json()
def isMarkLogicEntity(app):
    """True if the entity's name mentions MarkLogic."""
    entity_name = app['name']
    return 'MarkLogic' in entity_name
def isMarkLogicNode(app):
    """True if the entity is a MarkLogicNode, by fully-qualified type name."""
    node_type = app['type']
    return node_type == 'io.cloudsoft.marklogic.nodes.MarkLogicNode'
def walkEntities():
    """Repeatedly calls loadApps until no new children are encountered"""
    # Breadth-first walk over the entity tree; '' seeds the root fetch.
    toCheck = ['']
    visited = set()
    while len(toCheck) > 0:
        # Only fetch ids that have not already been expanded.
        # (Python 2: filter returns a list here.)
        checking = filter(lambda appId: appId not in visited, toCheck)
        checked = loadApps(checking)
        toCheck = []
        for newApp in checked:
            if newApp['id'] not in visited:
                yield newApp
                visited.add(newApp['id'])
                # Enqueue children of newly seen entities for the next round.
                if 'children' in newApp:
                    for child in newApp['children']:
                        toCheck.append(child['id'])
# Walk all entities and print the hostname of every MarkLogic node that has
# one. (Python 2: `print` statement.)
for entity in walkEntities():
    # print entity
    if isMarkLogicNode(entity):
        host = loadEntitySensors(entity)['host.name']
        if host:
            print host
| StarcoderdataPython |
1896935 | <filename>exp/bezier/diff_exp.py<gh_stars>100-1000
from sympy import *
#f = Function('f')
#eq = Derivative(f(x), x) + 1
#res = dsolve(eq, f(x), ics={f(0):0})
#print(res)
# Parametric curve (x(t), y(t)); symbols for endpoint constraints.
x = Function('x')
y = Function('y')
t = symbols('t')
x1, y1, x2, y2, yx1, yx2 = symbols('x1 y1 x2 y2 yx1 yx2')
# constant speed
# ODE: the derivative of the squared speed x'(t)^2 + y'(t)^2 is zero,
# i.e. the curve is traversed at constant speed.
eq = Derivative(Derivative(x(t), t)**2 + Derivative(y(t), t)**2, t)
# NOTE(review): dsolve is called without naming the unknown functions and
# with ics keyed on expressions like y(0)/x(0); it is unclear this is a
# valid sympy ics specification -- confirm against the dsolve docs.
res = dsolve(eq, ics={x(0):x1, y(0):y1, x(1):x2, y(1):y2, y(0)/x(0):yx1, y(1)/x(1):yx2})
print (res)
| StarcoderdataPython |
11288346 | """ This script defines an example automated tron client that will avoid walls if it's about to crash into one.
This is meant to be an example of how to implement a basic matchmaking agent.
"""
import argparse
from random import choice, randint
from colosseumrl.envs.tron.rllib import SimpleAvoidAgent
from colosseumrl.matchmaking import request_game, GameResponse
from colosseumrl.RLApp import create_rl_agent
from colosseumrl.envs.tron import TronGridClientEnvironment
from colosseumrl.envs.tron import TronGridEnvironment
from colosseumrl.rl_logging import get_logger
# Module-level logger from colosseumrl's logging helper.
logger = get_logger()
def tron_client(env: TronGridClientEnvironment, username: str):
    """ Our client function for the random tron client.

    Parameters
    ----------
    env : TronGridClientEnvironment
        The client environment that we will interact with for this agent.
    username : str
        Our desired username.
    """
    # Connect to the game server and wait for the game to begin.
    # We run env.connect once we have initialized ourselves and we are ready to join the game.
    player_num = env.connect(username)
    logger.debug("Player number: {}".format(player_num))
    # Next we run env.wait_for_turn() to wait for our first real observation
    env.wait_for_turn()
    logger.info("Game started...")
    # Keep executing moves until the game is over
    terminal = False
    agent = SimpleAvoidAgent()
    while not terminal:
        # See if there is a wall in front of us, if there is, then we will turn in a random direction.
        action = agent(env.server_environment, env.observation)
        # We use env.step in order to execute an action and wait until it is our turn again.
        # This function will block while the action is executed and will return the next observation that belongs to us
        new_obs, reward, terminal, winners = env.step(action)
        print("Took step with action {}, got: {}".format(action, (new_obs, reward, terminal, winners)))
    # Once the game is over, we print out the results and close the agent.
    # (new_obs/winners are defined because terminal starts False, so the loop
    # body runs at least once.)
    logger.info("Game is over. Players {} won".format(winners))
    logger.info("Final observation: {}".format(new_obs))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--host", "-s", type=str, default="localhost",
                        help="Hostname of the matchmaking server.")
    parser.add_argument("--port", "-p", type=int, default=50051,
                        help="Port the matchmaking server is running on.")
    parser.add_argument("--username", "-u", type=str, default="",
                        help="Desired username to use for your connection. By default it will generate a random one.")
    # NOTE(review): logged before parse_args; matchmaking actually happens at
    # request_game below.
    logger.debug("Connecting to matchmaking server. Waiting for a game to be created.")
    args = parser.parse_args()
    # Generate a random username when none was supplied.
    if args.username == "":
        username = "Tester_{}".format(randint(0, 1000))
    else:
        username = args.username
    # We use request game to connect to the matchmaking server and await a game assigment.
    game: GameResponse = request_game(args.host, args.port, username)
    logger.debug("Game has been created. Playing as {}".format(username))
    logger.debug("Current Ranking: {}".format(game.ranking))
    # Once we have been assigned a game server, we launch an RLApp agent and begin our computation
    agent = create_rl_agent(agent_fn=tron_client,
                            host=game.host,
                            port=game.port,
                            auth_key=game.token,
                            client_environment=TronGridClientEnvironment,
                            server_environment=TronGridEnvironment)
    agent(username)
| StarcoderdataPython |
187161 | <gh_stars>0
# Code from Chapter 6 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
# Various dimensionality reductions running on the Iris dataset
import pylab as pl
import numpy as np
# Load the preprocessed Iris data: 4 feature columns + 1 class-label column.
iris = np.loadtxt('../3 MLP/iris_proc.data', delimiter=',')
# Centre each feature (zero mean per column).
iris[:, :4] = iris[:, :4] - iris[:, :4].mean(axis=0)
# Scale by the larger of each column's max and min (note: min, not abs(min) --
# presumably the data is roughly symmetric after centring; verify if reused).
imax = np.concatenate((iris.max(axis=0) * np.ones((1, 5)), iris.min(axis=0) * np.ones((1, 5))), axis=0).max(axis=0)
iris[:, :4] = iris[:, :4] / imax[:4]
# Separate labels (last column) from features.
labels = iris[:, 4:]
iris = iris[:, :4]
# Shuffle observations, applying the same permutation to features and labels.
order = list(range(np.shape(iris)[0]))
np.random.shuffle(order)
iris = iris[order, :]
labels = labels[order, 0]
# Row indices of the three classes, used to pick plot markers below.
w0 = np.where(labels == 0)
w1 = np.where(labels == 1)
w2 = np.where(labels == 2)
# --- LDA (project-local module from the book's code) ---
import lda
newData, w = lda.lda(iris, labels, 2)
print(np.shape(newData))
# Figure 1: raw data, first two features, one marker per class.
pl.plot(iris[w0, 0], iris[w0, 1], 'ok')
pl.plot(iris[w1, 0], iris[w1, 1], '^k')
pl.plot(iris[w2, 0], iris[w2, 1], 'vk')
pl.axis([-1.5, 1.8, -1.5, 1.8])
pl.axis('off')
# Figure 2: LDA projection.
pl.figure(2)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis([-1.5, 1.8, -1.5, 1.8])
pl.axis('off')
# --- PCA ---
import pca
x, y, evals, evecs = pca.pca(iris, 2)
# Figure 3: PCA projection.
pl.figure(3)
pl.plot(y[w0, 0], y[w0, 1], 'ok')
pl.plot(y[w1, 0], y[w1, 1], '^k')
pl.plot(y[w2, 0], y[w2, 1], 'vk')
pl.axis('off')
# --- Kernel PCA with a Gaussian kernel ---
import kernelpca
newData = kernelpca.kernelpca(iris, 'gaussian', 2)
# Figure 4: kernel-PCA projection.
pl.figure(4)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
# --- Factor analysis ---
import factoranalysis
newData = factoranalysis.factoranalysis(iris, 2)
# print newData
# Figure 5: factor-analysis projection.
pl.figure(5)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
# --- Locally Linear Embedding (2 dims, 12 neighbours) ---
# NOTE(review): no pl.figure() call here, so these plots land on figure 5.
import lle
print(np.shape(iris))
a, b, newData = lle.lle(iris, 2, 12)
print(np.shape(newData))
print(newData[w0, :])
print("---")
print(newData[w1, :])
print("---")
print(newData[w2, :])
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
# --- Isomap (2 dims, 100 neighbours); it may drop points, so it returns
# its own label array which replaces the class index sets.
import isomap
print(labels)
newData, newLabels = isomap.isomap(iris, 2, 100)
print(np.shape(newData))
print(newLabels)
w0 = np.where(newLabels == 0)
w1 = np.where(newLabels == 1)
w2 = np.where(newLabels == 2)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
print("Done")
pl.show()
| StarcoderdataPython |
221768 | <gh_stars>0
from uuid import uuid4
from cascade.core.context import ExecutionContext
def make_execution_context(**parameters):
    """Build an ExecutionContext whose parameters are the built-in defaults
    overridden by any keyword arguments, stamped with a fresh run_id."""
    settings = {"database": "dismod-at-dev", "bundle_database": "epi", **parameters}
    context = ExecutionContext()
    context.parameters = settings
    # `parameters` supports attribute-style assignment (project-specific behavior).
    context.parameters.run_id = uuid4()
    return context
| StarcoderdataPython |
4964099 | from __future__ import annotations
from typing import Optional
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.entity_registry as er
import voluptuous as vol
from homeassistant.core import State
from .const import CONF_POWER, CONF_STATES_POWER
from .strategy_interface import PowerCalculationStrategyInterface
# Voluptuous schema for this strategy's configuration: an optional fixed
# `power` value plus an optional mapping of entity state -> power in watts
# (defaults to an empty mapping).
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_POWER): vol.Coerce(float),
        vol.Optional(CONF_STATES_POWER, default={}): vol.Schema(
            {cv.string: vol.Coerce(float)}
        ),
    }
)
class FixedStrategy(PowerCalculationStrategyInterface):
    """Power calculation strategy returning a fixed wattage, optionally
    overridden per entity state."""

    def __init__(
        self, power: Optional[float], per_state_power: Optional[dict[str, float]]
    ) -> None:
        self._power = power
        self._per_state_power = per_state_power

    async def calculate(self, entity_state: State) -> Optional[float]:
        """Return the power for the entity's current state.

        Falls back to the fixed `power` when no per-state override matches.
        """
        # BUGFIX: _per_state_power is Optional -- `state in None` raised
        # TypeError when only a fixed power was configured.
        if self._per_state_power is not None and entity_state.state in self._per_state_power:
            return self._per_state_power.get(entity_state.state)
        return self._power

    async def validate_config(self, entity_entry: er.RegistryEntry):
        """Validate correct setup of the strategy"""
        pass
| StarcoderdataPython |
1770426 | # Keras implementation of the paper:
# 3D MRI Brain Tumor Segmentation Using Autoencoder Regularization
# by <NAME>. (https://arxiv.org/pdf/1810.11654.pdf)
# Author of this code: <NAME> (https://github.com/IAmSUyogJadhav)
from blocks import *
from utils import *
import torch
import torch.nn as nn
from collections import OrderedDict
# from group_norm import GroupNormalization
class Encoder(nn.Sequential):
    """Convolutional encoder: an initial 3x3x3 conv followed by stacks of
    GreenBlocks with three stride-2 downsizing convs (channels d -> 8d).

    GreenBlock / calc_same_padding / calc_conv_shape come from the
    project-local `blocks` and `utils` modules; their exact semantics are not
    visible here.
    """

    def __init__(self, in_features=1, input_side_dim=48, model_depth=32):
        super(Encoder, self).__init__()
        # "same" padding for the initial stride-1 conv.
        next_padding = calc_same_padding(input_side_dim, 3, 1)
        # Side lengths fed to the GreenBlocks after each downsizing step.
        # NOTE(review): computed with padding=0, while the downsize convs
        # below use padding=1 -- confirm calc_conv_shape matches reality.
        out_dim_0 = input_side_dim
        out_dim_1 = calc_conv_shape(out_dim_0, 3, 0, 2)
        out_dim_2 = calc_conv_shape(out_dim_1, 3, 0, 2)
        out_dim_3 = calc_conv_shape(out_dim_2, 3, 0, 2)
        # Define depth of model
        init_d = model_depth
        modules = [
            ('conv0', nn.Conv3d(in_features, init_d, kernel_size=3, stride=1, padding=next_padding)),
            ('sp_drop0', nn.Dropout3d(0.2)),
            ('green0', GreenBlock(init_d, init_d, out_dim_0)),
            ('downsize_0', nn.Conv3d(init_d, init_d * 2, kernel_size=3, stride=2, padding=1)),
            # add padding to divide images by 2 exactly
            ('green10', GreenBlock(init_d * 2, init_d * 2, out_dim_1)),
            ('green11', GreenBlock(init_d * 2, init_d * 2, out_dim_1)),
            ('downsize_1', nn.Conv3d(init_d * 2, init_d * 4, kernel_size=3, stride=2, padding=1)),
            ('green20', GreenBlock(init_d * 4, init_d * 4, out_dim_2)),
            ('green21', GreenBlock(init_d * 4, init_d * 4, out_dim_2)),
            ('downsize_2', nn.Conv3d(init_d * 4, init_d * 8, kernel_size=3, stride=2, padding=1)),
            ('green30', GreenBlock(init_d * 8, init_d * 8, out_dim_3)),
            ('green31', GreenBlock(init_d * 8, init_d * 8, out_dim_3)),
            ('green32', GreenBlock(init_d * 8, init_d * 8, out_dim_3)),
            ('green33', GreenBlock(init_d * 8, init_d * 8, out_dim_3)),
        ]
        # nn.Sequential registration: add each (name, module) pair in order.
        for m in modules:
            self.add_module(*m)
class Classifier(nn.Module):
    """Regression head over the encoder output: 1x1x1 convs interleaved with
    GreenBlocks shrink channels 8d -> d, then a linear layer maps the
    flattened volume to `num_classes` non-negative outputs (final ReLU)."""

    def __init__(self, num_classes=5, input_side_dim=6, model_depth=32):
        super(Classifier, self).__init__()
        # 1x1x1 stride-1 convs keep the side length; these express that.
        out_dim_0 = calc_conv_shape(input_side_dim, 1, 0, 1)
        out_dim_1 = calc_conv_shape(out_dim_0, 1, 0, 1)
        out_dim_2 = calc_conv_shape(out_dim_1, 1, 0, 1)
        # print(out_dim_0, out_dim_1, out_dim_2)
        # "same" padding so the final 3x3x3 conv preserves the side length.
        next_padding = calc_same_padding(out_dim_2, 3, 1)
        out_dim_3 = calc_conv_shape(out_dim_2, 3, next_padding, 1)
        self.to_ground_truth = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv3d(model_depth * 8, model_depth * 4, kernel_size=(1, 1, 1), stride=1, padding=0)),
            ('green0', GreenBlock(model_depth * 4, model_depth * 4, out_dim_0)),
            ('conv1', nn.Conv3d(model_depth * 4, model_depth * 2, kernel_size=(1, 1, 1), stride=1, padding=0)),
            ('green1', GreenBlock(model_depth * 2, model_depth * 2, out_dim_1)),
            ('conv2', nn.Conv3d(model_depth * 2, model_depth, kernel_size=(1, 1, 1), stride=1, padding=0)),
            ('green2', GreenBlock(model_depth, model_depth, out_dim_2)),
            ('conv3', nn.Conv3d(model_depth, model_depth, kernel_size=(3, 3, 3), stride=1, padding=next_padding)),
            ('relu', nn.LeakyReLU(inplace=True))
        ]))
        # print('Classifier has {} features'.format(model_depth * out_dim_3 ** 3))
        # Flattened conv output -> class scores.
        self.regressor = nn.Linear(in_features=model_depth * out_dim_3 ** 3, out_features=num_classes)

    def forward(self, inputs):
        """Map encoder features to `num_classes` outputs, ReLU-clamped to >= 0."""
        conv_out = self.to_ground_truth(inputs)
        return torch.nn.functional.relu(self.regressor(conv_out.view(conv_out.shape[0], -1)), inplace=True)
class VAERegularization(nn.Module):
    """VAE regularization branch: compresses encoder features to a latent
    distribution (z_mean, z_var) and samples via the reparameterization trick.

    NOTE(review): the `input_side_dim` parameter is accepted but unused -- the
    post-conv side length is hard-coded to 3 below (6 -> 3 with stride 2).
    """

    def __init__(self, input_side_dim=6, model_depth=32):
        super(VAERegularization, self).__init__()
        # VAE regularization
        self.reduce_dimension = nn.Sequential(OrderedDict([
            # ('group_normR', GroupNormalization(in_features, groups=8)),
            ('norm0', nn.BatchNorm3d(model_depth * 8)),
            ('reluR0', nn.LeakyReLU(inplace=True)),
            ('convR0', nn.Conv3d(model_depth * 8, model_depth // 2, kernel_size=(3, 3, 3), stride=2, padding=1)),
        ]))
        # Hard-coded side length after the stride-2 conv above.
        out_dim = 3
        # print("out dim after VAE: {}".format(out_dim))
        # REPARAMETERIZATION TRICK (needs flattening)
        self.out_linear = nn.Linear(in_features=(model_depth // 2) * out_dim ** 3, out_features=model_depth * 8)
        self.z_mean = nn.Linear(in_features=model_depth * 8, out_features=model_depth*4)
        self.z_var = nn.Linear(in_features=model_depth * 8, out_features=model_depth*4)
        # Reparametrization is a project-local block (see `blocks`).
        self.reparameterization = Reparametrization()

    def forward(self, inputs):
        """Return (sampled latent, z_mean, z_var) for the encoder features."""
        x = self.reduce_dimension(inputs)
        x = self.out_linear(x.view(x.shape[0], -1))
        z_mean = self.z_mean(x)
        z_var = self.z_var(x)
        del x
        return self.reparameterization(z_mean, z_var), z_mean, z_var
class Decoder(nn.Module):
    """VAE decoder: expands the latent vector back into an image volume via a
    linear reshape plus upsampling UpGreenBlocks (project-local blocks).

    The hard-coded 12/24/48 side lengths assume `input_side_dim=3` and a
    doubling at each upsampling stage -- confirm against UpGreenBlock.
    """

    def __init__(self, model_depth=32, num_channels=1, input_side_dim=3):
        super(Decoder, self).__init__()
        self.model_depth = model_depth
        self.input_side_dim = input_side_dim
        # Latent vector -> flattened (model_depth // 2, side, side, side) volume.
        self.reshape_block = nn.Sequential(OrderedDict([
            ('fc0', nn.Linear(in_features=model_depth*4, out_features=(model_depth // 2) * input_side_dim ** 3)),
            ('relu', nn.LeakyReLU(inplace=True)),
        ]))
        # "same" padding for the final 3x3x3 conv at side length 48.
        blue_padding = calc_same_padding(48, 3, 1)
        self.decode_block = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv3d(model_depth // 2, model_depth * 8, kernel_size=1, stride=1)),
            ('up1', nn.Upsample(scale_factor=2)),
            ('upgreen0', UpGreenBlock(model_depth * 8, model_depth * 4, input_side_dim)),
            ('upgreen1', UpGreenBlock(model_depth * 4, model_depth * 2, 12)),
            ('upgreen2', UpGreenBlock(model_depth * 2, model_depth, 24)),
            ('blue_block', nn.Conv3d(model_depth, model_depth, kernel_size=3, stride=1, padding=blue_padding)),
            ('output_block', nn.Conv3d(in_channels=model_depth, out_channels=num_channels, kernel_size=1, stride=1))
        ]))

    def forward(self, inputs):
        """Decode a latent batch into reconstructed image volumes."""
        x = self.reshape_block(inputs)
        # Un-flatten to a cubic volume before the conv stack.
        x = x.reshape([x.shape[0], self.model_depth // 2, self.input_side_dim, self.input_side_dim, self.input_side_dim])
        x = self.decode_block(x)
        return x
class BrainClassifierVAE(nn.Module):
    """Classifier with VAE-based regularization (after Myronenko 2018,
    arXiv:1810.11654): a shared encoder feeds both a classification head and a
    VAE branch that reconstructs the input as an auxiliary objective."""

    def __init__(self, in_channels=1, input_side_dim=48, num_classes=16, model_depth=32):
        super().__init__()
        # ENCODING
        self.encoder = Encoder(in_features=in_channels, input_side_dim=input_side_dim, model_depth=model_depth)
        # input_side_dim = 6
        # VAE regularization
        # Encoder output side is assumed to be 6 (48 after three stride-2 steps).
        self.internal_representation = VAERegularization(input_side_dim=6, model_depth=model_depth)
        # DECODER
        # The internal representation shrinks the dimension by a factor of 2
        self.decoder = Decoder(
            model_depth=model_depth,
            num_channels=in_channels,
            input_side_dim=3
        )
        # CLASSIFICATION
        self.classifier = Classifier(num_classes=num_classes, input_side_dim=6, model_depth=model_depth)

    def forward(self, inputs):
        """Return (class outputs, reconstruction, original inputs, z_mean, z_var).

        The inputs are passed through so loss functions can consume everything
        from a single return tuple.
        """
        encoded = self.encoder(inputs)
        out_features = self.classifier(encoded)
        int_repr, z_mean, z_var = self.internal_representation(encoded)
        del encoded
        reconstructed_image = self.decoder(int_repr)
        return out_features, reconstructed_image, inputs, z_mean, z_var
if __name__ == '__main__':
    # Smoke test: one forward/backward pass on random data (requires CUDA).
    from losses import VAELoss
    bigboi = BrainClassifierVAE(in_channels=53, input_side_dim=48, num_classes=8, model_depth=32).cuda()
    # NOTE(review): `input` shadows the builtin; kept as-is in this script.
    input = torch.rand((10, 53, 48, 48, 48)).cuda()
    target = torch.rand((10, 8)).cuda()
    lr = 1e-4
    weight_L2 = 0.1
    weight_KL = 0.1
    dice_e = 1e-8
    optim = torch.optim.AdamW(bigboi.parameters(), lr=1e-4)
    # Loss for features
    loss_mse = torch.nn.MSELoss()
    # Loss for VAE
    loss_vae = VAELoss(
        weight_KL=weight_KL,
        weight_L2=weight_L2
    )
    out_features, reconstructed_image, input_image, z_mean, z_var = bigboi(input)
    # MSE is symmetric, so the (target, prediction) argument order is harmless.
    loss_mse_v = loss_mse(target, out_features)
    loss_vae_v = loss_vae(reconstructed_image, input_image, z_mean, z_var)
    loss = loss_mse_v + loss_vae_v
    loss.backward()
    print('loss: {}'.format(loss.item()))
    print("features shape: {}".format(out_features.shape))
    print("reconstructed shape: {}".format(reconstructed_image.shape))
    print("z_mean: {} and z_var shapes: {}".format(z_mean.shape, z_var.shape))
| StarcoderdataPython |
6437544 | #Author : <NAME>
import torch
class Config:
    """Central configuration constants for the speech2emotion pipeline."""

    # Prefer the first GPU when available.
    DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Storage and data locations.
    STORAGE_BUCKET = 'gs://storage_bucket_speech'
    LOCAL_PATH = 'tmp'
    OUTPUT_PATH = 'output'
    TRAIN_DATA_DIR = '../../feature-extraction/train'
    VALID_DATA_DIR = '../../feature-extraction/valid'
    TEST_DATA_DIR = '../../feature-extraction/test'
    MODEL_DIR = 'model'
    MODEL_NAME = 'speech2emotion.pt'
    LOG_DIR = 'logs'
    LOG_FILE = "trainer.log"
    TENSORBOARD_LOG_DIR = 'tensorboardlogs'
    # Model parameters :
    N_MELS = 128
    # 3 * N_MELS input bins -- presumably three stacked mel-feature channels;
    # confirm against the feature-extraction code.
    INPUT_SPEC_SIZE = 3 * N_MELS
    RNN_CELL = 'lstm' # 'lstm' | 'gru'
    CNN_FILTER_SIZE = 64
    NUM_GENDER_CLASSES = 2
    NUM_EMOTION_CLASSES = 8
    # NOTE(review): "BATHC" is a typo for "BATCH"; kept because callers may
    # reference this exact attribute name.
    BATHC_SIZE = 128
    LR = 1e-4
    WEIGHT_DECAY = 1e-06
    NUM_EPOCHS = 1
    # For loss function
    ALPHA = 1
    BETA = 1
    # Classes
    EMOTION_NAMES = ['neutral', 'calm', 'happy', 'sad', 'angry', 'fearful', 'disgust', 'surprised']
    # Dataset emotion labels are 1-based; map them to 0-based class indices.
    EMOTION_LABELS = [1, 2, 3, 4, 5, 6, 7, 8]
    EMOTION_LABEL_2_IXD = {1 : 0, 2 : 1, 3 : 2, 4 : 3, 5 : 4, 6 : 5, 7 : 6, 8 : 7}
    GENDER_NAMES = ['female', 'male']
    GENDER_LABELS = [0, 1]
11327873 | <gh_stars>0
# coding: utf-8
"""
Payoneer Mobile API
Swagger specification for https://mobileapi.payoneer.com
OpenAPI spec version: 0.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import payoneer_mobile_api
from payoneer_mobile_api.rest import ApiException
from payoneer_mobile_api.models.payments_history_response import PaymentsHistoryResponse
class TestPaymentsHistoryResponse(unittest.TestCase):
    """ PaymentsHistoryResponse unit test stubs """

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testPaymentsHistoryResponse(self):
        """
        Test PaymentsHistoryResponse
        """
        # swagger-codegen stub: only verifies the model can be constructed.
        model = payoneer_mobile_api.models.payments_history_response.PaymentsHistoryResponse()
if __name__ == '__main__':
    # Run the generated test stubs with the stdlib unittest runner.
    unittest.main()
| StarcoderdataPython |
1980144 | <reponame>bmwant/chemister
from abc import ABC, abstractmethod
from crawler.db import get_engine
from crawler.cache import Cache
from crawler.models.transaction import (
NewTransaction,
get_transactions,
insert_new_transaction,
close_transaction,
get_hanging_transactions,
)
from utils import LoggableMixin
class BaseTrader(ABC, LoggableMixin):
    """Abstract base for traders: owns the DB engine/cache and the bookkeeping
    for opening, listing, and closing transactions. Subclasses implement the
    actual trading strategy (`trade`, `daily`, `notify`)."""

    def __init__(self):
        # Running total of currency received from closed (sale) transactions.
        self.amount = 0
        self.engine = None
        self.cache = None
        super().__init__()

    async def init(self):
        """Asynchronously acquire the DB engine and the cache pool."""
        self.engine = await get_engine()
        self.cache = Cache()  # todo: what about without a pool
        # todo: maybe singletone for all traders
        await self.cache._create_pool()

    @abstractmethod
    def trade(self, daily_data):
        """Run the trading strategy against one day's market data."""
        pass

    @abstractmethod
    def daily(self):
        """
        Daily periodic task which will be invoked automatically. Interface for the `Factory` when
        creating trading instances to be triggered periodically.
        """

    @abstractmethod
    def notify(self, *args):
        """
        Send updates via preferred channel about trade results.
        """

    async def sale_transaction(self, t, rate_close, dry_run=False):
        """Close transaction `t` at `rate_close` and return the proceeds.

        Unless `dry_run` is True, the close is persisted and the proceeds are
        added to `self.amount`.
        """
        amount = t.amount * rate_close  # resulting amount of transaction
        price = t.amount * t.rate_sale  # initial price of transaction
        profit = amount - price
        if not dry_run:
            async with self.engine.acquire() as conn:
                self.logger.info(
                    'Selling {amount:.2f} ({rate_sale:.2f}) '
                    'at {rate_close:.2f}; '
                    'total: {total:.2f}; '
                    'profit: {profit:.2f}'.format(
                        amount=t.amount,
                        rate_sale=t.rate_sale,
                        rate_close=rate_close,
                        total=amount,
                        profit=profit,
                    ))
                await close_transaction(
                    conn,
                    t_id=t.id,
                    rate_close=rate_close,
                )
            self.amount += amount
        return amount

    async def add_transaction(self, t: NewTransaction):
        """Persist a newly opened transaction."""
        async with self.engine.acquire() as conn:
            await insert_new_transaction(conn, t)

    async def hanging(self):
        """Return transactions that are open but not yet closed."""
        async with self.engine.acquire() as conn:
            return await get_hanging_transactions(conn)

    async def transactions(self):
        """
        History of all transactions
        """
        async with self.engine.acquire() as conn:
            return await get_transactions(conn)
| StarcoderdataPython |
6643734 | <reponame>BackQuote/backtester<filename>db/upload_quotes.py
import os
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Make the server package importable relative to this script's location.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir + '/../server')
from models import Ticker, Day
# Directory containing one "ultimate" quote file per ticker.
ultimate_dir = current_dir + '/../backtester/backtester/ultimate_files'
engine = create_engine(os.environ['DATABASE_URL'], echo=True)
Session = sessionmaker(bind=engine)
session = Session()
# Quotes are written to a local tab-separated file rather than the DB.
quotes_file = open('quotes', 'w')
# Days are shared across tickers, so only upload them on the first pass.
days_uploaded = False
for ultimate in os.listdir(ultimate_dir):
    # Filename is "<ticker>.xxx"; strip the 4-char extension.
    ticker_name = ultimate[:-4]
    ticker = Ticker(ticker_name)
    session.add(ticker)
    session.commit()
    ultimate_file = open(os.path.join(ultimate_dir, ultimate))
    day = None
    date = None
    day_id = 0
    for line in ultimate_file:
        if 'new day' in line:
            # Day-header lines look like "... new day <date>"; date is YYYYMMDD.
            dinfo = line.split()
            date = dinfo[3]
            day_id += 1
            if not days_uploaded:
                day = Day(date)
                session.add(day)
                session.commit()
        else:
            # Quote lines: ms-offset,open,high,low,close (prices are integers
            # with 4 implied decimal places; only 2 are kept below).
            qinfo = line.split(',')
            # example format: 2011-05-16 15:36:38
            formatted_date = '-'.join((date[:4], date[4:6], date[6:])) + ' '
            hours = int(qinfo[0]) // 3600000
            minutes = (int(qinfo[0]) - hours * 3600000) // 60000
            seconds = (int(qinfo[0]) - hours * 3600000 - minutes * 60000) // 1000
            # NOTE(review): formatted_date already ends with a space, so this
            # produces a double space before the time -- confirm the importer
            # on the other side expects that.
            timestamp = formatted_date + ' ' + str(hours) + ':' + str(minutes) + ':' + str(seconds)
            open_price = qinfo[1][:-4] + '.' + qinfo[1][-4:-2]
            high_price = qinfo[2][:-4] + '.' + qinfo[2][-4:-2]
            low_price = qinfo[3][:-4] + '.' + qinfo[3][-4:-2]
            close_price = qinfo[4][:-4] + '.' + qinfo[4][-4:-2]
            quotes_file.write(
                '{}\t{}\t{}\t{}\t{}\t{}\t{}\n'
                .format(open_price, high_price, low_price, close_price, timestamp, day_id, ticker_name)
            )
    days_uploaded = True
    ultimate_file.close()
quotes_file.close()
| StarcoderdataPython |
9603080 | from aerosandbox.dynamics.rigid_body.rigid_2D import *
from aerosandbox.dynamics.rigid_body.rigid_3D import * | StarcoderdataPython |
160746 | <reponame>Impavidity/relogic
import torch
import torch.nn as nn
class SpanGCNModule(nn.Module):
    """
    SpanGCN firstly extracts spans from text, then labels each span based on
    the learned representations (optionally refined by a GCN).
    """

    def __init__(self, config, task_name, boundary_n_classes=None, label_n_classes=None):
        super(SpanGCNModule, self).__init__()
        self.config = config
        self.task_name = task_name
        self.boundary_n_classes = boundary_n_classes
        self.label_n_classes = label_n_classes
        # Boundary head: per-token BIO logits for span extraction.
        if boundary_n_classes:
            self.to_boundary_logits = nn.Linear(config.hidden_size, self.boundary_n_classes)
        # Label head: classifies [span_repr ; predicate_repr] concatenations.
        if label_n_classes:
            self.to_label_logits = nn.Linear(config.hidden_size * 2, self.label_n_classes)
        if config.use_gcn:
            pass
        else:
            pass
        # Zero vector used to pad variable-length spans; not trainable.
        self.padding = nn.Parameter(torch.zeros(config.hidden_size), requires_grad=False)
        self.ones = nn.Parameter(torch.ones(1, 1), requires_grad=False)

    def forward(self,
                input, predicate_span=None,
                bio_hidden=None, span_candidates=None, extra_args=None, **kwargs):
        """Label candidate spans with respect to a predicate span.

        Two modes:
          1. Spans given: `span_candidates` = (start_index, end_index), each
             (batch, max_span_num); `bio_hidden` is None.
          2. Spans predicted: `bio_hidden` is provided, BIO logits are decoded
             into candidates via `get_candidate_span` (requires
             extra_args["label_mapping"]).

        Span and predicate token vectors are aggregated (average for now) and
        their concatenation is classified by `to_label_logits`.

        :param input: sentence token representations (batch, sentence, dim)
        :param predicate_span: (start, end) index tensors, each (batch,)
        :param bio_hidden: optional hidden states for boundary prediction
        :param span_candidates: optional (start_index, end_index) tensors
        :param extra_args: dict; must contain "label_mapping" in mode 2
        :return: label logits (batch, max_span_num, label_n_classes)
        """
        # BUGFIX: was `if bio_hidden:` -- truthiness of a multi-element tensor
        # raises RuntimeError; test for presence explicitly.
        if bio_hidden is not None:
            # BUGFIX: was `self.to_span_logits`, which is never defined;
            # the boundary head declared in __init__ is `to_boundary_logits`.
            bio_logits = self.to_boundary_logits(bio_hidden)
            assert "label_mapping" in extra_args, "label_mapping does not in extra_args"
            span_candidates = get_candidate_span(bio_logits, extra_args["label_mapping"])
        start_index, end_index = span_candidates
        # start_index, end_index = (batch, max_span_num)
        predicate_start_index, predicate_end_index = predicate_span
        # predicate_start_index, predicate_end_index = (batch)
        max_span_num = len(start_index[0])
        # input (batch, sentence, dim) -> (batch, max_span_num, sentence, dim)
        expanded_input = input.unsqueeze(1).repeat(1, max_span_num, 1, 1)
        start_index_ = start_index.view(-1)
        end_index_ = end_index.view(-1)
        # Gather padded token windows for every candidate span and the predicate.
        span_hidden = select_span(expanded_input.view(-1, expanded_input.size(-2), expanded_input.size(-1)), start_index_, end_index_, self.padding)
        predicate_hidden = select_span(input, predicate_start_index, predicate_end_index, self.padding)
        span_repr = self.aggregate(span_hidden, end_index_ - start_index_)
        predicate_repr = self.aggregate(predicate_hidden, predicate_end_index - predicate_start_index)
        # (batch, dim)
        # Pair every span with its sentence's predicate representation.
        concat = torch.cat([span_repr, predicate_repr.unsqueeze(1).repeat(1, max_span_num, 1).view(-1, predicate_repr.size(-1))], dim=-1)
        label_logits = self.to_label_logits(concat)
        return label_logits.view(input.size(0), max_span_num, self.label_n_classes)

    def aggregate(self, hidden, lengths):
        """
        Average the span token vectors (lengths clamped to >= 1 to avoid
        division by zero for padding spans).
        :param hidden: (batch, span_length, dim)
        :param lengths: (batch)
        :return: (batch, dim)
        """
        return torch.sum(hidden, 1) / torch.max(
            self.ones.repeat(lengths.size(0), 1).float(), lengths.unsqueeze(1).float())
def select_span(input, start_index, end_index, padding):
    """Slice one token window per row and pad to a common width.

    :param input: (batch, sentence, dim) tensor
    :param start_index: (batch,) inclusive start positions
    :param end_index: (batch,) exclusive end positions
    :param padding: (dim,) vector used to right-pad short spans
    :return: (batch, max_span_size, dim) tensor
    """
    widest = torch.max(end_index - start_index)
    rows = []
    for row, (begin, stop) in enumerate(zip(start_index, end_index)):
        width = stop - begin
        pieces = [torch.narrow(input[row], 0, begin, width)]
        if widest != width:
            # Right-pad with copies of the padding vector up to the widest span.
            pieces.append(padding.unsqueeze(0).repeat(widest - width, 1))
        rows.append(torch.cat(pieces, dim=0))
    return torch.stack(rows)
def get_candidate_span(bio_logits, label_mapping):
    """Decode BIO(ES)-tag logits into candidate span boundaries.

    :param bio_logits: float tensor (batch_size, sentence_length, n_tags)
    :param label_mapping: dict mapping tag string (e.g. "B-ARG", "O") -> index
    :return: (start_ids, end_ids) long tensors of shape (batch, max_span_num)
        with inclusive token indices; rows are zero-padded up to the largest
        span count in the batch (padding entries are placeholders only).
    """
    preds_tags = bio_logits.argmax(-1).data.cpu().numpy()
    inv_label_mapping = {v: k for k, v in label_mapping.items()}
    batch_span_labels = []
    max_span_num = 0
    # BUGFIX: iterate the decoded tag ids (`preds_tags`), not the raw logits
    # tensor -- indexing inv_label_mapping with tensor rows raised KeyError
    # and left preds_tags unused.
    for sentence in preds_tags:
        # convert to understandable labels
        sentence_tags = [inv_label_mapping[i] for i in sentence]
        span_labels = []
        last = 'O'
        start = -1
        for i, tag in enumerate(sentence_tags):
            pos, _ = (None, 'O') if tag == 'O' else tag.split('-', 1)
            # Close the running span when a new span starts (B/S) or we hit O.
            if (pos == 'S' or pos == 'B' or tag == 'O') and last != 'O':
                span_labels.append((start, i - 1, last.split('-')[-1]))
            if pos == 'B' or pos == 'S' or last == 'O':
                start = i
            last = tag
        # Flush a span that runs to the end of the sentence.
        if sentence_tags[-1] != 'O':
            span_labels.append((start, len(sentence_tags) - 1,
                                sentence_tags[-1].split('-', 1)[-1]))
        max_span_num = max(len(span_labels), max_span_num)
        batch_span_labels.append(span_labels)
    batch_start_index = []
    batch_end_index = []
    for span_labels in batch_span_labels:
        start_index = [span[0] for span in span_labels]
        end_index = [span[1] for span in span_labels]
        # Zero-pad; for loss computation these placeholder entries are ignored.
        start_index += (max_span_num - len(start_index)) * [0]
        end_index += (max_span_num - len(end_index)) * [0]
        batch_start_index.append(start_index)
        batch_end_index.append(end_index)
    start_ids = torch.tensor(batch_start_index, dtype=torch.long).to(bio_logits.device)
    end_ids = torch.tensor(batch_end_index, dtype=torch.long).to(bio_logits.device)
    return (start_ids, end_ids)
| StarcoderdataPython |
4812875 | """
Copyright 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#! /usr/bin/env python
from weakboundmethod import WeakBoundMethod as Wbm
from events import TickEvent, QuitEvent, StartEvent, ReloadconfigEvent
from bot import Bot
import time, wx, eventdispatcher
import os, sys
lib_path = os.path.abspath(os.path.join(".."))
sys.path.append(lib_path)
import botinfo
class Spinner():
    """
    This is our spinner class, it's what keeps the bot alive.
    It will run until a QuitEvent has been sent out.
    It consumes the event dispatchers queue and then sleeps for 0.01 seconds to reduce overhead.
    """

    def __init__(self, parameters):
        # parameters: (fileName, coreFile, moduleFile, GUI); fileName and GUI
        # are unpacked here but never used.
        fileName, self.coreFile, self.moduleFile, GUI = parameters;
        # Initial config load; read_config ignores its argument's value.
        self.read_config("all")
        # Shared event dispatcher, also published via the botinfo registry.
        self.ed = botinfo.bot_info["ed"] = eventdispatcher.EventDispatcher()
        self.alive = True
        self.bot = Bot(self.ed)
        # Weak bound methods so the dispatcher does not keep this object alive.
        self._connection = [
            self.ed.add(QuitEvent, Wbm(self.quit)),
            self.ed.add(ReloadconfigEvent, Wbm(self.read_config))
        ]
        self.bot.start()

    def tick(self):
        # Drain pending events; returns False once quit() has been dispatched.
        self.ed.consume_event_queue()
        return self.alive

    def quit(self, event):
        # QuitEvent handler: stop the spin loop.
        self.alive = False

    def read_config(self, event):
        # (Re)load core and module configuration into the shared bot_info dict.
        botinfo.bot_info.update(botinfo.read_config("core", self.coreFile))
        botinfo.bot_info.update(botinfo.read_config("modules", self.moduleFile))
def main(parameters):
    """Create a Spinner and pump its event queue until it reports shutdown."""
    spinner = Spinner(parameters)
    while spinner.tick():
        # Short sleep to keep CPU usage low between queue drains.
        time.sleep(0.01)
| StarcoderdataPython |
4871881 | <reponame>Nathaniel-Haines/easyml<gh_stars>1-10
"""Functions for sampling data.
"""
import numpy as np
__all__ = []
def resample_equal_proportion(X, y, train_size=0.667):
    """Stratified train/test split for a binary classification dataset.

    Samples `train_size` of each class (labels 0 and 1) into the train set,
    so both splits keep the original class proportions.

    Parameters
    ----------
    X : ndarray, shape (n_obs, n_features)
        Feature matrix to split.
    y : ndarray, shape (n_obs,)
        Binary labels (0/1) used for stratification.
    train_size : float, default 0.667
        Proportion of each class placed in the train split.

    Returns
    -------
    X_train, X_test, y_train, y_test : ndarray
        Row-aligned train/test splits of ``X`` and ``y``.
        (The previous docstring incorrectly described a boolean-mask return.)
    """
    # calculate number of observations
    n_obs = len(y)
    # identify index number for class1 and class2
    index_class1 = np.where(y == 0)[0]
    index_class2 = np.where(y == 1)[0]
    # per-class train-set sizes (rounded)
    n_class1_train = int(np.round(len(index_class1) * train_size))
    n_class2_train = int(np.round(len(index_class2) * train_size))
    # sample train indices without replacement within each class
    index_class1_train = np.random.choice(index_class1, size=n_class1_train, replace=False)
    index_class2_train = np.random.choice(index_class2, size=n_class2_train, replace=False)
    index_train = np.append(index_class1_train, index_class2_train)
    # boolean mask over all observations: True => train set
    # (np.isin replaces the deprecated np.in1d)
    mask = np.isin(np.arange(n_obs), index_train)
    # Create train and test splits
    X_train = X[mask, :]
    X_test = X[np.logical_not(mask), :]
    y_train = y[mask]
    y_test = y[np.logical_not(mask)]
    return X_train, X_test, y_train, y_test
| StarcoderdataPython |
5157958 | from CTFd.admin import config
from flask import render_template, request
from CTFd.models import (
Challenges
)
from CTFd.utils.dates import ctf_ended, ctf_paused, ctf_started
from CTFd.utils.user import authed
from CTFd.utils.helpers import get_errors, get_infos
from CTFd.utils.decorators import (
require_verified_emails,
during_ctf_time_only,
ratelimit,
)
from CTFd.api.v1.challenges import ChallengeAttempt
from CTFd.plugins.challenges import get_chal_class
from CTFd.challenges import listing
from CTFd.utils.decorators.visibility import (
check_challenge_visibility,
)
class ChallengeAttemptAnonymous(ChallengeAttempt):
    """Challenge-attempt API resource that also accepts submissions from
    anonymous (unauthenticated) users; authenticated users fall through to
    the stock CTFd handler."""

    @ratelimit(method="POST", limit=10, interval=60)
    def post(self):
        if authed() is False:
            # Anonymous path: grade the flag but never persist a solve/score.
            if request.content_type != "application/json":
                request_data = request.form
            else:
                request_data = request.get_json()
            challenge_id = request_data.get("challenge_id")
            challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
            # Delegate grading to the challenge-type plugin.
            chal_class = get_chal_class(challenge.type)
            status, message = chal_class.attempt(challenge, request)
            return {
                "success": True,
                "data": {
                    "status": "correct" if status else "incorrect",
                    "message": message,
                }
            }
        else:
            # Logged-in users get the normal CTFd attempt behavior.
            return super().post()
@during_ctf_time_only
@require_verified_emails
@check_challenge_visibility
def listing():
    """Render the challenges page, surfacing CTF start/pause/end status
    messages as info/error banners."""
    infos = get_infos()
    errors = get_errors()
    if ctf_started() is False:
        errors.append(f"{config.ctf_name()} has not started yet")
    if ctf_paused() is True:
        infos.append(f"{config.ctf_name()} is paused")
    if ctf_ended() is True:
        infos.append(f"{config.ctf_name()} has ended")
    return render_template("challenges.html", infos=infos, errors=errors)
def load(app):
    """CTFd plugin entry point: swap in the anonymous-attempt API view and the
    patched challenge-listing view on the running app."""
    app.view_functions['api.challenges_challenge_attempt'] = ChallengeAttemptAnonymous.as_view('api.challenges_challenge_attempt')
    app.view_functions['challenges.listing'] = listing
| StarcoderdataPython |
5069275 | """Module containing the entrypoint to the bot"""
import discord
from discord.ext.commands import Bot
import logging
import redis
from buffs import (
handle_buff_message,
is_buff_message,
)
from commands.help import HelpCommandCog
from commands.configuration import FeatureConfigurationCog
from gear_check import (
handle_gear_check_message,
is_gear_check_message,
)
# Discord user id of this bot; its own messages are ignored in on_message.
BOT_AUTHOR_ID = 822262145412628521
COMMAND_PREFIX = 'tog.'
# NOTE(review): connects to a local Redis at import time; the module fails to
# import if Redis is unavailable.
redis_server = redis.Redis()
bot = Bot(command_prefix=COMMAND_PREFIX)
bot.add_cog(HelpCommandCog(bot))
bot.add_cog(FeatureConfigurationCog(bot, redis_server))
# Secrets are pulled from Redis rather than environment variables.
AUTH_TOKEN = str(redis_server.get('TOG_BOT_AUTH_TOKEN').decode('utf-8'))
WCL_TOKEN = str(redis_server.get('WCL_TOKEN').decode('utf-8'))
@bot.event
async def on_ready():
    """Log a debug line once the gateway connection is established."""
    logging.debug(f'Successful Launch! {bot.user}')
@bot.event
async def on_message(message):
    """
    Handler for incoming messages to all channels.
    Depending on the channel that the message was sent to and the message's author,
    we may send additional messages from our bot in response.
    """
    try:
        # Ignore our own messages to avoid feedback loops.
        if message.author.id == BOT_AUTHOR_ID:
            return
        elif is_gear_check_message(message):
            # Early return: handled messages skip command processing below.
            return await handle_gear_check_message(message, bot, WCL_TOKEN, redis_server)
        elif is_buff_message(message, bot, redis_server):
            return await handle_buff_message(message, bot, redis_server)
    except Exception as e:
        # Broad catch keeps the bot alive on handler failures; the error is
        # logged and the message still falls through to command processing.
        logging.error(e)
    await bot.process_commands(message)
# Start the gateway event loop; this call blocks, so it must remain the last
# statement in the file.
bot.run(AUTH_TOKEN)
11213083 | <filename>mcmcplot/utilities.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 06:24:12 2018
@author: prmiles
"""
import numpy as np
from scipy import pi, sin, cos
import sys
import math
def check_settings(default_settings, user_settings=None):
'''
Check user settings with default.
Recursively checks elements of user settings against the defaults
and updates settings as it goes. If a user setting does not exist
in the default, then the user setting is added to the settings.
If the setting is defined in both the user and default settings,
then the user setting overrides the default. Otherwise, the default
settings persist.
Args:
* **default_settings** (:py:class:`dict`): Default settings for \
particular method.
Kwargs:
* **user_settings** (:py:class:`dict`): User defined settings. \
Default: `None`
Returns:
* (:py:class:`dict`): Updated settings.
'''
settings = default_settings.copy() # initially define settings as default
options = list(default_settings.keys()) # get default settings
if user_settings is None: # convert to empty dict
user_settings = {}
user_options = list(user_settings.keys()) # get user settings
for uo in user_options: # iterate through settings
if uo in options:
# check if checking a dictionary
if isinstance(settings[uo], dict):
settings[uo] = check_settings(settings[uo], user_settings[uo])
else:
settings[uo] = user_settings[uo]
if uo not in options:
settings[uo] = user_settings[uo]
return settings
def generate_subplot_grid(nparam=2):
    '''
    Generate subplot grid.
    For example, if `nparam` = 2, then the subplot will have
    2 rows and 1 column.
    Kwargs:
        * **nparam** (:py:class:`int`): Number of parameters. \
        Default: `2`
    Returns:
        * **ns1** (:py:class:`int`): Number of rows in subplot
        * **ns2** (:py:class:`int`): Number of columns in subplot
    '''
    # Rows = ceiling of the square root, columns = rounded square root,
    # which yields a grid just large enough for nparam panels.
    root = math.sqrt(nparam)
    return math.ceil(root), round(root)
def generate_names(nparam, names):
    '''
    Generate parameter name set.
    For example, if `nparam` = 4, then the generated names are::
        names = ['p_{0}', 'p_{1}', 'p_{2}', 'p_{3}']
    Args:
        * **nparam** (:py:class:`int`): Number of parameter names to generate
        * **names** (:py:class:`list`): Names of parameters provided by user
    Returns:
        * **names** (:py:class:`list`): List of strings - parameter names
    '''
    if names is None:
        # No names supplied at all -> build the full default set.
        names = generate_default_names(nparam)
    elif len(names) != nparam:
        # Partial list supplied -> pad it out with default names.
        # (Defaults always have the right length, hence the elif.)
        names = extend_names_to_match_nparam(names, nparam)
    return names
def generate_default_names(nparam):
    '''
    Generate generic parameter name set.
    For example, if `nparam` = 4, then the generated names are::
        names = ['$p_{0}$', '$p_{1}$', '$p_{2}$', '$p_{3}$']
    Args:
        * **nparam** (:py:class:`int`): Number of parameter names to generate
    Returns:
        * **names** (:py:class:`list`): List of strings - parameter names
    '''
    # LaTeX-style subscripted labels: $p_{0}$, $p_{1}$, ...
    return ['$p_{{{}}}$'.format(index) for index in range(nparam)]
def extend_names_to_match_nparam(names, nparam):
    '''
    Append names to list using default convention
    until length of names matches number of parameters.
    For example, if `names = ['name_1', 'name_2']` and `nparam = 4`, then
    two additional names will be appended to the `names` list.
    E.g.,::
        names = ['name_1', 'name_2', '$p_{2}$', '$p_{3}$']
    Args:
        * **names** (:py:class:`list`): Names of parameters provided by user
        * **nparam** (:py:class:`int`): Number of parameter names to generate
    Returns:
        * **names** (:py:class:`list`): List of strings - extended \
        list of parameter names
    '''
    if names is None:
        names = []
    # Extend in place (the caller's list is deliberately mutated, matching
    # the historical behavior) with default labels for the missing slots.
    names.extend('$p_{{{}}}$'.format(index)
                 for index in range(len(names), nparam))
    return names
# --------------------------------------------
def make_x_grid(x, npts=100):
    '''
    Generate x grid based on extrema.
    1. If `len(x) > 200`, then generates grid based on difference
    between the max and min values in the array.
    2. Otherwise, the grid is defined with respect to the array
    mean plus or minus four standard deviations.
    Args:
        * **x** (:class:`~numpy.ndarray`): Array of points
    Kwargs:
        * **npts** (:py:class:`int`): Number of points to use in \
        generated grid. Default: `100`
    Returns:
        * (:class:`~numpy.ndarray`): Uniformly spaced array of points \
        with shape :code:`(npts,1)`.
    '''
    lo, hi = min(x), max(x)
    span = hi - lo
    if len(x) > 200:
        # Large sample: pad the observed range by 8% on each side.
        grid = np.linspace(lo - 0.08 * span, hi + 0.08 * span, npts)
    else:
        # Small sample: cover mean +/- 4 sample standard deviations.
        center = np.mean(x)
        spread = 4 * np.std(x, ddof=1)
        grid = np.linspace(center - spread, center + spread, npts)
    return grid.reshape(-1, 1)  # 1d column vector
# --------------------------------------------
# see MASS 2nd ed page 181.
def iqrange(x):
    '''
    Interquantile range of each column of x.
    Args:
        * **x** (:class:`~numpy.ndarray`): Array of points.
    Returns:
        * (:class:`~numpy.ndarray`): Interquantile range - single \
        element array, `q3 - q1`.
    '''
    nr, nc = x.shape
    if nr == 1:  # make sure it is a column vector
        x = x.reshape(nc, nr)
        nr = nc
        nc = 1
    # Sort each column independently. The previous in-place ``x.sort()``
    # sorted along the LAST axis, which is a no-op for an (nr, 1) column
    # vector, so the quantiles were computed on unsorted data. ``np.sort``
    # with axis=0 fixes that and also avoids mutating the caller's array.
    x = np.sort(x, axis=0)
    # Quantile positions per MASS 2nd ed., page 181 (linear interpolation
    # between order statistics); requires i3 + 1 <= nr - 1, i.e. nr >= 8.
    i1 = math.floor((nr + 1)/4)
    i3 = math.floor(3/4*(nr+1))
    f1 = (nr+1)/4-i1
    f3 = 3/4*(nr+1)-i3
    q1 = (1-f1)*x[int(i1), :] + f1*x[int(i1)+1, :]
    q3 = (1-f3)*x[int(i3), :] + f3*x[int(i3)+1, :]
    return q3-q1
def gaussian_density_function(x, mu=0, sigma2=1):
    '''
    Standard normal/Gaussian density function.
    Args:
        * **x** (:py:class:`float`): Value of which to calculate probability.
    Kwargs:
        * **mu** (:py:class:`float`): Mean of Gaussian distribution. \
        Default: `0`
        * **sigma2** (:py:class:`float`): Variance of Gaussian \
        distribution. Default: `1`
    Returns:
        * **y** (:py:class:`float`): Likelihood of `x`.
    '''
    # Normalizing constant 1 / sqrt(2*pi*sigma^2) times the exponential kernel.
    coefficient = 1 / math.sqrt(2 * math.pi * sigma2)
    kernel = math.exp(-0.5 * (x - mu) ** 2 / sigma2)
    return coefficient * kernel
def scale_bandwidth(x):
    '''
    Scale bandwidth of array.
    Uses the classic 1.06 * min(std, IQR/1.34) * n^(-1/5) rule of thumb
    for kernel density estimation bandwidth selection.
    Args:
        * **x** (:class:`~numpy.ndarray`): Array of points - column of chain.
    Returns:
        * **s** (:class:`~numpy.ndarray`): Scaled bandwidth - single \
        element array.
    '''
    n = len(x)
    # Hoisted: iqrange was previously evaluated twice (once for the test,
    # once inside min()), each call sorting the data.
    iqr = iqrange(x)
    sigma = np.std(x, ddof=1)
    if iqr <= 0:
        # Degenerate spread: fall back to the std-only rule of thumb.
        s = 1.06*np.array([sigma*n**(-1/5)])
    else:
        s = 1.06*np.array([min(sigma, iqr/1.34)*n**(-1/5)])
    return s
# --------------------------------------------
def generate_ellipse(mu, cmat, ndp=100):
    '''
    Generates points for a probability contour ellipse
    Args:
        * **mu** (:class:`~numpy.ndarray`): Mean values
        * **cmat** (:class:`~numpy.ndarray`): Covariance matrix
    Kwargs:
        * **ndp** (:py:class:`int`): Number of points to generate. \
        Default: `100`
    Returns:
        * **x** (:class:`~numpy.ndarray`): x-points
        * **y** (:class:`~numpy.ndarray`): y-points
    '''
    # check shape of covariance matrix
    if cmat.shape != (2, 2):
        sys.exit('covariance matrix must be 2x2')
    if check_symmetric(cmat) is not True:
        sys.exit('covariance matrix must be symmetric')
    # define t space; np.pi/np.cos/np.sin replace the ``from scipy import
    # pi, sin, cos`` aliases, which were removed from SciPy's top level.
    t = np.linspace(0, 2*np.pi, ndp)
    pdflag, R = is_semi_pos_def_chol(cmat)
    if pdflag is False:
        sys.exit('covariance matrix must be positive definite')
    # Parametric ellipse: map the unit circle through the upper-triangular
    # Cholesky factor R and shift by the mean.
    x = mu[0] + R[0, 0]*np.cos(t)
    y = mu[1] + R[0, 1]*np.cos(t) + R[1, 1]*np.sin(t)
    return x, y
def generate_ellipse_plot_points(x, y, ndp=100):
    '''
    Generates points for a probability contour ellipse for 2 columns of chain
    Args:
        * **x** (:class:`~numpy.ndarray`): chain 1
        * **y** (:class:`~numpy.ndarray`): chain 2
    Kwargs:
        * **ndp** (:py:class:`int`): Number of points to generate. \
        Default: `100`
    Returns:
        * (:py:class:`dict`): 50% and 95% probability contours.
    '''
    # Critical values from the chisq(2) distribution for the 50% and 95%
    # probability regions.
    chi2_50 = 1.3863
    chi2_95 = 5.9915
    # Sample covariance of the two (flattened) chain columns and their mean.
    sig = np.cov(x.reshape(x.size,), y.reshape(y.size,))
    mu = np.mean(np.array([x, y]), axis=1)
    xe50, ye50 = generate_ellipse(mu, chi2_50 * sig, ndp=ndp)
    xe95, ye95 = generate_ellipse(mu, chi2_95 * sig, ndp=ndp)
    return dict(xe50=xe50, ye50=ye50, xe95=xe95, ye95=ye95)
def check_symmetric(a, tol=1e-8):
    '''
    Check if array is symmetric by comparing with transpose.
    Args:
        * **a** (:class:`~numpy.ndarray`): Array to test.
    Kwargs:
        * **tol** (:py:class:`float`): Tolerance for testing equality. \
        Default: `1e-8`
    Returns:
        * (:py:class:`bool`): True -> symmetric, False -> not symmetric.
    '''
    # Symmetric <=> element-wise equal to its own transpose (within tol).
    transposed = a.T
    return np.allclose(a, transposed, atol=tol)
def is_semi_pos_def_chol(x):
    '''
    Check if matrix is semi positive definite by calculating Cholesky
    decomposition.
    Note: ``np.linalg.cholesky`` actually requires strict positive
    definiteness; a singular (semi-definite) matrix will also fail here.
    Args:
        * **x** (:class:`~numpy.ndarray`): Matrix to check
    Returns:
        * If matrix is `not` semi positive definite return :code:`False, None`
        * If matrix is semi positive definite return :code:`True` and the \
        Upper triangular form of the Cholesky decomposition matrix.
    '''
    c = None
    try:
        c = np.linalg.cholesky(x)
        # cholesky returns the lower factor; callers expect the upper one.
        return True, c.transpose()
    except np.linalg.LinAlgError:
        # np.linalg.LinAlgError replaces the private np.linalg.linalg
        # alias, which was removed in NumPy 2.0.
        return False, c
def append_to_nrow_ncol_based_on_shape(sh, nrow, ncol):
    '''
    Append to list based on shape of array
    Args:
        * **sh** (:py:class:`tuple`): Shape of array.
        * **nrow** (:py:class:`list`): List of number of rows
        * **ncol** (:py:class:`list`): List of number of columns
    Returns:
        * **nrow** (:py:class:`list`): List of number of rows
        * **ncol** (:py:class:`list`): List of number of columns
    '''
    # A 1-d shape is treated as a single column; otherwise take the first
    # two dimensions as (rows, columns).
    if len(sh) == 1:
        rows, cols = sh[0], 1
    else:
        rows, cols = sh[0], sh[1]
    nrow.append(rows)
    ncol.append(cols)
    return nrow, ncol
def setup_subsample(skip, maxpoints, nsimu):
    '''
    Setup subsampling from posterior.
    When plotting the sampling chain, it is often beneficial to subsample
    in order to avoid a plot that is too dense. This routine determines the
    appropriate step size based on the size of the chain (nsimu) and maximum
    points allowed to plot (maxpoints). The function checks if the
    size of the chain exceeds the maximum number of points allowed in the
    plot. If yes, skip is defined such that at most the max number of points
    are used and sampled evenly from the start to end of the chain. Otherwise
    the value of skip is used as defined by the user. A subsample index
    is then generated based on the value of skip and the number of simulations.
    Args:
        * **skip** (:py:class:`int`): User defined skip value.
        * **maxpoints** (:py:class:`int`): Maximum points allowed in each plot.
        * **nsimu** (:py:class:`int`): Total number of simulations (chain length).
    Returns:
        * (:class:`~numpy.ndarray`): Chain indices to plot, i.e. \
        :code:`np.arange(0, nsimu, skip)` (not the bare skip value).
    '''
    if nsimu > maxpoints:
        # Override the user skip so that roughly maxpoints samples remain.
        skip = int(np.floor(nsimu/maxpoints))
    return np.arange(0, nsimu, skip)
| StarcoderdataPython |
6599109 | #%%
from prettytable import PrettyTable
#%%
class CVP:
    """Simple cost-volume-profit (CVP) analysis model for one product line.

    Attributes:
        speakers: number of units sold.
        sales_pu: selling price per unit.
        var_exp_pu: variable expense per unit.
        fix_exp: total fixed expenses.
        target_profit: desired profit (may be ``None`` when unused).
    """

    def __init__(self, speakers, sales_pu, var_exp_pu, fix_exp, target_profit):
        self.sales_pu = sales_pu
        self.speakers = speakers
        self.var_exp_pu = var_exp_pu
        self.fix_exp = fix_exp
        self.target_profit = target_profit

    def sales(self):
        """Total sales revenue."""
        return self.sales_pu * self.speakers

    def var_exp(self):
        """Total variable expenses."""
        return self.var_exp_pu * self.speakers

    def contrib_margin_d(self):
        """Total contribution margin in dollars (sales minus variable expenses)."""
        return self.sales() - self.var_exp()

    def contrib_margin_pu(self):
        """Contribution margin per unit."""
        return self.contrib_margin_d() / self.speakers

    def contrib_margin_rat(self):
        """Contribution margin ratio (fraction of each sales dollar)."""
        return self.contrib_margin_d() / self.sales()

    def var_exp_rat(self):
        """Variable expense ratio (fraction of each sales dollar)."""
        return self.var_exp() / self.sales()

    def net_op_income(self):
        """Net operating income (contribution margin minus fixed expenses)."""
        return self.contrib_margin_d() - self.fix_exp

    def break_even_pt_u(self):
        """Break-even point expressed in units."""
        return self.fix_exp / self.contrib_margin_pu()

    def break_even_pt_d(self):
        """Break-even point expressed in sales dollars."""
        return self.fix_exp / self.contrib_margin_rat()

    def u_sales_target_profit(self):
        """Unit sales required to reach the target profit."""
        return (self.fix_exp + self.target_profit) / self.contrib_margin_pu()

    def d_sales_target_profit(self):
        """Dollar sales required to reach the target profit."""
        return (self.fix_exp + self.target_profit) / self.contrib_margin_rat()

    def marginal_safety(self):
        """Margin of safety in dollars (sales above break-even)."""
        return self.sales() - self.break_even_pt_d()

    def marginal_safety_p(self):
        """Margin of safety as a fraction of total sales."""
        return self.marginal_safety() / self.sales()

    def degree_operating_leverage(self):
        """Degree of operating leverage (contribution margin / net income)."""
        return self.contrib_margin_d() / self.net_op_income()

    def expected_inc_in_net_op_inc(self, expected_inc_in_sale):
        """Print and return the expected rise in net operating income
        implied by a given percentage increase in sales."""
        print(f"Expected increase in sales\t{expected_inc_in_sale}")
        print(f"Degree of operating leverage\t{self.degree_operating_leverage()}")
        print(
            f"Expected increase in net operating income \t{self.degree_operating_leverage() * expected_inc_in_sale}"
        )
        return self.degree_operating_leverage() * expected_inc_in_sale

    def table(self):
        """Print a contribution-format income statement via PrettyTable."""
        tab = PrettyTable()
        tab.field_names = ["", "Total", "Per Unit"]
        tab.align[""] = "l"
        # Rows of the contribution-format statement, top to bottom.
        rows = [
            [f"Sales ({self.speakers} Units)", f"${self.sales()}", f"${self.sales_pu}"],
            [f"Less: Variable Expenses", f"({self.var_exp()})", f"({self.var_exp_pu})"],
            [f"", "-----", "-----"],
            [
                f"Contribution Margin",
                f"{self.contrib_margin_d()}",
                f"${self.contrib_margin_pu()}",
            ],
            [f"Less: Fixed Expenses", f"({self.fix_exp})", "====="],
            [f"", "-----", ""],
            [f"Net Operating Income", f"${self.net_op_income()}", ""],
            [f"", "=====", ""],
        ]
        for row in rows:
            tab.add_row(row)
        print(tab)
#%%
#%% Baseline scenario: 400 units at $250, $150 variable cost, $35k fixed.
a = CVP(400, 250, 150, 35000, None)
a.table()
#%% Operating leverage of the baseline.
a.degree_operating_leverage()
#%% What-if: 10% more units, +$10 variable cost per unit, -$5k fixed cost.
a.speakers = int(a.speakers * 1.1)
a.var_exp_pu += 10
a.fix_exp -= 5000
a.table()
#%% Second scenario with a $90k target profit; dump all key ratios.
a = CVP(20000, 60, 45, 240000, 90000)
a.table()
print(a.contrib_margin_rat(), a.var_exp_rat())
print(a.break_even_pt_d(), a.break_even_pt_u())
print(a.d_sales_target_profit(), a.u_sales_target_profit())
print(a.marginal_safety(), a.marginal_safety_p())
print(a.expected_inc_in_net_op_inc(8))
#%% What-if on the second scenario: higher variable cost, lower fixed cost,
#   20% more volume.
a = CVP(20000, 60, 45, 240000, 90000)
a.table()
a.var_exp_pu += 3
a.fix_exp -= 30000
# NOTE(review): *= 1.2 turns speakers into a float unit count -- confirm
# whether rounding to whole units was intended (cf. the int() cast above).
a.speakers *= 1.2
a.table()
#%% Third scenario: 5% price and variable-cost increase vs. +$2 variable
#   cost with 10% more volume.
a = CVP(2000, 90, 63, 30000, None)
a.table()
a.sales_pu *= 1.05
a.var_exp_pu *= 1.05
a.fix_exp += 5000
a.table()
a = CVP(2000, 90, 63, 30000, None)
a.var_exp_pu += 2
a.speakers *= 1.1
a.table()
#%% Script entry point: re-run the baseline example.
if __name__ == "__main__":
    a = CVP(400, 250, 150, 35000, None)
    a.table()
    print(a.degree_operating_leverage())
#%%
| StarcoderdataPython |
1872952 | import logging
import unittest
import acme.messages
from acmetk import AcmeProxy
from tests.test_broker import TestBrokerLocalCA, TestBrokerLE
from tests.test_ca import TestAcmetiny, TestOurClient, TestOurClientStress, TestCertBot
log = logging.getLogger("acmetk.test_proxy")
class TestProxy:
    """Tests for the AcmeProxy class.
    Inherits from TestBroker in order to reduce code duplication.
    This means that some variables and endpoints still contain 'broker', but
    the class AcmeProxy is being tested regardless.
    """

    # Server class under test; the inherited suites instantiate this.
    _cls = AcmeProxy
    # ACME directory URL of the proxy under test (broker path reused, see above).
    DIRECTORY = "http://localhost:8000/broker/directory"
class TestProxyLocalCA(TestProxy, TestBrokerLocalCA):
    """Proxy test mixin backed by the local CA test configuration."""

    @property
    def config_sec(self):
        # Configuration section for the proxy + local CA combination.
        return self._config["tests"]["ProxyLocalCA"]
class TestAcmetinyProxyLocalCA(
    TestAcmetiny, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Run the acme-tiny client suite against the proxy with a local CA."""
    pass
class TestCertBotProxyLocalCA(
    TestCertBot, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Run the certbot client suite against the proxy with a local CA."""
    pass
class TestCertBotWCProxyLocalCA(
    TestCertBot, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Certbot suite against the proxy/local CA using a wildcard identifier."""

    @property
    def names(self):
        # Wildcard identifier used for all certificate requests in this suite.
        return ["*.test.de"]

    async def test_subdomain_revocation(self):
        "Overridden as a no-op to avoid requesting a certificate for dns.*.test.de."
        pass

    async def test_bad_identifier(self):
        # Re-run the inherited check unchanged with the wildcard setup.
        await super().test_bad_identifier()

    async def test_no_wc_run(self):
        # With wildcards disabled on the relay, issuance must be rejected.
        self.relay._allow_wildcard = False
        with self.assertRaisesRegex(
            acme.messages.Error, "The ACME server can not issue a wildcard certificate"
        ):
            await super().test_run()
class TestOurClientProxyLocalCA(
    TestOurClientStress, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Run the in-house client stress suite against the proxy with a local CA."""
    pass
class TestOurClientEC384EC384ProxyLocalCA(
    TestOurClientStress, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Stress suite variant using EC-384 keys for both account and certificate."""

    ACCOUNT_KEY_ALG_BITS = CERT_KEY_ALG_BITS = ("EC", 384)
class TestOurClientEC521EC521ProxyLocalCA(
    TestOurClient, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """EC-521 keys are expected to be rejected; the run must raise a key error."""

    ACCOUNT_KEY_ALG_BITS = ("EC", 521)
    CERT_KEY_ALG_BITS = ("EC", 521)

    async def test_run(self):
        # The unsupported curve should be rejected when the CSR is submitted.
        with self.assertRaisesRegex(acme.messages.Error, self.BAD_KEY_RE) as e:
            await super().test_run()
        self.assertBadKey(e, "csr")
class TestOurClientRSA1024EC384ProxyLocalCA(
    TestOurClient, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Client suite with an RSA-1024 account key and an EC certificate key."""

    ACCOUNT_KEY_ALG_BITS = ("RSA", 1024)
    # NOTE(review): the class name says EC384 but the value is ("EC", 521) --
    # confirm whether the name or the attribute is wrong.
    CERT_KEY_ALG_BITS = ("EC", 521)
class TestOurClientRSA2048RSA1024ProxyLocalCA(
    TestOurClient, TestProxyLocalCA, unittest.IsolatedAsyncioTestCase
):
    """Client suite with an RSA-2048 account key and an RSA-1024 certificate key."""

    ACCOUNT_KEY_ALG_BITS = ("RSA", 2048)
    CERT_KEY_ALG_BITS = ("RSA", 1024)
class TestProxyLE(TestProxy, TestBrokerLE):
    """Proxy test mixin backed by the Let's Encrypt (staging) configuration."""

    @property
    def config_sec(self):
        # Configuration section for the proxy + Let's Encrypt combination.
        return self._config["tests"]["ProxyLE"]
class TestAcmetinyProxyLE(TestAcmetiny, TestProxyLE, unittest.IsolatedAsyncioTestCase):
    """Run the acme-tiny client suite against the proxy backed by Let's Encrypt."""
    pass
class TestCertBotProxyLE(TestCertBot, TestProxyLE, unittest.IsolatedAsyncioTestCase):
    """Run the certbot client suite against the proxy backed by Let's Encrypt."""

    async def test_bad_identifier(self):
        # Order is passed through to LE which returns different errors
        pass
class TestOurClientProxyLE(
    TestOurClientStress, TestProxyLE, unittest.IsolatedAsyncioTestCase
):
    """In-house client suite against the proxy backed by Let's Encrypt."""

    async def test_run(self):
        # Inherited behavior is fine against LE; kept explicit for clarity.
        await super().test_run()

    async def test_run_stress(self):
        # rate limits! (stress runs would exhaust the LE staging quota)
        pass
| StarcoderdataPython |
11307753 | <reponame>hozuki/gfan<gh_stars>1-10
import numpy as np
from sklearn.datasets import load_files as load_sklearn_data_files
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from . import preprocess, read_all_stop_words
def train_sklearn() -> None:
    """Train and evaluate a multinomial naive-Bayes lyrics classifier.

    Repeats 10 random train/test splits over ``data/training``, fits a
    TF-IDF + MultinomialNB pipeline, then reports accuracy and class counts
    before and after a custom probability-threshold correction.
    """
    stop_word_set = read_all_stop_words()
    lrc_training = load_sklearn_data_files("data/training")
    # Please use "neg" and "pos" for category names, keeping them the same as directory names in `data/training`.
    labels = [("neg", "Suspect machine translation"), ("pos", "Natural translation")]
    labels.sort()
    label_names = [x[1] for x in labels]
    for _ in range(10):
        print("------")
        # Fresh 80/20 split each round to gauge variance across runs.
        terms_train, terms_test, y_train, y_test = train_test_split(lrc_training.data, lrc_training.target,
                                                                    test_size=0.2)
        count_vec = TfidfVectorizer(binary=False, decode_error='ignore', stop_words=stop_word_set)
        x_train: np.ndarray = count_vec.fit_transform(terms_train)
        x_test: np.ndarray = count_vec.transform(terms_test)
        clf = MultinomialNB()
        clf.fit(x_train, y_train)
        y_pred = clf.predict(x_test)
        print("True positive rate: {}".format(np.mean(y_pred == y_test)))
        test_set_size = len(y_test)
        print("Test set size: ", test_set_size)
        print("* Before probability correction")
        bins = np.bincount(y_pred)
        bins_len = len(bins)
        for i, label_name in enumerate(label_names):
            if i < bins_len:
                print("Count of class {} ({}): {}".format(i, label_name, bins[i]))
        # precision, recall, thresholds = precision_recall_curve(y_test, clf.predict(x_test))
        class_prob = clf.predict_proba(x_test)
        class_prob = class_prob[:, 0]  # based on the assumption that categories are [neg, pos]
        # np.empty replaces the raw np.ndarray constructor; dtype=int replaces
        # np.int, which was removed from NumPy in 1.24.
        report = np.empty((test_set_size,), dtype=int)
        for i in range(test_set_size):
            prob: np.float64 = class_prob[i]
            # Threshold 0.8 (all predictions are class 0) is... counter-intuitive.
            # The model should have worked better.
            # The right way is using clf.classes_ and read class_prob[i] to decide the class index,
            # but since our probabilities are a lot higher than normal (0.5), we have to perform a custom filtering.
            if prob > 0.8:
                cls = 0  # neg
            else:
                cls = 1  # pos
            report[i] = cls
        print("* After probability correction")
        bins = np.bincount(report)
        bins_len = len(bins)
        for i, label_name in enumerate(label_names):
            if i < bins_len:
                print("Count of class {} ({}): {}".format(i, label_name, bins[i]))
        print("* Actual")
        bins = np.bincount(y_test)
        bins_len = len(bins)
        for i, label_name in enumerate(label_names):
            if i < bins_len:
                print("Count of class {} ({}): {}".format(i, label_name, bins[i]))
        print()
        print(classification_report(y_test, report, target_names=label_names))
if __name__ == '__main__':
    # Build/refresh the processed corpus, then train and evaluate the model.
    preprocess()
    train_sklearn()
| StarcoderdataPython |
39965 | <reponame>WillBickerstaff/sundial
def splitbylength(wordlist):
    """Group words by length, shortest group first.

    Each group contains the capitalized words of one length, sorted
    alphabetically; lengths with no words yield an empty list so the
    result covers every length from the shortest to the longest word.

    The original relied on ``wordlist`` being pre-sorted by length
    (it read the lengths of the first and last elements) and crashed on
    an empty list; using min/max handles any ordering and [] safely.

    Args:
        wordlist: iterable of words.

    Returns:
        list[list[str]]: one (possibly empty) sorted group per length.
    """
    if not wordlist:
        return []
    minlen = min(len(w) for w in wordlist)
    maxlen = max(len(w) for w in wordlist)
    splitlist = []
    for length in range(minlen, maxlen + 1):
        bucket = sorted(w.capitalize() for w in wordlist if len(w) == length)
        splitlist.append(bucket)
    return splitlist
| StarcoderdataPython |
277081 | import typing
def pascal(n: int, mod: int) -> typing.List[typing.List[int]]:
    """Return an n x n table of binomial coefficients C(i, j) modulo *mod*.

    Row ``i`` holds C(i, 0) .. C(i, i); entries above the diagonal remain 0.
    (Return annotation fixed: the function returns a nested list, not a
    flat ``List[int]``.)
    """
    c = [[0] * n for _ in range(n)]
    for i in range(n):
        c[i][0] = 1  # C(i, 0) == 1 for every row
    for i in range(1, n):
        for j in range(1, i + 1):
            # Pascal's rule: C(i, j) = C(i-1, j) + C(i-1, j-1)  (mod *mod*)
            c[i][j] = (c[i - 1][j] + c[i - 1][j - 1]) % mod
    return c
def main() -> typing.NoReturn:
    """Read the problem from stdin and print the number of valid pairings.

    Input: n and m, then m "good" pairs (1-based) among 2n items.
    Counts non-crossing perfect matchings of the 2n items using only good
    pairs, via an interval DP, modulo 998244353.
    """
    n, m = map(int, input().split())
    h = 2 * n  # total number of items to pair up
    # Symmetric adjacency matrix of the allowed ("good") pairs.
    are_good = [[False] * h for _ in range(h)]
    for _ in range(m):
        a, b = map(int, input().split())
        a -= 1
        b -= 1
        are_good[a][b] = are_good[b][a] = True
    # dp[l][r]: number of ways to fully pair the interval [l, r].
    dp = [[0] * h for _ in range(h + 1)]
    for l in range(h - 1):
        r = l + 1
        # Base case: a length-2 interval pairs iff (l, r) is a good pair.
        dp[l][r] = 1 if are_good[l][r] else 0
    for l in range(1, h + 1):
        for r in range(0, l):
            # Empty (inverted) intervals count as one trivial pairing.
            dp[l][r] = 1
    MOD = 998_244_353
    # Precomputed binomial coefficients mod MOD (256 rows suffice here).
    choose = pascal(1 << 8, MOD)
    for delta in range(3, h, 2):  # interval lengths 4, 6, ... (delta = r - l)
        for l in range(h - delta):
            r = l + delta
            for i in range(l + 1, r + 1, 2):
                # Pair l with i, then combine the inner and outer sub-intervals;
                # the binomial factor counts interleavings of the two halves.
                if not are_good[l][i]: continue
                dp[l][r] += dp[l + 1][i - 1] * dp[i + 1][r] * choose[(delta + 1) // 2][(i - l + 1) // 2]
                dp[l][r] %= MOD
    print(dp[0][h - 1])

main()
| StarcoderdataPython |
11397142 | """
Purpose
-------
This fdw can be used to access data stored in a remote RDBMS.
Through the use of sqlalchemy, many different rdbms engines are supported.
.. api_compat::
:read:
:write:
:transaction:
:import_schema:
Dependencies
------------
You will need the `sqlalchemy`_ library, as well as a suitable dbapi driver for
the remote database.
You can find a list of supported RDBMs, and their associated dbapi drivers and
connection strings in the `sqlalchemy dialects documentation`_.
.. _sqlalchemy dialects documentation: http://docs.sqlalchemy.org/en/latest/dialects/
.. _sqlalchemy: http://www.sqlalchemy.org/
Connection options
~~~~~~~~~~~~~~~~~~
Connection options can be passed either with a db-url, or with a combination of
individual connection parameters.
If both a ``db_url`` and individual parameters are used, the parameters will override
the value found in the ``db_url``.
In both cases, at least the ``drivername`` should be passed, either as the url scheme in
the ``db_url`` or using the ``drivername`` parameter.
``db_url``
An sqlalchemy connection string.
Examples:
- mysql: `mysql://<user>:<password>@<host>/<dbname>`
- mssql: `mssql://<user>:<password>@<dsname>`
See the `sqlalchemy dialects documentation`_. for documentation.
``username``
The remote username.
``password``
The remote password
``host``
The remote host
``database``
The remote database
``port``
The remote port
Other options
---------------
``tablename`` (required)
The table name in the remote RDBMS.
``primary_key``
Identifies a column which is a primary key in the remote RDBMS.
This options is required for INSERT, UPDATE and DELETE operations
``schema``
The schema in which this table resides on the remote side
When defining the table, the local column names will be used to retrieve the
remote column data.
Moreover, the local column types will be used to interpret the results in the
remote table. Sqlalchemy being aware of the differences between database
implementations, it will convert each value from the remote database to python
using the converter inferred from the column type, and convert it back to a
postgresql suitable form.
What does it do to reduce the amount of fetched data ?
------------------------------------------------------
- `quals` are pushed to the remote database whenever possible. This include
simple operators :
- equality, inequality (=, <>, >, <, <=, >=)
- like, ilike and their negations
- IN clauses with scalars, = ANY (array)
- NOT IN clauses, != ALL (array)
- the set of needed columns is pushed to the remote_side, and only those columns
will be fetched.
Usage example
-------------
For a connection to a remote mysql database (you'll need a mysql dbapi driver,
such as pymysql):
.. code-block:: sql
CREATE SERVER alchemy_srv foreign data wrapper multicorn options (
wrapper 'multicorn.sqlalchemyfdw.SqlAlchemyFdw'
);
create foreign table mysql_table (
column1 integer,
column2 varchar
) server alchemy_srv options (
tablename 'table',
db_url 'mysql://myuser:mypassword@myhost/mydb'
);
"""
from . import ForeignDataWrapper, TableDefinition, ColumnDefinition
from .utils import log_to_postgres, ERROR, WARNING, DEBUG
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.sql import select, operators as sqlops, and_
# Handle the sqlalchemy 0.8 / 0.9 changes
try:
from sqlalchemy.sql import sqltypes
except ImportError:
from sqlalchemy import types as sqltypes
from sqlalchemy.schema import Table, Column, MetaData
from sqlalchemy.dialects.oracle import base as oracle_dialect
from sqlalchemy.dialects.postgresql.base import (
ARRAY, ischema_names, PGDialect, NUMERIC)
import re
import operator
def compose(*funs):
    """Compose callables right-to-left: compose(f, g)(x) == f(g(x)).

    Raises ValueError when called with no functions.
    """
    if not funs:
        raise ValueError("At least one function is necessary for compose")
    if len(funs) == 1:
        return funs[0]
    outermost = funs[0]
    inner = compose(*funs[1:])

    def composed(*args, **kwargs):
        return outermost(inner(*args, **kwargs))

    return composed
def not_(function):
    """Wrap *function* so its result is inverted with ``operator.inv``.

    Used to negate sqlalchemy operator callables (their results override
    ``__invert__``); equivalent to composing ``operator.inv`` after
    ``function``.
    """
    def inverted(*args, **kwargs):
        return operator.inv(function(*args, **kwargs))
    return inverted
def _parse_url_from_options(fdw_options):
    """Build an sqlalchemy URL from the fdw options.

    The ``db_url`` option, when present, supplies the base URL; the
    individual options (username, password, host, database, port)
    override the corresponding URL fields. Without ``db_url``, at least
    ``drivername`` must be given.
    """
    if fdw_options.get('db_url'):
        url = make_url(fdw_options.get('db_url'))
    else:
        if 'drivername' not in fdw_options:
            # ERROR level aborts the statement on the postgres side.
            log_to_postgres('Either a db_url, or drivername and other '
                            'connection infos are needed', ERROR)
        url = URL(fdw_options['drivername'])
    for param in ('username', 'password', 'host',
                  'database', 'port'):
        if param in fdw_options:
            # NOTE(review): mutating URL attributes assumes SQLAlchemy < 1.4,
            # where URL objects are still mutable -- confirm the pinned version.
            setattr(url, param, fdw_options[param])
    return url
# Mapping from postgres qual operator strings to callables that build the
# equivalent sqlalchemy clause. Tuple keys (op, is_list) handle ANY/ALL
# array comparisons: = ANY(...) -> IN, <> ALL(...) -> NOT IN.
OPERATORS = {
    '=': operator.eq,
    '<': operator.lt,
    '>': operator.gt,
    '<=': operator.le,
    '>=': operator.ge,
    '<>': operator.ne,
    '~~': sqlops.like_op,
    '~~*': sqlops.ilike_op,
    '!~~*': not_(sqlops.ilike_op),
    '!~~': not_(sqlops.like_op),
    ('=', True): sqlops.in_op,
    ('<>', False): not_(sqlops.in_op)
}
# Remote dialect types replaced by a generic postgres-compatible type when
# reflecting a schema (see import_schema).
CONVERSION_MAP = {
    oracle_dialect.NUMBER: NUMERIC
}
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.
    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.
    Accepted options:
    db_url -- the sqlalchemy connection string.
    schema -- (optional) schema name to qualify table name with
    tablename -- the table name in the remote database.
    primary_key -- (optional) required for INSERT/UPDATE/DELETE support
    """
    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            # ERROR level aborts the statement on the postgres side.
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        url = _parse_url_from_options(fdw_options)
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        # Mirror the local column definitions into an sqlalchemy Table so
        # that value conversion uses the locally declared types.
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema,
                           *sqlacols)
        self.transaction = None
        # Lazily-opened connection (see the ``connection`` property).
        self._connection = None
        # Remote primary key column; needed for write support.
        self._row_id_column = fdw_options.get('primary_key', None)

    def execute(self, quals, columns):
        """
        The quals are turned into an and'ed where clause.

        Only quals with a known operator (see OPERATORS) are pushed to the
        remote side; the rest are left for postgres to re-check. The select
        is restricted to the requested columns.
        """
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c
        statement = statement.with_only_columns(columns)
        log_to_postgres(str(statement), DEBUG)
        # Stream results to avoid materializing the whole remote table.
        rs = (self.connection
              .execution_options(stream_results=True)
              .execute(statement))
        # Workaround pymssql "trash old results on new query"
        # behaviour (See issue #100)
        if self.engine.driver == 'pymssql' and self.transaction is not None:
            rs = list(rs)
        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        """Lazily open (and then reuse) the remote connection."""
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        # Start a remote transaction mirroring the local one.
        # NOTE(review): ``serializable`` is currently ignored -- confirm
        # whether the remote isolation level should be raised to match.
        self.transaction = self.connection.begin()

    def pre_commit(self):
        # Commit the remote transaction during the local pre-commit phase.
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        # Roll back the remote transaction alongside the local one.
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        """Column identifying rows for UPDATE/DELETE (the primary_key option)."""
        if self._row_id_column is None:
            # NOTE(review): logged without an explicit level -- confirm the
            # default level actually aborts, since None is returned otherwise.
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        """Insert one row (dict of column name -> value) into the remote table."""
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        """Update the remote row whose primary key equals *rowid*."""
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        """Delete the remote row whose primary key equals *rowid*."""
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info

        Parses a postgres type string (e.g. ``numeric(10,2)``,
        ``integer[]``) into an instantiated sqlalchemy type object.
        """
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)
        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)
        is_array = format_type.endswith('[]')
        # First parenthesized group, e.g. the "10,2" in numeric(10,2).
        charlen = re.search('\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search('\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split('\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}
        # Per-type normalization of constructor args/kwargs.
        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone',
                        'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen),)
        # Look up the sqlalchemy type class by postgres type name and
        # instantiate it; fall back to NULLTYPE for unknown types.
        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype

    @classmethod
    def import_schema(self, schema, srv_options, options,
                      restriction_type, restricts):
        """
        Reflects the remote schema.

        Returns one TableDefinition per remote table, honoring the
        LIMIT TO / EXCEPT restriction of IMPORT FOREIGN SCHEMA.
        """
        metadata = MetaData()
        url = _parse_url_from_options(srv_options)
        engine = create_engine(url)
        dialect = PGDialect()
        if restriction_type == 'limit':
            only = restricts
        elif restriction_type == 'except':
            only = lambda t, _: t not in restricts
        else:
            only = None
        metadata.reflect(bind=engine,
                         schema=schema,
                         only=only)
        to_import = []
        for _, table in sorted(metadata.tables.items()):
            ftable = TableDefinition(table.name)
            ftable.options['schema'] = schema
            ftable.options['tablename'] = table.name
            for c in table.c:
                # Force collation to None to prevent incompatibilities
                setattr(c.type, "collation", None)
                # If the type is specialized, call the generic
                # superclass method
                if type(c.type) in CONVERSION_MAP:
                    class_name = CONVERSION_MAP[type(c.type)]
                    old_args = c.type.__dict__
                    c.type = class_name()
                    c.type.__dict__.update(old_args)
                if c.primary_key:
                    ftable.options['primary_key'] = c.name
                ftable.columns.append(ColumnDefinition(
                    c.name,
                    type_name=c.type.compile(dialect)))
            to_import.append(ftable)
        return to_import
| StarcoderdataPython |
# Module-level script: queries the database module and writes a small xlwt
# spreadsheet named after yesterday's and today's dates. Everything below
# runs on import.
import conexao_bd
conexao_bd.listar()
print(conexao_bd)
import xlwt
from datetime import datetime, timedelta
##style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on',
##    num_format_str='#,##0.00')
# Header style: blue bold 12pt Times New Roman; style1 formats dates.
style0 = xlwt.easyxf('font: name Times New Roman, color-index blue, bold on, size 12')
style1 = xlwt.easyxf(num_format_str='D-MMM-YY')
# Date range for the file name: yesterday through today, as ddmmYYYY.
data1 = (datetime.now() - timedelta(days=1))
data1format = data1.strftime("%d%m%Y")
data2 = datetime.now()
data2format = data2.strftime("%d%m%Y")
print(data1format)
##wb = "Documents\teste_excel"
wb = xlwt.Workbook()
ws = wb.add_sheet('Teste')
# Header row.
ws.write(0, 0, "Setor", style0)
ws.write(0, 1, "Prefixo", style0)
ws.write(0, 2, "Unidade", style0)
ws.write(0, 3, "data", style0)
##ws.write(1, 0, datetime.now(), style1)
# Placeholder data row.
ws.write(1, 0, 1)
ws.write(1, 1, 1)
ws.write(1, 2, 1)
# NOTE(review): "B2:B3" is a cell range, not a formula expression (e.g.
# SUM(B2:B3)) -- confirm what this cell is meant to compute.
ws.write(1, 3, xlwt.Formula("B2:B3"))
wb.save('{}_{}.xls'.format(data1format,data2format))
| StarcoderdataPython |
11220477 | <reponame>kanchenxi04/vnpy-app
# -*- coding: utf-8 -*-
# Reference (Chinese): https://www.joinquant.com/post/4739
# [Chan theory] daily K-line "fenbi" (stroke) segmentation and chart display.
# NOTE(review): Python 2 script (`long`, integer division in `range(len(T0) / 5)`).
# `nan`, `array`, `get_price` and `datetime` (bare name) are presumably supplied
# by the star imports below -- confirm.
from chan_lun_util import *
from k_line_dto import *
import matplotlib as mat
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import time
# Configuration: instrument, date range and fallback initial trend.
stock_code = '600527.XSHG'
start_date = '2016-09-01'
end_date = '2017-03-17'
initial_trend = "down"
# Fetch daily, forward-adjusted quotes and drop zero-volume (suspended) bars.
quotes = get_price(stock_code, start_date, end_date, frequency='daily', skip_paused=False, fq='pre')
quotes[quotes['volume'] == 0] = np.nan
quotes = quotes.dropna()
Close = quotes['close']
Open = quotes['open']
High = quotes['high']
Low = quotes['low']
T0 = quotes.index.values
length = len(Close)
fig = plt.figure(figsize=(16, 8))
ax1 = plt.subplot2grid((10, 4), (0, 0), rowspan=10, colspan=4)
# fig = plt.figure()
# ax1 = plt.axes([0,0,3,2])
X = np.array(range(0, length))
# NaN padding so concatenated line segments do not connect across bars.
pad_nan = X + nan
# Compute the upper and lower shadows (wicks).
max_clop = Close.copy()
max_clop[Close < Open] = Open[Close < Open]
min_clop = Close.copy()
min_clop[Close > Open] = Open[Close > Open]
# Upper shadow: from the high down to the top of the candle body.
line_up = np.array([High, max_clop, pad_nan])
line_up = np.ravel(line_up, 'F')
# Lower shadow: from the low up to the bottom of the candle body.
line_down = np.array([Low, min_clop, pad_nan])
line_down = np.ravel(line_down, 'F')
# X coordinates matching the shadow segments.
pad_nan = nan + X
pad_X = np.array([X, X, X])
pad_X = np.ravel(pad_X, 'F')
# Draw the candle bodies; split into up (close > open), down and flat bars.
up_cl = Close.copy()
up_cl[Close <= Open] = nan
up_op = Open.copy()
up_op[Close <= Open] = nan
down_cl = Close.copy()
down_cl[Open <= Close] = nan
down_op = Open.copy()
down_op[Open <= Close] = nan
even = Close.copy()
even[Close != Open] = nan
# Polygon vertex lists for the red (up), green (down) and flat bodies.
pad_box_up = np.array([up_op, up_op, up_cl, up_cl, pad_nan])
pad_box_up = np.ravel(pad_box_up, 'F')
pad_box_down = np.array([down_cl, down_cl, down_op, down_op, pad_nan])
pad_box_down = np.ravel(pad_box_down, 'F')
pad_box_even = np.array([even, even, even, even, pad_nan])
pad_box_even = np.ravel(pad_box_even, 'F')
# The NaNs in X need not pair one-to-one with those in y.
X_left = X - 0.25
X_right = X + 0.25
box_X = np.array([X_left, X_right, X_right, X_left, pad_nan])
box_X = np.ravel(box_X, 'F')
# Close_handle=plt.plot(pad_X,line_up,color='k')
vertices_up = array([box_X, pad_box_up]).T
vertices_down = array([box_X, pad_box_down]).T
vertices_even = array([box_X, pad_box_even]).T
handle_box_up = mat.patches.Polygon(vertices_up, color='r', zorder=1)
handle_box_down = mat.patches.Polygon(vertices_down, color='g', zorder=1)
handle_box_even = mat.patches.Polygon(vertices_even, color='k', zorder=1)
ax1.add_patch(handle_box_up)
ax1.add_patch(handle_box_down)
ax1.add_patch(handle_box_even)
handle_line_up = mat.lines.Line2D(pad_X, line_up, color='k', linestyle='solid', zorder=0)
handle_line_down = mat.lines.Line2D(pad_X, line_down, color='k', linestyle='solid', zorder=0)
ax1.add_line(handle_line_up)
ax1.add_line(handle_line_down)
v = [0, length, Open.min() - 0.5, Open.max() + 0.5]
plt.axis(v)
# Build x-axis date tick labels: one label every 5 bars, plus the last date.
T1 = T0[-len(T0):].astype(dt.date) / 1000000000
Ti = []
for i in range(len(T0) / 5):
    a = i * 5
    d = dt.date.fromtimestamp(T1[a])
    # print d
    T2 = d.strftime('$%Y-%m-%d$')
    Ti.append(T2)
    # print tab
d1 = dt.date.fromtimestamp(T1[len(T0) - 1])
d2 = d1.strftime('$%Y-%m-%d$')
Ti.append(d2)
ax1.set_xticks(np.linspace(-2, len(Close) + 2, len(Ti)))
ll = Low.min() * 0.97
hh = High.max() * 1.03
ax1.set_ylim(ll, hh)
ax1.set_xticklabels(Ti)
plt.grid(True)
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
# Repackage the quotes into KLineDTO objects for the fenbi algorithm.
k_line_list = []
date_list = quotes.index.tolist()
data_per_day = quotes.values.tolist()
x_date_list = quotes.index.values.tolist()
for index in range(len(date_list)):
    date_time = date_list[index]
    open_price = data_per_day[index][0]
    close_price = data_per_day[index][1]
    high_price = data_per_day[index][2]
    low_price = data_per_day[index][3]
    k_line_dto = KLineDTO(date_time,
                          date_time,
                          date_time,
                          open_price, high_price, low_price, close_price)
    k_line_list.append(k_line_dto)
# Pick the global high or low and the trend direction that follows it (up/down).
min_low = min(Low)
max_high = max(High)
initial_index = 0
for i in range(len(k_line_list)):
    k_line_dto = k_line_list[i]
    if min_low == k_line_dto.low:
        initial_trend = 'up'
        initial_index = i
        print(k_line_dto.begin_time.strftime('%Y-%m-%d %H:%M:%S'))
        break
    if max_high == k_line_dto.high:
        initial_trend = 'down'
        initial_index = i
        print(k_line_dto.begin_time.strftime('%Y-%m-%d %H:%M:%S'))
        break
# Determine the K-line range to segment and slice it out.
input_k_line_list = []
if initial_index == 0:
    input_k_line_list = k_line_list
else:
    input_k_line_list = k_line_list[initial_index - 1:]
# 1. Merge K-lines and locate peak/bottom fractals.
merge_line_list = find_peak_and_bottom(input_k_line_list, initial_trend)
# Mark the first merged K-line unit as a peak/bottom.
m_line_dto = merge_line_list[0]
if initial_trend == "up":
    m_line_dto.is_bottom = "Y"
elif initial_trend == "down":
    m_line_dto.is_peak = "Y"
# 2. Fenbi (stroke) segmentation.
fenbi_result, final_result_array, fenbi_seq_list = fen_bi(merge_line_list)
# 3. Convert the fenbi result into plot coordinates.
x_fenbi_seq = []
y_fenbi_seq = []
for i in range(len(final_result_array)):
    if final_result_array[i]:
        m_line_dto = merge_line_list[fenbi_seq_list[i]]
        if m_line_dto.is_peak == 'Y':
            peak_time = None
            for k_line_dto in m_line_dto.member_list[::-1]:
                if k_line_dto.high == m_line_dto.high:
                    # Dates returned by get_price default to time 08:00:00.
                    peak_time = k_line_dto.begin_time.strftime('%Y-%m-%d') + ' 08:00:00'
                    break
            x_fenbi_seq.append(x_date_list.index(
                long(time.mktime(datetime.strptime(peak_time, "%Y-%m-%d %H:%M:%S").timetuple()) * 1000000000)))
            y_fenbi_seq.append(m_line_dto.high)
        if m_line_dto.is_bottom == 'Y':
            bottom_time = None
            for k_line_dto in m_line_dto.member_list[::-1]:
                if k_line_dto.low == m_line_dto.low:
                    # Dates returned by get_price default to time 08:00:00.
                    bottom_time = k_line_dto.begin_time.strftime('%Y-%m-%d') + ' 08:00:00'
                    break
            x_fenbi_seq.append(x_date_list.index(
                long(time.mktime(datetime.strptime(bottom_time, "%Y-%m-%d %H:%M:%S").timetuple()) * 1000000000)))
            y_fenbi_seq.append(m_line_dto.low)
# Overlay the blue fenbi polyline on the candle chart.
plt.plot(x_fenbi_seq, y_fenbi_seq)
plt.show()
| StarcoderdataPython |
11214587 | <reponame>hamroune/mlflow<filename>mlflow/mleap.py
"""
MLflow integration of the MLeap serialization tool for PySpark MLlib pipelines
This module provides utilities for saving models using the MLeap
using the MLeap library's persistence mechanism.
A companion module for loading MLFlow models with the MLeap flavor format is available in the
`mlflow/java` package.
For more information about MLeap, see https://github.com/combust/mleap.
"""
from __future__ import absolute_import
import os
import sys
import traceback
import json
from six import reraise
import mlflow
from mlflow.models import Model
FLAVOR_NAME = "mleap"
def log_model(spark_model, sample_input, artifact_path):
    """
    Log a Spark MLLib model in MLeap format as an MLflow artifact
    for the current run. The logged model will have the MLeap flavor.
    NOTE: The MLeap model flavor cannot be loaded in Python. It must be loaded using the
    Java module within the `mlflow/java` package.
    :param spark_model: Spark PipelineModel to be saved. This model must be MLeap-compatible and
                        cannot contain any custom transformers.
    :param sample_input: A sample PySpark Dataframe input that the model can evaluate. This is
                         required by MLeap for data schema inference.
    :param artifact_path: Run-relative artifact path under which the model is logged.
    :return: Whatever Model.log returns for the logged model.
    """
    return Model.log(artifact_path=artifact_path, flavor=mlflow.mleap,
                     spark_model=spark_model, sample_input=sample_input)
def save_model(spark_model, sample_input, path, mlflow_model=Model()):
    """
    Save a Spark MLlib PipelineModel in MLeap format at the given local path.
    The saved model will have the MLeap flavor.
    NOTE: The MLeap model flavor cannot be loaded in Python. It must be loaded using the
    Java module within the `mlflow/java` package.
    NOTE(review): the mutable default `mlflow_model=Model()` is shared across
    calls that omit the argument -- flavors accumulate on one Model instance;
    confirm whether that is intended.
    :param path: Path of the MLFlow model to which this flavor is being added.
    :param spark_model: Spark PipelineModel to be saved. This model must be MLeap-compatible and
                        cannot contain any custom transformers.
    :param sample_input: A sample PySpark Dataframe input that the model can evaluate. This is
                         required by MLeap for data schema inference.
    :param mlflow_model: MLFlow model config to which this flavor is being added.
    """
    # Serialize the model under `path`, then persist the flavor config.
    add_to_model(mlflow_model, path, spark_model, sample_input)
    mlflow_model.save(os.path.join(path, "MLmodel"))
def add_to_model(mlflow_model, path, spark_model, sample_input):
    """
    Add the MLeap flavor to a pre-existing MLFlow model.
    Serializes `spark_model` with MLeap under `path`/mleap/model, writes the
    sample input's schema to `path`/mleap/schema.json, and registers the
    flavor on `mlflow_model`.
    :param mlflow_model: MLFlow model config to which this flavor is being added.
    :param path: Path of the MLFlow model to which this flavor is being added.
    :param spark_model: Spark PipelineModel to be saved. This model must be MLeap-compatible and
                        cannot contain any custom transformers.
    :param sample_input: A sample PySpark Dataframe input that the model can evaluate. This is
                        required by MLeap for data schema inference.
    :raises Exception: if the model is not a PipelineModel, the sample input is
                       missing or not a DataFrame, the target path already
                       exists, or MLeap serialization fails.
    """
    # Imports are local so pyspark/mleap stay optional dependencies of mlflow.
    from pyspark.ml.pipeline import PipelineModel
    from pyspark.sql import DataFrame
    import mleap.version
    # Imported for its side effect (pylint disable suggests it is intentionally
    # unused) -- presumably registers serializeToBundle on PipelineModel; confirm.
    from mleap.pyspark.spark_support import SimpleSparkSerializer  # pylint: disable=unused-variable
    from py4j.protocol import Py4JError
    if not isinstance(spark_model, PipelineModel):
        raise Exception("Not a PipelineModel."
                        " MLeap can currently only save PipelineModels.")
    if sample_input is None:
        raise Exception("A sample input must be specified in order to add the MLeap flavor.")
    if not isinstance(sample_input, DataFrame):
        raise Exception("The sample input must be a PySpark dataframe of type `{df_type}`".format(
            df_type=DataFrame.__module__))
    mleap_path_full = os.path.join(path, "mleap")
    mleap_datapath_sub = os.path.join("mleap", "model")
    mleap_datapath_full = os.path.join(path, mleap_datapath_sub)
    # Refuse to overwrite previously serialized model data.
    if os.path.exists(mleap_path_full):
        raise Exception("MLeap model data path already exists at: {path}".format(
            path=mleap_path_full))
    os.makedirs(mleap_path_full)
    # MLeap infers the data schema from a transformed sample.
    dataset = spark_model.transform(sample_input)
    model_path = "file:{mp}".format(mp=mleap_datapath_full)
    try:
        spark_model.serializeToBundle(path=model_path,
                                      dataset=dataset)
    except Py4JError as e:
        # Re-raise with a friendlier message while preserving the traceback.
        tb = sys.exc_info()[2]
        error_str = ("MLeap encountered an error while serializing the model. Please ensure that"
                     " the model is compatible with MLeap"
                     " (i.e does not contain any custom transformers). Error text: {err}".format(
                         err=str(e)))
        traceback.print_exc()
        reraise(Exception, error_str, tb)
    # Persist the input schema next to the serialized bundle.
    input_schema = json.loads(sample_input.schema.json())
    mleap_schemapath_sub = os.path.join("mleap", "schema.json")
    mleap_schemapath_full = os.path.join(path, mleap_schemapath_sub)
    with open(mleap_schemapath_full, "w") as out:
        json.dump(input_schema, out, indent=4)
    mlflow_model.add_flavor(FLAVOR_NAME,
                            mleap_version=mleap.version.__version__,
                            model_data=mleap_datapath_sub,
                            input_schema=mleap_schemapath_sub)
| StarcoderdataPython |
3297814 | <reponame>Bookiebookie/LieSpline
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import struct
import unittest
import time
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import rospy
g_fileno = 1  # module-global counter: each mock socket gets a unique fileno
class MockSock:
    """Fake socket serving a fixed buffer, for rospy transport tests."""

    def __init__(self, buff):
        # Hand out a unique, monotonically increasing fileno per mock.
        global g_fileno
        g_fileno += 1
        self._fileno = g_fileno
        self.buff = buff

    def fileno(self):
        return self._fileno

    def recv(self, buff_size):
        # Non-consuming read: always the first buff_size bytes of the buffer.
        return self.buff[:buff_size]

    def close(self):
        self.buff = None

    def getsockname(self):
        # Mirrors socket.getsockname()'s (addr, port) shape.
        return (None, None)
class MockEmptySock:
    """Fake socket whose recv() always reports EOF (empty string)."""

    def __init__(self):
        # Same shared fileno counter as MockSock.
        global g_fileno
        g_fileno += 1
        self._fileno = g_fileno

    def fileno(self):
        return self._fileno

    def recv(self, buff_size):
        # Simulates a closed / remotely shut-down connection.
        return ''

    def close(self):
        self.buff = None
class TestRospyTcprosBase(unittest.TestCase):
    """Unit tests for rospy.impl.tcpros_base primitives."""
    def test_constants(self):
        """Sanity-check module-level constants."""
        self.assertEquals("TCPROS", rospy.impl.tcpros_base.TCPROS)
        # NOTE(review): assert_ is called with (expr, msg); `type(...)` is always
        # truthy so this never fails -- assertEquals/isinstance was likely meant.
        self.assert_(type(rospy.impl.tcpros_base.DEFAULT_BUFF_SIZE), int)
    def test_recv_buff(self):
        """recv_buff raises TransportTerminated on EOF and honors buff size."""
        from rospy.impl.tcpros_base import recv_buff
        buff = StringIO()
        try:
            recv_buff(MockEmptySock(), buff, 1)
            self.fail("recv_buff should have raised TransportTerminated")
        except rospy.impl.tcpros_base.TransportTerminated:
            self.assertEquals('', buff.getvalue())
        # Partial read limited by buff_size, then full read when size exceeds data.
        self.assertEquals(5, recv_buff(MockSock('1234567890'), buff, 5))
        self.assertEquals('12345', buff.getvalue())
        buff = StringIO()
        self.assertEquals(10, recv_buff(MockSock('1234567890'), buff, 100))
        self.assertEquals('1234567890', buff.getvalue())
    def test_TCPServer(self):
        """TCPServer binds a port, exposes its handler, and shuts down cleanly."""
        from rospy.impl.tcpros_base import TCPServer
        def handler(sock, addr):
            pass
        s = None
        try:
            s = TCPServer(handler)
            self.assert_(s.port > 0)
            addr, port = s.get_full_addr()
            self.assert_(type(addr) == str)
            self.assertEquals(handler, s.inbound_handler)
            self.failIf(s.is_shutdown)
        finally:
            # Always release the listening socket, even on assertion failure.
            if s is not None:
                s.shutdown()
        self.assert_(s.is_shutdown)
    def test_TCPROSTransportProtocol(self):
        """Protocol stores name/class/direction and queue/buff size overrides."""
        import rospy
        import random
        from rospy.impl.tcpros_base import TCPROSTransportProtocol
        from rospy.impl.transport import BIDIRECTIONAL
        p = TCPROSTransportProtocol('Bob', rospy.AnyMsg)
        self.assertEquals('Bob', p.resolved_name)
        self.assertEquals(rospy.AnyMsg, p.recv_data_class)
        self.assertEquals(BIDIRECTIONAL, p.direction)
        self.assertEquals({}, p.get_header_fields())
        self.assertEquals(rospy.impl.tcpros_base.DEFAULT_BUFF_SIZE, p.buff_size)
        # Randomized override values exercise the constructor kwargs.
        v = random.randint(1, 100)
        p = TCPROSTransportProtocol('Bob', rospy.AnyMsg, queue_size=v)
        self.assertEquals(v, p.queue_size)
        v = random.randint(1, 100)
        p = TCPROSTransportProtocol('Bob', rospy.AnyMsg, buff_size=v)
        self.assertEquals(v, p.buff_size)
    def test_TCPROSTransport(self):
        """Transport validates its name, and set_socket/close manage state."""
        import rospy.impl.tcpros_base
        from rospy.impl.tcpros_base import TCPROSTransport, TCPROSTransportProtocol
        from rospy.impl.transport import OUTBOUND
        p = TCPROSTransportProtocol('Bob', rospy.AnyMsg)
        p.direction = OUTBOUND
        try:
            TCPROSTransport(p, '')
            self.fail("TCPROSTransport should not accept bad name")
        except rospy.impl.tcpros_base.TransportInitError: pass
        t = TCPROSTransport(p, 'transport-name')
        self.assert_(t.socket is None)
        self.assert_(t.md5sum is None)
        self.assert_(t.type is None)
        self.assertEquals(p, t.protocol)
        self.assertEquals('TCPROS', t.transport_type)
        self.assertEquals(OUTBOUND, t.direction)
        self.assertEquals('unknown', t.endpoint_id)
        self.assertEquals(b'', t.read_buff.getvalue())
        self.assertEquals(b'', t.write_buff.getvalue())
        # Attaching a socket records the endpoint; close() clears all state.
        s = MockSock('12345')
        t.set_socket(s, 'new_endpoint_id')
        self.assertEquals('new_endpoint_id', t.endpoint_id)
        self.assertEquals(s, t.socket)
        t.close()
        self.assert_(t.socket is None)
        self.assert_(t.read_buff is None)
        self.assert_(t.write_buff is None)
        self.assert_(t.protocol is None)
| StarcoderdataPython |
6643438 | import pytest
import yaml
from src.schemathesis.utils import StringDatesYAMLLoader
@pytest.mark.parametrize(
    "value, expected",
    (
        ("'1': foo", {"1": "foo"}),
        ("1: foo", {"1": "foo"}),
        ("1: 1", {"1": 1}),
        ("on: off", {"on": False}),
    ),
    ids=["string-key-string-value", "int-key-string-value", "int-key-int-value", "bool-key-bool-value"],
)
def test_parse(value, expected):
    # StringDatesYAMLLoader coerces mapping keys to strings ("1", "on") while
    # scalar values keep their native YAML types (int, bool, ...).
    assert yaml.load(value, StringDatesYAMLLoader) == expected
| StarcoderdataPython |
120029 | import argparse
from typing import Optional, Dict, Type
from cpk.cli import AbstractCLICommand
from cpk.cli.commands.endpoint.info import CLIEndpointInfoCommand
from cpk.types import Machine, Arguments
# Registry of `cpk endpoint` subcommands: name -> implementing command class.
_supported_subcommands: Dict[str, Type[AbstractCLICommand]] = {
    "info": CLIEndpointInfoCommand,
}
class CLIEndpointCommand(AbstractCLICommand):
    """`cpk endpoint` command: dispatches parsing/execution to a subcommand."""
    KEY = 'endpoint'
    @staticmethod
    def parser(parent: Optional[argparse.ArgumentParser] = None,
               args: Optional[Arguments] = None) -> argparse.ArgumentParser:
        """Build and return the parser of the selected subcommand."""
        # create a temporary parser used to select the subcommand
        parser = argparse.ArgumentParser(parents=[parent], prog='cpk endpoint')
        parser.add_argument(
            'subcommand',
            choices=_supported_subcommands.keys(),
            help=f"Subcommand. Can be any of {', '.join(_supported_subcommands.keys())}"
        )
        # parse_known_args: remaining args belong to the subcommand's parser
        parsed, _ = parser.parse_known_args(args)
        # return subcommand's parser
        subcommand = _supported_subcommands[parsed.subcommand]
        return subcommand.parser(parser, args)
    @staticmethod
    def execute(machine: Machine, parsed: argparse.Namespace) -> bool:
        """Delegate execution to the subcommand chosen during parsing."""
        subcommand = _supported_subcommands[parsed.subcommand]
        return subcommand.execute(machine, parsed)
| StarcoderdataPython |
11224309 | <reponame>gadomski/nexrad-l3<gh_stars>0
import unittest
import stactools.nexrad_l3
class TestModule(unittest.TestCase):
    """Smoke tests for the stactools.nexrad_l3 package."""
    def test_version(self):
        # The package must expose a __version__ attribute.
        self.assertIsNotNone(stactools.nexrad_l3.__version__)
| StarcoderdataPython |
9677075 | <filename>model/attribute/net/models.py
import torch
from torch import nn
from torch.nn import init
from torchvision import models
from model.attribute.net.utils import ClassBlock
from torch.nn import functional as F
class Backbone_nFC(nn.Module):
    """Attribute classifier: a torchvision backbone followed by `class_num`
    independent single-output sigmoid heads (one binary attribute each).
    The backbone family is taken from the prefix of `model_name`
    (e.g. 'resnet50_nfc' -> torchvision.models.resnet50, pretrained)."""
    def __init__(self, class_num, model_name='resnet50_nfc'):
        super(Backbone_nFC, self).__init__()
        self.model_name = model_name
        self.backbone_name = model_name.split('_')[0]
        self.class_num = class_num
        model_ft = getattr(models, self.backbone_name)(pretrained=True)
        if 'resnet' in self.backbone_name:
            # Replace the classifier with identity; keep a global 1x1 pool.
            model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            model_ft.fc = nn.Sequential()
            self.features = model_ft
            self.num_ftrs = 2048  # resnet50 final feature width
        elif 'densenet' in self.backbone_name:
            # NOTE(review): torchvision densenets expose `classifier`, not `fc`,
            # and `features` has no `avgpool` submodule -- this branch looks
            # untested (forward would flatten an unpooled feature map whose
            # size differs from num_ftrs). Confirm before relying on densenet.
            model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            model_ft.fc = nn.Sequential()
            self.features = model_ft.features
            self.num_ftrs = 1024
        else:
            raise NotImplementedError
        # One ClassBlock head per attribute, registered as class_0..class_{n-1}.
        for c in range(self.class_num):
            self.__setattr__('class_%d' % c, ClassBlock(input_dim=self.num_ftrs, class_num=1, activ='sigmoid') )
    def forward(self, x):
        # (N, C, H, W) -> flattened features -> (N, class_num) of sigmoids.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        pred_label = [self.__getattr__('class_%d' % c)(x) for c in range(self.class_num)]
        pred_label = torch.cat(pred_label, dim=1)
        return pred_label
class Backbone_nFC_Id(nn.Module):
    """Like Backbone_nFC, but with one extra head (index `class_num`) that
    predicts identity logits over `id_num` classes alongside the
    `class_num` binary attribute heads."""
    def __init__(self, class_num, id_num, model_name='resnet50_nfc_id'):
        super(Backbone_nFC_Id, self).__init__()
        self.model_name = model_name
        self.backbone_name = model_name.split('_')[0]
        self.class_num = class_num
        self.id_num = id_num
        model_ft = getattr(models, self.backbone_name)(pretrained=True)
        if 'resnet' in self.backbone_name:
            model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            model_ft.fc = nn.Sequential()
            self.features = model_ft
            self.num_ftrs = 2048
        elif 'densenet' in self.backbone_name:
            # NOTE(review): same caveat as Backbone_nFC -- torchvision densenets
            # have `classifier` rather than `fc`, and no `features.avgpool`;
            # this branch looks untested. Confirm before relying on densenet.
            model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            model_ft.fc = nn.Sequential()
            self.features = model_ft.features
            self.num_ftrs = 1024
        else:
            raise NotImplementedError
        # Heads 0..class_num-1: sigmoid attributes; head class_num: id logits.
        for c in range(self.class_num+1):
            if c == self.class_num:
                self.__setattr__('class_%d' % c, ClassBlock(self.num_ftrs, class_num=self.id_num, activ='none'))
            else:
                self.__setattr__('class_%d' % c, ClassBlock(self.num_ftrs, class_num=1, activ='sigmoid'))
    def forward(self, x):
        # Returns (attribute predictions (N, class_num), id logits (N, id_num)).
        x = self.features(x)
        x = x.view(x.size(0), -1)
        pred_label = [self.__getattr__('class_%d' % c)(x) for c in range(self.class_num)]
        pred_label = torch.cat(pred_label, dim=1)
        pred_id = self.__getattr__('class_%d' % self.class_num)(x)
        return pred_label, pred_id
| StarcoderdataPython |
8106699 | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pianoroll_encoder_decoder."""
from magenta.music import pianoroll_encoder_decoder
import numpy as np
import tensorflow.compat.v1 as tf
class PianorollEncodingTest(tf.test.TestCase):
  """Tests for PianorollEncoderDecoder with an input size of 5 pitches."""
  def setUp(self):
    self.enc = pianoroll_encoder_decoder.PianorollEncoderDecoder(5)
  def testProperties(self):
    # num_classes is 2**input_size (each pitch on/off); empty event -> label 0.
    self.assertEqual(5, self.enc.input_size)
    self.assertEqual(32, self.enc.num_classes)
    self.assertEqual(0, self.enc.default_event_label)
  def testEncodeInput(self):
    # Each event (a tuple of active pitches) becomes a binary vector.
    events = [(), (1, 2), (2,)]
    self.assertTrue(np.array_equal(
        np.zeros(5, np.bool), self.enc.events_to_input(events, 0)))
    self.assertTrue(np.array_equal(
        [0, 1, 1, 0, 0], self.enc.events_to_input(events, 1)))
    self.assertTrue(np.array_equal(
        [0, 0, 1, 0, 0], self.enc.events_to_input(events, 2)))
  def testEncodeLabel(self):
    # Labels are bitmasks: {1,2} -> 2**1 + 2**2 = 6, {2} -> 4.
    events = [[], [1, 2], [2]]
    self.assertEqual(0, self.enc.events_to_label(events, 0))
    self.assertEqual(6, self.enc.events_to_label(events, 1))
    self.assertEqual(4, self.enc.events_to_label(events, 2))
  def testDecodeLabel(self):
    # Inverse of testEncodeLabel: bitmask -> tuple of active pitches.
    self.assertEqual((), self.enc.class_index_to_event(0, None))
    self.assertEqual((1, 2), self.enc.class_index_to_event(6, None))
    self.assertEqual((2,), self.enc.class_index_to_event(4, None))
  def testExtendEventSequences(self):
    # Sampled bit vectors are decoded and appended to each sequence in place.
    seqs = ([(0,), (1, 2)], [(), ()])
    samples = ([0, 0, 0, 0, 0], [1, 1, 0, 0, 1])
    self.enc.extend_event_sequences(seqs, samples)
    self.assertEqual(([(0,), (1, 2), ()], [(), (), (0, 1, 4)]), seqs)
# Run the tests when executed directly.
if __name__ == '__main__':
  tf.test.main()
| StarcoderdataPython |
6466065 | '''Utilities for experimenting with the codes.
Assuming curve has all relevant data attached already.
'''
class Coding():
    """Experiments with algebraic-geometry codes C(D, C) on a curve.
    NOTE(review): Python 2 code (print statements, dict.iteritems); the
    `curve` object is expected to provide D(), K(), ddk, cpdk, cqdk, P(), Q(),
    q and the divisor iterators used below -- confirm against the curve class.
    """
    def __init__(self, curve):
        self.c = curve
        self.D = curve.D()  # evaluation divisor
        self.K = curve.K()  # canonical divisor
    def get_k(self, C):
        '''
        Return the dimension of the code with given C and D
        k = l(D-C) - L(-C)
        '''
        return (self.D - C).l() - (-C).l()
    def constructions(self, C):
        '''Given C, return corresponding CL(D, G) and
        COmega(D, G) that realize that code
        Gl = D - C
        Gomega = K + C
        '''
        return {'Omega': self.K + C, 'L': self.D - C}
    def get_d(self, C):
        # Minimum-distance bound for the code, delegated to the curve.
        return self.c.ddk(C)
    def code(self, C):
        # [n, k, d] parameters of the code determined by C.
        return {'n': self.D.deg, 'k': self.get_k(C), 'd': self.get_d(C)}
    def print_code(self, C):
        # Human-readable summary with both realizations (Omega and L form).
        print 'D = ', self.D
        print 'C = ', C
        print '[%s, %s, %s] code over F%s' % (self.D.deg, self.get_k(C),
                                              self.get_d(C), self.c.q)
        for type, div in self.constructions(C).iteritems():
            print type, '\tusing G =', div
    def design_d(self, C):
        # Designed distance: the degree of C.
        return C.deg
    def best_codes(self, one_point=False, redun=False):
        ''' Return a list of best tuples of classic codes in the (k,d)
        format.'''
        def f(div):
            return (self.get_k(div), self.c.ddk(div))
        best = BestTuple(f=f)
        iter = self.c.two_point_iter if not one_point else self.c.one_point_iter
        for div in iter():
            best.update(div)
        best.reduce()
        return best
    def dual(self, C):
        '''Return C' such that C(D, C) is dual to C(D, C')'''
        return self.D - self.K - C
    def is_subcode(self, C, D):
        '''Check C(_,C) \subcode C(_,D) up to equivalence.'''
        return D.less_then(C)
    def mintw_coset(self, C, D):
        '''Return the minimum wt of the words in C_omega(C) not in C_omega(D)
        Assumes D <= C'''
        assert self.is_subcode(D, C)
        d = D - C
        d.make_std()
        temp = C.copy()
        bound = CosetBound()
        # Walk from C towards D one point at a time, folding in the
        # curve's coset bounds at P and Q.
        for i in range(d.P):
            bound.update(self.c.cpdk(temp))
            temp += self.c.P()
        for i in range(d.Q):
            bound.update(self.c.cqdk(temp))
            temp += self.c.Q()
        return bound.b
    def quantum(self, C1, C2, impure=True, assym=True):
        # CSS-style construction from the nested pair C2d <= C1; with
        # impure=True the distances use coset (relative) minimum weights.
        # NOTE(review): parameter `assym` is currently unused -- confirm intent.
        C1d = self.dual(C1)
        C2d = self.dual(C2)
        assert self.is_subcode(C2d, C1)
        data = {'n': self.D.deg, 'k': self.get_k(C1) - self.get_k(C2d)}
        if impure:
            data.update({ 'dx': self.mintw_coset(C2, C1d),
                          'dz': self.mintw_coset(C1, C2d)})
        else:
            data.update({ 'dx': self.get_d(C2),
                          'dz': self.get_d(C1)})
        return data
    def best_quantum(self, one_point=False):
        # Search all divisor pairs, keeping Pareto-best (k, dx, dz) tuples;
        # pairs violating the subcode assertion are simply skipped.
        def q_tuple(C, C2):
            q = self.quantum(C, C2)
            return (q['k'], q['dx'], q['dz'])
        if one_point:
            get_iter = self.c.ddk.one_point_iter
        else:
            get_iter = self.c.ddk.__iter__
        best_q = BestTuple(f=q_tuple)
        #TODO: use a generic divisor iterator, since d is not used directly
        #for C, d in self.c.ddk:
        #    for C2d, d2 in self.c.ddk:
        for C, d in get_iter():
            for C2d, d2 in get_iter():
                try:
                    C2 = self.dual(C2d)
                    best_q.update(C, C2)
                except AssertionError:
                    pass
        best_q.reduce()
        return best_q
class BestTuple():
    '''Collect tuples (e.g. code parameters) and keep only the best ones:
    after reduce(), no stored tuple is dominated coordinate-wise by another.
    The function f maps the arguments passed to update() to a tuple; the
    originating arguments are remembered in self.witness.'''
    # TODO: an online (incremental) version would avoid storing dominated tuples.
    def __init__(self, f=None):
        '''Object to abstract the process of turning divisors (or pairs of
        divisors) to parameters of codes and storing the best ones. The input
        function f is applied to all parameters passed to method update'''
        self.ind = []        # parameter tuples recorded so far
        self.witness = {}    # tuple -> arguments that produced it
        self.f = f
    def update(self, *arg):
        '''Record f(*arg) and remember arg as its witness.'''
        a = self.f(*arg)
        self.ind.append(a)
        self.witness[a] = arg
    def reduce(self):
        '''Prune dominated tuples, one coordinate at a time, then sort.'''
        if not self.ind:
            # Nothing recorded; previously this raised IndexError on ind[0].
            return
        for i in range(len(self.ind[0])):
            self.reduce_coord(i)
        self.ind.sort()
    def reduce_coord(self, i):
        '''Among tuples equal in all coordinates except i, keep only the one
        maximal in coordinate i.'''
        d = {}
        for tup in self.ind:
            key = list(tup)
            key.pop(i)
            key = tuple(key)
            if key in d and d[key][i] >= tup[i]:
                continue
            d[key] = tup
        # list(...): on Python 3, dict.values() is a view with no sort/append.
        self.ind = list(d.values())
        # Snapshot the keys: popping while iterating a dict view raises
        # RuntimeError on Python 3 (worked only because Py2 keys() is a list).
        for k in list(self.witness.keys()):
            if k not in self.ind:
                self.witness.pop(k)
    def get_best(self):
        '''Return the current (reduced) list of tuples.'''
        return self.ind
    def save(self, file):
        '''Pickle the collected tuples and witnesses to an open file object.'''
        import pickle
        pickle.dump({'ind': self.ind, 'witness': self.witness}, file)
    def load(self, file):
        '''Restore state previously written by save().'''
        import pickle
        data = pickle.load(file)
        self.ind = data['ind']
        self.witness = data['witness']
    def closest(self, new):
        ''' return distance to a point covered by a best tuple, and that tuple.
        Breadth-first search decrementing one coordinate at a time, so the
        first covered point found is at minimal L1 distance from `new`.
        Note: loops forever if no stored tuple ever covers a descendant.'''
        queue = [(new, 0)]
        seen = set([new])
        while True:
            cur, d = queue.pop(0)
            if self.covered(cur):
                return cur, d
            for cand in self.next(cur):
                # The original dedup test compared a bare tuple against
                # (tuple, dist) pairs and therefore never matched, making
                # the search exponential; track visited tuples instead.
                if cand not in seen:
                    seen.add(cand)
                    queue.append((cand, d + 1))
    def next(self, cur):
        '''All tuples obtained from cur by decrementing one coordinate.'''
        l = list(cur)
        out = []
        for i in range(len(l)):
            new = l[:]
            new[i] -= 1
            out.append(tuple(new))
        return out
    def covered(self, new):
        '''True iff some stored tuple dominates `new` coordinate-wise.'''
        for i in self.ind:
            if self.covered_test(new, i):
                return True
        return False
    def covered_test(self, a, b):
        '''True iff a <= b in every coordinate.'''
        return all([a[i] <= b[i] for i in range(len(a))])
def defect(best, curve):
    """Pair every tuple in `best` with the quantity n - sum(tup) + 1,
    where n is the degree of the curve's divisor D (the slack in the
    Singleton-type bound sum(tup) <= n + 1)."""
    n = curve.D().deg
    pairs = []
    for tup in best:
        pairs.append((tup, n - sum(tup) + 1))
    return pairs
class CosetBound():
    """Running minimum over positive bounds; 0 acts as 'no bound yet'."""
    def __init__(self, start=0):
        # self.b holds the best (smallest positive) bound seen so far.
        self.b = start
    def update(self, new):
        """Fold a new candidate bound into self.b."""
        if self.b == 0:
            # First value replaces the sentinel unconditionally.
            self.b = new
        elif new > 0:
            self.b = min(self.b, new)
| StarcoderdataPython |
def answer_type(request, json_list, nested):
    """Tag every 'task_instance' question in json_list with the
    'task_answer' answer class (mutates the dicts in place).

    `request` and `nested` are accepted for caller signature
    compatibility but are not used here.
    """
    for question in json_list:
        payload = question['payload']
        if payload['object_type'] == 'task_instance':
            question['answer_class'] = 'task_answer'
226055 | <filename>rest_live/apps.py
from django.apps import AppConfig
class RestLiveConfig(AppConfig):
    """Django AppConfig registering the rest_live application."""
    name = "rest_live"
| StarcoderdataPython |
11362938 | <filename>OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/EXT/histogram.py
'''OpenGL extension EXT.histogram
This module customises the behaviour of the
OpenGL.raw.GL.EXT.histogram to provide a more
Python-friendly API
Overview (from the spec)
This extension defines pixel operations that count occurences of
specific color component values (histogram) and that track the minimum
and maximum color component values (minmax). An optional mode allows
pixel data to be discarded after the histogram and/or minmax operations
are completed. Otherwise the pixel data continue on to the next
operation unaffected.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/histogram.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.histogram import *
from OpenGL.raw.GL.EXT.histogram import _EXTENSION_NAME
def glInitHistogramEXT():
    '''Return boolean indicating whether this extension is available'''
    # Local import mirrors the other auto-generated PyOpenGL extension modules
    # (the module-level `extensions` import above would also work).
    return extensions.hasGLExtension( _EXTENSION_NAME )
# Auto-generated output-parameter wrappers: `params` becomes a return value
# sized from the GL `pname` argument (orPassIn keeps the old in/out calling
# convention available).
# OUTPUT glGetHistogramEXT.values COMPSIZE(target, format, type)
glGetHistogramParameterfvEXT=wrapper.wrapper(glGetHistogramParameterfvEXT).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetHistogramParameterivEXT=wrapper.wrapper(glGetHistogramParameterivEXT).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# OUTPUT glGetMinmaxEXT.values COMPSIZE(target, format, type)
glGetMinmaxParameterfvEXT=wrapper.wrapper(glGetMinmaxParameterfvEXT).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetMinmaxParameterivEXT=wrapper.wrapper(glGetMinmaxParameterivEXT).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
# Hand-written overrides: re-wrap the histogram getters with a fixed
# single-element output size, replacing the autogenerated pname-based sizing
# above -- presumably because these pnames always return one value; confirm.
glGetHistogramParameterfvEXT = wrapper.wrapper(glGetHistogramParameterfvEXT).setOutput(
    "params",(1,), orPassIn=True
)
glGetHistogramParameterivEXT = wrapper.wrapper(glGetHistogramParameterivEXT).setOutput(
    "params",(1,), orPassIn=True
)
| StarcoderdataPython |
1991701 | <reponame>SkaarlK/Learning-Python
# Build L3 as the concatenation of the odd and even lists, then show it.
L = [1, 3, 5, 7, 9]
L2 = [2, 4, 6, 8, 10]
L3 = list(L)
L3.extend(L2)
print(L3)
8196993 | import torch
from torch import nn
from models import tdl
BN_MOMENTUM = 0.1
class IdentityMapping(nn.Module):
    """Identity/skip branch with a learnable gate.

    Maps the input to `out_channels` (via a 1x1 conv when the channel count
    changes) and scales the result by a gate `alpha`:

    * ``per_channel`` -- one learnable gate per output channel,
    * ``single``      -- one learnable scalar gate shared by all channels,
    * ``standard``    -- plain ungated identity; `alpha` is kept only so the
      parameter layout matches the gated variants, and is frozen.

    Gates are zero-initialised, so the gated variants start as a zero branch.
    """

    _MODES = ("per_channel", "single", "standard")

    def __init__(self, in_channels, out_channels, mode="per_channel"):
        super(IdentityMapping, self).__init__()
        if mode not in self._MODES:
            # Previously an unknown mode crashed later with UnboundLocalError.
            raise ValueError(
                "mode must be one of %r, got %r" % (self._MODES, mode))
        self._mode = mode
        self._setup_skip_conv(in_channels, out_channels)
        self._setup_alpha(out_channels)

    def _setup_skip_conv(self, in_channels, out_channels):
        # The 1x1 conv is only *used* when channel counts differ, but it is
        # always created so the module's state_dict layout stays stable.
        self._use_skip_conv = in_channels != out_channels
        self.skip_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1
        )

    def _setup_alpha(self, out_channels):
        # Zero-initialised gate; trainable except in "standard" mode.
        # nn.Parameter defaults to requires_grad=True, so the flag must be
        # passed to the Parameter itself -- the original set it on the tensor,
        # where it was silently discarded.
        size = out_channels if self._mode == "per_channel" else 1
        alpha = torch.zeros(size).float()
        self.alpha = nn.Parameter(alpha,
                                  requires_grad=(self._mode != "standard"))

    def _apply_gating(self, x):
        """Scale x by the gate according to the configured mode."""
        if self._mode == "per_channel":
            return x * self.alpha[None, :, None, None]
        if self._mode == "single":
            return x * self.alpha
        return x  # "standard": ungated identity

    def forward(self, x):
        identity = self.skip_conv(x) if self._use_skip_conv else x
        return self._apply_gating(identity)
class MultiScaleResblock(nn.Module):
    """Residual block aggregating three receptive-field scales.

    Three chained 3x3 conv+BN+ReLU stages emit in/2, in/4 and in/4 channels;
    their concatenation (exactly `in_channels` channels) forms the residual,
    which is summed with a gated identity branch (see IdentityMapping).  When
    `out_channels` differs from `in_channels`, a trailing 1x1 conv remaps the
    sum to `out_channels`.

    Keyword args:
        identity_gating_mode: gating mode forwarded to IdentityMapping
            ("per_channel" by default).
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(MultiScaleResblock, self).__init__()
        if in_channels % 4 != 0:
            # The scales produce in/2 + in/4 + in/4 channels; with floor
            # division these only sum back to in_channels when it is a
            # multiple of 4 -- otherwise the residual + identity addition in
            # forward() fails with an opaque shape error, so fail fast here.
            raise ValueError(
                "in_channels must be a multiple of 4, got %d" % in_channels)
        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=in_channels//2,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv2 = nn.Conv2d(
            in_channels=in_channels//2,
            out_channels=in_channels//4,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv3 = nn.Conv2d(
            in_channels=in_channels//4,
            out_channels=in_channels//4,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.identity_mapping = IdentityMapping(
            in_channels=in_channels,
            out_channels=in_channels,
            mode=kwargs.get("identity_gating_mode", "per_channel"),
        )
        # Optional 1x1 remap applied after the residual sum.
        self._remap_output_dim = False
        if in_channels != out_channels:
            self._remap_output_dim = True
            self._remap_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(in_channels//2)
        self.bn2 = nn.BatchNorm2d(in_channels//4)
        self.bn3 = nn.BatchNorm2d(in_channels//4)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Each scale feeds the next; all three are kept for the residual.
        out1 = self.relu(self.bn1(self.conv1(x)))
        out2 = self.relu(self.bn2(self.conv2(out1)))
        out3 = self.relu(self.bn3(self.conv3(out2)))
        residual = torch.cat([out1, out2, out3], dim=1)
        identity = self.identity_mapping(x)
        out = residual + identity
        if self._remap_output_dim:
            out = self._remap_conv(out)
        return out
class Bottleneck(nn.Module):
    """Stride-1 bottleneck residual block: 1x1 -> 3x3 -> 1x1 convs with BN.

    `expansion` scales the channel count of the last conv; it is 1 here, so
    the output has exactly `out_channels` channels.  A bias-free design is
    used for the convs since each is followed by BatchNorm.
    """

    expansion = 1

    def __init__(self, in_channels, out_channels, **kwargs):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            bias=False
        )
        self.bn1 = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(
            out_channels,
            out_channels * self.expansion,
            kernel_size=1,
            bias=False
        )
        self.bn3 = nn.BatchNorm2d(
            out_channels * self.expansion,
            momentum=BN_MOMENTUM
        )
        self.relu = nn.ReLU(inplace=True)
        # 1x1 conv remapping the identity branch when channel counts differ.
        # NOTE(review): if `expansion` ever changes from 1, this must map to
        # out_channels * expansion to match the residual -- confirm intent.
        self._remap_output_dim = False
        if in_channels != out_channels:
            self._remap_output_dim = True
            self._remap_conv = nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=1
            )

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        # No ReLU before the residual addition (applied after, below).
        out = self.bn3(self.conv3(out))
        # (removed: stray debug `print("out: ", out.shape)` left in forward)
        if self._remap_output_dim:
            identity = self._remap_conv(x)
        out += identity
        out = self.relu(out)
        return out
class HeadLayer(nn.Module):
    """Stem network: two stride-2 conv-BN-ReLU stages (4x downsample).

    Maps a 3-channel image to ``out_channels`` feature maps.  The ``block``
    argument is accepted for signature compatibility but unused.
    """
    def __init__(self, hidden_channels, out_channels, block, kernel_size=3):
        super(HeadLayer, self).__init__()
        # stem net: RGB -> hidden -> out, halving spatial size at each conv
        self.conv1 = nn.Conv2d(3, hidden_channels, kernel_size=kernel_size,
                               stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(hidden_channels, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(hidden_channels, out_channels,
                               kernel_size=kernel_size, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU()
    def forward(self, x):
        """Run both conv-BN-ReLU stages in sequence."""
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2)):
            x = self.relu(bn(conv(x)))
        return x
class MergeDecoder(nn.Module):
    """Upsample a feature map, merge it with a skip connection, then conv.

    ``mode`` selects the merge: "addition" sums the upsampled tensor with
    ``down_feature`` (channel count unchanged), "concat" stacks them along
    the channel axis (channel count doubles).  ``in_channels`` must equal
    the POST-merge channel count: C for "addition", 2*C for "concat".
    """
    def __init__(self, mode, in_channels, out_channels, **kwargs):
        super(MergeDecoder, self).__init__()
        self._mode = mode
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.decode_conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
    def forward(self, x, down_feature):
        """Upsample ``x`` 2x, merge with ``down_feature``, conv-BN-ReLU."""
        residual = self.up(x)
        if self._mode == "addition":
            out = residual + down_feature
        elif self._mode == "concat":
            out = torch.cat([residual, down_feature], dim=1)
        # NOTE(review): any other mode leaves ``out`` unbound and raises
        # UnboundLocalError here.
        out = self.relu(self.bn(self.decode_conv(out)))
        return out
class HourGlass(nn.Module):
    """One hourglass stage: 4 pooled encoder steps, 3 merge-decoders, final up.

    ``block`` is the residual block class used in each encoder step;
    ``merge_mode`` selects how decoder features merge with encoder skips
    ("addition" or "concat").  ``n_joints`` is accepted for signature
    compatibility but unused here.
    """
    def __init__(self, block, stack_i, in_channels, n_joints=16, merge_mode="addition", **kwargs):
        super(HourGlass, self).__init__()
        self._stack_i = stack_i
        self._merge_mode = merge_mode
        # Pooling and upsampling ops
        self.pool = nn.MaxPool2d((2,2))
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        # Encoder: four block -> maxpool -> conv stages, each halving H/W.
        self.encode_1 = nn.Sequential(
            block(
                in_channels=in_channels,
                out_channels=in_channels,
                **kwargs
            ),
            nn.MaxPool2d((2,2)),
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
        )
        self.encode_2 = nn.Sequential(
            block(
                in_channels=in_channels,
                out_channels=in_channels,
                **kwargs
            ),
            nn.MaxPool2d((2,2)),
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
        )
        self.encode_3 = nn.Sequential(
            block(
                in_channels=in_channels,
                out_channels=in_channels,
                **kwargs
            ),
            nn.MaxPool2d((2,2)),
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
        )
        self.encode_4 = nn.Sequential(
            block(
                in_channels=in_channels,
                out_channels=in_channels,
                **kwargs
            ),
            nn.MaxPool2d((2,2)),
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
        )
        # Decoder
        # BUGFIX: the decoders were always sized for 2*C input channels, but
        # in "addition" merge mode the merged tensor keeps C channels, which
        # made decode_conv fail at run time.  Size per merge mode instead
        # ("concat" behaviour is unchanged).
        decoder_in = in_channels * 2 if merge_mode == "concat" else in_channels
        self.decode_1 = MergeDecoder(
            mode=self._merge_mode,
            in_channels=decoder_in,
            out_channels=in_channels,
            **kwargs,
        )
        self.decode_2 = MergeDecoder(
            mode=self._merge_mode,
            in_channels=decoder_in,
            out_channels=in_channels,
            **kwargs,
        )
        self.decode_3 = MergeDecoder(
            mode=self._merge_mode,
            in_channels=decoder_in,
            out_channels=in_channels,
            **kwargs,
        )
        self.final_up = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=3,
            stride=1,
            padding=1
        )
    def forward(self, x):
        """Encode 4x down, decode with skip merges, return full-res features."""
        # Encoder (removed stray debug print of the input shape)
        down1 = self.encode_1(x)
        down2 = self.encode_2(down1)
        down3 = self.encode_3(down2)
        down4 = self.encode_4(down3)
        # Decoder: each step upsamples 2x and merges the matching skip.
        up1 = self.decode_1(down4, down3)
        up2 = self.decode_2(up1, down2)
        up3 = self.decode_3(up2, down1)
        up4 = self.up(up3)
        out = self.final_up(up4)
        return out
class PoseNet(nn.Module):
    """Stacked-hourglass pose network with intermediate supervision.

    Each stack produces a joint-heatmap tensor; all stack outputs are
    returned so the loss can supervise every stage.
    """
    def __init__(self,
                 n_stacks=1,
                 inp_dim=256,
                 n_joints=16,
                 merge_mode="concat",
                 **kwargs):
        # NOTE(review): ``inp_dim`` is accepted but unused -- the internal
        # width is hard-coded to h_dim = 128 below.
        super(PoseNet, self).__init__()
        self._n_stacks = n_stacks
        self.relu = nn.ReLU()
        block = Bottleneck # Bottleneck, MultiScaleResblock
        h_dim = 128
        # Head layer
        self.head_layer = HeadLayer(hidden_channels=64, out_channels=h_dim, block=block)
        if kwargs.get("share_weights", False):
            # One hourglass module reused in every stack (shared parameters).
            hg_model = HourGlass(
                block=block,
                stack_i=0,
                in_channels=h_dim,
                merge_mode=merge_mode,
                **kwargs
            )
            self.hgs = nn.ModuleList([hg_model for i in range(n_stacks)])
        else:
            self.hgs = nn.ModuleList([
                HourGlass(
                    block=block,
                    stack_i=i,
                    in_channels=h_dim,
                    merge_mode=merge_mode,
                    **kwargs
                )
                for i in range(n_stacks)
            ])
        # Per-stack feature refinement.  NOTE(review): unlike the hourglasses,
        # ``block`` is constructed here without **kwargs -- confirm that is
        # intentional.
        self.feature_maps = nn.ModuleList([
            nn.Sequential(
                block(
                    in_channels=h_dim,
                    out_channels=h_dim,
                ),
                nn.Conv2d(h_dim, h_dim, kernel_size=1, stride=1),
                nn.BatchNorm2d(h_dim),
                self.relu,
            )
            for i in range(n_stacks)
        ])
        # Per-stack 1x1 conv producing the joint heatmaps (logits).
        self.logit_maps = nn.ModuleList([
            nn.Sequential(nn.Conv2d(h_dim, n_joints, kernel_size=1, stride=1))
            for i in range(n_stacks)
        ])
        # Per-stack remap of heatmaps back to feature width for the next stack.
        self.remaps = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(n_joints, h_dim, kernel_size=1, stride=1),
                self.relu,
            )
            for i in range(n_stacks)
        ])
    def forward(self, x):
        """Return stacked per-stage logits of shape (n_stacks, N, J, H, W)."""
        x = self.head_layer(x)
        logits = []
        for stack_i in range(self._n_stacks):
            identity = x.clone()
            hg_out = self.hgs[stack_i](x)
            features_i = self.feature_maps[stack_i](hg_out)
            logit_i = self.logit_maps[stack_i](features_i)
            logits.append(logit_i)
            # Next stack input: previous input + refined features + remapped
            # heatmaps (standard stacked-hourglass intermediate supervision).
            residual = features_i + self.remaps[stack_i](logit_i)
            x = identity + residual
        logits = torch.stack(logits)
        return logits
def get_pose_net(cfg, is_train, **kwargs):
    """Build a :class:`PoseNet` from an experiment config.

    ``is_train`` and extra kwargs are accepted for factory-signature
    compatibility but unused.  SHARE_HG_WEIGHTS is optional in the config
    and defaults to False.
    """
    n_hg_stacks = cfg.MODEL.EXTRA.N_HG_STACKS
    if "SHARE_HG_WEIGHTS" in cfg.MODEL.EXTRA:
        share_weights = cfg.MODEL.EXTRA.SHARE_HG_WEIGHTS
    else:
        share_weights = False
    model = PoseNet(
        n_stacks=n_hg_stacks,
        inp_dim=cfg.MODEL.NUM_CHANNELS,
        n_joints=cfg.MODEL.NUM_JOINTS,
        merge_mode=cfg.MODEL.MERGE_MODE,
        identity_gating_mode="per_channel",
        share_weights=share_weights,
    )
    # BUGFIX: a dataset artifact ("| StarcoderdataPython") was fused onto the
    # return statement, which would raise NameError at run time.
    return model
# --- StarcoderdataPython sample 9704050 (<reponame>godontop/python-work) ---
# coding=utf-8
"""Draw two periods of a sine wave in a pygame window; close it to quit."""
import math
import sys
import pygame

pygame.init()
surface = pygame.display.set_mode([640, 480])
surface.fill([255, 255, 255])
# Sample the sine once per pixel column: two full periods across the 640 px
# width, amplitude 200 px, centred on the vertical midline (y = 240).
points = [
    [col, int(math.sin(col / 640.0 * 4 * math.pi) * 200 + 240)]
    for col in range(0, 640)
]
pygame.draw.lines(surface, [0, 0, 0], False, points, 2)
pygame.display.flip()
# Keep the window open until the user closes it.
alive = True
while alive:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            alive = False
pygame.quit()
# --- StarcoderdataPython sample 3328598 (<gh_stars>1-10) ---
from utils.sm_annotations import Annotation
from utils import uniprot
from needleman_wunsch import Align
def get_annotation(uniprot_ac_target, uniprot_ac_reference, nonconserved_color="r"):
    """
    Annotate mutations of a target sequence relative to a reference
    sequence.  The two sequences are aligned with a pairwise
    Needleman-Wunsch algorithm; every aligned, non-gap position where the
    residues differ is annotated on the target.

    :param uniprot_ac_target: Sequence for which you want the annotations
    :param uniprot_ac_reference: Reference sequence from which changes are
                                 annotated
    :param nonconserved_color: Color assigned to annotations of
                               non-conserved amino acids
    """
    seq_target = uniprot.seq_from_ac(uniprot_ac_target)
    seq_ref = uniprot.seq_from_ac(uniprot_ac_reference)
    aln_target, aln_ref = Align(seq_target, seq_ref)
    # Sanity checks: the alignment may only insert gaps, never alter or
    # reorder the underlying residues.
    assert seq_target == aln_target.replace("-", "")
    assert seq_ref == aln_ref.replace("-", "")
    assert len(aln_target) == len(aln_ref)
    annotation = Annotation()
    residue = 0  # 1-based residue number within the ungapped target
    for aa_target, aa_ref in zip(aln_target, aln_ref):
        if aa_target != "-":
            residue += 1
            # Annotate only aligned (gap-free) positions that differ.
            if aa_ref != "-" and aa_target != aa_ref:
                annotation.add(
                    uniprot_ac_target,
                    residue,
                    nonconserved_color,
                    "%s->%s" % (aa_ref, aa_target),
                )
    return annotation
# --- end of StarcoderdataPython sample 3328598 ---
# BUGFIX: a dataset artifact ("1691814 | ") was fused onto the first import
# line, making it a syntax error.  Imports also split one-per-line per PEP 8.
import os
import sys

from tkinter import *
import tkinter.font as font
from tkinter import filedialog
class Final(Frame):
    """Last wizard page: reviews the chosen flags, writes ``ainstall.sh``
    and runs it inside an embedded xterm.

    NOTE(review): ``frames=[]`` / ``fdict=[]`` are mutable default
    arguments; they are only read here, but callers should still pass
    their own containers.
    """
    def __init__(self, parent=None, pid=0,side=LEFT, anchor=N,wt=600,ht=400,is_next=True,is_back=True,next_frame=None,back_frame=None,info_txt="",path_frm=None,path_frm2=None,frames=[],fdict=[],prefix_var=None,cytnx_dir=None):
        Frame.__init__(self, parent)
        self.pid = pid
        self.var = IntVar()
        self.path_frm = path_frm
        #self.path_frm2 = path_frm2
        #self.cytnx_dir = cytnx_dir
        self.frames = frames
        self.fd = fdict
        self.prefix_var = prefix_var
        self.txtvar = StringVar()
        self.lbl = Label(self,textvariable=self.txtvar,anchor='w',justify=LEFT)
        self.lbl['font'] = font.Font(size=16)
        self.set_info_text(info_txt)
        self.lbl.pack(fill="both")
        # Frame that hosts the embedded xterm during installation.
        self.termf = Frame(self, height=400, width=600)
        self.termf.pack(fill=BOTH, expand=YES)
        if is_next:
            self.nbut = Button(self,text="[ install ]",command=self._action_nxt)
            self.nbut.pack(side=RIGHT)
        if is_back:
            self.bfram = back_frame
            self.bbut = Button(self,text="<- back",command=self._action_bak)
            self.bbut.pack(side=RIGHT)
    def _analysis(self):
        """Render a human-readable summary of every selected option."""
        str_print = "";
        if(self.frames[self.fd['MKL']].state_str()=="ON"):
            str_print += "[x] USE_MKL\n"
            str_print += "[x] USE_OMP [force by mkl]\n"
        else:
            str_print += "[ ] USE_MKL\n"
            if(self.frames[self.fd['OMP']].state_str()=="ON"):
                str_print += "[x] USE_OMP\n"
        if(self.frames[self.fd['HPTT']].state_str()=="ON"):
            str_print += "[x] USE_HPTT\n"
            if(self.frames[self.fd['HPTT_finetune']].state_str()=="ON"):
                str_print += "    [x] HPTT finetune\n"
            else:
                str_print += "    [ ] HPTT finetune\n"
            if(self.frames[self.fd['HPTT_arch']].state_str()=="AVX"):
                str_print += "    [x] HPTT optim option = AVX\n"
            elif(self.frames[self.fd['HPTT_arch']].state_str()=="IBM"):
                str_print += "    [x] HPTT optim option = IBM\n"
            elif(self.frames[self.fd['HPTT_arch']].state_str()=="ARM"):
                str_print += "    [x] HPTT optim option = ARM\n"
            else:
                str_print += "    [ ] HPTT optim option\n"
        else:
            str_print += "[ ] USE_HPTT\n"
        if(self.frames[self.fd['CUDA']].state_str()=="ON"):
            str_print += "[x] USE_CUDA\n"
            if(self.frames[self.fd['CUTT']].state_str()=="ON"):
                str_print += "    [x] USE_CUTT\n"
                if(self.frames[self.fd['CUTT_finetune']].state_str()=="ON"):
                    str_print += "        [x] CUTT finetune\n"
                else:
                    str_print += "        [ ] CUTT finetune\n"
            else:
                str_print += "    [ ] USE_CUTT\n"
        else:
            str_print += "[ ] USE_CUDA\n"
        if(self.frames[self.fd['PY']].state_str()=="ON"):
            str_print += "[x] BUILD_PYTHON API\n"
        else:
            str_print += "[ ] BUILD_PYTHON API\n"
        self.txtvar.set("Review install:\n"+str_print)
    def _action_nxt(self):
        """Assemble the cmake command, write ainstall.sh and run it."""
        print("Review install")
        ## analysis all flags and generate command.
        strout = "cmake";
        if not self.prefix_var.get()=='default':
            strout += " -DCMAKE_INSTALL_PREFIX=%s"%(self.prefix_var.get())
        if(self.frames[self.fd['MKL']].state_str()=="ON"):
            strout += " -DUSE_MKL=on"
        else:
            if(self.frames[self.fd['OMP']].state_str()=="ON"):
                strout += " -DUSE_OMP=on"
        if(self.frames[self.fd['HPTT']].state_str()=="ON"):
            strout += " -DUSE_HPTT=on"
            if(self.frames[self.fd['HPTT_finetune']].state_str()=="ON"):
                strout += " -DHPTT_ENABLE_FINE_TUNE=on"
            if(self.frames[self.fd['HPTT_arch']].state_str()=="AVX"):
                strout += " -DHPTT_ENABLE_AVX=on"
            elif(self.frames[self.fd['HPTT_arch']].state_str()=="IBM"):
                strout += " -DHPTT_ENABLE_IBM=on"
            elif(self.frames[self.fd['HPTT_arch']].state_str()=="ARM"):
                strout += " -DHPTT_ENABLE_ARM=on"
        if(self.frames[self.fd['CUDA']].state_str()=="ON"):
            strout += " -DUSE_CUDA=on"
            if(self.frames[self.fd['CUTT']].state_str()=="ON"):
                strout += " -DUSE_CUTT=on"
                if(self.frames[self.fd['CUTT_finetune']].state_str()=="ON"):
                    strout += " -DCUTT_ENABLE_FINE_TUNE=on"
        if(self.frames[self.fd['PY']].state_str()=="ON"):
            strout += " -DBUILD_PYTHON=on"
        else:
            strout += " -DBUILD_PYTHON=off"
        strout += " ../\n"
        """
        strout += " %s"%(self.cytnx_dir.get())
        ## check:
        if(len(self.cytnx_dir.get())==0):
            raise ValueError("[ERROR] invalid cytnx source path.")
        else:
            if not os.path.exists(self.cytnx_dir.get()):
                raise ValueError("[ERROR] invalid cytnx source path. cannot find path.")
        """
        # hide all op buttoms
        self.lbl.destroy()
        self.bbut.destroy()
        self.nbut.destroy()
        self.path_frm.destroy()
        #self.path_frm2.destroy()
        f = open("ainstall.sh",'w')
        f.write("echo $PWD\n")
        f.write("rm -rf build\n")
        f.write("mkdir build\n")
        f.write("cd build\n")
        f.write("echo $PWD\n")
        f.write(strout)
        f.write("make\n")
        f.write("make install")
        # BUGFIX: close (and thereby flush) the script before xterm executes
        # it; previously the shell could run a truncated, still-buffered file.
        f.close()
        os.system('xterm -into %d -geometry 95x30 -s -sb -e sh ainstall.sh&' %(self.termf.winfo_id()))
        #os.system('xterm -into %d -geometry 40x20 -sb -e %s &' %(self.termf.winfo_id(),"cpuinfo"))
    def _action_bak(self):
        """Hide this page and show the previous wizard page."""
        self.pack_forget()
        self.bfram.pack(side=TOP,fill=X)
    def jump_pack(self,direction,N,start_frame):
        """Terminal node of the wizard chain: show the review page (N must be 0)."""
        if N>0:
            raise ValueError("cannot call jump on final page.")
        else:
            self.set_back_frame(start_frame)
            self._analysis()
            self.pack(fill="both")
    def state_id(self):
        """Return the page's IntVar state (unused selection index)."""
        return self.var.get()
    def set_back_frame(self,back_frame):
        self.bfram = back_frame
    def set_info_text(self,txt):
        self.txtvar.set(txt)
class Optionbar(Frame):
    """One wizard page: a radiobutton group plus next/back navigation.

    ``picks`` are the radio labels; ``picks_js[i]`` is how many pages to
    jump forward when option ``i`` is selected (used to skip dependent
    sub-pages).  NOTE(review): ``picks=[]`` is a mutable default argument;
    it is only read, but callers should pass their own list.
    """
    def __init__(self, parent=None, pid=0,picks=[], picks_js=None, side=LEFT, anchor=N,wt=600,ht=400,is_next=True,is_back=True,next_frame=None,back_frame=None,info_txt=""):
        Frame.__init__(self, parent)
        self.pid = pid
        self.var = IntVar()
        self.picks = picks
        if picks_js is None:
            self.picks_js = [1 for i in range(len(picks))]
        else:
            self.picks_js = picks_js
        # Map option label -> radiobutton value.
        self.dic = dict(zip(picks,range(len(picks))))
        self.txtvar = StringVar()
        lbl = Label(self,textvariable=self.txtvar,anchor=W)
        lbl['font'] = font.Font(size=16)
        self.set_info_text(info_txt)
        lbl.pack(fill='both')
        for pick in picks:
            chk = Radiobutton(self, text=pick, variable=self.var,value=self.dic[pick])
            chk.pack(side=side, anchor=anchor, expand=YES)
        if is_next:
            self.nfram = next_frame
            self.nbut = Button(self,text="next ->",command=self._action_nxt)
            self.nbut.pack(side=RIGHT)
        if is_back:
            self.bfram = back_frame
            self.bbut = Button(self,text="<- back",command=self._action_bak)
            self.bbut.pack(side=RIGHT)
    def _action_nxt(self):
        """Hide this page and jump forward by the selected option's stride."""
        self.pack_forget()
        self.jump_pack('nxt',self.picks_js[self.state_id()],self)
    def _action_bak(self):
        """Hide this page and show the previously recorded page."""
        self.pack_forget()
        self.bfram.pack(fill='both')
    def jump_pack(self,direction,N,start_frame):
        """Walk N pages along the chain, then show the landing page."""
        if N>0:
            if direction == 'nxt':
                self.nfram.jump_pack('nxt',N-1,start_frame)
            else:
                raise ValueError("direction should be 'nxt' or 'bak'")
        else:
            self.set_back_frame(start_frame)
            self.pack(fill='both')
    def state_id(self):
        """Return the selected option's index."""
        return self.var.get()
    def state_str(self):
        """Return the selected option's label (e.g. "ON"/"OFF")."""
        return self.picks[self.var.get()]
    def set_next_frame(self,next_frame):
        self.nfram = next_frame
    def set_back_frame(self,back_frame):
        self.bfram = back_frame
    def set_info_text(self,txt):
        self.txtvar.set(txt)
    def set_default(self,val,by_str=True):
        """Pre-select an option, by label (default) or by index."""
        ival = val
        if by_str:
            ival = self.dic[val]
        self.var.set(ival)
# Root window and install-prefix chooser for the installer wizard.
top = Tk()
top.title("Cytnx installer")
#top.geometry("400x300")
top.resizable(False,False)
#main.mainloop()
PREFIX = None
prefix_var = StringVar()
prefix_var.set("default")
def get_prefix():
    """Ask for an install directory and record it (callback for the button)."""
    # BUGFIX: without the global declaration this assigned a function-local
    # PREFIX and the module-level PREFIX stayed None forever.
    global PREFIX
    PREFIX = filedialog.askdirectory(title = "Select directory to install cytnx")
    prefix_var.set(PREFIX)
    print(PREFIX)
"""
CYTNX_DIR = None
cytnx_var = StringVar()
cytnx_var.set("")
def get_cytnx_dir():
    CYTNX_DIR = filedialog.askdirectory(title = "Select cytnx source path")
    cytnx_var.set(CYTNX_DIR)
    print(CYTNX_DIR)
"""
# Top bar: shows the chosen prefix and the directory-picker button.
frm = Frame(top)
pp = Label(frm,text="install path:",anchor=W)
pp.pack(side=LEFT)
p_str = Label(frm,textvariable=prefix_var,anchor=W)
p_str.pack(side=LEFT)
but_f = Button(frm,text="choose directory to install",command=get_prefix,anchor=E)
but_f.pack(side=RIGHT)
frm.pack(side=TOP,fill=X)
"""
frm2 = Frame(top)
pp2 = Label(frm2,text="cytnx source path:",anchor=W)
pp2.pack(side=LEFT)
p2_str = Label(frm2,textvariable=cytnx_var,anchor=W)
p2_str.pack(side=LEFT)
but_f = Button(frm2,text="choose cytnx source path",command=get_cytnx_dir,anchor=E)
but_f.pack(side=RIGHT)
frm2.pack(side=TOP,fill=X)
"""
# Build one Optionbar page per install flag; ftype records the flag order so
# Final can look pages up by name via the fdict mapping.
ftype = []
## page mkl
ftype.append("MKL")
# picks_js=[2,1]: choosing ON skips the OMP page (mkl forces OMP).
mkl_tk = Optionbar(top,0,['ON','OFF'],picks_js=[2,1],is_back=False)
mkl_tk.set_default('OFF')
mkl_tk.set_info_text("use mkl as linalg library? (default: OFF)\n"+
                     "[Note] 1. default use openblas\n"+
                     "[Note] 2. if ON, openmp is forced enable."
                     )
## page omp
ftype.append("OMP")
omp_tk = Optionbar(top,1,['ON','OFF'])
omp_tk.set_default('OFF')
omp_tk.set_info_text("accelerate using OpenMP? (default: OFF)")
## page hptt
ftype.append("HPTT")
# picks_js=[1,3]: choosing OFF skips both HPTT sub-option pages.
hptt_tk = Optionbar(top,2,['ON','OFF'],picks_js=[1,3])
hptt_tk.set_default('OFF')
hptt_tk.set_info_text("accelerate tensor transpose using HPTT lib? (default: OFF)")
ftype.append("HPTT_finetune")
hptt_op2_tk = Optionbar(top,3,['ON','OFF'])
hptt_op2_tk.set_default('OFF')
hptt_op2_tk.set_info_text("build HPTT lib with optimization on current hardware? (default: OFF)")
ftype.append("HPTT_arch")
hptt_op_tk = Optionbar(top,4,['AVX','IBM','ARM','OFF'])
hptt_op_tk.set_default('OFF')
hptt_op_tk.set_info_text("build HPTT lib with additional instructions support? (default: OFF)")
## page cuda
ftype.append("CUDA")
# picks_js=[1,2]: choosing OFF skips the CUTT page.
cuda_tk = Optionbar(top,5,['ON','OFF'],picks_js=[1,2])
cuda_tk.set_default('OFF')
cuda_tk.set_info_text("install GPU(CUDA) support in cytnx? (default: OFF)")
## cutt
ftype.append("CUTT")
cutt_tk = Optionbar(top,6,['ON','OFF'],picks_js=[1,2])
cutt_tk.set_default('OFF')
cutt_tk.set_info_text("accelerate tensor transpose on GPU using cuTT lib? (default: OFF)")
ftype.append("CUTT_finetune")
cutt_op_tk = Optionbar(top,7,['ON','OFF'])
cutt_op_tk.set_default('OFF')
cutt_op_tk.set_info_text("build cuTT lib with optimization on current hardware? (default: OFF)")
## page python
ftype.append("PY")
python_tk = Optionbar(top,8,['ON','OFF'])
python_tk.set_default('ON')
python_tk.set_info_text("build python API? (default: ON)")
## final wrapping up
td = dict(zip(ftype,range(len(ftype))))
fin_tk = Final(top,10,path_frm=frm,frames=[mkl_tk,omp_tk,hptt_tk,hptt_op2_tk,hptt_op_tk,cuda_tk,cutt_tk,cutt_op_tk,python_tk],fdict=td,prefix_var=prefix_var)
fin_tk.set_info_text("Review install")
## chain:
mkl_tk.set_next_frame(omp_tk)
omp_tk.set_next_frame(hptt_tk)
hptt_tk.set_next_frame(hptt_op2_tk)
hptt_op2_tk.set_next_frame(hptt_op_tk)
hptt_op_tk.set_next_frame(cuda_tk)
cuda_tk.set_next_frame(cutt_tk)
cutt_tk.set_next_frame(cutt_op_tk)
cutt_op_tk.set_next_frame(python_tk)
python_tk.set_next_frame(fin_tk)
## visible entry point
mkl_tk.pack(side=TOP,fill="both")
top.mainloop()
# NOTE(review): everything below this exit(1) is dead code -- an older
# console-based version of the installer kept for reference.
exit(1)
def bool2str(bl):
    """Map a truthy/falsy flag to the cmake-style strings "ON"/"OFF"."""
    return "ON" if bl else "OFF"
# NOTE(review): this and the following sections belong to the legacy console
# installer and are unreachable (exit(1) above).
## list all the major options:
USE_MKL=False
USE_OMP=False
USE_CUDA=False
USE_CUTT=False
#CUTT_option_noalign=False
CUTT_option_finetune=False
USE_HPTT=False
HPTT_option_AVX=False
HPTT_option_IBM=False
HPTT_option_ARM=False
HPTT_option_finetune=False
BUILD_PYTHON=True
PREFIX=None
# Interactive Y/N prompts for each flag.  NOTE(review): resolve_yn and
# resolve_num are not defined anywhere in this file -- if this dead section
# were ever reached it would raise NameError.
## checking linalg, and openmp.
tmp = input("[2] use mkl as linalg library (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
    USE_MKL=resolve_yn(tmp)
print(" >>USE_MKL: ",USE_MKL)
print("--------------")
if(USE_MKL):
    print(" -->[2a] force USE_OMP=True")
    print("--------------")
else:
    tmp = input("[2a] use openmp accelerate (default OFF)? (Y/N):")
    if(len(tmp.strip())!=0):
        USE_OMP=resolve_yn(tmp)
    print(" >>USE_OMP:",USE_OMP)
    print("--------------")
## checking HPTT:
tmp = input("[3] use hptt library to accelrate tensor transpose (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
    USE_HPTT=resolve_yn(tmp)
print(" >>USE_HPTT: ",USE_HPTT)
print("--------------")
if USE_HPTT:
    ## additional options:
    tmp = input("[3a] hptt option(1): fine tune for the native hardware (default OFF)? (Y/N):")
    if(len(tmp.strip())!=0):
        HPTT_option_finetune=resolve_yn(tmp)
    print(" >>HPTT_ENABLE_FINE_TUNE:",HPTT_option_finetune)
    print("--------------")
    tmp = input("[3b] hptt option(2): variant options (1: AVX 2: IBM 3: ARM, default OFF)? (1,2,3 or enter for default):")
    if(len(tmp.strip())!=0):
        hptttype=resolve_num(tmp,{1,2,3})
        if(hptttype==1):
            HPTT_option_AVX=True
            print(" >>HPTT_ENABLE_ABX:",HPTT_option_AVX)
        elif(hptttype==2):
            HPTT_option_IBM=True
            print(" >>HPTT_ENABLE_IBM:",HPTT_option_IBM)
        elif(hptttype==3):
            HPTT_option_ARM=True
            print(" >>HPTT_ENABLE_ARM:",HPTT_option_ARM)
    else:
        print(" *No additional options for hptt*")
    print("--------------")
## checking CUDA:
tmp = input("[4] with GPU (CUDA) support (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
    USE_CUDA=resolve_yn(tmp)
print(" >>USE_CUDA: ",USE_CUDA)
print("--------------")
if USE_CUDA:
    ## additional options:
    tmp = input("[4a] cuda option(1): use cutt library to accelerate tensor transpose (default OFF)? (Y/N):")
    if(len(tmp.strip())!=0):
        USE_CUTT=resolve_yn(tmp)
    print(" >>USE_CUTT:",USE_CUTT)
    print("--------------")
    if USE_CUTT:
        ## add-additional options:
        tmp = input("[4a-1] cutt option(1): fine tune for the native hardware (default OFF)? (Y/N):")
        if(len(tmp.strip())!=0):
            CUTT_option_finetune=resolve_yn(tmp)
        print(" >>CUTT_ENABLE_FINE_TUNE:",CUTT_option_finetune)
        print("--------------")
## checking PYTHON:
tmp = input("[5] Build python API (default ON)? (Y/N):")
if(len(tmp.strip())!=0):
    BUILD_PYTHON=resolve_yn(tmp)
print(" >>BUILD_PYTHON: ",BUILD_PYTHON)
print("--------------")
# Echo back every chosen flag before generating the install script.
##=================================================================
print("*************************")
print("    Review install option   ")
print("")
print(" USE_MKL: ",USE_MKL)
print(" USE_OMP: ",USE_OMP)
print(" USE_HPTT: ",USE_HPTT)
if(USE_HPTT):
    print(" -- HPTT_option: ")
    print("      HPTT_FINE_TUNE: ",HPTT_option_finetune)
    if(HPTT_option_AVX):
        print("      HPTT_ENABLE_ABX:",HPTT_option_AVX)
    if(HPTT_option_IBM):
        print("      HPTT_ENABLE_IBM:",HPTT_option_IBM)
    if(HPTT_option_ARM):
        print("      HPTT_ENABLE_ARM:",HPTT_option_ARM)
print(" USE_CUDA: ",USE_CUDA)
print(" USE_CUTT: ",USE_CUTT)
if(USE_CUTT):
    print(" -- CUTT_option: ")
    print("      CUTT_ENABLE_FINE_TUNE: ",CUTT_option_finetune)
print(" BUILD_PYTHON: ",BUILD_PYTHON)
print("*************************")
## generate sh file:
# NOTE(review): this section is unreachable at run time (exit(1) above) and
# reads flags set by the dead interactive section.  Fixed anyway:
# BUGFIX 1: the script used to reopen ainstall.sh with mode 'w' after writing
#           all the cmake flags, truncating the file and leaving a bare
#           "cmake ../" with every option lost.
# BUGFIX 2: " -DCUTT_ENABLE_FINE_TUNE=on" was missing its leading space and
#           fused onto the previous flag.
with open("ainstall.sh", 'w') as f:
    f.write("rm -rf build\n")
    f.write("mkdir build\n")
    f.write("cd build\n")
    f.write("cmake")
    if not PREFIX is None:
        f.write(" -DCMAKE_INSTALL_PREFIX=%s"%(PREFIX))
    if(USE_MKL):
        f.write(" -DUSE_MKL=on")
    else:
        if(USE_OMP):
            f.write(" -DUSE_OMP=on")
    if(USE_HPTT):
        f.write(" -DUSE_HPTT=on")
        if(HPTT_option_finetune):
            f.write(" -DHPTT_ENABLE_FINE_TUNE=on")
        if(HPTT_option_AVX):
            f.write(" -DHPTT_ENABLE_AVX=on")
        if(HPTT_option_IBM):
            f.write(" -DHPTT_ENABLE_IBM=on")
        if(HPTT_option_ARM):
            f.write(" -DHPTT_ENABLE_ARM=on")
    if(USE_CUDA):
        f.write(" -DUSE_CUDA=on")
        if(USE_CUTT):
            f.write(" -DUSE_CUTT=on")
            if(CUTT_option_finetune):
                f.write(" -DCUTT_ENABLE_FINE_TUNE=on")
    if(BUILD_PYTHON):
        f.write(" -DBUILD_PYTHON=on")
    else:
        f.write(" -DBUILD_PYTHON=off")
    f.write(" ../\n")
    f.write("make\n")
    f.write("make install")
# --- end of StarcoderdataPython sample 1691814 ---
#!/bin/env python
# coding: utf-8
# BUGFIX: a dataset artifact ("1857696 | ") was fused onto the shebang line.
# NOTE(review): Python 2 only -- httplib/urlparse were renamed in Python 3
# (http.client / urllib.parse), and main() below uses py2 except syntax.
from base64 import b64encode
from httplib import HTTPConnection
from urlparse import urlparse
import os
import select
import socket
import sys
def main():
    """Tunnel stdin/stdout through an HTTP proxy via a CONNECT request.

    Intended for use as an SSH ProxyCommand (Python 2 only).  The proxy URL
    comes from argv[3] or the http_proxy environment variable; optional
    basic-auth credentials embedded in the URL are forwarded.
    """
    def exit_with_mesg(message, status=-1, color=31):
        # Print the message in ANSI color (default red) and exit.
        sys.stderr.write("\x1b[%sm%s\x1b[0m\n" % (color, message))
        exit(status)
    if len(sys.argv) < 3:
        exit_with_mesg("usage %s: host port [proxy_url]" % sys.argv[0])
    host, port = sys.argv[1:3]
    proxy_url = os.environ.get("http_proxy", "")
    if len(sys.argv) >= 4:
        proxy_url = sys.argv[3]
    proxy = urlparse(proxy_url)
    if proxy.hostname == None or proxy.port == None:
        exit_with_mesg("Proxy Setting Error: url=%s" % proxy_url)
    try:
        req_headers = {}
        if proxy.username or proxy.password:
            auth = b64encode(proxy.username + ":" + proxy.password)
            req_headers["Proxy-Authorization"] = "Basic " + auth
        conn = HTTPConnection(proxy.hostname, proxy.port, timeout=5)
        conn.request("CONNECT", "%s:%s" % (host, port), headers=req_headers)
        resp = conn.getresponse()
        if resp.status != 200:
            exit_with_mesg("Proxy Error: %s %s" % (resp.status, resp.reason))
        # Relay bytes both ways: stdin -> proxy socket, proxy socket -> stdout.
        stdin, stdout = 0, 1
        proxy_socket = resp.fp.fileno()
        while True:
            read_ready = select.select([stdin, proxy_socket], [], [])[0]
            for fileno in read_ready:
                buffer = os.read(fileno, 4096)
                os.write(proxy_socket if fileno == stdin else stdout, buffer)
    except KeyboardInterrupt:
        exit_with_mesg("Keyboard Interrupt")
    except socket.error, e:
        exit_with_mesg("Socket Error: %s" % e)
if __name__ == '__main__':
    main()
# --- end of StarcoderdataPython sample 1857696 ---
"""
Tests for `txacme.challenges`.
"""
from operator import methodcaller
from acme import challenges
from acme.jose import b64encode
from hypothesis import strategies as s
from hypothesis import assume, example, given
from testtools import skipIf, TestCase
from testtools.matchers import (
AfterPreprocessing, Always, Contains, EndsWith, Equals, HasLength,
Is, IsInstance, MatchesAll, MatchesListwise, MatchesPredicate,
MatchesStructure, Not)
from testtools.twistedsupport import succeeded
from treq.testing import StubTreq
from twisted._threads import createMemoryWorker
from twisted.internet.defer import maybeDeferred
from twisted.python.url import URL
from twisted.web.resource import Resource
from zope.interface.verify import verifyObject
from txacme.challenges import HTTP01Responder, TLSSNI01Responder
from txacme.challenges._tls import _MergingMappingProxy
from txacme.errors import NotInZone, ZoneNotFound
from txacme.interfaces import IResponder
from txacme.test import strategies as ts
from txacme.test.doubles import SynchronousReactorThreads
from txacme.test.test_client import failed_with, RSA_KEY_512, RSA_KEY_512_RAW
try:
from txacme.challenges import LibcloudDNSResponder
from txacme.challenges._libcloud import _daemon_thread
except ImportError:
LibcloudDNSResponder = None
# A random example token for the challenge tests that need one
# NOTE(review): b'<KEY>' looks like anonymisation residue from the dataset --
# the original literal was a 32-byte base64url-encoded token; restore a real
# value before running these tests.
EXAMPLE_TOKEN = b'<KEY>'
class _CommonResponderTests(object):
    """
    Common properties which every responder implementation should satisfy.

    Mixin: concrete subclasses must provide ``_challenge_factory``,
    ``_responder_factory`` and ``_challenge_type`` class attributes.
    """
    def _do_one_thing(self):
        """
        Make the underlying fake implementation do one thing (eg. simulate one
        network request, one threaded task execution).
        """
    def test_interface(self):
        """
        The `.IResponder` interface is correctly implemented.
        """
        responder = self._responder_factory()
        verifyObject(IResponder, responder)
        self.assertThat(responder.challenge_type, Equals(self._challenge_type))
    @example(token=EXAMPLE_TOKEN)
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode))
    def test_stop_responding_already_stopped(self, token):
        """
        Calling ``stop_responding`` when we are not responding for a server
        name does nothing.
        """
        challenge = self._challenge_factory(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = self._responder_factory()
        # maybeDeferred: stop_responding may be sync or return a Deferred.
        d = maybeDeferred(
            responder.stop_responding,
            u'example.com',
            challenge,
            response)
        self._do_one_thing()
        self.assertThat(d, succeeded(Always()))
class TLSResponderTests(_CommonResponderTests, TestCase):
    """
    `.TLSSNI01Responder` is a responder for tls-sni-01 challenges that works
    with txsni.
    """
    _challenge_factory = challenges.TLSSNI01
    _responder_factory = TLSSNI01Responder
    _challenge_type = u'tls-sni-01'
    # NOTE(review): b'<KEY>' below is anonymisation residue; the original
    # example was a concrete 32-byte base64 token.
    @example(token=b'<KEY>')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode))
    def test_start_responding(self, token):
        """
        Calling ``start_responding`` makes an appropriate entry appear in the
        host map.
        """
        ckey = RSA_KEY_512_RAW
        challenge = challenges.TLSSNI01(token=token)
        response = challenge.response(RSA_KEY_512)
        server_name = response.z_domain.decode('ascii')
        host_map = {}
        responder = TLSSNI01Responder()
        # Use a fixed key so certificate generation is deterministic and fast.
        responder._generate_private_key = lambda key_type: ckey
        wrapped_host_map = responder.wrap_host_map(host_map)
        self.assertThat(wrapped_host_map, Not(Contains(server_name)))
        responder.start_responding(u'example.com', challenge, response)
        self.assertThat(
            wrapped_host_map.get(server_name.encode('utf-8')).certificate,
            MatchesPredicate(response.verify_cert, '%r does not verify'))
        # Starting twice before stopping doesn't break things
        responder.start_responding(u'example.com', challenge, response)
        self.assertThat(
            wrapped_host_map.get(server_name.encode('utf-8')).certificate,
            MatchesPredicate(response.verify_cert, '%r does not verify'))
        responder.stop_responding(u'example.com', challenge, response)
        self.assertThat(wrapped_host_map, Not(Contains(server_name)))
class MergingProxyTests(TestCase):
    """
    ``_MergingMappingProxy`` merges two mappings together.

    The overlay shadows the underlay on key collisions; iteration and
    length must not double-count shared keys.
    """
    @example(underlay={}, overlay={}, key=u'foo')
    @given(underlay=s.dictionaries(s.text(), s.builds(object)),
           overlay=s.dictionaries(s.text(), s.builds(object)),
           key=s.text())
    def test_get_overlay(self, underlay, overlay, key):
        """
        Getting a key that only exists in the overlay returns the value from
        the overlay.
        """
        underlay.pop(key, None)
        overlay[key] = object()
        proxy = _MergingMappingProxy(
            overlay=overlay, underlay=underlay)
        self.assertThat(proxy[key], Is(overlay[key]))
    @example(underlay={}, overlay={}, key=u'foo')
    @given(underlay=s.dictionaries(s.text(), s.builds(object)),
           overlay=s.dictionaries(s.text(), s.builds(object)),
           key=s.text())
    def test_get_underlay(self, underlay, overlay, key):
        """
        Getting a key that only exists in the underlay returns the value from
        the underlay.
        """
        underlay[key] = object()
        overlay.pop(key, None)
        proxy = _MergingMappingProxy(
            overlay=overlay, underlay=underlay)
        self.assertThat(proxy[key], Is(underlay[key]))
    @example(underlay={}, overlay={}, key=u'foo')
    @given(underlay=s.dictionaries(s.text(), s.builds(object)),
           overlay=s.dictionaries(s.text(), s.builds(object)),
           key=s.text())
    def test_get_both(self, underlay, overlay, key):
        """
        Getting a key that exists in both the underlay and the overlay returns
        the value from the overlay.
        """
        underlay[key] = object()
        overlay[key] = object()
        proxy = _MergingMappingProxy(
            overlay=overlay, underlay=underlay)
        self.assertThat(proxy[key], Not(Is(underlay[key])))
        self.assertThat(proxy[key], Is(overlay[key]))
    @example(underlay={u'foo': object(), u'bar': object()},
             overlay={u'bar': object(), u'baz': object()})
    @given(underlay=s.dictionaries(s.text(), s.builds(object)),
           overlay=s.dictionaries(s.text(), s.builds(object)))
    def test_len(self, underlay, overlay):
        """
        ``__len__`` of the proxy does not count duplicates.
        """
        proxy = _MergingMappingProxy(
            overlay=overlay, underlay=underlay)
        self.assertThat(len(proxy), Equals(len(list(proxy))))
    @example(underlay={u'foo': object(), u'bar': object()},
             overlay={u'bar': object(), u'baz': object()})
    @given(underlay=s.dictionaries(s.text(), s.builds(object)),
           overlay=s.dictionaries(s.text(), s.builds(object)))
    def test_iter(self, underlay, overlay):
        """
        ``__iter__`` of the proxy does not produce duplicate keys.
        """
        proxy = _MergingMappingProxy(
            overlay=overlay, underlay=underlay)
        keys = sorted(list(proxy))
        self.assertThat(keys, Equals(sorted(list(set(keys)))))
    @example(underlay={u'foo': object()}, overlay={}, key=u'foo')
    @example(underlay={}, overlay={}, key=u'bar')
    @given(underlay=s.dictionaries(s.text(), s.builds(object)),
           overlay=s.dictionaries(s.text(), s.builds(object)),
           key=s.text())
    def test_contains(self, underlay, overlay, key):
        """
        The mapping only contains a key if it can be gotten.
        """
        proxy = _MergingMappingProxy(
            overlay=overlay, underlay=underlay)
        self.assertThat(
            key in proxy,
            Equals(proxy.get(key) is not None))
class HTTPResponderTests(_CommonResponderTests, TestCase):
    """
    `.HTTP01Responder` is a responder for http-01 challenges.
    """
    _challenge_factory = challenges.HTTP01
    _responder_factory = HTTP01Responder
    _challenge_type = u'http-01'
    # NOTE(review): b'<KEY>' below is anonymisation residue; the original
    # example was a concrete 32-byte base64 token.
    @example(token=b'<KEY>')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode))
    def test_start_responding(self, token):
        """
        Calling ``start_responding`` makes an appropriate resource available.
        """
        challenge = challenges.HTTP01(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = HTTP01Responder()
        # Mount the responder at /.well-known/acme-challenge/ and serve it
        # through an in-memory treq client (no real network).
        challenge_resource = Resource()
        challenge_resource.putChild(b'acme-challenge', responder.resource)
        root = Resource()
        root.putChild(b'.well-known', challenge_resource)
        client = StubTreq(root)
        encoded_token = challenge.encode('token')
        challenge_url = URL(host=u'example.com', path=[
            u'.well-known', u'acme-challenge', encoded_token]).asText()
        self.assertThat(client.get(challenge_url),
                        succeeded(MatchesStructure(code=Equals(404))))
        responder.start_responding(u'example.com', challenge, response)
        self.assertThat(client.get(challenge_url), succeeded(MatchesAll(
            MatchesStructure(
                code=Equals(200),
                headers=AfterPreprocessing(
                    methodcaller('getRawHeaders', b'content-type'),
                    Equals([b'text/plain']))),
            AfterPreprocessing(methodcaller('content'), succeeded(
                Equals(response.key_authorization.encode('utf-8'))))
        )))
        # Starting twice before stopping doesn't break things
        responder.start_responding(u'example.com', challenge, response)
        self.assertThat(client.get(challenge_url),
                        succeeded(MatchesStructure(code=Equals(200))))
        responder.stop_responding(u'example.com', challenge, response)
        self.assertThat(client.get(challenge_url),
                        succeeded(MatchesStructure(code=Equals(404))))
@skipIf(LibcloudDNSResponder is None, 'libcloud not available')
class LibcloudResponderTests(_CommonResponderTests, TestCase):
    """
    `.LibcloudDNSResponder` implements a responder for dns-01 challenges using
    libcloud on the backend.

    Fixed here: the ``@example`` decorator on ``test_auto_zone_missing`` had
    been corrupted to the non-identifier ``<PASSWORD>`` (a dataset-redaction
    artifact that made this module a syntax error); it is restored to
    ``EXAMPLE_TOKEN``, consistent with every other ``@example`` in the class.
    """
    _challenge_factory = challenges.DNS01
    _challenge_type = u'dns-01'

    def _responder_factory(self, zone_name=u'example.com'):
        """Build a responder over libcloud's in-memory 'dummy' driver,
        driven synchronously via a memory worker."""
        responder = LibcloudDNSResponder.create(
            reactor=SynchronousReactorThreads(),
            driver_name='dummy',
            username='ignored',
            # NOTE(review): '<PASSWORD>' looks like a dataset redaction;
            # the dummy driver does not authenticate, so any value works.
            password='<PASSWORD>',
            zone_name=zone_name,
            settle_delay=0.0)
        if zone_name is not None:
            responder._driver.create_zone(zone_name)
        # Replace the thread pool so tests can step work explicitly.
        responder._thread_pool, self._perform = createMemoryWorker()
        return responder

    def _do_one_thing(self):
        # Step one unit of queued work on the memory worker.
        return self._perform()

    def test_daemon_threads(self):
        """
        ``_daemon_thread`` creates thread objects with ``daemon`` set.
        """
        thread = _daemon_thread()
        self.assertThat(thread, MatchesStructure(daemon=Equals(True)))

    @example(token=EXAMPLE_TOKEN,
             subdomain=u'acme-testing',
             zone_name=u'example.com')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode),
           subdomain=ts.dns_names(),
           zone_name=ts.dns_names())
    def test_start_responding(self, token, subdomain, zone_name):
        """
        Calling ``start_responding`` causes an appropriate TXT record to be
        created.
        """
        challenge = self._challenge_factory(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = self._responder_factory(zone_name=zone_name)
        server_name = u'{}.{}'.format(subdomain, zone_name)
        zone = responder._driver.list_zones()[0]
        self.assertThat(zone.list_records(), HasLength(0))
        d = responder.start_responding(server_name, challenge, response)
        self._perform()
        self.assertThat(d, succeeded(Always()))
        self.assertThat(
            zone.list_records(),
            MatchesListwise([
                MatchesStructure(
                    name=EndsWith(u'.' + subdomain),
                    type=Equals('TXT'),
                )]))
        # Starting twice before stopping doesn't break things
        d = responder.start_responding(server_name, challenge, response)
        self._perform()
        self.assertThat(d, succeeded(Always()))
        self.assertThat(zone.list_records(), HasLength(1))
        d = responder.stop_responding(server_name, challenge, response)
        self._perform()
        self.assertThat(d, succeeded(Always()))
        self.assertThat(zone.list_records(), HasLength(0))

    @example(token=EXAMPLE_TOKEN,
             subdomain=u'acme-testing',
             zone_name=u'example.com')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode),
           subdomain=ts.dns_names(),
           zone_name=ts.dns_names())
    def test_wrong_zone(self, token, subdomain, zone_name):
        """
        Trying to respond for a domain not in the configured zone results in a
        `.NotInZone` exception.
        """
        challenge = self._challenge_factory(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = self._responder_factory(zone_name=zone_name)
        server_name = u'{}.{}.junk'.format(subdomain, zone_name)
        d = maybeDeferred(
            responder.start_responding, server_name, challenge, response)
        self._perform()
        self.assertThat(
            d,
            failed_with(MatchesAll(
                IsInstance(NotInZone),
                MatchesStructure(
                    server_name=EndsWith(u'.' + server_name),
                    zone_name=Equals(zone_name)))))

    @example(token=EXAMPLE_TOKEN,
             subdomain=u'acme-testing',
             zone_name=u'example.com')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode),
           subdomain=ts.dns_names(),
           zone_name=ts.dns_names())
    def test_missing_zone(self, token, subdomain, zone_name):
        """
        `.ZoneNotFound` is raised if the configured zone cannot be found at the
        configured provider.
        """
        challenge = self._challenge_factory(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = self._responder_factory(zone_name=zone_name)
        server_name = u'{}.{}'.format(subdomain, zone_name)
        # Remove every zone so the configured one cannot be found.
        for zone in responder._driver.list_zones():
            zone.delete()
        d = maybeDeferred(
            responder.start_responding, server_name, challenge, response)
        self._perform()
        self.assertThat(
            d,
            failed_with(MatchesAll(
                IsInstance(ZoneNotFound),
                MatchesStructure(
                    zone_name=Equals(zone_name)))))

    @example(token=EXAMPLE_TOKEN,
             subdomain=u'acme-testing',
             extra=u'extra',
             zone_name1=u'example.com',
             suffix1=u'.',
             zone_name2=u'example.org',
             suffix2=u'')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode),
           subdomain=ts.dns_names(),
           extra=ts.dns_names(),
           zone_name1=ts.dns_names(),
           suffix1=s.sampled_from([u'', u'.']),
           zone_name2=ts.dns_names(),
           suffix2=s.sampled_from([u'', u'.']))
    def test_auto_zone(self, token, subdomain, extra, zone_name1, suffix1,
                       zone_name2, suffix2):
        """
        If the configured zone_name is ``None``, the zone will be guessed by
        finding the longest zone that is a suffix of the server name.
        """
        zone_name3 = extra + u'.' + zone_name1
        zone_name4 = extra + u'.' + zone_name2
        server_name = u'{}.{}.{}'.format(subdomain, extra, zone_name1)
        # All five names must be distinct for the assertions to be meaningful.
        assume(
            len({server_name, zone_name1, zone_name2, zone_name3, zone_name4})
            == 5)
        challenge = self._challenge_factory(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = self._responder_factory(zone_name=None)
        zone1 = responder._driver.create_zone(zone_name1 + suffix1)
        zone2 = responder._driver.create_zone(zone_name2 + suffix2)
        zone3 = responder._driver.create_zone(zone_name3 + suffix1)
        zone4 = responder._driver.create_zone(zone_name4 + suffix2)
        self.assertThat(zone1.list_records(), HasLength(0))
        self.assertThat(zone2.list_records(), HasLength(0))
        self.assertThat(zone3.list_records(), HasLength(0))
        self.assertThat(zone4.list_records(), HasLength(0))
        d = responder.start_responding(server_name, challenge, response)
        self._perform()
        self.assertThat(d, succeeded(Always()))
        # Only zone3 (the longest matching suffix) should receive the record.
        self.assertThat(zone1.list_records(), HasLength(0))
        self.assertThat(zone2.list_records(), HasLength(0))
        self.assertThat(
            zone3.list_records(),
            MatchesListwise([
                MatchesStructure(
                    name=AfterPreprocessing(
                        methodcaller('rstrip', u'.'),
                        EndsWith(u'.' + subdomain)),
                    type=Equals('TXT'),
                )]))
        self.assertThat(zone4.list_records(), HasLength(0))

    @example(token=EXAMPLE_TOKEN,
             subdomain=u'acme-testing',
             zone_name1=u'example.com',
             zone_name2=u'example.org')
    @given(token=s.binary(min_size=32, max_size=32).map(b64encode),
           subdomain=ts.dns_names(),
           zone_name1=ts.dns_names(),
           zone_name2=ts.dns_names())
    def test_auto_zone_missing(self, token, subdomain, zone_name1, zone_name2):
        """
        If the configured zone_name is ``None``, and no matching zone is found,
        ``NotInZone`` is raised.
        """
        server_name = u'{}.{}'.format(subdomain, zone_name1)
        assume(not server_name.endswith(zone_name2))
        challenge = self._challenge_factory(token=token)
        response = challenge.response(RSA_KEY_512)
        responder = self._responder_factory(zone_name=None)
        zone = responder._driver.create_zone(zone_name2)
        self.assertThat(zone.list_records(), HasLength(0))
        d = maybeDeferred(
            responder.start_responding, server_name, challenge, response)
        self._perform()
        self.assertThat(
            d,
            failed_with(MatchesAll(
                IsInstance(NotInZone),
                MatchesStructure(
                    server_name=EndsWith(u'.' + server_name),
                    zone_name=Is(None)))))
# Names exported by ``from <module> import *``; some of these test cases are
# defined earlier in this module, outside the visible region.
__all__ = [
    'HTTPResponderTests', 'TLSResponderTests', 'MergingProxyTests',
    'LibcloudResponderTests']
| StarcoderdataPython |
167717 | from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from crispy_forms.layout import Layout, Field
from crispy_forms.bootstrap import (AppendedText)
from crispy_forms.helper import FormHelper
from .models import Profile
class CustomCheckbox(Field):
    """Crispy-forms field rendered with a project-local checkbox template."""
    template = 'checkbox_input.html'
class UserRegisterForm(UserCreationForm):
    """Registration form: Django's ``UserCreationForm`` plus a required email.

    Fixed here: the two password entries in ``Meta.fields`` had been
    redacted to the duplicate placeholder ``'<PASSWORD>'``; they are
    restored to ``UserCreationForm``'s actual ``password1``/``password2``
    field names.
    """
    email = forms.EmailField()

    class Meta:
        model = User
        fields = [
            'username',
            'email',
            'first_name',
            'last_name',
            # Password and confirmation fields declared by UserCreationForm.
            'password1',
            'password2',
        ]
class UserProfileForm(ModelForm):
    """Edit form for a Profile; the owning user is excluded because it is
    presumably set by the view -- TODO confirm against the caller."""
    class Meta:
        model = Profile
        fields = '__all__'
        exclude = [
            'user'
        ]
class MealCatForm(ModelForm):
    """Profile form restricted to meal-category fields, rendered as a
    horizontal two-column crispy-forms layout."""

    class Meta:
        model = Profile
        fields = '__all__'
        exclude = ['user', 'food_order_day', 'meal_repeat', 'profile_setup']

    def __init__(self, *args, **kwargs):
        super(MealCatForm, self).__init__(*args, **kwargs)
        # Configure the crispy-forms helper on a local first, then publish it.
        helper = FormHelper(self)
        helper.form_tag = False
        helper.form_show_errors = True
        helper.form_class = 'form-horizontal row'
        helper.label_class = 'col-6 col-md-5'
        helper.field_class = 'col-6 col-md-7'
        self.helper = helper
class MealRepeatForm(ModelForm):
    """Single-field form for the meal-repeat interval, rendered with a
    'days' suffix and a 7-28 day range."""

    class Meta:
        model = Profile
        fields = [
            'meal_repeat',
        ]

    def __init__(self, *args, **kwargs):
        super(MealRepeatForm, self).__init__(*args, **kwargs)
        # Configure the crispy-forms helper on a local first, then publish it.
        helper = FormHelper(self)
        helper.form_tag = False
        helper.form_show_errors = True
        helper.form_class = 'form-horizontal row'
        helper.label_class = 'col-6 col-md-5'
        helper.field_class = 'col-6 col-md-7'
        helper.layout = Layout(
            AppendedText('meal_repeat', 'days', min=7, max=28),
        )
        self.helper = helper
class NewProfileForm(ModelForm):
    """Initial profile-setup form: inline crispy-forms layout with one
    searchable select per weekday."""

    class Meta:
        model = Profile
        exclude = [
            'user', 'profile_setup',
        ]

    def __init__(self, *args, **kwargs):
        super(NewProfileForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_tag = False
        helper.form_show_errors = True
        helper.form_show_labels = True
        helper.form_class = 'form-inline'
        helper.field_template = 'bootstrap4/layout/inline_field.html'
        # Every weekday field gets the same selectpicker widget options.
        weekdays = ('monday', 'tuesday', 'wednesday', 'thursday',
                    'friday', 'saturday', 'sunday')
        day_fields = [
            Field(day, css_class="selectpicker",
                  data_live_search="true", data_size="5")
            for day in weekdays
        ]
        helper.layout = Layout(
            Field('food_order_day'),
            AppendedText('meal_repeat', 'days', min=7, max=28),
            *day_fields
        )
        self.helper = helper
| StarcoderdataPython |
# Prompt until the user answers exactly "y" or "n", then print "pass".
# Fixed here: the original loop condition was
# ``choice != "y" or choice != "n"``, which is True for EVERY input (any
# string differs from at least one of "y"/"n"); it only worked because an
# inner ``if``/``break`` re-tested the condition correctly.  The loop now
# states the intended condition directly.
choice = raw_input('Enjoying the course? (y/n)')
while choice != "y" and choice != "n":
    choice = raw_input("Sorry I didn't catch that. Enter again: ")
print("pass")
# NOTE(review): removed a stray "<gh_stars>1-10" dataset artifact that made
# this file a syntax error; imports regrouped stdlib / third-party / local.
import argparse

import torch

import model
import wgenpatex

# Command-line interface for training a patch-based texture generator.
parser = argparse.ArgumentParser()
parser.add_argument('target_image_path', help='paths of target texture image')
parser.add_argument('-w', '--patch_size', type=int,default=4, help="patch size (default: 4)")
parser.add_argument('-nmax', '--n_iter_max', type=int, default=5000, help="max iterations of the algorithm(default: 5000)")
parser.add_argument('-npsi', '--n_iter_psi', type=int, default=10, help="max iterations for psi (default: 10)")
parser.add_argument('-nin', '--n_patches_in', type=int, default=-1, help="number of patches of the synthetized texture used at each iteration, -1 corresponds to all patches (default: -1)")
parser.add_argument('-nout', '--n_patches_out', type=int, default=2000, help="number maximum of patches of the target texture used, -1 corresponds to all patches (default: 2000)")
parser.add_argument('-sc', '--scales', type=int, default=5, help="number of scales used (default: 5)")
parser.add_argument('--visu', action='store_true', help='show intermediate results')
parser.add_argument('--save', action='store_true', help='save temp results in /tmp folder')
parser.add_argument('--keops', action='store_true', help='use keops package')
args = parser.parse_args()

# Train the generator on the target texture.
generator = wgenpatex.learn_model(args)

# save the texture generator
torch.save(generator.state_dict(), 'generator.pt')

# sample an image and save it
synth_img = model.sample_fake_img(generator, [1, 3, 512, 512], n_samples=1)
wgenpatex.imshow(synth_img)
wgenpatex.imsave('synthesized.png', synth_img)
3458649 | #!/usr/local/bin/python
# <NAME> | 05/29/2018
#|__This script requires Python 3.4 and modules - numpy & scipy
#|__extracts the quality string and determines the length and average quality score of each read
#|__Converts the raw values for each read set into descriptive statistics
#|__Provides descriptive stats for Read Lengths and Read Qualities, number and percentage of reads below Q30 and Ambiguous base counts
#|__Outputs separate tables for different read length buckets (150bp,250bp and 300bp)
# Usage: ./read_length_quality_and_stats_fastq.py
import numpy as np
from scipy.stats import skew,mstats
import glob
import os
import re
# ------------------------------------------ DECLARATIONS AND INITIALIZATIONS ------------------------------------------------#
# Result accumulators.  Each "table row" list starts with its label string;
# one value per input file (R1 then R2 of each pair) is appended as files
# are processed, and the print_* functions emit them tab-separated.
# NOTE(review): average_quality, inserts, insert_sizes, R1/R2/Q1/Q2 and
# r_median/i_median appear to be scratch/unused placeholders in the visible
# code -- confirm before removing.
quality_scores_R1 = []
quality_scores_R2 = []
average_quality = 0
read1_length = []
read2_length = []
inserts = []
insert_sizes = []
countN1 = 0
countN2 = 0
Q1_lt_30 = 0
Q2_lt_30 = 0
R1 = []
R2 = []
Q1 = []
Q2 = []
file1 = []
file2 = []
files_149 = [] #Stores paired read files
files_249 = [] #Stores paired read files
files_299 = [] #Stores paired read files
# Following lists are to store all results for 149bp bucket
N_mean_149 = ["Mean:"]
SD_149 = ["Std_Deviation:"]
Variance_149 = ["Variance"]
median_149 = ["Median"]
Q1_149 = ["1st_Quartile:"]
Q3_149 = ["3rd_Quartile:"]
lwhisker_149 = ["Lower_whisker:"]
hwhisker_149 = ["Upper_Whisker:"]
Skew_149 = ["Skewness:"]
G_mean_149 = ["Geometric_Mean:"]
qual_N_mean_149 = ["Mean:"]
qual_SD_149 = ["Std_Deviation:"]
qual_Variance_149 = ["Variance:"]
qual_median_149 = ["Median:"]
qual_Q1_149 = ["1st_Quartile:"]
qual_Q3_149 = ["3rd_Quartile:"]
qual_lwhisker_149 = ["Lower_whisker:"]
qual_hwhisker_149 = ["Upper_Whisker:"]
qual_skew_149 = ["Skewness:"]
qual_G_mean_149 = ["Geometric_Mean:"]
# Following lists are to store all results for 249bp bucket
N_mean_249 = ["Mean:"]
SD_249 = ["Std_Deviation:"]
Variance_249 = ["Variance"]
median_249 = ["Median"]
Q1_249 = ["1st_Quartile:"]
Q3_249 = ["3rd_Quartile:"]
lwhisker_249 = ["Lower_whisker:"]
hwhisker_249 = ["Upper_Whisker:"]
Skew_249 = ["Skewness:"]
G_mean_249 = ["Geometric_Mean:"]
qual_N_mean_249 = ["Mean:"]
qual_SD_249 = ["Std_Deviation:"]
qual_Variance_249 = ["Variance:"]
qual_median_249 = ["Median:"]
qual_Q1_249 = ["1st_Quartile:"]
qual_Q3_249 = ["3rd_Quartile:"]
qual_lwhisker_249 = ["Lower_whisker:"]
qual_hwhisker_249 = ["Upper_Whisker:"]
qual_skew_249 = ["Skewness:"]
qual_G_mean_249 = ["Geometric_Mean:"]
# Following lists are to store all results for 299bp bucket
N_mean_299 = ["Mean:"]
SD_299 = ["Std_Deviation:"]
Variance_299 = ["Variance"]
median_299 = ["Median"]
Q1_299 = ["1st_Quartile:"]
Q3_299 = ["3rd_Quartile:"]
lwhisker_299 = ["Lower_whisker:"]
hwhisker_299 = ["Upper_Whisker:"]
Skew_299 = ["Skewness:"]
G_mean_299 = ["Geometric_Mean:"]
qual_N_mean_299 = ["Mean:"]
qual_SD_299 = ["Std_Deviation:"]
qual_Variance_299 = ["Variance:"]
qual_median_299 = ["Median:"]
qual_Q1_299 = ["1st_Quartile:"]
qual_Q3_299 = ["3rd_Quartile:"]
qual_lwhisker_299 = ["Lower_Whisker:"]
qual_hwhisker_299 = ["Upper_Whisker:"]
qual_skew_299 = ["Skewness:"]
qual_G_mean_299 = ["Geometric_Mean:"]
total_no_reads_149 = ["Read_count:"]
total_no_reads_249 = ["Read_count:"]
total_no_reads_299 = ["Read_count:"]
qual_lt_30_149 = ["Reads_<_Q30:"]
qual_lt_30_249 = ["Reads_<_Q30:"]
qual_lt_30_299 = ["Reads_<_Q30:"]
perc_qual_lt_30_149 = ["Percentage_reads_<_Q30"]
perc_qual_lt_30_249 = ["Percentage_reads_<_Q30"]
perc_qual_lt_30_299 = ["Percentage_reads_<_Q30"]
ambi_calls_149 = ["Ambiguous_base_calls:"]
ambi_calls_249 = ["Ambiguous_base_calls:"]
ambi_calls_299 = ["Ambiguous_base_calls:"]
R_lt_149 = ["Reads_<_149:"]
R_ge_149 = ["Reads_>=_149:"]
R_lt_249 = ["Reads_<_249:"]
R_ge_249 = ["Reads_>=_249:"]
R_lt_299 = ["Reads_<_299:"]
R_ge_299 = ["Reads_>=_299:"]
r_median = 0
i_median = 0
final_perc_R1_lt_149 = ["%_Reads_<_149:"]
final_perc_R1_ge_149 = ["%_Reads_>=_149:"]
final_perc_R1_lt_249 = ["%_Reads_<_249:"]
final_perc_R1_gt_249 = ["%_Reads_>=_249:"]
final_perc_R1_lt_299 = ["%_Reads_<_299:"]
final_perc_R1_gt_299 = ["%_Reads_>=_299:"]
final_avg_quality_lt_149 = ["Average_Quality_<_149:"]
final_avg_quality_ge_149 = ["Average_Quality_>=_149:"]
final_avg_length_lt_149 = ["Average_Length_<_149"]
final_avg_length_ge_149 = ["Average_Length_>=_149"]
final_avg_quality_lt_249 = ["Average_Quality_<_249:"]
final_avg_quality_ge_249 = ["Average_Quality_>=_249:"]
final_avg_length_lt_249 = ["Average_Length_<_249"]
final_avg_length_ge_249 = ["Average_Length_>=_249"]
final_avg_quality_lt_299 = ["Average_Quality_<_299:"]
final_avg_quality_ge_299 = ["Average_Quality_>=_299:"]
final_avg_length_lt_299 = ["Average_Length_<_299"]
final_avg_length_ge_299 = ["Average_Length_>=_299"]
# ------------------------------------------ FUNCTIONS ------------------------------------------------#
# To parse fastq file
def parseFastq(fastq_infile):
    """Read a FASTQ file and return (sequences, qualities) as parallel lists.

    Header ('@...') and separator ('+') lines are discarded.  Bytes that are
    not valid UTF-8 are ignored rather than raising.
    """
    sequences = []
    qualities = []
    with open(fastq_infile, "r", encoding="utf8", errors='ignore') as handle:
        while True:
            handle.readline()                      # @header line (unused)
            sequence = handle.readline().rstrip()  # base calls
            handle.readline()                      # '+' separator (unused)
            quality = handle.readline().rstrip()   # per-base quality string
            if not sequence:                       # EOF: no further records
                break
            sequences.append(sequence)
            qualities.append(quality)
    return sequences, qualities
# To convert ASCII to quality scores
def phred33toQ(qual):
    """Convert one Phred+33-encoded character to its numeric quality score."""
    PHRED33_OFFSET = 33
    return ord(qual) - PHRED33_OFFSET
# To calculate descriptive stats
def stats(in_array):
    """Return descriptive statistics for a list of numbers, each value
    rounded to the nearest integer.

    Returns a 10-tuple:
        (mean, std_dev, variance, Q1, median, Q3, skewness,
         geometric_mean, low_whisker, high_whisker)

    NOTE(review): the original implementation computed Tukey fences with a
    wrong upper fence (``Q3 - 1.5*IQR`` instead of ``Q3 + 1.5*IQR``) and
    then assigned ``min``/``max`` in BOTH branches of each comparison, so
    the fence code was dead and the whiskers have always been simply
    ``min(in_array)`` and ``max(in_array)``.  The dead, misleading code is
    removed here; the returned values are unchanged (callers rely on
    high_whisker == max read length for bucketing).
    """
    a = np.array(in_array)
    mean = round(a.mean())
    std_dev = round(a.std())
    variance = round(np.var(a))
    Q1 = round(np.percentile(a, 25))
    median = round(np.percentile(a, 50))
    Q3 = round(np.percentile(a, 75))
    skewness = round(skew(a))
    geometric_mean = round(mstats.gmean(a))
    # Whiskers are the observed extremes (see NOTE above).
    low_whisker = round(min(in_array))
    high_whisker = round(max(in_array))
    return (mean, std_dev, variance, Q1, median, Q3, skewness,
            geometric_mean, low_whisker, high_whisker)
# Ambiguous base counts
def countN(seq):
    """Return the total number of ambiguous ('N') base calls across all
    reads in *seq* (case-sensitive, matching the original behaviour)."""
    return sum(read.count("N") for read in seq)
# quality thresholds
def Q30(qual_list):
    """Count entries whose average quality score lies in [0, 30)."""
    return sum(1 for score in qual_list if 0 <= score < 30)
# To get average quality scores for each read1
def qual_score(qual):
    """Return (read_lengths, mean_qualities) for a list of Phred+33
    quality strings -- one length and one float average per string."""
    read_len = []
    quality_scores = []
    for quality_string in qual:
        read_len.append(len(quality_string))
        total = sum(phred33toQ(ch) for ch in quality_string)
        quality_scores.append(total / len(quality_string))
    return read_len, quality_scores
def print_150bp():
    """Print tab-separated length and quality statistic tables for the
    149 bp bucket (module-level accumulator lists)."""
    print("\n\n-----Stats_for_149_bucket---------")
    print('\t', '\t'.join(files_149))
    print("Read_Length_Stats:")
    # Row order matches the original hand-written print sequence.
    length_rows = (
        lwhisker_149, Q1_149, median_149, N_mean_149, G_mean_149,
        Q3_149, hwhisker_149, SD_149, Variance_149, Skew_149,
        total_no_reads_149, R_lt_149, R_ge_149,
        final_perc_R1_lt_149, final_perc_R1_ge_149,
        final_avg_quality_lt_149, final_avg_quality_ge_149,
        final_avg_length_lt_149, final_avg_length_ge_149,
    )
    for row in length_rows:
        print(*row, sep='\t')
    print("\nRead_Quality_Stats:")
    quality_rows = (
        qual_lwhisker_149, qual_Q1_149, qual_median_149, qual_N_mean_149,
        qual_G_mean_149, qual_Q3_149, qual_hwhisker_149, qual_SD_149,
        qual_Variance_149, qual_skew_149, qual_lt_30_149,
        perc_qual_lt_30_149, ambi_calls_149,
    )
    for row in quality_rows:
        print(*row, sep='\t')
def print_250bp():
    """Print tab-separated length and quality statistic tables for the
    249 bp bucket (module-level accumulator lists)."""
    print("\n\n-----Stats_for_249_bucket---------")
    print('\t', '\t'.join(files_249))
    print("Read_Length_Stats:")
    # Row order matches the original hand-written print sequence.
    length_rows = (
        lwhisker_249, Q1_249, median_249, N_mean_249, G_mean_249,
        Q3_249, hwhisker_249, SD_249, Variance_249, Skew_249,
        total_no_reads_249, R_lt_249, R_ge_249,
        final_perc_R1_lt_249, final_perc_R1_gt_249,
        final_avg_quality_lt_249, final_avg_quality_ge_249,
        final_avg_length_lt_249, final_avg_length_ge_249,
    )
    for row in length_rows:
        print(*row, sep='\t')
    print("\nRead_Quality_Stats:")
    quality_rows = (
        qual_lwhisker_249, qual_Q1_249, qual_median_249, qual_N_mean_249,
        qual_G_mean_249, qual_Q3_249, qual_hwhisker_249, qual_SD_249,
        qual_Variance_249, qual_skew_249, qual_lt_30_249,
        perc_qual_lt_30_249, ambi_calls_249,
    )
    for row in quality_rows:
        print(*row, sep='\t')
def print_300bp():
    """Print tab-separated length and quality statistic tables for the
    299 bp bucket (module-level accumulator lists)."""
    print("\n\n-----Stats_for_299_bucket---------")
    print('\t', '\t'.join(files_299))
    print("Read_Length_Stats:")
    # Row order matches the original hand-written print sequence.
    length_rows = (
        lwhisker_299, Q1_299, median_299, N_mean_299, G_mean_299,
        Q3_299, hwhisker_299, SD_299, Variance_299, Skew_299,
        total_no_reads_299, R_lt_299, R_ge_299,
        final_perc_R1_lt_299, final_perc_R1_gt_299,
        final_avg_quality_lt_299, final_avg_quality_ge_299,
        final_avg_length_lt_299, final_avg_length_ge_299,
    )
    for row in length_rows:
        print(*row, sep='\t')
    print("\nRead_Quality_Stats:")
    quality_rows = (
        qual_lwhisker_299, qual_Q1_299, qual_median_299, qual_N_mean_299,
        qual_G_mean_299, qual_Q3_299, qual_hwhisker_299, qual_SD_299,
        qual_Variance_299, qual_skew_299, qual_lt_30_299,
        perc_qual_lt_30_299, ambi_calls_299,
    )
    for row in quality_rows:
        print(*row, sep='\t')
# ---------------------------------------------------- MAIN ----------------------------------------------------------------- #
# Collect R1/R2 FASTQ files from the working directory; sorting both lists
# keeps mates of a pair aligned at the same index.
file1 = sorted(
    name for name in os.listdir('.')
    if re.match('.*_R1.*.fastq$|.*_1.fastq$', name))
file2 = sorted(
    name for name in os.listdir('.')
    if re.match('.*_R2.*.*fastq$|.*_2.fastq$', name))
for f1,f2 in zip(file1,file2):
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
# average quality scores for each read: function call
read1_length,quality_scores_R1 = qual_score(quals1)
read2_length,quality_scores_R2 = qual_score(quals2)
# Descriptive stats for read1 length: function call (getting the median for both R1 and R2)
mean1,stdDev1,var1,Q1_1,r_median,Q3_1,skew1,gmean1,lwhisker1,hwhisker1 = stats(read1_length)
mean2,stdDev2,var2,Q1_2,i_median,Q3_2,skew2,gmean2,lwhisker2,hwhisker2 = stats(read2_length)
# Result lists
if(hwhisker1 >= 149 and hwhisker1 <= 152 and hwhisker2 >= 149 and hwhisker2 <= 152):
files_149.extend((f1,f2))
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
total_no_reads_149.extend((read_count1,read_count2)) # read count
# average quality scores for each read: function call
read1_length,quality_scores_R1 = qual_score(quals1)
read2_length,quality_scores_R2 = qual_score(quals2)
R1_lt_149 = 0
R1_ge_149 = 0
R2_lt_149 = 0
R2_ge_149 = 0
tot_len1_ge_149 = 0
tot_len1_lt_149 = 0
tot_len2_lt_149 = 0
tot_len2_ge_149 = 0
for x in read1_length:
if(x < 149):
R1_lt_149 += 1
tot_len1_lt_149 += x
elif(x >= 149):
R1_ge_149 += 1
tot_len1_ge_149 += x
for x in read2_length:
if(x < 149):
R2_lt_149 += 1
tot_len2_lt_149 += x
elif(x >= 149):
R2_ge_149 += 1
tot_len2_ge_149 += x
R_lt_149.extend((R1_lt_149,R2_lt_149))
R_ge_149.extend((R1_ge_149,R2_ge_149))
# quality threshold function call: function call
Q1_lt_30 = Q30(quality_scores_R1)
Q2_lt_30 = Q30(quality_scores_R2)
qual_lt_30_149.extend((Q1_lt_30,Q2_lt_30))
percent_reads_lt_30_R1 = Q1_lt_30/read_count1 * 100
percent_reads_lt_30_R2 = Q2_lt_30/read_count2 * 100
# rounding off
percent_reads_lt_30_R1 = round(percent_reads_lt_30_R1)
percent_reads_lt_30_R2 = round(percent_reads_lt_30_R2)
perc_qual_lt_30_149.extend((percent_reads_lt_30_R1,percent_reads_lt_30_R2))
# Ambiguous base function call: function call
countN1 = countN(seqs1)
countN2 = countN(seqs2)
ambi_calls_149.extend((countN1,countN2))
# Descriptive stats for read1 length: function call
r_mean,r_stdDev,r_var,r_Q1,r_median,r_Q3,r_skew,r_gmean,r_lwhisker,r_hwhisker = stats(read1_length)
i_mean,i_stdDev,i_var,i_Q1,i_median,i_Q3,i_skew,i_gmean,i_lwhisker,i_hwhisker = stats(read2_length)
N_mean_149.extend((r_mean,i_mean))
SD_149.extend((r_stdDev,i_stdDev))
Variance_149.extend((r_var,i_var))
median_149.extend((r_median,i_median))
Q1_149.extend((r_Q1,i_Q1))
Q3_149.extend((r_Q3,i_Q3))
lwhisker_149.extend((r_lwhisker,i_lwhisker))
hwhisker_149.extend((r_hwhisker,i_hwhisker))
Skew_149.extend((r_skew,i_skew))
G_mean_149.extend((r_gmean,i_gmean))
# Descriptive stats for Q1 quality: function call
q_mean,q_stdDev,q_var,q_Q1,q_median,q_Q3,q_skew,q_gmean,q_lwhisker,q_hwhisker = stats(quality_scores_R1)
s_mean,s_stdDev,s_var,s_Q1,s_median,s_Q3,s_skew,s_gmean,s_lwhisker,s_hwhisker = stats(quality_scores_R2)
qual_N_mean_149.extend((q_mean,s_mean))
qual_SD_149.extend((q_stdDev,s_stdDev))
qual_Variance_149.extend((q_var,s_var))
qual_median_149.extend((q_median,s_median))
qual_Q1_149.extend((q_Q1,s_Q1))
qual_Q3_149.extend((q_Q3,s_Q3))
qual_lwhisker_149.extend((q_lwhisker,s_lwhisker))
qual_hwhisker_149.extend((q_hwhisker,s_hwhisker))
qual_skew_149.extend((q_skew,s_skew))
qual_G_mean_149.extend((q_gmean,s_gmean))
# Calculating percent reads above and below 149
perc_R1_lt_149 = (R1_lt_149/read_count1) * 100
perc_R1_ge_149 = (R1_ge_149/read_count1) * 100
perc_R2_lt_149 = (R2_lt_149/read_count2) * 100
perc_R2_ge_149 = (R2_ge_149/read_count2) * 100
# rounding off
perc_R1_lt_149 = round(perc_R1_lt_149)
perc_R1_ge_149 = round(perc_R1_ge_149)
perc_R2_lt_149 = round(perc_R2_lt_149)
perc_R2_ge_149 = round(perc_R2_ge_149)
final_perc_R1_lt_149.extend((perc_R1_lt_149,perc_R2_lt_149))
final_perc_R1_ge_149.extend((perc_R1_ge_149,perc_R2_ge_149))
# Average Quality score calculation
avg_quality_1_le_149 = 0
avg_quality_1_gt_149 = 0
avg_quality_2_le_149 = 0
avg_quality_2_gt_149 = 0
avg_length_1_le_149 = 0
avg_length_1_gt_149 = 0
avg_length_2_le_149 = 0
avg_length_2_gt_149 = 0
tot_qual1_lt_149 = 0
tot_qual1_ge_149 = 0
tot_qual2_lt_149 = 0
tot_qual2_ge_149 = 0
for l,q in zip(read1_length,quality_scores_R1):
if(l < 149): # for lengths le 149
tot_qual1_lt_149 += q
elif(l >= 149):
tot_qual1_ge_149 += q
for l,q in zip(read2_length,quality_scores_R2):
if(l < 149): # for lengths le 149
tot_qual2_lt_149 += q
elif(l >= 149):
tot_qual2_ge_149 += q
if(R1_lt_149 == 0 and R2_lt_149 == 0):
avg_quality_1_le_149 = 0
avg_quality_2_le_149 = 0
avg_quality_1_gt_149 = tot_qual1_ge_149 / R1_ge_149
avg_quality_2_gt_149 = tot_qual2_ge_149 / R2_ge_149
elif(R1_ge_149 == 0 and R2_ge_149 == 0):
avg_quality_1_le_149 = tot_qual1_lt_149 / R1_lt_149
avg_quality_2_le_149 = tot_qual2_lt_149 / R2_lt_149
avg_quality_1_gt_149 = 0
avg_quality_2_gt_149 = 0
else:
avg_quality_1_le_149 = tot_qual1_lt_149 / R1_lt_149
avg_quality_2_le_149 = tot_qual2_lt_149 / R2_lt_149
avg_quality_1_gt_149 = tot_qual1_ge_149 / R1_ge_149
avg_quality_2_gt_149 = tot_qual2_ge_149 / R2_ge_149
# rounding off
avg_quality_1_le_149 = round(avg_quality_1_le_149)
avg_quality_1_gt_149 = round(avg_quality_1_gt_149)
avg_quality_2_le_149 = round(avg_quality_2_le_149)
avg_quality_2_gt_149 = round(avg_quality_2_gt_149)
final_avg_quality_lt_149.extend((avg_quality_1_le_149,avg_quality_2_le_149))
final_avg_quality_ge_149.extend((avg_quality_1_gt_149,avg_quality_2_gt_149))
# Calculating average length of reads above and below 149
if(R1_lt_149 == 0 and R2_lt_149 == 0):
avg_length_1_le_149 = 0
avg_length_1_gt_149 = tot_len1_ge_149/R1_ge_149
avg_length_2_le_149 = 0
avg_length_2_gt_149 = tot_len2_ge_149/R2_ge_149
elif(R1_ge_149 == 0 and R2_ge_149 == 0):
avg_length_1_le_149 = tot_len1_lt_149/R1_lt_149
avg_length_1_gt_149 = 0
avg_length_2_le_149 = tot_len2_lt_149/R2_lt_149
avg_length_2_gt_149 = 0
else:
avg_length_1_le_149 = tot_len1_lt_149/R1_lt_149
avg_length_1_gt_149 = tot_len1_ge_149/R1_ge_149
avg_length_2_le_149 = tot_len2_lt_149/R2_lt_149
avg_length_2_gt_149 = tot_len2_ge_149/R2_ge_149
# rounding off
avg_length_1_le_149 = round(avg_length_1_le_149)
avg_length_1_gt_149 = round(avg_length_1_gt_149)
avg_length_2_le_149 = round(avg_length_2_le_149)
avg_length_2_gt_149 = round(avg_length_2_gt_149)
final_avg_length_lt_149.extend((avg_length_1_le_149,avg_length_2_le_149))
final_avg_length_ge_149.extend((avg_length_1_gt_149,avg_length_2_gt_149))
elif(hwhisker1 >= 249 and hwhisker1 <= 252 and hwhisker2 >= 249 and hwhisker2 <= 252 ):
files_249.extend((f1,f2))
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
total_no_reads_249.extend((read_count1,read_count2))
# average quality scores for each read: function call
read1_length,quality_scores_R1 = qual_score(quals1)
read2_length,quality_scores_R2 = qual_score(quals2)
R1_lt_249 = 0
R1_ge_249 = 0
R2_lt_249 = 0
R2_ge_249 = 0
tot_len1_lt_249 = 0
tot_len1_ge_249 = 0
tot_len2_lt_249 = 0
tot_len2_ge_249 = 0
for x in read1_length:
if(x < 249):
R1_lt_249 += 1
tot_len1_lt_249 += x
elif(x >= 249):
R1_ge_249 += 1
tot_len1_ge_249 += x
for x in read2_length:
if(x < 249):
R2_lt_249 += 1
tot_len2_lt_249 += x
elif(x >= 249):
R2_ge_249 += 1
tot_len2_ge_249 += x
R_lt_249.extend((R1_lt_249,R2_lt_249))
R_ge_249.extend((R1_ge_249,R2_ge_249))
# quality threshold function call: function call
Q1_lt_30 = Q30(quality_scores_R1)
Q2_lt_30 = Q30(quality_scores_R2)
qual_lt_30_249.extend((Q1_lt_30,Q2_lt_30))
percent_reads_lt_30_R1 = Q1_lt_30/read_count1 * 100
percent_reads_lt_30_R2 = Q2_lt_30/read_count2 * 100
# rounding off
percent_reads_lt_30_R1 = round(percent_reads_lt_30_R1)
percent_reads_lt_30_R2 = round(percent_reads_lt_30_R2)
perc_qual_lt_30_249.extend((percent_reads_lt_30_R1,percent_reads_lt_30_R2))
# Ambiguous base function call: function call
countN1 = countN(seqs1)
countN2 = countN(seqs2)
ambi_calls_249.extend((countN1,countN2))
# Descriptive stats for read1 length: function call
r_mean,r_stdDev,r_var,r_Q1,r_median,r_Q3,r_skew,r_gmean,r_lwhisker,r_hwhisker = stats(read1_length)
i_mean,i_stdDev,i_var,i_Q1,i_median,i_Q3,i_skew,i_gmean,i_lwhisker,i_hwhisker = stats(read2_length)
N_mean_249.extend((r_mean,i_mean))
SD_249.extend((r_stdDev,i_stdDev))
Variance_249.extend((r_var,i_var))
median_249.extend((r_median,i_median))
Q1_249.extend((r_Q1,i_Q1))
Q3_249.extend((r_Q3,i_Q3))
lwhisker_249.extend((r_lwhisker,i_lwhisker))
hwhisker_249.extend((r_hwhisker,i_hwhisker))
Skew_249.extend((r_skew,i_skew))
G_mean_249.extend((r_gmean,i_gmean))
# Descriptive stats for Q1 quality: function call
q_mean,q_stdDev,q_var,q_Q1,q_median,q_Q3,q_skew,q_gmean,q_lwhisker,q_hwhisker = stats(quality_scores_R1)
s_mean,s_stdDev,s_var,s_Q1,s_median,s_Q3,s_skew,s_gmean,s_lwhisker,s_hwhisker = stats(quality_scores_R2)
qual_N_mean_249.extend((q_mean,s_mean))
qual_SD_249.extend((q_stdDev,s_stdDev))
qual_Variance_249.extend((q_var,s_var))
qual_median_249.extend((q_median,s_median))
qual_Q1_249.extend((q_Q1,s_Q1))
qual_Q3_249.extend((q_Q3,s_Q3))
qual_lwhisker_249.extend((q_lwhisker,s_lwhisker))
qual_hwhisker_249.extend((q_hwhisker,s_hwhisker))
qual_skew_249.extend((q_skew,s_skew))
qual_G_mean_249.extend((q_gmean,s_gmean))
perc_R1_lt_249 = (R1_lt_249/read_count1) * 100
perc_R1_gt_249 = (R1_ge_249/read_count1) * 100
perc_R2_lt_249 = (R2_lt_249/read_count2) * 100
perc_R2_gt_249 = (R2_ge_249/read_count2) * 100
# rounding off
perc_R1_lt_249 = round(perc_R1_lt_249)
perc_R1_gt_249 = round(perc_R1_gt_249)
perc_R2_lt_249 = round(perc_R2_lt_249)
perc_R2_gt_249 = round(perc_R2_gt_249)
final_perc_R1_lt_249.extend((perc_R1_lt_249,perc_R2_lt_249))
final_perc_R1_gt_249.extend((perc_R1_gt_249,perc_R2_gt_249))
# Average Quality score calculation
avg_quality_1_le_249 = 0
avg_quality_1_gt_249 = 0
avg_quality_2_le_249 = 0
avg_quality_2_gt_249 = 0
avg_length_1_le_249 = 0
avg_length_1_gt_249 = 0
avg_length_2_le_249 = 0
avg_length_2_gt_249 = 0
tot_qual1_lt_249 = 0
tot_qual1_ge_249 = 0
tot_qual2_lt_249 = 0
tot_qual2_ge_249 = 0
for l,q in zip(read1_length,quality_scores_R1):
if(l < 249): # for lengths le 249
tot_qual1_lt_249 += q
elif(l >= 249):
tot_qual1_ge_249 += q
for l,q in zip(read2_length,quality_scores_R2):
if(l < 249): # for lengths le 249
tot_qual2_lt_249 += q
elif(l >= 249):
tot_qual2_ge_249 += q
# Average quality per bucket
if(R1_lt_249 == 0 and R2_lt_249 == 0):
avg_quality_1_le_249 = 0 # if there are no reads less than 251
avg_quality_1_gt_249 = tot_qual1_ge_249 / R1_ge_249
avg_quality_2_le_249 = 0 # if there are no reads less than 251
avg_quality_2_gt_249 = tot_qual2_ge_249 / R2_ge_249
elif(R1_ge_249 == 0 and R2_ge_249 == 0):
avg_quality_1_le_249 = tot_qual1_lt_249 / R1_lt_249
avg_quality_1_gt_249 = 0
avg_quality_2_le_249 = tot_qual2_lt_249 / R2_lt_249
avg_quality_2_gt_249 = 0
else:
avg_quality_1_le_249 = tot_qual1_lt_249 / R1_lt_249
avg_quality_1_gt_249 = tot_qual1_ge_249 / R1_ge_249
avg_quality_2_le_249 = tot_qual2_lt_249 / R2_lt_249
avg_quality_2_gt_249 = tot_qual2_ge_249 / R2_ge_249
# rounding off
avg_quality_1_le_249 = round(avg_quality_1_le_249)
avg_quality_1_gt_249 = round(avg_quality_1_gt_249)
avg_quality_2_le_249 = round(avg_quality_2_le_249)
avg_quality_2_gt_249 = round(avg_quality_2_gt_249)
final_avg_quality_lt_249.extend((avg_quality_1_le_249,avg_quality_2_le_249))
final_avg_quality_ge_249.extend((avg_quality_1_gt_249,avg_quality_2_gt_249))
if(R1_lt_249 == 0 and R2_lt_249 == 0):
avg_length_1_le_249 = 0
avg_length_1_gt_249 = tot_len1_ge_249 / R1_ge_249
avg_length_2_le_249 = 0
avg_length_2_gt_249 = tot_len2_ge_249 / R2_ge_249
elif(R1_ge_249 == 0 and R2_ge_249 == 0):
avg_length_1_le_249 = tot_len1_lt_249 / R1_lt_249
avg_length_1_gt_249 = 0
avg_length_2_le_249 = tot_len2_lt_249 / R2_lt_249
avg_length_2_gt_249 = 0
else:
avg_length_1_le_249 = tot_len1_lt_249 / R1_lt_249
avg_length_1_gt_249 = tot_len1_ge_249 / R1_ge_249
avg_length_2_le_249 = tot_len2_lt_249 / R2_lt_249
avg_length_2_gt_249 = tot_len2_ge_249 / R2_ge_249
# rounding off
avg_length_1_le_249 = round(avg_length_1_le_249)
avg_length_1_gt_249 = round(avg_length_1_gt_249)
avg_length_2_le_249 = round(avg_length_2_le_249)
avg_length_2_gt_249 = round(avg_length_2_gt_249)
final_avg_length_lt_249.extend((avg_length_1_le_249,avg_length_2_le_249))
final_avg_length_ge_249.extend((avg_length_1_gt_249,avg_length_2_gt_249))
else:
files_299.extend((f1,f2))
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
total_no_reads_299.extend((read_count1,read_count2))
# average quality scores for each read: function call
read1_length,quality_scores_R1 = qual_score(quals1)
read2_length,quality_scores_R2 = qual_score(quals2)
R1_lt_299 = 0
R1_ge_299 = 0
R2_lt_299 = 0
R2_ge_299 = 0
tot_len1_lt_299 = 0
tot_len1_ge_299 = 0
tot_len2_lt_299 = 0
tot_len2_ge_299 = 0
for x in read1_length:
if(x < 299):
R1_lt_299 += 1
tot_len1_lt_299 += x
elif(x >= 299):
R1_ge_299 += 1
tot_len1_ge_299 += x
for x in read2_length:
if(x < 299):
R2_lt_299 += 1
tot_len2_lt_299 += x
elif(x >= 299):
R2_ge_299 += 1
tot_len2_ge_299 += x
R_lt_299.extend((R1_lt_299,R2_lt_299))
R_ge_299.extend((R1_ge_299,R2_ge_299))
# quality threshold function call: function call
Q1_lt_30 = Q30(quality_scores_R1)
Q2_lt_30 = Q30(quality_scores_R2)
qual_lt_30_299.extend((Q1_lt_30,Q2_lt_30))
percent_reads_lt_30_R1 = Q1_lt_30/len(seqs1) * 100
percent_reads_lt_30_R2 = Q2_lt_30/len(seqs2) * 100
# rounding off
percent_reads_lt_30_R1 = round(percent_reads_lt_30_R1)
percent_reads_lt_30_R2 = round(percent_reads_lt_30_R2)
perc_qual_lt_30_299.extend((percent_reads_lt_30_R1,percent_reads_lt_30_R2))
# Ambiguous base function call: function call
countN1 = countN(seqs1)
countN2 = countN(seqs2)
ambi_calls_299.extend((countN1,countN2))
# Descriptive stats for read1 length: function call
r_mean,r_stdDev,r_var,r_Q1,r_median,r_Q3,r_skew,r_gmean,r_lwhisker,r_hwhisker = stats(read1_length)
i_mean,i_stdDev,i_var,i_Q1,i_median,i_Q3,i_skew,i_gmean,i_lwhisker,i_hwhisker = stats(read2_length)
N_mean_299.extend((r_mean,i_mean))
SD_299.extend((r_stdDev,i_stdDev))
Variance_299.extend((r_var,i_var))
median_299.extend((r_median,i_median))
Q1_299.extend((r_Q1,i_Q1))
Q3_299.extend((r_Q3,i_Q3))
lwhisker_299.extend((r_lwhisker,i_lwhisker))
hwhisker_299.extend((r_hwhisker,i_hwhisker))
Skew_299.extend((r_skew,i_skew))
G_mean_299.extend((r_gmean,i_gmean))
# Descriptive stats for Q1 quality: function call
q_mean,q_stdDev,q_var,q_Q1,q_median,q_Q3,q_skew,q_gmean,q_lwhisker,q_hwhisker = stats(quality_scores_R1)
s_mean,s_stdDev,s_var,s_Q1,s_median,s_Q3,s_skew,s_gmean,s_lwhisker,s_hwhisker = stats(quality_scores_R2)
qual_N_mean_299.extend((q_mean,s_mean))
qual_SD_299.extend((q_stdDev,s_stdDev))
qual_Variance_299.extend((q_var,s_var))
qual_median_299.extend((q_median,s_median))
qual_Q1_299.extend((q_Q1,s_Q1))
qual_Q3_299.extend((q_Q3,s_Q3))
qual_lwhisker_299.extend((q_lwhisker,s_lwhisker))
qual_hwhisker_299.extend((q_hwhisker,s_hwhisker))
qual_skew_299.extend((q_skew,s_skew))
qual_G_mean_299.extend((q_gmean,s_gmean))
perc_R1_lt_299 = (R1_lt_299/read_count1) * 100
perc_R1_gt_299 = (R1_ge_299/read_count1) * 100
perc_R2_lt_299 = (R2_lt_299/read_count2) * 100
perc_R2_gt_299 = (R2_ge_299/read_count2) * 100
# rounding off
perc_R1_lt_299 = round(perc_R1_lt_299)
perc_R1_gt_299 = round(perc_R1_gt_299)
perc_R2_lt_299 = round(perc_R2_lt_299)
perc_R2_gt_299 = round(perc_R2_gt_299)
final_perc_R1_lt_299.extend((perc_R1_lt_299,perc_R2_lt_299))
final_perc_R1_gt_299.extend((perc_R1_gt_299,perc_R2_gt_299))
#header.append("\n\n-----Stats for 299 bucket---------")
avg_quality_1_le_299 = 0
avg_quality_1_gt_299 = 0
avg_quality_2_le_299 = 0
avg_quality_2_gt_299 = 0
avg_length_1_le_299 = 0
avg_length_1_gt_299 = 0
avg_length_2_le_299 = 0
avg_length_2_gt_299 = 0
tot_qual1_lt_299 = 0
tot_qual1_ge_299 = 0
tot_qual2_lt_299 = 0
tot_qual2_ge_299 = 0
for l,q in zip(read1_length,quality_scores_R1):
if(l <= 299): # for lengths le 249
tot_qual1_lt_299 += q
elif(l > 299):
tot_qual1_ge_299 += q
for l,q in zip(read2_length,quality_scores_R2):
if(l <= 299): # for lengths le 249
tot_qual2_lt_299 += q
elif(l > 299):
tot_qual2_ge_299 += q
if(R1_lt_299 == 0 and R2_lt_299 == 0):
avg_quality_1_le_299 = 0
avg_quality_1_gt_299 = tot_qual1_ge_299 / R1_ge_299
avg_quality_2_le_299 = 0
avg_quality_2_gt_299 = tot_qual2_ge_299 / R2_ge_299
elif(R1_ge_299 == 0 and R2_ge_299 == 0):
avg_quality_1_le_299 = tot_qual1_lt_299 / R1_lt_299
avg_quality_1_gt_299 = 0
avg_quality_2_le_299 = tot_qual2_lt_299 / R2_lt_299
avg_quality_2_gt_299 = 0
else:
avg_quality_1_le_299 = tot_qual1_lt_299 / R1_lt_299
avg_quality_1_gt_299 = tot_qual1_ge_299 / R1_ge_299
avg_quality_2_le_299 = tot_qual2_lt_299 / R2_lt_299
avg_quality_2_gt_299 = tot_qual2_ge_299 / R2_ge_299
# rounding off upto 5 decimal places
avg_quality_1_le_299 = round(avg_quality_1_le_299)
avg_quality_1_gt_299 = round(avg_quality_1_gt_299)
avg_quality_2_le_299 = round(avg_quality_2_le_299)
avg_quality_2_gt_299 = round(avg_quality_2_gt_299)
final_avg_quality_lt_299.extend((avg_quality_1_le_299,avg_quality_2_le_299))
final_avg_quality_ge_299.extend((avg_quality_1_gt_299,avg_quality_2_gt_299))
if(R1_lt_299 == 0 and R2_lt_299 == 0):
avg_length_1_le_299 = 0
avg_length_1_gt_299 = tot_len1_ge_299 / R1_ge_299
avg_length_2_le_299 = 0
avg_length_2_gt_299 = tot_len2_ge_299 / R2_ge_299
elif(R1_ge_299 == 0 and R2_ge_299 == 0):
avg_length_1_le_299 = tot_len1_lt_299 / R1_lt_299
avg_length_1_gt_299 = 0
avg_length_2_le_299 = tot_len2_lt_299 / R2_lt_299
avg_length_2_gt_299 = 0
else:
avg_length_1_le_299 = tot_len1_lt_299 / R1_lt_299
avg_length_1_gt_299 = tot_len1_ge_299 / R1_ge_299
avg_length_2_le_299 = tot_len2_lt_299 / R2_lt_299
avg_length_2_gt_299 = tot_len2_ge_299 / R2_ge_299
# rounding off
avg_length_1_le_299 = round(avg_length_1_le_299)
avg_length_1_gt_299 = round(avg_length_1_gt_299)
avg_length_2_le_299 = round(avg_length_2_le_299)
avg_length_2_gt_299 = round(avg_length_2_gt_299)
final_avg_length_lt_299.extend((avg_length_1_le_299,avg_length_2_le_299))
final_avg_length_ge_299.extend((avg_length_1_gt_299,avg_length_2_gt_299))
# Emit the per-bucket summary reports built up above (150/250/300 bp buckets).
# NOTE(review): the enclosing scope of these calls is not visible here —
# presumably module level, after the file-pair loop finishes; confirm.
print_150bp()
print_250bp()
print_300bp()
| StarcoderdataPython |
9643432 | <reponame>meganbkratz/neuroanalysis<gh_stars>1-10
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph.parametertree as pt
from scipy.ndimage import gaussian_filter
from ..filter import remove_artifacts, bessel_filter
class SignalFilter(QtCore.QObject):
    """A user-configurable signal filter.

    Wraps a Bessel low/high-pass filter behind a checkable parameter tree
    (enable flag, cutoff frequency, filter order, passband).

    todo: turn this into a flowchart-based filter
    """
    def __init__(self):
        # Fix: QObject subclasses must initialize the Qt base class;
        # the original skipped this, which Qt's object machinery
        # (signals/slots, parenting) requires.
        QtCore.QObject.__init__(self)
        self.params = pt.Parameter.create(name='filter', type='bool', value=True, children=[
            #{'name': 'filter type', 'type': 'list', 'values': ['bessel', 'butterworth']},
            {'name': 'cutoff', 'type': 'float', 'value': 1000, 'step': 0.5, 'minStep': 1e-3, 'dec': True, 'limits': [0, None], 'suffix': 'Hz', 'siPrefix': True},
            {'name': 'order', 'type': 'int', 'value': 4, 'step': 1, 'limits': [1, None]},
            {'name': 'passband', 'type': 'list', 'values': ['low', 'high']},
        ])

    def process(self, trace):
        """Return *trace* filtered per the current parameters.

        When the top-level 'filter' checkbox is off, the trace is returned
        unchanged.
        """
        if self.params.value() is False:
            return trace
        return bessel_filter(trace, cutoff=self.params['cutoff'], order=self.params['order'], btype=self.params['passband'])
class ArtifactRemover(QtCore.QObject):
    """Removes artifacts from a trace around known event times.

    Parameters
    ----------
    user_width : bool
        If True, expose the artifact 'width' parameter to the user and
        expand each bare event time into a (start, end) window of that
        width before removal.
    """
    def __init__(self, user_width=False):
        # Fix: initialize the QObject base class (required by Qt;
        # the original omitted this call).
        QtCore.QObject.__init__(self)
        self.params = pt.Parameter.create(name='remove artifacts', type='bool', value=True, children=[
            {'name': 'width', 'type': 'float', 'value': 200e-6, 'step': 1e-5, 'limits': [0, None], 'suffix': 's', 'siPrefix': True, 'visible': user_width},
            {'name': 'fill window', 'type': 'float', 'value': 100e-6, 'step': 1e-5, 'limits': [0, None], 'suffix': 's', 'siPrefix': True},
        ])
        self._user_width = user_width

    def process(self, trace, edges):
        """Return *trace* with artifacts removed around *edges*; no-op when disabled.

        *edges* is a sequence of event times, or of (start, end) pairs when
        ``user_width`` is False.
        """
        if self.params.value() is False:
            return trace
        if self._user_width:
            # Convert the configured width from seconds to samples and
            # expand each event time into a removal window.
            w = self.params['width'] / trace.dt
            edges = [(t, t + w) for t in edges]
        return remove_artifacts(trace, edges, self.params['fill window'])
3371879 | from capitolweb.settings import *
# Override the Elasticsearch index name from the base settings —
# presumably so test runs never touch the production index; confirm.
ES_CW_INDEX = 'test-index'
| StarcoderdataPython |
8111267 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import pytest
import numpy as np
import vineyard
from vineyard.core import default_builder_context, default_resolver_context
from vineyard.data import register_builtin_types
# Register vineyard's builtin type builders/resolvers so put()/get()
# below can round-trip pandas/numpy objects.
register_builtin_types(default_builder_context, default_resolver_context)
def test_pandas_dataframe(vineyard_client):
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]})
object_id = vineyard_client.put(df)
pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
def test_pandas_dataframe_int_columns(vineyard_client):
df = pd.DataFrame({1: [1, 2, 3, 4], 2: [5, 6, 7, 8]})
object_id = vineyard_client.put(df)
pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
def test_pandas_dataframe_mixed_columns(vineyard_client):
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8], 1: [9, 10, 11, 12], 2: [13, 14, 15, 16]})
object_id = vineyard_client.put(df)
pd.testing.assert_frame_equal(df, vineyard_client.get(object_id))
def test_dataframe_reindex(vineyard_client):
df = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
expected = df.reindex(index=np.arange(10, 1, -1))
object_id = vineyard_client.put(expected)
pd.testing.assert_frame_equal(expected, vineyard_client.get(object_id))
def test_dataframe_set_index(vineyard_client):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
expected = df1.set_index('y', drop=True)
object_id = vineyard_client.put(expected)
pd.testing.assert_frame_equal(expected, vineyard_client.get(object_id))
def test_sparse_array(vineyard_client):
arr = np.random.randn(10)
arr[2:5] = np.nan
arr[7:8] = np.nan
sparr = pd.arrays.SparseArray(arr)
object_id = vineyard_client.put(sparr)
pd.testing.assert_extension_array_equal(sparr, vineyard_client.get(object_id))
def test_dataframe_with_sparse_array(vineyard_client):
    """Store a sparse-dtype DataFrame; expect the dense frame back.

    NOTE: the sparse frame is put, but the *dense* original is compared
    against the result — this mirrors the original test and presumably
    relies on vineyard densifying on retrieval.
    """
    dense = pd.DataFrame(np.random.randn(100, 4), columns=['x', 'y', 'z', 'a'])
    dense.iloc[:98] = np.nan
    sparse = dense.astype(pd.SparseDtype("float", np.nan))
    object_id = vineyard_client.put(sparse)
    pd.testing.assert_frame_equal(dense, vineyard_client.get(object_id))
def test_dataframe_with_sparse_array_int_columns(vineyard_client):
    """Same sparse round-trip, with integer column labels."""
    dense = pd.DataFrame(np.random.randn(100, 4), columns=[1, 2, 3, 4])
    dense.iloc[:98] = np.nan
    sparse = dense.astype(pd.SparseDtype("float", np.nan))
    object_id = vineyard_client.put(sparse)
    pd.testing.assert_frame_equal(dense, vineyard_client.get(object_id))
def test_dataframe_with_sparse_array_mixed_columns(vineyard_client):
    """Same sparse round-trip, with mixed string/integer column labels."""
    dense = pd.DataFrame(np.random.randn(100, 4), columns=['x', 'y', 'z', 0])
    dense.iloc[:98] = np.nan
    sparse = dense.astype(pd.SparseDtype("float", np.nan))
    object_id = vineyard_client.put(sparse)
    pd.testing.assert_frame_equal(dense, vineyard_client.get(object_id))
| StarcoderdataPython |
8125248 | from geosolver.diagram.computational_geometry import distance_between_points, midpoint, cartesian_angle, \
signed_distance_between_cartesian_angles, arc_midpoint, line_length, arc_length
from geosolver.ontology.instantiator_definitions import instantiators
import numpy as np
__author__ = 'minjoon'
def label_distance_to_line(label_point, line, is_length):
    """
    Distance from a label to a line: always the distance to the midpoint;
    for non-length labels, also consider the two endpoints, each biased by
    the reciprocal line length so longer lines win ties near shared points.

    :param label_point:
    :param line:
    :param is_length:
    :return:
    """
    mid = midpoint(line.a, line.b)
    mid_dist = distance_between_points(label_point, mid)
    if is_length:
        return mid_dist
    # Penalize endpoint matches by 1/length: favors the longer line when
    # the label sits near a point shared by several lines.
    endpoint_bias = 1.0 / line_length(line)
    via_a = distance_between_points(label_point, line.a) + endpoint_bias
    via_b = distance_between_points(label_point, line.b) + endpoint_bias
    return min(mid_dist, via_a, via_b)
def label_distance_to_arc(label_point, arc):
    """Distance from a label to an arc, measured via the central angle it subtends."""
    central_angle = instantiators['angle'](arc.a, arc.circle.center, arc.b)
    return label_distance_to_angle(label_point, central_angle)
def label_distance_to_angle(label_point, angle):
    """
    Distance from a label to an angle (vertex angle.b, rays to angle.a / angle.c).

    If the label lies outside the angle's sector (or far from its bisector),
    the returned distance is effectively infinite.

    :param label_point:
    :param angle:
    :return:
    """
    # Cartesian bearings from the vertex to the first ray, the label, and the second ray.
    caa = cartesian_angle(angle.b, angle.a)
    cam = cartesian_angle(angle.b, label_point)
    cac = cartesian_angle(angle.b, angle.c)
    # Signed angular offsets from the first ray to the label and to the second ray.
    dm = signed_distance_between_cartesian_angles(caa, cam)
    dc = signed_distance_between_cartesian_angles(caa, cac)
    # Bearing of the angle's bisector, wrapped into [0, 2*pi).
    cav = caa + dc/2.0
    if cav > 2*np.pi:
        cav -= 2*np.pi
    # Angular distance between the label bearing and the bisector
    # (min of the two directions handles wrap-around).
    cad = min(signed_distance_between_cartesian_angles(cam, cav), signed_distance_between_cartesian_angles(cav, cam))
    dist = distance_between_points(label_point, angle.b)
    # print cad, cam, cav, dc, dm, cac, caa, dist*(1+cad+dc)
    # Accept only labels inside the sector (dc > dm) and close to the
    # bisector (within 35% of the half-opening); otherwise effectively reject.
    if dc > dm and cad < 0.35*dc:
        return dist*(1+cad+dc)
    else:
        return 1000*dist # effectively infinite
def label_distance_to_point(label_point, point):
    """Distance from a label to a point: plain Euclidean distance."""
    return distance_between_points(label_point, point)
1683298 | <filename>captain/tlinject.py
import hmac
import hashlib
import base64
import binascii
import os
import time
import json
import sys
from datetime import datetime
import asyncpg
from tornado.web import RequestHandler
from .dispatch import route
from . import pageutils
def batches(iterable, groups_of=500):
    """Yield *iterable* in consecutive slices of at most *groups_of* items.

    A sequence no longer than *groups_of* is yielded whole (as-is);
    otherwise slices are produced until the sequence is exhausted.
    """
    if len(iterable) <= groups_of:
        yield iterable
        return
    for start in range(0, len(iterable), groups_of):
        yield iterable[start:start + groups_of]
class TLInjectContext(object):
    """Postgres-backed store for crowd-sourced UI string translations.

    Submissions are authenticated with an HMAC over the string key using a
    shared secret (AS_TLINJECT_SECRET). A full submission history lives in
    tlinject_v1; the latest value per (language, key) is kept in
    tlinject_cache_v1 for fast lookup.
    """
    def __init__(self, coordinator):
        # Owns the asyncpg connection pool used by all queries below.
        self.coordinator = coordinator
        # Shared HMAC secret; submitters must prove knowledge of it.
        # NOTE(review): os.environ.get returns None when unset, which would
        # crash on .encode here — presumably the variable is always set.
        self.secret = os.environ.get("AS_TLINJECT_SECRET").encode("utf8")
        # Populated by init_models() from tlinject_languages_v1.
        self.supported_languages = set()

    async def init_models(self):
        """Create the backing tables (idempotent) and cache the language list."""
        async with self.coordinator.pool.acquire() as c:
            async with c.transaction():
                await c.execute(
                    """
                    CREATE TABLE IF NOT EXISTS tlinject_v1 (
                        langid varchar(8),
                        key text,
                        translated text,
                        sender varchar(48),
                        ts timestamp
                    );
                    CREATE TABLE IF NOT EXISTS tlinject_cache_v1 (
                        langid varchar(8),
                        key text,
                        translated text,
                        PRIMARY KEY (langid, key)
                    );
                    CREATE TABLE IF NOT EXISTS tlinject_languages_v1 (
                        langid varchar(8) primary key
                    );
                    INSERT INTO tlinject_languages_v1 VALUES ('en') ON CONFLICT DO NOTHING;
                    """
                )
            self.supported_languages = set(
                row[0] for row in await c.fetch("SELECT langid FROM tlinject_languages_v1")
            )

    def is_language_valid(self, langid):
        """Return True if *langid* is one of the configured languages."""
        return langid in self.supported_languages

    def is_assr_valid(self, key, assr):
        """Check the submitted assurance value: a truncated (12-byte)
        HMAC-SHA224 of the key under the shared secret, compared in
        constant time."""
        calc_assr = hmac.new(self.secret, key.encode("utf8"), hashlib.sha224).digest()[:12]
        return hmac.compare_digest(assr, calc_assr)

    async def write_string(self, lang, key, string, sender):
        """Record a submission and upsert the cache.

        *string* may be None — the cache entry is then cleared (reads skip
        NULL translations). Returns (status, message); status 0 is success.
        """
        if lang not in self.supported_languages:
            return (1, "Translations for this language are not accepted at this time.")

        if string is not None:
            string = string.strip()
            if len(string) < 2:
                return (1, "Please submit a longer string.")

        async with self.coordinator.pool.acquire() as c, c.transaction():
            now = datetime.utcnow()
            # Append-only audit row...
            await c.execute(
                "INSERT INTO tlinject_v1 VALUES($1, $2, $3, $4, $5)",
                lang,
                key,
                string,
                sender,
                now,
            )
            # ...plus latest-value cache upsert.
            await c.execute(
                "INSERT INTO tlinject_cache_v1 VALUES ($1, $2, $3) ON CONFLICT (langid, key) DO UPDATE SET translated = excluded.translated",
                lang,
                key,
                string,
            )
        return (0, "OK")

    async def read_strings(self, lang, sset):
        """Look up translations for the keys in *sset*.

        Queries in batches (see batches()) to bound parameter counts.
        Returns (status, dict-of-key->translation); keys without a (non-NULL)
        translation are simply absent from the result.
        """
        if not sset:
            return (0, {})

        if not self.is_language_valid(lang):
            return (1, "Translations for this language are not available at this time.")

        ret = {}
        async with self.coordinator.pool.acquire() as c:
            for page in batches(list(sset)):
                # Placeholders $2..$(len+1); $1 is the language id.
                params = ",".join(f"${i}" for i in range(2, 2 + len(page)))
                q = f"""
                    SELECT key, translated FROM tlinject_cache_v1
                    WHERE langid = $1 AND key IN ({params}) AND translated IS NOT NULL"""
                strings = await c.fetch(q, lang, *page)
                for record in strings:
                    ret[record["key"]] = record["translated"]

        return (0, ret)
@route(r"/api/private/tlinject/bootstrap.json")
class TLInjectBootstrapAPI(RequestHandler):
    """Expose the list of supported translation languages to clients."""

    def get(self):
        langs = self.settings["tlinject_context"].supported_languages
        # Fix: the source is a set, and sorting only on the "is it en?" flag
        # left the remaining languages in nondeterministic iteration order.
        # Sort "en" first, then the rest alphabetically, for stable output.
        ordered = sorted(langs, key=lambda x: (x != "en", x))
        self.write({"result": {"languages": ordered}})
@route(r"/api/private/tlinject/([a-z_A-Z]+)/strings.json")
class TLInjectReadAPI(RequestHandler):
    """Bulk lookup: POST a JSON list of keys, receive their translations."""

    PER_REQUEST_HARD_LIMIT = 500

    async def post(self, lang):
        context = self.settings["tlinject_context"]
        if not context.is_language_valid(lang):
            self.set_status(400)
            self.write({"error": "Translations for this language are not available at this time."})
            return

        # Parse failures and non-list payloads get the same rejection.
        try:
            requested = json.loads(self.request.body.decode("utf8"))
        except ValueError:
            requested = None
        if not isinstance(requested, list):
            self.set_status(400)
            self.write({"error": "Input must be provided as a JSON list."})
            return

        wanted = set(requested)
        if len(wanted) > self.PER_REQUEST_HARD_LIMIT:
            self.set_status(400)
            self.write({"error": "Request fewer strings."})
            return

        error, translations = await context.read_strings(lang, wanted)
        if error != 0:
            self.set_status(400)
            self.write({"error": translations})
        else:
            self.set_status(200)
            self.write({"results": translations})
@route(r"/api/private/tlinject/([a-z_A-Z]+)/submit.json")
class TLInjectWriteAPI(RequestHandler):
    """Accept a translation submission authenticated by an HMAC assurance value."""

    async def post(self, lang):
        try:
            load = json.loads(self.request.body.decode("utf8"))
        except ValueError:
            self.set_status(400)
            self.write({"error": "Input must be provided as a JSON dict."})
            return
        if not isinstance(load, dict):
            self.set_status(400)
            self.write({"error": "Input must be provided as a JSON dict."})
            return

        key = load.get("key")
        assr = load.get("assr")
        # "translated" may legitimately be None: write_string treats that
        # as clearing the cached translation.
        tstring = load.get("translated")
        if not all((key, assr)):
            # Fix: the original neither set a 4xx status nor returned here,
            # so execution fell through to base64-decoding a None assurance
            # value and wrote a second (misleading) error response.
            self.set_status(400)
            self.write({"error": "You're missing input."})
            return

        try:
            assr = base64.urlsafe_b64decode(assr)
        except (ValueError, TypeError):
            self.set_status(400)
            self.write({"error": "The assurance value is invalid."})
            return

        if not self.settings["tlinject_context"].is_assr_valid(key, assr):
            self.set_status(400)
            self.write({"error": "The assurance value is invalid."})
            return

        status, message = await self.settings["tlinject_context"].write_string(
            lang, key, tstring, self.request.remote_ip
        )
        if status != 0:
            self.set_status(400)
            self.write({"error": message})
        else:
            self.set_status(200)
            self.write({"results": {key: tstring}})
97337 | __source__ = 'https://leetcode.com/problems/guess-number-higher-or-lower-ii/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/guess-number-higher-or-lower-ii.py
# Time: O(n^2)
# Space: O(n^2)
#
# Description: Leetcode # 375. Guess Number Higher or Lower II
#
# We are playing the Guess Game. The game is as follows:
#
# I pick a number from 1 to n. You have to guess which number I picked.
#
# Every time you guess wrong, I'll tell you whether the number I picked is higher or lower.
#
# However, when you guess a particular number x, and you guess wrong,
# you pay $x. You win the game when you guess the number I picked.
#
# Example:
#
# n = 10, I pick 8.
#
# First round: You guess 5, I tell you that it's higher. You pay $5.
# Second round: You guess 7, I tell you that it's higher. You pay $7.
# Third round: You guess 9, I tell you that it's lower. You pay $9.
#
# Game over. 8 is the number I picked.
#
# You end up paying $5 + $7 + $9 = $21.
# Given a particular n >= 1, find out how much money you need to have to guarantee a win.
#
# Hint:
#
# The best strategy to play the game is to minimize the maximum loss
# you could possibly face. Another strategy is to minimize the expected loss.
# Here, we are interested in the first scenario.
# Take a small example (n = 3). What do you end up paying in the worst case?
# Check out this article if you're still stuck.
# The purely recursive implementation of minimax would be worthless
# for even a small n. You MUST use dynamic programming.
# As a follow-up, how would you modify your code to solve the problem of
# minimizing the expected loss, instead of the worst-case loss?
#
# Companies
# Google
# Related Topics
# Dynamic Programming Minimax
# Similar Questions
# Flip Game II Guess Number Higher or Lower Can I Win Find K Closest Elements
#
import unittest
# 468ms 68.49%
class Solution(object):
    def getMoneyAmount(self, n):
        """
        Minimax cost to guarantee a win at guess-the-number on 1..n.

        Interval DP: pay[i][j] is the worst-case cost to win when the answer
        lies in the 0-indexed interval [i, j] (i.e. numbers i+1 .. j+1).
        Guessing k costs k+1 plus the worse of the two remaining halves.

        Fixes: xrange (Python-2-only) replaced by range, and the implicit
        reliance on pay[i][-1] wrapping to a still-zero cell replaced by an
        explicit empty-interval guard.

        :type n: int
        :rtype: int
        """
        # Extra row so pay[k+1][j] is always in range when k == j.
        pay = [[0] * n for _ in range(n + 1)]
        for i in reversed(range(n)):
            for j in range(i + 1, n):
                pay[i][j] = min(
                    k + 1 + max(pay[i][k - 1] if k > i else 0, pay[k + 1][j])
                    for k in range(i, j + 1)
                )
        return pay[0][n - 1]
class TestMethods(unittest.TestCase):
    # Placeholder test so the file is runnable as a test module.
    def test_Local(self):
        # Trivial sanity assertion only; no solution-specific checks here.
        self.assertEqual(1, 1)
# Run the unit tests above when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/guess-number-higher-or-lower-ii/solution/
# 7ms 76.45%
class Solution {
public int getMoneyAmount(int n) {
int[][] table = new int[n+1][n+1];
return DP(table, 1, n);
}
int DP(int[][] t, int s, int e){
if(s >= e) return 0;
if(t[s][e] != 0) return t[s][e];
int res = Integer.MAX_VALUE;
for(int x=s; x<=e; x++){
int tmp = x + Math.max(DP(t, s, x-1), DP(t, x+1, e));
res = Math.min(res, tmp);
}
t[s][e] = res;
return res;
}
}
# 5ms 95.53%
class Solution {
public int getMoneyAmount(int n) {
int[][] dp = new int[n + 1][n + 1];
for (int len = 2; len <= n; len++) {
for (int start = 1; start <= n - len + 1; start++) {
int minres = Integer.MAX_VALUE;
for (int piv = start; piv < start + len - 1; piv++) {
int res = piv + Math.max(dp[start][piv - 1], dp[piv + 1][start + len - 1]);
minres = Math.min(res, minres);
}
dp[start][start + len - 1] = minres;
}
}
return dp[1][n];
}
}
# DFS
# 1ms 99.49%
class Solution {
int[][] dp;
public int getMoneyAmount(int n) {
dp = new int[n + 1][n + 1];
return helper(1, n);
}
private int helper(int start, int end) {
if (dp[start][end] != 0) {
return dp[start][end];
}
if (start >= end) {
return 0;
}
if (start >= end - 2) {
return dp[start][end] = end - 1;
}
int mid = (start + end) / 2 - 1, min = Integer.MAX_VALUE;
while (mid < end) {
int left = helper(start, mid - 1);
int right = helper(mid + 1, end);
min = Math.min(min, mid + Math.max(left, right));
if (right <= left) break;
mid++;
}
return dp[start][end] = min;
}
}
''' | StarcoderdataPython |
3293903 | <filename>lib/galaxy/model/migrate/versions/0098_genome_index_tool_data_table.py
"""
Migration script to create the genome_index_tool_data table.
"""
from __future__ import print_function
import datetime
import logging
import sys
from sqlalchemy import Column, DateTime, ForeignKey, Integer, MetaData, String, Table
from galaxy.model.migrate.versions.util import create_table, drop_table
# Timestamp factory used for the created/modified column defaults below.
now = datetime.datetime.utcnow

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# Mirror migration logging to stdout so it appears in upgrade output.
handler = logging.StreamHandler(sys.stdout)
format = "%(name)s %(levelname)s %(asctime)s %(message)s"  # NOTE: shadows the builtin 'format'
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
log.addHandler(handler)

metadata = MetaData()

# New table in changeset TODO:TODO
# Tracks genome-index build jobs: links a job/dataset to its deferred and
# transfer jobs, the FASTA being indexed, which indexer ran, and timestamps.
GenomeIndexToolData_table = Table("genome_index_tool_data", metadata,
                                  Column("id", Integer, primary_key=True),
                                  Column("job_id", Integer, ForeignKey("job.id"), index=True),
                                  Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
                                  Column("deferred_job_id", Integer, ForeignKey("deferred_job.id"), index=True),
                                  Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True),
                                  Column("fasta_path", String(255)),
                                  Column("created_time", DateTime, default=now),
                                  Column("modified_time", DateTime, default=now, onupdate=now),
                                  Column("indexer", String(64)),
                                  Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
def upgrade(migrate_engine):
    """Apply the migration: create the genome_index_tool_data table."""
    print(__doc__)
    metadata.bind = migrate_engine
    # Load the existing schema so foreign-key targets resolve.
    metadata.reflect()
    create_table(GenomeIndexToolData_table)
def downgrade(migrate_engine):
    """Revert the migration: drop the genome_index_tool_data table."""
    metadata.bind = migrate_engine
    metadata.reflect()
    drop_table(GenomeIndexToolData_table)
| StarcoderdataPython |
3475098 | <gh_stars>0
import random
import time
from datetime import datetime
from typing import Dict
from urllib.parse import urlsplit
import requests
# from fake_useragent import UserAgent
from requests import Response
class RequestsTimeout:
    """Timeout constants (seconds) passed to requests as a (connect, read) tuple."""
    CONNECTION_TIMEOUT = 300
    READ_TIMEOUT = 300
    TIMEOUT_TUPLE = (CONNECTION_TIMEOUT, READ_TIMEOUT)
class Throttle:
    """Rate-limits downloads per domain by sleeping between requests."""

    def __init__(self, delay=60):
        # amount of delay between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}

    def wait(self, url):
        """ Delay if have accessed this domain recently"""
        domain = urlsplit(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            # Fix: use total_seconds() — the .seconds attribute ignores the
            # days component of a timedelta, so a gap of e.g. 1 day + 2 s
            # looked like 2 s and caused a spurious sleep.
            elapsed = (datetime.now() - last_accessed).total_seconds()
            sleep_secs = self.delay - elapsed
            print("Sleeping for: ", sleep_secs)
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()
# class WebScraperUtility:
# def __init__(self, delay=5):
# self.headers = []
# self.throttle = Throttle(delay=delay)
#
# def get_useragent(self):
# ua = UserAgent()
# self.headers = [ua.chrome, ua.google,
# ua['google chrome'], ua.firefox, ua.ff]
# return {'User-Agent': random.choice(self.headers)}
#
# def wait(self, url):
# """ Delay if have accessed this domain recently"""
# self.throttle.wait(url)
class RequestMaker:
    """Thin retry/proxy wrapper around requests.get / requests.post."""

    def __init__(self, delay=10):
        # Seconds to sleep between retries after a request exception.
        self.delay = delay
        # self.scrape_utility = WebScraperUtility(delay=delay)
        # Proxy mapping for requests; None until activate_proxy() is called.
        self.proxy_dict = None

    def _make_request(self, url, request_method, parameters, retry):
        """Issue the request up to *retry* times; return the 200 response.

        Raises ValueError on 404, or after exhausting retries (with the
        last status / exception message).
        """
        i = 0
        # headers = self.scrape_utility.get_useragent()
        headers = {}
        status_message = None
        request_parameters = {'proxies': self.proxy_dict,
                              'timeout': RequestsTimeout.TIMEOUT_TUPLE}
        request_parameters = {**parameters, **request_parameters}
        while i < retry:
            i += 1
            try:
                # self.scrape_utility.wait(url)
                page = request_method(url, headers=headers, **request_parameters)
                status_message = 'Status: {status}, {reason} for URL: {url}'.format(status=page.status_code,
                                                                                   reason=page.reason, url=url)
                if page.status_code == 200:
                    return page
                elif page.status_code == 404:
                    # Not retryable; propagates past the except below.
                    raise ValueError(status_message)
                # Other non-200 statuses fall through and retry immediately
                # (no sleep on this path — only after an exception).
                print(" Status code :{status}, Retrying ...".format(status=page.status_code))
            except requests.exceptions.RequestException as e:
                print(e)
                time.sleep(self.delay)
                print("Retrying")
                print(url)
                status_message = 'RequestException {reason} for URL: {url}'.format(reason=e, url=url)
        # NOTE(review): indentation reconstructed — this raise is placed
        # after the retry loop (exhausted retries); confirm against the
        # original file.
        raise ValueError(status_message)

    def get_request(self, url: str, params=None, retry=5) -> Response:
        """GET *url* with optional query params, retrying up to *retry* times."""
        request_method = requests.get
        request_parameters = {'params': params}
        return self._make_request(url, request_method, parameters=request_parameters, retry=retry)

    def post_request(self, url: str, json=None, retry=5) -> Response:
        """POST *url* with an optional JSON body, retrying up to *retry* times."""
        request_method = requests.post
        request_parameters = {'json': json}
        return self._make_request(url, request_method, parameters=request_parameters, retry=retry)

    def activate_proxy(self, host, port, username, password) -> Dict:
        """Route subsequent requests through an authenticated HTTP proxy."""
        proxy = 'http://{}:{}@{}:{}'.format(username, password, host, port)
        self.proxy_dict = {'http': proxy,
                           'https': proxy}
        return self.proxy_dict
6514926 | <gh_stars>0
from django.contrib import messages
from django.utils.translation import ugettext_lazy
# Color names used by generate_msg() below to select a Django message level.
GRAY = 'gray'
GREEN = 'green'
BLUE = 'blue'
YELLOW = 'yellow'
RED = 'red'
# Translatable banner titles.
# NOTE(review): ugettext_lazy was removed in Django 4.0 in favor of
# gettext_lazy — confirm which Django version this targets.
SUCCESS = ugettext_lazy('Success')
ERROR = ugettext_lazy('Error')
WARNING = ugettext_lazy('We are sorry')
# Catalog of user-facing strings: 'title' holds HTTP-status headings,
# 'body' holds situation-specific messages.
# NOTE(review): the catalog mixes English and Spanish source strings —
# confirm this is intentional for the i18n workflow.
errors_list = {
    'title': {
        '400': ugettext_lazy('Bad Request'),
        '401': ugettext_lazy('Unauthorized'),
        '403': ugettext_lazy('Forbidden'),
        '404': ugettext_lazy('Not Found'),
        '405': ugettext_lazy('Method not allowed'),
        '406': ugettext_lazy('Not Acceptable'),
        '409': ugettext_lazy('Conflict'),
        '415': ugettext_lazy('Unsupported Media Type'),
        '500': ugettext_lazy('Internal Server Error'),
        '503': ugettext_lazy('Service Unavailable'),
        # NOTE(review): not wrapped in ugettext_lazy, unlike its siblings —
        # this entry will never be translated; confirm intended.
        'sorry': 'Lo sentimos',
    },
    'body': {
        'no_action': ugettext_lazy(
            'We are sorry. You are trying to execute an unknown'
            ' or prohibited action.'),
        'incorrect_method': ugettext_lazy(
            'We are sorry. You are trying to execute an'
            ' action with an incorrect header or'
            ' method.'),
        'missing_step': ugettext_lazy(
            'We are sorry. You must complete the previous'
            ' steps to execute this action.'),
        'bad_login': ugettext_lazy(
            'The user/password combination is invalid. Please'
            ' try again.'),
        'no_perm': ugettext_lazy(
            'You do not have enough permissions to execute this'
            ' action. If this is a mistake, please contact the'
            ' administrator.'),
        'ticket': ugettext_lazy(
            'An internal error has occurred. The administrator has'
            ' been notified.'),
        'encryption': ugettext_lazy(
            'La combinación de cer, key y contraseña es incorrecta.'
            ' Los cambios en estos 3 campos se han descartado.'),
        'verification_pending': ugettext_lazy(
            'Aún no ha verificado su correo electrónico.'),
        'verification_error': ugettext_lazy(
            'El correo no existe en el sistema o ya fue'
            ' verificado anteriormente.'),
        'email_inc_exists': ugettext_lazy(
            'El correo electrónico está en un formato'
            ' invalido o ya existe en el sistema'),
        'no_user_exists': ugettext_lazy(
            'El usuario no existe en el sistema.'),
        'not_in_team': ugettext_lazy(
            'Usted no pertenece a este negocio.'),
        'no_team': ugettext_lazy(
            'El negocio no existe en el sistema.'),
        'team_exists': ugettext_lazy(
            'El negocio ya existe en el sistema.'),
        'already_claimed': ugettext_lazy(
            'Esta invitación ya fue procesada anteriormente.'),
        'already_in_team': ugettext_lazy(
            'El usuario ya se encuentra en este negocio.'),
        'invalid_data': ugettext_lazy(
            'La información es incorrecta.'),
    }
}
def generate_msg(request, state=GRAY, title='Debug', body='Empty'):
    """Attach a Django message to *request*, mapping a color name to a level.

    Unknown *state* values map to a level of None, exactly as before.
    """
    level_by_color = {
        GRAY: messages.DEBUG,
        GREEN: messages.SUCCESS,
        BLUE: messages.INFO,
        YELLOW: messages.WARNING,
        RED: messages.ERROR,
    }
    mtype = level_by_color.get(state)
    messages.add_message(
        request,
        mtype,
        extra_tags=title,
        message=body,
        fail_silently=True
    )
def _parseToJSON(message):
    """Flatten a Django message object into a plain JSON-serializable dict."""
    return dict(
        level=message.level_tag,
        tags=message.extra_tags,
        body=message.message,
    )
def getMessagesJSON(request):
    """Drain the Django messages storage for *request* and return the
    messages as a list of JSON-serializable dicts (same order as stored)."""
    storage = messages.get_messages(request)
    # Comprehension replaces the manual append loop (same items, same order).
    return [_parseToJSON(message) for message in storage]
| StarcoderdataPython |
5051162 | <gh_stars>0
#!/usr/bin/env python3
# This file is Copyright (c) 2020 <NAME> <<EMAIL>>
# License: BSD
import os
import argparse
import sys
from migen import *
from migen.genlib.misc import WaitTimer
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import colorlight_5a_75b
from litex.soc.cores.clock import *
from litex.soc.cores.spi_flash import ECP5SPIFlash
from litex.soc.cores.gpio import GPIOOut
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from liteeth.phy.ecp5rgmii import LiteEthPHYRGMII
from litex.build.generic_platform import *
from litex.boards.platforms import genesys2
from litex.soc.interconnect.csr import *
# IOs ----------------------------------------------------------------------------------------------
# Two 3.3 V single-ended GPIO pins on connector J4, exposed to the SoC as
# "gpio" resources 0 and 1 (requested in ColorLite below).
_gpios = [
    ("gpio", 0, Pins("j4:0"), IOStandard("LVCMOS33")),
    ("gpio", 1, Pins("j4:1"), IOStandard("LVCMOS33")),
]
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
    """Clock/reset generator: derives the system clock domain from the
    board's 25 MHz oscillator via the ECP5 PLL, with reset taken from the
    active-low user button."""
    def __init__(self, platform, sys_clk_freq):
        self.clock_domains.cd_sys = ClockDomain()
        # # #
        # Clk / Rst
        clk25 = platform.request("clk25")
        rst_n = platform.request("user_btn_n", 0)
        platform.add_period_constraint(clk25, 1e9/25e6)  # constrain clk25 to 25 MHz (40 ns period)
        # PLL: multiply the 25 MHz input up to the requested system frequency
        self.submodules.pll = pll = ECP5PLL()
        self.comb += pll.reset.eq(~rst_n)  # button pressed (low) holds the PLL in reset
        pll.register_clkin(clk25, 25e6)
        pll.create_clkout(self.cd_sys, sys_clk_freq)
class DAC(Module, AutoCSR):
    """Basic first order sigma-delta DAC running at sys clock"""
    def __init__(self):
        # Host-writable 16-bit setpoint exposed over CSR
        self.dacval = CSRStorage(16, description="DAC value")
        # Modulator input sample (combinationally driven from dacval below)
        self.inp = Signal((16))
        # 1-bit pulse-density-modulated output stream
        self.out = Signal()
        ###
        # 17-bit accumulator: the low 16 bits integrate the input each cycle
        # and the MSB (carry-out) becomes the 1-bit output
        accu = Signal(17)
        self.comb += self.inp.eq(self.dacval.storage)
        self.sync += [
            accu.eq(accu[:-1] + self.inp),
            self.out.eq(accu[-1])
        ]
# ColorLite ----------------------------------------------------------------------------------------
class ColorLite(SoCMini, AutoCSR):
    """Minimal SoC for the Colorlight 5A-75B (revision 7.0): an optional
    Etherbone bridge for remote CSR access, an SPI-flash controller, a small
    'pulsegen' RAM, and a 1-bit sigma-delta DAC driven out of a J4 GPIO.

    ``mac_address`` is expected to be an int (see main(), which parses it
    with int(..., 0)); ``ip_address`` is passed through to add_etherbone.
    """
    def __init__(self, with_etherbone=True, ip_address=None, mac_address=None):
        platform = colorlight_5a_75b.Platform(revision="7.0")
        sys_clk_freq = int(125e6)
        # SoCMini ----------------------------------------------------------------------------------
        SoCMini.__init__(self, platform, clk_freq=sys_clk_freq)
        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)
        # Etherbone --------------------------------------------------------------------------------
        if with_etherbone:
            self.submodules.ethphy = LiteEthPHYRGMII(
                clock_pads = self.platform.request("eth_clocks"),
                pads = self.platform.request("eth"))
            self.add_csr("ethphy")
            self.add_etherbone(
                phy = self.ethphy,
                ip_address = ip_address,
                mac_address = mac_address,
            )
        # SPIFlash ---------------------------------------------------------------------------------
        self.submodules.spiflash = ECP5SPIFlash(
            pads = platform.request("spiflash"),
            sys_clk_freq = sys_clk_freq,
            spi_clk_freq = 5e6,
        )
        self.add_csr("spiflash")
        # GPIOs ------------------------------------------------------------------------------------
        platform.add_extension(_gpios)
        # Led --------------------------------------------------------------------------------------
        #self.submodules.led = GPIOOut(platform.request("user_led_n"))
        #self.add_csr("led")
        # Pulsegen RAM -----------------------------------------------------------------------------
        # 1 KiB of SoC-addressable RAM; a second read port is wired below.
        self.add_ram("pgen_ram", 0x10000000, 1024)
        port = self.pgen_ram.mem.get_port()
        self.specials += port
        # DAC --------------------------------------------------------------------------------------
        self.submodules.dac = DAC()
        outp = platform.request("gpio", 1)
        led = platform.request("user_led_n")
        # NOTE(review): the RAM read address is driven by the DAC input value
        # and the LED mirrors bit 0 of the RAM output — confirm this debug
        # wiring is intended.
        self.comb += [
            port.adr.eq(self.dac.inp),
            outp.eq(self.dac.out),
            led.eq(port.dat_r[0]),
            #led.eq(self.dac.inp[0]),
        ]
        self.add_csr("dac")
# Build -------------------------------------------------------------------------------------------
def main():
    """Command-line entry point: build, load, and/or flash the ColorLite bitstream."""
    parser = argparse.ArgumentParser(description="pulsegen test using LiteX and colorlite")
    parser.add_argument("--build", action="store_true", help="build bitstream")
    parser.add_argument("--load", action="store_true", help="load bitstream")
    parser.add_argument("--flash", action="store_true", help="flash bitstream")
    parser.add_argument("--ip-address", default="192.168.1.20", help="Ethernet IP address of the board.")
    parser.add_argument("--mac-address", default="0x726b895bc2e2", help="Ethernet MAC address of the board.")
    args = parser.parse_args()
    soc = ColorLite(ip_address=args.ip_address, mac_address=int(args.mac_address, 0))
    builder = Builder(soc, output_dir="build", csr_csv="csr.csv")
    # run=args.build: sources are always generated; the toolchain only runs with --build
    builder.build(build_name="pulsegen", run=args.build)
    if args.load:
        prog = soc.platform.create_programmer()
        # NOTE(review): relies on soc.build_name existing; "pulsegen" was
        # passed to builder.build() above, not to the SoC — confirm.
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".svf"))
    if args.flash:
        prog = soc.platform.create_programmer()
        # NOTE(review): shell commands assume a POSIX environment and that
        # bit_to_flash.py exists and is executable in the working directory.
        os.system("cp bit_to_flash.py build/gateware/")
        os.system("cd build/gateware && ./bit_to_flash.py pulsegen.bit pulsegen.svf.flash")
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".svf.flash"))
if __name__ == "__main__":
    main()
| StarcoderdataPython |
11367893 | <filename>vespid/pipeline.py
# -*- coding: utf-8 -*-
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import timedelta
from time import time
from joblib import dump as dump_obj, load as load_obj
from copy import copy, deepcopy
import os
import ray
import pathlib
from vespid import get_current_datetime
from vespid import setup_logger, get_memory_usage
logger = setup_logger(__name__)
class Stage():
    '''
    A single step of a Pipeline: a function plus the keyword arguments it
    will be called with. If you want to use the output of stage N in any
    stage other than N+1, instantiate the Stage outside of a Pipeline (with
    ``cache_stage_output=True``) so that it may be referenced after it is
    first run in the Pipeline.

    Example:
        s = Stage(...)
        s.execute(...)
        s.get_results()  # returns the cached output, if caching was enabled
    '''

    def __init__(
        self,
        name,
        function,
        cache_stage_output=False,
        **kwargs
    ):
        '''
        Parameters
        ----------
        name: str. Allows for name-based access to this Stage when it is used
            in a Pipeline.

        function: callable. Defines the processing to be done with
            the input(s) and what should be output.

        cache_stage_output: bool. If True, the output of this Stage is kept
            in memory after execution so that later stages (but not
            necessarily only the very next stage) can access it via
            ``get_results()``.

        kwargs: keyword arguments passed to ``function``. Each must be of
            the form `<keyword>=(<value>, <is_stage_name>)`, wherein
            <is_stage_name> is a boolean indicating whether <value> is the
            string name of another Stage (whose cached output should be
            substituted in at execution time, typically wired up by a
            Pipeline) or just a plain value.
        '''
        self.name = name
        self._function = function
        self._cache_stage_output = cache_stage_output

        # Error-check the 2-tuples of kwargs: a kwarg flagged as a Stage
        # reference must, at construction time, be that Stage's string name.
        for k, (v, is_stage) in kwargs.items():
            if is_stage and not isinstance(v, str):
                raise ValueError("kwargs that refer to other Stages "
                                 "must be names of type str. "
                                 f"Received {type(v)} for kwarg '{k}' "
                                 "instead")

        self._kwargs = kwargs
        self._output = None
        # BUGFIX: _executed was previously only assigned inside execute(),
        # so get_results() raised AttributeError when called before the
        # Stage had been run.
        self._executed = False
        self._execution_time = None
        self._initialization_time = get_current_datetime()
        self._memory_percentage_used_start = None
        self._memory_percentage_used_end = None

    def is_output_cached(self):
        '''
        Determines if a Stage's output is being/will be stored in memory.

        Returns
        -------
        bool
            True if the output is/will be cached, False otherwise.
        '''
        return self._cache_stage_output

    def execute(self, input=None):
        '''
        Execute the Stage's assigned function given the input provided
        either as an arg or the kwargs of the Stage itself.

        Parameters
        ----------
        input : Any
            First positional argument given to the wrapped function, in
            addition to the kwargs. Only kwargs are provided if this is None.

        Returns
        -------
        Output type of the wrapped function.
        '''
        kwargs = {}
        start_time = time()

        # Record memory pressure at the start, for track_stages() reporting
        self._memory_percentage_used_start = get_memory_usage(logger)

        # If any of our kwargs are Stage objects (wired up by a Pipeline),
        # substitute their cached output in their place. This lets inter-
        # Stage dependencies be declared before any Stage has run.
        for k, (v, is_stage) in self._kwargs.items():
            if isinstance(v, type(self)) and is_stage:
                kwargs[k] = v._output
            elif not isinstance(v, type(self)) and is_stage:
                raise ValueError(f"kwarg '{k}' expected to be of type Stage, "
                                 f"but got {type(v)} instead")
            else:
                kwargs[k] = v

        if input is not None:
            output = self._function(input, **kwargs)
        else:
            output = self._function(**kwargs)

        if self._cache_stage_output:
            self._output = output

        self._execution_time = timedelta(seconds=time() - start_time)
        logger.info(f"Stage {self.name} took {self._execution_time} to execute")
        self._memory_percentage_used_end = get_memory_usage(logger)
        self._executed = True
        return output

    def get_results(self):
        '''
        Return the cached output of the Stage.

        Returns
        -------
        Whatever the wrapped function returned when execute() ran (which may
        legitimately be None).

        Raises
        ------
        RuntimeError
            If the Stage is not caching its output, or has not yet been run.
        '''
        if not self._cache_stage_output:
            raise RuntimeError("This Stage has no output because "
                               "cache_stage_output is False")
        if not self._executed:
            raise RuntimeError("This Stage has no output because it has not "
                               "yet been run. Please run Stage.execute()")
        # BUGFIX: previously a cached output that was legitimately None fell
        # through to an "unclear reason" error; return it instead.
        return self._output

    def __str__(self):
        return f"Stage called {self.name} with inputs {str(self.__dict__)}"

    # Note: __repr__ is deliberately not defined; defining it as str(self)
    # caused errors when a Stage was part of another iterable (e.g. list or
    # dict) in the original implementation.

    def use_preceding_input(self):
        '''
        Flags the Stage as being one expecting the output of the immediately
        preceding Stage in a Pipeline as its input.

        Returns
        -------
        PipelineFlag
            PipelineFlag with this Stage stored, telling the Pipeline how to
            execute it.
        '''
        return PipelineFlag('use_preceding_input', stage=self)

    @classmethod
    def load_stage_results_from_disk(cls, filename):
        '''
        Load a previously pickled Stage result from ``filename``.
        '''
        # BUGFIX: the log message previously printed a literal "(unknown)"
        # placeholder instead of the file being loaded.
        logger.info(f"Loading Stage from disk: {filename}... ")
        stage_result = load_obj(filename)
        return stage_result
class PipelineFlag():
    '''
    Lightweight control-signal object for Pipelines. An instance marks
    something a Pipeline must treat specially — e.g. a Stage that should
    receive the preceding Stage's output, or an instruction to stop the
    Pipeline early and return a value.
    '''

    def __init__(self, type, value=None, stage=None):
        '''
        Parameters
        ----------
        type : str
            Kind of flag; one of:
            * 'use_preceding_input': the Pipeline caches the previous
              Stage's output and feeds it into this flag's Stage as its
              first input.
            * 'cancel_pipeline': the Pipeline stops executing and
              `Pipeline.run()` returns `value`.
        value : Any, optional
            Payload associated with this flag, default None.
        stage : Stage, optional
            Stage this flag decorates with extra execution info, if any,
            default None.
        '''
        if type not in ('use_preceding_input', 'cancel_pipeline'):
            raise ValueError(f"`type` of {type} not supported")
        self.type = type
        self.value = value
        self.stage = stage

    @property
    def name(self):
        # Mirror the wrapped Stage's name; flags without a Stage have none.
        return None if self.stage is None else self.stage.name

    def __str__(self):
        pieces = [f"Flag of type '{self.type}'"]
        if self.value is not None:
            pieces.append(f" having value {self.value}")
        if self.stage is not None:
            pieces.append(f" referring to a Stage called '{self.stage.name}'")
        return ''.join(pieces)

    def __repr__(self):
        return self.__str__()
class Pipeline():
    '''
    Class for creating linear data pipelines using arbitrary inputs and
    functions for each step/stage.
    '''

    def __init__(
        self,
        stages,
        save_stages=False,
        **first_stage_kwargs,
    ):
        '''
        Parameters
        ----------
        stages: list of Stage and possibly PipelineFlag objects.
            NOTE: the Pipeline stores a shallow copy of each element, so
            avoid embedding large objects (e.g. big arrays) as Stage kwargs.

        save_stages: bool indicating whether a pickled and compressed copy
            of each stage's output is saved to disk. If True, compressed
            *.joblib files are written to a new subdirectory of the working
            directory named pipeline_<current_datetime>/.

        first_stage_kwargs: overrides for the kwargs of stages[0]. Any kwarg
            defined in the first Stage but not provided here is left as-is.
            Each value must be a 2-tuple `(<value>, <is_stage>)`, matching
            the format of Stage kwargs; useful for feeding different inputs
            to copies of the same Pipeline (e.g. when parallelizing).
        '''
        self.stages = {}
        for i, s in enumerate(stages):
            if isinstance(s, PipelineFlag) and i == 0:
                raise RuntimeError("PipelineFlag cannot be the first stage")
            if not isinstance(s, (Stage, PipelineFlag)):
                raise RuntimeError(f"Stage is of unknown type '{type(s)}'")
            # BUGFIX: the duplicate-name check previously ran after the
            # Stage branch, so duplicate Stage names silently overwrote
            # earlier entries; check before storing anything.
            if s.name in self.stages:
                raise ValueError(f"Stage name '{s.name}' used more than once")
            # PipelineFlag.name mirrors its wrapped Stage's name
            self.stages[s.name] = copy(s)

        self._executed = False
        self._first_stage_kwargs = deepcopy(first_stage_kwargs)

        # Apply first-stage overrides and resolve inter-Stage references
        self._insert_stage_kwargs()

        self.save_stage_output = save_stages
        self._build_time = time()
        self._build_time_str = get_current_datetime()

        if save_stages:
            self.cache_filepath = f"pipeline_{self._build_time_str}/"
            # exist_ok makes a separate isdir() pre-check unnecessary
            pathlib.Path(self.cache_filepath).mkdir(parents=True, exist_ok=True)
        else:
            self.cache_filepath = None

    def _insert_stage_kwargs(self):
        '''
        Overwrite the first Stage's kwargs with ``first_stage_kwargs``, then
        replace every kwarg identified as a reference to another Stage (by
        its string name) with the actual Stage object from ``self.stages``.

        Raises
        ------
        RuntimeError
            If a referenced Stage is not part of this Pipeline, or is not
            caching its output in memory.
        '''
        for i, stage in enumerate(self.stages.values()):
            # Flags wrap the Stage they decorate
            if isinstance(stage, PipelineFlag):
                stage = stage.stage

            # BUGFIX: the first-stage overrides were previously applied
            # inside the kwarg loop below (once per kwarg, clobbering the
            # loop variables); apply them exactly once, before resolution.
            if i == 0:
                for key, value in self._first_stage_kwargs.items():
                    stage._kwargs[key] = value

            for key, (value, is_stage) in deepcopy(stage._kwargs).items():
                if not is_stage:
                    continue
                if value not in self.stages:
                    raise RuntimeError(f"kwarg {key} refers to a Stage that "
                                       "was not provided in ``stages``")
                if not self.stages[value].is_output_cached():
                    raise RuntimeError(f"kwarg {key} uses a Stage ('{value}') "
                                       "that is not being cached in memory "
                                       "when executed. Please re-instantiate "
                                       "the Stage with "
                                       "`cache_stage_output=True`")
                # Swap the name string for the Stage object itself
                stage._kwargs[key] = (self.stages[value], is_stage)

    def __str__(self):
        '''
        Schema of this Pipeline: the ordered chain of stage names, plus the
        total execution time once the Pipeline has been run.
        '''
        output = ' -> '.join(self.stages.keys())
        if self._executed:
            output += f"\nPipeline took {self.execution_time} to execute fully."
        return output

    def __repr__(self):
        return str(self)

    def run(self, verbose=False, return_performance_report=False):
        '''
        Runs the Pipeline.

        Parameters
        ----------
        verbose: bool. If True, per-stage status messages are emitted via
            logger.debug instead of logger.info.

        return_performance_report: bool. If True, returns a DataFrame
            reporting how long each Stage took, memory at start and end of
            each Stage, etc.

        Returns
        -------
        If return_performance_report is True, the pandas DataFrame produced
        by self.track_stages(). Otherwise the output of the final Stage (or
        the value carried by a cancellation flag, if a Stage cancelled the
        run early).
        '''
        reporter = logger.debug if verbose else logger.info

        if self._executed:
            raise RuntimeError("This pipeline has already been run previously.")

        execution_start_time = time()
        data_in = None
        for i, (stage_name, stage) in enumerate(self.stages.items()):
            reporter(f"Starting stage {stage_name}...")

            if isinstance(stage, Stage):
                data_in = stage.execute()
            elif isinstance(stage, PipelineFlag) \
                    and stage.type == 'use_preceding_input':
                # Feed the previous stage's output in as the first argument
                data_in = stage.stage.execute(data_in)

            # Persist every intermediate result except the final one
            if self.save_stage_output and i != len(self.stages) - 1:
                dump_obj(data_in,
                         f"{self.cache_filepath}{stage.name}.joblib",
                         compress=('gzip', 3))

            self._executed = True

            # Stop early if a Stage signalled cancellation
            if isinstance(data_in, PipelineFlag) \
                    and data_in.type == 'cancel_pipeline':
                # Extract the real return value from the PipelineFlag object
                data_in = data_in.value
                logger.warning("Received a cancellation signal from Stage "
                               f"{stage.name}")
                break

        self.execution_time = timedelta(seconds=time() - execution_start_time)
        reporter(f"Pipeline took {self.execution_time} to execute.")

        if return_performance_report:
            return self.track_stages()
        return data_in

    def track_stages(self):
        '''
        Provides metadata about Stages executed.

        Returns
        -------
        pandas DataFrame
            Log of executed Stages. "absolute_percent_memory_change" is the
            percent of memory in use at the end of the Stage minus the
            percent in use at its start (e.g. 10% start -> 11% end = 1%).
        '''
        execution_times = []
        build_times = []
        memory_used_start = []
        memory_used_end = []
        for stage in self.stages.values():
            # BUGFIX: unwrap flags so flag-wrapped stages don't break
            # reporting (flags lack the timing/memory attributes)
            if isinstance(stage, PipelineFlag):
                stage = stage.stage
            execution_times.append(stage._execution_time)
            build_times.append(stage._initialization_time)
            memory_used_start.append(stage._memory_percentage_used_start)
            memory_used_end.append(stage._memory_percentage_used_end)

        output = pd.DataFrame({
            'stage': self.stages.keys(),
            'built_on': build_times,
            'time_to_execute': execution_times,
            'percent_memory_used_start': memory_used_start,
            'percent_memory_used_end': memory_used_end
        })
        output['absolute_percent_memory_change'] = \
            output['percent_memory_used_end'] \
            - output['percent_memory_used_start']

        return output

    def load_stage_output(self, stage_name, from_memory=True):
        '''
        Given the name of a stage in the already-executed pipeline, return
        that stage's output.

        Parameters
        ----------
        stage_name: str. Name used for the stage in question.

        from_memory: bool. If True, returns the output cached in memory by
            the Stage itself; otherwise loads it from the joblib file
            written to disk during run().

        Returns
        -------
        Object that was generated as the output of the named stage (often
        a DataFrame).
        '''
        if stage_name not in self.stages:
            raise ValueError(f"{stage_name} not a stage from this Pipeline")

        if from_memory:
            stage = self.stages[stage_name]
            # Unwrap a flag to reach the underlying Stage
            if isinstance(stage, PipelineFlag):
                stage = stage.stage
            # BUGFIX: previously tried dict_keys.index() (which doesn't
            # exist) and then indexed the stages dict with an integer; look
            # the Stage up by name instead.
            return stage.get_results()

        if not self.save_stage_output:
            raise ValueError("This Pipeline did not save its stages")
        if not self._executed:
            raise RuntimeError("This Pipeline has not yet been executed. "
                               "Please use the run() method to execute so "
                               "that saved stages may be inspected")

        with open(f"{self.cache_filepath}{stage_name}.joblib", 'rb') as f:
            return load_obj(f)

    @classmethod
    def cancel(cls, return_value=None):
        '''
        Build a flag a Stage can return to stop the Pipeline early;
        ``return_value`` becomes the result of Pipeline.run().
        '''
        return PipelineFlag('cancel_pipeline', value=return_value)

    def save(self, filepath=None):
        '''
        Archives a copy of the Pipeline so it can be used later/shared.
        Load it again via Pipeline.load(filepath).

        Parameters
        ----------
        filepath: str destination of the form 'path/to/pipeline.joblib'. If
            None, saves into the Stage-cache directory when one exists,
            otherwise into the current working directory.

        Returns
        -------
        Nothing.
        '''
        if not filepath and self.cache_filepath is not None:
            output_path = self.cache_filepath + 'Pipeline.joblib'
        elif filepath:
            output_path = filepath
        else:
            output_path = f'Pipeline_{self._build_time}.joblib'

        dump_obj(self, output_path, compress=False)

    @classmethod
    def load(cls, filepath):
        '''
        Given the location of a Pipeline saved on disk, loads it into memory
        for use.

        Parameters
        ----------
        filepath: str of the form 'path/to/pipeline.joblib'.

        Returns
        -------
        Pipeline object.
        '''
        return load_obj(filepath)
@ray.remote
class ParallelPipeline(Pipeline):
    '''
    Class for creating linear data pipelines using arbitrary inputs and
    functions for each step/stage. This class is designed to be identical
    to the Pipeline class, but with ray-enhanced parallelization.

    Note that the constructor should not be called via ParallelPipeline(args),
    but rather via ParallelPipeline.remote(args). Likewise, methods should
    be called via parallel_pipe.method.remote(method_args).
    '''
    # BUGFIX: the original docstring's closing quotes were followed by stray
    # text (a dataset separator), which made this module unparsable.
4992208 | type(Key.END)
sleep(1)
exit(0)
| StarcoderdataPython |
4942418 | <reponame>Jie-Re/GraphGallery<gh_stars>0
from .trainer import Trainer
from .registered_models import (TensorFlow, PyTorch, PyG,
DGL_PyTorch, DGL_TensorFlow,
Common,
MAPPING)
import graphgallery
from functools import partial
def is_enabled(model: str) -> bool:
    """Return true if the model is enabled by the current backend.

    Membership is checked against the registry returned by
    ``enabled_models()`` (backend-specific models plus the framework-
    agnostic common ones).

    Parameters
    ----------
    model : str
        The model name.

    Returns
    -------
    bool
        True if the model is enabled by the current backend.
    """
    return model in enabled_models()
def enabled_models(with_common=True):
    """Return the models in the gallery enabled by the current backend.

    Parameters
    ----------
    with_common : bool
        Whether to include common (framework-agnostic) models in the result.

    Returns
    -------
    graphgallery.functional.BunchDict
        A dict of models enabled by the current backend.
    """
    # BUGFIX: `with_common` was previously accepted but ignored — the common
    # models were appended unconditionally.
    registry = get_registry()
    if with_common:
        return registry + Common
    return registry
# Register this package's model implementations with graphgallery using the
# declared MAPPING (see graphgallery.load_models; behavior not visible in
# this chunk).
graphgallery.load_models(__name__, mapping=MAPPING)
# Registry lookup pre-bound to this package's model MAPPING.
get_registry = partial(graphgallery.get_registry, mapping=MAPPING)
# Backwards-compatible alias for the public API.
models = enabled_models
| StarcoderdataPython |
1964302 | #!/usr/bin/env python
"""
Name: structurama_from_genotypes.py
Author: <NAME>
Date: 11 July 2013
Convert genotype probabilities file output by Tom White's post-UNEAK processing scripts to input
file for structurama (Huelsenbeck and Andolfatto 2007).
Usage: python structurama_from_genotypes.py in_file out_file sample_size
Ex.: python structurama_from_genotypes.py HapMapPairedFilt.txt \
HapMapPairedFiltStruct.txt 73
"""
import os
import sys
import argparse
import csv
import numpy
def get_args():
    """Parse command-line arguments for the genotype-to-Structurama converter.

    Returns
    -------
    argparse.Namespace with in_file (str), out_file (str), and
    sample_size (int).
    """
    parser = argparse.ArgumentParser(
        description="""Program description""")
    parser.add_argument(
        "in_file",
        type=str,
        help="""The input genotype probabilities file from Tom White's scripts"""
    )
    parser.add_argument(
        "out_file",
        type=str,
        help="""The file name"""
    )
    parser.add_argument(
        "sample_size",
        # BUGFIX: was type=str; every consumer immediately wraps the value
        # in int(), so parse (and validate) it as an integer up front.
        # int(int) is a no-op, so downstream int() calls still work.
        type=int,
        help="""The number of samples/individuals in file"""
    )
    return parser.parse_args()
def read_samples(infile, sample_size):
    """Return the sample IDs from the header (first) line of *infile*.

    Columns 5 onward of the whitespace-split header hold fields like
    'NAME_suffix'; only the part before the first underscore is kept.
    Leaves the file positioned at the first data line.
    """
    header_fields = infile.readline().split()
    return [header_fields[4 + idx].split('_')[0]
            for idx in range(int(sample_size))]
def unphase(infile, sample_size):
    """Parse per-sample allele-count fields from the data lines of *infile*.

    Each line is whitespace-split; columns 5..(4+sample_size) hold
    per-sample counts formatted as 'c1,c2' or 'NA'. Returns a list with one
    entry per line, each a list of 'c1,c2' strings (one per sample), with
    missing data encoded as '?,?'.
    """
    n_samples = int(sample_size)  # hoisted: was re-converted on every line
    array = []
    for line in infile:
        parts = line.split()
        # NOTE: the original also split parts[3] into a1/a2 allele codes,
        # but those values were never used; the dead code is removed.
        seq = []
        for i in range(n_samples):
            field = parts[4 + i]
            if field == "NA":
                seq.append("?,?")
            else:
                counts = field.split(',')
                seq.append("{0},{1}".format(counts[0], counts[1]))
        array.append(seq)
    return array
def main():
    """Convert a genotype probabilities file into Structurama input format.

    Reads the input named on the command line, transposes the per-locus
    allele counts into per-sample rows, and writes a Structurama 'data'
    block to the output file.
    """
    args = get_args()
    # Use context managers so the files are closed even on error
    with open(args.in_file, 'r') as infile:
        samples = read_samples(infile, args.sample_size)
        array = unphase(infile, args.sample_size)
    # Transpose: rows were loci, columns samples -> one row per sample.
    # BUGFIX: zip() returns a lazy iterator in Python 3; materialize it so
    # it can be indexed and measured below.
    alignment = list(zip(*array))
    # Guard against an input with no data lines
    n_loci = len(alignment[0]) if alignment else 0
    # BUGFIX: the output was opened in binary mode ('wb') while str data is
    # written, which raises TypeError on Python 3; open in text mode.
    with open(args.out_file, 'w') as outfile:
        outfile.write("begin data;\n")
        outfile.write("\tdimensions nind={0} nloci={1};\n".format(args.sample_size, n_loci))
        outfile.write("\tinfo\n")
        for i, sample in enumerate(samples):
            outfile.write("\t{0}".format(sample))
            for j in range(n_loci):
                counts = str(alignment[i][j]).split(',')
                outfile.write(" ( {0} , {1} )".format(counts[0], counts[1]))
            # Comma-separate every individual except the last
            if i < (len(samples) - 1):
                outfile.write(",")
            outfile.write("\n")
        outfile.write("\t;\n")
        outfile.write("end;\n")


if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.