id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1630034 | <reponame>TwistedCore/external_v8
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# GYP definition for a shim "harfbuzz-ng" target that forwards compile and
# link flags to the system harfbuzz (plus harfbuzz-icu when available)
# through pkg-config, instead of building the bundled copy.
{
  'variables': {
    # Check for presence of harfbuzz-icu library, use it if present.
    # compile_test.py link-tests against -lharfbuzz-icu and emits the
    # matching pkg-config module list on stdout.
    'harfbuzz_libraries':
        '<!(python <(DEPTH)/tools/compile_test/compile_test.py '
        '--code "int main() { return 0; }" '
        '--run-linker '
        '--on-success "harfbuzz harfbuzz-icu" '
        '--on-failure "harfbuzz" '
        '-- -lharfbuzz-icu)',
  },
  'targets': [
    {
      'target_name': 'harfbuzz-ng',
      'type': 'none',
      'cflags': [
        '<!@(pkg-config --cflags <(harfbuzz_libraries))',
      ],
      'direct_dependent_settings': {
        'cflags': [
          '<!@(pkg-config --cflags <(harfbuzz_libraries))',
        ],
      },
      'link_settings': {
        'ldflags': [
          '<!@(pkg-config --libs-only-L --libs-only-other <(harfbuzz_libraries))',
        ],
        'libraries': [
          '<!@(pkg-config --libs-only-l <(harfbuzz_libraries))',
        ],
      },
      # Replace the in-tree harfbuzz headers with shims that redirect to the
      # system headers (see shim_headers.gypi).
      'variables': {
        'headers_root_path': 'src',
        'header_filenames': [
          'hb.h',
        ],
      },
      'includes': [
        '../../build/shim_headers.gypi',
      ],
    },
  ],
}
| StarcoderdataPython |
82399 | # -*- coding: utf-8 -*-
# @File : session.py
# @Date : 2021/2/25
# @Desc :
from Lib.api import data_return
from Lib.configs import Session_MSG_ZH, CODE_MSG_ZH, RPC_SESSION_OPER_SHORT_REQ, CODE_MSG_EN, Session_MSG_EN
from Lib.log import logger
from Lib.method import Method
from Lib.notice import Notice
from Lib.rpcclient import RpcClient
from Lib.sessionlib import SessionLib
from Lib.xcache import Xcache
from Msgrpc.serializers import SessionLibSerializer
class Session(object):
    """Operations on a single session: inspect, refresh cached info, destroy.

    All methods are static and return a "context" dict built by
    ``data_return`` (code, payload, zh message, en message).
    """

    @staticmethod
    def _invalid_sessionid_context(sessionid):
        """Return the shared 304 error context when sessionid is missing or
        non-positive, otherwise None.  Factored out of list/update/destroy,
        which each duplicated this check."""
        if sessionid is None or sessionid <= 0:
            return data_return(304, {}, Session_MSG_ZH.get(304), Session_MSG_EN.get(304))
        return None

    @staticmethod
    def list(sessionid=None):
        """Return full info (rights, UAC, process info) for one session."""
        context = Session._invalid_sessionid_context(sessionid)
        if context is not None:
            return context
        session_interface = SessionLib(sessionid, rightinfo=True, uacinfo=True, pinfo=True)
        result = SessionLibSerializer(session_interface).data
        context = data_return(200, result, CODE_MSG_ZH.get(200), CODE_MSG_EN.get(200))
        return context

    @staticmethod
    def update(sessionid=None):
        """Invalidate the cached session info, then return fresh data."""
        context = Session._invalid_sessionid_context(sessionid)
        if context is not None:
            return context
        Xcache.set_session_info(sessionid, None)  # drop cache before re-reading
        session_lib = SessionLib(sessionid, rightinfo=True, uacinfo=True, pinfo=True)
        result = SessionLibSerializer(session_lib).data
        context = data_return(203, result, Session_MSG_ZH.get(203), Session_MSG_EN.get(203))
        return context

    @staticmethod
    def destroy(sessionid=None):
        """Ask the RPC service to stop the session and notify the operator.

        Codes: 201 stop succeeded, 202 RPC timeout, 301 failure/exception.
        """
        context = Session._invalid_sessionid_context(sessionid)
        if context is not None:
            return context
        params = [sessionid]
        try:
            result = RpcClient.call(Method.SessionStop, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
            if result is None:  # RPC timed out while deleting
                # NOTE(review): a timeout is reported via send_success --
                # presumably because the session dies anyway; confirm intent.
                Notice.send_success(f"{Session_MSG_ZH.get(202)} SID: {sessionid}",
                                    f"{Session_MSG_EN.get(202)} SID: {sessionid}")
                context = data_return(202, {}, Session_MSG_ZH.get(202), Session_MSG_EN.get(202))
            elif result.get('result') == 'success':
                Notice.send_success(f"{Session_MSG_ZH.get(201)} SID: {sessionid}",
                                    f"{Session_MSG_EN.get(201)} SID: {sessionid}")
                context = data_return(201, {}, Session_MSG_ZH.get(201), Session_MSG_EN.get(201))
            else:
                Notice.send_warning(f"{Session_MSG_ZH.get(301)} SID: {sessionid}",
                                    f"{Session_MSG_EN.get(301)} SID: {sessionid}")
                context = data_return(301, {}, Session_MSG_ZH.get(301), Session_MSG_EN.get(301))
            return context
        except Exception as E:
            logger.error(E)
            Notice.send_warning(f"{Session_MSG_ZH.get(301)} SID: {sessionid}",
                                f"{Session_MSG_EN.get(301)} SID: {sessionid}")
            return data_return(301, {}, Session_MSG_ZH.get(301), Session_MSG_EN.get(301))
| StarcoderdataPython |
3220594 | <filename>Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/azpy/shared/common/envar_utils.py
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -- This line is 75 characters -------------------------------------------
from __future__ import unicode_literals
# -------------------------------------------------------------------------
'''
Module: <DCCsi>\\azpy\\shared\\common\\config_utils.py
A set of utility functions
<to do: further document this module>
To Do:
ATOM-5859
'''
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# built in's
import os
import sys
import logging as _logging
# 3rd Party
from box import Box
from unipath import Path
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# global space
import azpy.env_bool as env_bool
from azpy.constants import ENVAR_O3DE_DEV
from azpy.constants import ENVAR_O3DE_PROJECT
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
from azpy.constants import FRMT_LOG_LONG
# Resolve debug flags from the environment once at import time.
_DCCSI_GDEBUG = env_bool.env_bool(ENVAR_DCCSI_GDEBUG, False)
_DCCSI_DEV_MODE = env_bool.env_bool(ENVAR_DCCSI_DEV_MODE, False)

_MODULENAME = __name__
# Fix: compare strings with ==, not `is` — identity against a str literal
# relies on CPython interning and raises a SyntaxWarning on modern Python.
if _MODULENAME == '__main__':
    _MODULENAME = 'azpy.shared.common.envar_utils'

# set up module logging: clear inherited root handlers, then configure ours
for handler in _logging.root.handlers[:]:
    _logging.root.removeHandler(handler)
_LOGGER = _logging.getLogger(_MODULENAME)
_logging.basicConfig(format=FRMT_LOG_LONG)
# Fix: pass the name directly; the previous `{_MODULENAME}` built a set
# literal, so the log read "Initializing: {'azpy...'}".
_LOGGER.debug('Initializing: {0}.'.format(_MODULENAME))
# -------------------------------------------------------------------------
# -- envar util ----------------------------------------------------------
def get_envar_default(envar, envar_default=None, envar_set=None):
    '''
    Check the environment variable (envar).

    Get from the system environment, or the module dictionary (a Box):
    like the test one in __main__ below,
        TEST_ENV_VALUES = Box(ordered_box=True)
        TEST_ENV_VALUES[ENVAR_O3DE_PROJECT] = '${0}'.format(ENVAR_O3DE_PROJECT)
    This dictionary provides a simple way to pack a default set into a
    structure and decouple the getter implementation.
    Envars set in the environment trump the default values.

    :param envar: environment variable name (coerced to str).
    :param envar_default: value handed to os.getenv when the envar is unset.
    :param envar_set: optional Box/dict of fallback values; a fresh empty
        Box is created per call when omitted.
    :return: the resolved value (a Path with vars expanded when not None).
    '''
    # Fix: the default used to be a single module-level Box instance shared
    # by every call (mutable-default pitfall); create one per call instead.
    if envar_set is None:
        envar_set = Box(ordered_box=True)
    envar = str(envar)
    value = os.getenv(envar, envar_default)
    if not value:
        value = envar_set.get(envar)
    if value is not None:
        value = Path(value).expand_vars()
    return value
# -------------------------------------------------------------------------
# -- envar util ----------------------------------------------------------
def set_envar_defaults(envar_set, env_root=get_envar_default(ENVAR_O3DE_DEV)):
    """
    Set each environment variable if not already set with value.
    Must be safe, will not over-write existing.

    NOTE(review): ``env_root``'s default is evaluated once at import time,
    so later changes to O3DE_DEV in the environment are not picked up here.

    :param envar_set: Box/dict mapping envar name -> default value
        (mutated in place).
    :param env_root: optional root path; when it exists it is written to
        the O3DE_DEV envar and into envar_set.
    :return: envarSet
    """
    if env_root:
        env_root = Path(env_root)
        if env_root.exists():
            # assumes unipath Path is a str subclass so it can be stored in
            # os.environ directly -- TODO confirm
            os.environ[ENVAR_O3DE_DEV] = env_root
            envar_set[ENVAR_O3DE_DEV] = env_root
        else:
            raise ValueError("EnvVar Root is not valid: {0}".format(env_root))
    for envar in iter(envar_set.keys()):
        envar = str(envar)
        value = os.getenv(envar)
        if _DCCSI_GDEBUG:
            if not value:
                _LOGGER.debug('~ EnVar value NOT found: {0}\r'.format(envar))
        if not value:
            # Fall back to the packaged default. NOTE(review): this branch
            # deliberately(?) skips the path expansion / os.environ write
            # below because of the elif -- confirm before relying on it.
            value = envar_set.get(envar)
        elif value:
            if Path(value).exists():
                # re-set to Path object, if it is a valid existing path
                value = Path(value)
                envar_set[envar] = value
                os.environ[envar] = value.expand_vars()
            elif value:
                envar_set[envar] = value
    return envar_set
# -------------------------------------------------------------------------
# -- envar util class ----------------------------------------------------
class Validate_Envar(object):
    '''Resolve an environment variable reference lazily at runtime,
    after the project root has been defined.'''

    def __init__(self, envar=''):
        self._envar = envar
        self._envar_value = None

    @property
    def envar(self):
        '''Name of the tracked environment variable.'''
        return self._envar

    @envar.setter
    def envar(self, envar):
        # Changing the name eagerly re-resolves the cached value.
        self._envar = envar
        self._envar_value = get_envar_default(self._envar)

    @property
    def envar_value(self):
        '''Freshly resolved value of the tracked variable (re-read on
        every access).'''
        self._envar_value = get_envar_default(self._envar)
        return self._envar_value

    def __str__(self):
        return '{0}'.format(self.envar_value)

    def __repr__(self):
        return "Validate_Envar(envar='{0}')".format(self.envar)
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
    # imports for local testing
    import json

    # run simple tests?
    test = True

    # happy print
    _LOGGER.info("# {0} #".format('-' * 72))
    _LOGGER.info('~ config_utils.py ... Running script as __main__')
    _LOGGER.info("# {0} #\r".format('-' * 72))

    # set up base totally non-functional defaults (denoted with $<ENVAR>)
    TEST_ENV_VALUES = Box(ordered_box=True)
    # ^^ that results in "~ EnVar value NOT found: ordered_box"
    # which is a little bit odd, I assume the Box object stores that
    # it should be benign but leaving this comment here in case of funk

    # test envars
    TEST_ENV_VALUES[ENVAR_O3DE_PROJECT] = '${0}'.format(ENVAR_O3DE_PROJECT)
    TEST_ENV_VALUES[ENVAR_O3DE_DEV] = Path('${0}'.format(ENVAR_O3DE_DEV))

    # try to fetch and set the base values from the environment
    # this makes sure all envars set, are resolved on import
    TEST_ENV_VALUES = set_envar_defaults(TEST_ENV_VALUES)
    _LOGGER.info('Pretty print: TEST_ENV_VALUES')
    print(json.dumps(TEST_ENV_VALUES,
                     indent=4, sort_keys=False,
                     ensure_ascii=False), '\r')

    # simple tests: raw getter, then the lazy Validate_Envar wrapper
    _ENV_TAG = 'O3DE_DEV'
    foo = get_envar_default(_ENV_TAG)
    _LOGGER.info("~ Results of getVar on tag, '{0}':'{1}'\r".format(_ENV_TAG, foo))
    envar_value = Validate_Envar(envar=_ENV_TAG)
    _LOGGER.info('~ Repr is: {0}\r'.format(str(repr(envar_value))))
    _LOGGER.info("~ Results of ValidEnvars(envar='{0}')=='{1}'\r".format(_ENV_TAG, envar_value))

    # custom prompt for an interactive session
    sys.ps1 = "[azpy]>>"
| StarcoderdataPython |
3331596 | <gh_stars>0
import os
import time
import numpy as np
from sklearn.metrics import roc_auc_score
import oneflow as flow
from config import get_args
from dataloader_utils import OFRecordDataLoader
from wide_and_deep_module import WideAndDeep
from util import dump_to_npy, save_param_npy
from eager_train import prepare_modules, print_eval_metrics
if __name__ == "__main__":
    # Parse CLI args and build dataloaders, model, loss and optimizer once;
    # the graph classes below close over these module-level objects.
    args = get_args()
    train_dataloader, val_dataloader, wdl_module, bce_loss, opt = prepare_modules(args)
    class WideAndDeepGraph(flow.nn.Graph):
        """Static evaluation graph: one forward pass with gradients disabled."""

        def __init__(self, dataloader):
            super(WideAndDeepGraph, self).__init__()
            self.module = wdl_module
            self.dataloader = dataloader
            self.bce_loss = bce_loss

        def build(self):
            # Evaluation entry point: forward only, no autograd bookkeeping.
            with flow.no_grad():
                return self.graph()

        def graph(self):
            """Fetch one batch, run the model, return (predicts, labels, loss)."""
            (
                labels,
                dense_fields,
                wide_sparse_fields,
                deep_sparse_fields,
            ) = self.dataloader()
            # Move inputs to GPU; labels also cast to float32 for BCE loss.
            labels = labels.to("cuda").to(dtype=flow.float32)
            dense_fields = dense_fields.to("cuda")
            wide_sparse_fields = wide_sparse_fields.to("cuda")
            deep_sparse_fields = deep_sparse_fields.to("cuda")
            predicts = self.module(dense_fields, wide_sparse_fields, deep_sparse_fields)
            loss = self.bce_loss(predicts, labels)
            return predicts, labels, loss
    class WideAndDeepTrainGraph(WideAndDeepGraph):
        """Training graph: the shared forward pass plus backward/optimizer step."""

        def __init__(self, dataloader):
            super(WideAndDeepTrainGraph, self).__init__(dataloader)
            self.add_optimizer(opt)

        def build(self):
            predicts, labels, loss = self.graph()
            loss.backward()
            return predicts, labels, loss
    eval_graph = WideAndDeepGraph(val_dataloader)
    train_graph = WideAndDeepTrainGraph(train_dataloader)
    losses = []
    wdl_module.train()
    for i in range(args.max_iter):
        predicts, labels, train_loss = train_graph()
        losses.append(train_loss.numpy().mean())
        if (i + 1) % args.print_interval == 0:
            # Report the running mean loss over the window, then reset it.
            l = sum(losses) / len(losses)
            losses = []
            print(f"iter {i+1} train_loss {l} time {time.time()}")
            if args.eval_batchs <= 0:
                continue
            # Periodic evaluation: accumulate loss/labels/predictions over a
            # fixed number of validation batches, then report AUC metrics.
            eval_loss_acc = 0.0
            lables_list = []
            predicts_list = []
            wdl_module.eval()
            for j in range(args.eval_batchs):
                predicts, labels, eval_loss = eval_graph()
                eval_loss_acc += eval_loss.numpy().mean()
                lables_list.append(labels.numpy())
                predicts_list.append(predicts.numpy())
            print_eval_metrics(
                i + 1, eval_loss_acc / args.eval_batchs, lables_list, predicts_list
            )
            wdl_module.train()
| StarcoderdataPython |
155340 | # -*-encoding:utf-8-*-
from karlooper.utils.encrypt import StrEncryption
from karlooper.utils.base64encrypt import Encryption
from karlooper.utils.des_encrypt import DES
def test_encrypt():
    """Round-trip a string through StrEncryption and fail loudly on mismatch."""
    str_encryption = StrEncryption()
    str_encryption.input_key("test")
    _str = "make a test"
    encode_str = str_encryption.encode(_str)
    decode_str = str_encryption.decode(encode_str)
    if _str != decode_str:
        print(encode_str)
        print(decode_str)
        print(_str)
        # Fix: `assert "encode string error"` always passed (non-empty
        # string is truthy); raise explicitly so the failure is reported.
        raise AssertionError("encode string error")
def test_base64encrypt():
    """Round-trip a string through base64 Encryption and fail loudly on mismatch."""
    str_encryption = Encryption()
    str_encryption.input_key("test")
    _str = "make a test"
    encode_str = str_encryption.encode(_str)
    decode_str = str_encryption.decode(encode_str)
    if _str != decode_str:
        print(encode_str)
        print(decode_str)
        print(_str)
        # Fix: asserting a non-empty string is always true; raise instead.
        raise AssertionError("base64 encode string error")
def test_des_encrypt():
    """Round-trip a string through DES encryption and fail loudly on mismatch."""
    str_encryption = DES()
    str_encryption.input_key("test123456")
    _str = "make a test"
    encode_str = str_encryption.encode(_str)
    decode_str = str_encryption.decode(encode_str)
    if _str != decode_str:
        print(encode_str)
        print(decode_str)
        print(_str)
        # Fix: asserting a non-empty string is always true; raise instead.
        raise AssertionError("des encode string error")
def test():
    """Run every encryption round-trip check in order."""
    for case in (test_encrypt, test_base64encrypt, test_des_encrypt):
        case()


if __name__ == '__main__':
    test()
| StarcoderdataPython |
1631156 | <reponame>ImperialCollegeLondon/sap-quick-audio-demo
import sys
import os
import glob
from shutil import copyfile
def fileToStr(fileName):
    """Return a string containing the contents of the named file.

    Fix: uses a context manager so the handle is closed even when read()
    raises (the original leaked the handle on error).
    """
    with open(fileName) as fin:
        return fin.read()
def strToFile(text, filename):
    """Write a file with the given name and the given text.

    Fix: uses a context manager so the file is flushed and closed even when
    write() raises (the original leaked the handle on error).
    """
    with open(filename, "w") as output:
        output.write(text)
def make_html(source_path,demo_title):
    """Build html/index.html with an audio player entry for every .wav file
    found directly under source_path, copying the audio into html/audio/."""
    # output directory is hardwired
    out_audio_dir = 'audio'
    out_dir_rel = os.path.join('html',out_audio_dir)
    os.makedirs(out_dir_rel, exist_ok=True)
    # title is easy
    title = demo_title
    # deal with files
    # - loop over files
    # -- copy file
    # -- append track element by substituting track template
    # - substitute list of tracks into player template
    # - substitute player into index template
    track_list = []
    search_str = source_path + '/*.wav'
    input_files = glob.glob(search_str)
    for inpath in input_files:
        track_name = os.path.basename(inpath)
        track_path = os.path.join(out_audio_dir,track_name)
        copyfile(inpath,os.path.join(out_dir_rel,track_name))
        # Template placeholders are filled from local variable names via
        # **locals(): `title`, `track_name` and `track_path` are consumed by
        # the templates even though they look unused here.
        track_list.append(fileToStr('trackTemplate.html').format(**locals()))
    track_list = ''.join(track_list)
    player = fileToStr('playerTemplate.html').format(**locals())
    contents = fileToStr('indexTemplate.html').format(**locals())
    strToFile(contents, 'html/index.html')
if __name__ == "__main__":
    # Usage: python make_html.py path_to_audio [demo_title]
    demo_title = 'Audio demo'
    if len(sys.argv) < 2:
        print("Usage python make_html.py path_to_audio [demo_title]")
    else:
        source_path = sys.argv[1]
        if len(sys.argv) > 2:
            demo_title = sys.argv[2]
        make_html(source_path,demo_title)
1636228 | <filename>aydin/analysis/demo/demo_snr_estimate.py
import pytest
from numpy.random.mtrand import normal
from aydin.analysis.snr_estimate import snr_estimate
from aydin.io.datasets import camera, normalise
from aydin.util.log.log import lprint, Log
def demo_snr_estimate(display: bool = False):
    """Sanity-check snr_estimate on the camera image at five noise levels.

    :param display: when True, show every image in a napari viewer.
    """
    Log.enable_output = True
    clean_image = normalise(camera())
    # One fixed Gaussian noise field, scaled to sweep SNR from very low
    # (100x noise) to very high (0.01x noise).
    noise = normal(size=clean_image.size).reshape(*clean_image.shape)
    noisy_image_1 = clean_image + 100 * noise
    noisy_image_2 = clean_image + 10 * noise
    noisy_image_3 = clean_image + noise
    noisy_image_4 = clean_image + 0.1 * noise
    noisy_image_5 = clean_image + 0.01 * noise
    noise1_dB = snr_estimate(noisy_image_1)
    noise2_dB = snr_estimate(noisy_image_2)
    noise3_dB = snr_estimate(noisy_image_3)
    noise4_dB = snr_estimate(noisy_image_4)
    noise5_dB = snr_estimate(noisy_image_5)
    lprint(f"noise1_dB={noise1_dB}")
    lprint(f"noise2_dB={noise2_dB}")
    lprint(f"noise3_dB={noise3_dB}")
    lprint(f"noise4_dB={noise4_dB}")
    lprint(f"noise5_dB={noise5_dB}")
    if display:
        import napari

        with napari.gui_qt():
            viewer = napari.Viewer()
            viewer.add_image(clean_image, name='clean_image')
            viewer.add_image(noisy_image_1, name='noisy_image_1')
            viewer.add_image(noisy_image_2, name='noisy_image_2')
            viewer.add_image(noisy_image_3, name='noisy_image_3')
            viewer.add_image(noisy_image_4, name='noisy_image_4')
            viewer.add_image(noisy_image_5, name='noisy_image_5')
    # Loose checks: pytest.approx(expected, rel, abs).
    assert noise1_dB == pytest.approx(-42, 1, 3)
    assert noise2_dB == pytest.approx(-20, 1, 1)
    assert noise3_dB == pytest.approx(-0, 1, 1)
    assert noise4_dB == pytest.approx(19, 1, 1)
    assert noise5_dB == pytest.approx(33, 1, 1)


demo_snr_estimate()
| StarcoderdataPython |
149491 | # -*- coding: utf-8 -*-
# © 2016 <NAME>
# © 2016 Niboo SPRL (<https://www.niboo.be/>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models
class ProjectProject(models.Model):
    """Extend projects with a link to the scrum team that owns them."""
    _inherit = 'project.project'

    scrum_team_id = fields.Many2one('project.scrum.team', 'Scrum Team')
class ProjectScrumTeam(models.Model):
    """A scrum team grouping its sprints and its projects."""
    _name = 'project.scrum.team'

    name = fields.Char(string='Name', required=True)
    sprint_ids = fields.One2many('project.sprint', 'scrum_team_id', 'Sprints')
    project_ids = fields.One2many('project.project', 'scrum_team_id',
                                  'Projects')
class ProjectTask(models.Model):
    """Extend tasks with a mandatory sprint assignment."""
    _inherit = 'project.task'

    sprint_id = fields.Many2one('project.sprint', 'Sprint', required=True,
                                track_visibility='onchange')

    def go_to_sprint_action(self):
        """Open the task list of this task's sprint."""
        self.ensure_one()
        return self.sprint_id.view_tasks_action()

    def assign_to_me(self):
        """Assign the task to the current user."""
        self.ensure_one()
        self.user_id = self._uid
class ProjectSprint(models.Model):
    """A time-boxed sprint owned by one scrum team.

    Constraints enforce: at most one current sprint, at most one previous
    sprint, no overlap between sprints of the same team.
    """
    _name = 'project.sprint'
    _rec_name = 'display_name'
    _order = 'start_date DESC'

    name = fields.Char(string='Name', required=True)
    start_date = fields.Date(string='Start Date', required=True)
    end_date = fields.Date(string='End Date', required=True)
    scrum_team_id = fields.Many2one('project.scrum.team', 'Scrum Team',
                                    required=True)
    task_count = fields.Integer(string='# Tasks', compute='_task_count')
    is_current_sprint = fields.Boolean(string='Is Current Sprint')
    is_previous_sprint = fields.Boolean(string='Is Previous Sprint')
    display_name = fields.Char(string='Display Name',
                               compute='_compute_display_name', store=True)

    @api.depends('name', 'start_date', 'end_date')
    def _compute_display_name(self):
        # Stored display name; a date-suffixed variant is kept below for
        # reference but disabled.
        for sprint in self:
            sprint.display_name = '%s' % (sprint.name)
            # sprint.display_name = '%s - %s/%s' % (sprint.name,
            #                                       sprint.end_date[8:10],
            #                                       sprint.end_date[5:7])

    def _task_count(self):
        """Compute how many tasks are attached to each sprint."""
        ProjectTask = self.env['project.task']
        for sprint in self:
            tasks = ProjectTask.search([('sprint_id', '=', sprint.id)])
            sprint.task_count = len(tasks)

    @api.constrains('is_current_sprint')
    def check_current_sprint(self):
        """When a sprint becomes current, clear the stale previous flag and
        demote the old current sprint to previous."""
        self.ensure_one()
        self.check_is_not_both_previous_and_current()
        if self.is_current_sprint:
            old_previous = self.search([('is_previous_sprint', '=', True)])
            if old_previous:
                old_previous.is_previous_sprint = False
            old_current = self.search([('is_current_sprint', '=', True),
                                       ('id', '!=', self.id)])
            if old_current:
                old_current.is_current_sprint = False
                old_current.is_previous_sprint = True

    @api.constrains('is_previous_sprint')
    def check_previous_sprint(self):
        """Enforce that at most one sprint is flagged as previous."""
        self.ensure_one()
        self.check_is_not_both_previous_and_current()
        if len(self.search([('is_previous_sprint', '=', True)])) > 1:
            raise exceptions.ValidationError('A single previous sprint is '
                                             'permitted')

    def check_is_not_both_previous_and_current(self):
        """Shared guard for the two flag constraints above."""
        self.ensure_one()
        if self.is_current_sprint and self.is_previous_sprint:
            raise exceptions.ValidationError('A sprint cannot be previous'
                                             ' and current at the same time')

    @api.constrains('start_date', 'end_date')
    def check_dates(self):
        """Reject a sprint whose date range overlaps another sprint of the
        same team: either endpoint falling inside, or fully enclosing."""
        for sprint in self:
            concurrent_sprints = self.search([
                '&',
                '|',
                '|',
                '&',
                ('start_date', '<=', sprint.end_date),
                ('start_date', '>=', sprint.start_date),
                '&',
                ('end_date', '<=', sprint.end_date),
                ('end_date', '>=', sprint.start_date),
                '&',
                ('start_date', '<=', sprint.start_date),
                ('end_date', '>=', sprint.end_date),
                '&',
                ('id', '!=', sprint.id),
                ('scrum_team_id', '=', sprint.scrum_team_id.id)
            ])
            if concurrent_sprints:
                raise exceptions.ValidationError('Sprints cannot overlap')

    def view_tasks_action(self):
        """Return an act_window opening the task list filtered to this sprint."""
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'project.task',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'target': 'current',
            'name': self.name,
            'display_name': self.display_name,
            'domain': [('sprint_id', '=', self.id)],
            'context': {'default_sprint_id': self.id},
        }
| StarcoderdataPython |
140555 | import argparse
import torch
from stereo import MinSumStereo, BlockMatchStereo, RefinedMinSumStereo
import data
import imageio
import numpy as np
import matplotlib.pyplot as plt
# Command line interface: input image pair, disparity range, architecture
# options, and per-submodule checkpoints.
parser = argparse.ArgumentParser()
parser.add_argument('--im0', action='store', required=True, type=str)
parser.add_argument('--im1', action='store', required=True, type=str)
parser.add_argument('--min-disp', action='store', default=0, type=int)
parser.add_argument('--max-disp', action='store', default=127, type=int)
parser.add_argument('--stride-in', action='store', default=1, type=int)
parser.add_argument('--stride-out', action='store', default=1, type=int)
parser.add_argument('--multi-level-output', action='store_true', default=False)
parser.add_argument('--activation', action='store', choices=['relu', 'leakyrelu', 'elu'], default='leakyrelu')
parser.add_argument('--with-bn', action='store_true', default=False)
parser.add_argument('--with-upconv', action='store_true', default=False)
parser.add_argument('--with-output-bn', action='store_true', default=False)
parser.add_argument('--pad', action='store', default=(0, 0), nargs=2, type=int,
                    help='extra padding of in height and in width on every side')
parser.add_argument('--model', action='store', default='bp+ms+h',
                    choices=['wta', 'bp+ms', 'bp+ms+h', 'bp+ms+ref+h'])
parser.add_argument('--checkpoint-unary', action='store', default=None, type=str)
parser.add_argument('--checkpoint-matching', action='store', default=[], nargs='+', type=str)
parser.add_argument('--checkpoint-affinity', action='store', default=None, type=str)
parser.add_argument('--checkpoint-crf', action='append', default=[], type=str, nargs='+')
parser.add_argument('--checkpoint-refinement', action='store', default=None, type=str)
parser.add_argument('--lbp-min-disp', action='store_true', default=False)
parser.add_argument('--max-iter', action='store', default=1, type=int)
parser.add_argument('--num-bp-layers', action='store', default=1, type=int)
parser.add_argument('--bp-inference', action='store', default='sub-exp',
                    choices=['wta', 'expectation', 'sub-exp'], type=str)
parser.add_argument('--matching', action='store', choices=['corr', 'sad', 'conv3d'],
                    default='sad', type=str)
parser.add_argument('--input-level-offset', action='store', default=1, type=int,
                    help='1 means that level 1 is the input resolution')
parser.add_argument('--output-level-offset', action='store', default=1, type=int,
                    help="0 means that level 0 (=full res) is the output resolution")
args = parser.parse_args()
# Load the image pair, instantiate the requested model variant, and run
# inference without gradients; the disparity map is saved as a PFM file.
I0_pyramid, I1_pyramid = data.load_sample(args.im0, args.im1)
device = 'cuda:0'
with torch.no_grad():
    if args.model == 'wta':
        model = BlockMatchStereo(device, args)
    elif args.model == 'bp+ms':
        model = MinSumStereo(device, args)
    elif args.model == 'bp+ms+h':
        model = MinSumStereo(device, args)
    elif args.model == 'bp+ms+ref+h':
        model = RefinedMinSumStereo(device, args)
    max_disp = None  # use original max-disp
    res_dict = model.to(device).forward(I0_pyramid, I1_pyramid, max_disp=args.max_disp, step=1)
# PFM stores scanlines bottom-up, hence the vertical flip before writing.
imageio.imwrite("data/output/stereo/" + args.model + ".pfm",
                np.flipud(res_dict['disps0'][0].squeeze().float().detach().cpu().numpy()))
def wire(data):
    """Yield every grid point visited while tracing a wire path.

    ``data`` is a comma-separated list of moves like "R8,U5,L5,D3"; one
    point is yielded per unit step, starting adjacent to the origin.
    """
    x, y = 0, 0
    for move in data.split(","):
        # Renamed from `dir` (shadowed the builtin) and untangled the
        # original comprehension that reused `x` as its loop variable.
        heading, steps = move[0], int(move[1:])
        dx = -1 if heading == "L" else 1 if heading == "R" else 0
        dy = -1 if heading == "D" else 1 if heading == "U" else 0
        for _ in range(steps):
            x += dx
            y += dy
            yield x, y
def aoc(data):
    """Return the Manhattan distance of the wire crossing closest to origin.

    ``data`` holds two comma-separated wire paths separated by a newline.
    Raises ValueError when the wires never cross (as the original did via
    ``min`` over an empty collection).
    """
    wires = [set(wire(line)) for line in data.split("\n")]
    # Set intersection replaces the original scan over the union with two
    # membership tests per point -- same result, clearer and one pass.
    crossings = wires[0] & wires[1]
    return min(abs(x) + abs(y) for x, y in crossings)
| StarcoderdataPython |
30932 | <reponame>materialsproject/maggflow
""" Simple API Interface for Maggma """
| StarcoderdataPython |
3267710 | from typing import Callable, Sequence
from functools import partial
from operator import itemgetter
import torch
from keyedtensor import KeyedTensor
def _many_to_one(keyedtensors: Sequence[KeyedTensor], op: Callable, dim: int) -> KeyedTensor:
    """Apply ``op`` (e.g. torch.cat / torch.stack) key-wise across many
    KeyedTensors, using the first input's keys as the reference order.

    Fix: the previous ``itemgetter(*keys)`` implementation broke for
    single-key inputs -- ``itemgetter`` with one key returns the bare value
    instead of a 1-tuple, so the subsequent ``zip(*...)`` iterated tensor
    rows rather than per-key groups.
    """
    keys = list(keyedtensors[0])
    return KeyedTensor(
        (key, op([kt[key] for kt in keyedtensors], dim=dim)) for key in keys
    )
def cat(keyedtensors: Sequence[KeyedTensor], dim: int = 0) -> KeyedTensor:
    """Concatenate KeyedTensors key-by-key along an existing dimension.

    Mirrors ``torch.cat``: every input must carry the same key set (order
    may differ) and, per key, the tensor shapes must be concatenable along
    ``dim`` exactly as ``torch.cat`` requires.

    Args:
        keyedtensors: sequence of KeyedTensors with identical keys.
        dim: existing dimension to concatenate along. Defaults to 0.

    Returns:
        A new KeyedTensor whose value at each key is the concatenation of
        that key's tensors taken from every input, e.g. cat of ``a`` shapes
        (2, 3), (3, 3) and (1, 3) on dim 0 yields shape (6, 3).
    """
    return _many_to_one(keyedtensors, torch.cat, dim=dim)
def stack(keyedtensors: Sequence[KeyedTensor], dim: int = 0) -> KeyedTensor:
    """Stack KeyedTensors key-by-key along a new dimension.

    Mirrors ``torch.stack``: every input must carry the same key set (order
    may differ) and, per key, the tensor shapes must match exactly as
    ``torch.stack`` requires.

    Args:
        keyedtensors: sequence of KeyedTensors with identical keys.
        dim: index of the new dimension to insert. Defaults to 0.

    Returns:
        A new KeyedTensor whose value at each key stacks that key's tensors
        from every input, e.g. stacking three ``a`` tensors of shape (3,)
        on dim 0 yields shape (3, 3).
    """
    return _many_to_one(keyedtensors, torch.stack, dim=dim)
| StarcoderdataPython |
1737690 | <reponame>ErenKaracan47/TemelProgramlama<filename>ProgramlamayaGiris/2 - List and If/0 - List.py
# Demonstrate element-wise arithmetic between two lists: each slot of
# list3 applies a different operator to the matching pair of elements.
list1 = [0, 1, 2, 3, 4]
list2 = [9, 8, 7, 6, 5]

_operations = (
    lambda a, b: a + b,
    lambda a, b: a - b,
    lambda a, b: a * b,
    lambda a, b: a / b,
    lambda a, b: a ** b,
)
list3 = [fn(a, b) for fn, a, b in zip(_operations, list1, list2)]

print(list1)
print(list2)
print(list3)
| StarcoderdataPython |
421 | import cv2, time
import numpy as np
import Tkinter
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""
def resize(*args, **kwargs):
    """Thin pass-through to cv2.resize."""
    return cv2.resize(*args, **kwargs)
def moveWindow(*args,**kwargs):
    """No-op stub: window placement is disabled in this UI build."""
    return
def imshow(root,args,kwargs):
    """Render a frame into a Tkinter label instead of a cv2 window.

    NOTE(review): this function looks broken as written -- it reads a
    global ``output_frame`` that is not defined in this module, ignores
    ``args``, passes ``kwargs`` as the label image, and relies on
    ``Image``/``ImageTk`` which are never imported here.  The original
    cv2 call is kept below, commented out.  Confirm intended behavior
    before use.
    """
    image = cv2.cvtColor(output_frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    image = ImageTk.PhotoImage(image)
    return Tkinter.Label(root, image=kwargs).pack()
    #return cv2.imshow(*args,**kwargs)
def destroyWindow(*args,**kwargs):
    """Thin pass-through to cv2.destroyWindow."""
    return cv2.destroyWindow(*args,**kwargs)
def waitKey(*args,**kwargs):
    """Thin pass-through to cv2.waitKey (drives the cv2 event loop)."""
    return cv2.waitKey(*args,**kwargs)
"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
    """Stack two images horizontally, aligned to the top.

    The output is as tall as the taller input and as wide as both inputs
    together; uncovered pixels stay zero.  Extra trailing dimensions
    (e.g. color channels) follow ``left``'s shape and dtype.

    Fix: removed the unused local ``hoff``.
    """
    h = max(left.shape[0], right.shape[0])
    w = left.shape[1] + right.shape[1]
    shape = list(left.shape)
    shape[0] = h
    shape[1] = w
    comb = np.zeros(tuple(shape), left.dtype)
    # left will be on left, aligned top, with right on right
    comb[:left.shape[0], :left.shape[1]] = left
    comb[:right.shape[0], left.shape[1]:] = right
    return comb
def plotXY(data, size=(280, 640), margin=25, name="data", labels=(), skip=(),
           showmax=(), bg=None, label_ndigits=(), showmax_digits=()):
    """Render x-y series as a stacked plot image, one horizontal band each.

    Fixes: the sequence parameters previously used shared mutable ``[]``
    defaults; empty tuples behave identically for the read-only access done
    here.  Also removed the unused local ``mx``.

    :param data: list of (x, y) sequences; returns None if any has < 2 points.
    :param size: (height, width) of the output canvas in pixels.
    :param margin: pixel border around each sub-plot.
    :param name: unused here; kept for interface compatibility.
    :param labels, skip, label_ndigits: per-series x tick labeling controls.
    :param showmax, showmax_digits: per-series max-point marker controls.
    :param bg: optional image blended in on the left of the canvas.
    :return: HxWx3 float image, or None on short input.
    """
    for x, y in data:
        if len(x) < 2 or len(y) < 2:
            return
    n_plots = len(data)
    w = float(size[1])
    h = size[0] / float(n_plots)
    z = np.zeros((size[0], size[1], 3))
    if isinstance(bg, np.ndarray):
        # Scale the background to one sub-plot's height and splice it in.
        wd = int(bg.shape[1] / bg.shape[0] * h)
        bg = cv2.resize(bg, (wd, int(h)))
        if len(bg.shape) == 3:
            r = combine(bg[:, :, 0], z[:, :, 0])
            g = combine(bg[:, :, 1], z[:, :, 1])
            b = combine(bg[:, :, 2], z[:, :, 2])
        else:
            r = combine(bg, z[:, :, 0])
            g = combine(bg, z[:, :, 1])
            b = combine(bg, z[:, :, 2])
        z = cv2.merge([r, g, b])[:, :-wd, ]
    i = 0
    P = []
    for x, y in data:
        x = np.array(x)
        y = -np.array(y)  # image y axis points down
        # Normalize each series into its own horizontal band of the canvas.
        xx = (w - 2 * margin) * (x - x.min()) / (x.max() - x.min()) + margin
        yy = (h - 2 * margin) * (y - y.min()) / (y.max() - y.min()) + margin + i * h
        if labels:
            if labels[i]:
                for ii in range(len(x)):
                    if ii % skip[i] == 0:
                        col = (255, 255, 255)
                        ss = '{0:.%sf}' % label_ndigits[i]
                        ss = ss.format(x[ii])
                        cv2.putText(z, ss, (int(xx[ii]), int((i + 1) * h)),
                                    cv2.FONT_HERSHEY_PLAIN, 1, col)
        if showmax:
            if showmax[i]:
                col = (0, 255, 0)
                ii = np.argmax(-y)
                ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
                ss = ss.format(x[ii])
                cv2.putText(z, ss, (int(xx[ii]), int((yy[ii]))),
                            cv2.FONT_HERSHEY_PLAIN, 2, col)
        try:
            pts = np.array([[x_, y_] for x_, y_ in zip(xx, yy)], np.int32)
            i += 1
            P.append(pts)
        except ValueError:
            pass  # temporary
    # cv2.polylines renders unreliably for some users; draw segments manually.
    for p in P:
        for i in range(len(p) - 1):
            cv2.line(z, tuple(p[i]), tuple(p[i + 1]), (255, 255, 255), 1)
    return z
#cv2.imshow(name,z)
| StarcoderdataPython |
class FindObject:
    """Parse and rebuild a find(1) command line.

    Splits a command of the form ``find <path> <opts> -exec <cmd> {} ;``
    into ``path``, ``opts`` and ``exec_cmd`` attributes.
    """

    def __init__(self, cmd):
        """Parse ``cmd``; strings not starting with "find" leave all parts
        empty."""
        self.exec_cmd = ''
        self.path = ''
        self.opts = ''
        if cmd.startswith('find'):
            # Example: find ./bla -type f -exec rm -rf {} ;
            # Any of the three parts (path, opts, -exec ...) may be absent,
            # giving 8 possible shapes.
            exec_from = cmd.find('-exec ')
            # Fix: use != rather than `is not` -- identity comparison with
            # an int literal is an implementation detail and raises a
            # SyntaxWarning on Python 3.8+.
            if exec_from != -1:
                exec_to = cmd.rfind('{}')
                self.exec_cmd = cmd[exec_from+6:exec_to].rstrip()
                cmd = cmd[:exec_from]
            path_end = cmd.find(' -')
            if path_end != -1:
                self.path = cmd[5:path_end].rstrip()
                self.opts = cmd[path_end:].strip()
            else:
                self.path = cmd[5:].rstrip()

    @classmethod
    def build_with(cls, path, opts, exec_cmd):
        """Alternate constructor from already-split parts."""
        cmd = cls('')
        cmd.path = path
        cmd.opts = opts
        cmd.exec_cmd = exec_cmd
        return cmd

    def toCmd(self):
        """Reassemble the command string from the parts.

        NOTE(review): the ``-exec``/``{} ;`` wrappers are not re-added
        around ``exec_cmd``, so the output is not a valid find invocation
        when exec_cmd is set; behavior preserved as-is.
        """
        if self.path != '':
            cmd = "find %s" % self.path
        else:
            cmd = "find "
        if self.exec_cmd != '':
            cmd += " %s %s" % (self.opts, self.exec_cmd)
        else:
            cmd += " %s" % (self.opts)
        return cmd.rstrip()
| StarcoderdataPython |
26453 | <gh_stars>0
from pygraphblas import *
def test_add_identity():
    """add_identity() fills the main diagonal and reports how many
    entries it actually had to add."""
    # On an empty 10x10 matrix every diagonal entry is missing.
    empty = Matrix.sparse(INT8, 10, 10)
    assert add_identity(empty) == 10
    # With one diagonal entry already present, only nine are added.
    partial = Matrix.sparse(INT8, 10, 10)
    partial[5, 5] = 42
    assert add_identity(partial) == 9
| StarcoderdataPython |
1760949 | import numpy as np
from plotoptix import TkOptiX
from PIL import Image
# Load the reference spectrum image and collect one RGB tuple (0-1 range,
# rounded to 2 decimals) per pixel.
im = Image.open("samples/color spectrum.png")
px = im.load()
colors = []
cont = 0
for row in range(0, im.height):
    for col in range(0, im.width):
        pix = px[col, row]
        newCol = (round(pix[0] / 255, 2), round(pix[1] / 255, 2), round(pix[2] / 255, 2))
        colors.append(newCol)
        # Coarse progress indicator for large images.
        if cont % 100000 == 0:
            print(cont, end=' ')
        cont += 1
# De-duplicate: each remaining row is a unique (r, g, b) color.
colors = np.unique(colors, axis=0)
print(len(colors))
optix = TkOptiX() # create and configure, show the window later
optix.set_param(max_accumulation_frames=30) # accumulate up to 30 frames (override default of 4 frames)
optix.set_background(0.10) # white background
# Render one small box per unique color, positioned at its own RGB
# coordinates (so the cloud fills the color cube).
optix.set_data(name="colors",
               pos=colors,
               r=0.005,
               u=[0.005, 0, 0],
               w=[0, 0, 0.005],
               c=[[r, g, b, 0.01] for r, g, b in colors], # map_to_colors(cubes, 'Spectral')
               geom="Parallelepipeds") # ParticleSet
optix.set_coordinates() # show coordinates box
optix.show()
optix.setup_light("light1", color=10 * np.array([0.99, 0.9, 0.7]), radius=2)
print("done")
| StarcoderdataPython |
20982 | <reponame>Vertexwahn/depend_on_what_you_use
def load_external_repo():
    """Registers the checked-in test repository as the local Bazel
    repository `@ext_repo`; intended to be called from a WORKSPACE file."""
    native.local_repository(
        name = "ext_repo",
        path = "test/external_repo/repo",
    )
| StarcoderdataPython |
1746608 | <filename>menagerie/util/cloning_plans.py
import json
from pydent.models import Sample
from util.plans import ExternalPlan, PlanStep, Transformation, get_obj_by_attr
from util.plasmid_assembly_legs import GibsonLeg, SangerSeqLeg, PCRLeg
from util.plasmid_assembly_legs import YeastTransformationLeg, YeastGenotypingLeg
class CloningPlan(ExternalPlan):
    """
    Interface for working with the Aquarium Session and Plan models.
    Originally based on JSON schema derived from BU/SAIL Puppeteer schema.
    """
    def __init__(self, plan_path, aq_instance, aq_plan_name=None):
        """
        In addition to super(), registers an input Sample for every
        transformation destination of every step.

        :param plan_path: name of folder containing configuration files
            Also used as the name of the Plan record in Aquarium
        :type plan_path: str
        :param aq_instance: the instance of Aquarium to use
            Corresponds to a key in the secrets.json file
        :type aq_instance: str
        :return: new CloningPlan
        """
        super().__init__(plan_path, aq_instance, aq_plan_name)

        for step in self.steps:
            sample_type = self.destination_sample_type(step.type)

            # Why is this block not also in XPlan?
            destinations = (
                (txn, dst)
                for txn in step.transformations
                for dst in txn.destination
            )
            for txn, dst in destinations:
                found = self.get_samples(sample_type, dst["name"], txn.source)
                self.add_input_sample(dst["name"], found[0])

    def initialize_step(self, step_data):
        """Return the PlanStep for *step_data*, building a type-specific
        step when the superclass does not supply one."""
        step = super().initialize_step(step_data)
        if step:
            return step

        constructors = {
            "pcr": PCRStep,
            "gibson": GibsonStep,
            "yeast_transformation": YeastTransformationStep,
        }
        constructor = constructors.get(step_data["type"])
        return constructor(self, step_data) if constructor else None

    def destination_sample_type(self, step_type):
        """Map a step type onto the Aquarium sample type it produces
        (None for unrecognized step types)."""
        return {
            "pcr": "Fragment",
            "gibson": "Plasmid",
            "yeast_transformation": "Yeast Strain",
        }.get(step_type)
class CloningPlanStep(PlanStep):
    """PlanStep that wraps each raw transformation dict of its operator
    in a CloningPlanTransformation."""
    def __init__(self, plan, plan_step):
        super().__init__(plan, plan_step)
        raw_txns = self.operator.get('transformations', [])
        self.transformations.extend(
            CloningPlanTransformation(self, raw) for raw in raw_txns
        )
class GoldenGateStep(CloningPlanStep):
    # Placeholder for Golden Gate assembly steps; currently adds no
    # behavior beyond CloningPlanStep.
    def __init__(self, plan, plan_step):
        super().__init__(plan, plan_step)
class GibsonStep(CloningPlanStep):
    """Step that plans Gibson assembly operations plus Sanger sequencing QC."""
    def __init__(self, plan, plan_step):
        super().__init__(plan, plan_step)

    def create_step(self, cursor, n_qcs=1, step_outputs=None):
        """Add a Gibson assembly leg (plus *n_qcs* sequencing QC legs) for
        each transformation destination.

        :param cursor: layout cursor used to position operations on the canvas
        :param n_qcs: number of Sanger sequencing QC legs per destination
        :param step_outputs: map of sample name -> upstream output operation.
            BUGFIX: was a mutable default ({}), which silently shared state
            between calls; a fresh dict is now created when omitted.
        :return: step_outputs, updated with this step's plasmid outputs
        """
        if step_outputs is None:
            step_outputs = {}
        for txn in self.transformations:
            src = txn.source
            fragment_src = [o for o in src if o["input_name"] == "Fragment"]
            # Order fragments left-to-right by the x position of the op that
            # produces them (assumes each fragment already has an entry in
            # step_outputs -- TODO confirm).
            fragment_src.sort(key=lambda s: step_outputs.get(s["name"]).x)

            for dst in txn.destination:
                build_leg = GibsonLeg(self, cursor)
                build_leg.set_sample_io(src, dst)
                build_leg.add()

                upstr_ops = []
                for s in fragment_src:
                    upstr_ops.append(step_outputs.get(s["name"]))

                dnstr_op = build_leg.get_input_op()
                # build_leg.wire_input_array(upstr_ops, dnstr_op)

                upstr_op = build_leg.get_output_op()
                cursor.incr_x()

                for i in range(n_qcs):
                    qc_leg = SangerSeqLeg(self, cursor)
                    qc_leg.set_sample_io(src, dst)
                    qc_leg.add()
                    dnstr_op = qc_leg.get_input_op()
                    qc_leg.wire_ops(upstr_op, dnstr_op)
                    cursor.incr_x()
                    cursor.incr_y(qc_leg.length())

                    # Record the plasmid output only the first time through.
                    if not step_outputs.get(dst["name"]):
                        plasmid_op = qc_leg.get_output_op()
                        step_outputs[dst["name"]] = plasmid_op
                        self.plan.add_input_sample(dst["name"], plasmid_op.output("Plasmid").sample)

                cursor.return_y()

        return step_outputs
class PCRStep(CloningPlanStep):
    """Step that plans one PCR leg per transformation destination."""
    def __init__(self, plan, plan_step):
        super().__init__(plan, plan_step)

    def create_step(self, cursor, step_outputs=None):
        """Add PCR legs for this step's transformations.

        :param cursor: layout cursor used to position operations
        :param step_outputs: map of sample name -> output operation.
            BUGFIX: was a mutable default ({}), which silently shared state
            between calls; a fresh dict is now created when omitted.
        :return: step_outputs, updated with each fragment's output op
        """
        if step_outputs is None:
            step_outputs = {}
        for txn in self.transformations:
            src = txn.source
            template_src = get_obj_by_attr(src, "input_name", "Template")

            for dst in txn.destination:
                # The input container option depends on the template's type.
                sample_type = self.plan.input_sample(template_src["name"]).sample_type.name
                if sample_type == "DNA Library":
                    container_opt = "dna_library"
                else:
                    container_opt = "plasmid_stock"

                build_leg = PCRLeg(self, cursor)
                build_leg.set_sample_io(src, dst)
                build_leg.add(container_opt)

                step_outputs[dst["name"]] = build_leg.get_output_op()
                cursor.incr_x()

            cursor.return_y()

        return step_outputs
class YeastTransformationStep(CloningPlanStep):
    """Step that plans yeast transformation plus genotyping QC legs."""
    def __init__(self, plan, plan_step):
        super().__init__(plan, plan_step)

    def create_step(self, cursor, n_qcs=1, step_outputs=None):
        """Add a transformation leg (plus *n_qcs* genotyping QC legs) for
        each transformation destination.

        :param cursor: layout cursor used to position operations
        :param n_qcs: number of genotyping QC legs per destination
        :param step_outputs: map of sample name -> output operation.
            BUGFIX: was a mutable default ({}), which silently shared state
            between calls; a fresh dict is now created when omitted.
        :return: step_outputs, updated with this step's strain outputs
        """
        if step_outputs is None:
            step_outputs = {}
        for txn in self.transformations:
            src = txn.source
            integrant_src = get_obj_by_attr(src, "input_name", "Integrant")

            for dst in txn.destination:
                build_leg = YeastTransformationLeg(self, cursor)
                build_leg.set_sample_io(src, dst)
                build_leg.add()
                build_leg.wire_plasmid()

                # BUGFIX: step_outputs is keyed by sample *name* everywhere
                # else in this file; the original passed the source/dest
                # dicts themselves, which are unhashable -- TODO confirm
                # against util.plans data shapes.
                upstr_op = step_outputs.get(integrant_src["name"])
                dnstr_op = build_leg.get_input_op()
                # BUGFIX: use fresh names for the wire endpoints; the
                # original clobbered the src/dst loop variables, so the QC
                # legs below received FieldValues instead of the dicts.
                wire_src = upstr_op.output("Plasmid")
                wire_dst = dnstr_op.input("Integrant")
                self.plan.add_wire(wire_src, wire_dst)

                upstr_op = build_leg.get_output_op()
                step_outputs[dst["name"]] = upstr_op
                cursor.incr_x()

                for i in range(n_qcs):
                    qc_leg = YeastGenotypingLeg(self, cursor)
                    qc_leg.set_sample_io(src, dst)
                    qc_leg.add()
                    dnstr_op = qc_leg.get_input_op()
                    qc_leg.wire_ops(upstr_op, dnstr_op)
                    cursor.incr_x()
                    cursor.incr_y(qc_leg.length())

                cursor.return_y()

        return step_outputs
class CloningPlanTransformation(Transformation):
    # Thin wrapper; all behavior lives in util.plans.Transformation.
    def __init__(self, plan_step, transformation):
        super().__init__(plan_step, transformation)
| StarcoderdataPython |
3301819 | from socket import socket
from typing import Optional
from select import select
from Dhcp.packet import Packet
from Dhcp.opcodes import Opcodes
from Dhcp.message_type import MessageType
class Receivers:
@staticmethod
def discover_receiver(sock: socket, timeout: int = 5) -> Optional[Packet]:
"""
Waits for an DHCP discover packet, captures it and returns it
:param sock: socket from which to listen
:param timeout: amount of time to listen until gives up
:return: DHCP discover packet received or None if times out
"""
while True:
message_received, _, _ = select([sock], [], [], timeout)
packet = Packet(sock.recv(1024)) if message_received else None
if packet is None:
return None
if packet.opcode == Opcodes.REQUEST and packet.dhcp_message_type == MessageType.DISCOVER:
return packet
packet = None
@staticmethod
def offer_receiver(sock: socket, timeout: int = 5) -> Optional[Packet]:
"""
Waits for an DHCP offer packet, captures it and returns it
:param sock: socket from which to listen
:param timeout: amount of time to listen until gives up
:return: DHCP offer packet received or None if times out
"""
while True:
message_received, _, _ = select([sock], [], [], timeout)
packet = Packet(sock.recv(1024)) if message_received else None
if packet is None:
return None
if packet.opcode == Opcodes.REPLY and packet.dhcp_message_type == MessageType.OFFER:
return packet
packet = None
@staticmethod
def request_receiver(sock: socket, timeout: int = 5) -> Optional[Packet]:
"""
Waits for an DHCP request packet, captures it and returns it
:param sock: socket from which to listen
:param timeout: amount of time to listen until gives up
:return: DHCP request packet received or None if times out
"""
while True:
message_received, _, _ = select([sock], [], [], timeout)
packet = Packet(sock.recv(1024)) if message_received else None
if packet is None:
return None
if packet.opcode == Opcodes.REQUEST and packet.dhcp_message_type == MessageType.REQUEST:
return packet
packet = None
@staticmethod
def ack_receiver(sock: socket, timeout: int = 5) -> Optional[Packet]:
"""
Waits for an DHCP ack packet, captures it and returns it
:param sock: socket from which to listen
:param timeout: amount of time to listen until gives up
:return: DHCP ack packet received or None if times out
"""
while True:
message_received, _, _ = select([sock], [], [], timeout)
packet = Packet(sock.recv(1024)) if message_received else None
if packet is None:
return None
if packet.opcode == Opcodes.REPLY and packet.dhcp_message_type == MessageType.ACK:
return packet
packet = None
@staticmethod
def release_receiver(sock: socket, timeout: int = 5) -> Optional[Packet]:
"""
Waits for an DHCP ack packet, captures it and returns it
:param sock: socket from which to listen
:param timeout: amount of time to listen until gives up
:return: DHCP ack packet received or None if times out
"""
while True:
message_received, _, _ = select([sock], [], [], timeout)
packet = Packet(sock.recv(1024)) if message_received else None
if packet is None:
return None
if packet.opcode == Opcodes.REQUEST and packet.dhcp_message_type == MessageType.RELEASE:
return packet
packet = None
| StarcoderdataPython |
1602442 | import numpy as np
from deepthought.experiments.encoding.experiment_templates.base import NestedCVExperimentTemplate
class SVCBaseline(NestedCVExperimentTemplate):
    """Nested-CV baseline that skips real pretraining and classifies the
    raw (optionally channel-averaged) features with a linear SVC."""

    def pretrain_encoder(self, *args, **kwargs):
        """Return a pass-through "encoder": a function that loads the raw
        feature chunks for the requested trial indices (no training)."""
        def dummy_encoder_fn(indices):
            if type(indices) == np.ndarray:
                indices = indices.tolist() # ndarray is not supported as indices
            # read the chunk of data for the given indices
            state = self.full_hdf5.open()
            data = self.full_hdf5.get_data(request=indices, state=state)
            self.full_hdf5.close(state)
            # get only the features source
            source_idx = self.full_hdf5.sources.index('features')
            data = np.ascontiguousarray(data[source_idx])
            # apply optional channel mean
            if self.hyper_params['ch_mean'] is True:
                data = data.mean(axis=1) # bc01 format -> will result in b01 format
            return data
        return dummy_encoder_fn

    def run(self, verbose=False):
        """Run the nested-CV experiment with a single linear-SVC classifier.

        The import is local to avoid pulling in sklearn-backed modules at
        class-definition time.
        """
        from deepthought.experiments.encoding.classifiers.linear_svc import LinearSVCClassifierFactory
        super(SVCBaseline, self).run(classifiers=(('linear_svc', LinearSVCClassifierFactory()),), verbose=verbose)
| StarcoderdataPython |
3377040 | '''
configurations and schedule for network training
this implementation includes some kind fancy tools,
like prefetch_generator, tqdm and tensorboardx.
I also use logging to print information into log file
rather than print function.
'''
import argparse
import os
import time
import logging
import numpy as np
import torch
import torch.utils.data as Data
import torchvision
from torchvision import models
from torchvision import transforms
from torch import nn
from torch import optim
import torch.nn.functional as F
from prefetch_generator import BackgroundGenerator
from tensorboardX import SummaryWriter
from FMPNet import FMPNet
from ResNet import ResNet
from tqdm import tqdm
from criterion import LabelSmoothing
from utils import accuracy, time_stamp, init_params
from autoaugment import CIFAR10Policy
from cutout import Cutout
# Log to a file; this module-level logger is shared by the whole script.
logging.basicConfig(
    filename='logs/train.log',
    format='%(levelname)-10s %(asctime)s %(message)s',
    level=logging.INFO
)
log = logging.getLogger('train')
# Let cuDNN benchmark conv algorithms (fast for fixed input sizes).
torch.backends.cudnn.benchmark = True
# Fix RNG seeds (NumPy, CPU torch, current GPU) for reproducibility.
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Command-line hyperparameters; unused options from the original template
# are kept commented out below.
parser = argparse.ArgumentParser(description="Train a network for cifar10")
# parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet32',
#                     choices=model_names,
#                     help='model architecture: ' + ' | '.join(model_names) +
#                     ' (default: resnet32)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=180, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
                    metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
# NOTE(review): the help text says 5e-4 but the actual default is 1e-4.
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 5e-4)')
# parser.add_argument('--print-freq', '-p', default=50, type=int,
#                     metavar='N', help='print frequency (default: 20)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
# parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
#                     help='evaluate model on validation set')
# parser.add_argument('--pretrained', dest='pretrained', action='store_true',
#                     help='use pre-trained model')
parser.add_argument('--data-dir', dest='data_dir',
                    help='The directory of data',
                    default='./data', type=str)
parser.add_argument('--save-dir', dest='save_dir',
                    help='The directory used to save the trained models',
                    default='./checkpoint', type=str)
parser.add_argument('--save-every', dest='save_every',
                    help='Saves checkpoints at every specified number of epochs',
                    type=int, default=10)
args = parser.parse_args()
log.info('start training.')
# Data
log.info('==> Preparing data..')
# Earlier augmentation experiments, kept for reference:
# augment = transforms.RandomChoice(
#     [transforms.RandomAffine(degrees=2),
#      transforms.RandomCrop(32, padding=4)
#     ]
# )
# augment = transforms.RandomChoice(
#     [transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.8, 0.9), shear=0.9),
#      transforms.RandomCrop(32, padding=4)
#      transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1)
#     ]
# )
# transform_train = transforms.Compose([
#     transforms.RandomApply([augment], p=0.5),
#     transforms.RandomHorizontalFlip(),
#     transforms.ToTensor(),
#     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
# ])
# Training pipeline: pad-crop + flip + AutoAugment CIFAR-10 policy,
# then Cutout, normalized with the standard CIFAR-10 statistics.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4, fill=128), # fill parameter needs torchvision installed from source
    transforms.RandomHorizontalFlip(), CIFAR10Policy(),
    transforms.ToTensor(),
    # (https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py)
    Cutout(n_holes=1, length=16),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
# Test pipeline: normalization only.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# don't forget to change download after download
trainset = torchvision.datasets.CIFAR10(
    root='./data', train=True, download=False, transform=transform_train)
trainloader = Data.DataLoader(
    trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
# BUGFIX: drop_last is a DataLoader argument, not a Dataset argument;
# passing it to CIFAR10 raised TypeError. It is kept on the loader below.
testset = torchvision.datasets.CIFAR10(
    root='./data', train=False, download=False, transform=transform_test)
testloader = Data.DataLoader(
    testset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True)
def train():
    '''
    Train the network on CIFAR-10.

    Builds the model (optionally restoring a checkpoint), runs SGD with a
    hand-rolled step schedule, logs train/test metrics to TensorBoard, and
    periodically saves checkpoints and evaluates on the test set.
    TODO: early stopping
    '''
    log.info('==> Building model...')
    net = ResNet().double().cuda()
    # log.info(net)
    net = torch.nn.DataParallel(net)
    # load checkpoint if needed/ wanted
    start_n_iter = 0
    start_epoch = 0
    if args.resume:
        log.info("=> loading checkpoint '{}'".format(args.resume))
        # custom method for loading last checkpoint
        ckpt = torch.load(args.resume)
        net.load_state_dict(ckpt['net'])
        start_epoch = ckpt['epoch']
        start_n_iter = ckpt['n_iter']
        # optim.load_state_dict(ckpt['state_dict'])
        log.info("last checkpoint restored")
    else:
        init_params(net)
    # criterion = LabelSmoothing(10, 0.02).cuda()
    criterion = torch.nn.CrossEntropyLoss()
    optim = torch.optim.SGD(net.parameters(), lr=args.lr,
                            weight_decay=args.weight_decay, momentum=args.momentum)
    # optim.param_groups[0]['initial_lr'] = 0.001
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optim,
    #     milestones=[80, 120, 160], last_epoch=start_epoch - 1)
    # typically we use tensorboardX to keep track of experiments
    writer = SummaryWriter(log_dir='./logs')
    # now we start the main loop
    n_iter = start_n_iter
    loss_ = 1
    test_acc = 0
    for epoch in range(start_epoch, args.epochs):
        # use prefetch_generator and tqdm for iterating through data
        pbar = tqdm(enumerate(BackgroundGenerator(trainloader)),
                    total=len(trainloader))
        start_time = time.time()
        # if loss_ < 0.1:
        net.train()
        # Manual LR schedule: warm up x10 at epoch 2, decay x0.1 twice.
        if epoch == 2:
            for param in optim.param_groups:
                param['lr'] *= 10
        if epoch == 90: # about 35k iterations
            for param in optim.param_groups:
                param['lr'] *= 0.1
        if epoch == 135: # about 50k iterations
            for param in optim.param_groups:
                param['lr'] *= 0.1
        log.info('start epoch: ' + str(epoch) +
                 ' |current lr {:.5e}'.format(optim.param_groups[0]['lr']))
        # for loop going through dataset
        for i, data in pbar:
            # data preparation
            X, target = data
            X = X.double().cuda()
            target = target.cuda()
            # It's very good practice to keep track of preparation time and
            # computation time using tqdm to find any issues in your dataloader
            prepare_time = time.time() - start_time
            # forward and backward pass
            out = net(X)
            loss = criterion(out, target)
            loss_ = loss.item()
            optim.zero_grad()
            loss.backward()
            optim.step()
            # update tensorboardX every 50 iterations
            if n_iter % 50 == 0:
                acc = accuracy(out, target)
                log.info('iter: %3d | loss: %6.3f | accuracy: %6.3f'
                         % (n_iter, loss_, acc))
                writer.add_scalars('loss', {'train': loss_}, n_iter)
                writer.add_scalars('acc', {'train': acc}, n_iter)
            n_iter += 1
            # compute computation time and *compute_efficiency*
            process_time = time.time() - start_time - prepare_time
            pbar.set_description("%2.1f|%2.f|l:%6.3f|ep%3d" % (
                prepare_time, process_time, loss_, epoch))
            start_time = time.time()
        # change lr if needed
        # lr_scheduler.step()
        x = 5
        # save checkpoint every 2*x epochs
        if epoch % (2*x) == (2*x) - 1:
            state = {
                'net': net.state_dict(),
                'epoch': epoch,
                'n_iter': n_iter
            }
            torch.save(state,
                       args.save_dir + '/' + time_stamp() + '_' +
                       str(epoch) + '_' + '%.4f'%(test_acc) + '.pkl')
        # do a test pass every x epochs
        if epoch % x == x - 1:
            # BUGFIX: the context manager is torch.no_grad();
            # torch.no_grads() raised AttributeError at the first eval.
            with torch.no_grad():
                # bring models to evaluation mode
                net.eval()
                #do some tests
                pbar = tqdm(enumerate(BackgroundGenerator(testloader)),
                            total=len(testloader))
                cnt = 0
                test_acc = 0
                for i, data in pbar:
                    X, target = data
                    X = X.double().cuda()
                    target = target.cuda()
                    out = net(X)
                    test_loss = criterion(out, target)
                    # BUGFIX: log the *test* loss; the original read the
                    # last training-batch loss here.
                    test_loss_ = test_loss.item()
                    test_acc += accuracy(out, target)
                    cnt += 1
                test_acc /= cnt
                log.info('test accuracy: %6.3f' % (test_acc))
                writer.add_scalars('loss', {'test': test_loss_}, n_iter)
                writer.add_scalars('acc', {'test': test_acc}, n_iter)
    writer.close()
# Entry point: only train when executed as a script, not on import.
if __name__ == '__main__':
    train()
| StarcoderdataPython |
55648 | <gh_stars>1-10
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
class base(nn.Module):
    """LeNet-style CNN for 3x32x32 CIFAR images, producing 10 logits."""

    def __init__(self):
        super(base, self).__init__()
        # Two conv/pool stages: 3x32x32 -> 6x14x14 -> 16x5x5.
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        # Classifier head on the flattened 16*5*5 feature map.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return class logits of shape (N, 10)."""
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.view(-1, 16 * 5 * 5)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
class VGG3(nn.Module):
    """Three-block VGG-style CNN for CIFAR-10 (3x32x32 -> 10 logits)."""

    def __init__(self):
        super(VGG3, self).__init__()
        # Three conv->ReLU->maxpool blocks: spatial 32 -> 16 -> 8 -> 4,
        # ending at 128 channels (128 * 4 * 4 = 2048 features).
        self.feature = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Fully connected head with dropout.
        self.classifier = nn.Sequential(
            nn.Linear(2048, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 10)
        )

    def forward(self, x):
        """Return class logits of shape (N, 10)."""
        feats = self.feature(x)
        return self.classifier(feats.view(feats.size(0), -1))
class VGG3_batchNorm(nn.Module):
    """VGG3 variant with batch normalization after every convolution."""

    def __init__(self):
        super(VGG3_batchNorm, self).__init__()
        # Conv->BN->ReLU->maxpool blocks: spatial 32 -> 16 -> 8 -> 4,
        # ending at 128 channels (2048 flattened features).
        self.feature = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Smaller head than plain VGG3 (1024 hidden units).
        self.classifier = nn.Sequential(
            nn.Linear(2048, 1024),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(1024, 10)
        )

    def forward(self, x):
        """Return class logits of shape (N, 10)."""
        feats = self.feature(x)
        return self.classifier(feats.view(feats.size(0), -1))
class VGG3_final(nn.Module):
    """VGG3 variant with batch norm plus increasing dropout per block."""

    def __init__(self):
        super(VGG3_final, self).__init__()
        # Conv->BN->ReLU->maxpool->dropout blocks with dropout rates
        # 0.2 / 0.3 / 0.4; spatial 32 -> 16 -> 8 -> 4, 128 channels.
        self.feature = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout(0.2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout(0.3),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout(0.4)
        )
        # Head with the heaviest dropout before the final projection.
        self.classifier = nn.Sequential(
            nn.Linear(2048, 1024),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(1024, 10)
        )

    def forward(self, x):
        """Return class logits of shape (N, 10)."""
        feats = self.feature(x)
        return self.classifier(feats.view(feats.size(0), -1))
def train(net, optimizer, criterion, trainLoader, validLoader, epoch, device):
    """Train *net* for *epoch* epochs, validating after each one.

    :param net: model; must already live on *device*
    :param optimizer: optimizer over net.parameters()
    :param criterion: loss function (e.g. CrossEntropyLoss)
    :param trainLoader: iterable of (data, labels) batches for training
    :param validLoader: iterable of (data, labels) batches for validation
    :param epoch: number of epochs to run
    :param device: device the batches are moved to
    :return: (accList, lossList) -- index 0 holds per-epoch training
        values, index 1 the validation values
    """
    print("Training {} rounds.\n".format(epoch))
    accList = [[], []]
    lossList = [[], []]
    for i in range(epoch):
        loss = 0
        for batch in trainLoader:
            data, labels = batch
            data, labels = data.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(data.float())
            batchLoss = criterion(outputs, labels)
            batchLoss.backward()
            optimizer.step()
            loss += batchLoss.item()
        lossList[0].append(loss / len(trainLoader))
        accList[0].append(test(net, trainLoader, device))
        # validation after each epoch
        loss = 0
        for batch in validLoader:
            data, labels = batch
            # BUGFIX: validation batches were never moved to *device*,
            # which crashes as soon as the model lives on the GPU.
            data, labels = data.to(device), labels.to(device)
            with torch.no_grad():
                outputs = net(data.float())
                batchLoss = criterion(outputs, labels)
            loss += batchLoss.item()
        lossList[1].append(loss / len(validLoader))
        accList[1].append(test(net, validLoader, device))
        print('epoch:{}, trainLoss:{}, validLoss:{}'.format(i+1, lossList[0][-1], lossList[1][-1]))
        print('epoch:{}, TrainAcc:{}, validAcc:{}'.format(i+1, accList[0][-1], accList[1][-1]))
    return accList, lossList
def test(net, testLoader, device):
    """Return the accuracy of *net* over *testLoader*, in percent."""
    correct = 0
    seen = 0
    with torch.no_grad():
        for data, labels in testLoader:
            data, labels = data.to(device), labels.to(device)
            logits = net(data.float())
            _, predicted = torch.max(logits.data, 1)
            seen += len(labels)
            correct += (predicted == labels).sum().item()
    return correct / seen * 100
3356652 |
# %%
import numpy as np
from matplotlib import pyplot as plt
import copy,os
import pyhsmm
from pyhsmm.util.text import progprint_xrange
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib import font_manager
import matplotlib as mpl
zhfont1 = font_manager.FontProperties(fname='SimHei.ttf')
from mpl_toolkits.axes_grid1 import make_axes_locatable
from collections import Counter
print("this file contains hdp-hsmm in action")
# %%
def loading_data(path):
    """Load one car-following CSV into [[range, rangerate, ax]].

    :param path: path to a CSV with 'range', 'rangerate' and 'ax' columns
    :return: single-element list holding [delta_d, delta_v, acc] arrays
        (kept as a list so multiple files can be concatenated later)
    """
    # Local import so the module can be used without pandas installed.
    # (The original also imported os and kept several dead debug lines;
    # both removed.)
    import pandas as pd
    data = pd.read_csv(path)
    delta_d = np.array(data['range'])      # inter-vehicle distance
    delta_v = np.array(data['rangerate'])  # relative speed
    acc = np.array(data['ax'])             # longitudinal acceleration
    return [[delta_d, delta_v, acc]]
###############data is np.array
datas_orig = loading_data('car-following/10106/2.csv')  # one demo trajectory
# NOTE(review): `iter` shadows the builtin; left unchanged because the
# model functions below also take it as a parameter name.
iter = 200  # Gibbs resampling sweeps
kappa_0=0.75  # NIW prior: strength of the mean prior
init=2.  # initial-state Dirichlet concentration
# %%
####################
###normalize data###
####################
def initializing(datas):
    """Z-score each channel (distance, speed, acceleration) across all
    trajectories.

    :param datas: list of (3, T) arrays [delta_d, delta_v, acc]
    :return: (list of normalized (T, 3) arrays,
              per-channel [mean, std] statistics)
    """
    # Pool every channel over all trajectories to get global statistics.
    pooled = [[], [], []]
    for traj in datas:
        traj = np.array(traj)
        print(np.shape(traj))
        for channel in range(3):
            pooled[channel].extend(list(traj[channel].T))
    data_info = [[np.mean(ch), np.std(ch)] for ch in pooled]

    # Normalize each trajectory channel-wise and transpose to (T, 3).
    normalized = []
    for traj in datas:
        traj = np.array(traj)
        scaled = [
            ((traj[channel] - data_info[channel][0]) / data_info[channel][1]).T
            for channel in range(3)
        ]
        normalized.append(np.array(scaled).T)
    return normalized, data_info
datas,data_info = initializing(datas_orig)  # normalized (T, 3) sequences
# %%
print(np.shape(datas[0]))  # sanity check: (timesteps, 3)
# %%
######################
###hdp-hsmm process###
######################
def hdp_hsmm(datas,iter):
    """Fit a weak-limit HDP-HSMM to the normalized trajectories.

    Uses the module-level globals kappa_0 (NIW mean-prior strength) and
    init (initial-state concentration).

    :param datas: list of (T, 3) arrays (distance, speed, acceleration)
    :param iter: number of Gibbs resampling sweeps
    :return: (fitted pyhsmm model, per-sweep log-likelihoods)
    """
    #limit truncation level
    Nmax = 25
    #hyperparameters
    obs_dim = datas[0].shape[1]
    #print(obs_dim) (3)
    # Pool all channels to build an empirical covariance for the NIW prior.
    d=[]
    v=[]
    a=[]
    for data in datas:
        d=d+list(data.T[0])
        v=v+list(data.T[1])
        a=a+list(data.T[2])
    data_all=np.array([np.array(d),np.array(v),np.array(a)])
    obs_hypparams = {'mu_0':np.zeros(obs_dim),
                'sigma_0':np.cov(data_all),
                'kappa_0':kappa_0,
                'nu_0':obs_dim+2}
    # Poisson duration prior (mean duration around alpha_0/beta_0 = 40).
    dur_hypparams = {'alpha_0':2*40,
                 'beta_0':2}
    #generate distributions
    obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
    dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(Nmax)]
    #defining model
    posteriormodel = pyhsmm.models.WeakLimitHDPHSMM(
        alpha_a_0=1.,alpha_b_0=1.,
        gamma_a_0=1.,gamma_b_0=1.,
        init_state_concentration=init,
        obs_distns=obs_distns,
        dur_distns=dur_distns
        )
    #return posteriormodel
    #def add_data(model,data,trunc=80):
    # Durations are truncated at 80 steps to keep resampling tractable.
    for i in range(len(datas)):
        posteriormodel.add_data(datas[i],trunc=80)
    #return model
    #running model
    #def running_model(model,iteration=5):
    logs = []
    for idx in progprint_xrange(iter): ###defining iteration number,normal 150
        posteriormodel.resample_model()
        log = posteriormodel.log_likelihood()
        logs.append(log)
    #plotting_logs(logs)
    return posteriormodel,logs
###drawing log figures
hdp_hsmm_model,hdp_hsmm_logs= hdp_hsmm(datas,iter)
# %%
#########################
#      hdp-hmm process  #
#########################
def hdp_hmm(datas,iter):
    """Fit a (non-sticky) weak-limit HDP-HMM baseline to the trajectories.

    Uses the module-level globals kappa_0 and init; same observation prior
    as hdp_hsmm but no explicit duration model.

    :param datas: list of (T, 3) arrays
    :param iter: number of Gibbs resampling sweeps
    :return: (fitted pyhsmm model, per-sweep log-likelihoods)
    """
    # Set the weak limit truncation level
    Nmax = 25
    # Pool all channels for the empirical covariance of the NIW prior.
    d=[]
    v=[]
    a=[]
    for data in datas:
        d=d+list(data.T[0])
        v=v+list(data.T[1])
        a=a+list(data.T[2])
    data_all=np.array([np.array(d),np.array(v),np.array(a)])
    obs_dim = datas[0].shape[1]
    obs_hypparams = {'mu_0':np.zeros(obs_dim),
                    'sigma_0':np.cov(data_all),
                    'kappa_0':kappa_0,
                    'nu_0':obs_dim+2}
    ### HDP-HMM without the sticky bias
    obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
    posteriormodel = pyhsmm.models.WeakLimitHDPHMM(alpha_a_0=1.,alpha_b_0=1.,gamma_a_0=1.,gamma_b_0=1., init_state_concentration=init, obs_distns=obs_distns)
    for data in datas:
        posteriormodel.add_data(data)
    logs = []
    for idx in progprint_xrange(iter):
        posteriormodel.resample_model()
        log = posteriormodel.log_likelihood()
        logs.append(log)
    return posteriormodel,logs
hdp_hmm_model,hdp_hmm_logs = hdp_hmm(datas,iter)
# %%
def hdp_s_hmm(datas,iter):
    """Fit a sticky weak-limit HDP-HMM to the trajectories.

    The stickiness kappa is drawn from a GammaCompoundDirichlet prior.
    Uses the module-level globals kappa_0 and init.

    :param datas: list of (T, 3) arrays
    :param iter: maximum number of Gibbs resampling sweeps (may stop early)
    :return: (fitted pyhsmm model, per-sweep log-likelihoods)
    """
    # Set the weak limit truncation level
    Nmax = 25
    # Pool all channels for the empirical covariance of the NIW prior.
    d=[]
    v=[]
    a=[]
    for data in datas:
        d=d+list(data.T[0])
        v=v+list(data.T[1])
        a=a+list(data.T[2])
    data_all=np.array([np.array(d),np.array(v),np.array(a)])
    # and some hyperparameters
    obs_dim = datas[0].shape[1]
    obs_hypparams = {'mu_0':np.zeros(obs_dim),
                    'sigma_0':np.cov(data_all),
                    'kappa_0':kappa_0,
                    'nu_0':obs_dim+2}
    from pybasicbayes.distributions.multinomial import GammaCompoundDirichlet
    kappa = GammaCompoundDirichlet(Nmax,100,1).concentration
    obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
    posteriormodel = pyhsmm.models.WeakLimitStickyHDPHMM(
        kappa=float(kappa),
        alpha_a_0=1.,alpha_b_0=1.,
        gamma_a_0=1.,gamma_b_0=1.,
        init_state_concentration=init,
        obs_distns=obs_distns)
    for data in datas:
        posteriormodel.add_data(data)
    logs=[]
    for idx in progprint_xrange(iter):
        posteriormodel.resample_model()
        log=posteriormodel.log_likelihood()
        logs.append(log)
        # Heuristic early stop: likelihood near zero or barely changed
        # versus two sweeps ago. NOTE(review): the extra progprint_xrange
        # call below only creates a generator (its output is unused) --
        # presumably meant to reprint progress; confirm intent.
        if abs(log)<1e-3 or (idx>1 and abs(log-logs[idx-2])<1e-6):
            progprint_xrange(idx+1)
            break
    return posteriormodel,logs
hdp_s_hmm_model,hdp_s_hmm_logs = hdp_s_hmm(datas,iter)
# %%
#########################waiting for downing
def plotting_logs(all_logs):
    """Plot per-sweep log-likelihood curves for the three fitted models.

    :param all_logs: [hdp_hmm_logs, sticky_hdp_hmm_logs, hdp_hsmm_logs]
    """
    plt.figure(figsize=(6,5))
    # BUGFIX: each series now gets its own x range -- the sticky HDP-HMM
    # can stop early, so sharing x = arange(len(all_logs[0])) raised a
    # length-mismatch ValueError in plt.plot.
    series_styles = [('HDP-HMM', 'red'),
                     ('sticky HDP-HMM', 'blue'),
                     ('HDP-HSMM', 'green')]
    for logs, (label, color) in zip(all_logs, series_styles):
        plt.plot(np.arange(len(logs)), logs, color=color, label=label)
    plt.title('对数似然',fontsize=20,fontproperties=zhfont1)
    plt.xlabel('重复采样次数',fontsize=18,fontproperties=zhfont1)
    plt.ylabel('对数似然值',fontsize=18,fontproperties=zhfont1)
    plt.legend(fontsize=17)
    plt.tick_params(labelsize=15)
plotting_logs([hdp_hmm_logs,hdp_s_hmm_logs,hdp_hsmm_logs])
# %%
###############################
###drawing state and feature###
###############################
from matplotlib.lines import Line2D
def show_figures(model):
    """Draw the colored state-sequence figure and the feature figure."""
    plotting_state(model) #written in models.py
    # NOTE(review): plotting_feature is not defined in this file chunk --
    # confirm it exists before calling show_figures.
    plotting_feature(model) #defined in this file
    plt.show()
def plotting_state(model,fig=None,plot_slice=slice(None),update=False,draw=True):
    """Plot every state sequence of *model*, one sub-axes each, with
    colored state bands; returns the flat list of data-value artists."""
    #update = upfeature_ax, date and (fig is not None)
    fig = fig if fig else model.make_figure()
    stateseq_axs = _get_state_axes(model,fig)
    assert len(stateseq_axs) == len(model.states_list)
    # One sub-plot per observed sequence; collect all artists flat.
    sp2_artists = \
        [artist for s,ax in zip(model.states_list,stateseq_axs)
            for artist in plot_stateseq(model,s,ax,plot_slice,update=update,draw=False)]
    if draw: plt.draw()
    plt.title('时间序列划分',fontproperties=zhfont1)
    return sp2_artists
def plotting_state_plain(model,fig=None,plot_slice=slice(None),update=False,draw=True):
    """Plain variant of plotting_state (no colored background bands);
    returns the flat list of data-value artists."""
    #update = upfeature_ax, date and (fig is not None)
    fig = fig if fig else model.make_figure()
    stateseq_axs = _get_state_axes(model,fig)
    assert len(stateseq_axs) == len(model.states_list)
    sp3_artists = \
        [artist for s,ax in zip(model.states_list,stateseq_axs)
            for artist in plot_stateseq_plain(model,s,ax,plot_slice,update=update,draw=False)]
    if draw: plt.draw()
    plt.title('各参数的时间序列',fontproperties=zhfont1)
    return sp3_artists
def _get_state_axes(model,fig):
    """Return one axes per sequence in model.states_list, creating them
    and caching the list on *fig* the first time."""
    #sz = self._fig_sz
    if hasattr(fig,'_stateseq_axs'):
        return fig._stateseq_axs
    else:
        if len(model.states_list) <= 2:
            gs = GridSpec(4,1)
            #feature_ax = plt.subplot(gs[:sz,:])
            stateseq_axs = [plt.subplot(gs[idx]) for idx in range(len(model.states_list))]
        else:
            # Many sequences: stack them in the right-hand column.
            gs = GridSpec(1,2)
            sgs = GridSpecFromSubplotSpec(len(model.states_list),1,subplot_spec=gs[1])
            #feature_ax = plt.subplot(gs[0])
            stateseq_axs = [plt.subplot(sgs[idx]) for idx in range(len(model.states_list))]
        for ax in stateseq_axs:
            ax.grid('off')
        # Cache so repeated calls reuse the same axes.
        fig._stateseq_axs = stateseq_axs
        return stateseq_axs
def plot_stateseq(model,s,ax=None,plot_slice=slice(None),update=False,draw=True):
    """Plot one state sequence *s* (index or states object) on *ax*:
    colored pcolor background plus the data-values overlay.

    NOTE(review): _plot_stateseq_data_values is not defined in this file
    chunk -- confirm it exists elsewhere.
    """
    s = model.states_list[s] if isinstance(s,int) else s
    ax = ax if ax else plt.gca()
    state_colors = model._get_colors(scalars=True)
    _plot_stateseq_pcolor(model,s,ax,state_colors,plot_slice,update)
    data_values_artist = _plot_stateseq_data_values(model,s,ax,state_colors,plot_slice,update)
    if draw: plt.draw()
    return [data_values_artist]
def plot_stateseq_plain(model,s,ax=None,plot_slice=slice(None),update=False,draw=True):
    """Plain variant of plot_stateseq: no draw flag handling, disables
    the grid and shows the figure immediately.

    NOTE(review): _plot_stateseq_data_values is not defined in this file
    chunk -- confirm it exists elsewhere.
    """
    s = model.states_list[s] if isinstance(s,int) else s
    ax = ax if ax else plt.gca()
    state_colors = model._get_colors(scalars=True)
    ax.set_xlim((0,len(model.datas[0])))
    _plot_stateseq_pcolor_plain(model,s,ax,state_colors,plot_slice,update)
    data_values_artist = _plot_stateseq_data_values(model,s,ax,state_colors,plot_slice,update)
    plt.grid(False)
    plt.show()
    return [data_values_artist]
def _plot_stateseq_pcolor(model,s,ax=None,state_colors=None,
        plot_slice=slice(None),update=False,color_method=None):
    """Render the state sequence as a run-length colored background mesh.

    De-normalises the data via the module-level ``data_info`` table (appears to
    hold a per-channel (offset, scale) pair -- TODO confirm) and sets up a twin
    y-axis: left for relative distance, right for relative speed/acceleration.
    """
    from pyhsmm.util.general import rle
    s = model.states_list[s] if isinstance(s,int) else s
    ax = ax if ax else plt.gca()
    state_colors = state_colors if state_colors \
        else model._get_colors(scalars=True,color_method=color_method)
    data = s.data[plot_slice].T
    # Undo normalisation: value * scale + offset per channel.
    real_data = np.array(list(data[idx]*data_info[idx][1]+data_info[idx][0] for idx in range(len(data_info))))
    stateseq = s.stateseq[plot_slice]
    # Run-length encode so each constant-state run becomes one mesh cell.
    stateseq_norep, durations = rle(stateseq)
    # Distance limits snapped to multiples of 5; speed/accel limits to integers.
    datamin, datamax = np.floor(real_data[0].min()/5)*5, np.ceil(real_data[0].max()/5)*5
    datamin_,datamax_ = np.floor(min(real_data[1].min(),real_data[2].min())),np.ceil(max(real_data[1].max(),real_data[2].max()))
    x, y = np.hstack((0,durations.cumsum())), np.array([datamin,datamax])
    C = np.atleast_2d([state_colors[state] for state in stateseq_norep])
    s._pcolor_im = ax.pcolormesh(x,y,C,vmin=0,vmax=1,alpha=0.3)
    # Right-hand axis for speed/acceleration in physical units.
    ax_ = ax.twinx()
    ax.set_xlim((0,len(stateseq)))
    ax.set_ylim((datamin,datamax))
    # Axis label (Chinese): "following time / s"; samples appear to be 0.1 s
    # apart (100 samples relabelled as 10) -- TODO confirm sampling period.
    ax.set_xlabel('跟车时间/s',loc='right',fontproperties=zhfont1)
    ax.set_xticks(list(np.arange(0,len(data.T),100)))
    ax.set_xticklabels(list(np.arange(0,len(data.T)/100)*10))
    ax.set_yticks(list(np.arange(datamin,datamax+1,10)))
    ax.set_yticklabels(list(np.arange(datamin,datamax+1,10)))
    # "relative distance / [m]"
    ax.set_ylabel('相对距离/[m]',fontproperties=zhfont1)
    ax_.set_ylim((datamin_,datamax_))
    ax_.set_yticks(list(np.arange(datamin_,datamax_+1,2)))
    ax_.set_yticklabels(list(np.arange(datamin_,datamax_+1,2)))
    # "relative speed [m/s], acceleration [m/s^2]"
    ax_.set_ylabel('相对速度[m/s],加速度'+r'$[m/s^2]$',fontproperties=zhfont1)
def _plot_stateseq_pcolor_plain(model,s,ax=None,state_colors=None,
        plot_slice=slice(None),update=False,color_method=None):
    """Set up axes limits/labels like _plot_stateseq_pcolor but draw no mesh.

    Despite the name, no pcolormesh is created here -- only axis scaling,
    ticks and labels for the distance (left) and speed/accel (right) axes.
    """
    from pyhsmm.util.general import rle
    s = model.states_list[s] if isinstance(s,int) else s
    ax = ax if ax else plt.gca()
    state_colors = state_colors if state_colors \
        else model._get_colors(scalars=True,color_method=color_method)
    data = s.data[plot_slice].T
    # Undo normalisation: value * scale + offset per channel (data_info is a
    # module-level table -- TODO confirm layout).
    real_data = np.array(list(data[idx]*data_info[idx][1]+data_info[idx][0] for idx in range(len(data_info))))
    datamin, datamax = np.floor(real_data[0].min()/5)*5, np.ceil(real_data[0].max()/5)*5
    datamin_,datamax_ = np.floor(real_data[1].min()),np.ceil(real_data[1].max())
    datamin__,datamax__=np.floor(real_data[2].min()),np.ceil(real_data[2].max())
    plt.xlim(0,len(data[0]))
    ax.set_xlim((0,len(data[0])))
    ax.set_ylim((datamin,datamax))
    # Axis label (Chinese): "following time / s".
    ax.set_xlabel('跟车时间/s',loc='right',fontproperties=zhfont1)
    ax.set_xticks(list(np.arange(0,len(data.T),100)))
    ax.set_xticklabels(list(np.arange(0,len(data.T)/100)*10))
    ax.set_yticks(list(np.arange(datamin,datamax+1,10)))
    ax.set_yticklabels(list(np.arange(datamin,datamax+1,10)))
    # "relative distance / [m]"
    ax.set_ylabel('相对距离/[m]',fontproperties=zhfont1)
    # Right-hand twin axis scaled to the speed channel only (note: the
    # acceleration limits datamin__/datamax__ are computed but unused).
    ax_ = ax.twinx()
    ax_.set_xlim((0,len(data[0])))
    ax_.set_ylim((datamin_,datamax_))
    ax_.set_yticks(list(np.arange(datamin_,datamax_+1,2)))
    ax_.set_yticklabels(list(np.arange(datamin_,datamax_+1,2)))
    # "relative speed [m/s], acceleration [m/s^2]"
    ax_.set_ylabel('相对速度[m/s],加速度'+r'$[m/s^2]$',fontproperties=zhfont1)
def _plot_stateseq_data_values(model,s,ax,state_colors,plot_slice,update):
    """Draw (or recolor on update) the three de-normalised data channels as a LineCollection.

    Channels 1 and 2 (speed, acceleration) are rescaled into the distance
    channel's y-range so all three traces share the left axis; constant color
    values (200/100/0) distinguish the channels through the colormap.
    """
    from matplotlib.collections import LineCollection
    from pyhsmm.util.general import AR_striding, rle
    data = s.data[plot_slice]
    stateseq = s.stateseq[plot_slice]
    # Undo normalisation: value * scale + offset per channel.
    real_data = np.array(list(data.T[idx]*data_info[idx][1]+data_info[idx][0] for idx in range(len(data_info))))
    colorseq = np.tile(np.array([state_colors[state] for state in stateseq[:-1]]),data.shape[1])
    datamin, datamax = np.floor(real_data[0].min()/5)*5, np.ceil(real_data[0].max()/5)*5
    datamin_,datamax_ = np.floor(min(real_data[1].min(),real_data[2].min())),np.ceil(max(real_data[1].max(),real_data[2].max()))
    draw_data = list()
    draw_data.append(real_data[0])
    # Affine map from the speed/accel range onto the distance range.
    mid,whole = (datamin+datamax)/2.,(datamax-datamin)/2.
    mid_,whole_ = (datamin_+datamax_)/2.,(datamax_-datamin_)/2.
    draw_data.append((real_data[1]-mid_)/whole_*whole+mid)
    draw_data.append((real_data[2]-mid_)/whole_*whole+mid)
    draw_data = np.array(draw_data)
    if update and hasattr(s,'_data_lc'):
        # Artist already exists: only refresh the color array.
        s._data_lc.set_array(colorseq)
    else:
        ts = np.arange(len(stateseq))
        # Build per-sample 2-point segments (t, value) for each channel and
        # stack the three channels into one collection.
        segments = np.vstack(
            [AR_striding(np.hstack((ts[:,None], scalarseq[:,None])),1).reshape(-1,2,2)
                for scalarseq in draw_data])
        lc = s._data_lc = LineCollection(segments)
        one_list = np.ones(len(stateseq))
        z =[500,100,0]
        # One constant colormap value per channel so the traces are visually distinct.
        new_color = np.array(list(one_list*200)+list(one_list*100)+list(one_list*0))
        lc.set_array(new_color)
        lc.set_linewidth(2)
        ax.add_collection(lc)
        ax.set_xlim(0,len(stateseq))
        ax.autoscale()
        # Legend (Chinese): "relative distance", "relative speed", "acceleration".
        proxies = [make_proxy(item,lc) for item in z]
        ax.legend(proxies,['相对距离','相对速度','加速度'],prop=zhfont1,bbox_to_anchor=(0.8,-0.2),ncol=3)
    return s._data_lc
def make_proxy(zvalue, scalar_mappable, **kwargs):
    """Build a legend proxy: a unit Line2D colored via the mappable's colormap at *zvalue*."""
    proxy_color = scalar_mappable.cmap(zvalue)
    return Line2D([0, 1], [0, 1], color=proxy_color, **kwargs)
def plotting_feature(model,fig=None,plot_slice=slice(None),update=False,draw=True):
    """Render the 3D/2D feature scatter views for *model* and return their artists.

    Note: ``fig``, ``plot_slice`` and ``draw`` are accepted for interface
    symmetry with the other plotting helpers but are not forwarded here.
    """
    update = update and (fig is not None)
    sp1_artists = _plot_3d_data_scatter(model,state_colors=model._get_colors(scalars=True))
    plt.draw()
    return sp1_artists
def _plot_3d_data_scatter(model,ax=None,state_colors=None,plot_slice=slice(None),update=False):
    """Scatter the de-normalised samples colored by state: one 3D view plus three 2D projections.

    Axes (Chinese labels): relative distance [m], relative speed [m/s],
    acceleration [m/s^2].  Returns the list of scatter artists created.
    """
    data = np.array(model.datas).T
    # Undo normalisation: value * scale + offset per channel (data_info is a
    # module-level table -- TODO confirm layout).
    real_data = np.array(list(data[idx]*data_info[idx][1]+data_info[idx][0] for idx in range(len(data_info)))).reshape(len(data),len(data[0]))
    # Per-channel display limits: distance snapped to 5 m, others to integers.
    datamin = [np.floor(real_data[0].min()/5)*5,np.floor(real_data[1].min()),np.floor(real_data[2].min())]
    datamax = [np.ceil(real_data[0].max()/5)*5,np.ceil(real_data[1].max()),np.ceil(real_data[2].max())]
    fig = plt.figure(figsize=(12,12))
    fontsize=20
    #plt.suptitle('按照参数种类表示',fontsize=23)
    # Top-left: 3D scatter of (distance, speed, acceleration).
    ax1 = fig.add_subplot(221,projection='3d')
    plt.tick_params(labelsize=fontsize-10)
    ax1.set_xlabel('相对距离[m]',fontsize=fontsize,fontproperties=zhfont1)
    ax1.set_xlim((datamin[0],datamax[0]))
    ax1.set_xticks(list(np.arange(datamin[0],datamax[0]+1,10)))
    ax1.set_ylabel('相对速度[m/s]',fontsize=fontsize,fontproperties=zhfont1)
    ax1.set_ylim((datamin[1],datamax[1]))
    ax1.set_yticks(list(np.arange(datamin[1],datamax[1])))
    ax1.set_zlabel('加速度'+r'$[m/s^2]$',fontsize=fontsize,fontproperties=zhfont1)
    ax1.set_zlim((datamin[2],datamax[2]))
    ax1.set_zticks(list(np.arange(datamin[2],datamax[2],0.5)))
    state_colors = state_colors if state_colors \
        else model._get_colors(scalars=True)
    artists = []
    for s, data in zip(model.states_list,model.datas):
        data = data[plot_slice]
        colorseq = [state_colors[state] for state in s.stateseq[plot_slice]]
        if update and hasattr(s,'_data_scatter'):
            # Existing artist: move the points and refresh colors in place.
            s._data_scatter.set_offsets(data[:,:2])
            s._data_scatter.set_color(colorseq)
        else:
            s._data_scatter = ax1.scatter3D(real_data[0],real_data[1],real_data[2],c=colorseq,s=120)
        artists.append(s._data_scatter)
    # The 2D projections below reuse `colorseq` from the last loop iteration.
    # Top-right: distance vs speed.
    ax2 = plt.subplot(2,2,2)
    plt.tick_params(labelsize=fontsize-5)
    artists.append(ax2.scatter(real_data[0],real_data[1],c=colorseq,s=120))
    ax2.set_xlabel('相对距离[m]',fontsize=fontsize,fontproperties=zhfont1)
    ax2.set_xlim((datamin[0],datamax[0]))
    ax2.set_xticks(list(np.arange(datamin[0],datamax[0]+1,10)))
    ax2.set_ylabel('相对速度[m/s]',fontsize=fontsize,fontproperties=zhfont1)
    ax2.set_ylim((datamin[1],datamax[1]))
    ax2.set_yticks(list(np.arange(datamin[1],datamax[1]+0.01)))
    plt.grid(True)
    # Bottom-left: acceleration vs speed.
    ax3 = plt.subplot(2,2,3)
    plt.tick_params(labelsize=fontsize-5)
    artists.append(ax3.scatter(real_data[2],real_data[1],c=colorseq,s=120))
    ax3.set_xlabel('加速度'+r'$[m/s^2]$',fontsize=fontsize,fontproperties=zhfont1)
    ax3.set_xlim((datamin[2],datamax[2]))
    ax3.set_xticks(list(np.arange(datamin[2],datamax[2]+0.01,0.5)))
    ax3.set_ylabel('相对速度[m/s]',fontsize=fontsize,fontproperties=zhfont1)
    ax3.set_ylim((datamin[1],datamax[1]))
    ax3.set_yticks(list(np.arange(datamin[1],datamax[1]+0.01)))
    plt.grid(True)
    # Bottom-right: distance vs acceleration.
    ax4 = plt.subplot(2,2,4)
    plt.tick_params(labelsize=fontsize-5)
    artists.append(ax4.scatter(real_data[0],real_data[2],c=colorseq,s=120))
    ax4.set_xlabel('相对距离[m]',fontsize=fontsize,fontproperties=zhfont1)
    ax4.set_xlim((datamin[0],datamax[0]))
    ax4.set_xticks(list(np.arange(datamin[0],datamax[0]+1,10)))
    ax4.set_ylabel('加速度'+r'$[m/s^2]$',fontsize=fontsize,fontproperties=zhfont1)
    ax4.set_ylim((datamin[2],datamax[2]))
    ax4.set_yticks(list(np.arange(datamin[2],datamax[2]+0.01,0.5)))
    plt.grid(True)
    plt.subplots_adjust(wspace=0.4,hspace=0.3)
    return artists
# Render the state-sequence overview for the HDP-HMM fit, then the full figure
# set for each of the three fitted models (defined earlier in the file).
plotting_state_plain(hdp_hmm_model)
show_figures(hdp_hmm_model)
show_figures(hdp_s_hmm_model)
show_figures(hdp_hsmm_model)
# %%
class get_states_list():
    """Lightweight stand-in for a pyhsmm states object: only carries a state sequence.

    The plotting helpers above read just the ``stateseq`` attribute.
    """
    def __init__(self,seq):
        self.stateseq=seq
class k_means():
    """Post-process a fitted HDP-H(S)MM: collapse every constant-state run of
    the data to k-means centroid(s), producing one "event" per run.

    Mirrors enough of the model interface (datas / states_list / _get_colors /
    make_figure) for the plotting helpers to accept an instance directly.
    """
    def __init__(self,model,k=1):
        self.get_state = get_states_list
        # data_info is a module-level de-normalisation table used by the plotters.
        self.data_info = data_info
        self.make_figure=model.make_figure
        self.old_datas = model.datas
        self.stateseqs = model.stateseqs
        #self.states_list = model.states_list[0]
        self.state_colors = model._get_colors(scalars=True)
        self._get_colors = model._get_colors
        # Side effect: get_k_data also sets self.new_seqs, self.states_list
        # and self.time_state, which the Counter below relies on.
        self.datas = self.get_k_data(k)
        #self.groups,self.new_seq = self.seg_into_group(self.stateseq)
        ###get num and time of state
        stating_counter = Counter(self.new_seqs)
        self.num_state = len(list(stating_counter))
        #self.stateseqs = get_seq(stateseq)
    def get_k_data(self,k=1):
        """Segment each sequence into constant-state runs and reduce every run via k-means."""
        events_groups,self.new_seqs = self.seg_into_group(self.stateseqs)
        #print(type(self.new_seqs[0]))
        #print(events_groups)
        new_datas = [self.get_k_means(group_data,k) for group_data in events_groups]
        self.states_list=[self.get_state(seq=self.new_seqs)]
        self.time_state = [self.get_time_state(group_data) for group_data in events_groups]
        #print('get_k_data done')
        return new_datas
    def get_time_state(self,groups):
        """Return the length (in samples) of every run in *groups*."""
        duration_time = []
        #print('get_time_state done')
        for group in groups:
            #print(group,'group')
            duration_time.append(len(group))
        return duration_time
    def seg_into_group(self,seqs):
        """Split each data sequence into runs of identical state labels.

        Returns (events_groups, new_seqs): per-sequence lists of run arrays,
        and the run labels concatenated across all sequences.

        NOTE(review): the last sample gets special handling -- when the state
        changes exactly at the final index both branches fire, appending the
        run bookkeeping twice; verify boundary behaviour before reuse.
        """
        datas = self.old_datas
        #data = data.T
        events_groups=[]
        new_seqs=[]
        assert len(datas)==len(seqs)
        for i in range(len(datas)):
            #print(len(data))
            data=datas[i]
            seq=seqs[i]
            groups = list()
            group = [data[0]]
            new_seq = list()
            for idx in range(1,len(data)): #############
                if idx==len(data)-1:
                    # Close the trailing run at the last sample.
                    groups.append(np.array(group))
                    new_seq.append(seq[idx])
                if seq[idx-1]==seq[idx]:
                    group.append(data[idx])
                else:
                    # State changed: flush the finished run and start a new one.
                    groups.append(np.array(group))
                    group=[data[idx]]
                    new_seq.append(seq[idx-1])
            events_groups.append(groups)
            new_seqs=new_seqs+new_seq
        return events_groups,np.array(new_seqs)
    def get_k_means(self,groups,k=1):
        """Reduce each run to a k-means cluster centre (k=1: the run mean).

        NOTE(review): only cluster_centers_[0] is kept, so k > 1 discards the
        remaining centres -- confirm that is intended.
        """
        from sklearn.cluster import KMeans
        new_data = []
        for group in groups:
            kmodel = KMeans(n_clusters=k)
            kmodel.fit(group)
            new_data.append(kmodel.cluster_centers_[0])
        return new_data
# For each fitted model, compress the output with per-run k-means and report
# (in Chinese): number of driving modes, number of driving segments, and the
# per-segment durations in units of 0.1 s.
hmm_k =k_means(hdp_hmm_model)
print('共有',str(hmm_k.num_state),'种驾驶模式')
print('分了',str(len(hmm_k.new_seqs)),'个驾驶片段')
print('各个驾驶片段行驶的时间为',str(hmm_k.time_state),'*0.1s')
shmm_k =k_means(hdp_s_hmm_model)
print('共有',str(shmm_k.num_state),'种驾驶模式')
print('分了',str(len(shmm_k.new_seqs)),'个驾驶片段')
print('各个驾驶片段行驶的时间为',str(shmm_k.time_state),'*0.1s')
hsmm_k =k_means(hdp_hsmm_model)
print('共有',str(hsmm_k.num_state),'种驾驶模式')
print('分了',str(len(hsmm_k.new_seqs)),'个驾驶片段')
print('各个驾驶片段行驶的时间为',str(hsmm_k.time_state),'*0.1s')
# %%
# Feature-space scatter views of the k-means-compressed HSMM events.
plotting_feature(hsmm_k)
# %%
def get_seg_d(data):
    """Split de-normalised samples into long/normal/close following-distance bins.

    threshold holds [distance cut-offs (descending, metres),
    relative-speed cut-offs, acceleration cut-offs]; the latter two are
    consumed by get_seg_v_a.  Returns ([LD, ND, CD] point lists, threshold).
    """
    data = np.array(data).T
    threshold = [[59.26,20.02,5.00],[-1.19,-0.2,0.25,1.23],[-0.20,-0.06,0.07,0.20]]
    # Undo normalisation via the module-level data_info table (appears to be a
    # per-channel (offset, scale) pair -- TODO confirm layout).
    real_data = np.array(list(data[idx]*data_info[idx][1]+data_info[idx][0] for idx in range(len(data_info)))).reshape(len(data),len(data[0]))
    final_data = real_data.T
    LD_data,ND_data,CD_data = [],[],[]
    for point in final_data:
        if point[0]>threshold[0][0]:
            # Long distance: > 59.26 m.
            LD_data.append(point)
        elif point[0]>=threshold[0][1]:
            # Normal distance: 20.02 - 59.26 m.
            ND_data.append(point)
        elif point[0]>=threshold[0][2]:
            # Close distance: 5.00 - 20.02 m.
            CD_data.append(point)
        else:
            # Distances under 5 m fall outside the expected range; the sample is dropped.
            print('Data has a wrong delta_d')
    return [LD_data,ND_data,CD_data],threshold
    #range_datas=[LD_data,ND_data,CD_data]
def get_seg_v_a(range_datas,threshold):
    """Discretise relative speed and acceleration of every point into 5 ordinal levels.

    For each distance-range group, returns an (n, 2) array whose column 0 is
    the acceleration level and column 1 the relative-speed level, each in
    {-2, -1, 0, 1, 2}: level k means the value falls below the (k+2)-th
    cut-off in the corresponding threshold list (above all cut-offs -> 2).
    """
    def _level(value, cuts):
        # First cut-off the value falls below determines the level.
        for level, cut in zip((-2, -1, 0, 1), cuts):
            if value < cut:
                return level
        return 2

    analyze_data = []
    for range_data in range_datas:
        labels = np.ones(shape=(len(range_data), 2)) * 20
        for row, point in enumerate(range_data):
            labels[row][1] = _level(point[1], threshold[1])
            labels[row][0] = _level(point[2], threshold[2])
        analyze_data.append(labels)
    return analyze_data
# %%
def get_prob(analyze_data):
    """Convert per-range lists of (accel-level, speed-level) pairs into 5x5 probability tables.

    Levels are integers in [-2, 2]; cell [i][j] of a table holds the relative
    frequency of the pair (i - 2, j - 2) within that group.  Empty groups
    yield an all-zero table.

    Fix: the original scanned all 25 cells per sample (O(25*n) equality
    tests); the pair now indexes its cell directly (O(n)), with the same
    skip-if-no-exact-match semantics for non-integer or out-of-range values.
    """
    all_counts = []
    for group in analyze_data:
        counts = np.zeros((5, 5))
        for pair in group:
            i = int(pair[0]) + 2
            j = int(pair[1]) + 2
            # Count only exact integer levels inside the table, mirroring the
            # original equality scan.
            if 0 <= i < 5 and 0 <= j < 5 and pair[0] == i - 2 and pair[1] == j - 2:
                counts[i][j] = counts[i][j] + 1
        if len(group):
            # Normalise to relative frequencies; empty groups stay all-zero.
            counts = counts / float(len(group))
        all_counts.append(counts)
    return all_counts
# Sum the 5x5 (accel-level x speed-level) frequency tables over every
# compressed event sequence: one table per distance range (3 ranges).
all_counts=np.zeros((3,5,5))
for data in hsmm_k.datas:
    range_datas,thre = get_seg_d(data)
    anylyze_data=get_seg_v_a(range_datas,thre)
    all_counts = all_counts+np.array(get_prob(anylyze_data))
# %%
def plotting_style(all_counts):
    """Draw one 5x5 heat map per distance range showing driving-style frequencies.

    Axis tick labels (Chinese): speed levels "rapid decel / slow decel /
    constant / slow accel / rapid accel" (Y) against relative-speed levels
    "closing fast / closing / holding / opening / opening fast" (X); subplot
    titles are "long / medium / short distance".
    """
    Y = ['急减','缓减','匀速','缓加','急加']
    X = ['快近','渐近','维持','渐远','快远']
    fig = plt.figure(figsize=(12,12))
    axes = []
    img = []
    sizes=20
    titles = ['远距离','中距离','短距离']
    for idx in range(len(all_counts)):
        axes.append(fig.add_subplot(2,2,idx+1))
        axes[-1].set_ylabel('加速度'+r'$a_x$',size=sizes,fontproperties=zhfont1)
        # Ticks centred on the pcolormesh cells (0.5 .. 4.5).
        axes[-1].set_xticks(np.linspace(0.5,4.5,5,endpoint=True))
        axes[-1].set_xticklabels(X,fontproperties=zhfont1,size=sizes-2)
        axes[-1].set_xlabel('相对速度'+r'$\Delta$v',size=sizes,fontproperties=zhfont1)
        axes[-1].set_yticks(np.linspace(0.5,4.5,5,endpoint=True))
        axes[-1].set_yticklabels(Y,fontproperties=zhfont1,size=sizes-2)
        axes[-1].set_title(titles[idx],fontproperties=zhfont1,size=sizes)
        # Note: called once per subplot; harmless but only the last call matters.
        plt.subplots_adjust(wspace=0.4,hspace=0.3)
        img.append(axes[-1].pcolormesh(all_counts[idx],cmap = mpl.cm.Spectral_r))
        # Colorbar in a side axis, scaled to this subplot's own maximum.
        divider = make_axes_locatable(axes[-1])
        cax = divider.append_axes("right",size='5%',pad=0.05)
        #print(np.linspace(0.5,4.5,5,endpoint=True))
        norm = mpl.colors.Normalize(vmin=0,vmax=all_counts[idx].max())
        cmap = mpl.cm.Spectral_r
        cb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm,cmap=cmap),cax=cax)
# Average the accumulated tables over the number of sequences, then plot the
# per-distance-range driving-style heat maps.
all_counts=all_counts/len(hsmm_k.datas)
plotting_style(all_counts)
#plt.xla
# %%
# %%
| StarcoderdataPython |
144914 | <reponame>LeandroIssa/ha-nest-protect
"""Models used by PyNest."""
from dataclasses import dataclass, field
import datetime
from typing import Any
@dataclass
class NestLimits:
    """Nest Limits.

    Account-level quota caps reported by the Nest session endpoint.
    """
    thermostats_per_structure: int
    structures: int
    smoke_detectors_per_structure: int
    smoke_detectors: int
    thermostats: int
@dataclass
class NestUrls:
    """Nest Urls.

    Per-session service endpoints returned by the Nest API.
    """
    rubyapi_url: str
    czfe_url: str
    log_upload_url: str
    transport_url: str
    weather_url: str
    support_url: str
    direct_transport_url: str
@dataclass
class NestResponse:
    """Class that reflects a Nest API session response."""
    # NOTE(review): the annotations below look inconsistent with the field
    # names (access_token annotated float; expires_in holds a cookie-style
    # date string, see is_expired) -- confirm against the actual payload.
    access_token: float
    email: str
    expires_in: str
    userid: str
    is_superuser: bool
    language: str
    weave: dict[str, str]
    user: str
    is_staff: bool
    urls: NestUrls = field(default_factory=NestUrls)
    limits: NestLimits = field(default_factory=NestLimits)
    # Two-factor-authentication metadata (optional in the payload).
    _2fa_state: str = None
    _2fa_enabled: bool = None
    _2fa_state_changed: str = None
    def is_expired(self):
        """Check if session is expired.

        ``expires_in`` holds an absolute expiry timestamp in cookie format
        (e.g. "Tue, 01-Mar-2022 23:15:55 GMT"); the comparison uses the naive
        local clock, so timezone skew near the boundary is possible.
        """
        # Tue, 01-Mar-2022 23:15:55 GMT
        expiry_date = datetime.datetime.strptime(
            self.expires_in, "%a, %d-%b-%Y %H:%M:%S %Z"
        )
        if expiry_date <= datetime.datetime.now():
            return True
        return False
@dataclass
class Bucket:
    """Generic Nest "bucket": a keyed, versioned, timestamped payload object."""
    object_key: str
    object_revision: str
    object_timestamp: str
    value: Any
@dataclass
class WhereBucket(Bucket):
    """Bucket for "where" (room/location) objects.

    NOTE(review): the fields below merely restate those inherited from Bucket;
    the subclass appears to exist only for type discrimination.
    """
    object_key: str
    object_revision: str
    object_timestamp: str
    value: Any
@dataclass
class TopazBucketValue:
    """Nest Protect values.

    State payload of a Nest Protect ("Topaz") device as delivered by the
    transport API; field names mirror the wire format.
    """
    spoken_where_id: str
    creation_time: int
    installed_locale: str
    ntp_green_led_brightness: int
    component_buzzer_test_passed: bool
    wifi_ip_address: str
    wired_led_enable: bool
    wifi_regulatory_domain: str
    co_blame_duration: int
    is_rcs_capable: bool
    fabric_id: str
    battery_health_state: int
    steam_detection_enable: bool
    hushed_state: bool
    capability_level: float
    home_alarm_link_type: int
    model: str
    component_smoke_test_passed: bool
    component_speaker_test_passed: bool
    removed_from_base: bool
    smoke_sequence_number: int
    home_away_input: bool
    device_locale: str
    co_blame_threshold: int
    kl_software_version: str
    component_us_test_passed: bool
    auto_away: bool
    night_light_enable: bool
    component_als_test_passed: bool
    # Annotation fixed: the original used the literal sample value 32768 as
    # the type annotation instead of `int`.
    speaker_test_results: int
    wired_or_battery: int
    is_rcs_used: bool
    replace_by_date_utc_secs: int
    # Annotation fixed: the original used the literal sample value 2 as the
    # type annotation instead of `int`.
    certification_body: int
    component_pir_test_passed: bool
    structure_id: str
    software_version: str
    component_hum_test_passed: bool
    home_alarm_link_capable: bool
    night_light_brightness: int
    device_external_color: str
    latest_manual_test_end_utc_secs: int
    smoke_status: int
    latest_manual_test_start_utc_secs: int
    component_temp_test_passed: bool
    home_alarm_link_connected: bool
    co_status: int
    heat_status: int
    product_id: int
    night_light_continuous: bool
    co_previous_peak: int
    auto_away_decision_time_secs: int
    component_co_test_passed: bool
    where_id: str
    serial_number: str
    component_heat_test_passed: bool
    latest_manual_test_cancelled: bool
    thread_mac_address: str
    resource_id: str
    buzzer_test_results: int
    wifi_mac_address: str
    line_power_present: bool
    gesture_hush_enable: bool
    device_born_on_date_utc_secs: int
    ntp_green_led_enable: bool
    component_led_test_passed: bool
    co_sequence_number: int
    thread_ip_address: list[str]
    component_wifi_test_passed: bool
    heads_up_enable: bool
    battery_level: int
@dataclass
class TopazBucket(Bucket):
    """Bucket whose value is a Nest Protect ("Topaz") state payload."""
    value: TopazBucketValue = field(default_factory=TopazBucketValue)
@dataclass
class GoogleAuthResponse:
    """OAuth token response returned by Google's token endpoint."""
    access_token: str
    # Lifetime in seconds from issuance (a duration, not an absolute time).
    expires_in: int
    scope: str
    token_type: str
    id_token: str
    def is_expired(self):
        """Check if access token is expired.

        NOTE(review): this looks buggy -- expiry_date is computed as
        "now + expires_in - 5", so the comparison against now() is only true
        when expires_in <= 5.  A real check would need the issuance timestamp
        to be recorded; left as-is because fixing it changes stored state.
        """
        expiry_date = datetime.datetime.now() + datetime.timedelta(
            seconds=self.expires_in - 5
        )
        if expiry_date <= datetime.datetime.now():
            return True
        return False
# TODO rewrite to snake_case
@dataclass
class NestAuthClaims:
    """Claims section of the Nest JWT (field names mirror the API's camelCase)."""
    subject: Any
    expirationTime: str
    policyId: str
    structureConstraint: str
@dataclass
class NestAuthResponse:
    """Response from the Nest JWT-authorisation endpoint."""
    jwt: str
    claims: NestAuthClaims = field(default_factory=NestAuthClaims)
| StarcoderdataPython |
130934 | <reponame>ONSdigital/ras-frontstage<filename>tests/integration/test_jwt_authorization.py
import unittest
from unittest import mock
from uuid import uuid4
from jose import JWTError
from frontstage import app
from frontstage.common.authorisation import jwt_authorization
from frontstage.common.session import Session
from frontstage.exceptions.exceptions import JWTValidationError
# Fixture JWTs (signing material redacted as "<KEY>" in this copy):
# - valid_jwt: expiry claim far in the future
# - expired_jwt: expiry claim already in the past
# - no_expiry_jwt: missing the expiry claim entirely
valid_jwt = (
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "jc2NzQwMDAuMH0.m94R50EPIKTJmE6gf6PvCmCq8ZpYwwV8PHSqsJh5fnI"
)
expired_jwt = (
    "<KEY>"
    "<KEY>"
    "<KEY>"
    "GFydHlfaWQiOiJkYjAzNmZkNy1jZTE3LTQwYzItYThmYy05MzJlN2MyMjgzOTcifQ.ro95XUJ2gqgz7ecF2r3guSi-kh4wI_XYTgUF"
    "8IZFHDA"
)
no_expiry_jwt = (
    "<KEY>"
    "<KEY>"
)
class TestJWTAuthorization(unittest.TestCase):
    """Integration tests for the @jwt_authorization decorator against a stored session."""
    def setUp(self):
        self.app = app.test_client()
        self.app.testing = True
        # Session object the decorator will look up via the request cookie.
        self.session = Session.from_party_id("test")
    def tearDown(self):
        # Remove the persisted session so tests stay independent.
        self.session.delete_session()
    @staticmethod
    def decorator_test(request):
        """Run a no-op view through jwt_authorization with the given request."""
        @jwt_authorization(request)
        def test_function(session):
            pass
        test_function()
    def test_jwt_authorization_success(self):
        """A stored, unexpired JWT authorises the request without raising."""
        self.session.encoded_jwt_token = valid_jwt
        self.session.session_key = str(uuid4())
        self.session.save()
        request = mock.MagicMock(cookies={"authorization": self.session.session_key})
        # If this function runs without exceptions the test is considered passed
        self.decorator_test(request)
    def test_jwt_authorization_expired_jwt(self):
        """An expired JWT must be rejected with JWTValidationError."""
        self.session.encoded_jwt_token = expired_jwt
        self.session.session_key = str(uuid4())
        self.session.save()
        request = mock.MagicMock(cookies={"authorization": self.session.session_key})
        with self.assertRaises(JWTValidationError):
            self.decorator_test(request)
    def test_jwt_authorization_no_expiry(self):
        """A JWT without an expiry claim must be rejected."""
        self.session.encoded_jwt_token = no_expiry_jwt
        self.session.session_key = str(uuid4())
        self.session.save()
        request = mock.MagicMock(cookies={"authorization": self.session.session_key})
        with self.assertRaises(JWTValidationError):
            self.decorator_test(request)
    @mock.patch("frontstage.common.authorisation.decode")
    def test_jwt_authorization_decode_failure(self, mock_decode):
        """A jose decode failure surfaces as JWTValidationError."""
        self.session.encoded_jwt_token = valid_jwt
        self.session.session_key = str(uuid4())
        self.session.save()
        request = mock.MagicMock(cookies={"authorization": self.session.session_key})
        mock_decode.side_effect = JWTError
        with self.assertRaises(JWTValidationError):
            self.decorator_test(request)
| StarcoderdataPython |
1707988 | from typing import List, Tuple, Set, Dict
def add_vec(v1: Tuple[int, int], v2: Tuple[int, int]) -> Tuple[int, int]:
    """Return the element-wise sum of two 2-D integer vectors."""
    x1, y1 = v1
    x2, y2 = v2
    return x1 + x2, y1 + y2
def map_moves(steps: List[Tuple[str, int]]) -> Dict[Tuple[int, int], int]:
    """Trace a wire path from the origin.

    Returns a mapping from each visited grid cell to the step count at which
    it was first reached (the origin itself is not recorded).
    """
    deltas = {
        "U": (1, 0),
        "D": (-1, 0),
        "R": (0, 1),
        "L": (0, -1),
    }
    visited: Dict[Tuple[int, int], int] = {}
    row, col = 0, 0
    step = 0
    for direction, length in steps:
        d_row, d_col = deltas[direction]
        for _ in range(length):
            step += 1
            row, col = row + d_row, col + d_col
            # Keep only the earliest visit, matching dict.get(pos, step) semantics.
            if (row, col) not in visited:
                visited[(row, col)] = step
    return visited
def part1(inp: List[List[Tuple[str, int]]]) -> int:
    """Manhattan distance from the origin to the nearest point where the two wires cross."""
    trace1 = map_moves(inp[0])
    trace2 = map_moves(inp[1])
    crossings = trace1.keys() & trace2.keys()
    return min(abs(row) + abs(col) for row, col in crossings)
def part2(inp: List[List[Tuple[str, int]]]) -> int:
    """Fewest combined steps along both wires to reach any crossing point."""
    trace1 = map_moves(inp[0])
    trace2 = map_moves(inp[1])
    return min(
        steps + trace2[cell]
        for cell, steps in trace1.items()
        if cell in trace2
    )
def main() -> None:
    """Read the two wire paths from input.txt and print both puzzle answers."""
    with open("input.txt") as f:
        # Each line holds comma-separated moves like "R75"; split each move
        # into its direction letter and integer length.
        inp = [
            list(map(lambda o: (o[0], int(o[1:])), x.strip().split(",")))
            for x in f.readlines()
        ]
    print("Part 1:", part1(inp))
    print("Part 2:", part2(inp))


# NOTE(review): runs on import; consider an `if __name__ == "__main__":` guard.
main()
| StarcoderdataPython |
3234941 | from .headed_frame import HeadedFrame
from .key_value_display import KeyValueDisplay
from .ttk_text import TtkText | StarcoderdataPython |
1654790 | ## Engineering spectral features
import librosa as lr
# Calculate the spectral centroid and bandwidth for the spectrogram
bandwidths = lr.feature.spectral_bandwidth(S=spec)[0]
centroids = lr.feature.spectral_centroid(S=spec)[0]
________________________________________________________________
from librosa.core import amplitude_to_db
from librosa.display import specshow
# Convert spectrogram to decibels for visualization
spec_db = amplitude_to_db(spec)
# Display these features on top of the spectrogram
fig, ax = plt.subplots(figsize=(10, 5))
ax = specshow(spec_db, x_axis='time', y_axis='hz', hop_length=HOP_LENGTH)
ax.plot(times_spec, centroids)
ax.fill_between(times_spec, centroids - bandwidths / 2, centroids + bandwidths / 2, alpha=.5)
ax.set(ylim=[None, 6000])
plt.show()
| StarcoderdataPython |
10576 | <reponame>christopherferreira3/Python-ADB-Tools
import subprocess
import os
def get_connected_devices() -> list:
    """
    Returns a list of tuples containing the Device name and the android Version
    :return: list of (serial, android_version) tuples
    """
    devices = []
    # `adb devices` prints a header line; stripping that literal text and
    # splitting on newlines leaves one entry per attached device.
    devices_output = subprocess.check_output(["adb", "devices"]).decode("utf-8").strip("List of devices attached").split("\n")
    for device in devices_output:
        if device is None or device == "":
            pass
        else:
            # Each line looks like "<serial>\tdevice"; drop the status suffix.
            # NOTE(review): str.strip('\tdevice') strips any of those characters
            # from both ends, which could eat legitimate serial characters
            # (d/e/v/i/c/t) -- verify against real serial formats.
            device_name = device.strip('\tdevice')
            android_version = subprocess.check_output(["adb", "-s", device_name, "shell", "getprop", "ro.build.version.release"])
            devices.append((device_name, android_version.decode('utf-8').strip("\r\n")))
    return devices
def install_app(apk_path=None, device=None) -> bool:
    """
    Installs an APK file into a device.
    The app installed with the -r option so the apk gets replaced it exists or installed if it doenst
    :param apk_path: Path for the APK, relative to the current working directory
    :param device: Device name
    :return: True if success , False if fail
    """
    # apk_path is treated as relative to the current working directory even
    # when it starts with "/" (the leading slash is just appended after cwd,
    # not treated as an absolute path) -- NOTE(review): confirm intentional.
    path = os.getcwd() + apk_path if str(apk_path).startswith("/") else os.getcwd() + "/" + apk_path
    if apk_path is not None and device is not None:
        if os.path.isfile(path):
            command = ["adb", "-s", device, "install", "-r", path]
            p = subprocess.Popen(command, stdout=None)
            # Block until `adb install` finishes before reporting success.
            p.wait()
            p.terminate()
            print("APK {0} was installed in {1}".format(apk_path, device))
            return True
        else:
            print("File {0} not found!".format(path))
    else:
        print("Device and/or apk not found or not specified")
    return False
def is_device_connected(device) -> bool:
    """Return True when *device* is among the serials reported by `adb devices`."""
    return any(serial == device for serial, _version in get_connected_devices())
def unintall_app(package=None, device=None) -> None:
    """
    Uninstall an app from the device
    :param package: package name to remove (required)
    :param device: device serial; omitted -> adb targets the single attached device
    :return:
    """
    # NOTE(review): the function name has a typo ("unintall"); kept because
    # external callers may already reference it.
    command = ["adb", "-s", device, "uninstall", package]
    if package is not None:
        if device is None:
            # Drop the "-s <device>" pair so adb targets the only attached device.
            command.pop(1)
            command.pop(1)
        p = subprocess.Popen(command, stdout=None)
        p.wait()
        p.terminate()
    else:
        print("App package was not specified.")
def is_app_installed(package=None, device=None) -> bool:
    """
    Returns True if the package is installed or False if it is not
    :param package: package name to look up
    :param device: device serial; omitted -> adb targets the single attached device
    :return:
    """
    # NOTE(review): "packages |" and "grep" are passed as argv elements of the
    # remote shell command; this relies on `adb shell` joining its arguments
    # into one shell line on the device -- verify on the target adb version.
    command = ["adb", "-s", device, "shell", "pm", "list", "packages |", "grep", package]
    if device is None:
        # Drop the "-s <device>" pair.
        command.pop(1)
        command.pop(1)
    out = subprocess.check_output(command, stderr=None)
    # grep prints "package:<name>" when found; empty output otherwise.
    return True if out.decode('utf-8').strip("\r\n") == "package:{0}".format(package) else False
def run_command(arg_string=None, arg_list=None) -> None:
    """
    Run a general ADB command and print its decoded output.

    :param arg_string: whitespace-separated command line (used when arg_list is falsy)
    :param arg_list: command as an argv list (takes precedence)
    """
    if arg_list:
        command = arg_list
    else:
        command = str(arg_string).split(" ")
    output = subprocess.check_output(command, stderr=None)
    print(output.decode('utf-8'))
def kill_server() -> None:
    """Stop the local ADB server, waiting up to 10 s for the process to exit."""
    proc = subprocess.Popen(["adb", "kill-server"], stdout=None, stderr=None)
    proc.wait(timeout=10)
    print("ADB server has been killed.")
def start_server() -> None:
    """Start the local ADB server, waiting up to 10 s for the process to exit."""
    proc = subprocess.Popen(["adb", "start-server"], stderr=None, stdout=None)
    proc.wait(timeout=10)
    print("ADB server has been started.")
def get_apk_from_device(package=None, device=None) -> bool:
    """
    Retrieves the APK of an application if it exists.

    :param package: package name to look up (required)
    :param device: device serial; may be omitted when exactly one device is attached
    :return: False on argument errors; retrieval itself is still a TODO
    """
    # adb shell pm path com.example.someapp
    # adb pull /data/app/com.example.someapp-2.apk path/to/desired/destination
    # Fix: `pm` runs on the device, so the command must go through `adb shell`
    # (the original omitted "shell" and asked the host adb binary to run `pm`,
    # which is not a host adb subcommand).
    command_apk_path = ["adb", "-s", device, "shell", "pm", "path", package]
    if package is None:
        print("Package is required but it was not specified.")
        return False
    if device is None and len(get_connected_devices()) != 1:
        print("There are multiple devices connected, please specify a device to get the APK from")
        return False
    elif device is None:
        # Drop the "-s <device>" pair; adb will target the only attached device.
        command_apk_path.pop(1)
        command_apk_path.pop(1)
    apk_path = subprocess.check_output(command_apk_path, stderr=None)
    # TODO: parse `apk_path` ("package:<path>") and `adb pull` the APK.
def push_file_to_device() -> None: # For now...
    """
    Pushes a file to the device

    TODO: not implemented -- will need (device, local_path, remote_path)
    parameters and an `adb push` invocation.
    :return: None
    """
    pass
def list_files_in_device() -> None:
    """
    Gets a list of files in a specific folder

    TODO: not implemented -- will need (device, path) parameters and an
    `adb shell ls` invocation.
    :param device:
    :param path:
    :return: list of files
    """
    pass
def unlock_device(password=None, device=None) -> bool:
    """
    Unlocks a device given a device name and the password.

    Types the password with `input text`, then sends keyevent 66 (ENTER).

    :param password: unlock PIN/password to type (must be a string)
    :param device: device serial; may be omitted when exactly one device is attached
    :return: True on success, False on argument errors
    """
    command_input = ["adb", "-s", device, "shell", "input", "text", password]
    # Fix: subprocess argv elements must be strings -- the original passed the
    # keycode as the integer 66, which raises TypeError inside Popen.
    command_submit = ["adb", "-s", device, "shell", "input", "keyevent", "66"]
    if device is None and len(get_connected_devices()) != 1:
        print("No device was specified and/or multiple devices are connected")
        return False
    if device is None:
        # Drop the "-s <device>" pairs; adb will target the only attached device.
        command_input.pop(1)
        command_input.pop(1)
        command_submit.pop(1)
        command_submit.pop(1)
    p = subprocess.Popen(command_input, stdout=None)
    p.wait()
    p.terminate()
    p1 = subprocess.Popen(command_submit, stdout=None)
    p1.wait()
    p1.terminate()
    return True
| StarcoderdataPython |
3287352 | <reponame>gunpowder78/webdnn
from typing import Tuple
from webdnn.graph import traverse
from webdnn.graph.axis import Axis
from webdnn.graph.graph import Graph
from webdnn.graph.operators.linear import Linear
from webdnn.graph.operators.sgemm import Sgemm
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.order import OrderNHWC, OrderHWCN, OrderNC, OrderCN
class ReplaceLinearBySgemm(OptimizeRule):
    """
    Replace Linear by Sgemm
    """
    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        """Rewrite every Linear operator in *graph* as an equivalent Sgemm.

        Returns the (possibly modified) graph and a flag telling the optimizer
        driver whether anything changed.
        """
        flag_changed = False
        for op in traverse.filter_nodes(traverse.listup_operators(graph), Linear):  # type: Linear
            x = op.inputs["x"]
            w = op.inputs["w"]
            y = op.outputs["y"]
            # Linear here only supports 2D (NC/CN) or 4D (NHWC/HWCN) layouts,
            # and input/weight must have matching rank.
            assert x.order == OrderNC or x.order == OrderNHWC, f"(x.order) = {x.order}"
            assert w.order == OrderCN or w.order == OrderHWCN, f"(x.order) = {w.order}"
            assert y.order == OrderNC or y.order == OrderNHWC, f"(x.order) = {y.order}"
            assert w.ndim == x.ndim
            flag_changed = True
            op.remove_all()
            # GEMM sizing: batch axis N stays as M; all remaining output axes
            # fold into N, all non-batch input axes fold into K.
            sgemm = Sgemm(None,
                          M=y.shape_dict[Axis.N],
                          N=y.size // y.shape_dict[Axis.N],
                          K=x.size // x.shape_dict[Axis.N],
                          out_shape=y.shape,
                          out_order=y.order,
                          transpose_A=True,
                          transpose_B=True)
            new_y, = sgemm(x, w)
            # Splice the Sgemm output back in place of the old Linear output.
            sgemm.replace_output(new_y, y)
        return graph, flag_changed
| StarcoderdataPython |
153146 | # encoding:utf-8
from utils import get_url
# Source subreddit and target Telegram channel for this mirror bot.
subreddit = 'hmmm'
t_channel = '@r_hmmm'

# U+1F51E "no one under eighteen" emoji, used to flag NSFW posts.
NSFW_EMOJI = u'\U0001F51E'
def send_post(submission, r2t):
    """Mirror a reddit submission to the Telegram channel.

    Only image posts are forwarded.  NSFW images are sent as an emoji-flagged
    text message (link only) instead of uploading the image.

    :param submission: praw submission object
    :param r2t: reddit-to-telegram sender (send_text / send_img / dup_check_and_mark)
    :return: sender result on success, False when the post is skipped
    """
    what, url, ext = get_url(submission)

    title = submission.title
    link = submission.shortlink
    text = '{}\n{}'.format(title, link)

    # Fix: the original wrote `what not in ('img')` -- `('img')` is just the
    # string 'img' (missing comma), so the check was a substring test rather
    # than tuple membership.  Compare directly instead.
    if what != 'img':
        return False

    if submission.over_18:
        url = submission.url
        text = '{emoji}NSFW\n{url}\n{title}\n\n{link}\n\nby {channel}'.format(
            emoji=NSFW_EMOJI,
            url=url,
            title=title,
            link=link,
            channel=t_channel
        )
        return r2t.send_text(text, disable_web_page_preview=True)

    # Skip images that were already posted (and mark this one as seen).
    if r2t.dup_check_and_mark(url) is True:
        return False

    return r2t.send_img(url, ext, text)
| StarcoderdataPython |
3310598 | """
Script to compute features used for posture and activity recognition in multilocation paper.
features:
"MEAN"
'STD'
'MAX'
'DOM_FREQ'
'DOM_FREQ_POWER_RATIO'
'HIGHEND_FREQ_POWER_RATIO'
'RANGE'
'ACTIVE_SAMPLE_PERC'
'NUMBER_OF_ACTIVATIONS'
'ACTIVATION_INTERVAL_VAR'
Usage:
Production:
On all participants
`mh -r . process TimeFreqFeatureComputer --pattern Derived/preprocessed/**/Actigraph*.sensor.csv --setname test_timefreqfeature > DerivedCrossParticipants/TimeFreq.feature.csv`
On single participant
`mh -r . -p SPADES_1 process TimeFreqFeatureComputer --par --pattern Derived/preprocessed/**/Actigraph*.sensor.csv > SPADES_1/Derived/TimeFreq.feature.csv`
Debug:
`mh -r . -p SPADES_1 process TimeFreqFeatureComputer --verbose --pattern Derived/preprocessed/**/Actigraph*.sensor.csv --setname test_timefreqfeature`
"""
import os
import pandas as pd
import numpy as np
from ..api import numeric_feature as mnf
from ..api import windowing as mw
from ..api import utils as mu
from .BaseProcessor import SensorProcessor
def build(**kwargs):
    """Processor factory used by the mhealth runner: returns the per-file processing callable."""
    return TimeFreqFeatureComputer(**kwargs).run_on_file
class TimeFreqFeatureComputer(SensorProcessor):
    """Compute windowed time- and frequency-domain features from raw sensor files.

    Per axis and window: mean, std, max, dominant frequency, dominant-frequency
    power ratio, high-end power ratio, range, active-sample percentage,
    activation count and activation-interval variability (the multilocation
    paper's feature set).
    """
    def __init__(self, verbose=True, independent=False, setname='Feature', sessions='DerivedCrossParticipants/sessions.csv', ws=12800, ss=12800, threshold=0.2):
        # ws / ss: window duration and step size passed to the windowing API
        # (ws == ss gives non-overlapping windows); unit assumed to be
        # milliseconds -- TODO confirm against the mw windowing docs.
        # threshold: amplitude cutoff used by the activation features.
        SensorProcessor.__init__(self, verbose=verbose, independent=independent)
        self.name = 'TimeFreqFeatureComputer'
        self.setname = setname
        self.sessions = sessions
        self.ws = ws
        self.ss = ss
        self.threshold = threshold
    def _run_on_data(self, combined_data, data_start_indicator, data_stop_indicator):
        """Compute the feature matrix for the chunk bounded by the start/stop indicators."""
        # Session boundaries for this participant (column 0 holds timestamps).
        st, et = mu.get_st_et(combined_data, self.meta['pid'], self.sessions, st_col=0, et_col=0)
        ws = self.ws
        ss = self.ss
        col_names = combined_data.columns[1:]
        if self.verbose:
            print('Session start time: ' + str(st))
            print('Session stop time: ' + str(et))
        sr = mu._sampling_rate(combined_data)
        def freq_features(X):
            # Unpack mnf.frequency_features' flat output (n_features values per
            # column) into dominant frequency, dominant/total power ratio and
            # high-end/total power ratio per axis.
            ncols = X.shape[1]
            result = mnf.frequency_features(X, sr, freq_range=None, top_n_dominant = 1)
            if len(result) == 0:
                # No spectrum available: emit NaNs for all 3 features per axis.
                return np.array([np.nan] * ncols * 3)
            n_features = int(result.shape[0] / ncols)
            p1 = list()
            p1ratio = list()
            phratio = list()
            for i in range(0, ncols):
                p1.append(result[i * n_features + 1])
                p1ratio.append(result[i * n_features + 1] / result[i * n_features + 2])
                phratio.append(result[i * n_features + 3] / result[i * n_features + 2])
            return np.array(p1 + p1ratio + phratio)
        # Window operations applied per axis; order must match feature_names.
        features = [
            mnf.mean,
            mnf.std,
            mnf.positive_amplitude,
            freq_features,
            mnf.amplitude_range,
            lambda x: mnf.active_perc(x, self.threshold),
            lambda x: mnf.activation_count(x, self.threshold),
            lambda x: mnf.activation_std(x, self.threshold)
        ]
        # freq_features expands to three named outputs, so this list is longer
        # than `features`.
        feature_names = [
            "MEAN",
            'STD',
            'MAX',
            'DOM_FREQ',
            'DOM_FREQ_POWER_RATIO',
            'HIGHEND_FREQ_POWER_RATIO',
            'RANGE',
            'ACTIVE_SAMPLE_PERC',
            'NUMBER_OF_ACTIVATIONS',
            'ACTIVATION_INTERVAL_VAR'
        ]
        all_feature_names = [feature_name + "_" + col_name for feature_name in feature_names for col_name in col_names]
        windows = mw.get_sliding_window_boundaries(start_time=st, stop_time=et, window_duration=ws, step_size=ss)
        # Keep only windows starting inside this chunk so parallel chunks do
        # not produce duplicate rows.
        chunk_windows_mask = (windows[:,0] >= data_start_indicator) & (windows[:,0] < data_stop_indicator)
        chunk_windows = windows[chunk_windows_mask,:]
        if len(chunk_windows) == 0:
            return pd.DataFrame()
        result_data = mw.apply_to_sliding_windows(df=combined_data, sliding_windows=chunk_windows, window_operations=features, operation_names=all_feature_names, return_dataframe=True)
        return result_data
    def _post_process(self, result_data):
        """Write the per-file feature CSV and tag rows with participant/sensor ids."""
        output_path = mu.generate_output_filepath(self.file, self.setname, 'feature', 'TimeFreq')
        if not os.path.exists(os.path.dirname(output_path)):
            os.makedirs(os.path.dirname(output_path))
        result_data.to_csv(output_path, index=False, float_format='%.6f')
        if self.verbose:
            print('Saved feature data to ' + output_path)
        # pid/sid columns are added after saving, for the cross-participant merge.
        result_data['pid'] = self.meta['pid']
        result_data['sid'] = self.meta['sid']
        return result_data
# def main(file, verbose=True, prev_file=None, next_file=None, sessions="DerivedCrossParticipants/sessions.csv", name='multilocation_2017', ws=12800, ss=12800, threshold=0.2, subwins=4, **kwargs):
# file = os.path.abspath(file)
# if verbose:
# print("Compute features for " + file)
# df = pd.read_csv(file, parse_dates=[0], infer_datetime_format=True)
# pid = mu.extract_pid(file)
# sid = mu.extract_id(file)
# if verbose:
# print("Prev file is " + str(prev_file))
# print("Next file is " + str(next_file))
# if not os.path.exists(prev_file):
# prev_file = None
# if not os.path.exists(next_file):
# next_file = None
# sessions = os.path.abspath(sessions)
# if sessions is None or pid is None:
# st = df.iloc[0, 0]
# et = df.iloc[df.shape[0]-1, 0]
# else:
# session_df = pd.read_csv(sessions, parse_dates=[0, 1], infer_datetime_format=True)
# selected_sessions = session_df.loc[session_df['pid'] == pid, :]
# if selected_sessions.shape[0] == 0:
# st = df.iloc[0, 0]
# et = df.iloc[df.shape[0]-1, 0]
# else:
# st = selected_sessions.iloc[0, 0]
# et = selected_sessions.iloc[selected_sessions.shape[0] - 1, 1]
# if verbose:
# print('Session start time: ' + str(st))
# print('Session stop time: ' + str(et))
# result_df = run_compute_features(df, verbose=verbose, prev_file=prev_file, next_file=next_file, st=st, et=et, ws=ws, ss=ss, **kwargs)
# if "MasterSynced" in file:
# output_path = file.replace("MasterSynced", "Derived/" + name).replace("sensor", "feature")
# elif "Derived" in file:
# derived_folder_name = utils.extract_derived_folder_name(file)
# output_path = file.replace(derived_folder_name, name).replace('sensor', 'feature')
# if not os.path.exists(os.path.dirname(output_path)):
# os.makedirs(os.path.dirname(output_path))
# result_df.to_csv(output_path, index=False, float_format='%.3f')
# if verbose:
# print('Saved feature data to ' + output_path)
# result_df['pid'] = pid
# result_df['sid'] = sid
# return result_df
# def run_compute_features(df, verbose=True, prev_file=None, next_file=None, st=None, et=None, ws=12800, ss=12800, threshold=0.2, subwins=4, lowpass_cutoff = 20, **kwargs):
# # save current file's start and stop time
# chunk_st = df.iloc[0, 0].to_datetime64().astype('datetime64[h]')
# if chunk_st < st.to_datetime64():
# chunk_st = st.to_datetime64()
# chunk_et = df.iloc[df.shape[0]-1, 0].to_datetime64().astype('datetime64[h]') + np.timedelta64(1, 'h')
# if chunk_et > et.to_datetime64():
# chunk_et = et.to_datetime64()
# if prev_file is not None and prev_file != 'None':
# prev_df = pd.read_csv(prev_file, parse_dates=[0], infer_datetime_format=True)
# else:
# prev_df = pd.DataFrame()
# if next_file is not None and next_file != 'None':
# next_df = pd.read_csv(next_file, parse_dates=[0], infer_datetime_format=True)
# else:
# next_df = pd.DataFrame()
# combined_df = pd.concat([prev_df, df, next_df], axis=0, ignore_index=True)
# sr = mu._sampling_rate(combined_df)
# # 20 Hz lowpass filter on vector magnitude data and original data
# vm_data = mnt.vector_magnitude(combined_df.values[:,1:4]).ravel()
# b, a = signal.butter(4, lowpass_cutoff/sr, 'low')
# vm_data_filtered = signal.filtfilt(b, a, vm_data)
# combined_data_filtered = signal.filtfilt(b, a, combined_df.values[:,1:4], axis=0)
# vm_df = pd.DataFrame(data={"HEADER_TIME_STAMP": combined_df.iloc[:,0].values, "VM": vm_data_filtered})
# combined_df.values[:,1:4] = combined_data_filtered
# def freq_features(X):
# result = mnf.frequency_features(X, sr, freq_range=None, top_n_dominant = 1)
# if len(result) == 0:
# return np.array([np.nan, np.nan, np.nan])
# p1 = result[1]
# pt = result[2]
# ph = result[3]
# p1ratio = p1 / pt
# phratio = ph / pt
# return np.array([p1, p1ratio, phratio])
# vm_features = [
# mnf.mean,
# mnf.std,
# mnf.positive_amplitude,
# freq_features,
# mnf.amplitude_range,
# lambda x: mnf.active_perc(x, threshold),
# lambda x: mnf.activation_count(x, threshold),
# lambda x: mnf.activation_std(x, threshold)
# ]
# vm_feature_names = [
# "MEAN",
# 'STD',
# 'MAX',
# 'DOM_FREQ',
# 'DOM_FREQ_POWER_RATIO',
# 'HIGHEND_FREQ_POWER_RATIO',
# 'RANGE',
# 'ACTIVE_SAMPLE_PERC',
# 'NUMBER_OF_ACTIVATIONS',
# 'ACTIVATION_INTERVAL_VAR'
# ]
# axis_features = [
# lambda x: mnf.accelerometer_orientation_features(x, subwins=subwins)
# ]
# axis_feature_names = [
# "MEDIAN_X_ANGLE",
# "MEDIAN_Y_ANGLE",
# "MEDIAN_Z_ANGLE",
# "RANGE_X_ANGLE",
# "RANGE_Y_ANGLE",
# "RANGE_Z_ANGLE"
# ]
# windows = mw.get_sliding_window_boundaries(start_time=st, stop_time=et, window_duration=ws, step_size=ss)
# chunk_windows_mask = (windows[:,0] >= chunk_st) & (windows[:,0] <= chunk_et)
# chunk_windows = windows[chunk_windows_mask,:]
# vm_feature_df = mw.apply_to_sliding_windows(df=vm_df, sliding_windows=chunk_windows, window_operations=vm_features, operation_names=vm_feature_names, return_dataframe=True)
# axis_feature_df = mw.apply_to_sliding_windows(df=combined_df, sliding_windows=chunk_windows, window_operations=axis_features, operation_names=axis_feature_names, return_dataframe=True)
# feature_df = vm_feature_df.merge(axis_feature_df, on = ['START_TIME', 'STOP_TIME'])
# return feature_df | StarcoderdataPython |
3313141 | import asyncio
import logging
import os
from mqttrpc import MQTTRPC, dispatcher
# Verbose root logging for the demo; silence hbmqtt's DEBUG chatter.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('hbmqtt').setLevel(level=logging.INFO)
class TestMQTTRPC(MQTTRPC):
    """Minimal MQTT-RPC server exposing a single public 'test' method."""
    @dispatcher.public
    async def test(name=''):
        # NOTE(review): defined without 'self' -- presumably the dispatcher
        # registers and invokes the raw function rather than a bound method;
        # confirm against the mqttrpc dispatcher documentation.
        print('Hello')
        return 'Hello, {}'.format(name)
# Start the RPC server and block, processing incoming requests on the event loop.
loop = asyncio.get_event_loop()
server = TestMQTTRPC(client_uid='test', loop=loop)
loop.run_until_complete(server.process_messages())
| StarcoderdataPython |
177553 | # Environments
# Put all custom environments here
import numpy as np
import gym
import logging
logger = logging.getLogger(__name__)
import sys
sys.path.append("../gym_tetris")
from gym_tetris import TetrisEnvironment
# can just download premade tetris environment online
# to register, look at torchkit (good example of base environment and multiple subtypes)
class FeatureWrapper(object):
    """Wrap a gym-style environment so observations are its engineered features.

    Every interaction is forwarded to the wrapped environment; the raw
    observation returned by ``step`` is replaced by ``env.get_features()``.
    The wrapper mirrors the environment's ``_terminal`` flag after each
    transition.
    """
    def __init__(self, env):
        self._env = env
        # Mirror the wrapped environment's terminal flag.
        self._terminal = env._terminal
    def set_state(self, state):
        """Forward an explicit state assignment to the wrapped environment."""
        self._env.set_state(state)
    def step(self, action):
        """Advance one step; return (features, reward, terminal, info)."""
        # Raw observation is discarded in favour of the feature vector.
        _, r_tp1, terminal, info = self._env.step(action)
        self._terminal = self._env._terminal
        board_features = self._env.get_features()
        return board_features, r_tp1, terminal, info
    def get_action_set(self):
        """Return the wrapped environment's action set."""
        return self._env.get_action_set()
    def reset(self):
        """Reset the wrapped environment and return the initial feature observation.

        Fix: previously returned None, which was inconsistent with step()
        returning features and with the gym convention that reset() yields
        the initial observation. Callers that ignored the return value are
        unaffected.
        """
        self._env.reset()
        self._terminal = self._env._terminal
        return self._env.get_features()
    def state(self):
        # Kept as a plain method (not @property) to preserve the call-site API.
        return self._env.get_features()
| StarcoderdataPython |
1743907 | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pytest
import time
import unittest
import numpy as np
import tensorflow as tf
from tensorforce import Agent, Environment, util
from test.unittest_base import UnittestBase
class TestSaving(UnittestBase, unittest.TestCase):
    """Tests for agent checkpointing: the `saver` config and explicit save()/load().

    Refactor: the directory-purge and artifact-cleanup sequences were
    copy-pasted (~120 duplicated lines); they are now factored into the
    private helpers below. Test method names and behavior are unchanged.
    """

    min_timesteps = 3
    require_observe = True
    directory = 'test/test-saving'

    def _purge_directory(self):
        """Remove the test directory and everything in it, if present."""
        if os.path.exists(path=self.__class__.directory):
            for filename in os.listdir(path=self.__class__.directory):
                os.remove(path=os.path.join(self.__class__.directory, filename))
            os.rmdir(path=self.__class__.directory)

    def _checkpoint_files(self, filename, indices):
        """Return the TF checkpoint triplet (.data/.index/.meta) for each save index."""
        files = []
        for index in indices:
            files.append('{}-{}.data-00000-of-00001'.format(filename, index))
            files.append('{}-{}.index'.format(filename, index))
            files.append('{}-{}.meta'.format(filename, index))
        return files

    def _cleanup(self, filenames, events_file=True):
        """Delete the expected artifacts, optionally the TF event log, then the directory.

        os.remove / os.rmdir raise if an expected file is missing or an
        unexpected one remains, so this doubles as an assertion on exactly
        which files a test produced.
        """
        for filename in filenames:
            os.remove(path=os.path.join(self.__class__.directory, filename))
        if events_file:
            # Exactly one summary file should remain; remove it and check its name.
            for filename in os.listdir(path=self.__class__.directory):
                os.remove(path=os.path.join(self.__class__.directory, filename))
                assert filename.startswith('events.out.tfevents.'), filename
                break
        os.rmdir(path=self.__class__.directory)

    def _act_observe(self, agent, environment, states):
        """Run one act/execute/observe interaction; return the successor states."""
        actions = agent.act(states=states)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        return states

    def test_config(self):
        # FEATURES.MD
        self.start_tests(name='config')
        self._purge_directory()

        # default: agent is saved periodically and restored via Agent.load
        saver = dict(directory=self.__class__.directory)
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        agent.close()

        agent = Agent.load(directory=self.__class__.directory, environment=environment)
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        self._cleanup(
            ['agent.json', 'checkpoint', 'graph.pbtxt']
            + self._checkpoint_files('agent', (0, 1))
        )
        self.finished_test()

        # no load: saver with load=False must start fresh instead of restoring
        saver = dict(directory=self.__class__.directory)
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        saver = dict(directory=self.__class__.directory, load=False)
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        self._cleanup(
            ['agent.json', 'checkpoint', 'graph.pbtxt']
            + self._checkpoint_files('agent', (0, 1))
        )
        self.finished_test()

    # @pytest.mark.skip(reason='currently takes too long')
    def test_config_extended(self):
        self.start_tests(name='config extended')
        self._purge_directory()

        # filename: checkpoints are written under a custom filename
        saver = dict(directory=self.__class__.directory, filename='test')
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        states = self._act_observe(agent, environment, states)
        agent.close()

        agent = Agent.load(
            directory=self.__class__.directory, filename='test', environment=environment
        )
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        self._cleanup(
            ['test.json', 'checkpoint', 'graph.pbtxt']
            + self._checkpoint_files('test', (0, 1, 2))
        )
        self.finished_test()

        # frequency: saving happens at most once per `frequency` seconds
        saver = dict(directory=self.__class__.directory, frequency=1)
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        time.sleep(1)
        states = self._act_observe(agent, environment, states)
        time.sleep(1)
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        self._cleanup(
            ['agent.json', 'checkpoint', 'graph.pbtxt']
            + self._checkpoint_files('agent', (0, 1, 2))
        )
        self.finished_test()

        # load filename: restore from an explicitly named checkpoint
        saver = dict(directory=self.__class__.directory)
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        saver = dict(directory=self.__class__.directory, load='agent-0')
        agent, environment = self.prepare(saver=saver)
        states = environment.reset()
        self._act_observe(agent, environment, states)
        agent.close()
        environment.close()

        self._cleanup(
            ['agent.json', 'checkpoint', 'graph.pbtxt']
            + self._checkpoint_files('agent', (0, 1))
        )
        self.finished_test()

    def test_explicit(self):
        # FEATURES.MD
        self.start_tests(name='explicit')
        self._purge_directory()

        # TODO: currently Protobuf saving is not compatible with internal state RNNs
        # episodes update to guarantee inequality between weights2 and weights3
        agent, environment = self.prepare(
            policy=dict(network=dict(type='auto', size=8, depth=1, internal_rnn=False)), memory=50,
            update=dict(unit='episodes', batch_size=1)
        )
        states = environment.reset()

        # save: default tensorflow format
        weights0 = agent.get_variable(variable='policy/policy-network/dense0/weights')
        agent.save(directory=self.__class__.directory)
        agent.close()
        self.finished_test()

        # load: only directory
        agent = Agent.load(directory=self.__class__.directory, environment=environment)
        x = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertTrue((x == weights0).all())
        self.assertEqual(agent.timesteps, 0)
        self.finished_test()

        # one timestep
        actions = agent.act(states=states)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)

        # save: numpy format, append timesteps
        weights1 = agent.get_variable(variable='policy/policy-network/dense0/weights')
        agent.save(directory=self.__class__.directory, format='numpy', append='timesteps')
        agent.close()
        self.finished_test()

        # load: numpy format and directory
        agent = Agent.load(
            directory=self.__class__.directory, format='numpy', environment=environment
        )
        x = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertTrue((x == weights1).all())
        self.assertEqual(agent.timesteps, 1)
        self.finished_test()

        # one timestep
        actions = agent.act(states=states)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)

        # save: numpy format, append timesteps
        weights2 = agent.get_variable(variable='policy/policy-network/dense0/weights')
        agent.save(directory=self.__class__.directory, format='numpy', append='timesteps')
        agent.close()
        self.finished_test()

        # load: numpy format and directory
        agent = Agent.load(
            directory=self.__class__.directory, format='numpy', environment=environment
        )
        x = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertTrue((x == weights2).all())
        self.assertEqual(agent.timesteps, 2)
        self.finished_test()

        # one episode
        while not terminal:
            actions = agent.act(states=states)
            states, terminal, reward = environment.execute(actions=actions)
            agent.observe(terminal=terminal, reward=reward)

        # save: hdf5 format, filename, append episodes
        weights3 = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertFalse((weights3 == weights2).all())
        self.assertEqual(agent.episodes, 1)
        agent.save(
            directory=self.__class__.directory, filename='agent2', format='hdf5', append='episodes'
        )
        agent.close()
        self.finished_test()

        # env close
        environment.close()

        # differing agent config: episode length, update, parallel_interactions
        environment = Environment.create(environment=self.environment_spec(max_episode_timesteps=7))

        # load: filename (hdf5 format implicit)
        agent = Agent.load(
            directory=self.__class__.directory, filename='agent2', environment=environment,
            update=dict(unit='episodes', batch_size=2), parallel_interactions=2
        )
        x = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertTrue((x == weights3).all())
        self.assertEqual(agent.episodes, 1)
        agent.close()
        self.finished_test()

        # load: tensorflow format (filename explicit)
        agent = Agent.load(
            directory=self.__class__.directory, format='tensorflow', environment=environment,
            update=dict(unit='episodes', batch_size=2), parallel_interactions=2
        )
        x = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertTrue((x == weights0).all())
        self.assertEqual(agent.timesteps, 0)
        self.assertEqual(agent.episodes, 0)
        agent.close()
        self.finished_test()

        # load: numpy format, full filename including timesteps suffix
        agent = Agent.load(
            directory=self.__class__.directory, filename='agent-1', format='numpy',
            environment=environment, update=dict(unit='episodes', batch_size=2),
            parallel_interactions=2
        )
        x = agent.get_variable(variable='policy/policy-network/dense0/weights')
        self.assertTrue((x == weights1).all())
        self.assertEqual(agent.timesteps, 1)
        self.assertEqual(agent.episodes, 0)
        agent.close()
        self.finished_test()

        # load: pb-actonly format
        agent = Agent.load(directory=self.__class__.directory, format='pb-actonly')
        x = agent.session.run(fetches='agent/policy/policy-network/dense0/weights:0')
        self.assertTrue((x == weights0).all())

        # one episode
        states = environment.reset()
        internals = agent.initial_internals()
        terminal = False
        while not terminal:
            actions, internals = agent.act(states=states, internals=internals)
            states, terminal, _ = environment.execute(actions=actions)
        agent.close()
        environment.close()

        self._cleanup(
            [
                'agent.json', 'checkpoint', 'agent.data-00000-of-00001', 'agent.index',
                'agent.meta', 'agent.pb', 'agent-1.npz', 'agent-2.npz', 'agent2.json',
                'agent2-1.hdf5'
            ],
            events_file=False
        )
        self.finished_test()
| StarcoderdataPython |
3380237 | import numpy as np
import itertools
import sys
import os
from qmla.exploration_strategies import connected_lattice
import qmla.shared_functionality.probe_set_generation
import qmla.shared_functionality.latex_model_names
from qmla import construct_models
# flatten list of lists
def flatten(l):
    """Collapse one level of nesting: concatenate the sub-iterables of l into a single list."""
    flattened = []
    for sublist in l:
        flattened.extend(sublist)
    return flattened
class FermiHubbardBase(
    connected_lattice.ConnectedLattice
):
    """Base exploration strategy for Fermi-Hubbard models.

    Configures the true model, probe-generation subroutines and model-search
    limits shared by the Fermi-Hubbard strategies below. Note that each
    Fermi-Hubbard site occupies two qubits (spin up and spin down).
    """
    def __init__(
        self,
        exploration_rules,
        **kwargs
    ):
        # print("[Exploration Strategies] init nv_spin_experiment_full_tree")
        super().__init__(
            exploration_rules=exploration_rules,
            **kwargs
        )
        # self.true_model = 'FHhop_1h2_up_d2'
        self.true_model = 'FHhop_1h2_down_d3+FHhop_1h2_up_d3+FHhop_1h3_down_d3+FHhop_2h3_up_d3+FHonsite_1_d3+FHonsite_2_d3+FHonsite_3_d3' # for testing
        self.tree_completed_initially = True
        self.initial_models = [
            self.true_model
        ]
        self.latex_string_map_subroutine = qmla.shared_functionality.latex_model_names.fermi_hubbard_latex
        self.system_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.separable_fermi_hubbard_half_filled
        # unless specifically different set of probes required
        self.simulator_probes_generation_subroutine = self.system_probes_generation_subroutine
        self.shared_probes = True # i.e. system and simulator get same probes for learning
        self.plot_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.fermi_hubbard_half_filled_superposition
        # self.plot_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.FermiHubbard_single_spin_n_sites
        # self.max_time_to_consider = 20
        self.num_sites_true = construct_models.get_num_qubits(self.true_model)
        self.num_qubits_true = 2*self.num_sites_true # FH uses 2 qubits per sites (up and down spin)
        self.max_num_qubits = (self.num_sites_true * 2) + 2
        self.num_processes_to_parallelise_over = 9
        # Cap on candidate models per model "shape" (number of qubits).
        self.max_num_models_by_shape = {
            1: 0,
            2: 0,
            4: 10,
            6 : 1,
            'other': 0
        }
class FermiHubbardProbabilistic(
    FermiHubbardBase
):
    """Exploration strategy that grows Fermi-Hubbard models probabilistically,
    validating each candidate before admitting it to the search tree.
    """

    def __init__(
        self,
        exploration_rules,
        **kwargs
    ):
        super().__init__(
            exploration_rules=exploration_rules,
            **kwargs
        )
        self.max_num_sites = 4
        self.max_num_probe_qubits = self.max_num_sites
        self.max_num_qubits = self.max_num_sites
        self.timing_insurance_factor = 20
        # self.num_probes = 20
        self.lattice_dimension = 1
        self.tree_completed_initially = False
        self.num_top_models_to_build_on = 1
        self.model_generation_strictness = 0
        self.fitness_win_ratio_exponent = 1
        self.qhl_models = [
            'FHhop_1h2_down_d3+FHonsite_3_d3'
        ]
        # Empty: true parameters are sampled rather than fixed.
        self.true_model_terms_params = {
            # term : true_param
            # 'FHhop_1h2_up_d2' : 1,
        }
        self.max_num_models_by_shape = {
            'other': 4
        }
        self.setup_exploration_class()

    def check_model_validity(
        self,
        model,
        **kwargs
    ):
        """Return True if `model` is a physically sensible Fermi-Hubbard model.

        A model is rejected when it consists solely of onsite (number) terms,
        or when it contains a number term on a site that no hopping term
        touches (such a term would be a constant of the motion).
        """
        terms = construct_models.get_constituent_names_from_name(model)
        if np.all(['FHhop' in a for a in terms]):
            # Purely kinetic models are always valid.
            return True
        elif np.all(['FHonsite' in a for a in terms]):
            # Onsite terms only: dynamics are trivial, discard.
            return False
        else:
            hopping_sites = []
            number_term_sites = []
            # Collected for completeness; chemical-potential terms are not
            # currently used in the validity decision.
            chemical_sites = []
            num_sites = construct_models.get_num_qubits(model)
            for term in terms:
                constituents = term.split('_')
                constituents.remove('d{}'.format(num_sites))
                if 'FHhop' in term:
                    constituents.remove('FHhop')
                    for c in constituents:
                        if 'h' in c:
                            hopping_sites.extend(c.split('h'))
                elif 'FHonsite' in term:
                    constituents.remove('FHonsite')
                    number_term_sites.extend(constituents)
                elif 'FHchemical' in term:
                    constituents.remove('FHchemical')
                    chemical_sites.extend(constituents)
            hopping_sites = set(hopping_sites)
            number_term_sites = set(number_term_sites)
            if number_term_sites.issubset(hopping_sites):
                return True
            else:
                # Some number term acts on a site absent from every hopping
                # term, so it commutes with the dynamics.
                # (Fix: log message previously concatenated without a space.)
                self.log_print(
                    [
                        "Rejecting model", model,
                        "bc number terms present "
                        "which aren't present in kinetic term"
                    ]
                )
                return False

    def match_dimension(
        self,
        mod_name,
        num_sites,
        **kwargs
    ):
        """Rewrite `mod_name` so every term carries the dimension tag for `num_sites`."""
        dimension_matched_name = match_dimension_hubbard(
            mod_name,
            num_sites,
        )
        return dimension_matched_name

    def generate_terms_from_new_site(
        self,
        **kwargs
    ):
        """Delegate candidate-term generation to the module-level helper."""
        return generate_new_terms_hubbard(**kwargs)

    def combine_terms(
        self,
        terms,
    ):
        """Join terms into a canonical (sorted, '+'-separated) model name."""
        addition_string = '+'
        terms = sorted(terms)
        return addition_string.join(terms)
class FermiHubbardPredetermined(
    FermiHubbardProbabilistic
):
    """Exploration strategy with a fixed, predetermined set of candidate models
    (the tree is complete from the start; no probabilistic growth).
    """
    def __init__(
        self,
        exploration_rules,
        **kwargs
    ):
        super().__init__(
            exploration_rules=exploration_rules,
            **kwargs
        )
        # self.true_model = 'FHhop_1h2_up_d2'
        self.tree_completed_initially = True
        # self.max_time_to_consider = 5
        self.num_processes_to_parallelise_over = 9
        self.max_num_models_by_shape = {
            # Note dN here requires 2N qubits so d3 counts as shape 6
            1: 0,
            2: 0,
            3 : 4,
            4: 4,
            6: 4,
            8: 4,
            'other': 0
        }
        self.max_num_qubits = 8
        # Fixed candidate set; the true model is appended below if missing.
        self.initial_models = [
            'FHhop_1h2_down_d2',
            'FHhop_1h2_up_d2',
            'FHhop_1h2_down_d2+FHhop_1h2_up_d2',
            'FHhop_1h2_down_d3+FHhop_1h2_up_d3+FHhop_1h3_down_d3+FHonsite_1_d3+FHonsite_2_d3+FHonsite_3_d3',
            'FHhop_1h2_down_d3+FHhop_1h2_up_d3+FHhop_1h3_down_d3+FHhop_1h3_up_d3+FHhop_2h3_down_d3+FHhop_2h3_up_d3',
            'FHhop_1h2_down_d3+FHhop_1h2_up_d3+FHhop_1h3_down_d3+FHhop_1h3_up_d3+FHhop_2h3_down_d3+FHhop_2h3_up_d3+FHonsite_1_d3+FHonsite_2_d3+FHonsite_3_d3',
            'FHhop_1h2_down_d3+FHhop_1h2_up_d3+FHhop_1h3_down_d3+FHhop_2h3_up_d3+FHonsite_1_d3+FHonsite_2_d3+FHonsite_3_d3',
            # 'FHhop_1h2_down_d4+FHhop_1h2_up_d4+FHhop_1h3_down_d4+FHhop_2h4_down_d4+FHonsite_1_d4+FHonsite_2_d4+FHonsite_3_d4+FHonsite_4_d4',
        ]
        self.max_num_sites = 4
        self.qhl_models = self.initial_models
        # Fixed true parameters used when simulating the system.
        self.true_model_terms_params = {
            'FHhop_1h2_down_d3': 0.56358314008409693,
            'FHhop_1h2_up_d3': 0.61068905874925739,
            'FHhop_1h3_down_d3': 0.49954927338652116,
            'FHhop_2h3_up_d3': 0.52398243911927589,
            'FHonsite_1_d3': 0.6260708716247626,
            'FHonsite_2_d3': 0.51488767350916587,
            'FHonsite_3_d3': 0.6516084737325778
        }
        if self.true_model not in self.initial_models:
            self.log_print(
                [
                    "True model not present in initial models for predetermined set; adding it."
                ]
            )
            self.initial_models.append(self.true_model)
        self.log_print(
            [
                "Predetermined models:", self.initial_models
            ]
        )
        self.setup_exploration_class()
def generate_new_terms_hubbard(
    connected_sites,
    num_sites,
    new_sites,
    **kwargs
):
    """Build candidate Fermi-Hubbard terms for a lattice extension.

    For every connected site pair, emit an up- and a down-spin hopping term;
    for every newly added site, emit an onsite (number) term. Ordering matches
    the construction order: all hopping terms first, then onsite terms.
    """
    hopping_terms = [
        "FHhop_{}h{}_{}_d{}".format(pair[0], pair[1], spin, num_sites)
        for pair in connected_sites
        for spin in ('up', 'down')
    ]
    onsite_terms = [
        "FHonsite_{}_d{}".format(site, num_sites)
        for site in new_sites
    ]
    return hopping_terms + onsite_terms
def match_dimension_hubbard(
    model_name,
    num_sites,
    **kwargs
):
    """Rewrite every term's dimension tag ('dN') in `model_name` to d{num_sites}.

    Terms are '+'-separated; within a term, '_'-separated parts starting with
    'd' are dimension tags, except the literal spin/label words 'down' and
    'double'.

    Fix: the original removed elements from `parts` while iterating over the
    same list, which skips elements (and `part[0]` raised IndexError on empty
    parts). The tag is now stripped with a filter over a fresh list.
    """
    redimensionalised_terms = []
    for term in model_name.split('+'):
        # Keep everything except existing dimension tags, then append the new one.
        parts = [
            part for part in term.split('_')
            if not (part.startswith('d') and part not in ('down', 'double'))
        ]
        parts.append("d{}".format(num_sites))
        redimensionalised_terms.append("_".join(parts))
    return "+".join(redimensionalised_terms)
| StarcoderdataPython |
3386991 | import abc
from enum import IntEnum, auto
class Flags(IntEnum):
    """Data-quality flags attached to transit records.

    Members are assigned consecutive integer values via auto(), so their
    declaration order is significant -- do not reorder. See flags.md for the
    full documentation of each flag.
    """
    ###################################################
    # Documentation of flags can be found in flags.md
    ###################################################
    #Null flags
    ROW_ID_NULL = auto()
    SERVICE_DATE_NULL = auto()
    VEHICLE_NUMBER_NULL = auto()
    LEAVE_TIME_NULL = auto()
    TRAIN_NULL = auto()
    ROUTE_NUMBER_NULL = auto()
    DIRECTION_NULL = auto()
    SERVICE_KEY_NULL = auto()
    TRIP_NUMBER_NULL = auto()
    STOP_TIME_NULL = auto()
    ARRIVE_TIME_NULL = auto()
    DWELL_NULL = auto()
    LOCATION_ID_NULL = auto()
    DOOR_NULL = auto()
    ONS_NULL = auto()
    OFFS_NULL = auto()
    ESTIMATED_LOAD_NULL = auto()
    LIFT_NULL = auto()
    MAXIMUM_SPEED_NULL = auto()
    TRAIN_MILEAGE_NULL = auto()
    PATTERN_DISTANCE_NULL = auto()
    LOCATION_DISTANCE_NULL = auto()
    X_COORDINATE_NULL = auto()
    Y_COORDINATE_NULL = auto()
    DATA_SOURCE_NULL = auto()
    SCHEDULE_STATUS_NULL = auto()
    TRIP_ID_NULL = auto()
    #Unobserved stop flag
    UNOBSERVED_STOP = auto()
    #Unopened door flag
    UNOPENED_DOOR = auto()
    #Duplicate flag
    DUPLICATE = auto()
class Flagger(abc.ABC):
    """Base class for checks that inspect a record and report quality flags."""

    # Name must be overwritten by subclasses.
    @property
    def name(self):
        raise NotImplementedError

    @abc.abstractmethod
    def flag(self, data):
        # Child classes must return a list of flags.
        pass
class FlagInfo:
    """Human-readable metadata (short name plus description) for a flag."""

    def __init__(self, name="", desc=""):
        self.name, self.desc = name, desc
# Maps every Flags member to its short name and description.  The short names
# are irregular (e.g. DWELL_NULL -> "null-dwell-time"), so the mapping is kept
# as an explicit literal rather than being generated from the member names.
flag_descriptions = {
    Flags.ROW_ID_NULL: FlagInfo("null-row-id", "ROW_ID_NULL"),
    Flags.SERVICE_DATE_NULL: FlagInfo("null-service-date", "SERVICE_DATE_NULL"),
    Flags.VEHICLE_NUMBER_NULL: FlagInfo("null-vehicle-number", "VEHICLE_NUMBER_NULL"),
    Flags.LEAVE_TIME_NULL: FlagInfo("null-leave-time", "LEAVE_TIME_NULL"),
    Flags.TRAIN_NULL: FlagInfo("null-train", "TRAIN_NULL"),
    Flags.ROUTE_NUMBER_NULL: FlagInfo("null-route-number", "ROUTE_NUMBER_NULL"),
    Flags.DIRECTION_NULL: FlagInfo("null-direction", "DIRECTION_NULL"),
    Flags.SERVICE_KEY_NULL: FlagInfo("null-service-key", "SERVICE_KEY_NULL"),
    Flags.TRIP_NUMBER_NULL: FlagInfo("null-trip-number", "TRIP_NUMBER_NULL"),
    Flags.STOP_TIME_NULL: FlagInfo("null-stop-time", "STOP_TIME_NULL"),
    Flags.ARRIVE_TIME_NULL: FlagInfo("null-arrive-time", "ARRIVE_TIME_NULL"),
    Flags.DWELL_NULL: FlagInfo("null-dwell-time", "DWELL_NULL"),
    Flags.LOCATION_ID_NULL: FlagInfo("null-location-id", "LOCATION_ID_NULL"),
    Flags.DOOR_NULL: FlagInfo("null-door", "DOOR_NULL"),
    Flags.ONS_NULL: FlagInfo("null-ons", "ONS_NULL"),
    Flags.OFFS_NULL: FlagInfo("null-offs", "OFFS_NULL"),
    Flags.ESTIMATED_LOAD_NULL: FlagInfo("null-estimated-load", "ESTIMATED_LOAD_NULL"),
    Flags.LIFT_NULL: FlagInfo("null-lift", "LIFT_NULL"),
    Flags.MAXIMUM_SPEED_NULL: FlagInfo("null-maximum-speed", "MAXIMUM_SPEED_NULL"),
    Flags.TRAIN_MILEAGE_NULL: FlagInfo("null-train-mileage", "TRAIN_MILEAGE_NULL"),
    Flags.PATTERN_DISTANCE_NULL: FlagInfo("null-pattern-distance", "PATTERN_DISTANCE_NULL"),
    Flags.LOCATION_DISTANCE_NULL: FlagInfo("null-location-distance", "LOCATION_DISTANCE_NULL"),
    Flags.X_COORDINATE_NULL: FlagInfo("null-x-coordinate", "X_COORDINATE_NULL"),
    Flags.Y_COORDINATE_NULL: FlagInfo("null-y-coordinate", "Y_COORDINATE_NULL"),
    Flags.DATA_SOURCE_NULL: FlagInfo("null-data-source", "DATA_SOURCE_NULL"),
    Flags.SCHEDULE_STATUS_NULL: FlagInfo("null-schedule-status", "SCHEDULE_STATUS_NULL"),
    Flags.TRIP_ID_NULL: FlagInfo("null-trip-id", "TRIP_ID_NULL"),
    Flags.UNOBSERVED_STOP: FlagInfo("unobserved-stop", "UNOBSERVED_STOP"),
    Flags.UNOPENED_DOOR: FlagInfo("unopened-door", "UNOPENED_DOOR"),
    Flags.DUPLICATE: FlagInfo("duplicate", "DUPLICATE"),
}
# Registry of Flagger instances (empty here; presumably appended to by the
# individual flagger modules — confirm against the rest of the package).
flaggers = []
| StarcoderdataPython |
199811 | <filename>DataLogger 01.py<gh_stars>0
# Programa de dataloogger com Cayenne
# Autor: <NAME>
# Data: junho/2018
# Comunicacao com o Cayenne
# pip3 install cayenne-mqtt
# Data Logger
import cayenne.client
import time, sys, csv
from sense_hat import *
import numpy as np
import RPi.GPIO as GPIO
# --------------------------
# Authentication credentials obtained from the Cayenne dashboard.
# NOTE(review): secrets are hard-coded in source; consider loading them from
# the environment or a config file instead of committing them.
MQTT_USERNAME = "d8c8efc0-806c-11e7-88a0-c7ea1897b6ae"
MQTT_PASSWORD = "<PASSWORD>"
MQTT_CLIENT_ID = "de20f590-8885-11e8-b98d-6b2426cc1856"
# Callback fired by the Cayenne client when a message is received.
def on_message(message):
    """Print every incoming Cayenne MQTT message."""
    texto = "Mensagem recebida: " + str(message)
    print(texto)
# Create the Cayenne MQTT client, register the message callback and connect
# using the credentials above.
client = cayenne.client.CayenneMQTTClient()
client.on_message = on_message
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
# --------------------------
# Configure GPIO pin 17 (BCM numbering) as an output and clear the Sense HAT
# LED matrix before the acquisition loop starts.
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
sh = SenseHat()
sh.clear()
tempo_leituras = 10  # Seconds between consecutive sensor readings.
# Main acquisition loop: every `tempo_leituras` seconds read the CPU and
# Sense HAT sensors, publish them to Cayenne, scroll them on the LED matrix
# and append a row to the local CSV log.
while True:
    try:
        time.sleep(tempo_leituras)
        client.loop()
        # The kernel exposes the CPU temperature in millidegrees Celsius.
        with open('/sys/class/thermal/thermal_zone0/temp') as tFile:
            t_CPU = float(tFile.read()) / 1000
        t_SH = sh.get_temperature()
        t_SH_H = sh.get_temperature_from_humidity()
        t_SH_P = sh.get_temperature_from_pressure()
        P = sh.get_pressure()
        UR = sh.get_humidity()
        # Publish each reading on its own Cayenne channel.
        client.celsiusWrite(0, t_SH)
        client.celsiusWrite(1, t_SH_H)
        client.virtualWrite(2, t_SH_P, "temp", "c")
        client.virtualWrite(3, P, "bp", "pa")
        client.virtualWrite(4, UR, "rel_hum", "p")
        client.celsiusWrite(5, t_CPU)
        # Scroll the readings across the LED matrix.
        sh.show_message('%.1fC' % t_CPU, text_colour=[255, 0, 0])  # CPU temperature
        sh.show_message('%.1fC' % t_SH, text_colour=[0, 255, 0])  # Sense HAT temperature
        sh.show_message('%.1fpa' % P, text_colour=[130, 130, 130])  # Atmospheric pressure
        sh.show_message('%.1f%%' % UR, text_colour=[0, 0, 255])  # Relative humidity
        print(time.strftime("%d-%m-%Y %H:%M:%S"),',', t_CPU,',', t_SH,',', P,',',UR)
        # Append the readings to the local CSV log.
        registro = [time.strftime("%d-%m-%Y %H:%M:%S"), t_CPU, t_SH, t_SH_H, t_SH_P, P, UR]
        with open('templog.csv', 'a') as f:
            csv.writer(f).writerow(registro)
    except Exception:
        # Bug fix: the original used a bare `except:` followed by the
        # statement `exit` (a no-op name reference), so every error was
        # swallowed and the loop never actually terminated; the manual
        # tFile.close()/f.close() calls could also raise NameError or
        # double-close.  The `with` blocks above handle cleanup, so a
        # failure now simply stops the logger.
        break
| StarcoderdataPython |
192815 | <reponame>eugenividal/GEOG5995M_Assessment_2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 11:48:17 2018
GEOG5995M Programming for Social Science: Core Skills
@author: <NAME>
"""
# The algorithm for the model:
# 1. Set up the environment
# 2. Make the drunks and give them a name
# 3. Move the drunks and draw the density
# 4. Save the density map to a file as text
# First, we import all packages we will need for the model.
import csv
import matplotlib.pyplot
import matplotlib.animation
import drunkframework
# 1. Set up the environment.
## Create the list.
environment = []
## Read the raster data.
# NOTE(review): the file handle is opened without a `with` block; it is
# closed manually below, so an exception mid-read would leak it.
f = open('drunk.plan.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
## Put drunk.plan file csv into rows.
for row in reader:
    rowlist =[]
    for value in row:
        rowlist.append(value)
    environment.append(rowlist)
f.close()
## Check how the environment looks (kept as an inert string so it can be
## re-enabled quickly while debugging).
"""
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.imshow(environment)
matplotlib.pyplot.show()
"""
# 2. Make the drunks and give them a name.
## Create the list.
drunks = []
## Establish the number of drunks and iterations.
num_of_drunks = 25; # Number drunks range(10, 250, 10).
num_of_iterations = 1000000 # Infinite steps would have been better, but didn't know how to do it.
## Code to size the plot and animating.
fig = matplotlib.pyplot.figure(figsize=(5, 5))
ax = fig.add_axes([0, 0, 1, 1])
## Make and name the drunks.  Names are 10, 20, ... matching the home values
## stored in the raster environment.
for i in range(num_of_drunks):
    name = ((1+i)*10) # because Python starts in 0 we have to add 1 first and then just multiply by 10.
    # print(name) # test names are right
    drunks.append(drunkframework.Drunk(environment, drunks, name))
# 3. Move the drunks and draw the density.
# Each drunk wanders until it stands on the raster cell holding its own name
# (its "home"), or until the iteration budget runs out.
for i in range (num_of_drunks):
    drunk = drunks[i]
    for j in range(num_of_iterations):
        if environment [drunk.y][drunk.x] != drunk.name: # move the drunks until they don't find their own home
            drunks[i].move()
            drunks[i].steps()
## Plot to check if the model works
matplotlib.pyplot.xlim(0,300)
matplotlib.pyplot.ylim(0,300)
matplotlib.pyplot.imshow(environment)
for i in range(num_of_drunks):
    matplotlib.pyplot.scatter(drunks[i].x,drunks[i].y)
matplotlib.pyplot.show()
# 4. Save the density map to a file as text.
with open('environment.density.txt', 'w', newline='') as f:
    csvwriter = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
    for row in environment:
        csvwriter.writerow(row)
"""
THE END
""" | StarcoderdataPython |
3291647 | # -*- encoding: utf-8 -*-
#
# Copyright © 2020–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import distutils.util
import logging
import os
import secrets
import typing
import dotenv
import voluptuous
from mergify_engine import github_types
# NOTE(sileht) we coerce bool and int in case they are loaded from the environment
# Accepted spellings, matching the deprecated ``distutils.util.strtobool``.
_TRUTHY_STRINGS = frozenset({"y", "yes", "t", "true", "on", "1"})
_FALSY_STRINGS = frozenset({"n", "no", "f", "false", "off", "0"})


def CoercedBool(value: typing.Any) -> bool:
    """Coerce an environment-variable value to a bool.

    Accepts the same case-insensitive spellings as
    ``distutils.util.strtobool`` (``y/yes/t/true/on/1`` and
    ``n/no/f/false/off/0``) and raises ``ValueError`` for anything else.
    ``distutils`` was deprecated by PEP 632 and removed in Python 3.12, so
    the lookup is done locally instead.
    """
    normalized = str(value).lower()
    if normalized in _TRUTHY_STRINGS:
        return True
    if normalized in _FALSY_STRINGS:
        return False
    raise ValueError("invalid truth value {!r}".format(normalized))
def CoercedLoggingLevel(value: str) -> int:
    """Convert a level name (any case) to its numeric ``logging`` level."""
    level_name = value.upper()
    known_levels = {"CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"}
    if level_name not in known_levels:
        raise ValueError(level_name)
    return int(getattr(logging, level_name))
def CommaSeparatedStringList(value: str) -> typing.List[str]:
    """Split a comma-separated env value; an empty string yields an empty list."""
    return value.split(",") if value else []
def CommaSeparatedStringTuple(
    v: str, split: int = 2
) -> typing.List[typing.Tuple[str, ...]]:
    """Parse ``"a:b,c:d"`` style values into a list of string tuples.

    Each comma-separated entry must contain exactly ``split`` colon-separated
    fields; blank entries are skipped and every field is stripped of
    surrounding whitespace.  Raises ``ValueError`` on a malformed entry.
    """
    parsed = []
    for entry in v.split(","):
        if not entry.strip():
            continue
        fields = entry.split(":", maxsplit=split)
        if len(fields) != split:
            raise ValueError("not enough :")
        parsed.append(tuple(field.strip() for field in fields))
    return parsed
def AccountTokens(v: str) -> typing.Dict[str, str]:
    """Parse ``login:token`` pairs from the environment into a login->token dict."""
    try:
        pairs = typing.cast(
            typing.List[typing.Tuple[str, str]],
            CommaSeparatedStringTuple(v, split=2),
        )
    except ValueError:
        raise ValueError("wrong format, expect `login1:token1,login2:token2`")
    return dict(pairs)
# Length, in characters, of each half of an application API key: the first
# 32 characters are the public access key, the remaining 32 the secret.
API_ACCESS_KEY_LEN = 32
API_SECRET_KEY_LEN = 32


class ApplicationAPIKey(typing.TypedDict):
    """Parsed application API key entry, keyed by its access-key prefix."""

    # Secret half of the key (never use as a lookup index).
    api_secret_key: str
    # Public half of the key, used as the dictionary key.
    api_access_key: str
    # GitHub account the key is scoped to.
    account_id: int
    account_login: str
def ApplicationAPIKeys(v: str) -> typing.Dict[str, ApplicationAPIKey]:
    """Parse ``api_key:account_id:account_login`` triples from the environment.

    Each ``api_key`` must be ``API_ACCESS_KEY_LEN + API_SECRET_KEY_LEN``
    characters long; the access-key prefix becomes the dictionary key.
    Raises ``ValueError`` with a usage hint on any malformed entry.
    """
    try:
        applications = CommaSeparatedStringTuple(v, 3)
        for api_key, _, _ in applications:
            # Bug fix: the length check previously added API_ACCESS_KEY_LEN
            # to itself; a key is an access part plus a *secret* part.
            if len(api_key) != API_ACCESS_KEY_LEN + API_SECRET_KEY_LEN:
                raise ValueError
        return {
            api_key[:API_ACCESS_KEY_LEN]: {
                "api_access_key": api_key[:API_ACCESS_KEY_LEN],
                "api_secret_key": api_key[API_ACCESS_KEY_LEN:],
                "account_id": int(account_id),
                "account_login": account_login,
            }
            for api_key, account_id, account_login in applications
        }
    except ValueError:
        # Bug fix: the example previously repeated "api_key1" twice.
        raise ValueError(
            "wrong format, expect `api_key1:github_account_id1:github_account_login1,api_key2:github_account_id2:github_account_login2`, api_key must be 64 character long"
        )
# Voluptuous schema describing every supported MERGIFYENGINE_* environment
# variable, its default value and its coercion function.  ``CONFIG`` is
# validated against this schema further down and the validated result is
# injected into this module's globals.
Schema = voluptuous.Schema(
    {
        voluptuous.Required(
            "VERSION", default=os.getenv("HEROKU_SLUG_COMMIT", "dev")
        ): str,
        # Logging
        voluptuous.Required(
            "LOG_DEBUG_LOGGER_NAMES", default=""
        ): CommaSeparatedStringList,
        voluptuous.Required("API_ENABLE", default=False): CoercedBool,
        voluptuous.Required("LOG_LEVEL", default="INFO"): CoercedLoggingLevel,
        voluptuous.Required("LOG_RATELIMIT", default=False): CoercedBool,
        voluptuous.Required("LOG_STDOUT", default=True): CoercedBool,
        voluptuous.Required("LOG_STDOUT_LEVEL", default=None): voluptuous.Any(
            None, CoercedLoggingLevel
        ),
        voluptuous.Required("LOG_DATADOG", default=False): CoercedBool,
        voluptuous.Required("LOG_DATADOG_LEVEL", default=None): voluptuous.Any(
            None, CoercedLoggingLevel
        ),
        voluptuous.Required("SENTRY_URL", default=None): voluptuous.Any(None, str),
        voluptuous.Required("SENTRY_ENVIRONMENT", default="test"): str,
        # GitHub App mandatory
        voluptuous.Required("INTEGRATION_ID"): voluptuous.Coerce(int),
        voluptuous.Required("PRIVATE_KEY"): str,
        voluptuous.Required("OAUTH_CLIENT_ID"): str,
        voluptuous.Required("OAUTH_CLIENT_SECRET"): str,
        voluptuous.Required("WEBHOOK_SECRET"): str,
        voluptuous.Required(
            "WEBHOOK_SECRET_PRE_ROTATION", default=None
        ): voluptuous.Any(None, str),
        # GitHub common
        voluptuous.Required("BOT_USER_ID"): voluptuous.Coerce(int),
        voluptuous.Required("BOT_USER_LOGIN"): str,
        # GitHub optional
        voluptuous.Required("GITHUB_URL", default="https://github.com"): str,
        voluptuous.Required(
            "GITHUB_REST_API_URL", default="https://api.github.com"
        ): str,
        voluptuous.Required(
            "GITHUB_GRAPHQL_API_URL", default="https://api.github.com/graphql"
        ): str,
        #
        # Dashboard settings
        #
        voluptuous.Required(
            "SUBSCRIPTION_BASE_URL", default="http://localhost:5000"
        ): str,
        # OnPremise special config
        voluptuous.Required("SUBSCRIPTION_TOKEN", default=None): voluptuous.Any(
            None, str
        ),
        voluptuous.Required("ACCOUNT_TOKENS", default=""): voluptuous.Coerce(
            AccountTokens
        ),
        voluptuous.Required("APPLICATION_APIKEYS", default=""): voluptuous.Coerce(
            ApplicationAPIKeys
        ),
        # Saas Special config
        voluptuous.Required("ENGINE_TO_DASHBOARD_API_KEY"): str,
        voluptuous.Required("DASHBOARD_TO_ENGINE_API_KEY"): str,
        voluptuous.Required(
            "DASHBOARD_TO_ENGINE_API_KEY_PRE_ROTATION", default=None
        ): voluptuous.Any(None, str),
        voluptuous.Required("WEBHOOK_APP_FORWARD_URL", default=None): voluptuous.Any(
            None, str
        ),
        voluptuous.Required(
            "WEBHOOK_MARKETPLACE_FORWARD_URL", default=None
        ): voluptuous.Any(None, str),
        voluptuous.Required(
            "WEBHOOK_FORWARD_EVENT_TYPES", default=None
        ): voluptuous.Any(None, CommaSeparatedStringList),
        #
        # Mergify Engine settings
        #
        voluptuous.Required("BASE_URL", default="http://localhost:8802"): str,
        voluptuous.Required(
            "REDIS_SSL_VERIFY_MODE_CERT_NONE", default=False
        ): CoercedBool,
        voluptuous.Required(
            "REDIS_STREAM_WEB_MAX_CONNECTIONS", default=None
        ): voluptuous.Any(None, voluptuous.Coerce(int)),
        voluptuous.Required(
            "REDIS_CACHE_WEB_MAX_CONNECTIONS", default=None
        ): voluptuous.Any(None, voluptuous.Coerce(int)),
        voluptuous.Required("STORAGE_URL", default="redis://localhost:6379?db=8"): str,
        voluptuous.Required("QUEUE_URL", default=None): voluptuous.Any(None, str),
        voluptuous.Required("STREAM_URL", default=None): voluptuous.Any(None, str),
        voluptuous.Required("SHARED_STREAM_PROCESSES", default=1): voluptuous.Coerce(
            int
        ),
        voluptuous.Required(
            "SHARED_STREAM_TASKS_PER_PROCESS", default=7
        ): voluptuous.Coerce(int),
        voluptuous.Required(
            "BUCKET_PROCESSING_MAX_SECONDS", default=30
        ): voluptuous.Coerce(int),
        voluptuous.Required("CACHE_TOKEN_SECRET"): str,
        voluptuous.Required("CONTEXT", default="mergify"): str,
        voluptuous.Required("GIT_EMAIL", default="<EMAIL>"): str,
        voluptuous.Required("WORKER_SHUTDOWN_TIMEOUT", default=10): voluptuous.Coerce(
            float
        ),
        voluptuous.Required("ALLOW_COMMIT_MESSAGE_OPTION", default=True): CoercedBool,
        # For test suite only (eg: tox -erecord)
        voluptuous.Required(
            "TESTING_FORWARDER_ENDPOINT",
            default="https://test-forwarder.mergify.io/events",
        ): str,
        voluptuous.Required("INSTALLATION_ID", default=15398551): voluptuous.Coerce(
            int
        ),
        voluptuous.Required(
            "TESTING_REPOSITORY_ID", default=258840104
        ): voluptuous.Coerce(int),
        voluptuous.Required(
            "TESTING_REPOSITORY_NAME", default="functional-testing-repo"
        ): str,
        voluptuous.Required(
            "TESTING_ORGANIZATION_ID", default=40527191
        ): voluptuous.Coerce(int),
        voluptuous.Required(
            "TESTING_ORGANIZATION_NAME", default="mergifyio-testing"
        ): str,
        voluptuous.Required(
            "ORG_ADMIN_PERSONAL_TOKEN",
            default="<ORG_ADMIN_PERSONAL_TOKEN>",
        ): str,
        voluptuous.Required(
            "EXTERNAL_USER_PERSONAL_TOKEN", default="<EXTERNAL_USER_TOKEN>"
        ): str,
        voluptuous.Required(
            "ORG_USER_PERSONAL_TOKEN", default="<EXTERNAL_USER_TOKEN>"
        ): str,
        voluptuous.Required(
            "ORG_ADMIN_GITHUB_APP_OAUTH_TOKEN",
            default="<ORG_USER_GITHUB_APP_OAUTH_TOKEN>",
        ): str,
        voluptuous.Required(
            "TESTING_MERGIFY_TEST_1_ID", default=38494943
        ): voluptuous.Coerce(int),
        voluptuous.Required(
            "TESTING_MERGIFY_TEST_2_ID", default=38495008
        ): voluptuous.Coerce(int),
        # Optional entries: no default, only validated when present.
        "TESTING_GPGKEY_SECRET": str,
        "TESTING_ID_GPGKEY_SECRET": str,
    }
)
# Config variables available.
# These module-level annotations declare the names injected into this module
# by the ``globals().update(Schema(CONFIG))`` call below, so that type
# checkers know their types even though the assignments are dynamic.
VERSION: str
BASE_URL: str
API_ENABLE: bool
SENTRY_URL: str
SENTRY_ENVIRONMENT: str
CACHE_TOKEN_SECRET: str
PRIVATE_KEY: bytes
GITHUB_URL: str
GITHUB_REST_API_URL: str
GITHUB_GRAPHQL_API_URL: str
WEBHOOK_MARKETPLACE_FORWARD_URL: str
WEBHOOK_APP_FORWARD_URL: str
WEBHOOK_FORWARD_EVENT_TYPES: str
WEBHOOK_SECRET: str
WEBHOOK_SECRET_PRE_ROTATION: typing.Optional[str]
SHARED_STREAM_PROCESSES: int
SHARED_STREAM_TASKS_PER_PROCESS: int
EXTERNAL_USER_PERSONAL_TOKEN: str
BOT_USER_ID: int
BOT_USER_LOGIN: str
STORAGE_URL: str
STREAM_URL: str
QUEUE_URL: str
BUCKET_PROCESSING_MAX_SECONDS: int
INTEGRATION_ID: int
SUBSCRIPTION_BASE_URL: str
SUBSCRIPTION_TOKEN: str
ENGINE_TO_DASHBOARD_API_KEY: str
DASHBOARD_TO_ENGINE_API_KEY: str
DASHBOARD_TO_ENGINE_API_KEY_PRE_ROTATION: str
OAUTH_CLIENT_ID: str
OAUTH_CLIENT_SECRET: str
GIT_EMAIL: str
CONTEXT: str
ACCOUNT_TOKENS: typing.Dict[str, str]
APPLICATION_APIKEYS: typing.Dict[str, ApplicationAPIKey]
WORKER_SHUTDOWN_TIMEOUT: float
REDIS_SSL_VERIFY_MODE_CERT_NONE: bool
REDIS_STREAM_WEB_MAX_CONNECTIONS: typing.Optional[int]
REDIS_CACHE_WEB_MAX_CONNECTIONS: typing.Optional[int]
TESTING_ORGANIZATION_ID: github_types.GitHubAccountIdType
TESTING_ORGANIZATION_NAME: github_types.GitHubLogin
TESTING_REPOSITORY_ID: github_types.GitHubRepositoryIdType
TESTING_REPOSITORY_NAME: str
TESTING_FORWARDER_ENDPOINT: str
LOG_LEVEL: int  # This is converted to an int by voluptuous
LOG_STDOUT: bool
LOG_STDOUT_LEVEL: int  # This is converted to an int by voluptuous
LOG_DATADOG: bool
LOG_DATADOG_LEVEL: int  # This is converted to an int by voluptuous
LOG_DEBUG_LOGGER_NAMES: typing.List[str]
ORG_ADMIN_PERSONAL_TOKEN: str
ORG_ADMIN_GITHUB_APP_OAUTH_TOKEN: github_types.GitHubOAuthToken
ORG_USER_PERSONAL_TOKEN: github_types.GitHubOAuthToken
TESTING_MERGIFY_TEST_1_ID: int
TESTING_MERGIFY_TEST_2_ID: int
TESTING_GPGKEY_SECRET: bytes
TESTING_ID_GPGKEY_SECRET: str
ALLOW_COMMIT_MESSAGE_OPTION: bool
# Load optional test settings from a dotenv file before reading the env.
configuration_file = os.getenv("MERGIFYENGINE_TEST_SETTINGS")
if configuration_file is not None:
    dotenv.load_dotenv(dotenv_path=configuration_file, override=True)

# Collect every MERGIFYENGINE_* variable declared in the schema.
CONFIG: typing.Dict[str, typing.Any] = {}
# Iterate keys directly instead of ``.items()``: the schema values are unused.
for key in Schema.schema:
    val = os.getenv(f"MERGIFYENGINE_{key}")
    if val is not None:
        CONFIG[key] = val

# DASHBOARD API KEYS are required only for Saas; generate throwaway ones for
# on-premise installations (identified by SUBSCRIPTION_TOKEN being set).
if "SUBSCRIPTION_TOKEN" in CONFIG:
    for key in ("DASHBOARD_TO_ENGINE_API_KEY", "ENGINE_TO_DASHBOARD_API_KEY"):
        CONFIG[key] = secrets.token_hex(16)

# Backward compatibility with the legacy GITHUB_API_URL variable.
legacy_api_url = os.getenv("MERGIFYENGINE_GITHUB_API_URL")
if legacy_api_url is not None:
    if legacy_api_url[-1] == "/":
        legacy_api_url = legacy_api_url[:-1]
    if legacy_api_url.endswith("/api/v3"):
        CONFIG["GITHUB_REST_API_URL"] = legacy_api_url
        CONFIG["GITHUB_GRAPHQL_API_URL"] = f"{legacy_api_url[:-3]}/graphql"

# Validate the collected values and inject them into the module namespace.
globals().update(Schema(CONFIG))

# STREAM_URL/QUEUE_URL default to the main storage Redis when unset.
if globals()["STREAM_URL"] is None:
    STREAM_URL = globals()["STORAGE_URL"]
if globals()["QUEUE_URL"] is None:
    QUEUE_URL = globals()["STORAGE_URL"]

# NOTE(sileht): Docker can't pass multiline in environment, so we allow to pass
# it in base64 format
if not CONFIG["PRIVATE_KEY"].startswith("----"):
    CONFIG["PRIVATE_KEY"] = base64.b64decode(CONFIG["PRIVATE_KEY"])
PRIVATE_KEY = CONFIG["PRIVATE_KEY"]

if "TESTING_GPGKEY_SECRET" in CONFIG and not CONFIG["TESTING_GPGKEY_SECRET"].startswith(
    "----"
):
    CONFIG["TESTING_GPGKEY_SECRET"] = base64.b64decode(CONFIG["TESTING_GPGKEY_SECRET"])
    TESTING_GPGKEY_SECRET = CONFIG["TESTING_GPGKEY_SECRET"]
def is_saas() -> bool:
    """Return True when this engine targets public GitHub rather than GHE."""
    rest_api_url = typing.cast(str, globals()["GITHUB_REST_API_URL"])
    return rest_api_url == "https://api.github.com"
| StarcoderdataPython |
27558 | <filename>custom_components/weatheralerts/sensor.py
"""
A component which allows you to get information about next departure from spesified stop.
For more details about this component, please refer to the documentation at
https://github.com/custom-components/sensor.weatheralerts
"""
import voluptuous as vol
from datetime import timedelta
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (PLATFORM_SCHEMA)
# Bug fix: the version attribute was previously misspelled ``__version_``.
__version__ = '0.0.3'
REQUIREMENTS = ['weatheralerts']
# Configuration key for the NWS SAME location code.
CONF_SAMEID = 'sameid'
# State attribute names exposed by the sensor.
ATTR_DESTINATION = 'destination'
ATTR_PUBLISHED = 'published'
ATTR_URGENCY = 'urgency'
# NOTE: the misspelled value 'severety' is kept on purpose — changing it
# would break existing user templates reading this attribute key.
ATTR_SEVERITY = 'severety'
ATTR_CATEGORY = 'category'
ATTR_TITLE = 'title'
ATTR_SUMMARY = 'summary'
ATTR_LINK = 'link'
# Polling interval used by Home Assistant for this platform.
SCAN_INTERVAL = timedelta(seconds=30)
ICON = 'mdi:weather-hurricane'
ICON = 'mdi:weather-hurricane'
# Extend the base platform schema: the SAME code is a mandatory string.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_SAMEID): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the weatheralerts sensor from a platform config entry."""
    same_code = str(config.get(CONF_SAMEID))
    add_devices([WeatherAlertsSensor(same_code)])
class WeatherAlertsSensor(Entity):
    """Sensor exposing the most recent NWS weather alert for a SAME code."""

    def __init__(self, sameid):
        self._sameid = sameid
        # Fetch the first alert immediately so the entity starts populated.
        self.update()

    def update(self):
        """Refresh state from the first alert returned for the SAME code."""
        # Imported lazily so the dependency is only required at runtime.
        from weatheralerts import WeatherAlerts
        nws = WeatherAlerts(samecodes=self._sameid)
        # NOTE(review): assumes at least one alert exists — ``alerts[0]``
        # raises IndexError otherwise; confirm upstream behaviour.
        self._published = nws.alerts[0].published
        self._state = nws.alerts[0].event
        self._urgency = nws.alerts[0].urgency
        self._severity = nws.alerts[0].severity
        self._category = nws.alerts[0].category
        self._title = nws.alerts[0].title
        self._summary = nws.alerts[0].summary
        self._link = nws.alerts[0].link

    @property
    def name(self):
        """Entity display name."""
        return 'WeatherAlerts'

    @property
    def state(self):
        """Current alert event name."""
        return self._state

    @property
    def icon(self):
        """Static MDI icon for the entity."""
        return ICON

    @property
    def device_state_attributes(self):
        """Extra attributes describing the alert."""
        return {
            ATTR_PUBLISHED: self._published,
            ATTR_URGENCY: self._urgency,
            ATTR_SEVERITY: self._severity,
            ATTR_CATEGORY: self._category,
            ATTR_TITLE: self._title,
            ATTR_SUMMARY: self._summary,
            ATTR_LINK: self._link,
        }
| StarcoderdataPython |
1782220 | <reponame>poffey21/demo
from django.contrib import messages
from django.contrib.admin.utils import unquote
from django.contrib.auth import logout, login, authenticate, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.http import Http404
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.views import generic
from . import models
class UserIDRequiredMixin(object):
    """View mixin redirecting anonymous users to the account login page."""

    @method_decorator(login_required(login_url=reverse_lazy('account:login')))
    def dispatch(self, request, *args, **kwargs):
        return super(UserIDRequiredMixin, self).dispatch(request, *args, **kwargs)
class Logout(generic.View):
    """Log the current user out and redirect to the site root."""

    def get(self, *args, **kwargs):
        # NOTE(review): logging out on GET allows CSRF-triggered logout;
        # consider requiring POST.
        logout(self.request)
        return HttpResponseRedirect('/')
class GroupAssociationsListView(UserIDRequiredMixin, generic.ListView):
    """List all auth Groups together with their member users."""

    model = Group
    template_name = 'authentication/user_group_list.html'

    def get_queryset(self):
        qs = super(GroupAssociationsListView, self).get_queryset()
        # Prefetch members to avoid N+1 queries in the template.
        return qs.prefetch_related('user_set').order_by('name')
class Profile(UserIDRequiredMixin, generic.DetailView):
    """Detail page for the currently authenticated user."""

    model = get_user_model()
    template_name = 'authentication/profile.html'

    def get_object(self, queryset=None):
        """Return the logged-in user's instance, raising 404 if it is gone."""
        # The passed-in queryset is deliberately replaced: this view always
        # looks up the request's own user.
        queryset = self.get_queryset()
        try:
            return queryset.get(pk=self.request.user.pk)
        except queryset.model.DoesNotExist as exc:
            # Bug fix: the message previously referred to "MyModel", a
            # copy-paste leftover; the original exception is now chained.
            raise Http404("No user matches the given query.") from exc
class GenerateApiKey(UserIDRequiredMixin, generic.CreateView):
    """AJAX form view that (re)generates the logged-in user's API key."""

    model = models.Token
    fields = []
    template_name = 'ajax_form.html'

    def get_context_data(self, **kwargs):
        """Add the submit-button label and the form action URL to the context."""
        context = super(GenerateApiKey, self). get_context_data(**kwargs)
        context['button_message'] = 'Generate API Key'
        context['url'] = '{}?next={}'.format(self.request.path, self.request.GET.get('next', reverse_lazy('account:profile')))
        return context

    def form_valid(self, form):
        """Create (or fetch) the user's token, rotate the key and flash it."""
        user = self.request.user
        api = self.model.objects.get_or_create(user=user)[0]
        api_key = api.generate_api_key()
        messages.add_message(
            self.request,
            messages.SUCCESS,
            # format_html escapes the key so it is safe to embed in markup.
            format_html(
                'Your API Key is now set to: <input class="form-control" type="text" value="{}" readonly>',
                api_key
            )
        )
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        # Redirect back to the URL-decoded `next` target, defaulting to root.
        return unquote(self.request.GET.get('next', '/'))
class SetSessionUser(generic.View):
    """ This is for administrators and possibly could be used behind a Access Management Cookie """

    def get(self, *args, **kwargs):
        next_url = self.request.GET.get('next', '/')
        # Only superusers may impersonate another account.
        if not self.request.user.is_superuser:
            messages.add_message(self.request, messages.ERROR, 'You are not allowed to use this feature.')
            return HttpResponseRedirect(next_url)
        user_id = self.request.GET.get('user_id')
        # NOTE(review): user_id is a query-string str while request.user.id
        # is an int, so this comparison is always True and the current
        # session is always logged out — confirm whether that is intended.
        if self.request.user and user_id != self.request.user.id:
            logout(self.request)
        new_user = authenticate(username=user_id)
        if new_user:
            login(
                self.request,
                new_user,
                backend='django.contrib.auth.backends.ModelBackend',
            )
        else:
            messages.add_message(self.request, messages.ERROR, 'Unable to login as different user. Authenticate stage failed')
        return HttpResponseRedirect(next_url)
| StarcoderdataPython |
3331835 | """Modify grb/config/path.py as needed.
There are four paths to assign:
- ROOT: the root directory for the repo, absolute path
- FITS: data directory for FITS file, relative to ROOT
- TABLE: data directory for tables ((e)csv, txt, etc.), relative to ROOT
- IMAGE: image directory, relative to ROOT
"""
from pathlib import Path
import logging
# def main():
logging.basicConfig(level=logging.WARNING)
# File whose ROOT/FITS/TABLE/IMAGE assignments will be rewritten below.
configpath = 'grb/config/path.py'
p = Path()
# Each value is stored as the *source text* of the assignment, not as a Path.
ROOT = f"Path('{p.resolve()}')"
d = {'FITS': "ROOT / 'data/fits'", 'TABLE': "ROOT / 'data/table'", \
    'IMAGE': "ROOT / 'data/image'"}
print('Current/Working directory (default to ROOT):\n', p.resolve())
# NOTE(review): the loop variable `value` is unused; d[key] is read instead.
for key, value in d.items():
    print(key, '(relative to ROOT):\n', d[key].split("'")[1])
d['ROOT'] = ROOT
# Interactively collect new path assignments until the user types 'c'/Enter.
while True:
    try:
        key = input("\nPlease type the name (ROOT, FITS, TABLE, or IMAGE) to modify:\n(type 'c' or Enter to install the package: grb)\n")
        if key == 'c' or key == '':
            break
        elif key == 'ROOT':
            ROOT = input("Please assign the ROOT absolute path:\n")
            d[key] = f"Path('{ROOT}')"
            # check if ROOT exists:
            # Bug fix: `.exists` was never called (a bound method is always
            # truthy), so the warning below could never fire.
            # HACK: eval() of user-influenced text — acceptable for a local
            # interactive installer, but do not reuse on untrusted input.
            if not eval(d[key]).exists():
                logging.warning(f"{ROOT} not exists!\n")
        elif key in d.keys():
            # key is one of 'FITS', 'TABLE' and 'IMAGE'
            datapath = input("Please assign the data path relative to ROOT (e.g., data/new):\n")
            d[key] = 'ROOT / ' + f"'{datapath}'"
            # check if datapath exists (relative to the configured ROOT):
            # Bug fix: the old code concatenated the *source text*
            # "Path('...')" with the data path, producing a nonsense path,
            # and also never called `.exists`.
            if not (eval(d['ROOT']) / datapath).exists():
                logging.warning(f"{datapath} not exists!\n")
    except EOFError:
        # Handle the EOFError in readthedocs.org
        break
    except Exception as exception:
        # Output unexpected Exceptions.
        logging.warning(exception)
        break
# Keys still awaiting substitution, in insertion order.
# NOTE(review): this assumes the assignments appear in configpath in the
# same order as in `d` — confirm against grb/config/path.py.
pathlist = list(d.keys())
# read all lines
with open(configpath) as c:
    lines = c.readlines()
# do substitution
for i, line in enumerate(lines):
    # ! The walrus operator := is a new feature in Python 3.8
    if len(pathlist) > 0 and line.startswith(key := pathlist[0]):
        newline = f'{key} = {d[key]}\n'
        lines[i] = newline
        pathlist.pop(0)
# make change(s) to file
with open(configpath, 'w') as c:
c.writelines(lines) | StarcoderdataPython |
197571 | <filename>app/concat_unmask.py
import os
import subprocess
import time
import yaml
import argparse
"""
Given a directory of PAN20 formatted datasets, runs `unmask.py run` on
all of them.
Inputs:
- job.yml with <output_dir> and <transcription> placeholders
- path to directory with PAN20 formatted datasets
"""
def now(): return time.strftime("%Y-%m-%d_%H-%M-%S")
def main():
    """Run `unmask.py run` for every PAN20 dataset in the input directory."""
    parser = argparse.ArgumentParser(
        prog="concat_unmask",
        description="Automate unmasking for multiple datasets",
        add_help=True)
    parser.add_argument('--input',
                        '-i',
                        help='Path to directory of PAN20 formatted datasets')
    args = parser.parse_args()
    directory = [d for d in os.scandir(args.input)]
    print(f'Found {len(directory)} PAN20 data files.')
    # All curves from one invocation share a timestamped output folder.
    output_folder = f'unmasking_curves_{now()}/'
    for i, dir_entry in enumerate(directory):
        print(f'Unmasking {dir_entry.name}... ({i+1}/{len(directory)})')
        # Workaround, as there is no --input option for `unmask.py run`:
        # rewrite the corpus path inside a temporary copy of job.yml.
        with open(os.path.join('app', 'job.yml')) as f:
            doc = yaml.load(f, Loader=yaml.FullLoader)
        doc['job%']['input']['parser']['parameters']['corpus_path'] = dir_entry.path
        with open(os.path.join('app', 'temp_job.yml'), 'w') as f:
            yaml.dump(doc, f)
        # Trigger Unmasking
        subprocess.run(['./unmask', 'run',
                        '-o', os.path.join('..',
                                           'data',
                                           output_folder,
                                           dir_entry.name),
                        os.path.join('app', 'temp_job.yml')])
        os.remove(os.path.join('app', 'temp_job.yml'))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1745250 | from multiprocessing.sharedctypes import Value
from typing import List, Callable, Dict
from torchmetrics.functional import accuracy
from .data.imagedataset import ImageSet
from .models.convnext import ConvNeXt
from .models.convnext_isotropic import ConvNeXtIsotropic
def Convnext(
    type=None,
    in_chans: int=3,
    num_classes: int=1000,
    depths: list=[3, 3, 9, 3],
    dims: list=[96, 192, 384, 768],
    drop_path_rate: float=0.,
    layer_scale_init_value: float=1e-6,
    head_init_scale: float=1.,
    lr: float=1e-4,
    momentum: float=1e-4,
    weight_decay: float=1e-2,
    metrics: Dict[str, Callable] = {
        'acc' : accuracy
    },
):
    """Factory building a ConvNeXt model.

    ``type`` selects the variant: ``None`` builds the standard hierarchical
    ConvNeXt, ``'isotropic'`` builds the isotropic variant; any other value
    raises ``ValueError``.  All remaining arguments are forwarded unchanged
    to the selected model class.

    Improvements over the previous version: the two identical construction
    branches were deduplicated, and the ``drop_path_rate`` annotation was
    corrected from ``int`` to ``float``.  (The mutable ``depths``/``dims``/
    ``metrics`` defaults are kept for interface compatibility; they are only
    read, never mutated, here.)
    """
    if type is None:
        model_cls = ConvNeXt
    elif type == 'isotropic':
        model_cls = ConvNeXtIsotropic
    else:
        raise ValueError(f"Invalid value in type {type}. Must be one of [None, 'isotropic']")
    model = model_cls(
        in_chans=in_chans,
        num_classes=num_classes,
        depths=depths,
        dims=dims,
        drop_path_rate=drop_path_rate,
        layer_scale_init_value=layer_scale_init_value,
        head_init_scale=head_init_scale,
        lr=lr,
        momentum=momentum,
        weight_decay=weight_decay,
        metrics=metrics
    )
    return model
1242 | <filename>gremlin-python/src/main/jython/tests/driver/test_client.py
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import pytest
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.driver.client import Client
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.driver.request import RequestMessage
from gremlin_python.process.strategies import OptionsStrategy
from gremlin_python.process.graph_traversal import __
from gremlin_python.structure.graph import Graph
__author__ = '<NAME> (<EMAIL>)'
def test_connection(connection):
    """Submit hand-built traversal bytecode over the raw driver connection."""
    g = Graph().traversal()
    t = g.V()
    message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    results_set = connection.write(message).result()
    future = results_set.all()
    results = future.result()
    # The "gmodern" sample graph is expected to contain 6 vertices.
    assert len(results) == 6
    assert isinstance(results, list)
    assert results_set.done.done()
    assert 'host' in results_set.status_attributes
def test_client_simple_eval(client):
    # A plain Gremlin script evaluation returns its value.
    assert client.submit('1 + 1').all().result()[0] == 2


def test_client_simple_eval_bindings(client):
    # Bindings are substituted into the evaluated script.
    assert client.submit('x + x', {'x': 2}).all().result()[0] == 4


def test_client_eval_traversal(client):
    # Script evaluation can run a full traversal (6 vertices expected).
    assert len(client.submit('g.V()').all().result()) == 6
def test_client_error(client):
    """A server-side error surfaces as GremlinServerError with diagnostics."""
    try:
        # should fire an exception
        client.submit('1/0').all().result()
        assert False
    except GremlinServerError as ex:
        assert 'exceptions' in ex.status_attributes
        assert 'stackTrace' in ex.status_attributes


def test_client_connection_pool_after_error(client):
    """A failed request must return its connection to the pool."""
    # Overwrite fixture with pool_size=1 client
    client = Client('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)
    try:
        # should fire an exception
        client.submit('1/0').all().result()
        assert False
    except GremlinServerError as gse:
        # expecting the pool size to be 1 again after query returned
        assert gse.status_code == 597
        assert client.available_pool_size == 1
def test_client_bytecode(client):
    """Bytecode-based submission through the high-level client."""
    g = Graph().traversal()
    t = g.V()
    message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    result_set = client.submit(message)
    assert len(result_set.all().result()) == 6


def test_client_bytecode_options(client):
    # smoke test to validate serialization of OptionsStrategy. no way to really validate this from an integration
    # test perspective because there's no way to access the internals of the strategy via bytecode
    g = Graph().traversal()
    t = g.withStrategies(OptionsStrategy(options={"x": "test", "y": True})).V()
    message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    result_set = client.submit(message)
    assert len(result_set.all().result()) == 6
    ## Same strategy expressed through the with_() configuration shorthand.
    t = g.with_("x", "test").with_("y", True).V()
    message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    result_set = client.submit(message)
    assert len(result_set.all().result()) == 6
def test_iterate_result_set(client):
    """Iterating a ResultSet directly streams batches until exhaustion."""
    traversal = Graph().traversal().V()
    request = RequestMessage(
        'traversal', 'bytecode',
        {'gremlin': traversal.bytecode, 'aliases': {'g': 'gmodern'}})
    result_set = client.submit(request)
    collected = []
    for batch in result_set:
        collected.extend(batch)
    assert len(collected) == 6
def test_client_async(client):
    """submitAsync returns a future resolving to a ResultSet."""
    traversal = Graph().traversal().V()
    request = RequestMessage(
        'traversal', 'bytecode',
        {'gremlin': traversal.bytecode, 'aliases': {'g': 'gmodern'}})
    result_set = client.submitAsync(request).result()
    assert len(result_set.all().result()) == 6
def test_connection_share(client):
    """With a single pooled connection, two in-flight requests share it serially."""
    # Overwrite fixture with pool_size=1 client
    # NOTE(review): replacement Client is not closed explicitly — confirm
    # test teardown handles it.
    client = Client('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)
    g = Graph().traversal()
    t = g.V()
    message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    message2 = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    future = client.submitAsync(message)
    future2 = client.submitAsync(message2)
    result_set2 = future2.result()
    assert len(result_set2.all().result()) == 6
    # This future has to finish for the second to yield result - pool_size=1
    assert future.done()
    result_set = future.result()
    assert len(result_set.all().result()) == 6
def test_multi_conn_pool(client):
    """Two async submissions complete even when the pool holds one connection."""
    g = Graph().traversal()
    t = g.V()
    message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    message2 = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
    # NOTE(review): the fixture `client` is shadowed (and not closed) here,
    # mirroring test_connection_share — confirm teardown covers it.
    client = Client('ws://localhost:45940/gremlin', 'g', pool_size=1)
    future = client.submitAsync(message)
    future2 = client.submitAsync(message2)
    result_set2 = future2.result()
    assert len(result_set2.all().result()) == 6
    # with connection pool `future` may or may not be done here
    result_set = future.result()
    assert len(result_set.all().result()) == 6
def test_big_result_set(client):
    """Exercise result streaming over increasingly large result sets.

    Seeds the graph with 20000 extra 'person' vertices, then pulls limits
    of 10 / 100 / 1000 / 10000 vertices and checks the streamed counts.
    The previous version copy-pasted the submit/collect loop five times;
    it is now factored into a single helper.
    """
    g = Graph().traversal()

    def drain(traversal, expected):
        # Submit *traversal* as bytecode and count the streamed results.
        message = RequestMessage('traversal', 'bytecode',
                                 {'gremlin': traversal.bytecode, 'aliases': {'g': 'g'}})
        result_set = client.submit(message)
        results = []
        for result in result_set:
            results += result
        assert len(results) == expected

    # Seeding traversal: count() collapses everything into a single result.
    drain(g.inject(1).repeat(__.addV('person').property('name', __.loops())).times(20000).count(), 1)
    for limit in (10, 100, 1000, 10000):
        drain(g.V().limit(limit), limit)
def test_big_result_set_secure(secure_client):
    """Same as test_big_result_set but over the authenticated/secure client.

    Seeds 20000 'person' vertices, then drains limits of 10 / 100 / 1000 /
    10000 vertices, verifying the streamed counts. The repeated
    submit/collect loop is factored into a helper.
    """
    g = Graph().traversal()

    def drain(traversal, expected):
        # Submit *traversal* as bytecode and count the streamed results.
        message = RequestMessage('traversal', 'bytecode',
                                 {'gremlin': traversal.bytecode, 'aliases': {'g': 'g'}})
        result_set = secure_client.submit(message)
        results = []
        for result in result_set:
            results += result
        assert len(results) == expected

    # Seeding traversal: count() collapses everything into a single result.
    drain(g.inject(1).repeat(__.addV('person').property('name', __.loops())).times(20000).count(), 1)
    for limit in (10, 100, 1000, 10000):
        drain(g.V().limit(limit), limit)
| StarcoderdataPython |
1740085 | import os
import csv
import shutil
import urllib.request
def get_score(model_name=None, dataset_name=None,
              dataset_res=None, dataset_split=None, task_name=None):
    """Fetch the clean-fid leaderboard CSV and return the matching rows.

    Each keyword argument that is not None acts as an exact-match filter
    (after whitespace stripping) on the column of the same name.

    Returns
    -------
    list of dict
        One dict per matching row, keyed by (stripped) column name.

    Notes
    -----
    Previously the CSV was written to a hardcoded "/tmp/leaderboard.csv"
    (not portable to Windows, racy between concurrent callers) and the
    open file handle `f` was shadowed by the `for f in l_fields` loop.
    The response is now parsed directly in memory.
    """
    url = "https://www.cs.cmu.edu/~clean-fid/files/leaderboard.csv"
    with urllib.request.urlopen(url) as response:
        text = response.read().decode("utf-8")
    csvreader = csv.reader(text.splitlines())
    l_fields = next(csvreader)
    # Map (stripped) column name -> column index.
    d_field2idx = {val.strip(): idx for idx, val in enumerate(l_fields)}
    # Column-name -> requested filter value (None means "no filter").
    filters = {"model_name": model_name, "dataset_name": dataset_name,
               "dataset_res": dataset_res, "dataset_split": dataset_split,
               "task_name": task_name}
    l_matches = []
    # iterate through all rows
    for row in csvreader:
        # skip empty rows
        if len(row) == 0:
            continue
        # skip if any active filter doesn't match
        if any(val is not None and row[d_field2idx[col]].strip() != val
               for col, val in filters.items()):
            continue
        l_matches.append({f.strip(): row[d_field2idx[f.strip()]].strip()
                          for f in l_fields})
    return l_matches
| StarcoderdataPython |
1638920 | import discord
import asyncio
from discord.ext import commands
from Components.MangoPi import MangoPi
from Components.RaidFilter import RaidFilter
def setup(bot: MangoPi):
    """Extension entry point used by discord.py to load this Cog.

    Constructs an AntiRaid cog (which pulls its state from mongoDB in its
    constructor) and registers it on the bot.

    Parameters
    ----------
    bot : MangoPi
        bot reference the cog is attached to
    """
    cog = AntiRaid(bot)
    bot.add_cog(cog)
    print("Load Cog:\tAntiRaid")
def teardown(bot: MangoPi):
    """Extension exit point used by discord.py when unloading this Cog.

    Parameters
    ----------
    bot : MangoPi
        bot reference the cog is removed from
    """
    bot.remove_cog("AntiRaid")
    print("Unload Cog:\tAntiRaid")
class AntiRaid(commands.Cog):
    """
    Class inherited from commands.Cog that contains anti-raid commands.

    Attributes
    ----------
    bot : MangoPi
        MangoPi bot reference
    data : dict
        Dictionary containing server's anti-raid system
    db : MongoClient
        MongoDB client reference for "anti-raid" collection
    """
    def __init__(self, bot: MangoPi):
        """
        Constructor for AntiRaid class.

        Parameters
        ----------
        bot : MangoPi
            pass in bot reference for the bot
        """
        self.bot = bot
        # guild id -> RaidFilter holding that guild's anti-raid state
        self.data = {}
        self.db = bot.mongo["anti-raid"]
        self.update()

    def update(self, guild: int = None):
        """
        Method to update data from MongoDB

        Parameters
        ----------
        guild : int
            the specific server data to update

        Returns
        -------
        RaidFilter
            if guild parameter is not None and data been successfully update, return the RaidFilter reference
        """
        if guild:
            # drop any cached filter before re-reading the document
            try:
                self.data.pop(guild)
            except KeyError:
                pass
            data = self.db.find_one({"_id": guild})
            if data:
                self.data.update({guild: RaidFilter(self.bot, data)})
                return self.data[guild]
        else:
            # full refresh: rebuild the cache from every stored document
            self.data.clear()
            data = self.db.find({})
            for i in data:
                self.data.update({i['_id']: RaidFilter(self.bot, i)})

    async def verify(self, ctx: commands.Context):
        """
        Check to see if server have an anti-raid system.

        Parameters
        ----------
        ctx : commands.Context
            pass in context for analysis

        Returns
        -------
        discord.Message
            if there is no anti-raid system, return the alert message sent
        RaidFilter
            if server contains anti-raid, return RaidFilter class associated with that server
        """
        try:
            return self.data[ctx.guild.id]
        except KeyError:
            return await ctx.reply(f"This server have not setup an anti-raid yet. Do "
                                   f"`{ctx.prefix}ar create <raider role>` to set it up.")

    def database_update(self, data: RaidFilter):
        """
        Method to update the mongoDB data from RaidFilter class data.

        Parameters
        ----------
        data : RaidFilter
            RaidFilter class date to update MongoDB
        """
        self.db.update_one({"_id": data.guild_id},
                           {"$set": {"power": data.switch, "interval": data.interval, "amount": data.count,
                                     "role_id": data.role_id, "timeout": data.timeout}})

    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        """
        Cog event that calls this method when a member joins the server. Add the new member to either holding cell
        or raid cell if applicable.

        Parameters
        ----------
        member : discord.Member
            The newly joined member
        """
        # guilds without an anti-raid document are ignored
        try:
            data = self.data[member.guild.id]
        except KeyError:
            return
        if data.switch:
            await data.new_member(member)

    @commands.guild_only()
    @commands.group(aliases=['ar'])
    @commands.has_permissions(ban_members=True, kick_members=True)
    async def anti_raid(self, ctx: commands.Context):
        """Anti-raid group commands. Calling this without additional parameter will return command help."""
        if not ctx.invoked_subcommand:
            pre = ctx.prefix
            embed = discord.Embed(
                title="`Anti Raid` Commands",
                colour=0xf368e0
            )
            embed.add_field(inline=False, name=f"{pre}ar create <raider role mention or ID>",
                            value="Create anti raid system with given raider role")
            embed.add_field(inline=False, name=f"{pre}ar clear (True or False)",
                            value="Turn off the anti raid alarm if it's on, and pass in whether or not to free all "
                                  "marked raiders. Default is no.")
            embed.add_field(inline=False, name=f"{pre}ar raid (True or False)",
                            value="Turn on the anti raid mode and put recent members into the raid cell indefinitely. "
                                  "Additional parameter for if the raid mode is indefinite, default is yes.")
            embed.add_field(inline=False, name=f"{pre}ar kick (True or False)",
                            value="Kick all members inside the anti raid cell and pass in whether or not "
                                  "to switch off the anti raid alarm. Default is no.")
            embed.add_field(inline=False, name=f"{pre}ar ban (True or False)",
                            value="Ban all members inside the anti raid cell and pass in whether or not to"
                                  " switch off the anti raid alarm. Default is yes.")
            embed.add_field(inline=False, name=f"{pre}ar status (Page#)", value="Show anti raid cell status.")
            embed.add_field(inline=False, name=f"{pre}ar + <member mention or ID>",
                            value="Add the target into the anti raid cell.")
            embed.add_field(inline=False, name=f"{pre}ar - <user mention or ID>",
                            value="Remove the target from the anti raid cell if they are in it.")
            embed.add_field(inline=False, name=f"{pre}ar s", value="Bring up anti raid setting menu")
            await ctx.reply(embed=embed)

    @anti_raid.command()
    async def clear(self, ctx: commands.Context, release: bool = False):
        """Turn off raid mode and pass in additional argument to whether or not to release all users from raid cell."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            data.raid = False
            if not release:
                await ctx.message.add_reaction(emoji='✔')
            else:
                ret = await data.release_all()
                for i in ret:
                    await ctx.reply(embed=discord.Embed(title="Free marked raiders",
                                                        colour=0x4cd137, description=i))

    @anti_raid.command()
    async def raid(self, ctx: commands.Context, indefinite: bool = True):
        """Turn on raid mode and send all user in holding cell to raid cell.
        Additional parameter whether or not the raid mode is indefinite."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            await data.triggered(indefinite)
            await ctx.message.add_reaction(emoji="🏃")

    @anti_raid.command()
    async def ban(self, ctx: commands.Context, stop: bool = True):
        """Ban all users with server's raider role and turn off raid mode as default (can be specified)."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            # ban_all presumably yields pages of banned-user strings — one
            # embed is sent per page
            result = list(await data.ban_all(ctx, stop))
            await ctx.message.add_reaction(emoji='✅')
            for i in range(len(result)):
                await ctx.reply(
                    embed=discord.Embed(title=f"All Banned Raiders {i + 1}", description=result[i], colour=0xff4757)
                )

    @anti_raid.command()
    async def kick(self, ctx: commands.Context, stop: bool = True):
        """Kick all users with server's raider role and turn off raid mode as default (can be specified)."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            result = list(await data.kick_all(stop))
            await ctx.message.add_reaction(emoji='✅')
            for i in range(len(result)):
                await ctx.reply(
                    embed=discord.Embed(title=f"All Kicked Raiders {i + 1}", description=result[i], colour=0xff4757)
                )

    @anti_raid.command()
    async def create(self, ctx: commands.Context, role: discord.Role):
        """Create an anti-raid system for the server with the specified raider role."""
        data = self.db.find_one({"_id": ctx.guild.id})
        if data:
            return await ctx.reply("This server already have an anti-raid system, no need to create another one.")
        # defaults: 3 joins within 5 seconds trips the alarm, 60s timeout
        self.db.insert_one({"_id": ctx.guild.id, "interval": 5, "amount": 3, "power": True, "role_id": role.id,
                            "timeout": 60})
        self.update(ctx.guild.id)
        await ctx.message.add_reaction(emoji='👍')

    @anti_raid.command()
    async def status(self, ctx: commands.Context, page: int = 1):
        """Return people in the server who are marked as raiders."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            if not data.switch:
                return await ctx.reply("Anti Raid system is not online")
            embed = discord.Embed(
                colour=0xe056fd,
                title="AntiRaid Status " + ("⚠ RAID!" if data.raid else "🧘 Clear"),
                timestamp=ctx.message.created_at
            )
            # raiders_to_string / holding_to_string presumably yield one
            # pre-formatted string per page — TODO confirm in RaidFilter
            raid = list(data.raiders_to_string())
            hold = list(data.holding_to_string())
            if len(raid) >= page:
                temp = raid[page - 1]
                if temp != '':
                    embed.add_field(name=f"Raid Cell {page}", value=temp)
            if len(hold) >= page:
                temp = hold[page - 1]
                if temp != '':
                    embed.add_field(name=f"Watch List {page}", value=temp)
            await ctx.reply(embed=embed)

    @anti_raid.command(aliases=['+'])
    async def mark(self, ctx: commands.Context, *target: discord.Member):
        """Mark target users as raider."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            for i in target:
                try:
                    await data.add(i)
                except ValueError:
                    await ctx.reply(f"{i.mention} is already a marked raider")
            await ctx.message.add_reaction(emoji='👍')

    @anti_raid.command(aliases=['-'])
    async def unmark(self, ctx: commands.Context, *target: discord.Member):
        """Remove users from raid cell."""
        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            for i in target:
                try:
                    await data.remove(i)
                except ValueError:
                    await ctx.reply(f"Can not find {i.mention} within raider cell")
            await ctx.message.add_reaction(emoji='👍')

    @anti_raid.command(aliases=['s'])
    async def setting(self, ctx: commands.Context):
        """Brings up anti-raid setting menu."""
        emotes = ['💡', '👪', '⏱', '📛', '😴', '⏸']

        # only accept menu reactions from the command invoker
        def check(reaction1, user1):
            return reaction1.emoji in emotes and user1.id == ctx.author.id

        data = await self.verify(ctx)
        if isinstance(data, RaidFilter):
            de_role = ctx.guild.get_role(data.role_id)
            embed = discord.Embed(
                title="Anti-Raid Setting Menu " + ("[Active]" if data.switch else "[Inactive]"),
                colour=0x2ecc71 if data.switch else 0xc0392b,
                timestamp=ctx.message.created_at,
                description=f"💡 - Toggle Anti-Raid \n👪 - Amount of People Required to Trigger [{data.count}]\n"
                            f"⏱ - Timer [{data.interval} seconds]\n"
                            f"😴 - Raid Timeout: {data.timeout} seconds \n"
                            f"📛 - Raider Role: " + (f"{de_role.mention}" if de_role else "**Error!!**") + "\n"
                            f"⏸ - Setting Menu Pause"
            ).set_footer(text="React to Modify", icon_url=self.bot.user.avatar_url_as(size=128))
            msg = await ctx.reply(embed=embed)
            for i in emotes:
                await msg.add_reaction(emoji=i)
            try:
                reaction, user = await self.bot.wait_for('reaction_add', timeout=10, check=check)
            except asyncio.TimeoutError:
                await msg.edit(embed=embed.set_footer(text="Menu Timed Out",
                                                      icon_url=self.bot.user.avatar_url_as(size=64)))
                return await msg.clear_reactions()
            await msg.clear_reactions()

            # follow-up prompts only accept replies from the invoker
            def check_m(message):
                return message.author.id == ctx.author.id

            if reaction.emoji == '⏸':
                await msg.edit(
                    embed=embed.set_footer(text="Menu Paused", icon_url=self.bot.user.avatar_url_as(size=64)))
            elif reaction.emoji == "💡":
                result = data.toggle()
                await msg.edit(embed=None, content="Anti-Raid now enabled" if result else "Anti-Raid now disabled")
            elif reaction.emoji == '📛':
                await msg.edit(embed=None, content="Enter the role ID of the new raider role.")
                try:
                    m = await self.bot.wait_for('message', timeout=20, check=check_m)
                except asyncio.TimeoutError:
                    return await msg.edit(content="Anti-Raid Menu Timed Out.")
                try:
                    rol = ctx.guild.get_role(int(m.content))
                except ValueError:
                    return await msg.edit(content="Input not a number, action cancelled.")
                if not rol:
                    return await msg.edit(content="Role not found, action cancelled")
                await data.update_role(rol)
                await msg.edit(content=f"Changed raid role to {data.role.mention}")
            else:
                # numeric settings share one prompt/parse/apply flow
                store = {
                    '👪': "Enter the amount(integer) of user join needed to trigger",
                    '⏱': "Enter the amount(integer) in seconds of the interval",
                    '😴': "Enter the amount(integer) in seconds for Anti-Raid to time itself out"
                }
                try:
                    await msg.edit(embed=None, content=store[reaction.emoji])
                    m = await self.bot.wait_for('message', timeout=10, check=check_m)
                    try:
                        m = int(m.content)
                    except ValueError:
                        return await msg.edit(content="Value entered is not an integer. Action cancelled")
                    if m < 1:
                        return await msg.edit(content="Value must be 1 or bigger")
                    if reaction.emoji == '👪':
                        data.count = m
                        await msg.edit(content=f"member join flow holder is now set to `{m}` people")
                    elif reaction.emoji == '😴':
                        data.timeout = m
                        await msg.edit(content=f"Anti-raid automatic timeout is now set to __{m}__ seconds")
                    else:
                        data.interval = m
                        await msg.edit(content=f"member join timer is now set **{m}** seconds")
                except asyncio.TimeoutError:
                    return await msg.edit(content="Anti-Raid Menu Timed Out.")
            # persist whatever was changed back to MongoDB
            self.database_update(data)
| StarcoderdataPython |
125577 | import os
from django.core.wsgi import get_wsgi_application
# Best-effort New Relic APM setup: if the agent or its config file is
# missing, log to stdout and continue serving without instrumentation.
try:
    import newrelic.agent
    newrelic.agent.initialize("/home/openstates/newrelic.ini")
    newrelic.agent.capture_request_params()
except Exception as e:
    print("newrelic couldn't be initialized:", e)
# Point Django at the project settings unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "openstates.settings")
# WSGI entry point picked up by the application server (gunicorn/uwsgi/etc.).
application = get_wsgi_application()
| StarcoderdataPython |
1705303 | <gh_stars>1-10
# (C) British Crown Copyright 2011 - 2020, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import operator
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from cartopy import config
from cartopy.tests.mpl import MPL_VERSION, ImageTesting
import cartopy.crs as ccrs
import cartopy.img_transform as im_trans
from functools import reduce
class TestRegrid(object):
    """Unit tests for cartopy.img_transform.regrid argument/shape handling."""

    def test_array_dims(self):
        """The regridded array takes the shape of the target grid."""
        # Source data
        source_nx = 100
        source_ny = 100
        source_x = np.linspace(-180.0,
                               180.0,
                               source_nx).astype(np.float64)
        source_y = np.linspace(-90, 90.0, source_ny).astype(np.float64)
        source_x, source_y = np.meshgrid(source_x, source_y)
        data = np.arange(source_nx * source_ny,
                         dtype=np.int32).reshape(source_ny, source_nx)
        source_cs = ccrs.Geodetic()
        # Target grid
        target_nx = 23
        target_ny = 45
        target_proj = ccrs.PlateCarree()
        target_x, target_y, extent = im_trans.mesh_projection(target_proj,
                                                              target_nx,
                                                              target_ny)
        # Perform regrid
        new_array = im_trans.regrid(data, source_x, source_y, source_cs,
                                    target_proj, target_x, target_y)
        # Check dimensions of return array
        assert new_array.shape == target_x.shape
        assert new_array.shape == target_y.shape
        assert new_array.shape == (target_ny, target_nx)

    def test_different_dims(self):
        """Mismatched target_x/target_y shapes must raise ValueError."""
        # Source data
        source_nx = 100
        source_ny = 100
        source_x = np.linspace(-180.0, 180.0,
                               source_nx).astype(np.float64)
        source_y = np.linspace(-90, 90.0,
                               source_ny).astype(np.float64)
        source_x, source_y = np.meshgrid(source_x, source_y)
        data = np.arange(source_nx * source_ny,
                         dtype=np.int32).reshape(source_ny, source_nx)
        source_cs = ccrs.Geodetic()
        # Target grids (different shapes)
        target_x_shape = (23, 45)
        target_y_shape = (23, 44)
        target_x = np.arange(reduce(operator.mul, target_x_shape),
                             dtype=np.float64).reshape(target_x_shape)
        target_y = np.arange(reduce(operator.mul, target_y_shape),
                             dtype=np.float64).reshape(target_y_shape)
        target_proj = ccrs.PlateCarree()
        # Attempt regrid
        with pytest.raises(ValueError):
            im_trans.regrid(data, source_x, source_y, source_cs,
                            target_proj, target_x, target_y)
# Image-comparison tolerance for test_regrid_image, varying by the
# Matplotlib version the suite runs against.
# NOTE(review): these comparisons assume MPL_VERSION orders correctly under
# `<` against version strings; if it is a plain str, lexicographic ordering
# would misclassify e.g. '10.0' < '2' — confirm MPL_VERSION's type.
if MPL_VERSION < '2':
    # Changes in zooming in old versions.
    regrid_tolerance = 2.5
elif MPL_VERSION < '2.0.1':
    regrid_tolerance = 0.5
elif MPL_VERSION < '2.1.0':
    # Bug in latest Matplotlib that we don't consider correct.
    regrid_tolerance = 4.78
else:
    # Bug in latest Matplotlib that we don't consider correct.
    regrid_tolerance = 5.55
@pytest.mark.natural_earth
@ImageTesting(['regrid_image'],
              tolerance=regrid_tolerance)
def test_regrid_image():
    """Regrid a natural-earth raster and compare the plot against a baseline."""
    # Source data
    fname = os.path.join(config["repo_data_dir"], 'raster', 'natural_earth',
                         '50-natural-earth-1-downsampled.png')
    nx = 720
    ny = 360
    source_proj = ccrs.PlateCarree()
    source_x, source_y, _ = im_trans.mesh_projection(source_proj, nx, ny)
    data = plt.imread(fname)
    # Flip vertically to match source_x/source_y orientation
    data = data[::-1]
    # Target grid
    target_nx = 300
    target_ny = 300
    target_proj = ccrs.InterruptedGoodeHomolosine()
    target_x, target_y, target_extent = im_trans.mesh_projection(target_proj,
                                                                 target_nx,
                                                                 target_ny)
    # Perform regrid
    new_array = im_trans.regrid(data, source_x, source_y, source_proj,
                                target_proj, target_x, target_y)
    # Plot
    plt.figure(figsize=(10, 10))
    gs = mpl.gridspec.GridSpec(nrows=4, ncols=1,
                               hspace=1.5, wspace=0.5)
    # Set up axes and title
    ax = plt.subplot(gs[0], projection=target_proj)
    plt.imshow(new_array, origin='lower', extent=target_extent)
    ax.coastlines()
    # Plot each color slice (tests masking)
    cmaps = {'red': 'Reds', 'green': 'Greens', 'blue': 'Blues'}
    for i, color in enumerate(['red', 'green', 'blue']):
        ax = plt.subplot(gs[i + 1], projection=target_proj)
        plt.imshow(new_array[:, :, i], extent=target_extent, origin='lower',
                   cmap=cmaps[color])
        ax.coastlines()
    # Tighten up layout
    gs.tight_layout(plt.gcf())
| StarcoderdataPython |
1741075 | <filename>qutip/control/optimconfig.py<gh_stars>1000+
# -*- coding: utf-8 -*-
# @author: <NAME>
# @email1: <EMAIL>
# @email2: <EMAIL>
# @organization: Aberystwyth University
# @supervisor: <NAME>
"""
Configuration parameters for control pulse optimisation
"""
import numpy as np
# QuTiP logging
import qutip.logging_utils
logger = qutip.logging_utils.get_logger()
import qutip.control.io as qtrlio
class OptimConfig(object):
    """
    Configuration parameters for control pulse optimisation

    Attributes
    ----------
    log_level : integer
        level of messaging output from the logger.
        Options are attributes of qutip.logging_utils,
        in decreasing levels of messaging, are:
        DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL
        Anything WARN or above is effectively 'quiet' execution,
        assuming everything runs as expected.
        The default NOTSET implies that the level will be taken from
        the QuTiP settings file, which by default is WARN

    dyn_type : string
        Dynamics type, i.e. the type of matrix used to describe
        the dynamics. Options are UNIT, GEN_MAT, SYMPL
        (see Dynamics classes for details)

    prop_type : string
        Propagator type i.e. the method used to calculate the
        propagtors and propagtor gradient for each timeslot
        options are DEF, APPROX, DIAG, FRECHET, AUG_MAT
        DEF will use the default for the specific dyn_type
        (see PropagatorComputer classes for details)

    fid_type : string
        Fidelity error (and fidelity error gradient) computation method
        Options are DEF, UNIT, TRACEDIFF, TD_APPROX
        DEF will use the default for the specific dyn_type
        (See FidelityComputer classes for details)
    """
    def __init__(self):
        self.reset()

    def reset(self):
        """Restore all configuration options to their default values."""
        self.log_level = logger.getEffectiveLevel()
        self.alg = 'GRAPE' # Alts: 'CRAB'
        # *** AJGP 2015-04-21: This has been replaced optim_method
        #self.optim_alg = 'LBFGSB'
        self.optim_method = 'DEF'
        self.dyn_type = 'DEF'
        # Fix: fid_type was previously assigned twice in this method;
        # the redundant duplicate assignment has been removed.
        self.fid_type = 'DEF'
        # *** AJGP 2015-04-21: phase_option has been moved to the FidComputer
        #self.phase_option = 'PSU'
        # *** AJGP 2015-04-21: amp_update_mode has been replaced by tslot_type
        #self.amp_update_mode = 'ALL' # Alts: 'DYNAMIC'
        self.tslot_type = 'DEF'
        self.init_pulse_type = 'DEF'
        ######################
        # Note the following parameteres are for constrained optimisation
        # methods e.g. L-BFGS-B
        # *** AJGP 2015-04-21:
        #    These have been moved to the OptimizerLBFGSB class
        #        self.amp_lbound = -np.Inf
        #        self.amp_ubound = np.Inf
        #        self.max_metric_corr = 10
        #    These moved to termination conditions
        #        self.accuracy_factor = 1e7
        # ***
        # ####################

    @property
    def log_level(self):
        """Current level of the module logger (see qutip.logging_utils)."""
        return logger.level

    @log_level.setter
    def log_level(self, lvl):
        """
        Set the log_level attribute and set the level of the logger
        that is call logger.setLevel(lvl)
        """
        logger.setLevel(lvl)

    def check_create_output_dir(self, output_dir, desc='output'):
        """
        Checks if the given directory exists, if not it is created

        Returns
        -------
        dir_ok : boolean
            True if directory exists (previously or created)
            False if failed to create the directory

        output_dir : string
            Path to the directory, which may be been made absolute

        msg : string
            Error msg if directory creation failed
        """
        return qtrlio.create_dir(output_dir, desc=desc)
# create global instance
optimconfig = OptimConfig()
| StarcoderdataPython |
1756206 | <gh_stars>1-10
import os
import shutil
import subprocess
import traceback
from pathlib import Path
from termcolor import colored
# Enable ANSI color output in the Windows console (no-op elsewhere).
os.system("color")
print(
    colored(
        """
  ooooo  ooo ooooo        ooooo ooooooo  ooooo
  `888'   `8' `888'        `888'  `8888    d8'
   888    8   888          888      Y888..8P
   888    8   888          888       `8888'
   888    8   888          888      .8PY888.
   `88.  .8'  888       o  888     d8'  `888b
     `YbodP' o888ooooood8 o888o o888o  o88888o
+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+
|E|X|E|C|U|T|A|B|L|E| |B|U|I|L|D|E|R|
+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+
""",
        "cyan",
    )
)
print(
    colored(
        """
This program will build your game into an .exe file.
The game will be playable even if people do not have a Python installation.
Once completed the finished product can be found in the 'export' folder.
The operation will require an amount of disk space roughly similar to the size of your project.
Building your game may take a while depending on the size of the project and the speed of your computer.
""",
        "yellow",
    )
)
print(
    colored(
        """WARNING!!! THE 'export' FOLDER AND ALL ITS CONTENTS WILL BE REMOVED UPON CONTINUING
""",
        "red",
    )
)
print(colored("Please specify (y/n) whether you want to continue.", "magenta"))
# `or "none"` keeps the walrus expression truthy on empty input so the
# prompt loop continues instead of exiting.
while (inp := input() or "none") :
    if inp == "y":
        break
    if inp == "n":
        print("Aborting...")
        exit()
    print("Please specify (y/n) whether you want to continue.")
try:
    # Step 1: wipe any previous build output.
    print(
        colored(
            """
====================================================================================================
(1/8)
Removing 'export'.
""",
            "yellow",
        )
    )
    path_export = Path("export")
    if path_export.is_dir():
        shutil.rmtree(path_export)
    # Step 2: pre-compile the LDtk world file into its binary form.
    print(
        colored(
            """
====================================================================================================
(2/8)
Compiling world.ldtk.
""",
            "yellow",
        )
    )
    subprocess.run(
        r"""python game.py --compile-world world""", shell=True, check=True,
    )
    # Step 3: isolated virtualenv so cx_Freeze only bundles what is needed.
    print(
        colored(
            """
====================================================================================================
(3/8)
Creating fresh virtualenv '.exportvenv'.
""",
            "yellow",
        )
    )
    subprocess.run(
        r"""python -m venv .exportvenv""", shell=True, check=True,
    )
    print(
        colored(
            """
====================================================================================================
(4/8)
Installing requirements.
""",
            "yellow",
        )
    )
    subprocess.run(
        r"""call .exportvenv\Scripts\activate.bat && pip install -r requirements.txt""",
        shell=True,
        check=True,
    )
    # Step 5: freeze game.py; the --packages list forces inclusion of
    # modules cx_Freeze's static analysis would otherwise miss.
    print(
        colored(
            """
====================================================================================================
(5/8)
Building the executable.
""",
            "yellow",
        )
    )
    subprocess.run(
        r"""call .exportvenv\Scripts\activate.bat && cxfreeze -c game.py --target-dir export --packages "moderngl,moderngl_window,pyglet,moderngl_window.context.pyglet,glcontext,moderngl_window.loaders.texture,moderngl_window.loaders.program" """,
        shell=True,
        check=True,
    )
    # Steps 6-8: copy runtime assets next to the frozen executable.
    print(
        colored(
            """
====================================================================================================
(6/8)
Copying game files.
""",
            "yellow",
        )
    )
    shutil.copytree(Path("game"), path_export / "game")
    print(
        colored(
            """
====================================================================================================
(7/8)
Copying resources files.
""",
            "yellow",
        )
    )
    shutil.copytree(Path("resources"), path_export / "resources")
    print(
        colored(
            """
====================================================================================================
(8/8)
Copying world.ldtkc.
""",
            "yellow",
        )
    )
    shutil.copy(Path("world.ldtkc"), path_export / "world.ldtkc")
except Exception as e:
    traceback.print_exc()
    print(
        colored(
            """EXPORT_GAME has cancelled due to an error.
""",
            "red",
        )
    )
finally:
    # Always clean up the throwaway virtualenv, success or failure.
    print(
        colored(
            """
====================================================================================================
Removing '.exportvenv'.
""",
            "yellow",
        )
    )
    path_venv = Path(".exportvenv")
    if path_venv.is_dir():
        shutil.rmtree(path_venv)
    print(colored("Waiting for input to quit...", "magenta"))
    input()
| StarcoderdataPython |
3235297 | <gh_stars>0
from typing import List, Tuple
from utils.DatabaseConnection import DatabaseConnection
data_file = 'data.db'
Book = Tuple[int, str, str, int]
def create_book_table() -> None:
    """Create the 'books' table in a fresh database.

    `integer primary key` columns auto-increment in SQLite, so `id` needs
    no explicit value on insert.
    """
    with DatabaseConnection(data_file) as conn:
        conn.cursor().execute(
            'CREATE TABLE books (id integer primary key, name text, author text, read integer default 0)'
        )
def get_all_books() -> List[Book]:
    """Return every stored book as (id, name, author, read) tuples."""
    with DatabaseConnection(data_file) as conn:
        cur = conn.cursor()
        cur.execute('SELECT * FROM books')
        return cur.fetchall()
def insert_book(name: str, author: str) -> None:
    """Insert a new, unread book; `id` is assigned automatically by SQLite.

    Fix: use the module-level `data_file` constant instead of a hardcoded
    'data.db' literal, for consistency with the other helpers.
    """
    with DatabaseConnection(data_file) as connection:
        cursor = connection.cursor()
        # placeholders keep the query safe from SQL injection
        cursor.execute('INSERT INTO books (name, author) VALUES (?, ?)', (name, author))
def mark_book_as_read(name: str) -> None:
    """Set the `read` flag for every book with the given name.

    Fix: use the module-level `data_file` constant instead of a hardcoded
    'data.db' literal, for consistency with the other helpers.
    """
    with DatabaseConnection(data_file) as connection:
        cursor = connection.cursor()
        cursor.execute('UPDATE books SET read=1 WHERE name=?', (name,))
def delete_book(name: str) -> None:
    """Delete every book with the given name.

    Fix: use the module-level `data_file` constant instead of a hardcoded
    'data.db' literal, for consistency with the other helpers.
    """
    with DatabaseConnection(data_file) as connection:
        cursor = connection.cursor()
        cursor.execute('DELETE FROM books WHERE name=?', (name,))
1786123 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 11 11:20:14 2018
@author: <EMAIL>
"""
import numpy as np
def Bresenham_line(ends):
    """Yield the integer (x, y) points of the Bresenham line between two points.

    Parameters
    ----------
    ends : (2, 2) array_like of int
        Row 0 is the start point, row 1 the end point.

    Yields
    ------
    tuple
        (x, y) coordinates from start to end, inclusive.

    Notes
    -----
    Fix: `dx`/`dy` were previously left undefined for axis-aligned deltas,
    so a zero-length segment (start == end) raised NameError. They now
    default to 0, and such a segment yields its single point.
    """
    w, h = np.diff(ends, axis=0)[0]
    pt1, __ = ends
    x, y = pt1
    longest = np.absolute(w)
    shortest = np.absolute(h)
    # Per-axis step direction (-1, 0 or +1).
    dx = dy = 0
    if w != 0:
        dx = int(np.absolute(w)/w)
    if h != 0:
        dy = int(np.absolute(h)/h)
    if not (longest > shortest):
        # Ensure `longest` tracks the dominant axis.
        longest = np.absolute(h)
        shortest = np.absolute(w)
    numerator = int(longest/2)
    for i in range(longest+1):
        yield x, y
        numerator += shortest
        if not (numerator < longest):
            # Error term overflowed: advance along both axes.
            numerator -= longest
            x += dx
            y += dy
        elif (np.absolute(w) > np.absolute(h)):
            x += dx
        else:
            y += dy
def connect(ends):
    """Rasterize the segment between ``ends[0]`` and ``ends[1]``.

    Interpolates along the dominant axis with ``np.linspace`` and rounds the
    other axis; returns an (n, 2) int32 array of pixel coordinates.
    """
    w, h = np.abs(np.diff(ends, axis=0))[0]
    if w > h:
        n = w + 1
        xs = np.linspace(ends[0, 0], ends[1, 0], n, dtype=np.int32)
        ys = np.round(np.linspace(ends[0, 1], ends[1, 1], n)).astype(np.int32)
    else:
        n = h + 1
        xs = np.round(np.linspace(ends[0, 0], ends[1, 0], n)).astype(np.int32)
        ys = np.linspace(ends[0, 1], ends[1, 1], n, dtype=np.int32)
    return np.c_[xs, ys]
def Murphy_line_vectorize(start_pts, end_pts, thk = 1):
    # Modified verion of Murphy's 2D line thickening through vectorization
    #
    # Rasterizes several parallel segments at once: row i of start_pts/end_pts
    # is one segment, and every segment is offset from segment 0.
    # Assumes start_pts and end_pts are integer arrays of shape (k, 2) with
    # matching per-row deltas -- TODO confirm against callers.
    #
    # NOTE(review): `thk` is accepted but never used.
    # NOTE(review): linspace is created with dtype=np.int32, so the wrapping
    # np.round is a no-op (the values are already truncated integers).
    w, h = np.abs(start_pts - end_pts)[0]
    if w > h:
        return np.c_[(np.linspace(start_pts[0, 0], end_pts[0, 0], w+1, dtype=np.int32)[:,None] + start_pts[:,0] - start_pts[0, 0]).reshape(-1), \
                     np.round(np.linspace(start_pts[0, 1], end_pts[0, 1], w+1, dtype=np.int32)[:,None] + start_pts[:,1] - start_pts[0, 1]).reshape(-1)]
    else:
        return np.c_[np.round(np.linspace(start_pts[0, 0], end_pts[0, 0], h+1, dtype=np.int32)[:,None] + start_pts[:,0] - start_pts[0, 0]).reshape(-1), \
                     (np.linspace(start_pts[0, 1], end_pts[0, 1], h+1, dtype=np.int32)[:,None] + start_pts[:,1] - start_pts[0, 1]).reshape(-1)]
def connect2(ends):
    """Integer-only rasterization of the segment between ``ends[0]`` and
    ``ends[1]`` (Bresenham-style, via arange and floor division)."""
    dx, dy = np.diff(ends, axis=0)[0]
    if np.abs(dx) > np.abs(dy):
        run = np.abs(dx)
        xs = np.arange(ends[0, 0], ends[1, 0] + np.sign(dx), np.sign(dx), dtype=np.int32)
        start = ends[0, 1] * run + run // 2
        ys = np.arange(start, start + (run + 1) * dy, dy, dtype=np.int32) // run
    else:
        run = np.abs(dy)
        start = ends[0, 0] * run + run // 2
        xs = np.arange(start, start + (run + 1) * dx, dx, dtype=np.int32) // run
        ys = np.arange(ends[0, 1], ends[1, 1] + np.sign(dy), np.sign(dy), dtype=np.int32)
    return np.c_[xs, ys]
def connect_nd(ends):
    """N-dimensional segment rasterization between ``ends[0]`` and ``ends[1]``.

    Steps one unit at a time along the dominant axis; all other coordinates
    follow by rounded integer interpolation.
    """
    delta = np.diff(ends, axis=0)[0]
    dominant = np.argmax(np.abs(delta))
    steps = np.abs(delta[dominant])
    offsets = np.arange(steps + 1)[:, None] * delta
    return ends[0] + (offsets + (steps >> 1)) // steps
| StarcoderdataPython |
175749 | from inspect import ismethod
import numpy as np
from htm.bindings.sdr import SDR
#
# from mdp_planner import DataEncoder, DataMultiEncoder, TemporalMemory, HtmAgent
#
#
# class TestDataEncoder:
# def __init__(self):
# self.encoder = DataEncoder('-', n_vals=2, value_bits=3, activation_threshold=2)
#
# def test_encode_dense(self):
# result = self.encoder.encode_dense(1)
# expected = np.array([0, 0, 0, 1, 1, 1], dtype=np.int8)
# assert np.array_equal(result, expected)
#
# def test_encode_sparse(self):
# arr_sparse = self.encoder.encode_sparse(1)
# assert arr_sparse == [3, 4, 5]
#
# def test_str_from_dense(self):
# arr_dense = np.array([0, 0, 0, 1, 1, 1], dtype=np.int8)
# res = self.encoder.str_from_dense(arr_dense)
# assert res == '000 111'
#
# def test_decode_dense(self):
# decoded = self.encoder.decode_dense(np.array([0, 1, 1, 0, 1, 0]))
# assert decoded == [0]
#
# decoded = self.encoder.decode_dense(np.array([0, 1, 1, 1, 1, 1]))
# assert decoded == [0, 1]
#
# def test_decode_sparse(self):
# decoded = self.encoder.decode_sparse([1, 2, 4])
# assert decoded == [0]
#
# decoded = self.encoder.decode_sparse([1, 2, 3, 4, 5])
# assert decoded == [0, 1]
#
# def test_to_str(self):
# assert str(self.encoder) == 'DataEncoder("-", v2 x b3)'
#
#
# class TestDataMultiEncoder:
# def __init__(self):
# self.encoder = DataMultiEncoder((
# DataEncoder('1', 2, 4),
# DataEncoder('2', 3, 2)
# ))
#
# def test_encode_dense(self):
# result = self.encoder.encode_dense((0, 2))
# expected = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], dtype=np.int8)
# assert np.array_equal(result, expected)
#
# def test_encode_sparse(self):
# result = self.encoder.encode_sparse((1, 1))
# expected = [4, 5, 6, 7, 10, 11]
# assert result == expected
#
# def test_str_from_dense(self):
# test_arr = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], dtype=np.int8)
# result = self.encoder.str_from_dense(test_arr)
# expected = '1111 0000 00 00 11'
# assert result == expected
#
#
# class TestHtmAgent:
# def __init__(self):
# self.encoder = DataMultiEncoder((
# DataEncoder('1', 2, 4),
# DataEncoder('2', 3, 2)
# ))
# # total_bits == 14
# self.tm = TemporalMemory(
# n_columns=self.encoder.total_bits,
# cells_per_column=2,
# activation_threshold=5, learning_threshold=3,
# initial_permanence=.5, connected_permanence=.5
# )
# self.agent = HtmAgent(self.tm, self.encoder)
#
# def test_str_from_cells(self):
# active_cells = SDR((self.tm.n_columns, self.tm.cells_per_column))
# active_cells.dense[5, 0] = 1
# active_cells.dense[9, 1] = 1
#
# result = self.agent._str_from_cells(active_cells, 'test_name')
# expected = '''0000 0100 00 00 00 test_name
# 0000 0000 01 00 00'''
#
# assert result == expected
#
#
# def _test_all(*objects):
# def test_all_for_obj(obj):
# for name in dir(obj):
# attribute = getattr(obj, name)
# if ismethod(attribute) and name.startswith('test_'):
# attribute()
#
# for obj in objects:
# test_all_for_obj(obj)
#
#
# def test_all():
# _test_all(
# TestDataEncoder(),
# TestDataMultiEncoder(),
# TestHtmAgent()
# )
| StarcoderdataPython |
3217831 | <filename>login/migrations/0036_auto_20170414_1643.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-14 11:13
from __future__ import unicode_literals
from django.db import migrations, models
import draceditor.models
class Migration(migrations.Migration):
    """Auto-generated Django migration (makemigrations, Django 1.10).

    Alters two fields on the `login` app:
      * Post.content     -> DraceditorField (from the draceditor package)
      * Question.details -> optional TextField (blank/null, max 1000 chars)
    """

    dependencies = [
        ('login', '0035_auto_20170411_0238'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='content',
            field=draceditor.models.DraceditorField(),
        ),
        migrations.AlterField(
            model_name='question',
            name='details',
            field=models.TextField(blank=True, max_length=1000, null=True),
        ),
    ]
| StarcoderdataPython |
4800690 | from enum import Enum
# Display names for the two building cards (see deck entries 1-6 below).
HOUSE = 'House'
HOTEL = 'Hotel'
class Card():
    """Base class for every Monopoly Deal card.

    Attributes:
        index: position of the card in the `deck` mapping.
        value: face value in millions.
    """

    def __init__(self, index, value):
        self.index, self.value = index, value
class Cashable(Card):
    # Marker subclass for cards that can also be used as money
    # (CashCard, ActionCard, RentCard) -- adds no behavior of its own.
    pass
class CashCard(Cashable):
    """Pure money card; its repr shows only the dollar value."""

    def __repr__(self):
        return '<CashCard (${})>'.format(self.value)
class ActionCard(Cashable):
    """A playable action (also bankable for its face value).

    Attributes:
        action_type: an ActionType member naming the action.
        description: human-readable rules text for the action.
    """

    def __init__(self, index, value, action_type, description):
        super().__init__(index, value)
        self.action_type = action_type
        self.description = description

    def __repr__(self):
        return '<ActionCard: {} (${})>'.format(self.action_type.value, self.value)
class PropertyCard(Card):
    """A property card.

    Attributes:
        name: property name shown to the player.
        colors: set of Color members this card can count toward.
        rent: rent per number of owned properties in the group (may be empty).
        buildable: whether buildings/rent apply (False for House/Hotel cards).
    """

    def __init__(self, index, value, name, colors, rent, buildable):
        super().__init__(index, value)
        self.name = name
        self.colors = colors
        self.rent = rent
        self.buildable = buildable

    def __repr__(self):
        return '<PropertyCard: {} (${})>'.format(self.name, self.value)
class RentCard(Cashable):
    """Rent demand card (also bankable).

    Attributes:
        colors: set of Color groups the rent may be charged against.
        wild: True for the targeted ("wild") rent variant.
    """

    def __init__(self, index, value, colors, wild):
        super().__init__(index, value)
        self.colors = colors  # set of Color members
        self.wild = wild      # bool -- targets a single player when True

    def __repr__(self):
        return '<RentCard: {} (${})>'.format(self.colors, self.value)
class ActionType(Enum):
    """Every playable action; member values are the printable card titles."""
    BDAY = "It's my birthday!"
    DOUBLE_THE_RENT = "Double the Rent"
    DEAL_BREAKER = "Deal Breaker"
    JUST_SAY_NO = "Just Say No!"
    DEBT_COLLECTOR = "Debt Collector"
    SLY_DEAL = "Sly Deal"
    FORCED_DEAL = "Forced Deal"
    PASS_GO = "Pass Go"
class Color(Enum):
    """Property color groups, plus the railroad/utility pseudo-groups and the
    ALL wildcard used by fully-wild property and rent cards."""
    RED = "red"
    DBLUE = "darkblue"
    LBLUE = "lightblue"
    PURPLE = "purple"
    GREEN = "green"
    ORANGE = "orange"
    YELLOW = "yellow"
    BROWN = "brown"
    RR = "railroad"
    UTIL = "utility"
    ALL = "all"

    def __repr__(self):
        # NOTE: returns the bare value string (e.g. "red") rather than the
        # conventional <Color.RED: 'red'> enum repr.
        return self.value
# Full 108-card Monopoly Deal deck, keyed by card index (1-108).
deck = {
    # Buildings (non-buildable PropertyCards usable on any color set).
    1: PropertyCard(1, 3, HOUSE, {Color.ALL}, [], False),
    2: PropertyCard(2, 3, HOUSE, {Color.ALL}, [], False),
    3: PropertyCard(3, 3, HOUSE, {Color.ALL}, [], False),
    4: PropertyCard(4, 4, HOTEL, {Color.ALL}, [], False),
    5: PropertyCard(5, 4, HOTEL, {Color.ALL}, [], False),
    6: PropertyCard(6, 4, HOTEL, {Color.ALL}, [], False),
    # Action cards (7-36).
    7: ActionCard(7, 2, ActionType.BDAY, "All players give you $2M as a gift."),
    8: ActionCard(8, 2, ActionType.BDAY, "All players give you $2M as a gift."),
    9: ActionCard(9, 2, ActionType.BDAY, "All players give you $2M as a gift."),
    10: ActionCard(10, 1, ActionType.DOUBLE_THE_RENT, "Needs to be played with a rent card."),
    11: ActionCard(11, 1, ActionType.DOUBLE_THE_RENT, "Needs to be played with a rent card."),
    12: ActionCard(12, 5, ActionType.DEAL_BREAKER, "Steal a complete set from any player (includes any buildings)"),
    13: ActionCard(13, 5, ActionType.DEAL_BREAKER, "Steal a complete set from any player (includes any buildings)"),
    14: ActionCard(14, 4, ActionType.JUST_SAY_NO, "Use any time when an action card is played against you."),
    15: ActionCard(15, 4, ActionType.JUST_SAY_NO, "Use any time when an action card is played against you."),
    16: ActionCard(16, 4, ActionType.JUST_SAY_NO, "Use any time when an action card is played against you."),
    17: ActionCard(17, 3, ActionType.DEBT_COLLECTOR, "Force any player to pay you $5M"),
    18: ActionCard(18, 3, ActionType.DEBT_COLLECTOR, "Force any player to pay you $5M"),
    19: ActionCard(19, 3, ActionType.DEBT_COLLECTOR, "Force any player to pay you $5M"),
    20: ActionCard(20, 3, ActionType.SLY_DEAL, "Steal a property from a player of your choice (cannot be a part of a full set)!"),
    21: ActionCard(21, 3, ActionType.SLY_DEAL, "Steal a property from a player of your choice (cannot be a part of a full set)!"),
    22: ActionCard(22, 3, ActionType.SLY_DEAL, "Steal a property from a player of your choice (cannot be a part of a full set)!"),
    23: ActionCard(23, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
    24: ActionCard(24, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
    25: ActionCard(25, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
    26: ActionCard(26, 3, ActionType.FORCED_DEAL, "Swap any property with another player (cannot be part of a full set)!"),
    27: ActionCard(27, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    28: ActionCard(28, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    29: ActionCard(29, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    30: ActionCard(30, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    31: ActionCard(31, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    32: ActionCard(32, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    33: ActionCard(33, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    34: ActionCard(34, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    35: ActionCard(35, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    36: ActionCard(36, 1, ActionType.PASS_GO, "Draw two extra cards!"),
    # Named properties (37-64); rent lists are indexed by set size - 1.
    37: PropertyCard(37, 2, "Electric Company", {Color.UTIL}, [1, 2], True),
    38: PropertyCard(38, 2, "Waterworks", {Color.UTIL}, [1, 2], True),
    39: PropertyCard(39, 2, "Pennsylvania Railroad", {Color.RR}, [1, 2, 3, 4], True),
    40: PropertyCard(40, 2, "Reading Railroad", {Color.RR}, [1, 2, 3, 4], True),
    41: PropertyCard(41, 2, "B. & O. Railroad", {Color.RR}, [1, 2, 3, 4], True),
    42: PropertyCard(42, 2, "Short Line Railroad", {Color.RR}, [1, 2, 3, 4], True),
    43: PropertyCard(43, 1, "Baltic Avenue", {Color.BROWN}, [1, 2], True),
    44: PropertyCard(44, 1, "Mediterranean Avenue", {Color.BROWN}, [1, 2], True),
    45: PropertyCard(45, 1, "Oriental Avenue", {Color.LBLUE}, [1, 2, 3], True),
    46: PropertyCard(46, 1, "Connecticut Avenue", {Color.LBLUE}, [1, 2, 3], True),
    47: PropertyCard(47, 1, "Vermont Avenue", {Color.LBLUE}, [1, 2, 3], True),
    48: PropertyCard(48, 2, "States Avenue", {Color.PURPLE}, [1, 2, 4], True),
    49: PropertyCard(49, 2, "Virginia Avenue", {Color.PURPLE}, [1, 2, 4], True),
    50: PropertyCard(50, 2, "St. Charles Place", {Color.PURPLE}, [1, 2, 4], True),
    51: PropertyCard(51, 2, "St. James Place", {Color.ORANGE}, [1, 3, 5], True),
    52: PropertyCard(52, 2, "Tennessee Avenue", {Color.ORANGE}, [1, 3, 5], True),
    53: PropertyCard(53, 2, "New York Avenue", {Color.ORANGE}, [1, 3, 5], True),
    54: PropertyCard(54, 3, "Indiana Avenue", {Color.RED}, [2, 3, 6], True),
    55: PropertyCard(55, 3, "Illinois Avenue", {Color.RED}, [2, 3, 6], True),
    56: PropertyCard(56, 3, "Kentucky Avenue", {Color.RED}, [2, 3, 6], True),
    57: PropertyCard(57, 3, "Atlantic Avenue", {Color.YELLOW}, [2, 4, 6], True),
    58: PropertyCard(58, 3, "<NAME>", {Color.YELLOW}, [2, 4, 6], True),
    59: PropertyCard(59, 3, "Ventnor Avenue", {Color.YELLOW}, [2, 4, 6], True),
    60: PropertyCard(60, 4, "Pennsylvania Avenue", {Color.GREEN}, [2, 4, 7], True),
    61: PropertyCard(61, 4, "Pacific Avenue", {Color.GREEN}, [2, 4, 7], True),
    62: PropertyCard(62, 4, "North Carolina Avenue", {Color.GREEN}, [2, 4, 7], True),
    63: PropertyCard(63, 4, "Park Place", {Color.DBLUE}, [3, 8], True),
    64: PropertyCard(64, 4, "Boardwalk", {Color.DBLUE}, [3, 8], True),
    # Wild properties (65-75).
    # NOTE(review): {Color.ALL, Color.ALL} collapses to a one-element set --
    # probably meant to represent a two-color wild; confirm intent.
    65: PropertyCard(65, 0, "Wild", {Color.ALL, Color.ALL}, [], True),
    66: PropertyCard(66, 0, "Wild", {Color.ALL, Color.ALL}, [], True),
    67: PropertyCard(67, 4, "Wild", {Color.RR, Color.LBLUE}, [], True),
    68: PropertyCard(68, 2, "Wild", {Color.RR, Color.UTIL}, [], True),
    69: PropertyCard(69, 4, "Wild", {Color.RR, Color.GREEN}, [], True),
    70: PropertyCard(70, 4, "Wild", {Color.GREEN, Color.DBLUE}, [], True),
    71: PropertyCard(71, 3, "Wild", {Color.YELLOW, Color.RED}, [], True),
    72: PropertyCard(72, 3, "Wild", {Color.YELLOW, Color.RED}, [], True),
    73: PropertyCard(73, 1, "Wild", {Color.LBLUE, Color.BROWN}, [], True),
    74: PropertyCard(74, 2, "Wild", {Color.PURPLE, Color.ORANGE}, [], True),
    75: PropertyCard(75, 2, "Wild", {Color.PURPLE, Color.ORANGE}, [], True),
    # Rent cards (76-88); the ALL rents are wild/targeted.
    76: RentCard(76, 1, {Color.BROWN, Color.LBLUE}, False),
    77: RentCard(77, 1, {Color.BROWN, Color.LBLUE}, False),
    78: RentCard(78, 1, {Color.RED, Color.YELLOW}, False),
    79: RentCard(79, 1, {Color.RED, Color.YELLOW}, False),
    80: RentCard(80, 1, {Color.GREEN, Color.DBLUE}, False),
    81: RentCard(81, 1, {Color.GREEN, Color.DBLUE}, False),
    82: RentCard(82, 1, {Color.RR, Color.UTIL}, False),
    83: RentCard(83, 1, {Color.RR, Color.UTIL}, False),
    84: RentCard(84, 1, {Color.PURPLE, Color.ORANGE}, False),
    85: RentCard(85, 1, {Color.PURPLE, Color.ORANGE}, False),
    86: RentCard(86, 3, {Color.ALL}, True),
    87: RentCard(87, 3, {Color.ALL}, True),
    88: RentCard(88, 3, {Color.ALL}, True),
    # Cash cards (89-108).
    89: CashCard(89, 1),
    90: CashCard(90, 1),
    91: CashCard(91, 1),
    92: CashCard(92, 1),
    93: CashCard(93, 1),
    94: CashCard(94, 1),
    95: CashCard(95, 2),
    96: CashCard(96, 2),
    97: CashCard(97, 2),
    98: CashCard(98, 2),
    99: CashCard(99, 2),
    100: CashCard(100, 3),
    101: CashCard(101, 3),
    102: CashCard(102, 3),
    103: CashCard(103, 4),
    104: CashCard(104, 4),
    105: CashCard(105, 4),
    106: CashCard(106, 5),
    107: CashCard(107, 5),
    108: CashCard(108, 10)
}
# Rent schedule per color group; entry i is the rent with i+1 owned
# properties of that group. (Fix: removed stray dataset-separator text that
# was fused onto the closing brace and made the module a SyntaxError.)
property_set_rents = {
    Color.UTIL: [1, 2],
    Color.RR: [1, 2, 3, 4],
    Color.BROWN: [1, 2],
    Color.LBLUE: [1, 2, 3],
    Color.PURPLE: [1, 2, 4],
    Color.ORANGE: [1, 3, 5],
    Color.RED: [2, 3, 6],
    Color.YELLOW: [2, 4, 6],
    Color.GREEN: [2, 4, 7],
    Color.DBLUE: [3, 8]
}
143597 | """ 2d and 3d wrappers for plotting 2d and 3d data in dataframes """
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, GWU Physics"
__license__ = "Free BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import datetime
import matplotlib.pyplot as plt
import matplotlib.ticker as mplticker
import matplotlib.dates as dates
from mpl_toolkits.mplot3d import Axes3D, axes3d, art3d #Need Axes3d for 3d projection!
import numpy as np
import matplotlib.cm as cm
from matplotlib.collections import PolyCollection
from basic_plots import PlotError
from skspec.core.abcspectra import SpecError
import plot_utils as pu
import skspec.config as pvconfig
from skspec.exceptions import badvalue_error
# Smart float to int conversion
_ir=lambda(x): int(round(x))
from skspec.plotting.basic_plots import range_timeplot, areaplot, _genplot
from skspec.plotting.plot_registry import PlotRegister
# Extra axis spacing so 3d timestamp tick labels don't clip (used by the
# commented-out orientation code in _gen2d3d).
_TIMESTAMPPADDING = 2.9 #Padding for timestamp labels
_TIMESTAMPFORMAT = '%H:%M:%S'  # strftime format used by the format_date tick formatter
# Colormap of wire plot: hack and only works on a square mesh (eg 100x100)
# https://github.com/matplotlib/matplotlib/issues/3562
def wire_cmap(wires, ax, cmap='hsv'):
    """ Add a colormap to a set of wires (returned from ax.plot_wireframe).

    Replaces the monochrome Line3DCollection produced by plot_wireframe with
    a new one whose segments are colored by their z value. Only works when
    the wireframe's segment array is a square mesh (ndim == 3).
    """
    # Retrieve data from internal storage of plot_wireframe, then delete it.
    # NOTE(review): relies on the private `_segments3d` attribute of
    # Line3DCollection -- may break across matplotlib versions.
    if wires._segments3d.ndim != 3:
        raise PlotError('Wireframe colormapping for non-squre data (ie same '
                        'number rows and columns) is not supported.')

    nx, ny, _ = np.shape(wires._segments3d)

    wire_x = np.array(wires._segments3d)[:, :, 0].ravel()
    wire_y = np.array(wires._segments3d)[:, :, 1].ravel()
    wire_z = np.array(wires._segments3d)[:, :, 2].ravel()
    wires.remove()

    # create data for a LineCollection: pair each vertex with its predecessor
    wire_x1 = np.vstack([wire_x, np.roll(wire_x, 1)])
    wire_y1 = np.vstack([wire_y, np.roll(wire_y, 1)])
    wire_z1 = np.vstack([wire_z, np.roll(wire_z, 1)])

    # Drop the wrap-around segments introduced by np.roll at each row start.
    to_delete = np.arange(0, nx*ny, ny)
    wire_x1 = np.delete(wire_x1, to_delete, axis=1)
    wire_y1 = np.delete(wire_y1, to_delete, axis=1)
    wire_z1 = np.delete(wire_z1, to_delete, axis=1)
    scalars = np.delete(wire_z, to_delete)

    segs = [list(zip(xl, yl, zl)) for xl, yl, zl in \
            zip(wire_x1.T, wire_y1.T, wire_z1.T)]

    # Plots the wireframe by a a line3DCollection colored by the z scalars.
    new_wires = art3d.Line3DCollection(segs, cmap=cmap)
    new_wires.set_array(scalars)
    ax.add_collection(new_wires)
    return new_wires
def custom_wireframe(ax, X, Y, Z, *args, **kwargs):
    """
    Overoad matplotlib's plot_wireframe for a special use case that we want
    to plot a wireframe over a surface with customizability of those
    lines.

    In future versions, this may be incorporated into matplotlib natively.
    This would still be required for backwards compatibility.
    """
    # rstride/cstride: row/column sampling steps; 0 suppresses that direction.
    rstride = kwargs.pop("rstride", 1)
    cstride = kwargs.pop("cstride", 1)
    ts = Z

    had_data = ax.has_data()
    Z = np.atleast_2d(Z)
    # FIXME: Support masked arrays
    X, Y, Z = np.broadcast_arrays(X, Y, Z)
    rows, cols = Z.shape

    # We want two sets of lines, one running along the "rows" of
    # Z and another set of lines running along the "columns" of Z.
    # This transpose will make it easy to obtain the columns.
    tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)

    # NOTE(review): xrange is Python-2-only, consistent with the rest of
    # this module (print statements, basestring).
    if rstride:
        rii = list(xrange(0, rows, rstride))
        # Add the last index only if needed
        if rows > 0 and rii[-1] != (rows - 1):
            rii += [rows-1]
    else:
        rii = []

    if cstride:
        cii = list(xrange(0, cols, cstride))
        if cols > 0 and cii[-1] != (cols - 1):
            cii += [cols-1]
    else:
        cii = []

    # If the inputs were empty, then just
    # reset everything.
    if Z.size == 0:
        rii = []
        cii = []

    xlines = [X[i] for i in rii]
    ylines = [Y[i] for i in rii]
    zlines = [Z[i] for i in rii]

    txlines = [tX[i] for i in cii]
    tylines = [tY[i] for i in cii]
    tzlines = [tZ[i] for i in cii]

    # Row lines from rowstrides
    lines = [list(zip(xl, yl, zl)) for xl, yl, zl in \
             zip(xlines, ylines, zlines)]

    # Col lines form colstrides
    lines += [list(zip(xl, yl, zl)) for xl, yl, zl in \
              zip(txlines, tylines, tzlines)]

    linec = art3d.Line3DCollection(lines, *args, **kwargs)
    ax.add_collection(linec)
    ax.auto_scale_xyz(X, Y, Z, had_data)

    # When a colormap was requested, rebuild the collection z-colored.
    if 'cmap' in kwargs:
        linec = wire_cmap(linec, ax, cmap=kwargs['cmap'])

    return linec
def format_date(x, pos=None):
    """Matplotlib tick formatter: render a matplotlib date number as HH:MM:SS."""
    timestamp = dates.num2date(x)
    return timestamp.strftime(_TIMESTAMPFORMAT)
# Rename
def _gen2d3d(*args, **pltkwargs):
    # UPDATE
    """ Abstract layout for 2d plot.
    For convienence, a few special labels, colorbar and background keywords
    have been implemented.  If these are not adequate, it one can add
    custom colorbars, linelabels background images etc... easily just by
    using respecitve calls to plot (plt.colorbar(), plt.imshow(),
    plt.clabel() ); my implementations are only for convienences and
    some predefined, cool styles.

    countours: Number of desired contours from output.

    label: Predefined label types.  For now, only integer values 1,2.  Use plt.clabel to add a custom label.

    background: Integers 1,2 will add gray or autumn colormap under contour plot.  Use plt.imgshow() to generate
                custom background, or pass a PIL-opened image (note, proper image scaling not yet implemented).

    c_mesh, r_mesh: These control how man column and row iso lines will be projected onto the 3d plot.
                    For example, if c_mesh=10, then 10 isolines will be plotted as columns, despite the actual length of the
                    columns.  Alternatively, one can pass in c_stride directly, which is a column step size rather than
                    an absolute number, and c_mesh will be disregarded.

    fill: bool (False)
        Fill between contour lines.

    **pltkwargs: Will be passed directly to plt.contour().

    Returns
    -------
    tuple: (Axes, SurfaceFunction)
        Returns axes object and the surface function (e.g. contours for
        contour plot.  Surface for surface plot.
    """

    # Use a label mapper to allow for datetimes in any plot x/y axis
    # (also binds a throwaway `_` name, which is never used).
    _x_dti = _ = _y_dti = False

    # Passed Spectra: derive xx/yy meshes from its index/columns, converting
    # datetime axes to matplotlib date numbers when possible.
    if len(args) == 1:
        ts = args[0]

        try:
            index = np.array([dates.date2num(x) for x in ts.index])
            _x_dti = True
        except AttributeError:
            index = ts.index.values  #VALUES NECESSARY FOR POLY CMAP

        try:
            cols = np.array([dates.date2num(x) for x in ts.columns])
            _y_dti = True
        except AttributeError:
            cols = ts.columns.values  #VALUES NECESSARY FOR POLY CMAP

        yy, xx = np.meshgrid(cols, index)

    # Passed xx, yy, ts/zz
    elif len(args) == 3:
        xx, yy, ts = args
        cols, index = ts.columns.values, ts.index.values

    else:
        raise PlotError("Please pass a single spectra, or xx, yy, zz. Got %s args"
                        % len(args))

    # Boilerplate from basic_plots._genplot(); could refactor as decorator
    xlabel = pltkwargs.pop('xlabel', '')
    ylabel = pltkwargs.pop('ylabel', '')
    zlabel = pltkwargs.pop('zlabel', '')
    title = pltkwargs.pop('title', '')

    labelsize = pltkwargs.pop('labelsize', pvconfig.LABELSIZE)  #Can also be ints
    titlesize = pltkwargs.pop('titlesize', pvconfig.TITLESIZE)

    # Choose plot kind
    kind = pltkwargs.pop('kind', 'contour')
    grid = pltkwargs.pop('grid', True)
    #    pltkwargs.setdefault('legend', False) #(any purpose in 2d?)
    # LEGEND FOR 2D PLOT: http://stackoverflow.com/questions/10490302/how-do-you-create-a-legend-for-a-contour-plot-in-matplotlib
    pltkwargs.setdefault('linewidth', 1)

    cbar = pltkwargs.pop('cbar', False)

    # `outline` adds wire lines over a surf, or edge color on a waterfall.
    outline = pltkwargs.pop('outline', None)
    if outline:
        if kind != 'surf' and kind != 'waterfall':
            raise PlotError('"outline" is only valid for "surf" and "waterfall"'
                            ' plots. Please use color/cmap for all other color'
                            ' designations.')

    fig = pltkwargs.pop('fig', None)
    ax = pltkwargs.pop('ax', None)
    fill = pltkwargs.pop('fill', pvconfig.FILL_CONTOUR)

    xlim = pltkwargs.pop('xlim', None)
    ylim = pltkwargs.pop('ylim', None)
    zlim = pltkwargs.pop('zlim', None)

    #Private attributes
    _modifyax = pltkwargs.pop('_modifyax', True)
    contours = pltkwargs.pop('contours', pvconfig.NUM_CONTOURS)
    label = pltkwargs.pop('label', None)

    projection = None

    # 3d-only setup: camera angles and mesh/stride sampling.
    if kind in PLOTPARSER.plots_3d:
        projection = '3d'

        elev = pltkwargs.pop('elev', 35)
        azim = pltkwargs.pop('azim', -135)

        view = pltkwargs.pop('view', None)
        if view:
            if view == 1:
                elev, azim = 35, -135
            elif view == 2:
                elev, azim = 35, -45
            elif view == 3:
                elev, azim = 20, -10  # Side view
            elif view == 4:
                elev, azim = 20, -170
            elif view == 5:
                elev, azim = 0, -90
            elif view == 6:
                elev, azim = 65, -90
            else:
                raise PlotError('View must be between 1 and 6; otherwise set'
                                ' "elev" and "azim" keywords.')

        # Orientation of zlabel (doesn't work...)
        _zlabel_rotation = 0.0
        if azim < 0:
            _zlabel_rotation = 90.0

        # `mesh` is shorthand for setting both c_mesh and r_mesh.
        if 'mesh' in pltkwargs:
            pltkwargs['c_mesh'] = pltkwargs['r_mesh'] = pltkwargs.pop('mesh')

        # Defaults will be ignored if mesh or ciso in kwargs
        ciso_default = pvconfig.C_MESH
        if len(ts.columns) < ciso_default:
            ciso_default = len(ts.columns)

        riso_default = pvconfig.R_MESH
        if len(ts.index) < riso_default:
            riso_default = len(ts.index)

        c_mesh = pltkwargs.pop('c_mesh', ciso_default)
        r_mesh = pltkwargs.pop('r_mesh', riso_default)

        if c_mesh > ts.shape[1] or c_mesh < 0:
            raise PlotError('"c_mesh/column mesh" must be between 0 and %s, got "%s"' %
                            (ts.shape[1], c_mesh))

        if r_mesh > ts.shape[0] or r_mesh < 0:
            raise PlotError('"r_mesh/row mesh" must be between 0 and %s, got "%s"' %
                            (ts.shape[0], r_mesh))

        # Convert absolute isoline counts into strides for mpl.
        if c_mesh == 0:
            cstride = 0
        else:
            cstride = _ir(ts.shape[1]/float(c_mesh))

        if r_mesh == 0:
            rstride = 0
        else:
            rstride = _ir(ts.shape[0]/float(r_mesh))

        pltkwargs.setdefault('cstride', cstride)
        pltkwargs.setdefault('rstride', rstride)

    elif kind == 'contour':
        pass

    else:
        raise PlotError('_gen2d3d invalid kind: "%s". '
                        'Choose from %s' % (kind, PLOTPARSER.plots_2d_3d))

    # Is this the best logic for 2d/3d fig?
    if not ax:
        f = plt.figure()
#        ax = f.gca(projection=projection)
        ax = f.add_subplot(111, projection=projection)
        if not fig:
            fig = f

    # PLT.CONTOUR() doesn't take 'color'; rather, takes 'colors' for now
    if 'color' in pltkwargs:
        if kind == 'contour' or kind == 'contour3d':
            pltkwargs['colors'] = pltkwargs.pop('color')

    # Convienence method to pass in string colors
    if 'colormap' in pltkwargs:
        pltkwargs['cmap'] = pltkwargs.pop('colormap')

    if 'cmap' in pltkwargs:
        # NOTE: basestring is Python-2-only, like the rest of this module.
        if isinstance(pltkwargs['cmap'], basestring):
            pltkwargs['cmap'] = pu.cmget(pltkwargs['cmap'])

    # Contour Plots
    # -------------

    # Broken background image
    ### More here http://matplotlib.org/examples/pylab_examples/image_demo3.html ###
    # Refactored with xx, yy instead of df.columns/index UNTESTED
    #if background:
        #xmin, xmax, ymin, ymax = xx.min(), xx.max(), yy.min(), yy.max()

        ## Could try rescaling contour rather than image:
        ##http://stackoverflow.com/questions/10850882/pyqt-matplotlib-plot-contour-data-on-top-of-picture-scaling-issue
        #if background==1:
            #im = ax.imshow(ts, interpolation='bilinear', origin='lower',
                           #cmap=cm.gray, extent=(xmin, xmax, ymin, ymax))

    #### This will take a custom image opened in PIL or it will take plt.imshow() returned from somewhere else
        #else:
            #try:
                #im = ax.imshow(background)
    #### Perhaps image was not correctly opened
            #except Exception:
                #raise badvalue_error(background, 'integer 1,2 or a PIL-opened image')

    # Note this overwrites the 'contours' variable from an int to array
    if kind == 'contour' or kind == 'contour3d':

        # Cornercase datetimeindex and offest from add projection hack
        try:
            pltkwargs['offset'] = dates.date2num(pltkwargs['offset'])
        except Exception:
            pass

        if fill:  #Values of DTI doesn't work
            mappable = ax.contourf(xx, yy, ts.values, contours, **pltkwargs)  #linewidths is a pltkwargs arg
        else:
            mappable = ax.contour(xx, yy, ts.values, contours, **pltkwargs)

        ### Pick a few label styles to choose from.
        if label:
            if label == 1:
                # NOTE(review): Axes.clabel requires the ContourSet as its
                # first argument (ax.clabel(mappable, ...)); as written this
                # call likely raises -- confirm against the mpl version used.
                ax.clabel(inline=1, fontsize=10)
            elif label == 2:
                # NOTE(review): `levels` is undefined in this scope, so this
                # branch raises NameError; probably meant mappable.levels.
                ax.clabel(levels[1::2], inline=1, fontsize=10)  #label every second line
            else:
                # NOTE(review): PlotError is given two positional args here;
                # the commented code above suggests badvalue_error(label, ...)
                # was intended -- confirm.
                raise PlotError(label, 'integer of value 1 or 2')

    elif kind == 'surf':
        mappable = ax.plot_surface(xx, yy, ts, **pltkwargs)

        if outline:
            # Treat `outline` as a colormap if possible, else a solid color.
            try:
                pltkwargs['cmap'] = pu.cmget(outline)
            except Exception:  #Don't change; attribute error fails when outline=None
                pltkwargs['color'] = outline
                pltkwargs.pop('cmap')
            custom_wireframe(ax, xx, yy, ts, **pltkwargs)
        # Wires are thrown out, since mappable is the surface, and only mappable returned

    elif kind == 'wire':
        pltkwargs.setdefault('color', 'black')
        mappable = custom_wireframe(ax, xx, yy, ts, **pltkwargs)

    elif kind == 'waterfall':

        # Parse outline color (if colormap, error!)
        try:
            pu.cmget(outline)
        except Exception:
            pltkwargs['edgecolors'] = outline
        else:
            raise PlotError('Waterfall "outline" must be a solid color, not colormap.')

        pltkwargs.setdefault('closed', False)
        alpha = pltkwargs.setdefault('alpha', None)

        # Need to handle cmap/colors a bit differently for PolyCollection API
        if 'color' in pltkwargs:
            pltkwargs['facecolors'] = pltkwargs.pop('color')
        cmap = pltkwargs.setdefault('cmap', None)

        # Auto alpha scales down as the number of stacked polygons grows.
        if alpha is None:  #as opposed to 0
            alpha = 0.6 * (13.0/ts.shape[1])
            if alpha > 0.6:
                alpha = 0.6

        #Delete stride keywords (waterfall doesn't have strides: not a surface!)
        for key in ['cstride', 'rstride']:
            try:
                del pltkwargs[key]
            except KeyError:
                pass

        # Verts are index dotted with data
        verts = []
        for col in ts.columns:
            values = ts[col]
            # NOTE(review): this mutates the caller's ts in place (drops the
            # first/last sample of every column to the column minimum so the
            # polygons close) -- consider operating on a copy.
            values[0], values[-1] = values.min().min(), values.min().min()
            verts.append(list(zip(ts.index, values)))

        mappable = PolyCollection(verts, **pltkwargs)

        if cmap:
            mappable.set_array(cols)  #If set array in __init__, autogens a cmap!
            mappable.set_cmap(pltkwargs['cmap'])

        mappable.set_alpha(alpha)

        #zdir is the direction used to plot; dont' fully understand
        ax.add_collection3d(mappable, zs=cols, zdir='x')

        # custom limits/labels polygon plot (reverse x,y)
        if not ylim:
            ylim = (max(index), min(index))  #REVERSE AXIS FOR VIEWING PURPOSES
        if not xlim:
            xlim = (min(cols), max(cols))  #x

        if not zlim:
            zlim = (min(ts.min()), max(ts.max()))  #How to get absolute min/max of ts values

        # Reverse labels/DTI call for correct orientaion HACK HACK HACK
        xlabel, ylabel = ylabel, xlabel
        _x_dti, _y_dti = _y_dti, _x_dti
        azim = -1 * azim

    # General Features
    # ----------------

    # Some applications (like add_projection) shouldn't alther axes features
    if not _modifyax:
        return (ax, mappable)

    if cbar:
        # Do I want colorbar outside of fig?  Wouldn't be better on axes?
        try:
            cb = fig.colorbar(mappable, ax=ax)
            # Label colorbar on contour since no 3d-zlabel
            if kind == 'contour':
                cb.set_label(zlabel)
        except Exception:
            raise PlotError("Colorbar failed; did you pass a colormap?")

    if grid:
        if grid == True:
            ax.grid()
        else:
            ax.grid(color=grid)  #Let's any supported color in

    # Format datetime axis
    # -------------------
    if _x_dti:
        ax.xaxis.set_major_formatter(mplticker.FuncFormatter(format_date))

        # Uncomment for custom 3d timestamp orientation
#        if projection:
#            for t1 in ax.yaxis.get_ticklabels():
#                t1.set_ha('right')
#                t1.set_rotation(30)
#            ax.yaxis._axinfo['label']['space_factor'] = _TIMESTAMPPADDING

    if _y_dti:
        ax.yaxis.set_major_formatter(mplticker.FuncFormatter(format_date))

        # Uncomment for custom 3d timestamp orientation
#        if projection:
#            for t1 in ax.yaxis.get_ticklabels():
#                t1.set_ha('right')
#                t1.set_rotation(30)
#            ax.yaxis._axinfo['label']['space_factor'] = _TIMESTAMPPADDING

    # NOTE(review): set_*lim3d only exists on 3d axes; passing xlim/ylim/zlim
    # with a 2d contour would raise AttributeError here -- confirm intent.
    if xlim:
        ax.set_xlim3d(xlim)

    if ylim:
        ax.set_ylim3d(ylim)

    if zlim:
        ax.set_zlim3d(zlim)

    # Set elevation/azimuth for 3d plots
    if projection:
        ax.view_init(elev, azim)
        ax.set_zlabel(zlabel, fontsize=labelsize, rotation=_zlabel_rotation)

    ax.set_xlabel(xlabel, fontsize=labelsize)
    ax.set_ylabel(ylabel, fontsize=labelsize)
    ax.set_title(title, fontsize=titlesize)

    # Return Ax, contours/surface/polygons etc...
    return (ax, mappable)
# Support images or any other 2d plot besides contours?
def add_projection(ts, plane='xz', flip=False, fill=False, **contourkwds):
    """Project a contour plot of *ts* onto one wall of a 3d plot.

    Parameters
    ----------
    ts : Spectra
        Data to contour.

    plane : str ('xz')
        Wall to draw on: 'xy', 'xz' or 'yz'; letter order is ignored.

    flip : bool (False)
        Use the opposite wall of the chosen plane (left <-> right wall,
        or floor <-> ceiling).

    fill : bool (False)
        Fill the contours.

    **contourkwds
        Keywords passed directly to the underlying contour call.

    Returns
    -------
    Axes
        The axes the projection was drawn on.
    """
    plane = plane.lower()
    # Accept either letter order for each plane name.
    plane = {'zx': 'xz', 'zy': 'yz', 'yx': 'xy'}.get(plane, plane)

    if plane == 'xy':
        zdir = 'z'
        offset = ts.max().max() if flip else ts.min().min()
    elif plane == 'yz':
        zdir = 'x'
        offset = ts.index.max() if flip else ts.index.min()
    elif plane == 'xz':
        zdir = 'y'
        offset = ts.columns.min() if flip else ts.columns.max()
    else:
        raise PlotError('Invalid plane "%s": must be "xy, xz or yz".' % plane)

    ax, _ = _gen2d3d(ts,
                     zdir=zdir,
                     kind='contour',
                     offset=offset,
                     fill=fill,
                     _modifyax=False,
                     **contourkwds)

    # Only the axes is public; the contour set stays internal.
    return ax
def spec3d(ts, projection=True, fill=True, samples=5, contourkwds=None, **pltkwargs):
    """ Wireframe plot with no connected clines.  By default, adds an xz
    projection.

    Parameters
    ----------
    projection : bool or color/colormap
        If truthy, a contour projection is added; True selects the default
        projection colormap from config.

    samples : int
        Number of samples to take from dataset.  Defaults to 5.  Must
        be within 0 and the number of columns in Spectra.

    contourkwds : dict or None
        Keywords that hold arguments specific to the projection.  ``None``
        means no extra keywords.  (Fix: the default used to be a shared
        mutable ``{}`` that this function wrote ``alpha`` into, leaking
        state across calls.)

    Returns
    -------
    tuple: (Axes, SurfaceFunction)
        Returns axes object and the surface function (e.g. contours for
        contour plot.  Surface for surface plot.)

    Notes
    -----
    Mostly a shortcut for a very useful 3d look at data.  Thus, did not add
    much customizability as this plot can be assembled from _gen2d3d2d and
    add_projeciton.
    """
    # Copy so neither the caller's dict nor a shared default is mutated.
    contourkwds = {} if contourkwds is None else dict(contourkwds)

    for invalid in ['c_mesh', 'r_mesh', 'cstride', 'rstride']:
        if invalid in pltkwargs:
            # (Fix: added the missing space between the two message halves.)
            raise PlotError('Unsupported Keyword %s. '
                            'Please use the samples argument' % invalid)

    #3d Plot: column isolines only (r_mesh=0 suppresses row lines)
    pltkwargs['kind'] = 'wire'
    pltkwargs['r_mesh'] = 0
    pltkwargs['c_mesh'] = samples
    ax, mappable = _gen2d3d(ts, **pltkwargs)

    # Projection
    contourkwds.setdefault('alpha', 0.3)

    if projection:
        if projection == True:
            # NOTE(review): the resolved colormap is assigned but never
            # forwarded to add_projection -- confirm intent.
            projection = pvconfig.PROJECTION_CMAP
        ax = add_projection(ts, ax=ax, fill=fill, **contourkwds)

    return ax, mappable
# SET REGISTER HERE! HAS TO BE HERE BECAUSE GEN2D USES IT, SO GET CIRCULAR IMPORT ISSUES
# IF PUT THIS IN A SEPARATE MODULE. STRICTLY SPEAKING, GEND2D IS MANY PLOTS SO IT HAS
# TO INSPECT ITS OWN KIND ARGUMENT. THIS IS ONE HEADACHE OF SUCH A DESIGN PATTERN!
# Registry mapping a plot "kind" string to its plotting function.
# Arguments appear to be (name, plot function, is-3d flag, display label)
# -- confirm against PlotRegister.add.
PLOTPARSER = PlotRegister()

# Basic plots
PLOTPARSER.add('spec', _genplot, False, 'Spec vs. Variation' )
PLOTPARSER.add('area', areaplot, False, 'Area vs. Variation' )
PLOTPARSER.add('range_timeplot', range_timeplot, False, 'Slice Ranges vs. Variation' )

# Advanced plots
PLOTPARSER.add('contour', _gen2d3d, False, 'Contour Plot' )
PLOTPARSER.add('contour3d',_gen2d3d, True, '3D Contour Plot')
PLOTPARSER.add('wire', _gen2d3d, True, '3D Wireframe')
PLOTPARSER.add('surf', _gen2d3d, True, '3D Surface' )
PLOTPARSER.add('waterfall', _gen2d3d, True, '3D Waterfall' )
PLOTPARSER.add('spec3d', spec3d, True, '3D Wire + Projection' )
if __name__ == '__main__':
    # Manual demo driver (NOTE: Python 2 -- uses `print` statements).
    # Renders example 3d plots from the bundled gold-nanoparticle sample data.
    from matplotlib import rc
    from skspec.data import aunps_glass, aunps_water, solvent_evap

    ts = aunps_glass()
    # ts = ts.nearby[400:700]
    # ts = ts.nearby[1520:1320]
    # ts=ts.iloc[450:500, 50:100]
    # xx,yy = ts.meshgrid()

    print PLOTPARSER

    ##---- First subplot
    #ax2 = fig.add_subplot(1, 2, 1, projection='3d')

    #_gencorr2d(xx, yy, ts,
    #fill=True,
    #title='My baller plot',
    #xlabel=ts.full_varunit,
    #ylabel=ts.full_specunit,
    #contours=20,
    #cbar = True,
    #background=False)

    # Is this the best logic for 2d/3d fig?
    ts = ts.iloc[0:100, :]#.as_varunit('m')
    ts.plot(kind='spec3d', fill=False)
    # Second render exercising more keyword options of the spec3d kind.
    ax = ts.plot(
        kind='spec3d',
        cmap = 'hot',
        outline='cool',
        # outline='je',
        # outline = 'r',
        # cmap='jet',
        cbar=False,
        # c_mesh=5,
        # r_mesh=5,
        # edgecolors='r',
        #edgecolors='jet',
        linewidth=2,
        alpha=.5,
        contours=5,
        xlabel = ts.full_specunit,
        ylabel = ts.full_varunit,
        zlabel = ts.full_iunit)

    ## Done automatically by spec.plot
    # if ts.index[0] > ts.index[-1]:
    # ax2.set_xlim(ax2.get_xlim()[::-1])

    # add_projection(ts, ax=ax2, cmap='cool')

    rc('text', usetex=True)
    print ts.shape
    plt.show()
| StarcoderdataPython |
1739640 | import os,copy,argparse
from collections import OrderedDict
from pypospack.pyposmat.data import PyposmatDataFile
from pypospack.pyposmat.data import PyposmatConfigurationFile
from pypospack.pyposmat.data import PyposmatDataAnalyzer
def get_qoi_targets(o_config):
    """Return an OrderedDict mapping each QOI name to its target value.

    Args:
        o_config (PyposmatConfigurationFile): configuration holding the
            ``qois`` dict, where each value carries a ``'target'`` entry.

    Returns:
        collections.OrderedDict: qoi_name -> target value, in config order.
    """
    # Debug print of the argument type removed; isinstance also accepts
    # subclasses, which the old `type(...) is` check rejected.
    assert isinstance(o_config, PyposmatConfigurationFile)
    return OrderedDict(
        (k, v['target']) for k, v in o_config.qois.items()
    )
def show_qoi_targets(config_fn, data_fn):
    """Print one line per QOI: its name, configured target, and mean value
    observed in the data file ('no value' if the column is absent)."""
    config = PyposmatConfigurationFile()
    config.read(filename=config_fn)

    data = PyposmatDataFile()
    data.read(filename=data_fn)

    for name, target in config.qoi_targets.items():
        try:
            average = data.df[name].mean()
        except KeyError:
            # QOI configured but not present as a column in the data file.
            average = 'no value'
        print("{:20} {:10} {:10}".format(name, target, average))
import matplotlib.pyplot as plt
def make_rug_plot(config_fn,
                  data_fn,
                  ax=None,
                  plot_fn='rugplot.png'):
    """Draw a rug plot of normalized per-QOI errors and save it to disk.

    Args:
        config_fn (str): path to a pyposmat configuration file.
        data_fn (str): path to a pyposmat data file.
        ax (matplotlib.axes.Axes, optional): axes to draw on; a new figure
            is created when None.
        plot_fn (str): output image path.
    """
    o_config = PyposmatConfigurationFile()
    o_config.read(filename=config_fn)

    o_data = PyposmatDataFile()
    o_data.read(filename=data_fn)

    qoi_targets = o_config.qoi_targets
    error_names = o_data.error_names
    qoi_names = o_data.qoi_names

    # Build a "<qoi>.nerr" normalized-error column for each "<qoi>.err" column.
    # NOTE(review): `err/q - q` is an odd normalization -- the usual form is
    # err/target (or (err-q)/q); confirm the intended formula before relying
    # on the absolute scale of the plot.
    df = copy.deepcopy(o_data.df[error_names])
    for qn in qoi_names:
        en = "{}.err".format(qn)
        nen = "{}.nerr".format(qn)
        q = qoi_targets[qn]
        df[nen] = o_data.df[en] / q - q

    (_nrows, _ncols) = o_data.df.shape

    if ax is None:
        fig, ax = plt.subplots(nrows=1, ncols=1)
    else:
        # BUG FIX: `fig` was previously undefined on this path, raising
        # NameError at fig.savefig() whenever a caller supplied `ax`.
        fig = ax.get_figure()

    # One horizontal rug row per QOI; each sample is a '|' marker.
    for iq, qn in enumerate(qoi_names):
        ax.scatter(
            df["{}.nerr".format(qn)],
            _nrows * [iq + 1],
            marker='|',
            s=100.,
            color='k'
        )

    plt.sca(ax)
    plt.yticks(range(len(qoi_names) + 1), [''] + qoi_names)
    fig.savefig(plot_fn)
if __name__ == "__main__":
    # Post-processing driver: for each pyposmat iteration, report how many
    # candidate potentials survived the KDE down-selection, and show the QOI
    # targets versus the surviving candidates' mean values.
    data_directory = 'data'
    plot_directory = 'plots'
    n_iterations = 10
    n_potentials = 30  # NOTE(review): currently unused below

    if not os.path.isdir(plot_directory):
        os.mkdir(plot_directory)

    for i in range(n_iterations):
        config_fn = os.path.join(data_directory,'pyposmat.config.in')
        results_data_fn = os.path.join(data_directory,'pyposmat.results.{}.out'.format(i))
        # KDE file of iteration i+1 holds the survivors of iteration i.
        kde_data_fn = os.path.join(data_directory,'pyposmat.kde.{}.out'.format(i+1))
        plot_fn = os.path.join(plot_directory,"rugplot_{}.png".format(i))

        print(80*'=')
        print("{:^80}".format('ITERATION {}'.format(i)))

        results_data = PyposmatDataFile()
        results_data.read(filename=results_data_fn)
        results_n_rows,results_n_cols = results_data.df.shape

        kde_data = PyposmatDataFile()
        kde_data.read(filename=kde_data_fn)
        kde_n_rows,kde_n_cols=kde_data.df.shape

        # ('candiddates' typo is in the emitted string; left as-is here.)
        print('total_number_of_candidates:{}'.format(results_n_rows))
        print('remaining_number_of_candiddates:{}'.format(kde_n_rows))

        show_qoi_targets(config_fn=config_fn,data_fn=kde_data_fn)

        #make_rug_plot(config_fn=config_fn,
        #              data_fn=kde_data_fn,
        #              plot_fn=plot_fn)
| StarcoderdataPython |
3260114 | <filename>arekit/contrib/networks/core/input/embedding/offsets.py
import logging
from arekit.contrib.networks.embeddings.base import Embedding
logger = logging.getLogger(__name__)
class TermsEmbeddingOffsets(object):
    """Maps vocabulary indices onto rows of the terms-embedding matrix.

    Every index is shifted by one because row 0 is reserved for the empty
    padding placeholder.
    """

    def __init__(self, words_count):
        assert(isinstance(words_count, int))
        self.__vocab_size = words_count

    # region properties

    @property
    def TotalCount(self):
        # One extra slot for the padding row.
        return self.__vocab_size + 1

    # endregion

    # region 'get' methods

    @staticmethod
    def get_word_index(index):
        return index + 1

    # endregion

    @staticmethod
    def extract_vocab(words_embedding):
        """Yield (word, row_index) pairs ordered by row index, padding first."""
        assert(isinstance(words_embedding, Embedding))

        offsets = TermsEmbeddingOffsets(words_count=words_embedding.VocabularySize)

        entries = [(0, 'PADDING')]
        for word, index in words_embedding.iter_vocabulary():
            assert(isinstance(word, str))
            entries.append((offsets.get_word_index(index), word))

        assert(len(entries) == offsets.TotalCount)

        for row, word in sorted(entries, key=lambda item: item[0]):
            yield word, row
| StarcoderdataPython |
1651487 | """
We try to determine if it is harder for a NN to learn from
"""
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Dataset, DataLoader
import os
from typing import Tuple, List
from torch.utils.tensorboard import SummaryWriter
# Default side length (in pixels) for downscaled images.
MNIST_SIZE = 5
def transform_and_save_data_set(size: int, num_workers: int = 8, folder: str = "../data",
                                dataset_name="CIFAR10"):
    """Resize/normalize a torchvision dataset once, sort samples by label,
    cache the tensors to disk, and return the cache file path.

    Subsequent calls with the same (dataset_name, size) return the existing
    cache without re-downloading.
    """
    # NOTE(review): these are the MNIST normalization constants, applied to
    # every dataset_name (including CIFAR10) -- confirm this is intended.
    norm = (0.1307,), (0.3081,)
    size_img = (size, size)
    mnist_path = f"{folder}/{dataset_name}_{size}_dataset"
    if os.path.isfile(mnist_path):
        return mnist_path
    data_set_class = getattr(datasets, dataset_name)
    # batch_size >= split size, so the whole train split arrives as one batch.
    train_loader = torch.utils.data.DataLoader(
        data_set_class(folder, train=True, download=True,
                       transform=transforms.Compose([
                           transforms.Resize(size_img),
                           transforms.ToTensor(),
                           transforms.Normalize(*norm)
                       ])),
        batch_size=60000, shuffle=False, num_workers=num_workers)
    data, targets = next(iter(train_loader))
    # Sort by label so each class occupies a contiguous index range.
    sorted_train_targets, idxs = targets.sort()
    sorted_train_data = data[idxs]
    test_loader = torch.utils.data.DataLoader(
        data_set_class(folder, train=False, transform=transforms.Compose([
            transforms.Resize(size_img),
            transforms.ToTensor(),
            transforms.Normalize(*norm)
        ])),
        batch_size=10000, shuffle=True, num_workers=num_workers)
    data, targets = next(iter(test_loader))
    sorted_test_targets, idxs = targets.sort()
    sorted_test_data = data[idxs]
    torch.save({
        "test": (sorted_test_data, sorted_test_targets),
        "train": (sorted_train_data, sorted_train_targets),
    }, mnist_path)
    return mnist_path
class DataSetMem:
    """In-memory dataset: loads the cached, label-sorted tensors produced by
    transform_and_save_data_set, flattens each image to a vector, and exposes
    (data, target) pairs with an optional fixed pixel permutation."""

    def __init__(self, size: int = 5, device: str = "cpu", train: bool = True, data_name="CIFAR10"):
        data_path = transform_and_save_data_set(size=size, dataset_name=data_name)
        self._data_name = data_name
        self._train = train
        # Both splits are kept resident; `data` aliases `self._all_data`.
        self._all_data = data = torch.load(data_path)
        for ttt in ["train", "test"]:
            data[ttt] = list(data[ttt])
            data[ttt][0] = data[ttt][0].flatten(1).to(device)
            data[ttt][1] = data[ttt][1].to(device)
        crt_type = "train" if train else "test"
        self._data = data[crt_type][0]
        self._targets = data[crt_type][1]
        # NOTE(review): assumes single-channel size x size images; for RGB
        # inputs (e.g. CIFAR10) flatten(1) yields 3*size**2 features, which
        # would not match this value -- confirm.
        self._size = size ** 2
        self._num_classes = 10
        # Identity permutation by default.
        self._permute = torch.arange(self._size)

    def size(self):
        # Number of (flattened) input features per sample.
        return self._size

    def change_data(self, train: bool):
        """Switch the active split, re-applying the current pixel permutation
        to a fresh view of the stored tensors."""
        crt_type = "train" if train else "test"
        self._train = train
        self._data = self._all_data[crt_type][0]
        self._data = self._data[:, self._permute]
        self._targets = self._all_data[crt_type][1]

    def change_permute(self, permute: torch.Tensor):
        # Install a new column permutation and rebuild the active split view.
        self._permute = permute
        self.change_data(self._train)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, idx):
        return self._data[idx], self._targets[idx]
class TaskData:
    """Base wrapper presenting a DataSetMem as a sequence of tasks.

    Subclasses override `get_data` (how batches are embedded into the wider
    task input) and `_set_new_task` (what switching tasks does)."""

    def __init__(self, data_set: DataSetMem, num_tasks: int, batch_size: int,
                 device: str = "cpu", shuffle: bool = True):
        assert num_tasks > 0, "Must have min 1 task"
        self.data_set = data_set
        self._num_tasks = num_tasks
        # Size of one task's input slot.
        self._item_size = data_set.size()
        # Total input width; subclasses typically widen this to size*num_tasks.
        self._size = data_set.size() * 1
        self._num_classes = data_set._num_classes
        self._crt_task = 0
        self._batch_size = batch_size
        self._device = device
        self._shuffle = shuffle
        # Identity pixel permutation (used by some task variants).
        self._permute = torch.arange(data_set.size())

    def size(self):
        return self._size

    @property
    def dataset(self):
        return self.data_set

    def __len__(self):
        return len(self.data_set)

    @property
    def num_classes(self):
        return self._num_classes

    @property
    def num_tasks(self):
        return self._num_tasks

    @property
    def task(self):
        return self._crt_task

    @task.setter
    def task(self, value: int):
        assert 0 <= value < self._num_tasks
        # Let the subclass react (e.g. swap datasets) before recording it.
        self._set_new_task(value)
        self._crt_task = value

    def next_task(self):
        # Advance cyclically through the task sequence.
        new_task = (self._crt_task + 1) % self._num_tasks
        self.task = new_task

    def _set_new_task(self, new_task: int):
        # Hook for subclasses; the base class has no per-task state to change.
        pass

    def get_data(self):
        return self._get_data()

    def _get_data(self):
        """Yield raw (data, target) batches; the trailing partial batch is
        dropped (iteration stops at a multiple of batch_size)."""
        data = self.data_set
        batch_size = self._batch_size
        idx_s = torch.randperm(len(data)) if self._shuffle else torch.arange(len(data))
        for idx in range(0, idx_s.size(0) // batch_size * batch_size, batch_size):
            fetch = idx_s[idx: idx+batch_size]
            yield data[fetch]
class SeqTasksSameTarget(TaskData):
    """Sequential tasks sharing the same targets: each task's input occupies
    its own slot of a wider, otherwise-zero input vector."""

    def __init__(self, data_set: DataSetMem, num_tasks: int, batch_size: int,
                 device: str = "cpu"):
        super(SeqTasksSameTarget, self).__init__(data_set, num_tasks, batch_size, device)
        # One item-sized slot per task.
        self._size = self.data_set.size() * num_tasks

    def __iter__(self):
        return self.get_data()

    def get_data(self):
        """Yield (inputs, targets): the raw batch written into the current
        task's slot, zeros everywhere else."""
        slot = self._item_size
        start, stop = slot * self._crt_task, slot * (self._crt_task + 1)

        for data, target in self._get_data():
            padded = torch.zeros(self._batch_size, self.size(), device=self._device)
            padded[:, start:stop].copy_(data)
            yield padded.detach(), target
class SeqTasksSameTargetNoise(TaskData):
    """Like SeqTasksSameTarget, but the non-active slots are filled with
    randomly drawn dataset samples (structured noise) instead of zeros."""

    def __init__(self, data_set: DataSetMem, num_tasks: int, batch_size: int,
                 device: str = "cpu"):
        super(SeqTasksSameTargetNoise, self).__init__(data_set, num_tasks, batch_size, device)
        # One item-sized slot per task.
        self._size = self.data_set.size() * num_tasks

    def __iter__(self):
        return self.get_data()

    def get_data(self):
        """Yield (inputs, targets): random samples fill the full width, then
        the current task's slot is overwritten with the real batch."""
        task = self._crt_task
        num_tasks = self._num_tasks
        item_size = self._item_size
        in_pos = item_size * task
        fin_pos = item_size * (task + 1)
        batch_size = self._batch_size
        size = self.size()  # NOTE(review): unused below, as is `device`
        device = self._device
        data_set = self.data_set
        for i, (data, target) in enumerate(self._get_data()):
            # Draw batch_size*num_tasks random samples and reshape them so
            # each row spans the whole multi-task input width.
            batch = data_set[torch.randperm(len(data_set))[:batch_size*num_tasks]][0]
            data_storage = batch.view(batch_size, -1)
            data_storage[:, in_pos: fin_pos].copy_(data)
            yield data_storage.detach(), target

    # def _set_new_task(self, new_task: int):
    #     self._permute = torch.randperm(self.data_set.size())
    #     self.data_set.change_permute(self._permute)
class MultiDataset(TaskData):
    """Task sequence where each task draws from a different dataset; task i's
    batch occupies slot i of a wider, zero-padded input vector."""

    def __init__(self, data_sets: List[DataSetMem], num_tasks: int, batch_size: int,
                 device: str = "cpu"):
        self._all_data_sets = data_sets
        # Task 0's dataset is active initially.
        super(MultiDataset, self).__init__(data_sets[0], num_tasks, batch_size, device)
        self._size = self.data_set.size() * num_tasks

    def __iter__(self):
        return self.get_data()

    def get_data(self):
        """Yield (inputs, targets) with the active dataset's batch embedded in
        the current task's slot and zeros elsewhere.

        (A stray debug print of the dataset name and slot bounds, plus unused
        locals, were removed from the original generator.)
        """
        item_size = self._item_size
        in_pos = item_size * self._crt_task
        fin_pos = item_size * (self._crt_task + 1)

        for data, target in self._get_data():
            data_storage = torch.zeros(self._batch_size, self.size(), device=self._device)
            data_storage[:, in_pos: fin_pos].copy_(data)
            yield data_storage.detach(), target

    def _set_new_task(self, new_task: int):
        # Switching task also switches the underlying dataset.
        self.data_set = self._all_data_sets[new_task]
class Net(nn.Module):
    """MLP classifier: input -> fc1 -> optional hidden stack -> fc2 -> log-softmax."""

    def __init__(self, in_size: int, out_size: int, num_hidden_layers: int, hidden_size: int = 256):
        super(Net, self).__init__()
        self.fc1 = nn.Sequential(nn.Linear(in_size, hidden_size), nn.ReLU())

        self.has_hidden = num_hidden_layers > 0
        self.hfc = nn.Sequential()
        for layer in range(num_hidden_layers):
            self.hfc.add_module(f"h{layer}_fc", nn.Linear(hidden_size, hidden_size))
            self.hfc.add_module(f"h{layer}_act", nn.ReLU())

        # NOTE(review): the ReLU here zeroes negative logits before the
        # log-softmax -- unusual, but preserved as-is.
        self.fc2 = nn.Sequential(nn.Linear(hidden_size, out_size), nn.ReLU())

    def forward(self, x):
        """Return per-class log-probabilities for a batch of inputs."""
        flat = x.flatten(1)
        hidden = self.fc1(flat)
        if self.has_hidden:
            hidden = self.hfc(hidden)
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
def train(start_i, args, model, device, train_loader, optimizer, epoch, writer, run=0,
          plt_name="Train"):
    """Train `model` for one pass over `train_loader`, logging the loss every
    `args.log_interval` batches to stdout and TensorBoard.

    Returns the global step counter after the last batch (start_i + last
    batch index), used to keep TensorBoard steps monotonic across epochs.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print(f'Train Epoch: {epoch} | task: {train_loader.task} | Loss: {loss.item():.6f}')
            writer.add_scalars(f'{plt_name}/multi', {f'train_loss_{run}': loss.item()},
                               start_i + batch_idx)
    # NOTE(review): raises NameError if the loader yields no batches, since
    # batch_idx is only bound inside the loop.
    return start_i + batch_idx
def test(args, model, device, test_loader):
    """Evaluate `model` on `test_loader` and return (avg_loss, accuracy_pct).

    `args` is currently unused.  Loss is summed per-sample (reduction='sum')
    and divided by len(test_loader.dataset); note the loader may drop a
    trailing partial batch while the divisor counts the full dataset.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    acc = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        acc))
    return test_loss, acc
def main():
    """Run the continual-learning experiment: for three dataset orderings
    (MNIST/FashionMNIST/CIFAR10), train a Net sequentially on each task for
    several runs, logging train/eval curves to TensorBoard."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=30, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")

    writer = SummaryWriter()

    img_size = 8
    num_tasks = 3
    num_layers = 2
    hidden_size = 256
    base_dataset = DataSetMem(size=img_size, device=device, train=True, data_name="MNIST")
    base_dataset2 = DataSetMem(size=img_size, device=device, train=True, data_name="FashionMNIST")
    base_dataset3 = DataSetMem(size=img_size, device=device, train=True, data_name="CIFAR10")

    # train_loader = SeqTasksSameTarget(base_dataset, num_tasks, args.batch_size, device=device)
    # NOTE(review): the loop variable rebinds `base_dataset` to a list; the
    # ordering tuples are built once up-front, so this shadowing is harmless.
    for base_dataset, plt_name in [
            ([base_dataset, base_dataset2, base_dataset3], "Train_MFC"),
            ([base_dataset2, base_dataset3, base_dataset], "Train_FCM"),
            ([base_dataset3, base_dataset, base_dataset2], "Train_CMF")
    ]:
        # base_dataset = [base_dataset, base_dataset2, base_dataset3]
        # plt_name = "Train_MFC"
        for run in range(5):
            # Distinct seed per repetition for run-to-run variance.
            torch.manual_seed(args.seed + run)
            train_loader = MultiDataset(base_dataset, num_tasks, args.batch_size, device=device)
            model = Net(train_loader.size(), train_loader.num_classes, num_layers,
                        hidden_size=hidden_size).to(device)
            # optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
            optimizer = optim.Adadelta(model.parameters(), lr=0.003, eps=1.e-5)
            # optimizer = optim.Adam(model.parameters(), lr=0.0001)
            # optimizer = optim.RMSprop(model.parameters(), lr=0.00003, eps=1.e-6, alpha=0.99)
            # optimizer = optim.RMSprop(model.parameters(), lr=0.0001, eps=1.e-5, alpha=0.99)
            scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)  # NOTE: step() is commented out below
            start_idx = 0
            for task_id in range(train_loader.num_tasks):
                for epoch in range(1, args.epochs + 1):
                    # Train on the train split, then evaluate on the test split
                    # of the *current* task's dataset.
                    train_loader.data_set.change_data(True)
                    start_idx = train(start_idx, args, model, device, train_loader, optimizer, epoch,
                                      writer, run=run, plt_name=plt_name)
                    train_loader.data_set.change_data(False)
                    loss, acc = test(args, model, device, train_loader)
                    writer.add_scalars(f'{plt_name}/eval_acc', {f"eval_acc_{run}": acc}, start_idx)
                    writer.add_scalars(f'{plt_name}/multi', {f"eval_loss_{run}": loss}, start_idx)
                    # scheduler.step()
                train_loader.next_task()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
def analysis_logs():
    """Load TensorBoard event files from one (hard-coded) run directory into a
    DataFrame and plot the mean eval-accuracy curve per experiment tag."""
    import glob
    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
    import re
    import pandas as pd
    import matplotlib
    import matplotlib.pyplot as plt
    matplotlib.use('TkAgg')
    # NOTE(review): run directory is hard-coded to one specific training run.
    log_files = glob.glob("runs/Feb08_19-53-42_andrei/*/events.*", recursive=True)
    print(len(log_files))
    datas = []
    data = {}
    for fl in log_files:
        path = os.path.basename(os.path.dirname(fl))
        data[path] = dict()
        event_acc = EventAccumulator(fl)
        event_acc.Reload()
        scalars = event_acc.Tags()["scalars"]
        for sc in scalars:
            data[path][sc] = event_acc.Scalars(sc)
            # Extract the trailing run number from the directory name.
            run = int(re.search(r'\d+', path[::-1]).group()[::-1])
            for d_point in data[path][sc]:
                datas.append([path, sc, run, d_point.step, d_point.value])

    df = pd.DataFrame(datas, columns=["path", "log", "run", "step", "value"])
    eval_dfs = df[df.log.apply(lambda x: "eval_acc" in x)]

    fig1, ax1 = plt.subplots()
    for exp, exp_df in eval_dfs.groupby("log"):
        # Average over runs at each step.
        plt_data = exp_df.groupby("step").mean()
        plt_data["value"].plot(label=exp)
    ax1.legend()
    plt.title("mean")
    plt.show()
if __name__ == '__main__':
    # Run the training experiment (use analysis_logs() for post-hoc plots).
    main()
| StarcoderdataPython |
3337372 | from taggit.forms import TagField, TagWidget
from django.forms.widgets import SelectMultiple, Textarea, HiddenInput, TextInput
from django import forms
from django.utils.translation import ugettext as _
from taggit.utils import parse_tags, edit_string_for_tags
import re
from django import forms
class ContactForm(forms.Form):
    """Site contact form with a hidden honeypot field for spam filtering."""

    SUBJECTS = (
        ('Feedback', 'Feedback'),
        ('Bugs', 'Bug Report'),
        ('Support', 'Support'),
    )
    subject = forms.ChoiceField(choices=SUBJECTS)
    message = forms.CharField(widget=Textarea(attrs={'placeholder':'Enter your message here'}))
    sender = forms.EmailField(label='Your Email', widget=TextInput(attrs={'placeholder':'<EMAIL>'}))
    cc_myself = forms.BooleanField(required=False)
    next = forms.CharField(widget=HiddenInput())
    # Honeypot: hidden from humans via inline CSS; bots that fill it are rejected.
    honey = forms.CharField(required=False, label='', widget=TextInput(attrs={'style':'display: none;'}))

    def __init__(self, *args, **kwargs):
        # BUG FIX: previously delegated via super(forms.Form, self), which
        # skips forms.Form in the MRO (it only worked because Form defines no
        # __init__ of its own).  Delegate from this class instead.
        super(ContactForm, self).__init__(*args, **kwargs)

    def clean_honey(self):
        """Reject any submission whose hidden honeypot field was filled in."""
        data = self.cleaned_data['honey']
        if data != '':
            raise forms.ValidationError("You must be a robot!")
        return data
class CustomTagWidget(forms.TextInput):
    """TextInput for tag entry: fixed placeholder, quote-free display values."""
    # TODO: move this widget to a dedicated widgets module (was a debug print).

    def render(self, name, value, attrs=None):
        # NOTE(review): any caller-supplied attrs are overwritten here.
        attrs = {'placeholder': 'add new tags here'}
        if value is not None and not isinstance(value, basestring):
            value = edit_string_for_tags([o.tag for o in value.select_related("tag")])
            # Strip all quotes so multi-word tags render cleanly on the form.
            value = re.sub(r'"', '', value)
        return super(CustomTagWidget, self).render(name, value, attrs)
from pinry.core.utils import format_tags, format_tags_list
class CustomTagField(forms.CharField):
    """CharField that parses comma-separated tags, tolerating a single
    multi-word tag for jquery.tagit compatibility."""
    # TODO: move this field to a dedicated widgets/fields module.
    widget = CustomTagWidget

    def clean(self, value):
        """Return the parsed tag list, or raise ValidationError on bad input.

        (Debug prints of intermediate values were removed.)
        """
        value = super(CustomTagField, self).clean(value)
        if value:
            try:
                # jquery.tagit does not append a trailing comma after a lone
                # tag; without one, django-taggit falls back to splitting on
                # spaces, breaking a single multi-word tag.  Force
                # comma-separation before parsing.
                if ',' not in value:
                    value = value + ','
                return parse_tags(value)
            except ValueError:
                raise forms.ValidationError("Provide one or more comma-separated tags.")
        else:
            return value
class UserTagsWidget(forms.CheckboxSelectMultiple):
    """Checkbox multi-select that de-duplicates its choices before rendering."""
    # TODO: move this widget to a dedicated widgets module (was a debug print).

    def render(self, name, value, attrs=None):
        # Order-preserving de-duplication of the (value, label) choice pairs;
        # seen.add returns None, so the `not seen_add(c)` clause records each
        # pair as a side effect of the filter.
        seen = set()
        seen_add = seen.add
        self.choices = [c for c in self.choices if c not in seen and not seen_add(c)]
        return super(UserTagsWidget, self).render(name, value, attrs)
class UserTagsField(forms.ModelMultipleChoiceField):
    """ModelMultipleChoiceField rendered with the de-duplicating UserTagsWidget."""
    # TODO: move this field to a dedicated widgets/fields module.
    widget = UserTagsWidget

    def clean(self, value):
        # No validation beyond the parent class (debug prints removed).
        return super(UserTagsField, self).clean(value)
| StarcoderdataPython |
1745789 | <gh_stars>10-100
#!/usr/bin/env python3
import os
from typing import Optional
# True when running on an EON device (detected via the /EON marker file).
EON = os.path.isfile('/EON')
class Service:
  """Static description of one messaging service: its port, whether it is
  logged, its expected publish frequency (Hz), and its qlog decimation
  factor (None means the service is not decimated into qlogs)."""

  def __init__(self, port: int, should_log: bool, frequency: float, decimation: Optional[int] = None):
    self.port, self.should_log = port, should_log
    self.frequency, self.decimation = frequency, decimation
# Registry of all messaging services: name -> Service(port, should_log,
# frequency_hz, qlog_decimation).  Consumed by build_header() to generate
# the C services.h header.
service_list = {
  "roadCameraState": Service(8002, True, 20., 1),
  "sensorEvents": Service(8003, True, 100., 100),
  "gpsNMEA": Service(8004, True, 9.),
  "deviceState": Service(8005, True, 2., 1),
  "can": Service(8006, True, 100.),
  "controlsState": Service(8007, True, 100., 100),
  "features": Service(8010, True, 0.),
  "pandaState": Service(8011, True, 2., 1),
  "radarState": Service(8012, True, 20., 5),
  "roadEncodeIdx": Service(8015, True, 20., 1),
  "liveTracks": Service(8016, True, 20.),
  "sendcan": Service(8017, True, 100.),
  "logMessage": Service(8018, True, 0.),
  "liveCalibration": Service(8019, True, 4., 4),
  "androidLog": Service(8020, True, 0., 1),
  "carState": Service(8021, True, 100., 10),
  "carControl": Service(8023, True, 100., 10),
  "longitudinalPlan": Service(8024, True, 20., 2),
  "liveLocation": Service(8025, True, 0., 1),
  "procLog": Service(8031, True, 0.5),
  "gpsLocationExternal": Service(8032, True, 10., 1),
  "ubloxGnss": Service(8033, True, 10.),
  "clocks": Service(8034, True, 1., 1),
  "liveMpc": Service(8035, False, 20.),
  "liveLongitudinalMpc": Service(8036, False, 20.),
  "ubloxRaw": Service(8042, True, 20.),
  "liveLocationKalman": Service(8054, True, 20., 2),
  "uiLayoutState": Service(8060, True, 0.),
  "liveParameters": Service(8064, True, 20., 2),
  "cameraOdometry": Service(8066, True, 20., 5),
  "lateralPlan": Service(8067, True, 20., 2),
  "thumbnail": Service(8069, True, 0.2, 1),
  "carEvents": Service(8070, True, 1., 1),
  "carParams": Service(8071, True, 0.02, 1),
  # Driver-monitoring services run slower on EON hardware.
  "driverCameraState": Service(8072, True, 10. if EON else 20., 1),
  "driverEncodeIdx": Service(8061, True, 10. if EON else 20., 1),
  "driverState": Service(8063, True, 10. if EON else 20., 1),
  "driverMonitoringState": Service(8073, True, 10. if EON else 20., 1),
  "offroadLayout": Service(8074, False, 0.),
  "wideRoadEncodeIdx": Service(8075, True, 20., 1),
  "wideRoadCameraState": Service(8076, True, 20., 1),
  "modelV2": Service(8077, True, 20., 20),
  "managerState": Service(8078, True, 2., 1),

  # Test-only services.
  "testModel": Service(8040, False, 0.),
  "testLiveLocation": Service(8045, False, 0.),
  "testJoystick": Service(8056, False, 0.),
}
def build_header():
  """Render services.h: a C struct array describing every entry of
  service_list (autogenerated; do not edit the header by hand)."""
  lines = [
    "/* THIS IS AN AUTOGENERATED FILE, PLEASE EDIT services.py */\n",
    "#ifndef __SERVICES_H\n",
    "#define __SERVICES_H\n",
    "struct service { char name[0x100]; int port; bool should_log; int frequency; int decimation; };\n",
    "static struct service services[] = {\n",
  ]
  for name, svc in service_list.items():
    should_log = "true" if svc.should_log else "false"
    # The C struct stores ints; None decimation becomes -1 (and the %d
    # format truncates fractional frequencies, as before).
    decimation = -1 if svc.decimation is None else svc.decimation
    lines.append('  { .name = "%s", .port = %d, .should_log = %s, .frequency = %d, .decimation = %d },\n' %
                 (name, svc.port, should_log, svc.frequency, decimation))
  lines.append("};\n")
  lines.append("#endif\n")
  return "".join(lines)
if __name__ == "__main__":
  # Emit the generated C header to stdout.
  print(build_header())
| StarcoderdataPython |
1633972 | import tensorflow as tf
import argparse
import logging
from tqdm import tqdm
import os
import absl.logging
from utils.datasets import PascalSentencesDataset
from multi_hop_attention.hyperparameters import YParams
from multi_hop_attention.loaders import InferenceLoader
from multi_hop_attention.models import MultiHopAttentionModel
from utils.evaluators import Evaluator
from utils.constants import inference_for_recall_at
# Module-level logging configuration and TensorFlow noise suppression.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Silence TF C++ info/warning spam and TF Python logging below ERROR.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
tf.logging.set_verbosity(tf.logging.ERROR)
# https://github.com/abseil/abseil-py/issues/99
absl.logging.set_verbosity("info")
absl.logging.set_stderrthreshold("info")
def inference(
    hparams_path: str,
    images_path: str,
    texts_path: str,
    batch_size: int,
    prefetch_size: int,
    checkpoint_path: str,
) -> None:
    """Performs inference on the Pascal sentences dataset.

    Args:
        hparams_path: The path to the hyperparameters yaml file.
        images_path: A path where all the images are located.
        texts_path: Path where the text doc with the descriptions is.
        batch_size: The batch size to be used.
        prefetch_size: How many batches to prefetch.
        checkpoint_path: Path to a valid model checkpoint.

    Returns:
        None
    """
    hparams = YParams(hparams_path)
    dataset = PascalSentencesDataset(images_path, texts_path)
    # Fetch the test split: image paths with their matching captions.
    test_image_paths, test_captions = dataset.get_test_data()
    logger.info("Test dataset created...")

    # Embedding dimensionality is joint_space per attention hop, concatenated.
    evaluator_test = Evaluator(
        len(test_image_paths), hparams.joint_space * hparams.attn_hops
    )

    logger.info("Test evaluator created...")

    # Resetting the default graph and setting the random seed
    tf.reset_default_graph()
    tf.set_random_seed(hparams.seed)

    loader = InferenceLoader(test_image_paths, test_captions, batch_size, prefetch_size)
    images, captions, captions_lengths = loader.get_next()
    logger.info("Loader created...")

    model = MultiHopAttentionModel(
        images,
        captions,
        captions_lengths,
        hparams.margin,
        hparams.joint_space,
        hparams.num_layers,
        hparams.attn_size,
        hparams.attn_hops,
    )
    logger.info("Model created...")
    logger.info("Inference is starting...")

    with tf.Session() as sess:

        # Initializers
        model.init(sess, checkpoint_path)
        try:
            with tqdm(total=len(test_image_paths)) as pbar:
                # Consume the dataset until the loader is exhausted; the
                # OutOfRangeError below is the normal termination signal.
                while True:
                    loss, lengths, embedded_images, embedded_captions = sess.run(
                        [
                            model.loss,
                            model.captions_len,
                            model.attended_images,
                            model.attended_captions,
                        ]
                    )
                    evaluator_test.update_metrics(loss)
                    evaluator_test.update_embeddings(embedded_images, embedded_captions)
                    pbar.update(len(lengths))
        except tf.errors.OutOfRangeError:
            pass

        # Report retrieval recalls in both directions for each configured k.
        for recall_at in inference_for_recall_at:
            logger.info(
                f"The image2text recall at {recall_at} is: "
                f"{evaluator_test.image2text_recall_at_k(recall_at)}"
            )
        for recall_at in inference_for_recall_at:
            logger.info(
                f"The text2image recall at {recall_at} is: "
                f"{evaluator_test.text2image_recall_at_k(recall_at)}"
            )
def main():
    """Script entry point: parse CLI arguments and run inference."""
    # Without the main sentinel, the code would be executed even if the
    # script were imported as a module.
    cli = parse_args()
    inference(
        cli.hparams_path,
        cli.images_path,
        cli.texts_path,
        cli.batch_size,
        cli.prefetch_size,
        cli.checkpoint_path,
    )
def parse_args():
    """Build the CLI parser for the inference script and parse sys.argv.

    Returns:
        Arguments
    """
    arg_parser = argparse.ArgumentParser(
        "Performs inference on the Pascal sentences dataset."
    )
    arg_parser.add_argument("--hparams_path", type=str,
                            default="hyperparameters/default_hparams.yaml",
                            help="Path to an hyperparameters yaml file.")
    arg_parser.add_argument("--images_path", type=str,
                            default="data/Pascal_sentences_dataset/dataset",
                            help="Path where all images are.")
    arg_parser.add_argument("--texts_path", type=str,
                            default="data/Pascal_sentences_dataset/sentences",
                            help="Path where the captions are.")
    arg_parser.add_argument("--checkpoint_path", type=str, default=None,
                            help="Path to a model checkpoint.")
    arg_parser.add_argument("--batch_size", type=int, default=64,
                            help="The size of the batch.")
    arg_parser.add_argument("--prefetch_size", type=int, default=5,
                            help="The size of prefetch on gpu.")
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Run inference with the parsed command-line options.
    main()
| StarcoderdataPython |
1775673 | <filename>src/utils/transformation_configs.py<gh_stars>1-10
"""
Configurations
@author: <NAME> (y(dot)meng201011(at)gmail(dot)com)
"""
import cv2
from enum import Enum
from PIL import Image
from skimage import filters, morphology, transform
from scipy import ndimage
class TRANSFORMATION(Enum):
    """Top-level image-transformation categories supported by the pipeline."""
    CLEAN = 'clean'
    ROTATE = 'rotate'
    SHIFT = 'shift'
    FLIP = 'flip'
    AFFINE_TRANS = 'affine'
    MORPH_TRANS = 'morph'
    AUGMENT = 'augment'
    CARTOON = 'cartoon'
    QUANTIZATION = 'quant'
    DISTORTION = 'distort'
    NOISE = 'noise'
    FILTER = 'filter'
    COMPRESSION = 'compress'
    DENOISE = 'denoise'
    GEOMETRIC = 'geometric'
    SEGMENTATION = 'segment'
def get_flip_direction(flip_trans):
    """Map an OpenCV-style flip code (0, 1 or -1) to its readable name.

    Raises KeyError for any other code.
    """
    direction_names = {
        0: 'AROUND_X_AXIS',
        1: 'AROUND_Y_AXIS',
        -1: 'AROUND_BOTH_AXIS',
    }
    return direction_names[flip_trans]
class AUGMENT_TRANSFORMATIONS(Enum):
    """Augmentation / whitening variants (Keras ImageDataGenerator style)."""
    SAMPLEWISE_AUGMENTATION = 'samplewise_std_norm'
    FEATURE_AUTMENTATION = 'feature_std_norm'  # NOTE(review): 'AUTMENTATION' typo is in the public name
    ZCA_WHITENING = 'zca_whitening'
    PCA_WHITENING = 'pca_whitening'
class DISTORT_TRANSFORMATIONS(Enum):
    """Distortion variants: axis-wise warps plus pixelation and tone changes."""
    X = 'x'
    Y = 'y'
    PIXELATE = 'pixelate'
    CONTRAST = 'contrast'
    BRIGHTNESS = 'brightness'
class NOISE_TRANSFORMATIONS(Enum):
    """Noise injection modes; values appear to match skimage.util.random_noise
    mode strings -- confirm at the call site."""
    GAUSSIAN = 'gaussian'
    LOCALVAR = 'localvar'
    POISSON = 'poisson'
    SALT = 'salt'
    PEPPER = 'pepper'
    SALTNPEPPER = 's&p'
    SPECKLE = 'speckle'
class DENOISE_TRANSFORMATIONS(Enum):
    """Denoising algorithm variants."""
    WAVELET = 'wavelet'
    TV_CHAMBOLLE = 'tv_chambolle'
    TV_BREGMAN = 'tv_bregman'
    BILATERAL = 'bilateral' # TODO: bug fix
    NL_MEANS = 'nl_means'
    NL_MEANS_FAST = 'nl_means_fast'
class MORPH_TRANSFORMATIONS(Enum):
    """Morphological operations (see get_morph_op for the cv2 mapping)."""
    EROSION = 'erosion'
    DILATION = 'dilation'
    OPENING = 'opening'
    CLOSING = 'closing'
    GRADIENT = 'gradient'
def get_morph_op(morph_trans):
    """Translate a MORPH_TRANSFORMATIONS value into its OpenCV morphology op.

    Raises KeyError for an unknown value.
    """
    op_by_name = {
        MORPH_TRANSFORMATIONS.EROSION.value: cv2.MORPH_ERODE,
        MORPH_TRANSFORMATIONS.DILATION.value: cv2.MORPH_DILATE,
        MORPH_TRANSFORMATIONS.OPENING.value: cv2.MORPH_OPEN,
        MORPH_TRANSFORMATIONS.CLOSING.value: cv2.MORPH_CLOSE,
        MORPH_TRANSFORMATIONS.GRADIENT.value: cv2.MORPH_GRADIENT,
    }
    return op_by_name[morph_trans]
class FLIP_DIRECTION(Enum):
    """Flip axis codes (values follow the cv2.flip flipCode convention)."""
    AROUND_X_AXIS = 0
    AROUND_Y_AXIS = 1
    AROUND_BOTH_AXIS = -1
class FILTER_TRANSFORMATION(Enum):
    """Filtering variants (see get_filter_op for the callable mapping)."""
    SOBEL = 'sobel'
    GAUSSIAN = 'gaussian'
    RANK = 'rank'
    MEDIAN = 'median'
    MINIMUM = 'minimum'
    MAXIMUM = 'maximum'
    ENTROPY = 'entropy'
    ROBERTS = 'roberts'
    SCHARR = 'scharr'
    PREWITT = 'prewitt'
    # NOTE(review): 'heijering' is a misspelling of 'meijering'; the value is
    # kept since external callers may pass the existing string.
    MEIJERING = 'heijering' # TODO: bug fix
    SATO = 'sato' # TODO: bug fix
    FRANGI = 'frangi' # TODO: bug fix
    HESSIAN = 'hessian' # TODO: bug fix
    # NOTE(review): 'skelentonize' is a misspelling of 'skeletonize'; kept
    # for the same compatibility reason.
    SKELETONIZE = 'skelentonize' # TODO: bug fix
    THIN = 'thin' # TODO: bug fix
def get_filter_op(filter):
    """Resolve a FILTER_TRANSFORMATION value to the scipy/skimage callable
    implementing it.

    Raises KeyError for unknown names.  NOTE(review): the *filter* parameter
    shadows the builtin of the same name; kept because renaming it would
    change the keyword API.
    """
    return {
        FILTER_TRANSFORMATION.SOBEL.value: filters.sobel,
        FILTER_TRANSFORMATION.GAUSSIAN.value: ndimage.gaussian_filter,
        FILTER_TRANSFORMATION.RANK.value: ndimage.rank_filter,
        FILTER_TRANSFORMATION.MEDIAN.value: ndimage.median_filter,
        FILTER_TRANSFORMATION.MINIMUM.value: ndimage.minimum_filter,
        FILTER_TRANSFORMATION.MAXIMUM.value: ndimage.maximum_filter,
        FILTER_TRANSFORMATION.ENTROPY.value: filters.rank.entropy,
        FILTER_TRANSFORMATION.ROBERTS.value: filters.roberts,
        FILTER_TRANSFORMATION.SCHARR.value: filters.scharr,
        FILTER_TRANSFORMATION.PREWITT.value: filters.prewitt,
        FILTER_TRANSFORMATION.MEIJERING.value: filters.meijering,
        FILTER_TRANSFORMATION.SATO.value: filters.sato,
        FILTER_TRANSFORMATION.FRANGI.value: filters.frangi,
        FILTER_TRANSFORMATION.HESSIAN.value: filters.hessian,
        FILTER_TRANSFORMATION.SKELETONIZE.value: morphology.skeletonize,
        FILTER_TRANSFORMATION.THIN.value: morphology.thin,
    }[filter]
# Neighbourhood statistics for cv2 adaptive thresholding (see
# get_cartoon_adpative_method).
class CARTOON_ADAPTIVE_METHODS(Enum):
    MEAN = 'mean'
    GAUSSIAN = 'gaussian'
# Threshold styles mapped to cv2.THRESH_* constants by
# get_cartoon_thresh_method.
class CARTOON_THRESH_METHODS(Enum):
    BINARY = 'thresh_binary'
    BINARY_INV = 'thresh_binary_inv'
    TRIANGLE = 'thresh_triangle'
    MASK = 'thresh_mask'
    TRUNC = 'thresh_trunc'
    OTSU = 'thresh_otsu'
    TOZERO = 'thresh_tozero'
    TOZERO_INV = 'thresh_tozero_inv'
def get_cartoon_adpative_method(adaptive_method):
    """Map a CARTOON_ADAPTIVE_METHODS value to its cv2 ADAPTIVE_THRESH_*
    constant (KeyError on unknown values).

    NOTE(review): "adpative" in the function name is a typo for "adaptive";
    kept because the name is public API.
    """
    return {
        CARTOON_ADAPTIVE_METHODS.MEAN.value: cv2.ADAPTIVE_THRESH_MEAN_C,
        CARTOON_ADAPTIVE_METHODS.GAUSSIAN.value: cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    }[adaptive_method]
def get_cartoon_thresh_method(thresh_method):
    """Map a CARTOON_THRESH_METHODS value to its cv2.THRESH_* constant
    (KeyError on unknown values)."""
    return {
        CARTOON_THRESH_METHODS.BINARY.value: cv2.THRESH_BINARY,
        CARTOON_THRESH_METHODS.BINARY_INV.value: cv2.THRESH_BINARY_INV,
        CARTOON_THRESH_METHODS.TRIANGLE.value: cv2.THRESH_TRIANGLE,
        CARTOON_THRESH_METHODS.MASK.value: cv2.THRESH_MASK,
        CARTOON_THRESH_METHODS.TRUNC.value: cv2.THRESH_TRUNC,
        CARTOON_THRESH_METHODS.OTSU.value: cv2.THRESH_OTSU,
        CARTOON_THRESH_METHODS.TOZERO.value: cv2.THRESH_TOZERO,
        CARTOON_THRESH_METHODS.TOZERO_INV.value: cv2.THRESH_TOZERO_INV,
    }[thresh_method]
# Resampling filter names for the distort transformation.
# NOTE(review): "MEHTOD" is a typo for "METHOD"; the class name is public
# API, so it is kept as-is.
class DISTORT_RESAMPLE_MEHTOD(Enum):
    NEAREST = 'nearest'
    LINEAR = 'linear'
    NORMAL = 'normal'
    BOX = 'box'
def get_distort_resample(resample):
    """Map a DISTORT_RESAMPLE_MEHTOD value to a PIL.Image resampling constant.

    NOTE(review): Image.LINEAR is a legacy alias and Image.NORMAL does not
    appear in current Pillow releases -- verify these attributes exist for
    the pinned Pillow version before use.
    """
    return {
        DISTORT_RESAMPLE_MEHTOD.NEAREST.value: Image.NEAREST,
        DISTORT_RESAMPLE_MEHTOD.LINEAR.value: Image.LINEAR,
        DISTORT_RESAMPLE_MEHTOD.NORMAL.value: Image.NORMAL,
        DISTORT_RESAMPLE_MEHTOD.BOX.value: Image.BOX,
    }[resample]
# Container formats for the compression transformation (file extensions as
# understood by cv2 encoding).
class COMPRESS_FORMAT(Enum):
    JPEG = '.jpeg'
    JPG = '.jpg'
    PNG = '.png'
def get_compress_encoder(format, rate):
    """Build the cv2 encode-parameter list for the given container format.

    *rate* is passed straight through; PNG and JPEG interpret it differently
    (compression level vs. quality -- see the cv2 IMWRITE_* flag docs).  The
    *format* parameter shadows the builtin of the same name but is kept for
    API compatibility.
    """
    if format != COMPRESS_FORMAT.PNG.value:
        return [int(cv2.IMWRITE_JPEG_QUALITY), rate]
    return [cv2.IMWRITE_PNG_COMPRESSION, rate]
# Geometric transforms backed by skimage.transform (see get_geometric_op).
class GEOMETRIC_TRANSFORMATIONS(Enum):
    SWIRL = 'swirl'
    RADON = 'radon'
    IRADON = 'iradon'
    IRADON_SART = 'iradon_sart'
def get_geometric_op(geo_trans):
    """Resolve a GEOMETRIC_TRANSFORMATIONS value to the skimage.transform
    callable implementing it (KeyError on unknown values)."""
    return {
        GEOMETRIC_TRANSFORMATIONS.SWIRL.value: transform.swirl,
        GEOMETRIC_TRANSFORMATIONS.RADON.value: transform.radon,
        GEOMETRIC_TRANSFORMATIONS.IRADON.value: transform.iradon,
        GEOMETRIC_TRANSFORMATIONS.IRADON_SART.value: transform.iradon_sart,
    }[geo_trans]
# Segmentation approaches for the 'segment' transformation.
class SEGMENT_TRANSFORMATIONS(Enum):
    GRADIENT = 'gradient'
    WATERSHED = 'watershed'
| StarcoderdataPython |
3343984 | <gh_stars>0
""" string-ID-based functions
"""
from .ipybel.smiles import canonical as canonical_smiles
from .ipybel.smiles import number_of_atoms as number_of_atoms_from_smiles
from .ipybel.smiles import formula as formula_from_smiles
from .ipybel.smiles import geometry as geometry_from_smiles
from .ipybel.smiles import xyz_string as xyz_string_from_smiles
from .parse_help import DIGIT
from .parse_help import one_or_more
from .parse_help import named_capture
from .parse_help import group_dictionary
def canonical(sid):
    """Return *sid* rewritten with its SMILES portion canonicalized.

    The multiplicity suffix ('_m<n>') is preserved unchanged.
    """
    mult = multiplicity(sid)
    smi_canonical = canonical_smiles(smiles(sid))
    return '{:s}_m{:d}'.format(smi_canonical, mult)
def smiles(sid):
    """Return the SMILES portion of a species ID (text before the first '_')."""
    return sid.split('_')[0]
def multiplicity(sid):
    """Extract the spin multiplicity encoded as the '_m<digits>' suffix of *sid*."""
    pattern = '_m' + named_capture(one_or_more(DIGIT), name='mult')
    captures = group_dictionary(pattern, sid)
    return int(captures['mult'])
def spin_count(sid):
    """ 2 * S = multiplicity - 1
    """
    # Number of unpaired electrons: 0 for a singlet, 1 for a doublet, etc.
    return multiplicity(sid) - 1
def formula(sid):
    """ molecular formula
    """
    # Derived from the SMILES part only; the multiplicity suffix is ignored.
    smi = smiles(sid)
    fml = formula_from_smiles(smi)
    return fml
def geometry(sid):
    """ molecular geometry
    """
    smi = smiles(sid)
    mgeo = geometry_from_smiles(smi)
    return mgeo
def xyz_string(sid):
    """ .xyz string
    """
    smi = smiles(sid)
    dxyz = xyz_string_from_smiles(smi)
    return dxyz
def number_of_atoms(sid):
    """ number of atoms
    """
    # NOTE(review): the local is named `fml` (copy-paste from formula()),
    # but the value is an atom count.
    smi = smiles(sid)
    fml = number_of_atoms_from_smiles(smi)
    return fml
def reaction_identifier(rct_sids, prd_sids):
    """Build a SMIRKS-style reaction ID ('r1.r2>>p1.p2') from reactant and
    product species IDs."""
    sides = ('.'.join(rct_sids), '.'.join(prd_sids))
    return '>>'.join(sides)
def canonical_reaction_identifier(rid):
    """ canonical SMILES for a reaction identifier
    """
    # Canonicalize every species on both sides, then reassemble the '>>' string.
    rct_sids, prd_sids = split_reaction_identifier(rid)
    rct_sids = tuple(map(canonical, rct_sids))
    prd_sids = tuple(map(canonical, prd_sids))
    return reaction_identifier(rct_sids, prd_sids)
def split_reaction_identifier(rid):
    """Split a 'r1.r2>>p1.p2' reaction ID into (reactant_sids, product_sids)
    tuples of species IDs."""
    rct_str, prd_str = rid.split('>>')
    return (tuple(rct_str.split('.')), tuple(prd_str.split('.')))
def reaction_spin_counts(rid):
    """Return spin counts (2*S = multiplicity - 1) for the species on each side.

    Fix: the old docstring and local names ("multiplicities") contradicted the
    values, which come from spin_count(), i.e. multiplicity minus one.

    Returns a (reactant_spin_counts, product_spin_counts) pair of tuples.
    """
    rct_sids, prd_sids = split_reaction_identifier(rid)
    rct_scts = tuple(map(spin_count, rct_sids))
    prd_scts = tuple(map(spin_count, prd_sids))
    return (rct_scts, prd_scts)
def is_radical_radical(rid):
    """ determine if this is a radical-radical abstraction
    """
    # True when either side has two or more species that are all open-shell
    # (positive spin count).
    ret = any((all(sct > 0 for sct in scts) and len(scts) > 1)
              for scts in reaction_spin_counts(rid))
    return ret
def is_spin_balanced(rid):
    """ determine if this reaction has equal total spin on both sides
    """
    # Compares the summed spin counts of reactants and products.
    rct_scts, prd_scts = reaction_spin_counts(rid)
    return sum(rct_scts) == sum(prd_scts)
| StarcoderdataPython |
147273 | <gh_stars>10-100
def main():
    """Smoke-test the RPi.GPIO API: exercises the setup()/cleanup()/output()
    call variants, then toggles a set of output pins once per second until
    interrupted."""
    import RPi.GPIO as GPIO
    import time
    try:
        print(GPIO.VERSION)
        print(GPIO.RPI_INFO)
        # BOARD numbering: pin numbers refer to physical header positions.
        GPIO.setmode(GPIO.BOARD)
        GPIO.setwarnings(False)
        # Deliberately exercise every setup() signature: single pin, pin with
        # an initial level, input pin, list of pins, and tuple of pins.
        GPIO.setup(12, GPIO.OUT)
        GPIO.setup(11, GPIO.OUT, initial=GPIO.HIGH)
        GPIO.setup(13, GPIO.IN)
        GPIO.setup(15, GPIO.OUT)
        GPIO.setup([16, 18], GPIO.OUT)
        GPIO.setup((8, 10), GPIO.OUT)
        GPIO.setup([21, 19], GPIO.IN)
        GPIO.setup(3, GPIO.IN)
        GPIO.cleanup(3)
        GPIO.setup([5, 7], GPIO.OUT)
        GPIO.cleanup([5, 7])
        GPIO.setup([5, 7], GPIO.IN)
        GPIO.cleanup((5, 7))
        # Blink forever, covering the output() overloads (constant, int,
        # bool, list target, tuple target, per-pin value sequences).
        while True:
            GPIO.output(12, GPIO.HIGH)
            GPIO.output(11, 0)
            GPIO.output(15, True)
            GPIO.output([16, 18], not GPIO.input(16))
            GPIO.output((8, 10), [GPIO.HIGH, GPIO.LOW])
            time.sleep(1)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(11, 1)
            GPIO.output(15, False)
            GPIO.output((16, 18), not GPIO.input(16))
            GPIO.output([8, 10], (GPIO.LOW, GPIO.HIGH))
            time.sleep(1)
    finally:
        # Always release the pins, even on KeyboardInterrupt.
        GPIO.cleanup()
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    main()
| StarcoderdataPython |
3312842 | <gh_stars>1-10
import requests
"""
url = 'https://viacep.com.br/ws/'
cep = '30140071'
formato = '/json/'
r = requests.get(url + cep + formato)
if(r.status_code == 200):
    print('JSON: ', r.json())
else:
    print('Requisição mal sucedida')
"""
# Query the ViaCEP web service for a Brazilian postal code (CEP), requesting
# the XML representation.  The triple-quoted block above is the disabled JSON
# variant of the same request.
url = 'https://viacep.com.br/ws/'  # invoke a web service
cep = '30140071'
formato = '/xml/'
# NOTE(review): no timeout is passed, so this call can block indefinitely if
# the service hangs -- consider requests.get(..., timeout=...).
r = requests.get(url + cep + formato)
if(r.status_code == 200):
    print('XML: ', r.text)  # which returns an XML document
else:
    print('Requisição mal sucedida')
4809155 | from diary.views import NoteListView, NoteCreateView, note_and_next
from django.urls import path
# Diary app routes: note list at the root, a creation form, and a helper view
# that saves a note and advances to the next one.
urlpatterns = [
    path('', NoteListView.as_view(), name='note-list'),
    path('note/create', NoteCreateView.as_view(), name='note-create'),
    path('note/<pk>/and_next', note_and_next, name='note_and_next'),
]
1644040 | import ncvis
# Construct an NCVis embedder; no data is fitted here.  n_threads=-1
# presumably means "use all cores" -- confirm against the ncvis docs.
vis = ncvis.NCVis(n_neighbors=15, M=16, ef_construction=200, n_init_epochs=20, n_epochs=50, min_dist=0.4, n_threads=-1, distance='euclidean')
3269464 | from .kernel_model import KernelNet
from .unet_model import UNet
| StarcoderdataPython |
43098 | import tempfile
import pandas as pd
from pg2pd import Pg2Pd
def test_make_df_1(pg_conn):
    """Test of main Postgres binary data to Pandas dataframe pipeline.

    This tests an integer and varchar.
    """
    import os
    cursor = pg_conn.cursor()
    # Copy binary data to a tempfile.  mkstemp() returns (fd, path); close
    # the descriptor immediately so it is not leaked -- the previous
    # `tempfile.mkstemp()[1]` discarded the fd while leaving it open.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    query = 'COPY test1 TO STDOUT BINARY;'
    with open(path, 'wb') as f:
        cursor.copy_expert(sql=query, file=f)
    pg_conn.commit()
    pg = Pg2Pd(path, ['integer', 'varchar'], ['id', 'text'])
    df = pg.make_df()
    assert df['id'].tolist() == [42, 25, 60]
    assert df['text'].tolist()[:2] == ['Some cool data', 'Even more cool data']
    # Note that NaN != NaN, so we can do this assertion instead
    assert pd.isna(df['text'].tolist()[2])
def test_make_df_2(pg_conn):
    """Test of main Postgres binary data to Pandas dataframe pipeline.

    This tests boolean data.
    """
    import os
    cursor = pg_conn.cursor()
    # Copy binary data to a tempfile; close the mkstemp() descriptor so it is
    # not leaked (the path is reopened below for the COPY output).
    fd, path = tempfile.mkstemp()
    os.close(fd)
    query = 'COPY test2 TO STDOUT BINARY;'
    with open(path, 'wb') as f:
        cursor.copy_expert(sql=query, file=f)
    pg_conn.commit()
    pg = Pg2Pd(path, ['boolean', 'boolean'], ['t', 'f'])
    df = pg.make_df()
    assert df['t'].tolist() == [True]
    assert df['f'].tolist() == [False]
120469 | from opendc.models.experiment import Experiment
from opendc.util import exceptions
from opendc.util.rest import Response
def GET(request):
    """Get this Experiment.

    Path params: experimentId (int).  Responds 400 on bad parameters, 404
    when the experiment does not exist, 403 without VIEW permission, and 200
    with the serialized experiment otherwise.
    """
    try:
        request.check_required_parameters(
            path={
                'experimentId': 'int'
            }
        )
    except exceptions.ParameterError as e:
        return Response(400, e.message)
    # Instantiate an Experiment from the database
    experiment = Experiment.from_primary_key((request.params_path['experimentId'],))
    # Make sure this Experiment exists
    if not experiment.exists():
        return Response(404, '{} not found.'.format(experiment))
    # Make sure this user is authorized to view this Experiment
    if not experiment.google_id_has_at_least(request.google_id, 'VIEW'):
        return Response(403, 'Forbidden from retrieving {}.'.format(experiment))
    # Return this Experiment
    experiment.read()
    return Response(
        200,
        'Successfully retrieved {}.'.format(experiment),
        experiment.to_JSON()
    )
def PUT(request):
    """Update this Experiment's Path, Trace, Scheduler, and/or name.

    Requires EDIT permission.  Responds 400 on parameter or foreign-key
    errors, 404 when missing, 403 when forbidden, and 200 with the updated
    record otherwise.
    """
    # Make sure required parameters are there
    try:
        request.check_required_parameters(
            path={
                'experimentId': 'int'
            },
            body={
                'experiment': {
                    'pathId': 'int',
                    'traceId': 'int',
                    'schedulerName': 'string',
                    'name': 'string'
                }
            }
        )
    except exceptions.ParameterError as e:
        return Response(400, e.message)
    # Instantiate an Experiment from the database
    experiment = Experiment.from_primary_key((request.params_path['experimentId'],))
    # Make sure this Experiment exists
    if not experiment.exists():
        return Response(404, '{} not found.'.format(experiment))
    # Make sure this user is authorized to edit this Experiment
    if not experiment.google_id_has_at_least(request.google_id, 'EDIT'):
        return Response(403, 'Forbidden from updating {}.'.format(experiment))
    # Update this Experiment
    experiment.path_id = request.params_body['experiment']['pathId']
    experiment.trace_id = request.params_body['experiment']['traceId']
    experiment.scheduler_name = request.params_body['experiment']['schedulerName']
    experiment.name = request.params_body['experiment']['name']
    try:
        experiment.update()
    except exceptions.ForeignKeyError:
        return Response(400, 'Foreign key error.')
    # Return this Experiment
    return Response(
        200,
        'Successfully updated {}.'.format(experiment),
        experiment.to_JSON()
    )
def DELETE(request):
    """Delete this Experiment.

    Requires EDIT permission.  Responds 400 on bad parameters, 404 when
    missing, 403 when forbidden, and 200 with the deleted record otherwise.
    """
    # Make sure required parameters are there
    try:
        request.check_required_parameters(
            path={
                'experimentId': 'int'
            }
        )
    except exceptions.ParameterError as e:
        return Response(400, e.message)
    # Instantiate an Experiment and make sure it exists
    experiment = Experiment.from_primary_key((request.params_path['experimentId'],))
    if not experiment.exists():
        return Response(404, '{} not found.'.format(experiment))
    # Make sure this user is authorized to delete this Experiment
    if not experiment.google_id_has_at_least(request.google_id, 'EDIT'):
        return Response(403, 'Forbidden from deleting {}.'.format(experiment))
    # Delete and return this Experiment
    experiment.delete()
    return Response(
        200,
        'Successfully deleted {}.'.format(experiment),
        experiment.to_JSON()
    )
| StarcoderdataPython |
29457 | <filename>tests/test_get_google_streetview.py
import os
import pandas as pd
from open_geo_engine.src.get_google_streetview import GetGoogleStreetView
def test_get_google_streetview():
    """Exercise GetGoogleStreetView helpers on a small hand-built dataframe.

    NOTE(review): reads GOOGLE_DEV_API_KEY from the environment and issues a
    live Street View metadata request, so this is an integration test rather
    than a unit test.
    """
    size = "600x300"
    heading = "151.78"
    pitch = "-0.76"
    key = os.environ.get("GOOGLE_DEV_API_KEY")
    image_folder = "tests/test_data"
    links_file = "tests/test_data/streetview_links.txt"
    metadata_file = "tests/test_data/test_metadata.json"
    place = "Parque_El_Retiro_Madrid"
    meta_base = "https://maps.googleapis.com/maps/api/streetview/metadata?"
    # Two rows at the same coordinate, differing only in timestamp/band values.
    satellite_data_df = pd.DataFrame(
        {
            "longitude": [-3.683317243711068, -3.683317243711068],
            "latitude": [40.41498005371624, 40.41498005371624],
            "time": [1578653746335, 1580036142137],
            "datetime": ["2020-01-10 10:55:46.335,", "2020-01-26 10:55:42.137"],
            "B4": [7053, 6869],
            "B3": [7177, 7069],
            "B2": [7825, 7720],
        }
    )
    get_google_streetview = GetGoogleStreetView(
        size,
        heading,
        pitch,
        key,
        image_folder,
        links_file,
        metadata_file,
        place,
        meta_base,
    )
    assert (
        get_google_streetview.generate_lat_lon_string(satellite_data_df)
        == "40.41498005371624,-3.683317243711068"
    )
    lat_lon_str = get_google_streetview.generate_lat_lon_string(satellite_data_df)
    params = get_google_streetview._generate_params(lat_lon_str)
    satellite_data_df["lat_lon_str"] = get_google_streetview._join_lat_lon(satellite_data_df)
    assert satellite_data_df["lat_lon_str"][0] == str(lat_lon_str)
    # Depends on the live API returning HTTP 200 for this location.
    assert (
        get_google_streetview.add_metadata_to_satellite_df(satellite_data_df)["metadata"][0]
        == "<Response [200]>"
    )
    params.pop("key")
    assert params == {
        "size": "600x300",
        "location": "40.41498005371624,-3.683317243711068",
        "pitch": "-0.76",
    }
    satellite_streetview_data_df = get_google_streetview.add_links_to_satellite_df(
        satellite_data_df
    )
    assert satellite_streetview_data_df["latitude"][0] == 40.41498005371624
    assert satellite_streetview_data_df["longitude"][0] == -3.683317243711068
    assert len(satellite_streetview_data_df.columns) == 10
66151 | <reponame>ze-nian/yt_trending_data<filename>scraper.py
import requests, sys, time, os, argparse,datetime
# List of simple to collect features
snippet_features = ["title",
"publishedAt",
"channelId",
"channelTitle",
"categoryId"]
# Any characters to exclude, generally these are things that become problematic in CSV files
unsafe_characters = ['\n', '"']
# Used to identify columns, currently hardcoded order
header = ["video_id"] + snippet_features + ["trending_date", "tags", "view_count", "likes", "dislikes",
"comment_count", "thumbnail_link", "comments_disabled",
"ratings_disabled", "description"]
def setup(api_path, code_path):
with open(api_path, 'r') as file:
api_key = file.readline()
with open(code_path) as file:
country_codes = [x.rstrip() for x in file]
return api_key, country_codes
def prepare_feature(feature):
    """Remove CSV-breaking characters (module-level `unsafe_characters`) and
    wrap the value in double quotes."""
    text = str(feature)
    for ch in unsafe_characters:
        text = text.replace(ch, "")
    return '"{}"'.format(text)
def api_request(page_token, country_code):
    """Fetch one page of the most-popular-videos feed for *country_code*.

    *page_token* is "&" for the first page or "&pageToken=<token>&" for
    subsequent pages.  Exits the process on HTTP 429 (API rate-limit ban).
    """
    # Fix: "&reg" had been mojibake-decoded to "®" (HTML entity), corrupting
    # the regionCode query parameter; restored the literal "&regionCode".
    request_url = f"https://www.googleapis.com/youtube/v3/videos?part=id,statistics,snippet{page_token}chart=mostPopular&regionCode={country_code}&maxResults=50&key={api_key}"
    request = requests.get(request_url)
    if request.status_code == 429:
        print("Temp-Banned due to excess requests, please wait and continue later")
        sys.exit()
    return request.json()
def get_tags(tags_list):
    """Join a list of tags with '|' and CSV-sanitize the result."""
    # Takes a list of tags, prepares each tag and joins them into a string by the pipe character
    return prepare_feature("|".join(tags_list))
def get_videos(items):
    """Convert API video items into CSV lines matching the global `header` order."""
    lines = []
    for video in items:
        comments_disabled = False
        ratings_disabled = False
        # We can assume something is wrong with the video if it has no statistics, often this means it has been deleted
        # so we can just skip it
        if "statistics" not in video:
            continue
        # A full explanation of all of these features can be found on the GitHub page for this project
        video_id = prepare_feature(video['id'])
        # Snippet and statistics are sub-dicts of video, containing the most useful info
        snippet = video['snippet']
        statistics = video['statistics']
        # This list contains all of the features in snippet that are 1 deep and require no special processing
        features = [prepare_feature(snippet.get(feature, "")) for feature in snippet_features]
        # The following are special case features which require unique processing, or are not within the snippet dict
        description = snippet.get("description", "")
        thumbnail_link = snippet.get("thumbnails", dict()).get("default", dict()).get("url", "")
        trending_date = time.strftime("%y.%d.%m")
        tags = get_tags(snippet.get("tags", ["[none]"]))
        view_count = statistics.get("viewCount", 0)
        # This may be unclear, essentially the way the API works is that if a video has comments or ratings disabled
        # then it has no feature for it, thus if they don't exist in the statistics dict we know they are disabled
        if 'likeCount' in statistics and 'dislikeCount' in statistics:
            likes = statistics['likeCount']
            dislikes = statistics['dislikeCount']
        else:
            ratings_disabled = True
            likes = 0
            dislikes = 0
        if 'commentCount' in statistics:
            comment_count = statistics['commentCount']
        else:
            comments_disabled = True
            comment_count = 0
        # Compiles all of the various bits of info into one consistently formatted line
        line = [video_id] + features + [prepare_feature(x) for x in [trending_date, tags, view_count, likes, dislikes,
                                                                     comment_count, thumbnail_link, comments_disabled,
                                                                     ratings_disabled, description]]
        lines.append(",".join(line))
    return lines
def get_pages(country_code, next_page_token="&"):
    """Collect CSV lines for every page of the trending feed for one country."""
    country_data = []
    # Because the API uses page tokens (which are literally just the same function of numbers everywhere) it is much
    # more inconvenient to iterate over pages, but that is what is done here.
    while next_page_token is not None:
        # A page of data i.e. a list of videos and all needed data
        video_data_page = api_request(next_page_token, country_code)
        # Get the next page token and build a string which can be injected into the request with it, unless it's None,
        # then let the whole thing be None so that the loop ends after this cycle
        next_page_token = video_data_page.get("nextPageToken", None)
        next_page_token = f"&pageToken={next_page_token}&" if next_page_token is not None else next_page_token
        # Get all of the items as a list and let get_videos return the needed features
        items = video_data_page.get('items', [])
        country_data += get_videos(items)
    return country_data
def write_to_file(country_code, country_data):
    """Write one country's CSV rows to a timestamped file under the global
    `output_dir` (created on demand)."""
    print(f"Writing {country_code} data to file...")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # utf_8_sig adds a BOM so Excel opens the file with the right encoding.
    with open(f"{output_dir}/{time.strftime('%y.%m.%d.%I.%M.%p')}_{country_code}_videos.csv", "w+", encoding='utf_8_sig') as file:
        for row in country_data:
            file.write(f"{row}\n")
def get_data():
    """Scrape the trending feed for every configured country and write one
    CSV per country (uses the module-level `country_codes` and `header`)."""
    for country_code in country_codes:
        country_data = [",".join(header)] + get_pages(country_code)
        write_to_file(country_code, country_data)
    # Fix: a nested `while True` busy-wait followed this loop (spinning on
    # datetime.now() and, once the clock hit exactly 12:00:00, printing
    # timestamps forever).  It could never terminate and prevented the
    # caller's sleep from ever running, so it has been removed; any
    # scheduling is left to the caller.
# CLI entry point: file paths for the API key / country codes and the output
# directory are configurable via flags.
if __name__ == "__main__" :
    parser = argparse.ArgumentParser()
    parser.add_argument('--key_path', help='Path to the file containing the api key, by default will use api_key.txt in the same directory', default='api_key.txt')
    parser.add_argument('--country_code_path', help='Path to the file containing the list of country codes to scrape, by default will use country_codes.txt in the same directory', default='country_codes.txt')
    parser.add_argument('--output_dir', help='Path to save the outputted files in', default='output/')
    args = parser.parse_args()
    output_dir = args.output_dir
    api_key, country_codes = setup(args.key_path, args.country_code_path)
    get_data()
    # Pause just under 12 hours (43199 s) before the process exits.
    time.sleep(43199)
1609877 | <gh_stars>1-10
# Import the Flask libraries
# used for powering the Main Event loop
try:
from flask import Flask
from flask import flash
from flask import url_for
from flask import redirect
from flask import request
from flask import session
from flask import make_response
from hashlib import md5
from flask import render_template
from flask_mysqldb import MySQL
from flask import jsonify
import requests
import json
# Throw an exception Handler that
# Throws an ImportError exception
except ImportError as err:
# Print to the EndUser a Modified
# Error Notice to show clear descriptions
print ("The Module could not be Imported {}". format(err))
# Initialize the Database connection
# and specific environmental variables
# and application configuration vars
# Flask application and MySQL connection configuration.
app = Flask(__name__)
# NOTE(review): literal placeholder secret key -- load a real secret from the
# environment before deploying.
app.secret_key = "SECRET_KEY"
# Start for the Database Configurations
# Am Implementing MySQL for the database
app.config["MYSQL_DB"] = "stocks"
app.config["MYSQL_HOST"] = "Localhost"
app.config["MYSQL_USER"] = "root"
# NOTE(review): empty root password -- acceptable only for local development.
app.config["MYSQL_PASSWORD"] = ""
# Create a Model representing the
# Database to create different Objects
mysql = MySQL(app)
# We need a custom error handler
# to throw custom errors that would
# be user friendly to the eyes
@app.errorhandler(404)
def not_found(error):
    """Serve unknown routes as a JSON 404 payload instead of Flask's HTML page."""
    payload = {'error': 'Not found'}
    return jsonify(payload), 404
# Create the route decorators for
# handling different functions via
# the Main application.
@app.route('/')
def homepage():
    """Render the landing page, passing the username when a session exists."""
    context = {}
    if 'loggedin' in session:
        context['username'] = session['username']
    return render_template('home.html', **context)
# This function returns all the users
# Registered on the Platform
@app.route('/users')
def users():
    """List every registered user (renders user.html with all rows).

    NOTE(review): rows include password and card columns; make sure the
    template does not expose them, and that this route is access-controlled.
    """
    # initiate a database connection
    cur = mysql.connection.cursor()
    # return all users using the Execute
    # function via generated Cursor
    cur.execute("SELECT * FROM users")
    # Fetch all the data needed using
    # the Cursor fetch function
    fetch_data = cur.fetchall()
    # return the templating view
    # with the corresponding data
    return render_template('user.html', data = fetch_data)
# This function actually powers the registration
# of new users and inserts them via database
@app.route('/register', methods = ["POST", "GET"])
def register():
    """Create a new user from the registration form, then redirect to login.

    NOTE(review): the password is stored in plaintext; it should be hashed
    (e.g. werkzeug.security.generate_password_hash) together with a matching
    change in login() -- flagged only, not changed here.
    """
    # Check if there is a POST request
    # and return true for the values
    if request.method == "POST":
        username = request.form["username"]
        password = request.form["password"]
        email_ad = request.form["email"]
        # Now create a connection and insert
        # data into the database generally
        cur = mysql.connection.cursor()
        # Execute the insert statement, using Prepared
        # statements to avoid MySQL Injections.
        cur.execute("INSERT INTO users(username, password, email) VALUES(%s, %s, %s)", (username, password, email_ad))
        # Commit the changes into the database
        mysql.connection.commit()
        return redirect(url_for('login'))
    return render_template('register.html')
# This function would help us to update
# Our data in event of viewing it
@app.route('/update', methods = ["POST", "GET"])
def update():
    """Apply the edit form: overwrite username/password/email for a user id.

    NOTE(review): no login/ownership check guards this route, and the
    password is stored in plaintext -- flagged for follow-up.
    """
    # Check if the details are already on
    # and then update the data as supposed
    if request.method == "POST":
        id_data = request.form["id"]
        email = request.form["email"]
        username = request.form["username"]
        password = request.form["password"]
        # Create a cursor and create a
        # connection that would power it
        cur = mysql.connection.cursor()
        # Execute the update details too
        # and populate the database
        cur.execute("UPDATE users SET username=%s, password=%s, email=%s WHERE id=%s",(username, password, email, id_data))
        # Commit the changes to the database
        mysql.connection.commit()
        return redirect(url_for('users'))
# This function would help us to delete
# Our data in event of the activity loop
@app.route('/delete/<string:id_data>', methods = ["POST","GET"])
def delete(id_data):
    """Delete the user row whose id is *id_data*, then return to the user list.

    NOTE(review): no authentication/CSRF check guards this route -- any
    visitor can delete any user; flagged rather than changed here.
    """
    cur = mysql.connection.cursor()
    # Fix: DB-API parameters must be a sequence; the previous bare
    # `(id_data)` was just a string and relied on driver-specific leniency.
    cur.execute("DELETE FROM users WHERE id=%s", (id_data,))
    # Commit the change so the deletion is persisted.
    mysql.connection.commit()
    return redirect(url_for('users'))
# This function helps us in creating an individual
# Editing field for a particular user
@app.route('/edituser/<string:id_data>', methods=["GET"])
def edit(id_data):
    """Render the edit form pre-filled with the user identified by *id_data*."""
    cur = mysql.connection.cursor()
    # Fix: pass parameters as a 1-tuple; a bare string relies on
    # driver-specific handling of scalar arguments.
    cur.execute("SELECT * FROM users WHERE id=%s", (id_data,))
    fetchuser = cur.fetchone()
    return render_template('edit.html', data = fetchuser)
# The login function that takes one id
# and returns associated data
@app.route('/login', methods=["POST", "GET"])
def login():
    """Authenticate against the users table and start a session.

    NOTE(review): passwords are matched in plaintext SQL; they should be
    hashed (with a matching change in register()) -- flagged only.
    """
    # Output message if something goes wrong...
    msg = ''
    # Check if "username" and "password" POST requests exist (user submitted form)
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
        # Create variables for easy access
        username = request.form['username']
        password = request.form['password']
        # Check if account exists using MySQL
        cursor = mysql.connection.cursor()
        cursor.execute('SELECT * FROM users WHERE username = %s AND password = %s', (username, password))
        # Fetch one record and return result
        account = cursor.fetchone()
        # If account exists in accounts table in out database
        if account:
            # Create session data, we can access this data in other routes
            session['loggedin'] = True
            session['id'] = account[0]
            session['username'] = account[1]
            # Redirect to home page
            return redirect(url_for('profile'))
        else:
            # Account doesnt exist or username/password incorrect
            msg = 'Incorrect username/password!'
    # Show the login form with message (if any)
    return render_template('login.html', msg=msg)
# Function for the user profile after
# Login with required details
@app.route('/profile')
def profile():
    """Show the logged-in user's profile, or bounce to the login page."""
    # Check if user is loggedin
    if 'loggedin' in session:
        # We need all the account info for the user so we can display it on the profile page
        cursor = mysql.connection.cursor()
        cursor.execute('SELECT * FROM users WHERE id = %s', [session['id']])
        account = cursor.fetchone()
        # Show the profile page with account info
        return render_template('profile.html', account=account)
    # User is not loggedin redirect to login page
    return redirect(url_for('login'))
# Function for already logged
# in user to logout
@app.route('/logout')
def logout():
    """Clear the auth session keys and send the user back to the login page."""
    for session_key in ('loggedin', 'id', 'username'):
        session.pop(session_key, None)
    return redirect(url_for('login'))
## Callback success page for user
## after payment Notifications
@app.route('/success')
def success():
    """Landing page shown after a successful account funding.

    Falls back to rendering the exception text if the template fails.
    """
    try:
        return render_template('success.html')
    # Fix: `except Exception, e:` is Python-2-only syntax and a SyntaxError
    # on Python 3 (which the rest of this module targets).
    except Exception as e:
        return str(e)
# The route below takes us to the
# IEX api page where end-users can see iex
# prices and related companies
# API call here
@app.route('/stock-prices')
def iex():
    """Fetch live quotes for a fixed basket of symbols from the IEX Cloud API.

    NOTE(review): the API token is hard-coded in the URL (move it to config),
    no timeout is set, and the local name `re` shadows the stdlib regex
    module name -- flagged for follow-up.
    """
    re = requests.get('https://cloud.iexapis.com/v1/stock/market/batch?&types=quote&symbols=aapl,fb,googl,tsla,ba,baba&token=<KEY>')
    data = re.json()
    return render_template('quotes.html', quote = data)
# This function actually powers the funding
# of accounts and inserts them via database
@app.route('/fund-account', methods = ["POST", "GET"])
def fundaccount():
    """Store card details and a funding amount for the logged-in user.

    NOTE(review): card number and CVV are written to the users table in
    plaintext -- storing CVVs at all is prohibited by card-handling rules;
    this needs a payment-provider integration instead.  Also assumes
    session['username'] exists (no login check).
    """
    # Check if there is a POST request
    # and return true for the values
    if request.method == "POST":
        username = session['username']
        cardno = request.form["cardno"]
        amount = request.form["amount"]
        cvv = request.form["cvv"]
        # Now create a connection and insert
        # data into the database generally
        cur = mysql.connection.cursor()
        # Execute the insert statement, using Prepared
        # statements to avoid MySQL Injections.
        cur.execute("UPDATE users SET cardno=%s, amount=%s, cvv=%s WHERE username=%s", (cardno, amount, cvv,username))
        # Commit the changes into the database
        mysql.connection.commit()
        return redirect(url_for('success'))
    return render_template('fund.html')
# Buy Stocks Page for user
@app.route('/buystock')
def buystock():
    """Render the stock-purchase page with live IEX quotes.

    NOTE(review): same concerns as iex(): hard-coded token, no timeout, and
    the local name `re` shadows the stdlib module name.
    """
    re = requests.get('https://cloud.iexapis.com/v1/stock/market/batch?&types=quote&symbols=aapl,fb,googl,tsla,ba,baba&token=<KEY>')
    data = re.json()
    return render_template('buystock.html', quote = data)
# This function actually powers the funding
# of accounts and inserts them via database
@app.route('/completedstock', methods = ["POST", "GET"])
def completedstock():
    """Record a stock purchase (symbol + amount) for the logged-in user.

    NOTE(review): assumes session['username'] exists; no login check guards
    this route.
    """
    # Check if there is a POST request
    # and return true for the values
    if request.method == "POST":
        username = session['username']
        symbol = request.form["symbol"]
        amount = request.form["amount"]
        # Now create a connection and insert
        # data into the database generally
        cur = mysql.connection.cursor()
        # Execute the insert statement, using Prepared
        # statements to avoid MySQL Injections.
        cur.execute("INSERT INTO boughtstocks(symbol, amount, user) VALUES(%s, %s, %s)",(symbol, amount,username))
        # Commit the changes into the database
        mysql.connection.commit()
        return redirect(url_for('yourstock'))
    return render_template('fund.html')
# Get all the stocks Bought by
# A single user and return vals
@app.route('/yourstock')
def yourstock():
    """List the stocks bought by the logged-in user (assumes a session exists)."""
    # create and initialize a connection
    cur = mysql.connection.cursor()
    # Execute and run the query to get vals
    cur.execute("SELECT * FROM boughtstocks WHERE user=%s",[session['username']])
    # Fetch the data for just one person
    fetchonestock = cur.fetchall()
    # return the template and pass
    # the data to the views
    return render_template('yourstocks.html', data=fetchonestock)
'''
The __name__ attribute gives room for
the current file to be imported as a
different module into another file
'''
# Run the built-in development server when executed directly; debug=True is
# not safe for production deployments.
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
3261788 | # Generated by Django 2.1.3 on 2018-12-01 14:21
from django.db import migrations, models
# Auto-generated Django migration: makes Chat.participants optional
# (blank=True) while keeping the related_name 'chats'.
class Migration(migrations.Migration):
    dependencies = [
        ('chat', '0005_auto_20181117_1716'),
    ]
    operations = [
        migrations.AlterField(
            model_name='chat',
            name='participants',
            field=models.ManyToManyField(blank=True, related_name='chats', to='chat.Contact'),
        ),
    ]
| StarcoderdataPython |
1677699 | <filename>tests/11 - NSC Data Tests/All folders/Artificial Dataset/artificial_data_var.py<gh_stars>1-10
import sys
import os
import platform
system = platform.system()
current_dir = os.getcwd()
# Locate the repository root (".../Neurons") and add it to sys.path so the
# `model` package can be imported when running from a subdirectory.
if system == 'Windows':
    path_dir = current_dir.split("\\Neurons")[0] + "\\Neurons"
else:
    # Fix: this branch referenced the undefined name `a`, raising NameError
    # on every non-Windows platform; `current_dir` was clearly intended.
    path_dir = current_dir.split("/Neurons")[0] + "/Neurons"
sys.path.append(path_dir)
from model import NeuronGraph, PCC
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from model import ngplot
from model import unwrap
from tqdm import tqdm
import sklearn.datasets as datasets
from sklearn.metrics import confusion_matrix
from sklearn.semi_supervised import LabelPropagation, LabelSpreading
#np.random.seed(0)
import multiprocessing
def concat_data(x, y):
    """Combine 2-D samples *x* and labels *y* into a single DataFrame with
    columns ['X', 'Y', 'target']."""
    features = pd.DataFrame(x, columns=['X', 'Y'])
    labels = pd.DataFrame(y, columns=['target'])
    return pd.concat([features, labels], axis=1)
""" Define the range of the params """
def multi_get_params(data, name):
    """Grid-search NSC hyper-parameters (neighbors x seeds x expand) on *data*.

    For every combination the model is re-fit 50 times; the mean per-class
    accuracy is accumulated into a 4-D tensor and saved to ``<name>.npy``.
    Intended to run as the target of a separate process (see __main__).
    """
    print("Process ID: ", os.getpid())
    seeds = [2,5,10,12,15,20,22,25,30,32,35,40,42,45,50]
    expand = [2,3,4,5,10,20,30,40,50,75,100,200,300,400,500]
    neighbors = [5,10,15,20,25,50]
    epochs = 600
    # Standardize the numeric feature columns in place (z-score).
    features = data.drop(columns=['target'],axis=1)
    numeric = features.select_dtypes(include=np.number)
    numeric_names = numeric.columns
    data.loc[:,numeric_names] = (numeric-numeric.mean())/numeric.std()
    # Create the model instance
    ng = NeuronGraph(data=data, target='target', similarity='Euclidean',
                     model='Izhikevic', alpha = 0.1, w_step = 0.3, time_step=0.5, print_info=False,
                     print_steps=False, beta=2.0, gamma=1.5)
    labels = np.unique(ng.data['target'])
    # accuracy[neighbor, class, seed, expand] = mean accuracy over 50 runs.
    accuracy = np.zeros(shape=(len(neighbors), len(labels), len(seeds),len(expand)))
    accuracy_mean = np.zeros(shape=(50,len(labels)))
    # For each number of neighbors
    for n in tqdm(range(len(neighbors))):
        print("{} data neighbors: {}/6".format(name,n))
        ng.neighbors = neighbors[n]
        # For each seed count
        for s in tqdm(range(len(seeds))):
            print("{} data seeds: {}/15".format(name,s))
            # For each expand value
            for e in tqdm(range(len(expand))):
                print("{} data expand: {}/15".format(name,e))
                ng.search_expand = expand[e]
                # Repeat 50 times to average out randomness
                for i in range(50):
                    # Preprocess the data
                    ng.preprocess_data(not_null=seeds[s], standarlize=False)
                    # Fit
                    ng.fit(epochs,ng.numerical)
                    # Diagonal of the confusion matrix = per-class hit counts;
                    # when extra rows appear, keep only the trailing classes.
                    diag = np.diagonal(ng.confusion_matrix)
                    if diag.size==len(ng.labels_list):
                        accuracy_mean[i,:] = diag
                    elif diag.size > len(ng.labels_list):
                        accuracy_mean[i,:] = diag[-len(ng.labels_list):]
                    # Reset the model's mutable state before the next run.
                    ng.data = data
                    ng.y_predicted = -np.ones(shape=ng.neurons)
                    ng.degree_out = np.zeros(shape=(ng.neurons,len(ng.labels_list)+1))
                    ng.labels_array = -np.ones(shape=(ng.neurons,ng.neurons))
                    ng.incident_degree = np.zeros(shape=(ng.neurons,len(ng.labels_list)))
                    ng.inner_degree = {}
                    ng.graph.vertexes = np.array([])
                    ng.disputed = np.array([])
                    ng.capacity = np.zeros(shape=(ng.neurons))
                accuracy[n,:,s,e] = np.mean(accuracy_mean,axis=0)
    np.save(name,accuracy)
if __name__ == "__main__":
    # Build several 2-D toy datasets (blobs, circles, moons, random
    # classification) and sweep the NSC hyper-parameters on each one in its
    # own worker process.
    # x_clusters_05,y_clusters_05 = datasets.make_blobs(n_samples=400,centers=3,n_features=2, cluster_std=0.5)
    # clusters_05 = concat_data(x_clusters_05,y_clusters_05)
    # x_clusters_07,y_clusters_07 = datasets.make_blobs(n_samples=400,centers=3,n_features=2, cluster_std=0.7)
    # clusters_07 = concat_data(x_clusters_07,y_clusters_07)
    x_clusters_09,y_clusters_09 = datasets.make_blobs(n_samples=400,centers=3,n_features=2, cluster_std=0.9)
    clusters_09 = concat_data(x_clusters_09,y_clusters_09)
    x_clusters_12,y_clusters_12 = datasets.make_blobs(n_samples=400,centers=3,n_features=2, cluster_std=1.2)
    clusters_12 = concat_data(x_clusters_12,y_clusters_12)
    x_circles,y_circles = datasets.make_circles(n_samples=400,noise=0.05,factor=0.5)
    circles = concat_data(x_circles, y_circles)
    x_moons,y_moons = datasets.make_moons(n_samples=400,noise=0.1)
    moons = concat_data(x_moons, y_moons)
    x_classification, y_classification = datasets.make_classification(n_samples=400,n_features=2,n_informative=2,n_redundant=0,n_clusters_per_class=1)
    classification = concat_data(x_classification,y_classification)
    # x_gaussian, y_gaussian = datasets.make_gaussian_quantiles(n_samples=400,n_features=2)
    # gaussian = concat_data(x_gaussian, y_gaussian)
    dic_of_data = {'Cluster_09':clusters_09, 'Cluster_12':clusters_12, 'Circles':circles, 'Moons':moons, 'Classification':classification}
    keys = list(dic_of_data.keys())
    print(keys)
    datas = list(dic_of_data.values())
    processes = []
    for i in tqdm(range(5)):
        # One worker per dataset; each saves its own accuracy tensor.
        p = multiprocessing.Process(target=multi_get_params,args=(datas[i],keys[i],))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
69978 | <filename>experiments/visualizations/visualize_smis.py
import os
import sys
from datetime import datetime
import logging
from collections import OrderedDict
import io
import numpy as np
import torch
import h5py
import pandas as pd
from PIL import Image
import rdkit.Chem as Chem
from rdkit.Chem import Draw
from rdkit.Chem.Draw import SimilarityMaps
from absl import app
from absl import flags
def save_png(data, out_path):
    """Decode PNG bytes, rescale from 72 to 300 dpi, and write to *out_path*."""
    img = Image.open(io.BytesIO(data))
    width, height = (float(d) for d in img.size)
    dpi_scale = 300.0 / 72.0
    # thumbnail() resizes in place while preserving the aspect ratio.
    img.thumbnail([round(dpi_scale * width), round(dpi_scale * height)],
                  Image.ANTIALIAS)
    img.save(out_path, 'PNG', dpi=(300.0, 300.0))
# Repository root lives three levels above this script; make it importable.
DELQSAR_ROOT = os.path.abspath(__file__ + '/../../../')
sys.path += [os.path.dirname(DELQSAR_ROOT)]

from del_qsar import featurizers, models

# Ensure the shared results directory exists before any run writes to it.
if not os.path.isdir(os.path.join(DELQSAR_ROOT, 'experiments', 'results')):
    os.mkdir(os.path.join(DELQSAR_ROOT, 'experiments', 'results'))

# Command-line configuration (absl flags).
FLAGS = flags.FLAGS
flags.DEFINE_string('csv', 'triazine_lib_sEH_SIRT2_QSAR.csv', 'csv filename')
flags.DEFINE_string('fps_h5', 'x_triazine_2048_bits_all_fps.h5', 'HDF5 file with stored fingerprints')
flags.DEFINE_string('out', None, 'Output folder name')
flags.DEFINE_string('model_path', None, 'File path from experiments folder for saved model (should be a .torch file)')
flags.DEFINE_float('dropout', None, 'Dropout rate')
flags.DEFINE_list('layer_sizes', None, 'FFN layer sizes')
flags.DEFINE_list('cpd_ids', None, 'cpd_ids of the compounds to visualize with atom annotations')
flags.DEFINE_float('img_size', 2.5, 'Height/width of image (in inches)')
flags.DEFINE_float('font_size', 0.85, 'Heteroatom fontsize')
def main(argv):
    """Render per-atom similarity maps for the compounds in FLAGS.cpd_ids.

    Loads a trained fingerprint MLP, computes atomic attribution weights for
    each requested SMILES, normalizes them across all molecules, and writes
    one annotated PNG per compound plus a run log.
    """
    del argv

    # Results go into results/<date>/<FLAGS.out>/.
    dt = datetime.today()
    DATE = os.path.join(DELQSAR_ROOT, 'experiments', 'results',
                        f'{dt.year}-{str(dt.month).zfill(2)}-{str(dt.day).zfill(2)}')
    if not os.path.isdir(DATE):
        os.mkdir(DATE)
    SAVE_ROOT = os.path.join(DATE, FLAGS.out)
    if not os.path.isdir(SAVE_ROOT):
        os.mkdir(SAVE_ROOT)

    # Record the flag values used for this run.
    LOG_FILE = os.path.join(SAVE_ROOT, 'run.log')
    with open(LOG_FILE, 'a') as lf:
        logging.info('FLAGS:')
        lf.write(f'{datetime.now()} INFO: FLAGS:\n')
        for f in FLAGS.get_key_flags_for_module(sys.argv[0]):
            logging.info(f.serialize())
            lf.write(f'{datetime.now()} INFO: ' + f.serialize() + '\n')

    # Look up the SMILES for each requested compound id (1-based in the csv).
    df_data = pd.read_csv(os.path.join(DELQSAR_ROOT, 'experiments', 'datasets', FLAGS.csv))
    smis, smis_to_cpd_ids, x_rows_visualize = [], {}, []
    for ID in FLAGS.cpd_ids:
        smi = df_data.iloc[int(ID)-1]['smiles']
        smis.append(smi)
        smis_to_cpd_ids[smi] = ID
        x_rows_visualize.append(int(ID)-1)

    featurizer = featurizers.FingerprintFeaturizer()
    df_visualize = pd.DataFrame.from_dict({'smiles': smis})
    # Prefer precomputed fingerprints when available; otherwise featurize now.
    if os.path.isfile(os.path.join(DELQSAR_ROOT, 'experiments', FLAGS.fps_h5)):
        hf = h5py.File(os.path.join(DELQSAR_ROOT, 'experiments', FLAGS.fps_h5))
        x = np.array(hf['all_fps'])
        x_visualize = x[x_rows_visualize, :]
        hf.close()
    else:
        x_visualize = featurizer.prepare_x(df_visualize)
    input_size = x_visualize.shape[1]
    print(f'Input featurization is {input_size} long')

    # Restore the trained model and switch to inference mode.
    model = models.MLP(input_size, [int(size) for size in FLAGS.layer_sizes],
                       dropout=FLAGS.dropout)
    model.load_state_dict(torch.load(os.path.join(DELQSAR_ROOT, 'experiments', FLAGS.model_path)))
    model = model.to('cuda:0')
    print(model)
    model.eval()
    enrichments = model.predict_on_x(x_visualize, device='cuda:0')

    drawings = OrderedDict()
    # Get all weights
    for i, smi in enumerate(smis):
        mol = Chem.MolFromSmiles(smi)
        weights = SimilarityMaps.GetAtomicWeightsForModel(
            mol,
            featurizer.simmap_featurizer,
            lambda fp: model.predict_on_x(np.array(fp), device='cuda:0'),
        )
        drawings[smi] = (mol, weights, enrichments[i])

    # Normalize across ALL molecules so the color scale is comparable.
    # BUG FIX: the for-clauses were ordered "for w in weights for _, weights, _
    # in drawings.values()", which iterated only the leftover `weights` of the
    # last molecule instead of every molecule's weights.
    scale = max(np.abs(w) for _, weights, _ in drawings.values() for w in weights)

    # Show
    for smi, (mol, weights, enrichment) in drawings.items():
        logging.info(f'cpd_id: {smis_to_cpd_ids[smi]}')
        logging.info(f'SMILES: {smi}')
        logging.info(f'Predicted enrichment: {enrichment:.2f}')
        with open(LOG_FILE, 'a') as lf:
            lf.write(f'cpd_id: {smis_to_cpd_ids[smi]}\n')
            lf.write(f'SMILES: {smi}\n')
            lf.write(f'Predicted enrichment: {enrichment:.2f}\n\n')
            # (removed redundant lf.close(); the with-block closes the file)

        dim = int(300*FLAGS.img_size)
        d = Draw.MolDraw2DCairo(dim, dim)
        d.SetFontSize(FLAGS.font_size)
        # Custom colors for F (9), S (16), Cl (17).
        d.drawOptions().updateAtomPalette({9: (1.0, 0.0, 0.4980392156862745),
                                           16: (1.0, 0.3411764705882353, 0.2),
                                           17: (0.4980392156862745, 0.0, 1.0)})
        if scale != 0:
            SimilarityMaps.GetSimilarityMapFromWeights(mol, [w/scale for w in weights], draw2d=d)
        else:
            SimilarityMaps.GetSimilarityMapFromWeights(mol, weights, draw2d=d)
        d.FinishDrawing()
        save_png(d.GetDrawingText(), os.path.join(SAVE_ROOT, 'cpd_id_' + str(smis_to_cpd_ids[smi]) + '.png'))
| StarcoderdataPython |
23100 | <gh_stars>0
import sys
import os
class Console(object):
    """
    Class responsible for handling input from the user
    and appending messages to a simple text log file.
    """

    def __init__(self):
        # Every log() call appends to this file.
        self.log_file = "log.txt"
        # Mark the start of a new session in the log.
        with open(self.log_file, 'a') as file:
            file.write("** NEW LOG CREATED **\n")

    @staticmethod
    def read_for_condition(prompt, condition):
        """
        Continues to prompt user for input until the condition
        is fulfilled.

        :param prompt: prompt for the input
        :param condition: function that validates the raw input and
            returns a non-None value when the input is acceptable
        :return: the validated value
        """
        user_input = None
        while user_input is None:
            user_input = condition(input(prompt))
        return user_input

    @staticmethod
    def yes_or_no(prompt):
        """
        Prompt the user to answer yes or no (re-asks until valid).

        :param prompt: question shown to the user
        :return: True for yes/y, False for no/n (case-insensitive)
        """
        while True:
            user_input = input("{}: yes/no >>> ".format(prompt)).lower()
            if user_input in ('yes', 'y'):
                return True
            if user_input in ('no', 'n'):
                return False
            # Anything else: loop and ask again.

    @staticmethod
    def press_enter_to_continue():
        """
        Block until the user presses Enter.
        """
        # FIX: dropped the unused assignment of the returned value.
        input("Press Enter To Continue...")

    def log(self, msg):
        """
        Append *msg* to the log file (no newline is added).

        :param msg: text to record
        """
        with open(self.log_file, 'a') as file:
            file.write(msg)

    @staticmethod
    def clear_console():
        """
        Clear the terminal screen for the current platform.
        """
        # not covering cygwin; macOS and Linux share the same command.
        if sys.platform in ("darwin", "linux"):
            os.system("clear")
        else:
            os.system("cls")
| StarcoderdataPython |
726 | <gh_stars>0
"""
Client for simulator requests
"""
__copyright__ = "Copyright 2020, Microsoft Corp."
# pyright: strict
from random import uniform
import time
from typing import Union
import jsons
import requests
from .exceptions import RetryTimeoutError, ServiceError
from .logger import Logger
from .simulator_protocol import (
ServiceConfig,
SimulatorEvent,
SimulatorEventRequest,
SimulatorInterface,
)
log = Logger()

# Gateway/service transient failures that are worth retrying.
_RETRYABLE_ERROR_CODES = {502, 503, 504}
# Cap on a single backoff sleep.
_MAXIMUM_BACKOFF_SECONDS = 60
# Base unit for the exponential backoff (milliseconds).
_BACKOFF_BASE_MULTIPLIER_MILLISECONDS = 50
class SimulatorClient:
    """HTTP client for the simulator service with retry/backoff handling."""

    def __init__(self, config: ServiceConfig):
        # Service coordinates and credentials used for every request.
        self._config = config
        self._retry_attempts = 0
        self._retry_timeout = None
        # A single session reuses connections and carries the auth headers.
        self._session = requests.session()
        self._session.headers.update(
            {"Authorization": config.access_key, "Content-type": "application/json"}
        )

    def register_simulator(self, interface: SimulatorInterface) -> SimulatorEvent:
        """Register this simulator with the service and return the first event."""
        return self._http_request(interface, self._config)

    def get_next_event(self, event_request: SimulatorEventRequest) -> SimulatorEvent:
        """Advance the simulator session and return the next event."""
        return self._http_request(event_request, self._config)

    def unregister_simulator(self, session_id: str):
        """Delete the simulator session identified by *session_id*."""
        url = "{}/v2/workspaces/{}/simulatorSessions/{}".format(
            self._config.server, self._config.workspace, session_id
        )
        log.debug("Sending unregister request to {}".format(url))
        return self._session.delete(url, timeout=self._config.network_timeout_seconds)

    def _http_request(
        self,
        payload: Union[SimulatorInterface, SimulatorEventRequest],
        config: ServiceConfig,
    ) -> SimulatorEvent:
        """POST *payload* (register or advance), retrying transient failures.

        Retries (via recursion) on timeouts and on retryable status codes;
        raises ServiceError on any other non-2xx response.
        """
        res = None
        if self._retry_attempts >= 1:
            self._handle_retry()
        try:
            # NOTE: we assert these for the user here to allow the config object to be partially initialized before use.
            assert len(
                config.access_key
            ), "Environment variable SIM_ACCESS_KEY is unset or access_key is empty."
            assert len(
                config.workspace
            ), "Environment variable SIM_WORKSPACE is unset or workspace is empty."
            assert len(
                config.server
            ), "Environment variable SIM_API_HOST is unset or server is empty."

            # Register request
            if isinstance(payload, SimulatorInterface):
                reg_url = "{}/v2/workspaces/{}/simulatorSessions".format(
                    config.server, config.workspace
                )
                log.debug("Sending registration to {}".format(reg_url))
                log.debug("Registration payload: {}".format(jsons.dumps(payload)))
                res = self._session.post(
                    reg_url,
                    json=jsons.loads(payload.json),
                    headers={
                        "Authorization": config.access_key,
                        "Content-type": "application/json",
                    },
                    timeout=self._config.network_timeout_seconds,
                )
                log.debug("Response to registration received.")

            # Get next event request
            if isinstance(payload, SimulatorEventRequest):
                log.network("Sending get next event request.")
                res = self._session.post(
                    "{}/v2/workspaces/{}/simulatorSessions/{}/advance".format(
                        config.server, config.workspace, payload.sessionId
                    ),
                    json=jsons.loads(jsons.dumps(payload)),
                    headers={
                        "Authorization": config.access_key,
                        "Content-type": "application/json",
                    },
                    timeout=self._config.network_timeout_seconds,
                )
                log.network("Response to get next event request received.")
        except requests.exceptions.Timeout as err:
            # Timeouts are always retried (subject to the retry deadline).
            log.error(err)
            self._retry_attempts += 1
            return self._http_request(payload, config)
        except requests.exceptions.RequestException as err:
            # Non-timeout transport errors are fatal; surface them.
            if res is not None:
                log.error(res.text)
            log.error(err)
            raise

        if res is not None:
            if res.status_code in _RETRYABLE_ERROR_CODES:
                log.debug(
                    "Service returned {}, a retryable response error code."
                    " Retrying request.".format(res.status_code)
                )
                self._retry_attempts += 1
                return self._http_request(payload, config)

            # bail on error
            if res.status_code != 200 and res.status_code != 201:
                log.error(
                    "Received response with {} http status code. "
                    "Raising exception.".format(res.status_code)
                )
                if res.text:
                    log.error(res.text)
                raise ServiceError(
                    "Unable to get next event for simulator, "
                    "received {} http status code".format(res.status_code)
                )

            # TODO estee: this needs validation
            # SimulatorEvent
            # Success: reset the retry bookkeeping before returning.
            self._retry_attempts = 0
            self._retry_timeout = None
            return self._event_from_json(res.text)

        raise RuntimeError(
            "Usage error: Somehow http response ended up as none. "
            "Check arguments to _http_request and ensure the payload "
            "is either of type SimulatorInterface or SimulatorEventRequest"
        )

    def _event_from_json(self, json_text: str) -> SimulatorEvent:
        """Converts a json string into a SimulatorEvent."""
        event_dict = jsons.loads(json_text)
        log.debug("Event Response: {}".format(event_dict))
        return SimulatorEvent(event_dict)

    def _handle_retry(self):
        """Enforce the overall retry deadline, then sleep with backoff."""
        log.network("handling retry.")

        if (
            self._retry_timeout and time.time() > self._retry_timeout
        ) or self._config.retry_timeout_seconds == 0:
            raise RetryTimeoutError("Simulator Retry time exceeded.")

        # Arm the deadline on the first retry of a sequence.
        if self._config.retry_timeout_seconds > 0 and self._retry_timeout is None:
            self._retry_timeout = time.time() + self._config.retry_timeout_seconds
            log.info(
                "Simulator will timeout in {} seconds if it is not able "
                "to connect to the platform.".format(self._retry_timeout - time.time())
            )

        self._backoff()
        log.network("retry handled.")

    def _backoff(self):
        """
        Implements Exponential backoff algorithm with full jitter
        Check the following url for more information
        https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
        """
        power_of_two = 2 ** self._retry_attempts
        max_sleep = min(
            power_of_two * _BACKOFF_BASE_MULTIPLIER_MILLISECONDS / 1000.0,
            _MAXIMUM_BACKOFF_SECONDS,
        )
        sleep = uniform(0, max_sleep)
        log.debug(
            "Retry attempt: {}, backing off for {} seconds".format(
                self._retry_attempts, sleep
            )
        )
        time.sleep(sleep)
| StarcoderdataPython |
1612957 | <gh_stars>0
from __future__ import division
import numpy as np
from collections import Counter
import LinearAlgebraFunctions as alg
import math
# Synthetic demo data: Poisson draws scaled up to plausible magnitudes.
num_friends = np.random.poisson(5, 1000)
num_friends = [20 * nf_i for nf_i in num_friends]
daily_minutes = np.random.poisson(10, 1000)
daily_minutes = [15 * dm_i for dm_i in daily_minutes]

# number of points
num_points = len(num_friends)

# largest value
largest_value = max(num_friends)
smallest_value = min(num_friends)

# specific positions
sorted_values = sorted(num_friends)
smallest_value = sorted_values[0]  # NOTE: rebinds smallest_value (same as min() above)
second_smallest_value = sorted_values[1]
second_largest_value = sorted_values[-2]
# MEASURES OF CENTRAL TENDENCY
# mean
def mean(x):
    """Arithmetic mean of the values in x."""
    total = sum(x)
    return total / len(x)
# median
def median(v):
    """Middle value of v (average of the two middle values for even length)."""
    ordered = sorted(v)
    n = len(ordered)
    mid = n // 2
    if n % 2:
        # odd length: a single middle element exists
        return ordered[mid]
    # even length: mean of the two central elements
    return (ordered[mid - 1] + ordered[mid]) / 2
# quantile
def quantile(x, p):
    """Value at the pth percentile of x (by sorted index, truncated)."""
    rank = int(p * len(x))
    return sorted(x)[rank]
# mode
def mode(x):
    """ returns a list, might be more than one mode """
    counts = Counter(x)
    max_count = max(counts.values())
    # FIX: .items() works on both Python 2 and 3; .iteritems() is Python-2 only.
    return [x_i for x_i, count in counts.items()
            if count == max_count]
# MEASURES OF DISPERSION
# range
def data_range(x):
    """Spread between the largest and smallest values of x."""
    lo, hi = min(x), max(x)
    return hi - lo
# variance
def de_mean(x):
    """ translate x by subtracting its mean (so the result has mean 0) """
    center = mean(x)
    return [value - center for value in x]
def variance(x):
    """ assumes x has at least two elements """
    deviations = de_mean(x)
    n = len(x)
    # sample variance: divide by (n - 1), not n
    return alg.sum_of_squares(deviations) / (n - 1)
# standard deviation
def standard_deviation(x):
    """Square root of the sample variance of x."""
    var = variance(x)
    return math.sqrt(var)
# inter quartile range
def interquartile_range(x):
    """Distance between the 75th and 25th percentiles of x."""
    q3 = quantile(x, 0.75)
    q1 = quantile(x, 0.25)
    return q3 - q1
# CORRELATION
def covariance(x, y):
    """Sample covariance of the paired sequences x and y."""
    n = len(x)
    centered_x = de_mean(x)
    centered_y = de_mean(y)
    return alg.dot(centered_x, centered_y) / (n - 1)
def correlation(x, y):
    """Pearson correlation of x and y (0 when either has no variation)."""
    sx = standard_deviation(x)
    sy = standard_deviation(y)
    if sx == 0 or sy == 0:
        return 0  # if no variation, correlation is 0
    return covariance(x, y) / sx / sy
| StarcoderdataPython |
1693819 | <filename>test/02_ascii_art.py
#!/usr/bin/python
from PIL import Image
# Darkest-to-lightest character ramp: 11 characters covering pixel values 0-255.
ASCII_CHARS_RAW = "#@%*=+;:,. "
ASCII_CHARS = list(ASCII_CHARS_RAW)


def scale_image(image, new_width=100):
    """Resizes an image preserving the aspect ratio.
    """
    (original_width, original_height) = image.size
    # because characters are twice as tall as wide, convert aspect_ratio to half
    aspect_ratio = (original_height/float(original_width))*0.5
    new_height = int(aspect_ratio * new_width)
    new_image = image.resize((new_width, new_height))
    return new_image


def map_pixels_to_ascii_chars(image, range_width=25):
    """Maps each pixel to an ascii char based on the range
    in which it lies.

    0-255 is divided into 11 ranges of 25 pixels each.
    """
    pixels_in_image = list(image.getdata())
    # FIX: floor division keeps the index an int on Python 3 (plain `/`
    # produces a float there, which is not a valid list index).
    pixels_to_chars = [ASCII_CHARS[pixel_value // range_width]
                       for pixel_value in pixels_in_image]
    return "".join(pixels_to_chars)


def convert_image_to_ascii(image, new_width=100):
    """Scale, grayscale, and render *image* as a multi-line ASCII string."""
    image = scale_image(image)
    image = image.convert('L')  # convert to grayscale

    pixels_to_chars = map_pixels_to_ascii_chars(image)
    len_pixels_to_chars = len(pixels_to_chars)

    # FIX: range() works on both Python 2 and 3 (xrange was Python-2 only).
    image_ascii = [pixels_to_chars[index: index + new_width] for index in
                   range(0, len_pixels_to_chars, new_width)]

    return "\n".join(image_ascii)
def handle_image_conversion(image_filepath):
    """Open the image at *image_filepath* and return its ASCII rendering.

    Prints the error and returns None when the file cannot be opened.
    """
    image = None
    try:
        image = Image.open(image_filepath)
    # FIX: `except Exception, e` is Python-2-only syntax; `as` works on both.
    except Exception as e:
        print("Unable to open image file {image_filepath}.".format(image_filepath=image_filepath))
        print(e)
        return

    image_ascii = convert_image_to_ascii(image)
    return image_ascii
if __name__=='__main__':
    # Demo entry point: render a bundled portrait to stdout.
    image_file_path = "../img/test_portrait1.jpg"
    ascii_img = handle_image_conversion(image_file_path)
    # Optionally persist the rendering to disk:
    #fh = open("../img/test.txt", 'wa')
    #fh.write(ascii_img)
    #fh.close()
    print ascii_img
| StarcoderdataPython |
1693838 | # http://www.columbia.edu/~cs2035/courses/csor4231.F15/matrix-chain.pdf
# http://www.geeksforgeeks.org/dynamic-programming-set-8-matrix-chain-multiplication/
# Given a sequence of matrices, find the most efficient way to multiply these
# matrices together. The problem is not actually to perform the multiplications,
# but merely to decide in which order to perform the multiplications.
# We have many options to multiply a chain of matrices because matrix
# multiplication is associative. In other words, no matter how we parenthesize
# the product, the result will be the same. For example, if we had four matrices
# A, B, C, and D, we would have:
# (ABC)D = (AB)(CD) = A(BCD) = ....
# However, the order in which we parenthesize the product affects the number of
# simple arithmetic operations needed to compute the product, or the efficiency.
# For example, suppose A is a 10 × 30 matrix, B is a 30 × 5 matrix,
# and C is a 5 × 60 matrix. Then,
# (AB)C = (10×30×5) + (10×5×60) = 1500 + 3000 = 4500 operations
# A(BC) = (30×5×60) + (10×30×60) = 9000 + 18000 = 27000 operations.
# Clearly the first parenthesization requires less number of operations.
# Given an array p[] which represents the chain of matrices such that the ith
# matrix Ai is of dimension p[i-1] x p[i]. We need to write a function MatrixChainOrder()
# that should return the minimum number of multiplications needed to multiply the chain.
import time
def find_optimal_chain(arr, i, j):
    """Minimum scalar multiplications for chain A_i..A_j (naive recursion).

    arr[k-1] x arr[k] are the dimensions of matrix A_k; exponential time.
    """
    if i == j:
        return 0
    best = 2 ** 32 - 1
    for split in range(i, j):
        left = find_optimal_chain(arr, i, split)
        right = find_optimal_chain(arr, split + 1, j)
        cost = left + right + arr[i - 1] * arr[split] * arr[j]
        if cost < best:
            best = cost
    return best
dict_memo = {}  # retained for backward compatibility; no longer used internally


def find_optimal_chain_memo(arr, i, j, memo=None):
    """Memoized minimum scalar multiplications for chain A_i..A_j.

    BUG FIX: the memo used to be a module-level dict keyed only on (i, j),
    so a second call with a *different* dimension array returned stale
    cached results.  The cache is now created fresh per top-level call (or
    passed down explicitly through the recursion), keeping the public call
    find_optimal_chain_memo(arr, i, j) backward compatible.
    """
    if memo is None:
        memo = {}
    if i == j:
        return 0
    if (i, j) in memo:
        return memo[(i, j)]
    min_val = 2 ** 32 - 1
    for k in range(i, j):
        count = (find_optimal_chain_memo(arr, i, k, memo)
                 + find_optimal_chain_memo(arr, k + 1, j, memo)
                 + arr[i - 1] * arr[k] * arr[j])
        min_val = min(count, min_val)
    memo[(i, j)] = min_val
    return min_val
def find_optimal_chain_bottom_up(arr):
    """Iterative DP for the matrix-chain cost; same convention as above."""
    n = len(arr)
    cost = [[0] * n for _ in range(n)]
    # length = distance between the chain's end indices
    for length in range(2, n):
        for i in range(n - length):
            j = i + length
            best = 2 ** 31
            for k in range(i + 1, j):
                candidate = cost[i][k] + cost[k][j] + arr[i] * arr[k] * arr[j]
                if candidate < best:
                    best = candidate
            cost[i][j] = best
    return cost[0][n - 1]
if __name__ == "__main__":
    # Sanity check: all three implementations should print the same cost (18).
    start = time.time()
    arr = [1,2,3,4]
    print(find_optimal_chain(arr,1,len(arr) - 1))
    print(find_optimal_chain_memo(arr,1,len(arr) - 1))
    print(find_optimal_chain_bottom_up(arr))
    print("Elapsed Time: ", time.time() - start)
3202576 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-23 11:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make several Lead fields nullable and
    constrain rent_or_purchase to fixed choices."""

    dependencies = [
        ('lead', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lead',
            name='deposits',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='lead',
            name='no_of_hours',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='lead',
            name='rent_or_purchase',
            field=models.CharField(choices=[('1', 'Rent'), ('2', 'Purchase')], max_length=1, null=True),
        ),
    ]
| StarcoderdataPython |
3240175 | <gh_stars>1-10
from django.contrib import admin
from .models import Usuario
# admin.site.register(Usuario)
@admin.register(Usuario)
class UsuarioAdmin(admin.ModelAdmin):
    """Admin configuration for the Usuario model."""
    # Columns shown on the changelist page.
    list_display = ('nome', 'email')
    # Fields searched by the admin search box.
    search_fields = ('nome', 'email')
    # Never editable through the admin form.
    readonly_fields = ('senha',)
| StarcoderdataPython |
3231320 | <filename>petab_MS/C.py<gh_stars>0
# pylint: disable:invalid-name
"""
This file contains constant definitions.
"""

# MEASUREMENTS

# Column names of the PEtab measurement table.
OBSERVABLE_ID = 'observableId'
PREEQUILIBRATION_CONDITION_ID = 'preequilibrationConditionId'
SIMULATION_CONDITION_ID = 'simulationConditionId'
MEASUREMENT = 'measurement'
TIME = 'time'
OBSERVABLE_PARAMETERS = 'observableParameters'
NOISE_PARAMETERS = 'noiseParameters'
DATASET_ID = 'datasetId'
REPLICATE_ID = 'replicateId'

MEASUREMENT_DF_REQUIRED_COLS = [
    OBSERVABLE_ID, SIMULATION_CONDITION_ID, MEASUREMENT, TIME]

MEASUREMENT_DF_OPTIONAL_COLS = [
    PREEQUILIBRATION_CONDITION_ID, OBSERVABLE_PARAMETERS,
    NOISE_PARAMETERS,
    DATASET_ID, REPLICATE_ID]

# Canonical column order: required and optional columns interleaved.
MEASUREMENT_DF_COLS = [
    MEASUREMENT_DF_REQUIRED_COLS[0], MEASUREMENT_DF_OPTIONAL_COLS[0],
    *MEASUREMENT_DF_REQUIRED_COLS[1:], *MEASUREMENT_DF_OPTIONAL_COLS[1:]]


# PARAMETERS

# Column names of the PEtab parameter table.
PARAMETER_ID = 'parameterId'
PARAMETER_NAME = 'parameterName'
PARAMETER_SCALE = 'parameterScale'
LOWER_BOUND = 'lowerBound'
UPPER_BOUND = 'upperBound'
NOMINAL_VALUE = 'nominalValue'
ESTIMATE = 'estimate'
INITIALIZATION_PRIOR_TYPE = 'initializationPriorType'
INITIALIZATION_PRIOR_PARAMETERS = 'initializationPriorParameters'
OBJECTIVE_PRIOR_TYPE = 'objectivePriorType'
OBJECTIVE_PRIOR_PARAMETERS = 'objectivePriorParameters'

PARAMETER_DF_REQUIRED_COLS = [
    PARAMETER_ID, PARAMETER_SCALE, LOWER_BOUND, UPPER_BOUND, ESTIMATE]

PARAMETER_DF_OPTIONAL_COLS = [
    PARAMETER_NAME, NOMINAL_VALUE,
    INITIALIZATION_PRIOR_TYPE, INITIALIZATION_PRIOR_PARAMETERS,
    OBJECTIVE_PRIOR_TYPE, OBJECTIVE_PRIOR_PARAMETERS]

PARAMETER_DF_COLS = [
    PARAMETER_DF_REQUIRED_COLS[0], PARAMETER_DF_OPTIONAL_COLS[0],
    *PARAMETER_DF_REQUIRED_COLS[1:], *PARAMETER_DF_OPTIONAL_COLS[1:]]

# Prior-kind prefixes (initialization vs. objective priors).
INITIALIZATION = 'initialization'
OBJECTIVE = 'objective'


# CONDITIONS

# Column names of the PEtab condition table.
CONDITION_ID = 'conditionId'
CONDITION_NAME = 'conditionName'


# OBSERVABLES

# Column names of the PEtab observable table.
OBSERVABLE_NAME = 'observableName'
OBSERVABLE_FORMULA = 'observableFormula'
NOISE_FORMULA = 'noiseFormula'
OBSERVABLE_TRANSFORMATION = 'observableTransformation'
NOISE_DISTRIBUTION = 'noiseDistribution'

OBSERVABLE_DF_REQUIRED_COLS = [
    OBSERVABLE_ID, OBSERVABLE_FORMULA, NOISE_FORMULA]

OBSERVABLE_DF_OPTIONAL_COLS = [
    OBSERVABLE_NAME, OBSERVABLE_TRANSFORMATION, NOISE_DISTRIBUTION]

OBSERVABLE_DF_COLS = [
    *OBSERVABLE_DF_REQUIRED_COLS, *OBSERVABLE_DF_OPTIONAL_COLS]


# TRANSFORMATIONS

# Supported observable transformations.
LIN = 'lin'
LOG = 'log'
LOG10 = 'log10'

OBSERVABLE_TRANSFORMATIONS = [LIN, LOG, LOG10]


# NOISE MODELS

# Supported prior / noise distribution identifiers.
UNIFORM = 'uniform'
PARAMETER_SCALE_UNIFORM = 'parameterScaleUniform'
NORMAL = 'normal'
PARAMETER_SCALE_NORMAL = 'parameterScaleNormal'
LAPLACE = 'laplace'
PARAMETER_SCALE_LAPLACE = 'parameterScaleLaplace'
LOG_NORMAL = 'logNormal'
LOG_LAPLACE = 'logLaplace'
DISCRETE = "discrete"
PARAMETER_VALUE_DISCRETE = "parameter_value_discrete"

PRIOR_TYPES = [
    UNIFORM, NORMAL, LAPLACE, LOG_NORMAL, LOG_LAPLACE,
    PARAMETER_SCALE_UNIFORM, PARAMETER_SCALE_NORMAL, PARAMETER_SCALE_LAPLACE,
    DISCRETE, PARAMETER_VALUE_DISCRETE]

NOISE_MODELS = [NORMAL, LAPLACE]


# VISUALIZATION

# Column names of the PEtab visualization table.
PLOT_ID = 'plotId'
PLOT_NAME = 'plotName'
PLOT_TYPE_SIMULATION = 'plotTypeSimulation'
PLOT_TYPE_DATA = 'plotTypeData'
X_VALUES = 'xValues'
X_OFFSET = 'xOffset'
X_LABEL = 'xLabel'
X_SCALE = 'xScale'
Y_VALUES = 'yValues'
Y_OFFSET = 'yOffset'
Y_LABEL = 'yLabel'
Y_SCALE = 'yScale'
LEGEND_ENTRY = 'legendEntry'

VISUALIZATION_DF_REQUIRED_COLS = [PLOT_ID]

VISUALIZATION_DF_OPTIONAL_COLS = [
    PLOT_NAME, PLOT_TYPE_SIMULATION, PLOT_TYPE_DATA, X_VALUES, X_OFFSET,
    X_LABEL, X_SCALE, Y_VALUES, Y_OFFSET, Y_LABEL, Y_SCALE, LEGEND_ENTRY,
    DATASET_ID]

VISUALIZATION_DF_COLS = [
    *VISUALIZATION_DF_REQUIRED_COLS, *VISUALIZATION_DF_OPTIONAL_COLS]

VISUALIZATION_DF_SUBPLOT_LEVEL_COLS = [
    PLOT_ID, PLOT_NAME, PLOT_TYPE_SIMULATION, PLOT_TYPE_DATA,
    X_LABEL, X_SCALE, Y_LABEL, Y_SCALE]

VISUALIZATION_DF_SINGLE_PLOT_LEVEL_COLS = [
    X_VALUES, X_OFFSET, Y_VALUES, Y_OFFSET, LEGEND_ENTRY, DATASET_ID]

# Plot kinds.
LINE_PLOT = 'LinePlot'
BAR_PLOT = 'BarPlot'
SCATTER_PLOT = 'ScatterPlot'
PLOT_TYPES_SIMULATION = [LINE_PLOT, BAR_PLOT, SCATTER_PLOT]

# Aggregation modes for replicate measurements.
MEAN_AND_SD = 'MeanAndSD'
MEAN_AND_SEM = 'MeanAndSEM'
REPLICATE = 'replicate'
PROVIDED = 'provided'
PLOT_TYPES_DATA = [MEAN_AND_SD, MEAN_AND_SEM, REPLICATE, PROVIDED]


# YAML

# Keys of the PEtab problem YAML file.
FORMAT_VERSION = 'format_version'
PARAMETER_FILE = 'parameter_file'
PROBLEMS = 'problems'
MODEL_FILES = 'model_files'
CONDITION_FILES = 'condition_files'
MEASUREMENT_FILES = 'measurement_files'
OBSERVABLE_FILES = 'observable_files'
VISUALIZATION_FILES = 'visualization_files'
OBJECTIVE_FILE = 'objective_file'


# MORE
SIMULATION = 'simulation'
RESIDUAL = 'residual'
NOISE_VALUE = 'noiseValue'
| StarcoderdataPython |
124710 | """
Created by <NAME>, Sep. 2018.
FLOW Lab
Brigham Young University
"""
import unittest
import numpy as np
from _porteagel_fortran import porteagel_analyze, x0_func, theta_c_0_func, sigmay_func, sigma_spread_func
from _porteagel_fortran import sigmaz_func, wake_offset_func, deltav_func, deltav_near_wake_lin_func
from _porteagel_fortran import overlap_area_func, wake_combination_func, added_ti_func, k_star_func
from _porteagel_fortran import ct_to_axial_ind_func, wind_shear_func, discontinuity_point_func, smooth_max
from _porteagel_fortran import interpolation, hermite_spline, point_velocity_with_shear_func
from openmdao.api import Problem, Group
def power_func_v80(v):
    """Quintic power-curve fit for the Vestas V80 (from Niayifar 2016)."""
    # Same polynomial, same evaluation order as the published fit.
    return (0.17819 * v ** 5 - 6.5198 * v ** 4 + 90.623 * v ** 3
            - 574.62 * v ** 2 + 1727.2 * v - 1975.0)
class test_basic_subroutines(unittest.TestCase):
    def setUp(self):
        """Shared turbine/wake parameters used by the hand-calculation tests."""
        self.tolerance = 1E-6
        self.d = 126.4           # rotor diameter, m
        self.yaw = np.pi/6.      # yaw angle, rad
        self.ct = 0.8            # thrust coefficient
        self.alpha = 2.32
        self.beta = 0.154
        self.ti = 0.1            # turbulence intensity
        self.ky = 0.25           # lateral wake-spread rate
        self.kz = 0.2            # vertical wake-spread rate
        self.wind_speed = 8.0    # m/s
    def test_x0_func_hand_calc(self):
        # Far-wake onset distance against a hand calculation for the
        # baseline parameters from setUp.
        x0 = x0_func(self.d, self.yaw, self.ct, self.alpha, self.ti, self.beta)
        self.assertAlmostEqual(x0, 353.2313474, delta=self.tolerance)

    def test_x0_func_data_yaw0(self):
        # Far-wake onset (normalized by rotor diameter) vs. reference data,
        # zero-yaw case; looser delta because the target comes from data.
        rotor_diameter = 0.15  # m
        yaw = 0.0*np.pi/180. #radians
        ct = 0.8214036062840235
        ti = 0.074
        #
        # 3.77839335632898, 0.6643546778702326
        # 3.704230762943225, 0.7361200568897026
        # 3.849186706118913, 0.7866577299700839
        # 3.848583479099574, 0.8214036062840235

        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)

        self.assertAlmostEqual(x0/rotor_diameter, 3.862413891540104, delta=1E-2)

    def test_x0_func_data_yaw10(self):
        # Same comparison at 10 degrees of yaw.
        rotor_diameter = 0.15  # m
        yaw = 10.0 * np.pi / 180.  # radians
        ct = 0.7866577299700839
        ti = 0.074

        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)

        self.assertAlmostEqual(x0/rotor_diameter, 3.973368012202963, delta=1E-1)

    def test_x0_func_data_yaw20(self):
        # Same comparison at 20 degrees of yaw.
        rotor_diameter = 0.15  # m
        yaw = 20.0 * np.pi / 180.  # radians
        ct = 0.7361200568897026
        ti = 0.074

        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)

        self.assertAlmostEqual(x0/rotor_diameter, 4.051040798613613, delta=1E-1)

    def test_x0_func_data_yaw30(self):
        # Same comparison at 30 degrees of yaw.
        rotor_diameter = 0.15  # m
        yaw = 30.0 * np.pi / 180.  # radians
        ct = 0.6643546778702326
        ti = 0.074

        x0 = x0_func(rotor_diameter, yaw, ct, self.alpha, ti, self.beta)

        self.assertAlmostEqual(x0/rotor_diameter, 4.053814723717636, delta=1E-1)
    def test_discontinuity_point_func(self):
        # Location where the near-wake linear model hands off to the far wake.
        x0 = 353.0
        xd = discontinuity_point_func(x0, self.d, self.ky, self.kz, self.yaw, self.ct)
        self.assertAlmostEqual(xd, 335.5180515, delta=self.tolerance)

    def test_sigmay_func(self):
        # Lateral wake width at x = 500 m in the far wake.
        x = 500.0
        x0 = 353.0
        xd = sigmay_func(x, x0, self.ky, self.d, self.yaw)
        self.assertAlmostEqual(xd, 75.45193794, delta=self.tolerance)

    def test_sigmaz_func(self):
        # Vertical wake width at x = 500 m in the far wake.
        x = 500.0
        x0 = 353.0
        xd = sigmaz_func(x, x0, self.kz, self.d)
        self.assertAlmostEqual(xd, 74.08914857, delta=self.tolerance)

    def test_theta_c_0_func(self):
        # Initial wake-deflection angle for the setUp yaw/ct values.
        theta_c_0 = theta_c_0_func(self.yaw, self.ct)
        self.assertAlmostEqual(theta_c_0, 0.080852297, delta=self.tolerance)

    def test_wake_offset_func_near_wake(self):
        # Lateral wake-center offset upstream of the far-wake onset (x < x0).
        x = 200.
        theta_c_0 = 0.0808
        sigmay = 36.
        sigmaz = 43.
        x0 = 353.
        delta = wake_offset_func(x, self.d, theta_c_0, x0, self.yaw, self.ky, self.kz, self.ct, sigmay, sigmaz)
        self.assertAlmostEqual(delta, 16.16, delta=self.tolerance)

    def test_wake_offset_func_far_wake(self):
        # Lateral wake-center offset downstream of the far-wake onset (x > x0).
        x = 500.
        x0 = 353.
        theta_c_0 = 0.0808
        sigmay = 75.45193794
        sigmaz = 74.08914857
        delta = wake_offset_func(x, self.d, theta_c_0, x0, self.yaw, self.ky, self.kz, self.ct, sigmay, sigmaz)
        self.assertAlmostEqual(delta, 33.89352568, delta=self.tolerance)
    def test_deltav_func_2016(self):
        # Far-wake velocity deficit, 2016 model version.
        # NOTE(review): the locals below shadow the setUp values; ky/kz are
        # assigned but self.ky is what is actually passed to deltav_func.
        d = 126.4
        yaw = np.pi / 6.
        ky = 0.25
        kz = 0.2
        sigmay = 75.0
        sigmaz = 74.0
        ct = 0.8
        z = 100.0
        zh = 90.0
        wec_factor = 1.0
        y = 50.0
        delta = 33.89
        x = 500.
        # Offsets from the (deflected) wake center.
        deltay = y-delta
        deltaz = z-zh
        deltav = deltav_func(deltay, deltaz, ct, yaw, sigmay, sigmaz, d, 2016, self.ky, x, wec_factor, sigmay, sigmaz)
        self.assertAlmostEqual(deltav, 0.1293410999394427, delta=self.tolerance)

    def test_deltav_func_2014(self):
        # Far-wake velocity deficit, 2014 model version, same geometry.
        d = 126.4
        yaw = np.pi / 6.
        ky = 0.25
        kz = 0.2
        sigmay = 75.0
        sigmaz = 74.0
        ct = 0.8
        z = 100.0
        zh = 90.0
        wec_factor = 1.0
        y = 50.0
        delta = 33.89
        x = 500.
        deltay = y-delta
        deltaz = z-zh
        deltav = deltav_func(deltay, deltaz, ct, yaw, sigmay, sigmaz, d, 2014, self.ky, x, wec_factor, sigmay, sigmaz)
        self.assertAlmostEqual(deltav, 0.03264659097, delta=self.tolerance)
def test_near_deltav_func_2014_rotor_location(self):
version = 2014
x0 = 353.0
xd = 335.0
yaw = 0.0
sigmay_spread = 45.
sigmaz_spread = 45.
sigmay_0 = 50.0
sigmaz_0 = 50.0
sigmay_d = 40.0
sigmaz_d = 40.0
wec_factor = 1.0
deltay = 100.0
deltaz = 5.0
x = 0.0
deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd, sigmay_d,
sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
self.assertAlmostEqual(deltav, 0.00048145926305030354, delta=self.tolerance)
def test_near_deltav_func_2014_midrange_location(self):
version = 2014
x0 = 353.0
xd = 335.0
yaw = 0.0
sigmay_spread = 45.
sigmaz_spread = 45.
sigmay_0 = 50.0
sigmaz_0 = 50.0
sigmay_d = 40.0
sigmaz_d = 40.0
wec_factor = 1.0
deltay = 100.0
deltaz = 5.0
x = 200.0
deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd, sigmay_d,
sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
self.assertAlmostEqual(deltav, 0.027941992346249663, delta=self.tolerance)
def test_near_deltav_func_2014_x0_location(self):
version = 2014
x0 = 353.0
xd = 335.0
yaw = 0.0
sigmay_spread = 45.
sigmaz_spread = 45.
sigmay_0 = 50.0
sigmaz_0 = 50.0
sigmay_d = 40.0
sigmaz_d = 40.0
wec_factor = 1.0
deltay = 100.0
deltaz = 5.0
x = 353.
deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
sigmay_d,
sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
self.assertAlmostEqual(deltav, 0.0401329549842686, delta=self.tolerance)
def test_near_deltav_func_2016_rotor_location(self):
version = 2016
x0 = 353.0
xd = 335.0
yaw = np.pi/6.
sigmay_spread = 45.
sigmaz_spread = 45.
sigmay_0 = 50.0
sigmaz_0 = 50.0
sigmay_d = 40.0
sigmaz_d = 40.0
wec_factor = 1.0
deltay = 100.0
deltaz = 5.0
x = 0.0
deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
sigmay_d,
sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
self.assertAlmostEqual(deltav, 0.05319773340098457, delta=self.tolerance)
def test_near_deltav_func_2016_midrange_location(self):
version = 2016
x0 = 353.0
xd = 335.0
yaw = np.pi/6.
sigmay_spread = 45.
sigmaz_spread = 45.
sigmay_0 = 50.0
sigmaz_0 = 50.0
sigmay_d = 40.0
sigmaz_d = 40.0
wec_factor = 1.0
deltay = 100.0
deltaz = 5.0
x = 200.0
deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
sigmay_d,
sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
self.assertAlmostEqual(deltav, 0.0388723745762739, delta=self.tolerance)
def test_near_deltav_func_2016_x0_location(self):
version = 2016
x0 = 353.0
xd = 335.0
yaw = np.pi/6.
sigmay_spread = 45.
sigmaz_spread = 45.
sigmay_0 = 50.0
sigmaz_0 = 50.0
sigmay_d = 40.0
sigmaz_d = 40.0
wec_factor = 1.0
deltay = 100.0
deltaz = 5.0
x = 353.
deltav = deltav_near_wake_lin_func(deltay, deltaz, self.ct, yaw, sigmay_0, sigmaz_0, x0, self.d, x, xd,
sigmay_d,
sigmaz_d, version, self.ky, x, sigmay_spread, sigmaz_spread, wec_factor)
self.assertAlmostEqual(deltav, 0.027913475075370238, delta=self.tolerance)
def test_wake_combination_func_Lissaman1979(self):
Uk = 7.0
deltav = 0.05
wake_combination_method = 0
deficit_sum = 2.0
new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
self.assertAlmostEqual(new_sum, 2.4, delta=self.tolerance)
def test_wake_combination_func_Katic1986(self):
Uk = 7.0
deltav = 0.05
wake_combination_method = 2
deficit_sum = 2.0
new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
self.assertAlmostEqual(new_sum, 2.039607805437114, delta=self.tolerance)
def test_wake_combination_func_Voutsinas1990(self):
Uk = 7.0
deltav = 0.05
wake_combination_method = 3
deficit_sum = 2.0
new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
self.assertAlmostEqual(new_sum, 2.0303940504246953, delta=self.tolerance)
def test_wake_combination_func_Niayifar2016(self):
Uk = 7.0
deltav = 0.05
wake_combination_method = 1
deficit_sum = 2.0
new_sum = wake_combination_func(self.wind_speed, Uk, deltav, wake_combination_method, deficit_sum)
self.assertAlmostEqual(new_sum, 2.35, delta=self.tolerance)
def test_wind_shear_func(self):
z = 90.0
zo = 2.0
zr = 80.0
psi = 0.15
wind_velocity_with_shear = wind_shear_func(z, self.wind_speed, zr, zo, psi)
self.assertAlmostEqual(wind_velocity_with_shear, 8.14607111996, delta=self.tolerance)
def test_k_star_func(self):
ti_ust = 0.1
kstar = k_star_func(ti_ust)
self.assertAlmostEqual(kstar, 0.042048, delta=self.tolerance)
def test_ct_to_axial_ind_func_normal_ct(self):
ct = 0.84
axial_induction = ct_to_axial_ind_func(ct)
self.assertAlmostEqual(axial_induction, 0.3, delta=self.tolerance)
def test_ct_to_axial_ind_func_high_ct(self):
ct = 0.97
axial_induction = ct_to_axial_ind_func(ct)
self.assertAlmostEqual(axial_induction, 0.4119957249, delta=self.tolerance)
def test_smooth_max(self):
x = 12.
y = 13.
s = 100.
smax1 = smooth_max(s, x, y)
self.assertAlmostEqual(smax1, 13.0, delta=self.tolerance)
def test_overlap_area_func_rotor_all_in_wake(self):
turbiney = 50.
turbinez = 90.
rotor_diameter = 100.
wake_center_y = 0.0
wake_center_z = 90.0
wake_diameter = 200.
wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
self.assertAlmostEqual(wake_overlap, np.pi*rotor_diameter**2/4, delta=self.tolerance)
def test_overlap_area_func_rotor_all_in_wake_perfect_overlap(self):
turbiney = 0.
turbinez = 90.
rotor_diameter = 100.
wake_center_y = 0.0
wake_center_z = 90.0
wake_diameter = 200.
wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z,
wake_diameter)
self.assertAlmostEqual(wake_overlap, np.pi * rotor_diameter ** 2 / 4, delta=self.tolerance)
def test_overlap_area_func_wake_all_in_rotor(self):
turbiney = 50.
turbinez = 90.
rotor_diameter = 200.
wake_center_y = 0.0
wake_center_z = 90.0
wake_diameter = 100.
wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
self.assertAlmostEqual(wake_overlap, np.pi*wake_diameter**2/4, delta=self.tolerance)
def test_overlap_area_func_wake_all_in_rotor_perfect_overlap(self):
turbiney = 0.
turbinez = 90.
rotor_diameter = 200.
wake_center_y = 0.0
wake_center_z = 90.0
wake_diameter = 100.
wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
self.assertAlmostEqual(wake_overlap, np.pi*wake_diameter**2/4, delta=self.tolerance)
def test_overlap_area_func_no_overlap(self):
turbiney = 0.
turbinez = 90.
rotor_diameter = 100.
wake_center_y = 100.0
wake_center_z = 90.0
wake_diameter = 100.
wake_overlap = overlap_area_func(turbiney, turbinez, rotor_diameter, wake_center_y, wake_center_z, wake_diameter)
self.assertAlmostEqual(wake_overlap, 0.0, delta=self.tolerance)
#TODO add tests for partial overlap
class test_added_ti_func(unittest.TestCase):
    """Regression tests for the Niayifar (2016) added-turbulence model."""

    def setUp(self):
        self.tolerance = 1E-2
        self.yaw = 0.0
        self.ct = 0.8
        self.alpha = 2.32
        self.beta = 0.154
        self.ti = 0.1
        self.ky = 0.022
        self.kz = 0.022
        self.wind_speed = 8.0
        self.TI = 0.077
        self.x = 560.
        self.rotor_diameter = 80.
        self.deltay = 0.
        self.wake_height = 70.
        self.turbine_height = 70.
        self.sm_smoothing = 700.

    def _added_ti(self, method, ti_ust, area_ratio_in=0.0, ti_dst_in=0.0):
        """Call added_ti_func with the shared fixture values and the given knobs."""
        return added_ti_func(self.TI, self.ct, self.x, self.ky, self.rotor_diameter,
                             self.rotor_diameter, self.deltay, self.wake_height,
                             self.turbine_height, self.sm_smoothing, ti_ust,
                             method, area_ratio_in, ti_dst_in)

    def test_added_ti_func_Niayifar_2016_max_2nd_turb(self):
        """Hard-max TI combination (method 4) at the second turbine."""
        _, ti_dst = self._added_ti(4, 0.077)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)

    def test_added_ti_func_Niayifar_2016_max_3rd_turb(self):
        """Hard-max TI combination (method 4) at the third turbine."""
        _, ti_dst = self._added_ti(4, 0.1476)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)

    def test_added_ti_func_Niayifar_2016_smoothmax_2nd_turb(self):
        """Smooth-max TI combination (method 5) at the second turbine."""
        _, ti_dst = self._added_ti(5, 0.077)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)

    def test_added_ti_func_Niayifar_2016_smoothmax_3rd_turb(self):
        """Smooth-max TI combination (method 5) at the third turbine."""
        _, ti_dst = self._added_ti(5, 0.1476, area_ratio_in=.05, ti_dst_in=0.077)
        self.assertAlmostEqual(ti_dst, 0.1476, delta=self.tolerance)
class test_point_velocity_with_shear(unittest.TestCase):
    """Regression test of the shear-corrected point velocity downstream of a
    single yawed turbine (order of the positional arguments is critical)."""
    def setUp(self):
        self.tolerance = 1E-2
        self.turbI = -1  # -1: sample an arbitrary flow point, not a turbine location
        self.wake_combination_method = 1
        self.wake_model_version = 2016
        self.sorted_x_idx = np.array([0])
        self.rotorDiameter = np.array([0.15])
        # Sample point: five diameters downstream, slightly offset laterally, at hub height.
        self.pointX = self.rotorDiameter*5.
        self.pointY = 0.24*self.rotorDiameter
        self.pointZ = 0.125
        self.tol = 1E-12
        self.alpha = 2.32
        self.beta = 0.154
        self.expratemultiplier = 1.0
        self.wec_factor = 1.0
        self.wind_speed = 4.88
        self.z_ref = 0.125
        self.z_0 = 0.000022
        self.shear_exp = 0.1
        self.turbineXw = np.array([0])
        self.turbineYw = np.array([0])
        self.turbineZ = np.array([0.125])
        self.yaw = np.array([20. * np.pi / 180.0])
        self.wtVelocity = np.array([self.wind_speed])
        self.Ct_local = 0.7361200568897026 * np.ones_like(self.turbineXw) # np.array([0.7374481936835376])
        self.TIturbs = 0.025 * np.ones_like(self.turbineXw) # *np.array([0.01]) #np.array([0.001]) #TODO check point velocity tests and ti input
        self.ky_local = 0.022 # np.array([0.3837*TIturbs[0] + 0.003678])
        self.kz_local = 0.022 # np.array([0.3837*TIturbs[0] + 0.003678])
    def test_point_velocity_with_shear(self):
        """Normalized velocity at the sample point matches the reference value."""
        point_velocity_with_shear = point_velocity_with_shear_func(self.turbI, self.wake_combination_method, self.wake_model_version,
                                                                   self.sorted_x_idx, self.pointX, self.pointY, self.pointZ, self.tol,
                                                                   self.alpha, self.beta, self.expratemultiplier, self.wec_factor,
                                                                   self.wind_speed, self.z_ref, self.z_0, self.shear_exp, self.turbineXw,
                                                                   self.turbineYw, self.turbineZ, self.rotorDiameter, self.yaw,
                                                                   self.wtVelocity, self.Ct_local, self.TIturbs, self.ky_local,
                                                                   self.kz_local)
        self.assertAlmostEqual(point_velocity_with_shear/self.wind_speed, 0.406, delta=self.tolerance)
class test_sigma_spread(unittest.TestCase):
    """Regression tests for sigma_spread_func over several downstream locations
    and WEC (diameter/angle) relaxation settings."""

    def setUp(self):
        self.tolerance = 1E-6
        self.d = 126.4
        self.yaw = np.pi / 6.
        self.ct = 0.8
        self.alpha = 2.32
        self.beta = 0.154
        self.ti = 0.1
        self.ky = 0.25
        self.kz = 0.2
        x_locations = np.array([500.0, 500.0, 500.0, 200.0, -10.0])
        wec_diameter = np.array([1.0, 2.0, 1.0, 1.0, 1.0])
        wec_angle = np.array([0.0, 0.0, 45.0, 0.0, 0.0])
        sigma_0, sigma_d = 38.7, 34.2
        x0 = 353.0
        self.sigma_spread = np.array(
            [sigma_spread_func(x, x0, self.ky, sigma_0, sigma_d, angle, diam)
             for x, angle, diam in zip(x_locations, wec_angle, wec_diameter)])
        self.correct_results = np.array([75.45, 150.9, 534.2, 36.7495751, 0.0])

    def test_sigma_spread_func_case1(self):
        self.assertAlmostEqual(self.sigma_spread[0], self.correct_results[0], delta=self.tolerance)

    def test_sigma_spread_func_case2(self):
        self.assertAlmostEqual(self.sigma_spread[1], self.correct_results[1], delta=self.tolerance)

    def test_sigma_spread_func_case3(self):
        self.assertAlmostEqual(self.sigma_spread[2], self.correct_results[2], delta=self.tolerance)

    def test_sigma_spread_func_case4(self):
        self.assertAlmostEqual(self.sigma_spread[3], self.correct_results[3], delta=self.tolerance)

    def test_sigma_spread_func_case5(self):
        self.assertAlmostEqual(self.sigma_spread[4], self.correct_results[4], delta=self.tolerance)
# class test_sigma_spread_too_high_error(unittest.TestCase):
#
# def setUp(self):
# self.tolerance = 1E-6
# self.d = 126.4
# self.yaw = np.pi / 6.
# self.ct = 0.8
# self.alpha = 2.32
# self.beta = 0.154
# self.ti = 0.1
# self.ky = 0.25
# self.kz = 0.2
#
# self.x = 500.0
# self.xi_d = 1.0
# self.xi_a = 90.000
#
#
# self.sigma_0 = 38.7
# self.sigma_d = 34.2
#
# self.x0 = 353.0
#
# def test_sigma_spread_too_high(self):
# self.assertRaises(sigma_spread_func(self.x, self.x0, self.ky, self.sigma_0, self.sigma_d, self.xi_a, self.xi_d))
#
class test_hermite_spline(unittest.TestCase):
    """Check hermite_spline interpolation against simple polynomial fixtures."""

    def test_linear(self):
        """Approximate y = x - 1; the interval midpoint must be exact."""
        x, x0, x1 = 1., 0., 2.
        y0, dy0, y1, dy1 = -1., 1., 1., 1.
        self.assertEqual(hermite_spline(x, x0, x1, y0, dy0, y1, dy1), 0.0)

    def test_cubic(self):
        """Approximate y = x**3; the origin must be reproduced exactly."""
        x, x0, x1 = 0., -1., 1.
        y0, dy0, y1, dy1 = 0., 2., 0., 2.
        self.assertEqual(hermite_spline(x, x0, x1, y0, dy0, y1, dy1), 0.0)

    def test_parabolic(self):
        """Approximate y = x**2; the vertex at the origin must be exact."""
        x, x0, x1 = 0., -1., 1.
        y0, dy0, y1, dy1 = 1., -2., 1., 2.
        self.assertEqual(hermite_spline(x, x0, x1, y0, dy0, y1, dy1), 0.0)
class test_interpolation(unittest.TestCase):
    """Check the interpolation dispatcher (interp_type selects the scheme)."""

    # def test_cubic(self):
    #
    #     # define interpolation type
    #     interp_type = 0
    #
    #     # set up points for interpolation
    #     x = np.array([-1., -0.5, 0., 0.5, 1.])
    #     y = np.array([-1., -0.125, 0., 0.125, 1.])
    #
    #     # set location of interpolation
    #     xval = 0.125
    #
    #     # get interpolated y value
    #     yval = interpolation(interp_type, x, y, xval, 3.0, 3.0, True)
    #
    #     self.assertEqual(yval, 0.0625)

    def test_linear(self):
        """Linear interpolation (interp_type=1) halfway up a triangular hat."""
        xs = np.array([0., 1., 2.])
        ys = np.array([0., 1., 0.])
        yval = interpolation(1, xs, ys, 0.5, 0.0, 0.0, False)
        self.assertEqual(yval, 0.5)
class test_ctcp_curve(unittest.TestCase):
    """GaussianWake must warn when the loaded CT curve contains values above 1."""

    def setUp(self):
        import cPickle as pickle
        curve = pickle.load(open("./input_files/NREL5MWCPCT_dict.p", "rb"))
        n_points = curve['wind_speed'].size
        cp_data = np.zeros([n_points])
        ct_data = np.zeros([n_points])
        wind_speed_data = np.zeros([n_points])
        cp_data[:] = curve['CP']
        ct_data[:] = curve['CT']
        wind_speed_data[:] = curve['wind_speed']
        self.ct_data = ct_data
        self.cp_data = cp_data
        self.wind_speed_data = wind_speed_data
        self.options = {'use_ct_curve': True,
                        'ct_curve_ct': self.ct_data,
                        'ct_curve_wind_speed': self.wind_speed_data}

    def test_5mw_ct_greater_than_1_warning(self):
        """The NREL 5 MW curve holds CT > 1 entries and must trigger a Warning."""
        from gaussianwake.gaussianwake import GaussianWake
        import pytest
        pytest.warns(Warning, GaussianWake, nTurbines=6, options=self.options)
class test_wec(unittest.TestCase):
    """Behavioral tests for the wake expansion continuation (WEC) parameters.

    On the wake axis the WEC relaxations must not change the predicted
    deficit; for a laterally offset downstream turbine they must widen the
    wake and deepen the deficit.
    """

    def setUp(self):
        # Load the NREL 5 MW CP/CT curves used by the wake model.
        filename = "./input_files/NREL5MWCPCT_dict.p"
        import cPickle as pickle
        data = pickle.load(open(filename, "rb"))
        cp_data = np.zeros([data['wind_speed'].size])
        ct_data = np.zeros([data['wind_speed'].size])
        wind_speed_data = np.zeros([data['wind_speed'].size])
        cp_data[:] = data['CP']
        ct_data[:] = data['CT']
        wind_speed_data[:] = data['wind_speed']
        self.ct_data = ct_data
        self.cp_data = cp_data
        self.wind_speed_data = wind_speed_data
        self.options = {'use_ct_curve': True,
                        'ct_curve_ct': self.ct_data,
                        'ct_curve_wind_speed': self.wind_speed_data}
        nTurbines = 2
        from gaussianwake.gaussianwake import GaussianWake
        prob = Problem(root=Group())
        prob.root.add('wakemodel', GaussianWake(nTurbines, options=self.options), promotes=['*'])
        prob.setup()
        prob['wind_speed'] = 8.
        self.prob = prob

    def _downstream_velocity(self, offset_y, spreading_angle, wec_factor):
        """Run the two-turbine model and return the downstream turbine's velocity."""
        prob = self.prob
        prob['turbineXw'] = np.array([0., 400.])
        prob['turbineYw'] = np.array([0., offset_y])
        prob['rotorDiameter'] = np.array([50., 50.])
        prob['model_params:wec_spreading_angle'] = spreading_angle
        prob['model_params:wec_factor'] = wec_factor
        prob.run_once()
        return prob['wtVelocity0'][1]

    def test_no_change_in_deficit_by_wake_spread_rate_multiplier_at_center(self):
        """Varying the spreading angle must not change the on-axis deficit."""
        wspeed0 = self._downstream_velocity(0., 0.0, 1.0)
        wspeed1 = self._downstream_velocity(0., 2.0, 1.0)
        self.assertEqual(wspeed1, wspeed0)

    def test_no_change_in_deficit_by_wake_diameter_multiplier_at_center(self):
        """Varying the wake diameter multiplier must not change the on-axis deficit.

        BUG FIX: this test previously varied ``wec_spreading_angle`` (an exact
        duplicate of the test above) instead of the ``wec_factor`` it is named
        for, so the diameter multiplier was never exercised.
        """
        wspeed0 = self._downstream_velocity(0., 0.0, 1.0)
        wspeed1 = self._downstream_velocity(0., 0.0, 2.0)
        self.assertEqual(wspeed1, wspeed0)

    def test_increase_deficit_by_wake_diameter_expansion(self):
        """An offset turbine sees a deeper deficit when the wake is widened."""
        wspeed0 = self._downstream_velocity(100., 0.0, 1.0)
        wspeed1 = self._downstream_velocity(100., 0.0, 2.0)
        self.assertGreater(wspeed0, wspeed1)

    def test_increase_deficit_by_wake_expansion_rate_multiplier(self):
        """An offset turbine sees a deeper deficit with a faster spread rate."""
        wspeed0 = self._downstream_velocity(100., 0.0, 1.0)
        wspeed1 = self._downstream_velocity(100., 2.0, 1.0)
        self.assertGreater(wspeed0, wspeed1)
class test_porteagel_analyze(unittest.TestCase):
    """Regression tests of the full porteagel_analyze solver against digitized
    Horns Rev row-averaged power data (Niayifar & Porte-Agel style comparison)."""
    #TODO improve tolerance of test - why can't we match more closely?
    def setUp(self):
        from plantenergy.utilities import sunflower_points
        self.tolerance = 1E-1
        self.wake_combination_method = 1
        self.wake_model_version = 2016
        self.rotor_diameter = 80.
        self.hub_height = 70.
        self.ct = 0.6
        self.alpha = 2.32
        self.beta = 0.154
        self.expratemultiplier = 1.0
        self.wec_factor = 1.0
        self.wind_speed = 8.0
        self.z_ref = self.hub_height
        self.z_0 = 0.0002
        self.shear_exp = 0.15
        self.yaw = 0.0
        self.wtVelocity = np.array([self.wind_speed])
        self.TI = 0.077
        self.ky = 0.3837*self.TI + 0.003678 # np.array([0.3837*TIturbs[0] + 0.003678])
        self.kz = 0.3837*self.TI + 0.003678 # np.array([0.3837*TIturbs[0] + 0.003678])
        rotorpoints = sunflower_points(100)
        self.RotorPointsY = rotorpoints[0] #np.array([0, .5, 1.0, 0., 0.0, -.5, -1.0, 0., 0.])
        self.RotorPointsZ = rotorpoints[1] #np.array([0, 0., 0., .5, 1.0, 0., 0.0, -0.5, -1.])
        # self.RotorPointsY = np.array([0])
        # self.RotorPointsZ = np.array([0])
        # NOTE(review): the 100-point sunflower sampling above is immediately
        # overwritten by this fixed 9-point rotor sampling - confirm intent.
        self.RotorPointsY = np.array([0, .5, 1.0, 0., 0.0, -.5, -1.0, 0., 0.])
        self.RotorPointsZ = np.array([0, 0., 0., .5, 1.0, 0., 0.0, -0.5, -1.])
        self.TI_calculation_method = 4
        self.calc_k_star = True
        self.print_ti = False
        self.interp_type = 1
        self.sm_smoothing = 700.
        # Horns Rev layout; coordinates in the file are in rotor diameters.
        loc_data = np.loadtxt('input_files/horns_rev_locations.txt', delimiter=',')
        turbineXw = loc_data[:, 0] * self.rotor_diameter
        turbineYw = loc_data[:, 1] * self.rotor_diameter
        turbineZ = np.ones_like(turbineXw) * self.hub_height
        sorted_x_idx = np.argsort(turbineXw, kind='heapsort')
        rotorDiameter = np.ones_like(turbineXw) * self.rotor_diameter
        Ct = np.ones_like(turbineXw) * self.ct
        yaw = np.ones_like(turbineXw) * self.yaw
        TI_turbs = np.ones_like(turbineXw) * self.TI
        use_ct_curve = True
        # ct_data = np.loadtxt('input_files/predicted_ct_vestas_v80_niayifar2016.txt', delimiter=',')
        ct_data = np.loadtxt('input_files/mfg_ct_vestas_v80_niayifar2016.txt', delimiter=',')
        ct_curve_wind_speed = ct_data[:, 0]
        ct_curve_ct = ct_data[:, 1]
        CalculateFlowField=False
        wtVelocity, _ = porteagel_analyze(turbineXw, sorted_x_idx, turbineYw, turbineZ,
                                          rotorDiameter, Ct, self.wind_speed,
                                          yaw, self.ky, self.kz, self.alpha, self.beta, TI_turbs, self.RotorPointsY,
                                          self.RotorPointsZ, np.array([0]), np.array([0]), np.array([0]),
                                          self.z_ref, self.z_0, self.shear_exp, self.wake_combination_method,
                                          self.TI_calculation_method, self.calc_k_star, self.wec_factor, self.print_ti,
                                          self.wake_model_version, self.interp_type, use_ct_curve,
                                          ct_curve_wind_speed, ct_curve_ct, self.sm_smoothing,
                                          self.expratemultiplier, CalculateFlowField)
        free_stream_power = power_func_v80(self.wind_speed)
        wtPower = power_func_v80(wtVelocity)
        # Average power over three interior columns (turbines 40+, 50+, 60+) of
        # each of the 10 rows, normalized by one turbine's free-stream power.
        self.norm_pow_ave_by_row = np.zeros(10)
        for i in np.arange(0, self.norm_pow_ave_by_row.size):
            pow_ave_row = np.average([wtPower[40 + i], wtPower[50 + i], wtPower[60 + i]])
            self.norm_pow_ave_by_row[i] = pow_ave_row / free_stream_power
    def test_wt_velocity_1_turb(self):
        """A lone turbine must see the undisturbed free-stream wind speed."""
        turbineXw = np.array([0.0])
        turbineYw = np.array([0.0])
        turbineZ = np.ones_like(turbineXw)*self.hub_height
        sorted_x_idx = np.argsort(turbineXw, kind='heapsort')
        rotorDiameter = np.ones_like(turbineXw)*self.rotor_diameter
        Ct = np.ones_like(turbineXw)*self.ct
        yaw = np.ones_like(turbineXw)*self.yaw
        TI_turbs = np.ones_like(turbineXw)*self.TI
        use_ct_curve = False
        ct_curve_wind_speed = np.array([self.wind_speed])
        ct_curve_ct = np.array([self.ct])
        CalculateFlowField=False
        wtVelocity, _ = porteagel_analyze(turbineXw, sorted_x_idx, turbineYw, turbineZ,
                                          rotorDiameter, Ct, self.wind_speed,
                                          yaw, self.ky, self.kz, self.alpha, self.beta, TI_turbs, self.RotorPointsY,
                                          self.RotorPointsZ, np.array([0]), np.array([0]), np.array([0]),
                                          self.z_ref, self.z_0, self.shear_exp, self.wake_combination_method,
                                          self.TI_calculation_method, self.calc_k_star, self.wec_factor, self.print_ti,
                                          self.wake_model_version, self.interp_type, use_ct_curve,
                                          ct_curve_wind_speed, ct_curve_ct, self.sm_smoothing,
                                          self.expratemultiplier, CalculateFlowField)
        self.assertAlmostEqual(wtVelocity, 8.0, delta=self.tolerance)
    # Digitized reference data (row number, normalized row-average power):
    #
    # 2.009085240790877, 0.4619246861924686
    # 2.0091082123225856, 0.46359832635983256
    # 3.003385019279678, 0.5037656903765689
    # 3.997271310197718, 0.515481171548117
    # 4.996498482238084, 0.516317991631799
    # 5.990212486668307, 0.515481171548117
    # 7.0003626220362625, 0.5121338912133888
    # 7.994042169168923, 0.5087866108786608
    # 8.99869062269259, 0.5046025104602508
    # 10.003339076216259, 0.5004184100418408
    def test_wt_velocity_row_1_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[0], 1.0, delta=self.tolerance)
    def test_wt_velocity_row_2_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[1], 0.4619246861924686, delta=self.tolerance)
    def test_wt_velocity_row_3_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[2], 0.5037656903765689, delta=self.tolerance)
    def test_wt_velocity_row_4_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[3], 0.515481171548117, delta=self.tolerance)
    def test_wt_velocity_row_5_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[4], 0.516317991631799, delta=self.tolerance)
    def test_wt_velocity_row_6_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[5], 0.515481171548117, delta=self.tolerance)
    def test_wt_velocity_row_7_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[6], 0.5121338912133888, delta=self.tolerance)
    def test_wt_velocity_row_8_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[7], 0.5087866108786608, delta=self.tolerance)
    def test_wt_velocity_row_9_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[8], 0.5046025104602508, delta=self.tolerance)
    def test_wt_velocity_row_10_of_horns_rev(self):
        self.assertAlmostEqual(self.norm_pow_ave_by_row[9], 0.5004184100418408, delta=self.tolerance)
if __name__ == "__main__":
    # Run the whole regression suite with per-test reporting when executed directly.
    unittest.main(verbosity=2)
3211021 | <filename>compiler/modules/single_level_column_mux.py
import design
import debug
from tech import drc, info
from vector import vector
import contact
from ptx import ptx
from globals import OPTS
class single_level_column_mux(design.design):
    """
    This module implements the columnmux bitline cell used in the design.
    Creates a single columnmux cell.
    """
    def __init__(self, tx_size):
        # tx_size scales the pass-transistor width in multiples of the
        # process minimum transistor width.
        name="single_level_column_mux_{}".format(tx_size)
        design.design.__init__(self, name)
        debug.info(2, "create single column mux cell: {0}".format(name))
        # NOTE(review): 'reload' is a builtin only in Python 2; under Python 3
        # this would need importlib.reload - confirm the targeted interpreter.
        c = reload(__import__(OPTS.bitcell))
        self.mod_bitcell = getattr(c, OPTS.bitcell)
        self.bitcell = self.mod_bitcell()
        self.ptx_width = tx_size * drc["minwidth_tx"]
        self.add_pin_list(["bl", "br", "bl_out", "br_out", "sel", "gnd"])
        self.create_layout()
    def create_layout(self):
        """Build the full cell: transistors, pins, routing, rails and wells."""
        self.add_ptx()
        self.pin_height = 2*self.m2_width
        # Cell pitch matches the bitcell so the mux can abut the array.
        self.width = self.bitcell.width
        self.height = self.nmos2.uy() + self.pin_height
        self.connect_poly()
        self.add_gnd_rail()
        self.add_bitline_pins()
        self.connect_bitlines()
        self.add_wells()
    def add_bitline_pins(self):
        """ Add the top and bottom pins to this cell """
        # Pins are aligned with the bitcell's BL/BR tracks so they line up
        # with the array above.
        bl_pos = vector(self.bitcell.get_pin("BL").lx(), 0)
        br_pos = vector(self.bitcell.get_pin("BR").lx(), 0)
        # bl and br
        self.add_layout_pin(text="bl",
                            layer="metal2",
                            offset=bl_pos + vector(0,self.height - self.pin_height),
                            height=self.pin_height)
        self.add_layout_pin(text="br",
                            layer="metal2",
                            offset=br_pos + vector(0,self.height - self.pin_height),
                            height=self.pin_height)
        # bl_out and br_out
        self.add_layout_pin(text="bl_out",
                            layer="metal2",
                            offset=bl_pos,
                            height=self.pin_height)
        self.add_layout_pin(text="br_out",
                            layer="metal2",
                            offset=br_pos,
                            height=self.pin_height)
    def add_ptx(self):
        """ Create the two pass gate NMOS transistors to switch the bitlines"""
        # Adds nmos1,nmos2 to the module
        self.nmos = ptx(width=self.ptx_width)
        self.add_mod(self.nmos)
        # Space it in the center
        nmos1_position = self.nmos.active_offset.scale(0,1) + vector(0.5*self.bitcell.width-0.5*self.nmos.active_width,0)
        self.nmos1=self.add_inst(name="mux_tx1",
                                 mod=self.nmos,
                                 offset=nmos1_position)
        self.connect_inst(["bl", "sel", "bl_out", "gnd"])
        # This aligns it directly above the other tx with gates abutting
        nmos2_position = nmos1_position + vector(0,self.nmos.active_height + self.poly_space)
        self.nmos2=self.add_inst(name="mux_tx2",
                                 mod=self.nmos,
                                 offset=nmos2_position)
        self.connect_inst(["br", "sel", "br_out", "gnd"])
    def connect_poly(self):
        """ Connect the poly gate of the two pass transistors """
        # A single vertical poly strap spans both gates and doubles as the
        # "sel" pin.
        height=self.nmos2.get_pin("G").uy() - self.nmos1.get_pin("G").by()
        self.add_layout_pin(text="sel",
                            layer="poly",
                            offset=self.nmos1.get_pin("G").ll(),
                            height=height)
    def connect_bitlines(self):
        """ Connect the bitlines to the mux transistors """
        # These are on metal2
        bl_pin = self.get_pin("bl")
        br_pin = self.get_pin("br")
        bl_out_pin = self.get_pin("bl_out")
        br_out_pin = self.get_pin("br_out")
        # These are on metal1
        nmos1_s_pin = self.nmos1.get_pin("S")
        nmos1_d_pin = self.nmos1.get_pin("D")
        nmos2_s_pin = self.nmos2.get_pin("S")
        nmos2_d_pin = self.nmos2.get_pin("D")
        # Add vias to bl, br_out, nmos2/S, nmos1/D
        self.add_via_center(layers=("metal1","via1","metal2"),
                            offset=bl_pin.bc())
        self.add_via_center(layers=("metal1","via1","metal2"),
                            offset=br_out_pin.uc())
        self.add_via_center(layers=("metal1","via1","metal2"),
                            offset=nmos2_s_pin.center())
        self.add_via_center(layers=("metal1","via1","metal2"),
                            offset=nmos1_d_pin.center())
        # bl -> nmos2/D on metal1
        # bl_out -> nmos2/S on metal2
        self.add_path("metal1",[bl_pin.ll(), vector(nmos2_d_pin.cx(),bl_pin.by()), nmos2_d_pin.center()])
        # halfway up, move over
        mid1 = bl_out_pin.uc().scale(1,0.5)+nmos2_s_pin.bc().scale(0,0.5)
        mid2 = bl_out_pin.uc().scale(0,0.5)+nmos2_s_pin.bc().scale(1,0.5)
        self.add_path("metal2",[bl_out_pin.uc(), mid1, mid2, nmos2_s_pin.bc()])
        # br -> nmos1/D on metal2
        # br_out -> nmos1/S on metal1
        self.add_path("metal1",[br_out_pin.uc(), vector(nmos1_s_pin.cx(),br_out_pin.uy()), nmos1_s_pin.center()])
        # halfway up, move over
        mid1 = br_pin.bc().scale(1,0.5)+nmos1_d_pin.uc().scale(0,0.5)
        mid2 = br_pin.bc().scale(0,0.5)+nmos1_d_pin.uc().scale(1,0.5)
        self.add_path("metal2",[br_pin.bc(), mid1, mid2, nmos1_d_pin.uc()])
    def add_gnd_rail(self):
        """ Add the gnd rails through the cell to connect to the bitcell array """
        gnd_pins = self.bitcell.get_pins("gnd")
        for gnd_pin in gnd_pins:
            # only use vertical gnd pins that span the whole cell
            if gnd_pin.layer == "metal2" and gnd_pin.height >= self.bitcell.height:
                gnd_position = vector(gnd_pin.lx(), 0)
                self.add_layout_pin(text="gnd",
                                    layer="metal2",
                                    offset=gnd_position,
                                    height=self.height)
    def add_wells(self):
        """ Add a well and implant over the whole cell. Also, add the pwell contact (if it exists) """
        # find right most gnd rail
        gnd_pins = self.bitcell.get_pins("gnd")
        right_gnd = None
        for gnd_pin in gnd_pins:
            if right_gnd == None or gnd_pin.lx()>right_gnd.lx():
                right_gnd = gnd_pin
        # Add to the right (first) gnd rail
        m1m2_offset = right_gnd.bc() + vector(0,0.5*self.nmos.poly_height)
        self.add_via_center(layers=("metal1", "via1", "metal2"),
                            offset=m1m2_offset)
        # Same location as the m1->m2 via above: the well tap stacks the
        # active contact under the via to tie the substrate to gnd.
        active_offset = right_gnd.bc() + vector(0,0.5*self.nmos.poly_height)
        self.add_via_center(layers=("active", "contact", "metal1"),
                            offset=active_offset,
                            implant_type="p",
                            well_type="p")
| StarcoderdataPython |
3396481 | <reponame>GPelayo/kwantiko
class PostDatabaseReader:
    """Abstract reader interface for post items; subclasses supply the data."""
    @property
    def post_items(self):
        """The collection of post items; must be overridden by subclasses."""
        raise NotImplementedError
1719873 | <gh_stars>0
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# ./image_browser.py
import hashlib
import os
import re
import sys
import unicodedata
import collections
import itertools
import subprocess
try:
#~ from PyQt5.QtCore import (
#~ QByteArray,
#~ QFile,
#~ QPoint,
#~ Qt,
#~ )
from PyQt5.QtWidgets import (
#~ QAction,
QShortcut,
QApplication,
#~ QFileDialog,
#~ QGridLayout,
#~ QLabel,
QMainWindow,
#~ QMenu,é
#~ QPushButton,
#~ QSizePolicy,
#~ QVBoxLayout,
QWidget,
)
from PyQt5.QtGui import (
#~ QMovie,
#~ QPixmap,
QKeySequence,
#~ QWheelEvent,
)
except ImportError:
sys.exit("This script needs the PyQt5 module to run.")
IMG_EXTENSIONS = [ # file extensions the browser accepts as images (lowercase, with dot)
    '.bmp',
    '.gif',
    '.jpg',
    '.jpeg',
    '.png',
    '.pbm',
    '.pgm',
    '.ppm',
    '.xbm',
    '.xpm',
    ]
def prev_current_and_next(an_iterable):
    """Yield (previous, current, next) triples for each item of *an_iterable*.

    The first triple carries None as its previous item and the last triple
    carries None as its next item.
    """
    prevs, items, nexts = itertools.tee(an_iterable, 3)
    prevs = itertools.chain([None], prevs)
    nexts = itertools.chain(itertools.islice(nexts, 1, None), [None])
    # BUG FIX: itertools.izip is Python 2 only and raises AttributeError here;
    # this script targets Python 3 (see shebang), where zip is already lazy.
    return zip(prevs, items, nexts)
def unc_string(a_string):
    """Return a lower-case slug of *a_string*.

    Accents are stripped via NFKD decomposition, spaces become underscores,
    any remaining non-alphanumeric character is removed, and runs of
    underscores are collapsed to a single one.
    """
    # strip accents: decompose, then drop the combining marks
    nkfd_form = unicodedata.normalize('NFKD', a_string)
    string = "".join(c for c in nkfd_form if not unicodedata.combining(c))
    # replace spaces with underscores
    string = string.strip().replace(' ', '_')
    # remove every character that is not alphanumeric or underscore
    string = re.sub(r'[^\w]', '', string)
    # BUG FIX: a single str.replace('__', '_') left '__' behind for runs of
    # three or more underscores; collapse every run in one pass instead.
    string = re.sub(r'_+', '_', string)
    return string.lower()
def sort_nicely(a_list):
    """Sort *a_list* of strings in place in natural order (e.g. img2 before img10)."""
    def tryint(chunk):
        return int(chunk) if chunk.isdigit() else chunk

    def natural_key(text):
        return [tryint(chunk) for chunk in re.split('([0-9]+)', text)]

    a_list.sort(key=natural_key)
def files_from_dir(a_dirpath):
    """Return the full paths of the files directly inside *a_dirpath*,
    sorted in natural (human) order."""
    dirpath, _subdirs, filenames = next(os.walk(a_dirpath))
    result = [os.path.join(dirpath, name) for name in filenames]
    sort_nicely(result)
    return result
def next_file(a_filepath):
    """Return the path that follows *a_filepath* in its directory listing
    (natural order), or None when it is the last file or is not present."""
    siblings = files_from_dir(os.path.dirname(a_filepath))
    try:
        position = siblings.index(a_filepath)
    except ValueError:
        return None
    if position < len(siblings) - 1:
        return siblings[position + 1]
    return None
def prev_file(a_filepath):
    """Return the path that precedes *a_filepath* in its directory listing
    (natural order), or None when it is the first file or is not present."""
    siblings = files_from_dir(os.path.dirname(a_filepath))
    try:
        position = siblings.index(a_filepath)
    except ValueError:
        return None
    if position > 0:
        return siblings[position - 1]
    return None
def prev_current_and_next_files(a_filepath):
    """Return a (previous, current, next) triple of file paths within the
    directory of *a_filepath*, using natural sort order.
    """
    return prev_file(a_filepath), a_filepath, next_file(a_filepath)
class Colors(object):
    """Conversions between 6-digit hex color codes and (r, g, b) triples."""

    @staticmethod
    def hex_to_rgb(hexcode):
        """ hexcode = 6 characters string hexcode (upper or lower case)
            Return a tuple of (red, green, blue) decimal values
        """
        # PERF FIX: the original rebuilt a 484-entry pair->int lookup dict on
        # every call; int(..., 16) parses each pair directly.
        # (Invalid input now raises ValueError instead of KeyError.)
        return (int(hexcode[0:2], 16),
                int(hexcode[2:4], 16),
                int(hexcode[4:6], 16))

    @staticmethod
    def rgb_to_hex(rgb, *, case='low'):
        """ rgb = tuple of (red, green, blue) decimal values
            Return a 6 characters string hexcode; case='up' for upper-case digits
        """
        lettercase = 'X' if case == 'up' else 'x'
        return format(rgb[0]<<16 | rgb[1]<<8 | rgb[2], '06'+lettercase)
class FileHash(object):
    """Compute hex digests of a file, streaming it in 4 KiB chunks so
    arbitrarily large files are hashed with constant memory.
    """

    _CHUNK = 4096  # read size per iteration

    @staticmethod
    def _feed(fname, hashes):
        """Stream the file at *fname* through every hash object in *hashes*.

        Shared by all public methods below; the original duplicated this
        read loop six times.
        """
        with open(fname, "rb") as f:
            for chunk in iter(lambda: f.read(FileHash._CHUNK), b""):
                for h in hashes:
                    h.update(chunk)

    @staticmethod
    def md5(fname):
        """ return md5 hash from file """
        h = hashlib.md5()
        FileHash._feed(fname, (h,))
        return h.hexdigest()

    @staticmethod
    def sha1(fname):
        """ return sha1 hash from file """
        h = hashlib.sha1()
        FileHash._feed(fname, (h,))
        return h.hexdigest()

    @staticmethod
    def sha256(fname):
        """ return sha256 hash from file """
        h = hashlib.sha256()
        FileHash._feed(fname, (h,))
        return h.hexdigest()

    @staticmethod
    def sha512(fname):
        """ return sha512 hash from file """
        h = hashlib.sha512()
        FileHash._feed(fname, (h,))
        return h.hexdigest()

    @staticmethod
    def sha(fname):
        """ return triple sha{1, 256, 512} hashes from file (single pass) """
        h1, h256, h512 = hashlib.sha1(), hashlib.sha256(), hashlib.sha512()
        FileHash._feed(fname, (h1, h256, h512))
        return h1.hexdigest(), h256.hexdigest(), h512.hexdigest()

    @staticmethod
    def all(fname):
        """ return all available hashes from file, sorted by algorithm name

        BUG FIX: shake_128/shake_256 are variable-length (XOF) digests whose
        hexdigest() requires an explicit length argument; the original code
        raised TypeError on them. A 32-byte output length is used for those.
        """
        hashes = {algo: hashlib.new(algo) for algo in hashlib.algorithms_available}
        FileHash._feed(fname, hashes.values())
        digests = {}
        for algo, h in hashes.items():
            try:
                digests[algo] = h.hexdigest()
            except TypeError:
                digests[algo] = h.hexdigest(32)  # shake_* needs a length
        return collections.OrderedDict(sorted(digests.items()))
class Qt5ImageWidget(QWidget):
    """Widget intended to display the current image (stub — no behaviour yet)."""
    def __init__(self, parent=None):
        super().__init__(parent)
class Qt5FSTreeView(QWidget):
    """Widget intended to show a filesystem tree (stub — no behaviour yet)."""
    def __init__(self, parent=None):
        super().__init__(parent)
class Qt5UI(QMainWindow):
    """Main application window: maximised, translucent grey, Ctrl+Q to quit."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setMinimumSize(800, 600)
        self.setWindowTitle("Image Viewer")
        # Semi-transparent grey background for this window class only.
        self.setStyleSheet("Qt5UI{background-color: rgba(110,110,110,210);}")
        self.showMaximized()
        # Ctrl+Q closes the window (and thereby ends the application).
        # Kept as an attribute so the QShortcut is not garbage-collected.
        self.shortcut = QShortcut(QKeySequence("Ctrl+Q"), self)
        self.shortcut.activated.connect(self.close)
def main_qt5(*args, **kwarg):
    """Launch the Qt GUI and block until the application exits.

    Called from the __main__ guard as main_qt5(sys.argv[1:]); extra
    positional/keyword arguments are accepted but unused.
    """
    # BUG FIX: the original passed [args] — a list containing a tuple — but
    # QApplication expects a flat sys.argv-style list of strings.
    app = QApplication(sys.argv)
    dia = Qt5UI()  # keep a reference so the window is not garbage-collected
    sys.exit(app.exec_())
def main(*args):
    """Debug entry point: print sha1/sha256/sha512 digests of every file
    under a hard-coded directory, then exit the process.
    """
    # NOTE(review): hard-coded personal path — parameterise before reuse.
    _dir = "/home/rw/.rwbox/.rwpvt/tmp/20111201"
    for root, dirs, files in os.walk(_dir):
        # Recursive walk; within each directory process files in natural order.
        sort_nicely(files)
        for f in files:
            fp = os.path.join(root, f)
            print(FileHash.sha(fp), fp)
    sys.exit(0)
if __name__ == '__main__':
    # GUI entry point; the hash-dump main() above is only reachable manually.
    main_qt5(sys.argv[1:])
| StarcoderdataPython |
3256422 | <reponame>mdnls/tramp
import unittest
from tramp.channels import (
AbsChannel, SgnChannel, ReluChannel, LeakyReluChannel, HardTanhChannel,
MultiConvChannel, LinearChannel, DiagonalChannel, UpsampleChannel
)
from tramp.ensembles import Multi2dConvEnsemble
import numpy as np
import torch
def empirical_second_moment(tau_z, channel):
    """
    Estimate the channel output second moment tau_x by Monte Carlo sampling.
    """
    n_samples = 1000 * 1000
    noise = np.random.standard_normal(size=n_samples)
    # Rescale so the sample standard deviation of Z is exactly sqrt(tau_z).
    z_sample = np.sqrt(tau_z) * noise / noise.std()
    x_sample = channel.sample(z_sample)
    return np.mean(x_sample ** 2)
def explicit_integral(az, bz, ax, bx, channel):
    """
    Compute rx, vx, rz, vz for p(x|z) by explicit numerical integration
    of the (unnormalised) joint belief.
    """
    def belief(z, x):
        log_belief = -0.5 * ax * (x**2) + bx * x - 0.5 * az * (z**2) + bz * z
        return np.exp(log_belief)

    # Integration window: +/- 10 standard deviations around the mean of
    # the z-side Gaussian belief.
    zmin = bz / az - 10 / np.sqrt(az)
    zmax = bz / az + 10 / np.sqrt(az)

    def moment(weight):
        # Integral of weight(z, x) * belief(z, x) over the window.
        return channel.measure(lambda z, x: weight(z, x) * belief(z, x), zmin, zmax)

    norm = channel.measure(belief, zmin, zmax)
    rx = moment(lambda z, x: x) / norm
    vx = moment(lambda z, x: x**2) / norm - rx**2
    rz = moment(lambda z, x: z) / norm
    vz = moment(lambda z, x: z**2) / norm - rz**2
    return rz, vz, rx, vx
class ChannelsTest(unittest.TestCase):
    """Validate scalar channels (abs/sgn/relu/leaky-relu/hard-tanh) against
    brute-force references: posterior moments vs explicit integration,
    second moment vs Monte Carlo sampling, and the beliefs measure vs
    normalisation to 1.
    """
    def setUp(self):
        # Each record fixes the natural parameters (az, bz) / (ax, bx) of the
        # z- and x-side Gaussian beliefs plus the prior second moment tau_z.
        self.records = [
            dict(az=2.1, bz=2.0, ax=2.0, bx=2.0, tau_z=2.0),
            dict(az=2.0, bz=+1.6, ax=1.5, bx=1.3, tau_z=1.5),
            dict(az=2.0, bz=-1.6, ax=1.5, bx=1.3, tau_z=1.0)
        ]
    def tearDown(self):
        pass
    def _test_function_second_moment(self, channel, records, places=6):
        """channel.second_moment must agree with the sampling estimate."""
        for record in records:
            tau_z = record["tau_z"]
            tau_x_emp = empirical_second_moment(tau_z, channel)
            tau_x_hat = channel.second_moment(tau_z)
            msg = f"record={record}"
            self.assertAlmostEqual(tau_x_emp, tau_x_hat, places=places, msg=msg)
    def _test_function_posterior(self, channel, records, places=12):
        """Forward/backward posterior moments must match explicit integration."""
        for record in records:
            az, bz, ax, bx = record["az"], record["bz"], record["ax"], record["bx"]
            rz, vz, rx, vx = explicit_integral(az, bz, ax, bx, channel)
            rx_hat, vx_hat = channel.compute_forward_posterior(az, bz, ax, bx)
            rz_hat, vz_hat = channel.compute_backward_posterior(az, bz, ax, bx)
            msg = f"record={record}"
            self.assertAlmostEqual(rx, rx_hat, places=places, msg=msg)
            self.assertAlmostEqual(vx, vx_hat, places=places, msg=msg)
            self.assertAlmostEqual(rz, rz_hat, places=places, msg=msg)
            self.assertAlmostEqual(vz, vz_hat, places=places, msg=msg)
    def _test_function_proba(self, channel, records, places=12):
        """The beliefs measure applied to the constant 1 must sum to 1."""
        for record in records:
            az, ax, tau_z = record["az"], record["ax"], record["tau_z"]
            def one(bz, bx): return 1
            sum_proba = channel.beliefs_measure(az, ax, tau_z, f=one)
            msg = f"record={record}"
            self.assertAlmostEqual(sum_proba, 1., places=places, msg=msg)
    # The per-channel tests below only differ in channel class and tolerance
    # (looser `places` where the reference computation is noisier).
    def test_abs_posterior(self):
        channel = AbsChannel()
        self._test_function_posterior(channel, self.records, places=6)
    def test_sgn_posterior(self):
        channel = SgnChannel()
        self._test_function_posterior(channel, self.records, places=4)
    def test_relu_posterior(self):
        channel = ReluChannel()
        self._test_function_posterior(channel, self.records, places=6)
    def test_leaky_relu_posterior(self):
        channel = LeakyReluChannel(slope=0.1)
        self._test_function_posterior(channel, self.records, places=6)
    def test_hard_tanh_posterior(self):
        channel = HardTanhChannel()
        self._test_function_posterior(channel, self.records, places=1)
    def test_abs_second_moment(self):
        channel = AbsChannel()
        self._test_function_second_moment(channel, self.records, places=2)
    def test_sgn_second_moment(self):
        channel = SgnChannel()
        self._test_function_second_moment(channel, self.records)
    def test_relu_second_moment(self):
        channel = ReluChannel()
        self._test_function_second_moment(channel, self.records, places=2)
    def test_leaky_relu_second_moment(self):
        channel = LeakyReluChannel(slope=0.1)
        self._test_function_second_moment(channel, self.records, places=2)
    def test_hard_tanh_second_moment(self):
        channel = HardTanhChannel()
        self._test_function_second_moment(channel, self.records, places=2)
    def test_abs_proba(self):
        channel = AbsChannel()
        self._test_function_proba(channel, self.records)
    def test_sgn_proba(self):
        channel = SgnChannel()
        self._test_function_proba(channel, self.records)
    def test_relu_proba(self):
        channel = ReluChannel()
        self._test_function_proba(channel, self.records)
    def test_leaky_relu_proba(self):
        channel = LeakyReluChannel(slope=0.1)
        self._test_function_proba(channel, self.records)
class MultiConvChannelTest(unittest.TestCase):
    """Validate MultiConvChannel against a pytorch circular Conv2d and an
    equivalent dense LinearChannel (built via densify()).
    """
    def setUp(self):
        H, W = (10, 11)  # height, width
        k= 3             # square kernel size (odd, so circular padding is symmetric)
        M, N = (3, 4)    # out channels, in channels
        self.inp_imdim = (N, H, W)
        self.outp_imdim = (M, H, W)
        # Generate the convolution
        self.inp_img = np.random.normal(size=(N, H, W))
        self.conv_ensemble = Multi2dConvEnsemble(width=W, height=H, in_channels=N, out_channels=M, k=3)
        conv_filter = self.conv_ensemble.generate()
        self.conv_filter = conv_filter
        self.mcc_channel = MultiConvChannel(self.conv_filter, block_shape=(H, W))
        self.dense_conv = self.mcc_channel.densify()
        # Construct reference implementations of convolutions and linear operators
        self.ref_conv = torch.nn.Conv2d(in_channels=N, out_channels=M, padding_mode="circular", padding=(k - 1) // 2,
                                        kernel_size=k, bias=False, stride=1)
        self.ref_conv.weight.data = torch.FloatTensor(conv_filter)
        inp_img_pt = torch.FloatTensor(self.inp_img[np.newaxis, ...])
        ref_outp_img = self.ref_conv(inp_img_pt).detach().cpu().numpy()[0]
        self.ref_outp_img = ref_outp_img
        self.ref_linear = LinearChannel(W=self.dense_conv)
    def tearDown(self):
        pass
    def test_unitary(self):
        # Test the closed form SVD and the virtual matrix multiplication by verifying unitarity:
        # U/V must preserve norms and V(V.T(x)) / U(U.T(x)) must round-trip.
        Vt_img = self.mcc_channel.V.T(self.inp_img)
        Ut_img = self.mcc_channel.U.T(self.ref_outp_img)
        self.assertTrue(np.isclose(np.linalg.norm(self.inp_img), np.linalg.norm(Vt_img)))
        self.assertTrue(np.isclose(np.linalg.norm(self.ref_outp_img), np.linalg.norm(Ut_img)))
        self.assertTrue(np.allclose(self.inp_img, self.mcc_channel.V(Vt_img)))
        self.assertTrue(np.allclose(self.ref_outp_img, self.mcc_channel.U(Ut_img)))
    def test_densify(self):
        # Test that densify() returns dense matrices that correctly implement sparse matrix behavior
        Vt_img = self.mcc_channel.V.T(self.inp_img)
        Ut_img = self.mcc_channel.U.T(self.ref_outp_img)
        self.assertTrue(np.allclose( self.mcc_channel.V.densify() @ Vt_img.flatten(), self.inp_img.flatten()))
        self.assertTrue(np.allclose( self.mcc_channel.U.densify() @ Ut_img.flatten(), self.ref_outp_img.flatten()))
        self.assertTrue(np.allclose( self.mcc_channel.densify() @ self.inp_img.flatten(), self.mcc_channel.at(self.inp_img).flatten()))
    def test_conv_agreement(self):
        # Test the sparse matrix mult matches a pytorch 2d convolution
        C = self.mcc_channel
        outp_img = C.at(self.inp_img)
        self.assertTrue(np.allclose(outp_img, self.ref_outp_img, atol=1e-6))
    def test_linear_agreement(self):
        # Check that the multichannel conv channel exactly matches the behavior of the corresponding
        # dense linear channel.
        az = np.random.uniform(low=1, high=5)
        ax = np.random.uniform(low=1, high=5)
        tau_z = np.random.uniform(low=1, high=5)
        bz = np.random.normal(size=self.inp_imdim)
        bx = np.random.normal(size=self.outp_imdim)
        self.assertTrue(np.allclose(self.mcc_channel.sample(bz).flatten(),
                                    self.ref_linear.sample(bz.flatten())))
        self.assertTrue(np.allclose(self.mcc_channel.compute_forward_variance(az, ax),
                                    self.ref_linear.compute_forward_variance(az, ax)))
        self.assertTrue(np.allclose(self.mcc_channel.compute_backward_variance(az, ax),
                                    self.ref_linear.compute_backward_variance(az, ax)))
        self.assertTrue(np.allclose(self.mcc_channel.compute_backward_mean(az, bz, ax, bx).flatten(),
                                    self.ref_linear.compute_backward_mean(az, bz.flatten(), ax, bx.flatten())))
        self.assertTrue(np.allclose(self.mcc_channel.compute_log_partition(az, bz, ax, bx),
                                    self.ref_linear.compute_log_partition(az, bz.flatten(), ax, bx.flatten())))
        self.assertTrue(np.allclose(self.mcc_channel.second_moment(tau_z),
                                    self.ref_linear.second_moment(tau_z)))
        self.assertTrue(np.allclose(self.mcc_channel.compute_free_energy(az, ax, tau_z),
                                    self.ref_linear.compute_free_energy(az, ax, tau_z)))
        self.assertTrue(np.allclose(self.mcc_channel.compute_mutual_information(az, ax, tau_z),
                                    self.ref_linear.compute_mutual_information(az, ax, tau_z)))
class DiagonalChannelTest(unittest.TestCase):
    """DiagonalChannel(S) must behave exactly like LinearChannel(diag(S))."""
    def setUp(self):
        self.dim = (32, 32)
        self.S = np.random.normal(size=self.dim)
        self.channel = DiagonalChannel(S=self.S)
        # Reference: an explicit dense diagonal linear channel.
        self.ref_linear = LinearChannel(W=np.diag(self.S.flatten()))
    def test_linear_agreement(self):
        az = np.random.uniform(low=1, high=5)
        ax = np.random.uniform(low=1, high=5)
        tau_z = np.random.uniform(low=1, high=5)
        bz = np.random.normal(size=self.dim)
        bx = np.random.normal(size=self.dim)
        self.assertTrue(np.allclose(self.channel.sample(bz).flatten(),
                                    self.ref_linear.sample(bz.flatten())))
        self.assertTrue(np.allclose(self.channel.compute_forward_variance(az, ax),
                                    self.ref_linear.compute_forward_variance(az, ax)))
        self.assertTrue(np.allclose(self.channel.compute_backward_variance(az, ax),
                                    self.ref_linear.compute_backward_variance(az, ax)))
        self.assertTrue(np.allclose(self.channel.compute_backward_mean(az, bz, ax, bx).flatten(),
                                    self.ref_linear.compute_backward_mean(az, bz.flatten(), ax, bx.flatten())))
        self.assertTrue(np.allclose(self.channel.compute_log_partition(az, bz, ax, bx),
                                    self.ref_linear.compute_log_partition(az, bz.flatten(), ax, bx.flatten())))
        self.assertTrue(np.allclose(self.channel.second_moment(tau_z),
                                    self.ref_linear.second_moment(tau_z)))
        self.assertTrue(np.allclose(self.channel.compute_free_energy(az, ax, tau_z),
                                    self.ref_linear.compute_free_energy(az, ax, tau_z)))
        self.assertTrue(np.allclose(self.channel.compute_mutual_information(az, ax, tau_z),
                                    self.ref_linear.compute_mutual_information(az, ax, tau_z)))
class UpsampleChannelTest(unittest.TestCase):
    """Validate UpsampleChannel against torch bilinear upsampling and against
    an equivalent dense LinearChannel (built via densify()).
    """
    def setUp(self):
        self.inp_imdim = (3, 5, 5)
        self.outp_imdim = (3, 10, 10)
        self.example_image = np.random.normal(size=self.inp_imdim)
        self.channel = UpsampleChannel(input_shape=self.inp_imdim, output_shape=self.outp_imdim)
        # References: torch bilinear upsampling and the densified linear map.
        self.ref_upsample_operator = torch.nn.Upsample(size=self.outp_imdim[1:], mode="bilinear", align_corners=False)
        self.ref_linear = LinearChannel(self.channel.densify())
    def test_upsample_agreement(self):
        """sample() must match torch's bilinear upsampling."""
        # NOTE: the original file contained a second method `test` that was a
        # line-for-line copy of this one; the duplicate has been removed.
        ref_ups_image = self.ref_upsample_operator(torch.FloatTensor(self.example_image[np.newaxis, ...]))[0].detach().numpy()
        ups_image = self.channel.sample(self.example_image)
        self.assertTrue(np.allclose(ups_image, ref_ups_image, atol=1e-6))
    def test_linear_agreement(self):
        """All channel statistics must match the densified LinearChannel."""
        az = np.random.uniform(low=1, high=5)
        ax = np.random.uniform(low=1, high=5)
        tau_z = np.random.uniform(low=1, high=5)
        bz = np.random.normal(size=self.inp_imdim)
        bx = np.random.normal(size=self.outp_imdim)
        self.assertTrue(np.allclose(self.channel.sample(bz).flatten(),
                                    self.ref_linear.sample(bz.flatten())))
        self.assertTrue(np.allclose(self.channel.compute_forward_variance(az, ax),
                                    self.ref_linear.compute_forward_variance(az, ax)))
        self.assertTrue(np.allclose(self.channel.compute_backward_variance(az, ax),
                                    self.ref_linear.compute_backward_variance(az, ax)))
        self.assertTrue(np.allclose(self.channel.compute_backward_mean(az, bz, ax, bx).flatten(),
                                    self.ref_linear.compute_backward_mean(az, bz.flatten(), ax, bx.flatten())))
        self.assertTrue(np.allclose(self.channel.second_moment(tau_z),
                                    self.ref_linear.second_moment(tau_z)))
        self.assertTrue(np.allclose(self.channel.compute_free_energy(az, ax, tau_z),
                                    self.ref_linear.compute_free_energy(az, ax, tau_z)))
        self.assertTrue(np.allclose(self.channel.compute_log_partition(az, bz, ax, bx),
                                    self.ref_linear.compute_log_partition(az, bz.flatten(), ax, bx.flatten())))
        self.assertTrue(np.allclose(self.channel.compute_mutual_information(az, ax, tau_z),
                                    self.ref_linear.compute_mutual_information(az, ax, tau_z)))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
170601 | # flake8: noqa
"""
Auswärtiges Amt OpenData Schnittstelle
Dies ist die Beschreibung für die Schnittstelle zum Zugriff auf die Daten des [Auswärtigen Amtes](https://www.auswaertiges-amt.de/de/) im Rahmen der [OpenData](https://www.auswaertiges-amt.de/de/open-data-schnittstelle/736118) Initiative. ## Deaktivierung Die Schnittstelle kann deaktiviert werden, in dem Fall wird ein leeres JSON-Objekt zurückgegeben. ## Fehlerfall Im Fehlerfall wird ein leeres JSON-Objekt zurückgegeben. ## Nutzungsbedingungen Die Nutzungsbedingungen sind auf der [OpenData-Schnittstelle](https://www.auswaertiges-amt.de/de/open-data-schnittstelle/736118) des Auswärtigen Amtes zu finden. ## Änderungen ### version 1.0.1 (September 2021) * `content` (-> Details des Reise- und Sicherheitshinweis) wurde von [`/travelwarning`](#operations-default-getTravelwarning) entfernt -> bitte ab jetzt [`/travelwarning/{contentId}`](#operations-default-getSingleTravelwarning) nutzen um `content` abzufragen # noqa: E501
The version of the OpenAPI document: 1.0.1
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from deutschland.travelwarning.api_client import ApiClient
# import Configuration
from deutschland.travelwarning.configuration import Configuration
# import exceptions
from deutschland.travelwarning.exceptions import (
ApiAttributeError,
ApiException,
ApiKeyError,
ApiTypeError,
ApiValueError,
OpenApiException,
)
| StarcoderdataPython |
1689378 | <gh_stars>100-1000
# Scorer function Gi(z) in the complex plane
# NOTE(review): relies on names from mpmath's interactive namespace
# (cplot, scorergi) — presumably run after `from mpmath import *`; neither
# name is defined in this snippet. Confirm against the surrounding context.
cplot(scorergi, [-8,8], [-8,8], points=50000)
| StarcoderdataPython |
from typing import Type, TypeVar

from kernel.middleware import CrequestMiddleware
_cls = Type('KernelModel', bound='kernel.models.base.KernelModel')
class ActionKernelModel(object):
    """Mixin adding Django-style permission helpers to a model class.

    The class-level ``can_action_*`` helpers check ``request.user`` against
    the model's standard add/change/delete/view permissions; the
    ``can_object_action_*`` variants check the user attached to the current
    request (resolved via CrequestMiddleware).
    """

    @property
    def action_user(self):
        """User performing the current action, taken from the active request."""
        return CrequestMiddleware.get_user()

    @classmethod
    def generate_perm(cls: "_cls", action):
        """Return the '<app_label>.<action>_<model_name>' permission string."""
        app_label = cls._meta.app_label
        class_name = cls._meta.model_name
        return '{}.{}_{}'.format(app_label, action, class_name)

    @classmethod
    def can_action_create(cls, request):
        return request.user.has_perm(cls.generate_perm('add'))

    @classmethod
    def can_action_update(cls, request):
        return request.user.has_perm(cls.generate_perm('change'))

    @classmethod
    def can_action_delete(cls, request):
        return request.user.has_perm(cls.generate_perm('delete'))

    @classmethod
    def can_action_view_detail(cls, request):
        return request.user.has_perm(cls.generate_perm('view'))

    @classmethod
    def can_action_view_list(cls, request):
        return request.user.has_perm(cls.generate_perm('view'))

    @classmethod
    def can_action_export(cls, request):
        return request.user.has_perm(cls.generate_perm('view'))

    def can_object_action_create(self):
        # BUG FIX: this used 'create', which is not one of Django's default
        # model permissions (add/change/delete/view) and was inconsistent
        # with can_action_create above; use 'add'.
        return self.action_user.has_perm(self.generate_perm('add'))

    def can_object_action_update(self):
        return self.action_user.has_perm(self.generate_perm('change'))

    def can_object_action_delete(self):
        return self.action_user.has_perm(self.generate_perm('delete'))
3331942 | <gh_stars>0
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
def int_pendulum_sim(theta_init, t, L=1, m=1, b=0, g=9.81):
    """Right-hand side of the damped pendulum ODE, for scipy's odeint.

    theta_init is (angle, angular velocity); t is unused (autonomous system).
    Returns (d angle / dt, d angular velocity / dt).
    """
    angle, angular_velocity = theta_init[0], theta_init[1]
    d_angle = angular_velocity
    d_angular_velocity = -(b / m) * angular_velocity - (g / L) * np.sin(angle)
    return d_angle, d_angular_velocity
# Input constants
m = 1 # mass (kg)
L = 1 # length (m)
b = 0 # damping value (kg/m^2-s) — 0 means an undamped pendulum
g = 9.81 # gravity (m/s^2)
delta_t = 0.02 # time step size (seconds)
t_max = 10.0 # max sim time (seconds)
theta1_0 = np.pi/2 # initial angle (radians)
theta2_0 = 0 # initial angular velocity (rad/s)
theta_init = (theta1_0, theta2_0)
# Get timesteps
t = np.linspace(0.0, t_max, int(t_max/delta_t))
# Integrate the pendulum ODE over the time grid; columns of the result are
# (angle, angular velocity). Note m/L/b/g are taken from odeint's defaults
# in int_pendulum_sim, which happen to match the constants above.
theta_vals_int = integrate.odeint(int_pendulum_sim, theta_init, t)
f = plt.figure()
ax = f.add_subplot(111)
plt.plot(t, theta_vals_int)
# NOTE(review): no plt.show() or savefig — in a non-interactive session the
# figure is never displayed; confirm whether this is intended.
1633389 | #! /usr/bin/env python
# Copyright 2020 <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from pathlib import Path
import importlib
import pprint
import subprocess
import sys
import pkg_resources as pkg
class ImportToPkg:
    """Map importable top-level module names to the package providing them.

    Built eagerly at construction time by shelling out to ``conda list``,
    so instantiation is slow and requires an activated conda environment.
    """
    def __init__(self):
        # {import_name: project_name}, e.g. {'bs4': 'beautifulsoup4'}.
        self.import_to_pkg = dict(self.get_import_to_pkg())
    def get_pkg_names(self):
        """Yield package names (first column) from ``conda list`` output."""
        for line in subprocess.check_output('conda list'.split()).decode().splitlines():
            if not line.startswith('#'):
                # columns are: Name Version Build Channel
                pkg_name = line.split()[0]
                yield pkg_name
    def get_import_to_pkg(self):
        """Yield (import_name, project_name) pairs for importable packages.

        Packages with no Python distribution metadata, no top_level.txt,
        or an unimportable top-level module are silently skipped.
        """
        for name in self.get_pkg_names():
            try:
                meta = pkg.get_distribution(name)
            except pkg.DistributionNotFound:
                continue # Skips binaries: arrow-cpp, boost-cpp, brotli, bzip2, c-ares.
            folder = Path(meta.egg_info)
            try:
                # First entry after sorting = first non-underscore module name.
                import_name = self._get_imports(folder / 'top_level.txt')[0].rstrip()
            except FileNotFoundError:
                continue # Skips the entrypoints-0.3 package
            try:
                importlib.import_module(import_name)
                sys.modules[import_name] # Verify that it actually _was_ imported.
            except ModuleNotFoundError:
                continue # Skips 'amd' from cvxopt.
            yield import_name, meta.project_name
    @classmethod
    def _get_imports(cls, fspec):
        """Return the lines of top_level.txt, underscore-prefixed names last."""
        with open(fspec) as fin:
            lines = fin.readlines()
        return sorted(lines, key=cls._underscores_to_the_end)
    @staticmethod
    def _underscores_to_the_end(s):
        # The '_' character is between 'Z' & 'a'. This helper moves it past 'z',
        # so names starting with a letter will sort earlier than underscore names.
        return s.replace('_', '~')
if __name__ == '__main__':
    # Print the full import-name -> package-name mapping for inspection.
    pprint.pprint(ImportToPkg().import_to_pkg)
1708249 | import copy
from membase.helper.cluster_helper import ClusterOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator
from .xdcrnewbasetests import XDCRNewBaseTest
from .xdcrnewbasetests import NodeHelper
from .xdcrnewbasetests import Utility, BUCKET_NAME, OPS
from remote.remote_util import RemoteMachineShellConnection
from lib.memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
# Assumption that at least 2 nodes on every cluster
class bidirectional(XDCRNewBaseTest):
    def setUp(self):
        """Resolve the two clusters (C1 = source, C2 = dest) and their masters."""
        super(bidirectional, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
    def tearDown(self):
        """Delegate all cleanup to the XDCR base class."""
        super(bidirectional, self).tearDown()
    def __perform_ops_joint_sets(self):
        """Run updates/deletes on overlapping (joint) key sets on the clusters
        named in self._upd_clusters / self._del_clusters, after merging the
        expected key sets (the keys are replicated to both sides).
        """
        # Merging the keys as keys are actually replicated.
        temp_expires = self._expires
        self._expires = 0 # Assigning it to 0, so that merge_buckets don't wait for expiration here.
        self.merge_all_buckets()
        tasks = []
        kv_gen_src = self.src_cluster.get_kv_gen()[OPS.CREATE]
        # First _perc_upd percent of the keyspace is updated...
        gen_update = BlobGenerator(kv_gen_src.name,
                                   kv_gen_src.seed,
                                   kv_gen_src.value_size,
                                   start=0,
                                   end=int(kv_gen_src.end * (float)(self._perc_upd) / 100))
        # ...and the last _perc_del percent is deleted (ranges may overlap).
        gen_delete = BlobGenerator(kv_gen_src.name,
                                   kv_gen_src.seed,
                                   kv_gen_src.value_size,
                                   start=int((kv_gen_src.end) * (float)(100 - self._perc_del) / 100),
                                   end=kv_gen_src.end)
        if "C1" in self._upd_clusters:
            tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires)
        if "C2" in self._upd_clusters:
            tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires)
        if "C1" in self._del_clusters:
            tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0)
        if "C2" in self._del_clusters:
            tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0)
        for task in tasks:
            task.result()
        self._expires = temp_expires
        # Optionally wait out TTL expiry before letting callers verify.
        if (self._wait_for_expiration and self._expires) and ("C1" in self._upd_clusters or "C2" in self._upd_clusters):
            self.sleep(self._expires)
        self.sleep(self._wait_timeout)
"""Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket."""
    def load_with_ops(self):
        """Bidirectional XDCR; synchronous update/delete on disjoint sets, then verify."""
        self.setup_xdcr_and_load()
        self.perform_update_delete()
        self.verify_results()
"""Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket.
Here running incremental load on both cluster1 and cluster2 as specified by the user/conf file"""
    def load_with_async_ops(self):
        """Bidirectional XDCR; asynchronous update/delete on disjoint sets, then verify."""
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()
"""Testing Bidirectional load( Loading at source/destination). Failover node at Source/Destination while
Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
Verifying whether XDCR replication is successful on subsequent destination clusters. """
    def load_with_async_ops_and_joint_sets(self):
        """Bidirectional XDCR; asynchronous update/delete on joint (overlapping) sets."""
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()
    def load_with_async_ops_with_warmup(self):
        """Async doc-ops while a non-master node on each flagged cluster warms up;
        warmup must complete before the ops start."""
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())
        self.sleep(self._wait_timeout)
        NodeHelper.wait_warmup_completed(warmupnodes)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)
        self.verify_results()
    def load_with_async_ops_with_warmup_master(self):
        """Same as load_with_async_ops_with_warmup, but warms up the master nodes."""
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))
        self.sleep(self._wait_timeout)
        NodeHelper.wait_warmup_completed(warmupnodes)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)
        self.verify_results()
def load_with_async_ops_and_joint_sets_with_warmup(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
"Test case does not apply for Ephemeral buckets"
return
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node())
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node())
self.sleep(self._wait_timeout)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
NodeHelper.wait_warmup_completed(warmupnodes)
self.verify_results()
    def load_with_async_ops_and_joint_sets_with_warmup_master(self):
        """Joint-set async doc-ops during *master* node warmup; warmup completion
        is awaited just before verification."""
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))
        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)
        NodeHelper.wait_warmup_completed(warmupnodes)
        self.verify_results()
    def load_with_failover(self):
        """Fail over (and rebalance out) a node on each flagged cluster, then
        run doc-ops and verify."""
        self.setup_xdcr_and_load()
        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes()
        self.sleep(self._wait_timeout // 6)
        self.perform_update_delete()
        # NOTE(review): fixed 300 s settle time before verification —
        # presumably to let replication catch up after the failover; confirm.
        self.sleep(300)
        self.verify_results()
    def load_with_failover_then_add_back(self):
        """Fail over a node without rebalancing it out, add it back, then run
        doc-ops and verify."""
        self.setup_xdcr_and_load()
        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.src_cluster.add_back_node()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.dest_cluster.add_back_node()
        self.perform_update_delete()
        self.verify_results()
    def load_with_failover_master(self):
        """Fail over and rebalance out the *master* node on each flagged cluster,
        then run doc-ops and verify."""
        self.setup_xdcr_and_load()
        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_master()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_master()
        self.sleep(self._wait_timeout // 6)
        self.perform_update_delete()
        self.verify_results()
"""Replication with compaction ddocs and view queries on both clusters.
This test begins by loading a given number of items on both clusters.
It creates _num_views as development/production view with default
map view funcs(_is_dev_ddoc = True by default) on both clusters.
Then we disabled compaction for ddoc on src cluster. While we don't reach
expected fragmentation for ddoc on src cluster we update docs and perform
view queries for all views. Then we start compaction when fragmentation
was reached fragmentation_value. When compaction was completed we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
    def replication_with_ddoc_compaction(self):
        """Replication while design-doc compaction runs on the source cluster.

        Loads both clusters, creates views on both, disables auto-compaction
        on the source, churns data + queries until the ddoc fragmentation
        target is reached, compacts, then verifies no data loss.
        """
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        self.setup_xdcr()
        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)
        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]
        query = {"full_set": "true", "stale": "false"}
        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)
        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
                self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
        fragmentation_monitor.result()
        compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')
        self.assertTrue(compaction_task.result())
        self.verify_results()
    def replication_with_view_queries_and_ops(self):
        """Replication with concurrent view queries and doc-ops on both clusters.

        Creates views on both clusters, starts async update/delete per the
        configured cluster flags, keeps querying all views until the ops
        finish, then verifies item counts, query results, and data parity.
        """
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        tasks = []
        try:
            self.setup_xdcr()
            self.src_cluster.load_all_buckets(self._num_items)
            self.dest_cluster.load_all_buckets(self._num_items)
            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]
            query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
            tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
            for task in tasks:
                task.result(self._poll_timeout)
            tasks = []
            # Setting up doc-ops at source nodes
            if "C1" in self._upd_clusters:
                tasks.extend(self.src_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
            if "C1" in self._del_clusters:
                tasks.extend(self.src_cluster.async_update_delete(OPS.DELETE, self._perc_del))
            if "C2" in self._upd_clusters:
                tasks.extend(self.dest_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
            if "C2" in self._del_clusters:
                tasks.extend(self.dest_cluster.async_update_delete(OPS.DELETE, self._perc_del))
            self.sleep(5)
            # Keep querying every view until all doc-op tasks have finished,
            # then (optionally) wait out TTL expiry.
            while True:
                for view in views:
                    self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
                    self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
                if {task.state for task in tasks} != {"FINISHED"}:
                    continue
                else:
                    if self._wait_for_expiration:
                        if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
                            self.sleep(self._expires)
                    break
            self.merge_all_buckets()
            self.src_cluster.verify_items_count()
            self.dest_cluster.verify_items_count()
            tasks = []
            src_buckets = self.src_cluster.get_buckets()
            dest_buckets = self.dest_cluster.get_buckets()
            for view in views:
                tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, src_buckets[0].kvs[1].__len__()))
                tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, dest_buckets[0].kvs[1].__len__()))
            for task in tasks:
                task.result(self._poll_timeout)
            self.verify_results()
        finally:
            # For timeout error, all tasks to be cancelled
            # Before proceeding to next test
            for task in tasks:
                task.cancel()
"""Replication with disabled/enabled ddoc compaction on both clusters.
This test begins by loading a given number of items on both clusters.
Then we disabled or enabled compaction on both clusters( set via params).
Then we mutate and delete data on clusters 3 times. After deletion we recreate
deleted items. When data was changed 3 times we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
def replication_with_disabled_ddoc_compaction(self):
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
if "C1" in self._disable_compaction:
self.src_cluster.disable_compaction()
if "C2" in self._disable_compaction:
self.dest_cluster.disable_compaction()
# perform doc's ops 3 times to increase rev number
for _ in range(3):
self.async_perform_update_delete()
# wait till deletes have been sent to recreate
self.sleep(60)
# restore(re-creating) deleted items
if 'C1' in self._del_clusters:
c1_kv_gen = self.src_cluster.get_kv_gen()
c1_gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
if self._expires:
# if expiration set, recreate those keys before
# trying to update
c1_gen_update = copy.deepcopy(c1_kv_gen[OPS.UPDATE])
self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_update)
self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_delete)
if 'C2' in self._del_clusters:
c2_kv_gen = self.dest_cluster.get_kv_gen()
c2_gen_delete = copy.deepcopy(c2_kv_gen[OPS.DELETE])
if self._expires:
c2_gen_update = copy.deepcopy(c2_kv_gen[OPS.UPDATE])
self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_update)
self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_delete)
# wait till we recreate deleted keys before we can delete/update
self.sleep(300)
self.verify_results()
def replication_while_rebooting_a_non_master_src_dest_node(self):
    """Reboot one node on each cluster while XDCR replication is running.

    Loads both clusters, performs update/delete mutations, reboots one node
    on the destination cluster and one on the source cluster, waits for both
    nodes to restart (and warm up), then verifies there was no data loss.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        # ephemeral buckets do not persist data across a reboot, so this
        # scenario is not applicable
        self.log.info("Test case does not apply to ephemeral")
        return
    self.setup_xdcr_and_load()
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)

    reboot_node_dest = self.dest_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)

    reboot_node_src = self.src_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)

    self.sleep(120)
    # ensure ns_server is fully up on both rebooted nodes before verification
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
    self.verify_results()
def test_disk_full(self):
    """Verify the 'Approaching full disk warning.' alert appears on both masters.

    After setting up replication and verifying it, runs cbcollect_info on the
    source and destination master nodes and greps the collected logs (or the
    /diag REST endpoint on Windows) for the full-disk warning message.
    """
    self.setup_xdcr_and_load()
    self.verify_results()
    self.sleep(self._wait_timeout)

    zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
    try:
        for node in [self.src_master, self.dest_master]:
            self.shell = RemoteMachineShellConnection(node)
            self.shell.execute_cbcollect_info(zip_file)
            if self.shell.extract_remote_info().type.lower() != "windows":
                command = "unzip %s" % (zip_file)
                output, error = self.shell.execute_command(command)
                self.shell.log_command_output(output, error)
                if len(error) > 0:
                    raise Exception("unable to unzip the files. Check unzip command output for help")
                cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
                output, _ = self.shell.execute_command(cmd)
            else:
                # no unzip/grep on Windows: query the node's /diag endpoint
                # instead. BUGFIX: this previously always queried
                # self.src_master even when checking the destination master.
                cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
                    node.ip,
                    node.rest_username,
                    node.rest_password)
                output, _ = self.shell.execute_command(cmd)
            self.assertNotEqual(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip)
            self.log.info("Full disk warning generated as expected in %s" % node.ip)
            self.shell.delete_files(zip_file)
            self.shell.delete_files("cbcollect_info*")
    except Exception as e:
        # BUGFIX: the exception was previously only logged, which silently
        # swallowed assertion failures and made the test pass regardless of
        # the outcome. Log for diagnostics, then re-raise.
        self.log.info(e)
        raise
def test_rollback(self):
    """Verify XDCR handles a DCP rollback on both clusters.

    Stops persistence on all nodes, loads data with replications paused,
    resumes replication and mutates data, then kills memcached on both
    master nodes and fails over one node on each cluster to force a DCP
    rollback. Finally checks that outbound mutations are still replicated
    and that goxdcr logged the rollback message on both sides.
    """
    bucket = self.src_cluster.get_buckets()[0]
    src_nodes = self.src_cluster.get_nodes()
    dest_nodes = self.dest_cluster.get_nodes()
    nodes = src_nodes + dest_nodes

    # Stop Persistence on Node A & Node B
    for node in nodes:
        mem_client = MemcachedClientHelper.direct_client(node, bucket)
        mem_client.stop_persistence()

    # pattern for locating goxdcr log files on the first server
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                 + '/goxdcr.log*'

    self.setup_xdcr()
    # pause replication so the initial load is not streamed out yet
    self.src_cluster.pause_all_replications()
    self.dest_cluster.pause_all_replications()

    gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    gen = BlobGenerator("C2-", "C2-", self._value_size, end=self._num_items)
    self.dest_cluster.load_all_buckets_from_generator(gen)

    self.src_cluster.resume_all_replications()
    self.dest_cluster.resume_all_replications()

    # Perform mutations on the bucket
    self.async_perform_update_delete()

    rest1 = RestConnection(self.src_cluster.get_master_node())
    rest2 = RestConnection(self.dest_cluster.get_master_node())

    # Fetch count of docs in src and dest cluster
    _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    self.log.info("Before rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))

    # Kill memcached on Node A so that Node B becomes master
    shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
    shell.kill_memcached()
    shell = RemoteMachineShellConnection(self.dest_cluster.get_master_node())
    shell.kill_memcached()

    # Start persistence on Node B
    mem_client = MemcachedClientHelper.direct_client(src_nodes[1], bucket)
    mem_client.start_persistence()
    mem_client = MemcachedClientHelper.direct_client(dest_nodes[1], bucket)
    mem_client.start_persistence()

    # Failover Node B
    failover_task = self.src_cluster.async_failover()
    failover_task.result()
    failover_task = self.dest_cluster.async_failover()
    failover_task.result()

    # Wait for Failover & rollback to complete
    self.sleep(60)

    # Fetch count of docs in src and dest cluster
    _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    self.log.info("After rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))

    # replication must still make progress in both directions after rollback
    self.assertTrue(self.src_cluster.wait_for_outbound_mutations(),
                    "Mutations in source cluster not replicated to target after rollback")
    self.assertTrue(self.dest_cluster.wait_for_outbound_mutations(),
                    "Mutations in target cluster not replicated to source after rollback")

    # confirm goxdcr actually observed the DCP rollback on each side
    _, count = NodeHelper.check_goxdcr_log(
        src_nodes[0],
        "Received rollback from DCP stream",
        goxdcr_log)
    self.assertGreater(count, 0, "rollback did not happen as expected")
    self.log.info("rollback happened as expected")

    _, count = NodeHelper.check_goxdcr_log(
        dest_nodes[0],
        "Received rollback from DCP stream",
        goxdcr_log)
    self.assertGreater(count, 0, "rollback did not happen as expected")
    self.log.info("rollback happened as expected")
def test_scramsha(self):
    """
    Creates a new bi-xdcr replication with scram-sha.

    Make sure to pass use-scramsha=True from the command line.
    Verifies via the goxdcr logs that both master nodes negotiated
    SCRAM-SHA for the remote cluster reference, then verifies data.
    """
    self.setup_xdcr()
    self.sleep(60, "wait before checking logs")
    # check the goxdcr log on both masters for the SCRAM-SHA auth marker
    for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]:
        _, count = NodeHelper.check_goxdcr_log(node,
                        "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
        if count <= 0:
            self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
        else:
            self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
    self.verify_results()
def test_update_to_scramsha_auth(self):
    """
    Start with ordinary replication, then switch to use scram_sha_auth.

    Searches the goxdcr logs for success statements: the SCRAM-SHA marker
    count on each master must increase beyond the pre-test baseline.
    """
    # baseline count of SCRAM-SHA log lines before the remote refs are updated
    _, old_count = NodeHelper.check_goxdcr_log(self.src_cluster.get_master_node(),
                        "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
    self.setup_xdcr()
    # modify remote cluster ref to use scramsha
    for remote_cluster in self.src_cluster.get_remote_clusters()+self.dest_cluster.get_remote_clusters():
        remote_cluster.use_scram_sha_auth()
    self.sleep(60, "wait before checking the logs for using scram-sha")
    # the marker count must have grown on both masters
    for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]:
        _, count = NodeHelper.check_goxdcr_log(node, "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
        if count <= old_count:
            self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
        else:
            self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
    self.verify_results()
self.verify_results() | StarcoderdataPython |
1706031 | <reponame>samuelduchesne/osmnx
################################################################################
# Module: footprints.py
# Description: Download and plot footprints from OpenStreetMap
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/gboeing/osmnx
################################################################################
import geopandas as gpd
import matplotlib.pyplot as plt
import time
from descartes import PolygonPatch
from matplotlib.collections import PatchCollection
from shapely.geometry import Polygon
from shapely.geometry import MultiPolygon
from . import settings
from .core import consolidate_subdivide_geometry
from .core import get_polygons_coordinates
from .core import overpass_request
from .core import bbox_from_point
from .core import gdf_from_place
from .plot import save_and_show
from .projection import project_geometry
from .utils import log
from .utils import geocode
def osm_footprints_download(polygon=None, north=None, south=None, east=None, west=None,
                            footprint_type='building', timeout=180, memory=None,
                            max_query_area_size=50*1000*50*1000):
    """
    Download OpenStreetMap footprint data from the Overpass API.

    Parameters
    ----------
    polygon : shapely Polygon or MultiPolygon
        geographic shape to fetch the footprints within
    north : float
        northern latitude of bounding box
    south : float
        southern latitude of bounding box
    east : float
        eastern longitude of bounding box
    west : float
        western longitude of bounding box
    footprint_type : string
        type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.
    timeout : int
        the timeout interval for requests and to pass to API
    memory : int
        server memory allocation size for the query, in bytes. If none, server
        will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is in:
        any polygon bigger will get divided up for multiple queries to API
        (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are
        meters))

    Returns
    -------
    list
        list of response_json dicts

    Raises
    ------
    ValueError
        if neither a polygon nor a full set of bbox coordinates was passed
    """
    # check if we're querying by polygon or by bounding box based on which
    # argument(s) where passed into this function
    by_poly = polygon is not None
    by_bbox = not (north is None or south is None or east is None or west is None)
    if not (by_poly or by_bbox):
        raise ValueError('You must pass a polygon or north, south, east, and west')

    response_jsons = []

    # pass server memory allocation in bytes for the query to the API
    # if None, pass nothing so the server will use its default allocation size
    # otherwise, define the query's maxsize parameter value as whatever the
    # caller passed in
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the query to send the API
    if by_bbox:
        # turn bbox into a polygon and project to local UTM
        # (note: this rebinds the `polygon` parameter, which is unused in the
        # bbox case)
        polygon = Polygon([(west, south), (east, south), (east, north), (west, north)])
        geometry_proj, crs_proj = project_geometry(polygon)

        # subdivide it if it exceeds the max area size (in meters), then project
        # back to lat-long
        geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(geometry_proj, max_query_area_size=max_query_area_size)
        geometry, _ = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True)
        log('Requesting footprints data within bounding box from API in {:,} request(s)'.format(len(geometry)))
        start_time = time.time()

        # loop through each polygon rectangle in the geometry (there will only
        # be one if original bbox didn't exceed max area size)
        for poly in geometry:
            # represent bbox as south,west,north,east and round lat-longs to 8
            # decimal places (ie, within 1 mm) so URL strings aren't different
            # due to float rounding issues (for consistent caching)
            west, south, east, north = poly.bounds
            # the query fetches matching ways and relations plus all the nodes
            # they recurse down to ('(._;>;);')
            query_template = ('[out:json][timeout:{timeout}]{maxsize};'
                              '((way["{footprint_type}"]({south:.8f},{west:.8f},{north:.8f},{east:.8f});'
                              '(._;>;););'
                              '(relation["{footprint_type}"]({south:.8f},{west:.8f},{north:.8f},{east:.8f});'
                              '(._;>;);););out;')
            query_str = query_template.format(north=north, south=south, east=east, west=west, timeout=timeout,
                                              maxsize=maxsize, footprint_type=footprint_type)
            response_json = overpass_request(data={'data':query_str}, timeout=timeout)
            response_jsons.append(response_json)
        msg = ('Got all footprint data within bounding box from '
               'API in {:,} request(s) and {:,.2f} seconds')
        log(msg.format(len(geometry), time.time()-start_time))

    elif by_poly:
        # project to utm, divide polygon up into sub-polygons if area exceeds a
        # max size (in meters), project back to lat-long, then get a list of polygon(s) exterior coordinates
        geometry_proj, crs_proj = project_geometry(polygon)
        geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(geometry_proj, max_query_area_size=max_query_area_size)
        geometry, _ = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True)
        polygon_coord_strs = get_polygons_coordinates(geometry)
        log('Requesting footprint data within polygon from API in {:,} request(s)'.format(len(polygon_coord_strs)))
        start_time = time.time()

        # pass each polygon exterior coordinates in the list to the API, one at
        # a time
        for polygon_coord_str in polygon_coord_strs:
            query_template = ('[out:json][timeout:{timeout}]{maxsize};('
                              'way(poly:"{polygon}")["{footprint_type}"];(._;>;);'
                              'relation(poly:"{polygon}")["{footprint_type}"];(._;>;););out;')
            query_str = query_template.format(polygon=polygon_coord_str, timeout=timeout, maxsize=maxsize,
                                              footprint_type=footprint_type)
            response_json = overpass_request(data={'data':query_str}, timeout=timeout)
            response_jsons.append(response_json)
        msg = ('Got all footprint data within polygon from API in '
               '{:,} request(s) and {:,.2f} seconds')
        log(msg.format(len(polygon_coord_strs), time.time()-start_time))

    return response_jsons
def create_footprints_gdf(polygon=None, north=None, south=None, east=None, west=None,
                          footprint_type='building', retain_invalid=False):
    """
    Get footprint data from OSM then assemble it into a GeoDataFrame.

    Parameters
    ----------
    polygon : shapely Polygon or MultiPolygon
        geographic shape to fetch the footprints within
    north : float
        northern latitude of bounding box
    south : float
        southern latitude of bounding box
    east : float
        eastern longitude of bounding box
    west : float
        western longitude of bounding box
    footprint_type : string
        type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.
    retain_invalid : bool
        if False discard any footprints with an invalid geometry

    Returns
    -------
    GeoDataFrame
    """
    responses = osm_footprints_download(polygon, north, south, east, west, footprint_type)

    # ids of supporting/untagged polygons to be removed at the end of the process
    pop_list = []

    # index all nodes by OSM id so ways can look up their vertex coordinates
    vertices = {}
    for response in responses:
        for result in response['elements']:
            if 'type' in result and result['type']=='node':
                vertices[result['id']] = {'lat' : result['lat'],
                                          'lon' : result['lon']}

    # assemble each closed way into a polygon footprint
    footprints = {}
    for response in responses:
        for result in response['elements']:
            if 'type' in result and result['type']=='way':
                nodes = result['nodes']
                try:
                    polygon = Polygon([(vertices[node]['lon'], vertices[node]['lat']) for node in nodes])
                except Exception:
                    # BUGFIX: previously execution fell through here and the
                    # stale value of `polygon` (the query polygon argument or
                    # a previous way's geometry) was silently attached to this
                    # footprint. Skip the unbuildable way instead.
                    log('Polygon has invalid geometry: {}'.format(nodes))
                    continue

                footprint = {'nodes' : nodes,
                             'geometry' : polygon}

                if 'tags' in result:
                    for tag in result['tags']:
                        footprint[tag] = result['tags'][tag]

                # if polygons are untagged or not tagged with the footprint_type
                # add them to pop_list to be removed from the final dictionary
                if 'tags' not in result:
                    pop_list.append(result['id'])
                elif footprint_type not in result['tags']:
                    pop_list.append(result['id'])
                footprints[result['id']] = footprint

    # Create multipolygon footprints and pop untagged supporting polygons from footprints
    for response in responses:
        for result in response['elements']:
            if 'type' in result and result['type']=='relation':
                outer_polys = []
                inner_polys = []
                multipoly = []
                for member in result['members']:
                    if 'role' in member and member['role']=='outer':
                        outer_polys.append(member['ref'])
                    if 'role' in member and member['role']=='inner':
                        inner_polys.append(member['ref'])

                # osm allows multiple outer polygons in a relation
                for outer_poly in outer_polys:
                    # a member way may be absent if its geometry was invalid
                    # and skipped above
                    if outer_poly not in footprints:
                        continue
                    temp_poly = footprints[outer_poly]['geometry']
                    for inner_poly in inner_polys:
                        if inner_poly in footprints:
                            temp_poly = temp_poly.difference(footprints[inner_poly]['geometry'])
                    multipoly.append(temp_poly)

                footprint = {'geometry' : MultiPolygon(multipoly)}

                if 'tags' in result:
                    for tag in result['tags']:
                        footprint[tag] = result['tags'][tag]
                footprints[result['id']] = footprint

    # remove supporting geometry from footprints dictionary; an id can appear
    # in pop_list more than once when query subdivisions overlap, or refer to
    # a skipped way, so pop tolerantly
    for item in pop_list:
        footprints.pop(item, None)

    gdf = gpd.GeoDataFrame(footprints).T
    gdf.crs = settings.default_crs

    if not retain_invalid:
        # drop all invalid geometries
        gdf = gdf[gdf['geometry'].is_valid]

    return gdf
def footprints_from_point(point, distance, footprint_type='building', retain_invalid=False):
    """
    Get footprints within a square bounding box centered on a lat-long point.

    The box extends `distance` meters north, south, east, and west of the
    point.

    Parameters
    ----------
    point : tuple
        a (lat, lon) point
    distance : numeric
        half-width of the bounding box, in meters
    footprint_type : string
        OSM tag key to query, e.g. 'building', 'landuse', 'place'
    retain_invalid : bool
        if False, footprints with invalid geometry are discarded

    Returns
    -------
    GeoDataFrame
    """
    # derive the bounding box around the point and unpack it directly
    north, south, east, west = bbox_from_point(point=point, distance=distance)
    return create_footprints_gdf(north=north, south=south, east=east, west=west,
                                 footprint_type=footprint_type,
                                 retain_invalid=retain_invalid)
def footprints_from_address(address, distance, footprint_type='building', retain_invalid=False):
    """
    Get footprints within some distance of a geocoded address.

    Parameters
    ----------
    address : string
        address to geocode into a (lat, lon) point
    distance : numeric
        distance in meters around the geocoded point
    footprint_type : string
        OSM tag key to query, e.g. 'building', 'landuse', 'place'
    retain_invalid : bool
        if False, footprints with invalid geometry are discarded

    Returns
    -------
    GeoDataFrame
    """
    # turn the address into a coordinate, then delegate to the
    # point-based lookup
    center_point = geocode(query=address)
    return footprints_from_point(center_point, distance,
                                 footprint_type=footprint_type,
                                 retain_invalid=retain_invalid)
def footprints_from_polygon(polygon, footprint_type='building', retain_invalid=False):
    """
    Get footprints that lie within a (multi)polygon boundary.

    Parameters
    ----------
    polygon : shapely Polygon or MultiPolygon
        boundary to fetch footprints within; coordinates must be in
        latitude-longitude degrees
    footprint_type : string
        OSM tag key to query, e.g. 'building', 'landuse', 'place'
    retain_invalid : bool
        if False, footprints with invalid geometry are discarded

    Returns
    -------
    GeoDataFrame
    """
    # thin wrapper: all the work happens in create_footprints_gdf
    return create_footprints_gdf(polygon=polygon,
                                 footprint_type=footprint_type,
                                 retain_invalid=retain_invalid)
def footprints_from_place(place, footprint_type='building', retain_invalid=False):
    """
    Get footprints within the boundary polygon of a geocodable place.

    The query must be geocodable and OSM must have polygon boundaries for
    the geocode result. If OSM has no polygon for this place, use
    footprints_from_address instead, which geocodes the place name to a
    point and fetches footprints within some distance of it.

    Parameters
    ----------
    place : string
        query to geocode into a geojson boundary polygon
    footprint_type : string
        OSM tag key to query, e.g. 'building', 'landuse', 'place'
    retain_invalid : bool
        if False, footprints with invalid geometry are discarded

    Returns
    -------
    GeoDataFrame
    """
    # geocode the place to a boundary GeoDataFrame and take its first geometry
    boundary_gdf = gdf_from_place(place)
    boundary_polygon = boundary_gdf['geometry'].iloc[0]
    return create_footprints_gdf(boundary_polygon,
                                 footprint_type=footprint_type,
                                 retain_invalid=retain_invalid)
def plot_footprints(gdf, fig=None, ax=None, figsize=None, color='#333333', bgcolor='w',
                    set_bounds=True, bbox=None, save=False, show=True, close=False,
                    filename='image', file_format='png', dpi=600):
    """
    Plot a GeoDataFrame of footprints.

    Parameters
    ----------
    gdf : GeoDataFrame
        footprints
    fig : figure
        existing figure to draw on; created if None
    ax : axis
        existing axis to draw on; created if None
    figsize : tuple
        size of the figure, if one is created here
    color : string
        the color of the footprints
    bgcolor : string
        the background color of the plot
    set_bounds : bool
        if True, set bounds from either passed-in bbox or the spatial extent of the gdf
    bbox : tuple
        if True and if set_bounds is True, set the display bounds to this
        bbox, given as (north, south, east, west)
    save : bool
        whether to save the figure to disk or not
    show : bool
        whether to display the figure or not
    close : bool
        close the figure (only if show equals False) to prevent display
    filename : string
        the name of the file to save
    file_format : string
        the format of the file to save (e.g., 'jpg', 'png', 'svg')
    dpi : int
        the resolution of the image file if saving

    Returns
    -------
    fig, ax : tuple
    """
    # create figure/axis only if the caller did not supply both
    if fig is None or ax is None:
        fig, ax = plt.subplots(figsize=figsize, facecolor=bgcolor)
        ax.set_facecolor(bgcolor)

    # extract each polygon as a descartes patch, and add to a matplotlib patch
    # collection
    patches = []
    for geometry in gdf['geometry']:
        if isinstance(geometry, Polygon):
            patches.append(PolygonPatch(geometry))
        elif isinstance(geometry, MultiPolygon):
            for subpolygon in geometry: #if geometry is multipolygon, go through each constituent subpolygon
                patches.append(PolygonPatch(subpolygon))
    pc = PatchCollection(patches, facecolor=color, edgecolor=color, linewidth=0, alpha=1)
    ax.add_collection(pc)

    if set_bounds:
        if bbox is None:
            # set the figure bounds to the polygons' bounds
            left, bottom, right, top = gdf.total_bounds
        else:
            # bbox is given as (north, south, east, west)
            top, bottom, right, left = bbox
        ax.set_xlim((left, right))
        ax.set_ylim((bottom, top))

    # turn off the axis display set the margins to zero and point the ticks in
    # so there's no space around the plot
    ax.axis('off')
    ax.margins(0)
    ax.tick_params(which='both', direction='in')
    fig.canvas.draw()

    # make everything square
    ax.set_aspect('equal')
    fig.canvas.draw()

    fig, ax = save_and_show(fig=fig, ax=ax, save=save, show=show, close=close,
                            filename=filename, file_format=file_format, dpi=dpi, axis_off=True)

    return fig, ax
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.