id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
192075 | <reponame>dmitrykoro/NetworksLab2020<filename>Lab04_git/action_operator.py<gh_stars>0
import json
import sys
from math import ceil
from server import BLOCK_SIZE
from encrypt import encrypt_password
from database_handler import add_to_database, get_from_database, delete_from_database, get_total_amount
from datetime import datetime
date_format = '%Y-%m-%d %H:%M:%S.%f'
def authenticate(user, conn):
    """Authenticate a connected client and return its privilege level.

    Reads one JSON message from `conn`; a password of -1 means the client
    wants plain USER access. Otherwise the password is hashed and compared
    to the stored admin password, and the verdict is sent back to the client.

    Returns True for ADMIN, False for USER.
    """
    data = conn.recv(BLOCK_SIZE).decode("utf-8")
    data = json.loads(data)
    password = data.get("password", None)
    if password == -1:
        # Client declined to authenticate as admin.
        print(f'Authenticated user {user} as USER.')
        return False
    else:
        # Bug fix: the call target had been mangled to '<PASSWORD>password';
        # restore the hash helper imported from the encrypt module.
        password = encrypt_password(password)
        correct_password = get_from_database({"type": "password"})['password']
        if password == correct_password:
            accept = {"type": "auth", "password": 1}
            conn.send(bytes(json.dumps(accept), encoding="utf-8"))
            print(f'Authenticated user {user} as ADMIN.')
            return True
        else:
            accept = {"type": "auth", "password": -1}
            conn.send(bytes(json.dumps(accept), encoding="utf-8"))
            print(f'Authenticated user {user} as USER. (Wrong admin password)')
            return False
def park(number, conn, address):
    """Record car `number` as parked, reply to the client, and log the action."""
    if get_from_database({"_id": number}) is not None:
        # Car is already parked: report error code 1 and log it.
        reply = {"type": "error", "code": 1}  # car is already parked
        conn.send(bytes(json.dumps(reply), encoding="utf-8"))
        add_to_database(
            {"type": "log", "date": datetime.now(), "text": f"Sent error 1 to user {address}"})
        return
    # Accept the request, then persist the parking record with its start time.
    conn.send(bytes(json.dumps({"type": "accept"}), encoding="utf-8"))
    add_to_database({"_id": number, "time": datetime.now().strftime(date_format)})
    print(f'Parked car {number} successfully.')
    add_to_database(
        {"type": "log", "date": datetime.now(),
         "text": f"Parked car {number} successfully from user {address}"})
def unpark(number, conn, address):
    """Check a parked car out: bill it, reply to the client, and log the action."""
    record = get_from_database({"_id": number})  # hoisted: was queried twice
    if record is not None:
        finish_time = datetime.now().strftime(date_format)
        start_time = record.get("time", None)
        diff = datetime.strptime(finish_time, date_format) - datetime.strptime(start_time,
                                                                               date_format)
        # Bug fix: timedelta.seconds discards whole days, so stays of 24h or
        # more were under-billed; total_seconds() counts the full duration.
        amount = ceil(diff.total_seconds() / 3600) * 100  # 100 = price per hour
        checkout = {"type": "checkout", "amount": amount}
        conn.send(bytes(json.dumps(checkout), encoding="utf-8"))
        delete_from_database({"_id": number})
        db = {"type": "history", "number": number, "date": finish_time, "amount": amount}
        add_to_database(db)
        add_to_database(
            {"type": "log", "date": datetime.now(),
             "text": f"UNparked car {number} successfully from user {address}"})
    else:
        error = {"type": "error", "code": 2}  # car is not parked yet
        conn.send(bytes(json.dumps(error), encoding="utf-8"))
        add_to_database(
            {"type": "log", "date": datetime.now(), "text": f"Sent error 2 to user {address}"})
def get_big_data(type, conn):
    """Send every database document of the given type to the client.

    A size acknowledgement is sent first so the receiver knows how many
    bytes to expect before the payload itself is sent.
    NOTE(review): the parameter shadows the builtin `type`; renaming would
    be cleaner but the name is part of the call convention used here.
    """
    data = get_from_database({"type": type}, multiple=True)
    data = json.dumps(data, default=str)
    data = bytes(data, encoding="utf-8")
    # NOTE(review): sys.getsizeof reports the bytes *object* size (payload
    # plus CPython overhead), not len(data). The client presumably expects
    # this same figure -- confirm on the receiving side before changing.
    data_size_bytes = sys.getsizeof(data)
    ack_data = {"type": "size", "size": data_size_bytes}
    conn.send(bytes(json.dumps(ack_data), encoding="utf-8"))
    conn.send(data)
def get_total(conn):
    """Send the total collected amount (first aggregate row) to the client."""
    totals = get_total_amount()
    payload = json.dumps(totals[0])
    conn.send(bytes(payload, encoding="utf-8"))
| StarcoderdataPython |
3373588 | import ctypes
class CEnumMeta(type(ctypes.c_int)):
    # Metaclass for CEnum subclasses: converts each public int attribute of
    # the subclass into an instance of the subclass itself, and records the
    # name<->value mappings in the class's _names_/_values_ dicts.
    def __new__(cls, name, bases, namespace):
        cls2 = type(ctypes.c_int).__new__(cls, name, bases, namespace)
        # Skip the bootstrap classes defined in this module itself
        # (CEnum and the with_meta() "NewBase" shim).
        if namespace.get("__module__") != __name__:
            namespace["_values_"].clear()
            # NOTE(review): the loop variable shadows the 'name' parameter
            # above; the class name is not used past this point, but a
            # distinct loop variable would be safer.
            for name in namespace["_names_"].keys():
                if name.startswith("_"):
                    continue
                # Replace the raw int attribute with an enum instance.
                setattr(cls2, name, cls2(namespace[name]))
                namespace["_names_"][name] = namespace[name]
                namespace["_values_"][namespace[name]] = name
        return cls2
def with_meta(meta, base = object):
    """Build an anonymous intermediate class whose metaclass is *meta*.

    Inheriting from the returned class lets a subclass adopt the metaclass
    while still deriving from *base*. The "__module__" entry marks the shim
    as belonging to this module so the metaclass can skip processing it.
    """
    namespace = {"__module__" : __name__}
    return meta("NewBase", (base,), namespace)
class CEnum(with_meta(CEnumMeta, ctypes.c_int)):
    """Base class for ctypes-compatible, int-backed enumerations.

    Subclasses declare int attributes plus matching `_names_` entries; the
    CEnumMeta metaclass then converts those attributes into enum instances
    and fills `_names_` (name -> int) and `_values_` (int -> name).
    """
    _names_ = {}
    _values_ = {}
    __slots__ = []

    def __repr__(self):
        name = self._values_.get(self.value)
        if name is None:
            # Bug fix: previously used 'self.val', which does not exist and
            # raised AttributeError for any value without a registered name.
            return "%s(%r)" % (self.__class__.__name__, self.value)
        else:
            return "%s.%s" % (self.__class__.__name__, name)

    @classmethod
    def from_param(cls, obj):
        # ctypes argument conversion hook: accept members or plain ints.
        return int(obj)

    @classmethod
    def from_name(cls, name):
        """Return the registered value for *name* (KeyError if unknown)."""
        return cls._names_[name]

    @classmethod
    def from_value(cls, val):
        """Return the enum member for raw value *val* (KeyError if unknown)."""
        # Bug fix: previously 'getattr(self, ...)' inside a classmethod,
        # which raised NameError ('self' is not defined here).
        return getattr(cls, cls._values_[val])

    def __int__(self):
        return int(self.value)

    def __index__(self):
        return int(self)

    # Comparisons and hashing delegate to the underlying integer value.
    def __eq__(self, other):
        return int(self) == int(other)

    def __ne__(self, other):
        return int(self) != int(other)

    def __gt__(self, other):
        return int(self) > int(other)

    def __ge__(self, other):
        return int(self) >= int(other)

    def __lt__(self, other):
        return int(self) < int(other)

    def __le__(self, other):
        return int(self) <= int(other)

    def __hash__(self):
        return hash(int(self))
class DLLNotLoaded(Exception):
    """Raised when an operation is attempted on a DLL that is not loaded."""
class UnloadedDLL(object):
    """Placeholder for a DLL that has not been loaded.

    The singleton is falsy; calling it or touching any attribute raises
    DLLNotLoaded.
    """
    __slots__ = []

    def __bool__(self):
        return False

    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    def __call__(self, *args, **kwargs):
        raise DLLNotLoaded("DLL is not loaded")

    def __getattr__(self, attribute):
        raise DLLNotLoaded("DLL is not loaded")


# Replace the class with its singleton instance.
UnloadedDLL = UnloadedDLL()
| StarcoderdataPython |
152230 | from django.contrib import admin
from django.contrib.auth.models import Permission
from simple_history.admin import SimpleHistoryAdmin
from django.db.models import F
from .models import School, Profile, CotisationHistory, WhiteListHistory
class CotisationHistoryAdmin(SimpleHistoryAdmin):
    """
    The admin class for :class:`Consumptions <users.models.CotisationHistory>`.
    """
    # Columns shown in the change list.
    list_display = ('user', 'amount', 'duration', 'paymentDate', 'endDate', 'paymentMethod')
    ordering = ('user', 'amount', 'duration', 'paymentDate', 'endDate')
    # Search spans the related user's username and real name.
    search_fields = ('user__username', 'user__first_name', 'user__last_name')
    list_filter = ('paymentMethod', )
class BalanceFilter(admin.SimpleListFilter):
    """
    Admin list filter selecting profiles by the sign of their balance
    (credit compared to debit).
    """
    title = 'Solde'
    parameter_name = 'solde'

    def lookups(self, request, model_admin):
        # (URL value, human-readable label) pairs.
        return (
            ('po', '>0'),
            ('nu', '=0'),
            ('ne', '<0'),
        )

    def queryset(self, request, queryset):
        selected = self.value()
        if selected == 'po':
            return queryset.filter(credit__gt=F('debit'))
        if selected == 'nu':
            return queryset.filter(credit=F('debit'))
        if selected == 'ne':
            return queryset.filter(credit__lt=F('debit'))
        # No filter selected: leave the queryset untouched.
        return None
class ProfileAdmin(SimpleHistoryAdmin):
    """
    The admin class for :class:`Consumptions <users.models.Profile>`.
    """
    # 'balance' and 'is_adherent' are computed on the model, not columns.
    list_display = ('user', 'credit', 'debit', 'balance', 'school', 'cotisationEnd', 'is_adherent')
    ordering = ('user', '-credit', '-debit')
    search_fields = ('user__username', 'user__first_name', 'user__last_name')
    # BalanceFilter lets admins slice profiles by balance sign.
    list_filter = ('school', BalanceFilter)
class WhiteListHistoryAdmin(SimpleHistoryAdmin):
    """
    The admin class for :class:`Consumptions <users.models.WhiteListHistory>`.
    """
    list_display = ('user', 'paymentDate', 'endDate', 'duration', 'reason')
    ordering = ('user', 'duration', 'paymentDate', 'endDate')
    # 'reason' is searchable in addition to the user's identity fields.
    search_fields = ('user__username', 'user__first_name', 'user__last_name', 'reason')
# Register the models with their admin classes; plain SimpleHistoryAdmin is
# enough for models without a customised admin class.
admin.site.register(Permission, SimpleHistoryAdmin)
admin.site.register(School, SimpleHistoryAdmin)
admin.site.register(WhiteListHistory, WhiteListHistoryAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(CotisationHistory, CotisationHistoryAdmin) | StarcoderdataPython |
3326644 | """Chat history plugin"""
import os
from typing import Optional, List, Any
from dataclasses import dataclass, field
from wechaty_puppet import MessageType, FileBox
from wechaty import Wechaty, Message, get_logger
from wechaty.plugin import WechatyPlugin, WechatyPluginOptions
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker, declarative_base
from sqlalchemy import (
Column,
Integer,
VARCHAR,
Text
)
logger = get_logger('ChatHistoryPlugin')
# Message types that carry a downloadable file payload.
SUPPORTED_MESSAGE_FILE_TYPES: List[MessageType] = [
    MessageType.MESSAGE_TYPE_ATTACHMENT,
    MessageType.MESSAGE_TYPE_IMAGE,
    MessageType.MESSAGE_TYPE_EMOTICON,
    MessageType.MESSAGE_TYPE_VIDEO,
    MessageType.MESSAGE_TYPE_AUDIO
]
# SQLAlchemy declarative base shared by this plugin's models.
Base: Any = declarative_base()
class ChatHistory(Base):
    """ChatHistory"""
    # One row per received message; populated in ChatHistoryPlugin.on_message.
    __tablename__ = 'ChatHistory'
    id = Column(Integer, primary_key=True, autoincrement=True)  # surrogate key
    msg_id = Column(Integer, default=None)       # Wechaty message id
    filename = Column(Text, default=None)        # attachment file name, if any
    text = Column(Text, default=None)            # message text body
    timestamp = Column(Integer, default=None)    # timestamp from the message payload
    type = Column(Integer, default=None)         # MessageType value
    from_id = Column(VARCHAR(50), default=None)  # sender contact id
    room_id = Column(VARCHAR(50), default=None)  # room id (None for direct messages)
    to_id = Column(VARCHAR(50), default=None)    # receiver contact id (direct messages)
    mention_ids = Column(Text, default=None)     # comma-joined mentioned contact ids
@dataclass
class ChatHistoryPluginOptions(WechatyPluginOptions):
    """
    chat history plugin options
    """
    # Directory for saved attachment files (empty -> plugin default).
    chat_history_path: str = field(default_factory=str)
    # SQLAlchemy async database URL (empty -> plugin default sqlite file).
    chat_history_database: str = field(default_factory=str)
class ChatHistoryPlugin(WechatyPlugin):
    """Persist every incoming message (and any attached file) locally."""

    def __init__(self, options: Optional[ChatHistoryPluginOptions] = None):
        super().__init__(options)
        if options is None:
            options = ChatHistoryPluginOptions()
        # Bug fix: the original assigned self.chat_history_path only when the
        # option was empty, so a caller-supplied path left the attribute
        # unset and on_message() raised AttributeError.
        self.chat_history_path = options.chat_history_path or os.path.join(
            os.getcwd(), 'wechaty/chathistory')
        if not os.path.exists(self.chat_history_path):
            os.makedirs(self.chat_history_path)
        self.chat_history_database = options.chat_history_database \
            or 'sqlite+aiosqlite:///chathistory.db'

    @property
    def name(self) -> str:
        """Plugin identifier used by the Wechaty plugin registry."""
        return 'chat-history'

    async def init_plugin(self, wechaty: Wechaty) -> None:
        """init plugin: create the ChatHistory table if it does not exist."""
        async_engine = create_async_engine(self.chat_history_database)
        async with async_engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)

    async def on_message(self, msg: Message) -> None:
        """listen message event: store one row, then save any attachment."""
        payload = msg.payload
        # Fetch the attachment once and reuse it (the original called
        # msg.to_file_box() twice for the same message).
        file_box: Optional[FileBox] = None
        if msg.type() in SUPPORTED_MESSAGE_FILE_TYPES:
            file_box = await msg.to_file_box()

        async_engine = create_async_engine(self.chat_history_database)
        async_session = sessionmaker(async_engine,
                                     expire_on_commit=False,
                                     class_=AsyncSession)
        async with async_session() as session:
            async with session.begin():
                chat_history = ChatHistory(  # typo fix: was 'chathistroy'
                    msg_id=msg.message_id,
                    filename=file_box.name if file_box else None,
                    text=payload.text if payload.text else None,
                    timestamp=payload.timestamp,
                    type=payload.type,
                    from_id=payload.from_id,
                    room_id=payload.room_id if payload.room_id else None,
                    to_id=payload.to_id if payload.to_id else None,
                    mention_ids=','.join(
                        payload.mention_ids) if payload.mention_ids else None
                )
                session.add(chat_history)
            await session.commit()
        await async_engine.dispose()

        if file_box is not None:
            # Prefix room/sender/timestamp so file names stay unique on disk.
            filename = '-'.join(list(filter(lambda x: x is not None, [
                payload.room_id if payload.room_id else None,
                payload.from_id,
                str(payload.timestamp),
                file_box.name]
            )))
            await file_box.to_file(os.path.join(self.chat_history_path, filename))
| StarcoderdataPython |
3361458 | <gh_stars>1-10
from netapp.netapp_object import NetAppObject
class VolumeFlexcacheAttributes(NetAppObject):
    """
    Information about FlexCache volumes.
    """
    # Generated-SDK style: each attribute has a private backing field, a
    # getter property, and a setter that validates non-None values.
    _origin = None
    @property
    def origin(self):
        """
        The name of the origin volume that contains the
        authoritative data. This field is valid only for a
        FlexCache volume. The origin volume must belong to the
        same vserver that owns this volume.
        <p>
        Attributes: optional-for-create, non-modifiable
        """
        return self._origin
    @origin.setter
    def origin(self, val):
        # Validate before storing; assigning None clears without validation.
        if val != None:
            self.validate('origin', val)
        self._origin = val
    _cache_policy = None
    @property
    def cache_policy(self):
        """
        The name of the cache policy.
        <p>
        The default policy name is 'default'.
        <p>
        This policy applies only to FlexCache volumes and can be
        created using the 'flexcache-cache-policy-create' API.
        <p>
        Attributes: optional-for-create, modifiable
        """
        return self._cache_policy
    @cache_policy.setter
    def cache_policy(self, val):
        if val != None:
            self.validate('cache_policy', val)
        self._cache_policy = val
    _min_reserve = None
    @property
    def min_reserve(self):
        """
        The amount of space requested to be preallocated in the
        aggregate hosting the FlexCache volume.
        <p>
        Attributes: optional-for-create, modifiable
        """
        return self._min_reserve
    @min_reserve.setter
    def min_reserve(self, val):
        if val != None:
            self.validate('min_reserve', val)
        self._min_reserve = val
    _fill_policy = None
    @property
    def fill_policy(self):
        """
        The name of the fill policy.
        <p>
        The default policy name is 'default'.
        <p>
        This policy applies only to FlexCache volumes and can be
        created using the 'flexcache-fill-policy-create' API.
        <p>
        Attributes: optional-for-create, modifiable
        """
        return self._fill_policy
    @fill_policy.setter
    def fill_policy(self, val):
        if val != None:
            self.validate('fill_policy', val)
        self._fill_policy = val
    @staticmethod
    def get_api_name():
        # ONTAP API element name for this object.
        return "volume-flexcache-attributes"
    @staticmethod
    def get_desired_attrs():
        # Attribute names (API spelling) requested from the server.
        return [
            'origin',
            'cache-policy',
            'min-reserve',
            'fill-policy',
        ]
    def describe_properties(self):
        # NOTE(review): 'basestring' exists only in Python 2; this module
        # appears to target Python 2 -- confirm before running under 3.x.
        return {
            'origin': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'cache_policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'min_reserve': { 'class': int, 'is_list': False, 'required': 'optional' },
            'fill_policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| StarcoderdataPython |
67212 | <reponame>run-ai/runai
import os
import unittest
import keras
from keras.utils.np_utils import to_categorical
from keras.layers import Dense
from keras.models import Sequential
import keras.optimizers
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
import runai.utils
import runai.reporter
import runai.reporter.keras
NUM_CLASSES = 10     # MNIST digit classes
BATCH_SIZE = 16      # batch size used by the fit() tests
STEPS_PER_EPOCH = 1  # keep the generator-based training trivially short
class MockReporter(runai.utils.Hook):
    """Hook that records each distinct first argument passed to the hooked
    runai.reporter method, in first-seen order."""

    def __init__(self, methodName):
        super(MockReporter, self).__init__(runai.reporter, methodName)
        self.reported = []  # distinct reported inputs, first-seen order

    def __hook__(self, *args, **kwargs):
        reported_input = args[0]
        # Bug fix (naming): the original flag 'wasCurrentInputAlreadyReported'
        # was True precisely when the input had NOT been reported yet; the
        # inverted name invited mistakes. Same behavior, honest condition.
        if reported_input not in self.reported:
            self.reported.append(reported_input)
class KerasStopModelCallback(keras.callbacks.Callback):
    """Callback that stops training after the first batch."""

    def on_batch_end(self, batch, logs=None):
        # Bug fix: the default was the mutable literal {}, shared across all
        # calls; keras convention (and safe Python) is logs=None.
        self.model.stop_training = True
class KerasAutologTest(unittest.TestCase):
    """End-to-end tests for runai autologging around keras fit/fit_generator."""

    def _run_test(self, run_fit=True, expected_metrics=[], expected_parameters=[]):
        # Run one training step and verify exactly which metric/parameter
        # names were reported through the mocked runai.reporter hooks.
        # NOTE(review): mutable default arguments are shared between calls;
        # harmless here since they are never mutated, but fragile.
        self._mock_env_variables()
        with MockReporter('reportMetric') as reportMetricMock, MockReporter('reportParameter') as reportParameterMock:
            if run_fit:
                self._run_model_with_fit()
            else:
                self._run_model_with_fit_generator()
        self.assertEqual(reportMetricMock.reported, expected_metrics, 'Reported Metrics unmatched')
        self.assertEqual(reportParameterMock.reported, expected_parameters, 'Reported Paramters unmatched')

    def _mock_env_variables(self):
        # The runai reporter reads these from the environment.
        os.environ["podUUID"] = "podUUId"
        os.environ["reporterGatewayURL"] = "reporterGatewayURL"

    def _run_model_with_fit(self):
        # Trains for a single batch; KerasStopModelCallback halts training.
        x_train, y_train = self._get_x_train_y_train()
        model = self._create_model_and_compile()
        model.fit(x_train, y_train, batch_size=BATCH_SIZE, callbacks=[
            KerasStopModelCallback()])

    def _run_model_with_fit_generator(self):
        # Same as above but through the generator-based training path.
        x_train, y_train = self._get_x_train_y_train()
        model = self._create_model_and_compile()
        datagen = ImageDataGenerator()
        datagen.fit(x_train)
        model.fit_generator(datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
                            steps_per_epoch=STEPS_PER_EPOCH)

    def _create_model_and_compile(self):
        # Minimal MNIST classifier: flatten -> softmax.
        model = Sequential()
        model.add(keras.layers.Flatten(input_shape=(28, 28, 1)))
        model.add(Dense(NUM_CLASSES, activation='softmax'))
        optimizer = keras.optimizers.Adam()
        model.compile(loss='categorical_crossentropy', optimizer=optimizer)
        return model

    def _get_x_train_y_train(self):
        # MNIST training data, normalised to [0, 1] and one-hot encoded.
        (x_train, y_train), (_x_test, _y_test) = mnist.load_data()
        x_train = x_train.reshape(60000, 28, 28, 1)
        x_train = x_train.astype('float32')
        x_train /= 255
        y_train = to_categorical(y_train, NUM_CLASSES)
        return x_train, y_train

    def testFitWithoutAutoLog(self):
        # Without autolog enabled, nothing should be reported.
        self._run_test()

    def testFitWithAutoLog(self):
        runai.reporter.keras.autolog()
        expected_metrics = ['overall_epochs', 'batch_size', 'number_of_layers', 'epoch', 'step', 'accuracy', 'loss']
        expected_parameters = ['optimizer_name', 'learning_rate']
        self._run_test(expected_metrics=expected_metrics, expected_parameters=expected_parameters)
        runai.reporter.keras.disableAutoLog()

    def testFitAllMetrics(self):
        # Opt into the extra loss_method/epsilon parameters as well.
        runai.reporter.keras.autolog(loss_method=True, epsilon=True)
        expected_metrics = ['overall_epochs', 'batch_size', 'number_of_layers', 'epoch', 'step', 'accuracy', 'loss']
        expected_parameters = ['loss_method', 'optimizer_name', 'learning_rate', 'epsilon']
        self._run_test(expected_metrics=expected_metrics, expected_parameters=expected_parameters)
        runai.reporter.keras.disableAutoLog()

    def testFitGeneratorWithoutAutoLog(self):
        self._run_test(run_fit=False)

    def testFitGeneratorWithAutoLog(self):
        # Note: 'batch_size' is not reported on the fit_generator path.
        runai.reporter.keras.autolog()
        expected_metrics = ['overall_epochs', 'number_of_layers', 'epoch', 'step', 'accuracy', 'loss']
        expected_parameters = ['optimizer_name', 'learning_rate']
        self._run_test(run_fit=False, expected_metrics=expected_metrics, expected_parameters=expected_parameters)
        runai.reporter.keras.disableAutoLog()

    def testFitGeneratorAllMetrics(self):
        runai.reporter.keras.autolog(loss_method=True, epsilon=True)
        expected_metrics = ['overall_epochs', 'number_of_layers', 'epoch', 'step', 'accuracy', 'loss']
        expected_parameters = ['loss_method', 'optimizer_name', 'learning_rate', 'epsilon']
        self._run_test(run_fit=False, expected_metrics=expected_metrics, expected_parameters=expected_parameters)
        runai.reporter.keras.disableAutoLog()
#TODO: Add tests that will add new metrics to the compile method and verify they were added.
def pid_exists(pid):
    """Return True if a process with the given pid currently exists.

    Sends signal 0, which performs only the existence/permission check.
    """
    if pid <= 0:
        # pid 0 / negative values address process groups, not a single
        # process; treat them as non-existent for this check.
        return False
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False
    except PermissionError:
        # Bug fix: the process exists but belongs to another user; the
        # original treated every OSError (including EPERM) as "not running".
        return True
    except OSError:
        return False
    else:
        return True
class KerasReporterTest(KerasAutologTest):
    """Tests for the runai.reporter.keras.Reporter context manager."""

    def testCreationScope(self):
        # Reporter spawns a child process that must die with the context.
        with runai.reporter.keras.Reporter() as reporter:
            self.assertTrue(pid_exists(reporter.pid))
        self.assertFalse(pid_exists(reporter.pid))

    def testAutologCreation(self):
        for autolog in [True, False]:
            with runai.reporter.keras.Reporter(autolog=autolog) as reporter:
                # Bug fix: assertEquals is a deprecated alias (removed in
                # Python 3.12); use assertEqual.
                self.assertEqual(runai.reporter.keras.autologged(), autolog)
            self.assertFalse(runai.reporter.keras.autologged())

    def testAutologManual(self):
        with runai.reporter.keras.Reporter() as reporter:
            self.assertFalse(runai.reporter.keras.autologged())
            reporter.autolog()
            self.assertTrue(runai.reporter.keras.autologged())
        self.assertFalse(runai.reporter.keras.autologged())

    def testAutologSanity(self):
        class Reporter(runai.reporter.keras.Reporter):
            # Records reported names instead of sending them anywhere.
            def __init__(self, *args, **kwargs):
                super(Reporter, self).__init__(*args, **kwargs)
                self.metrics = set()
                self.parameters = set()

            def reportMetric(self, name, value):
                self.metrics.add(name)

            def reportParameter(self, name, value):
                self.parameters.add(name)

        with Reporter() as reporter:
            reporter.autolog()
            self._run_model_with_fit()
        self.assertEqual(reporter.metrics, { 'overall_epochs', 'batch_size', 'number_of_layers', 'epoch', 'step', 'accuracy', 'loss' })
        self.assertEqual(reporter.parameters, { 'optimizer_name', 'learning_rate' })
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3290961 | <gh_stars>1-10
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Microbiomeutil(MakefilePackage):
    """Microbiome analysis utilities"""

    homepage = "http://microbiomeutil.sourceforge.net/"
    url = "https://downloads.sourceforge.net/project/microbiomeutil/microbiomeutil-r20110519.tgz"

    version('20110519', '11eaac4b0468c05297ba88ec27bd4b56')

    depends_on('perl', type=('build', 'run'))
    depends_on('blast-plus')
    depends_on('cdbfasta')

    # Tool directories that are both installed and placed on PATH.
    _TOOL_DIRS = ('ChimeraSlayer', 'NAST-iEr', 'TreeChopper', 'WigeoN')

    def install(self, spec, prefix):
        """Copy the pre-built tool and resource trees under the prefix."""
        for subdir in self._TOOL_DIRS:
            install_tree(subdir, join_path(prefix, subdir))
        install_tree('docs', prefix.docs)
        install_tree('RESOURCES', prefix.resources)
        install_tree('AmosCmp16Spipeline', prefix.AmosCmp16Spipeline)

    def setup_environment(self, spack_env, run_env):
        """Expose each installed tool directory on PATH at run time."""
        for subdir in self._TOOL_DIRS:
            run_env.prepend_path('PATH', join_path(self.prefix, subdir))
| StarcoderdataPython |
37762 | # Export the contents of AviSys files SIGHTING.DAT and FNotes.DAT to CSV format
# Author: <NAME> <<EMAIL>>
# Version: 1.2 3 April 2021
import sys
import csv
import ctypes
# Input files (AviSys data directory)
DATA_FILE = 'SIGHTING.DAT'       # sighting records
MASTER_FILE = 'MASTER.AVI'       # taxonomy / species master list
PLACES_FILE = 'PLACES.AVI'       # place names and hierarchy links
NOTE_INDEX = 'FNotes.IX'         # index into the field-notes data file
NOTE_FILE = 'FNotes.DAT'         # field-notes data blocks
ASSOCIATE_FILE = 'ASSOCIAT.AVI'  # AviSys place -> eBird hotspot associations
# Output files
EXPORT_FILE = 'AviSys.sightings.'  # CSV export file-name prefix
NOTE_OUTPUT = 'FieldNotes.txt'     # extracted field notes
# US state name (as spelled in AviSys) -> two-letter postal abbreviation.
stateCode = {
    'Alabama':'AL',
    'Alaska':'AK',
    'Arizona':'AZ',
    'Arkansas':'AR',
    'California':'CA',
    'Colorado':'CO',
    'Connecticut':'CT',
    'Delaware':'DE',
    'D.C.':'DC',
    'Florida':'FL',
    'Georgia':'GA',
    'Hawaii':'HI',
    'Idaho':'ID',
    'Illinois':'IL',
    'Indiana':'IN',
    'Iowa':'IA',
    'Kansas':'KS',
    'Kentucky':'KY',
    'Louisiana':'LA',
    'Maine':'ME',
    'Maryland':'MD',
    'Massachusetts':'MA',
    'Michigan':'MI',
    'Minnesota':'MN',
    'Mississippi':'MS',
    'Missouri':'MO',
    'Montana':'MT',
    'Nebraska':'NE',
    'Nevada':'NV',
    'New Hampshire':'NH',
    'New Jersey':'NJ',
    'New Mexico':'NM',
    'New York':'NY',
    'North Carolina':'NC',
    'North Dakota':'ND',
    'Ohio':'OH',
    'Oklahoma':'OK',
    'Oregon':'OR',
    'Pennsylvania':'PA',
    'Rhode Island':'RI',
    'South Carolina':'SC',
    'South Dakota':'SD',
    'Tennessee':'TN',
    'Texas':'TX',
    'Utah':'UT',
    'Vermont':'VT',
    'Virginia':'VA',
    'Washington':'WA',
    'West Virginia':'WV',
    'Wisconsin':'WI',
    'Wyoming':'WY'
}
# Canadian province/territory name (AviSys spelling) -> standard two-letter
# code; a few differ from AviSys's own abbreviations (noted inline).
provinceCode = {
    'Alberta':'AB',
    'British Columbia':'BC',
    'Manitoba':'MB',
    'New Brunswick':'NB',
    'Newfoundland':'NL', # AviSys uses 'NF'
    'Northwest Terr.':'NT',
    'Nova Scotia':'NS',
    'Nunavut':'NU',
    'Ontario':'ON',
    'Prince Edward Is.':'PE',
    'Quebec':'QC', # AviSys uses 'PQ'
    'Saskatchewan':'SK',
    'Yukon Territory':'YT'
}
class NoteBlock:
    """One 512-byte block from FNotes.DAT, with its continuation chain."""
    # FNotes.DAT contains 512-byte blocks. The first block is a header. Subsequent blocks have this structure:
    # If byte 0 is 00: (First block in a note)
    #   Offset
    #     0: Flag (00)
    #     1-3: 000000
    #     4-7: Note number
    #     8-505: Data
    #     506-507: Number of valid bytes from offset 0 through 505
    #     508-511: Index of next block
    # If byte 0 is 01: (Block that continues a note)
    #     0: Flag (01)
    #     1-505: Data
    #     506-507: Number of valid bytes from offset 1 through 505
    #     508-511: Index of next block
    # Data lines are contained in fixed-length records of 125 bytes, which span blocks
    # E.g., first block contains 3 records of 125 bytes, plus the first 123 bytes of the 4th record.
    # Each data line is prefixed with its length in the first byte
    def __init__(self,file,blockNumber): # Read the specified block from FNotes.DAT
        self.file = file
        offset = blockNumber * 512
        file.seek(offset)
        block = file.read(512)
        validBytes = int.from_bytes(block[506:508],'little')
        self.next = int.from_bytes(block[508:512],'little')  # 0 means end of chain
        if block[0] == 0:
            self.data = block[8:validBytes] # First block in chain
        else:
            self.data = block[1:validBytes+1] # Any subsequent block
    def extract(self): # Extract the chain of blocks, and the individual records from the chain
        """Return the note text: one decoded line per 125-byte record."""
        data = self.extractBlocks()
        output = ''
        ptr = 0
        while ptr < len(data):
            strlen = data[ptr] # First byte has the length
            ptr += 1 # String starts in second byte
            output += data[ptr:ptr+strlen].decode('Windows-1252') + '\n'
            ptr += 124
        return output
    def extractBlocks(self): # Extract data from this block and blocks chained to it
        """Return the raw data bytes of this block plus all chained blocks."""
        data = self.data
        if self.next:
            block = NoteBlock(self.file,self.next)
            data += block.extractBlocks()
        return data
def readMaster():
    """Build species-number lookup tables from MASTER.AVI.

    Returns a tuple (name, genusName, speciesName) of dicts keyed by
    species number. Exits the program if the file cannot be opened.
    """
    # MASTER.AVI contains the taxonomy in 110 byte records
    # Byte  Content
    #   5-6     Species number
    #   7       Common name length
    #   8-43    Common name
    #   52      Genus name length
    #   53-76   Genus name
    #   77      Species name length
    #   78-101  Species name
    # (Bytes 0-4 are life-list/checklist masks; 44-51 state checklist mask;
    #  102-103 ABA bytes; 104-109 always 00.)
    name = {}
    genusName = {}
    speciesName = {}
    try:
        master_input = open(MASTER_FILE, "rb")
    except FileNotFoundError:
        print('Error: File', MASTER_FILE, 'not found.')
        raise SystemExit
    except OSError:  # was a bare 'except': narrowed to I/O failures
        print("Error opening", MASTER_FILE, '--', sys.exc_info()[1])
        raise SystemExit
    # Bug fix: the original left the file open if a read raised; 'with'
    # guarantees it is closed on every path.
    with master_input:
        while True:
            taxon = master_input.read(110)  # Read a record of 110 bytes
            if not taxon:
                break
            speciesNo = int.from_bytes(taxon[5:7], "little")
            name[speciesNo] = taxon[8:(8 + taxon[7])].decode('Windows-1252')
            genusName[speciesNo] = taxon[53:(53 + taxon[52])].decode('Windows-1252')
            speciesName[speciesNo] = taxon[78:(78 + taxon[77])].decode('Windows-1252')
    return (name, genusName, speciesName)
class Place:
    """One record from PLACES.AVI plus its derived hierarchy level."""

    LEVEL_SPAN = 450  # place numbers allotted to each hierarchy level ("table")

    def __init__(self, placeNumber, name, link):
        self.placeNumber = placeNumber
        self.name = name
        self.link = link
        # Hierarchy level is derived directly from the record number.
        self.table = (placeNumber - 1) // self.LEVEL_SPAN

    def __str__(self):
        return '%d: %s %d (table %d)' % (
            self.placeNumber, self.name, self.link, self.table)
def readPlaces():
    """Read PLACES.AVI and return {placeNumber: Place}.

    Each Place also receives a 6-entry `linklist`: the place names along its
    hierarchy chain, one slot per level, '' for unused levels.
    """
    # The places file (PLACES.AVI) contains fixed length records of 39 bytes
    # Bytes
    #   0-1    Place number
    #   6      Length of place name
    #   7-36   Place name
    #   37-38  Place number of linked location
    output = {}
    try:
        places_input = open(PLACES_FILE, "rb")
    except FileNotFoundError:
        print('Error: File', PLACES_FILE, 'not found.')
        raise SystemExit
    except OSError:  # was a bare 'except': narrowed to I/O failures
        print("Error opening", PLACES_FILE, '--', sys.exc_info()[1])
        raise SystemExit
    with places_input:  # ensure the file closes even if a read fails
        while True:  # Read all the places in the file
            place = places_input.read(39)  # Read a record of 39 bytes
            if not place:
                break
            placeNumber = int.from_bytes(place[0:2], "little")
            if placeNumber == 0:
                continue
            name = place[7:(7 + place[6])].decode('Windows-1252')
            link = int.from_bytes(place[37:39], "little")
            output[placeNumber] = Place(placeNumber, name, link)
    # Now make the 6-level list of links for each place
    for placeNumber in output:
        place = output[placeNumber]
        links = []
        for level in range(6):
            if level != place.table:
                links.append('')  # level not occupied by the current chain entry
                continue
            links.append(place.name)
            nextPlace = place.link
            if nextPlace == 0:
                # Bug fix: the original padded using a local 'table' that was
                # unbound when the first matched place had no link, raising
                # NameError. Pad the remaining levels explicitly instead.
                links.extend([''] * (5 - level))
                break
            place = output[nextPlace]  # follow the chain to the linked place
        output[placeNumber].linklist = links
    return output
class Association:
    """Mapping of one AviSys place to its associated eBird hotspot."""

    def __init__(self, placeName, locationName, lat, lng, state, nation):
        # Values are stored verbatim as decoded from ASSOCIAT.AVI.
        self.placeName, self.locationName = placeName, locationName
        self.lat, self.lng = lat, lng
        self.state, self.nation = state, nation
def readAssociate():
    """Read ASSOCIAT.AVI and return {AviSys place name: Association}.

    The file is optional: if it is missing an empty dict is returned.
    """
    # The hotspot association file (ASSOCIAT.AVI) contains fixed length
    # records of 152 bytes:
    #   0       place name length       1-30    AviSys place (30 chars)
    #   34      locid length            35-41   locid
    #   42      hotspot length          43-102  eBird hotspot (60 chars)
    #   103     lat length              104-115 lat (ascii)
    #   116-123 binary (float) lat
    #   124     lng length              125-136 lng (ascii)
    #   137-144 binary (float) lng
    #   145     state length            146-148 state
    #   149     nation length           150-151 nation
    output = {}
    try:
        associate_input = open(ASSOCIATE_FILE, "rb")
    except FileNotFoundError:
        # Associations are optional; proceed without them.
        print('Note: File', ASSOCIATE_FILE, 'not found.')
        return output
    except OSError:  # was a bare 'except': narrowed to I/O failures
        print("Error opening", ASSOCIATE_FILE, '--', sys.exc_info()[1])
        raise SystemExit
    # Bug fix: use 'with' so the file is closed even if a read raises.
    with associate_input:
        while True:  # Read all the associations in the file
            association = associate_input.read(152)  # one 152-byte record
            if not association:
                break
            if len(association) != 152:
                # Truncated trailing record: report it and keep reading
                # (the next read returns b'' and ends the loop).
                print("Odd, length is", len(association))
                continue
            place = association[1:1 + association[0]].decode('Windows-1252')
            location = association[43:43 + association[42]].decode('Windows-1252')
            lat = association[104:104 + association[103]].decode('Windows-1252')
            lng = association[125:125 + association[124]].decode('Windows-1252')
            state = association[146:146 + association[145]].decode('Windows-1252')
            nation = association[150:150 + association[149]].decode('Windows-1252')
            output[place] = Association(place, location, lat, lng, state, nation)
    return output
def readNoteIndex():
    """Read FNotes.IX and return {note number: block number in FNotes.DAT}."""
    # FNotes.IX contains fixed-length blocks.
    # The first block begins with a 32 byte descriptive header:
    #   Bytes 0-3   contain 0xffffffff
    #   Bytes 4-7   contain ??
    #   Bytes 8-11  Number of blocks in the file
    #   Bytes 12-15 Size of each block (874 bytes)
    #   Bytes 16-21 ??
    #   Bytes 22-25 Number of field notes in the file
    #   Bytes 26-29 Number of notes per block (62)
    # The rest of the first block is empty.
    # In subsequent blocks:
    #   Byte 0: Number of valid index entries in this block
    #   Index entries begin at Byte 6 and are an array of 14-byte entries:
    #     bytes 0-3 block number in binary, byte 8 length of note number
    #     (always 5), bytes 9-13 note number in ascii.
    # Valid index entries are grouped at the beginning of a block,
    # and the block may be padded out with non-valid, i.e., unused, entries.
    try:
        note_index = open(NOTE_INDEX, "rb")
    except FileNotFoundError:
        print('Error: File', NOTE_INDEX, 'not found.')
        # Bug fix: the original fell through here and crashed later with
        # NameError on 'note_index'; exit like the other file readers do.
        raise SystemExit
    except OSError:  # was a bare 'except': narrowed to I/O failures
        print("Error opening", NOTE_INDEX, '--', sys.exc_info()[1])
        raise SystemExit
    with note_index:  # ensure the file closes even if a read fails
        header = note_index.read(32)
        marker = int.from_bytes(header[0:4], 'little')
        if marker != 4294967295:
            print('Unexpected value', marker, 'at beginning of', NOTE_INDEX)
        numBlocks = int.from_bytes(header[8:12], 'little')     # number of 874 byte blocks
        blockSize = int.from_bytes(header[12:16], 'little')    # blocksize (874, 0x036a)
        numNotes = int.from_bytes(header[22:26], 'little')     # number of notes
        blockFactor = int.from_bytes(header[26:30], 'little')  # notes per block (62, 0x3E)
        reclen = int((blockSize - 6) / blockFactor)  # expected to be 14
        if reclen != 14:
            print('Reclen was expected to be 14 but is', reclen)
            raise SystemExit
        # Have already read 32 bytes of the first block; skip the rest.
        note_index.read(blockSize - 32)
        index = {}
        while True:
            block = note_index.read(blockSize)
            if not block:
                break
            numValid = block[0]
            if not numValid:
                break
            # Loop through each index entry in this block
            for ptr in range(6, blockSize, reclen):
                ix = block[ptr:ptr + reclen]
                if not ix:
                    break
                blockNumber = int.from_bytes(ix[0:4], 'little')
                nchar = ix[8]
                ascii = ix[9:9 + nchar].decode('Windows-1252')
                index[int(ascii)] = blockNumber
                numValid -= 1
                if not numValid:
                    break  # Finished with all valid entries this block
    return index
def integrateNote(comment, fieldnoteText):
    """Merge an AviSys comment with its field note into a single string.

    Observations imported from eBird via http://avisys.info/ebirdtoavisys/
    may have an AviSys comment that duplicates the beginning of the eBird
    field note.  If so, the duplicated comment text is dropped and only the
    comment's prefix (attribute codes and/or a parenthesized lead-in) is
    kept in front of the field note.

    comment       -- short AviSys comment, possibly starting with /X or /XX/
                     attribute codes and/or a '(...)' parenthesized prefix
    fieldnoteText -- full field note text ('' if there is none)

    Returns the combined string, stripped of surrounding blanks and
    newlines.  When fieldnoteText is empty the comment is returned
    unchanged.
    """
    if fieldnoteText != '':  # If there is a field note
        work = comment       # Working copy of the comment
        keepLen = 0          # Length of comment prefix to keep if duplication found
        ptr = 0              # Where we are in the comment
        hasAttributes = ptr < len(work) and work[ptr] == '/'
        while hasAttributes:  # AviSys attributes at the beginning of comment
            # Attributes are either 2 or 3 bytes (3 when a '/' follows, e.g. /NN/)
            attributeLen = 3 if ptr + 2 < len(work) and work[ptr + 2] == '/' else 2
            ptr += attributeLen  # Bump ptr past this attribute
            while ptr < len(work) and work[ptr] == ' ':  # and past any trailing blanks
                ptr += 1
            hasAttributes = ptr < len(work) and work[ptr] == '/'  # another attribute?
        if ptr < len(work) and work[ptr] == '(':
            # The first part of the comment is parenthesized; skip over it
            ptr += 1
            while ptr < len(work) and work[ptr] != ')':
                ptr += 1
            # bug fix: bound-check before reading -- an unterminated '('
            # previously raised IndexError here
            if ptr < len(work) and work[ptr] == ')':
                ptr += 1
            while ptr < len(work) and work[ptr] == ' ':
                ptr += 1
        keepLen = ptr      # Keep at least this much of the comment
        work = work[ptr:]  # Remainder to test for duplication in the field note
        text = fieldnoteText
        linend = fieldnoteText.find('\n')  # end of first line (-1 if none)
        # If the first line contains ' :: ' it is probably a heading, so skip it
        if fieldnoteText[0:linend].find(' :: ') > 0:
            text = fieldnoteText[linend + 1:]
            linend = text.find('\n')  # end of second line
        # Examine the first two lines as one line.
        # NOTE(review): when there is no newline, linend is -1, which drops the
        # last character and re-appends the whole text; preserved as-is since
        # the prefix comparison below still behaves correctly.
        text = text[0:linend] + ' ' + text[linend + 1:]
        ptr = 0
        while ptr < len(text) and text[ptr] == ' ':  # Skip over any leading blanks
            ptr += 1
        if len(work):  # If there is comment text beyond the prefix
            if text[ptr:ptr + len(work)] == work:
                # The comment duplicates the beginning of the field note
                if keepLen:
                    # Keep only the prefix (attributes and/or parenthesized part)
                    comment = comment[0:keepLen]
                else:
                    comment = ''  # Discard the entire comment.
        comment = comment.strip() + ' ' + fieldnoteText  # prefix + field note
        comment = comment.strip(' \n')
    return comment
#########################################################################################################
######################################## The program starts here ########################################
#########################################################################################################
# ---------------------------------------------------------------------------
# Main program state
# ---------------------------------------------------------------------------
outArray = []   # one row per valid sighting (column layout documented at the append below)
noteDict = {}   # sighting record number -> field note text
# Distinguish "run from command line" from "run by GUI double-click".
# ref https://stackoverflow.com/questions/55172090/detect-if-python-program-is-executed-via-windows-gui-double-click-vs-command-p
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
process_array = (ctypes.c_uint * 1)()
num_processes = kernel32.GetConsoleProcessList(process_array, 1)
if len(sys.argv) < 2: # If no command-line argument
    if num_processes <= 2: # Run from double-click
        outputType = 'eBird'
    else: # Run from command line
        outputType = 'AviSys'
else:
    outputType = sys.argv[1]
    if outputType.lower() == 'avisys':
        outputType = 'AviSys'
    elif outputType.lower() == 'ebird':
        outputType = 'eBird'
    else:
        print("Please specify either AviSys or eBird")
        raise SystemExit
try:
    FNotes = open(NOTE_FILE,"rb")
except FileNotFoundError:
    print('Error: File',NOTE_FILE,'not found.')
    raise SystemExit
except:
    print("Error opening",NOTE_FILE,'--',sys.exc_info()[1])
    raise SystemExit
# Load the lookup tables built from the other AviSys data files.
noteIndex = readNoteIndex()
(name,genusName,speciesName) = readMaster()
places = readPlaces()
association = readAssociate()
try:
    sighting_file = open(DATA_FILE,"rb")
except FileNotFoundError:
    print('Error: File',DATA_FILE,'not found.')
    raise SystemExit
except:
    print("Error opening",DATA_FILE,'--',sys.exc_info()[1])
    raise SystemExit
# Format of SIGHTING.DAT
# Header record
#    0-3 ffffffff
#    8-11 Number of records
#    12  Reclen (6F, 111)
#    padded to 111 bytes
#
# Sighting record
#   0-3     always 00000000
#   4-5     Species number
#   6-9     Fieldnote number
#   10-13   Date
#   14-15   Place number
#   16      Country len
#   17-19   Country
#   20-23   nation bits  e.g. 0d200800 for lower 48
#   24-27   always 00000000
#   28      Comment len
#   29-108  Comment
#   109-110 Count
#
# Update 2021 08 14:
# I figured out how bytes 0-3 are used.
# For valid sighting records, the first 4 bytes are zeroes.
# Corrupted records can be kept in the file but ignored;
# they are stored in a linked list where bytes 0-3 are the link pointer.
# The last record in the linked list has ffffffff in bytes 0-3.
# The first four bytes of the header (first four bytes of the file) point to the beginning of the linked list of corrupt records.
# If there are no corrupt records, the file begins with ffffffff.
# The value of the link pointer is the record number; thus multiply by 111 to get the byte offset in the file.
# To ignore invalid records, skip any record that does not begin with 00000000.
#
# Nation bits:
# 00000100 Australasia
# 00000200 Eurasia
# 00000400 South Polar
# 00000800 [AOU]
#
# 00010000 [Asia]
# 00020000 Atlantic Ocean
# 00040000 Pacific Ocean
# 00080000 Indian Ocean
#
# 00100000 [Oceanic]
# 00200000 North America
# 00400000 South America
# 00800000 Africa
#
# 01000000 [ABA Area]
# 02000000 [Canada]
# 04000000 [US]
# 08000000 [Lower 48]
#
# 10000000 [West Indies]
# 20000000 [Mexico]
# 40000000 [Central America]
# 80000000 [Western Palearctic]
header = sighting_file.read(111) # Read a 111 byte record
marker = int.from_bytes(header[0:4],'little')  # head of corrupt-record list (ffffffff if none); see format notes above
corruptRecords = 0
EXPORT_FILE += outputType+'.csv'  # e.g. <prefix>eBird.csv or <prefix>AviSys.csv
try:
    CSV = open(EXPORT_FILE,'w', newline='')
except PermissionError:
    print('Denied permission to open',EXPORT_FILE,'-- Maybe it is open in another program? If so, close it and try again.')
    raise SystemExit
except:
    print('Error opening',EXPORT_FILE,'--',sys.exc_info()[1])
    raise SystemExit
try:
    noteOut = open(NOTE_OUTPUT,'w', newline='')
except PermissionError:
    print('Denied permission to open',NOTE_OUTPUT,'-- Maybe it is open in another program? If so, close it and try again,')
    raise SystemExit
except:
    print('Error opening',NOTE_OUTPUT,'--',sys.exc_info()[1])
    raise SystemExit  # bug fix: previously fell through, crashing later with noteOut undefined
nrecs = int.from_bytes(header[8:12],"little")
reclen = header[12]
if reclen != 111:
    print('Record length is', reclen, 'expecting it to be 111.')
    raise SystemExit
recordCount = 0
while True:
    sighting = sighting_file.read(111)
    if not sighting:
        break  # end of file
    recordCount+=1
    # Bytes 0-3 are zero for valid records; non-zero marks a corrupt record
    # kept in the file's linked free list (see format notes above).
    corruptPointer = int.from_bytes(sighting[0:4],'little')
    corruptedRecord = corruptPointer != 0
    speciesNo = int.from_bytes(sighting[4:6],'little')
    fieldnote = int.from_bytes(sighting[6:10],'little')
    if fieldnote:
        block = NoteBlock(FNotes,noteIndex[fieldnote])
        fieldnoteText = block.extract()
        noteDict[recordCount] = fieldnoteText
    else:
        fieldnoteText = ''
    fieldnoteText = fieldnoteText.rstrip(' \n')
    # Date is packed as (year-1930)*10000 + month*100 + day
    date = int.from_bytes(sighting[10:14],'little')
    day = date % 100
    month = (date // 100) % 100
    year = (date // 10000) + 1930
    date = str(month) + '/' + str(day) + '/' + str(year)  # display form m/d/yyyy
    sortdate = str(year) + '-' + str(month).rjust(2,'0') + '-' + str(day).rjust(2,'0')  # sortable yyyy-mm-dd
    place = int.from_bytes(sighting[14:16],'little')
    countryLen = sighting[16]  # NOTE(review): length byte is read but the decode below always takes 2 chars -- confirm country codes are always 2 characters
    country = sighting[17:19].decode('Windows-1252')
    commentLen = sighting[28]
    shortComment = sighting[29:29+commentLen].decode('Windows-1252').strip()
    comment = integrateNote(shortComment,fieldnoteText)
    if outputType == 'eBird':
        comment = comment.replace("\n"," ")  # eBird comments must be a single line
    tally = int.from_bytes(sighting[109:111],'little')
    if speciesNo in name:
        commonName = name[speciesNo]
    else:
        commonName = '?'
        if not corruptedRecord:
            print("No name found for species number", speciesNo)
            raise SystemExit
    if place not in places:
        if not corruptedRecord:
            print("Place", place, "is not set")
            raise SystemExit
        else:
            location = 'Unknown location'
            # bug fix: state was previously left unset (NameError) or stale
            # from the prior record when reporting a corrupt record below
            state = ''
    else:
        linkList = places[place].linklist
        # Use the most specific non-empty name in the place's link list
        location = linkList[0] if linkList[0] != '' else \
                   linkList[1] if linkList[1] != '' else \
                   linkList[2] if linkList[2] != '' else \
                   linkList[3] if linkList[3] != '' else \
                   linkList[4] if linkList[4] != '' else \
                   linkList[5] if linkList[5] != '' else \
                   linkList[6]
        if outputType == 'eBird' and location in association:
            location = association[location].locationName # Use associated eBird location name instead of AviSys place name
        if country == 'US':
            state = stateCode[linkList[3]]
        elif country == 'CA':
            state = provinceCode[linkList[3]]
        else:
            state = ''
    if corruptedRecord:
        corruptRecords += 1
        print('Corrupt record found:',commonName,location,date,state,country,comment)
    else:
        # Columns: 0 common name, 1 genus, 2 species, 3 count, 4 comment,
        # 5 location, 6 sortable date, 7 display date, 8 state, 9 country,
        # 10 species number, 11 record number, 12 short comment
        outArray.append([commonName,genusName[speciesNo],speciesName[speciesNo],tally,comment,location,sortdate,date,state,country,speciesNo,recordCount,shortComment])
def sortkey(array):
    """Sort key for output rows: column 6, the sortable YYYY-MM-DD date string."""
    return array[6]
outArray.sort(key=sortkey)  # chronological order via the YYYY-MM-DD column
if outputType == 'eBird':
    csvFields = ['Common name','Genus','Species','Species Count','Species Comment','Location','Lat','Lng','Date','Start time','State','Country','Protocol','N. Observers','Duration','Complete','Distance','Area','Checklist comment','Important: Delete this header row before importing to eBird']
else:
    csvFields = ['Common name','Genus','Species','Place','Date','Count','Comment','State','Nation','Blank','SpeciesNo']
CSVwriter = csv.DictWriter(CSV,fieldnames=csvFields)
CSVwriter.writeheader()
# Row layout (built by the main loop above):
# 0 common name, 1 genus, 2 species, 3 count, 4 integrated comment,
# 5 location, 6 sortable date, 7 display date, 8 state, 9 country,
# 10 species number, 11 record number, 12 short comment
if outputType == 'eBird':
    for row in outArray:
        CSVwriter.writerow({'Common name':row[0],'Genus':row[1],'Species':row[2],'Species Count':row[3],'Species Comment':row[4],
                            'Location':row[5],'Lat':'','Lng':'','Date':row[7],'Start time':'','State':row[8],'Country':row[9],
                            'Protocol':'historical','N. Observers':1,'Duration':'','Complete':'N','Distance':'','Area':'','Checklist comment':'Imported from AviSys'})
else:
    for row in outArray:
        dateVal = row[6].split('-')
        date = str(int(dateVal[1]))+'/'+str(int(dateVal[2]))+'/'+dateVal[0]
        # bug fix: State/Nation/SpeciesNo previously used row[7]/row[8]/row[9]
        # (display date / state / country) -- one column off from the row layout.
        CSVwriter.writerow({'Common name':row[0],'Genus':row[1],'Species':row[2],'Place':row[5],'Date':date,'Count':row[3],'Comment':row[4],
                            'State':row[8],'Nation':row[9],'Blank':'','SpeciesNo':row[10]})
# Write all field notes to a file
# The entry for each note begins with species name -- date -- place on the first line, followed by a blank line.
# The text of the field note follows
# The note is terminated by a line of 80 equal signs (which is something that could not be part of the actual note).
# Note: If AviSys type output, the place is the AviSys place. If eBird type output, the associated eBird location, if any, is used as the place.
for row in outArray:
    recordNo = row[11]
    if recordNo in noteDict:
        shortComment = row[12]
        noteOut.write(row[0] +' -- '+ row[6] +' -- '+ row[5] + '\n\n')
        if len(shortComment):
            noteOut.write( 'Short comment: ' + shortComment + '\n\n')
        noteOut.write(noteDict[recordNo] + '\n' + '==========================================================================================\n')
sighting_file.close()
noteOut.close()
CSV.close()
# Summary / sanity report.
if recordCount != nrecs:
    print('Should be', nrecs, 'records, but counted', recordCount)
else:
    print(nrecs,"records processed")
if corruptRecords:
    if corruptRecords == 1:
        print('File', DATA_FILE, 'contains one corrupt record, which has been ignored. ')
        print('To remove it from AviSys, run Utilities->Restructure sighting file.')
    else:
        print('File', DATA_FILE, 'contains', corruptRecords, 'corrupt records, which have been ignored. ')
        print('To remove them from AviSys, run Utilities->Restructure sighting file.')
    print(nrecs-corruptRecords, 'records are valid.')
| StarcoderdataPython |
3294410 | <filename>extract_haplotype_read_counts.py<gh_stars>0
#!/bin/env python
#
# Copyright 2013 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
"""
usage: extract_haplotype_read_counts.py [-h] [--assembly ASSEMBLY]
[--target_region_size TARGET_REGION_SIZE]
[--sample_file SAMPLE_FILE]
[--homozygous_as_counts {zero,rand_hap,rand_allele}]
track_prefix pop individual input_file
positional arguments:
track_prefix prefix of tracks to extract reads from (e.g.
10_IND/PolII/read_counts/PolII_18505)
pop population prefix for genotype tracks (YRI or CEU)
individual individual to extract read counts for (e.g. 18505)
input_file bed-like file to read coordinates of test SNP and
target region from
optional arguments:
-h, --help show this help message and exit
--assembly ASSEMBLY genome assembly that reads were mapped to (e.g. hg18)
--target_region_size TARGET_REGION_SIZE
override target region size that is specified by input
file
--sample_file SAMPLE_FILE
path to file containing ordered list of genotyped
individuals
--homozygous_as_counts {zero,rand_hap,rand_allele}
how to report AS counts at linked het SNPs when test
SNP genotype is homozygous or unknown. zero (default):
set allele-specific counts to 0; rand_hap: randomly
choose one of the haplotypes to be 'reference';
rand_allele: choose random allele at each SNP to be
reference
This script is used to generate input files for the combined haplotype
test script. It depends on a number of datafiles, which may make it
difficult for other people to use. More specifically this script reads
data from HDF5 files (a.k.a. tracks) and uses code from the 'genome'
library (https://github.com/gmcvicker/genome) to access them.
The script reads from the following HDF5 tracks. <PREFIX> and <POP> are specified by
positional command line arguments "track_prefix", "pop":
<PREFIX>_AS_ref_count - number of allele-specific reads that match ref allele at each SNP
<PREFIX>_AS_alt_count - number of allele-specific reads that match alt allele at each SNP
<PREFIX>_AS_other_count - number of reads that match neither ref nor alt allele at each SNP
<PREFIX>_read_start_count - number of aligned reads that start at each position
<PREFIX>_impute2/snps - table with info about each SNP including alleles, and position
<PREFIX>impute2/snp_index - mapping from genomic position to index in snps table
impute2/<POP>_geno_probs - genotype probabilites for each individual
impute2/<POP>_haplotypes - phasing information for alleles
"""
import argparse
import numpy as np
import sys
import gzip
import genome.db
import genome.coord
SNP_UNDEF = -1  # snp_index track value meaning "no SNP at this position"
HAP_UNDEF = -1  # haplotype value meaning the allele/phase is unknown
class SNP(object):
    """A single SNP: its location, identifier, and the two alleles.

    Further per-individual attributes (het_prob, geno_sum, haps,
    linkage_prob, and the *_count fields) are attached later by
    get_region_snps / set_snp_counts.
    """
    def __init__(self, chrom, pos, name, ref_allele, alt_allele):
        self.chrom, self.pos, self.name = chrom, pos, name
        self.ref_allele, self.alt_allele = ref_allele, alt_allele
class DataTracks(object):
    """Bundle of open HDF5 tracks needed for read-count extraction.

    Read-count tracks are named from track_prefix; SNP/genotype/haplotype
    tracks live under 'impute2/' and are selected by population prefix.
    """
    def __init__(self, gdb, track_prefix, pop):
        # allele-specific and total read-count tracks
        self.ref_count_track = gdb.open_track(track_prefix + "_AS_ref_count")
        self.alt_count_track = gdb.open_track(track_prefix + "_AS_alt_count")
        self.other_count_track = gdb.open_track(track_prefix + "_AS_other_count")
        self.read_count_track = gdb.open_track(track_prefix + "_read_start_count")
        # SNP description and position-index tracks
        self.snp_track = gdb.open_track("impute2/snps")
        self.snp_index_track = gdb.open_track("impute2/snp_index")
        # population-specific genotype probabilities and phased haplotypes
        pop = pop.lower()
        self.geno_track = gdb.open_track("impute2/%s_geno_probs" % pop)
        self.hap_track = gdb.open_track("impute2/%s_haplotypes" % pop)

    def close(self):
        """closes all of the data tracks"""
        for track in (self.ref_count_track, self.alt_count_track,
                      self.other_count_track, self.read_count_track,
                      self.snp_track, self.snp_index_track,
                      self.geno_track, self.hap_track):
            track.close()
def parse_args():
    """Build and evaluate the command-line interface.

    See the module docstring for a full description of every option; the
    argument order below determines the --help output and must be kept.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--assembly", help="genome assembly that reads "
                        "were mapped to (e.g. hg18)", default=None)
    parser.add_argument('--target_region_size',
                        help='override target region size that is '
                        'specified by input file',
                        type=int, default=None)
    parser.add_argument("--sample_file",
                        help="path to file containing ordered list of "
                        "genotyped individuals",
                        default=None)
    parser.add_argument("--homozygous_as_counts",
                        help="how to report AS counts at linked het SNPs when "
                        "test SNP genotype is homozygous or unknown. "
                        "zero (default): set allele-specific counts to 0; "
                        "rand_hap: randomly choose one of the haplotypes "
                        "to be 'reference'; "
                        "rand_allele: choose random allele at each SNP to "
                        "be reference", default="zero",
                        choices=("zero", "rand_hap", "rand_allele"))
    # positional arguments
    parser.add_argument("track_prefix",
                        help="prefix of tracks to extract reads from "
                        "(e.g. 10_IND/PolII/read_counts/PolII_18505)")
    parser.add_argument("pop",
                        help="population prefix for genotype tracks "
                        "(YRI or CEU)")
    parser.add_argument("individual",
                        help="individual to extract read counts "
                        "for (e.g. 18505)")
    parser.add_argument("input_file",
                        help="bed-like file to read coordinates of "
                        "test SNP and target region from")
    args = parser.parse_args()
    return args
def get_region_snps(dt, region_list, ind_idx):
    """Retrieve all of the SNPs in the requested regions.

    dt          -- DataTracks with open SNP/genotype/haplotype tracks
    region_list -- non-empty list of regions, all on the same chromosome
    ind_idx     -- index of the individual in the genotype/haplotype tables

    Returns a list of SNP objects with het_prob, geno_sum, haps and
    linkage_prob attributes filled in for this individual.

    Raises genome.coord.CoordError when region_list is empty or spans
    multiple chromosomes.
    """
    if len(region_list) == 0:
        raise genome.coord.CoordError("expected at least one coordinate, got 0")
    chrom = region_list[0].chrom
    # per-chromosome HDF5 tables
    snp_tab = dt.snp_track.h5f.getNode("/%s" % chrom.name)
    hap_tab = dt.hap_track.h5f.getNode("/%s" % chrom.name)
    geno_tab = dt.geno_track.h5f.getNode("/%s" % chrom.name)
    region_snps = []
    for region in region_list:
        if region.chrom.name != chrom.name:
            # bug fix: previously raised a NameError because the bare name
            # CoordError is not defined in this module
            raise genome.coord.CoordError("only regions on same chromosome "
                                          "are supported")
        # snp_index maps genomic position -> row in the SNP table (SNP_UNDEF if none)
        snp_idx = dt.snp_index_track.get_nparray(chrom, region.start, region.end)
        offsets = np.where(snp_idx != SNP_UNDEF)[0]
        for offset in offsets:
            i = snp_idx[offset]
            snp_row = snp_tab[i]
            # extract geno probs and haplotypes for this individual;
            # geno probs are stored as consecutive triples per individual,
            # haplotypes as consecutive pairs
            geno_probs = geno_tab[i, (ind_idx*3):(ind_idx*3 + 3)]
            haps = hap_tab[i, (ind_idx*2):(ind_idx*2 + 2)]
            snp = SNP(region.chrom, snp_row['pos'],
                      snp_row['name'],
                      snp_row['allele1'],
                      snp_row['allele2'])
            # get heterozygote probability for SNP
            snp.het_prob = geno_probs[1]
            # linear combination of genotype probs:
            # 0*homo_ref + 1*het + 2*homo_alt
            snp.geno_sum = geno_probs[1] + 2.0*geno_probs[2]
            snp.haps = haps
            # TODO: set linkage probabilty properly
            snp.linkage_prob = 1.0
            region_snps.append(snp)
    return region_snps
def get_het_snps(snp_list):
    """Return the subset of snp_list that is heterozygous, i.e. whose two
    haplotype alleles differ."""
    return [snp for snp in snp_list if snp.haps[0] != snp.haps[1]]
def lookup_individual_index(options, ind_name):
    """Gets the index of individual that is used
    to lookup information in the genotype and haplotype tables.

    The index is the 0-based line number of the individual in the samples
    file (options.sample_file, or a population-specific default when that
    is None).  Raises ValueError if the individual is not listed.
    """
    if options.sample_file is None:
        sample_file = "/data/share/10_IND/IMPUTE/%s_samples.txt" % options.pop
    else:
        sample_file = options.sample_file
    sys.stderr.write("reading list of individuals from %s\n" % sample_file)
    # 'with' guarantees the handle is closed even on the not-found path
    # (the original leaked the file when the name was missing)
    with open(sample_file) as f:
        for idx, line in enumerate(f):
            words = line.rstrip().split()
            # sample names may carry an "NA" prefix (e.g. NA18505)
            name = words[0].replace("NA", "")
            if name == ind_name:
                return idx
    # bug fix: report the file actually read -- options.sample_file is None
    # when the default path was used
    raise ValueError("individual %s is not in samples file %s" %
                     (ind_name, sample_file))
def set_snp_counts(dt, region_list, snps, test_snp, options):
    """Sets counts of reference and alternate haplotype matching reads
    for each of the provided SNPs. Labeling of 'reference' or 'alternate'
    is with respect to the test SNP.

    Each SNP in snps gets ref_hap_count, alt_hap_count and other_count
    attributes set.  ref_idx/alt_idx select which of the two phased
    haplotypes carries the test SNP's reference allele; when the test SNP
    is homozygous/undefined the behavior follows
    options.homozygous_as_counts ('zero', 'rand_hap' or 'rand_allele').
    """
    if test_snp and (test_snp.haps[0] != test_snp.haps[1]) and \
       (test_snp.haps[0] != HAP_UNDEF):
        # test SNP is heterozygous: use this to phase counts that are
        # retrieved at linked het SNPs
        if test_snp.haps[0] == 0:
            # reference allele is first haplotype at test SNP
            ref_idx = 0
            alt_idx = 1
        else:
            # alt allele is first haplotype at test SNP
            ref_idx = 1
            alt_idx = 0
    else:
        # test SNP is homozygous or is undefined
        # so we have no way to tell which haplotype it is on
        if options.homozygous_as_counts == "rand_hap":
            # choose haplotype randomly
            if np.random.randint(2) == 0:
                ref_idx = 0
                alt_idx = 1
            else:
                ref_idx = 1
                alt_idx = 0
        else:
            # ref_idx None signals "no phasing available" to the loop below
            ref_idx = None
            alt_idx = None
    for region in region_list:
        # per-base allele-specific count arrays for this region
        ref_counts = dt.ref_count_track.get_nparray(region.chrom, region.start,
                                                    region.end)
        alt_counts = dt.alt_count_track.get_nparray(region.chrom, region.start,
                                                    region.end)
        other_counts = dt.other_count_track.get_nparray(region.chrom,
                                                        region.start,
                                                        region.end)
        for snp in snps:
            # we have het SNPs from several regions, but only want to consider
            # ones in current region
            if snp.pos >= region.start and snp.pos <= region.end:
                offset = snp.pos - region.start
                ref_count = ref_counts[offset]
                alt_count = alt_counts[offset]
                snp.other_count = other_counts[offset]
                if ref_idx is None:
                    if options.homozygous_as_counts == "zero":
                        snp.ref_hap_count = 0
                        snp.alt_hap_count = 0
                    elif options.homozygous_as_counts == "rand_allele":
                        # choose allele randomly to be reference
                        if np.random.randint(2) == 0:
                            snp.ref_hap_count = ref_count
                            snp.alt_hap_count = alt_count
                        else:
                            snp.ref_hap_count = alt_count
                            snp.alt_hap_count = ref_count
                    else:
                        raise ValueError("unknown homozygous_as_counts option %s" %
                                         options.homozygous_as_counts)
                else:
                    if snp.haps[ref_idx] == 0:
                        # reference allele is on "reference" haplotype
                        snp.ref_hap_count = ref_count
                        snp.alt_hap_count = alt_count
                    elif snp.haps[ref_idx] == 1:
                        # reference allele is on "alternate" haplotype
                        snp.ref_hap_count = alt_count
                        snp.alt_hap_count = ref_count
                    else:
                        raise ValueError("expected haplotype to be defined")
def write_header(f):
    """Write the space-separated column-name header line to f.

    Must stay in sync with the fields emitted by write_output and the
    all-NA rows from write_NA_line.
    """
    columns = [
        "CHROM",
        "TEST.SNP.POS",
        "TEST.SNP.ID",
        "TEST.SNP.REF.ALLELE",
        "TEST.SNP.ALT.ALLELE",
        "TEST.SNP.GENOTYPE",
        "TEST.SNP.HAPLOTYPE",
        "REGION.START",
        "REGION.END",
        "REGION.SNP.POS",
        "REGION.SNP.HET.PROB",
        "REGION.SNP.LINKAGE.PROB",
        "REGION.SNP.REF.HAP.COUNT",
        "REGION.SNP.ALT.HAP.COUNT",
        "REGION.SNP.OTHER.HAP.COUNT",
        "REGION.READ.COUNT",
        "GENOMEWIDE.READ.COUNT",
    ]
    f.write(" ".join(columns) + "\n")
def write_NA_line(f):
    """Write a row of all-NA values (used when the input line defines no
    test SNP).

    The row has 17 fields to line up with the header from write_header and
    the data rows from write_output; the original wrote only 15, leaving
    NA rows two columns short.
    """
    f.write(" ".join(["NA"] * 17) + "\n")
def write_output(f, region_list, snps, test_snp, test_snp_pos, region_read_count,
                 genomewide_read_count):
    """Write one space-separated output row for a single test SNP.

    Multi-valued region/SNP columns are ';'-joined.  When the test SNP is
    missing (None), all SNP-derived columns are written as NA.  The column
    order matches write_header (17 fields total).
    """
    chrom_name = region_list[0].chrom.name
    region_start_str = ";".join([str(r.start) for r in region_list])
    region_end_str = ";".join([str(r.end) for r in region_list])
    if test_snp is None:
        # the SNP did not exist, probably was removed between
        # 1000 genomes releases
        f.write("%s %d NA NA NA NA NA %s %s" %
                (chrom_name, test_snp_pos, region_start_str, region_end_str))
        f.write(" %s\n" % " ".join(["NA"] * 8))
        return
    # genotype as a dosage sum, haplotype as "a|b"
    f.write("%s %d %s %s %s %.2f %d|%d %s %s" %
            (test_snp.chrom.name, test_snp.pos, test_snp.name,
             test_snp.ref_allele, test_snp.alt_allele,
             test_snp.geno_sum, test_snp.haps[0],
             test_snp.haps[1], region_start_str, region_end_str))
    # number of linked heterozygous SNPs that we can pull
    # haplotype-specific counts from
    n_het_snps = len(snps)
    if n_het_snps > 0:
        # write SNP positions
        f.write(" %s" % ";".join(["%d" % s.pos for s in snps]))
        # write SNP het probs
        f.write(" %s" % ";".join(["%.2f" % s.het_prob for s in snps]))
        # write SNP linkage probs
        f.write(" %s" % ";".join(["%.2f" % s.linkage_prob for s in snps]))
        # write SNP ref/alt/other haplotype counts
        f.write(" %s" % ";".join(["%d" % s.ref_hap_count for s in snps]))
        f.write(" %s" % ";".join(["%d" % s.alt_hap_count for s in snps]))
        f.write(" %s" % ";".join(["%d" % s.other_count for s in snps]))
    else:
        # no linked heterozygous SNPs: six NA placeholder columns
        f.write(" %s" % " ".join(["NA"] * 6))
    # write total read count for region and genome-wide read count
    f.write(" %d %d\n" % (region_read_count, genomewide_read_count))
def get_genomewide_count(gdb, track):
    """Return the genome-wide total (sum statistic) of values in *track*.

    NOTE(review): genome.trackstat is never imported at the top of this
    file; this works only if 'import genome.db' pulls in the trackstat
    submodule -- TODO confirm, otherwise add 'import genome.trackstat'.
    """
    stat = genome.trackstat.get_stats(gdb, track)
    return stat.sum
def get_region_read_counts(dt, region_list):
    """Return the total number of read starts across all regions in
    region_list (0 for an empty list)."""
    return sum(
        np.sum(dt.read_count_track.get_nparray(r.chrom, r.start, r.end))
        for r in region_list)
def get_target_regions(args, chrom, words):
    """Parse start and end positions and return list of Coord objects
    representing target region(s).

    words[7] / words[8] hold ';'-separated start / end positions (the lists
    must be the same length).  When --target_region_size is given, each
    region whose length differs is re-centered on its midpoint and resized,
    then clamped to [1, chrom.length].
    """
    start_words = words[7].split(";")
    end_words = words[8].split(";")
    if len(start_words) != len(end_words):
        raise genome.coord.CoordError("number of start (%d) and end (%d) positions "
                                      "do not match" % (len(start_words), len(end_words)))
    n_coord = len(start_words)
    region_list = []
    for i in range(n_coord):
        start = int(start_words[i])
        end = int(end_words[i])
        region = genome.coord.Coord(chrom, start, end)
        if args.target_region_size:
            if region.length() != args.target_region_size:
                # override the size of the target region
                # with size provided on command line.
                # bug fix: use integer division -- under Python 3 the original
                # '/' produced float coordinates (Python 2 floor-divided ints)
                mid = (region.start + region.end) // 2
                half = args.target_region_size // 2
                region.start = mid - half
                region.end = mid + half
            # clamp to chromosome bounds
            if region.start < 1:
                region.start = 1
            if region.end > chrom.length:
                region.end = chrom.length
        region_list.append(region)
    return region_list
def main():
    """Drive the extraction: parse arguments, open data tracks, and emit
    one output row (to stdout) per test-SNP line of the input file."""
    args = parse_args()
    gdb = genome.db.GenomeDB(assembly=args.assembly)
    write_header(sys.stdout)
    # strip NA/GM prefixes so names match the samples file entries
    individual = args.individual.replace("NA","").replace("GM", "")
    ind_idx = lookup_individual_index(args, individual)
    dt = DataTracks(gdb, args.track_prefix, args.pop)
    genomewide_read_counts = get_genomewide_count(gdb, dt.read_count_track)
    chrom_dict = gdb.get_chromosome_dict()
    if args.input_file.endswith(".gz"):
        f = gzip.open(args.input_file)
    else:
        f = open(args.input_file)
    line_count = 0
    if args.target_region_size:
        sys.stderr.write("setting target region size to %d\n" %
                         args.target_region_size)
    for line in f:
        line_count += 1
        if line_count % 1000 == 0:
            sys.stderr.write(".")  # progress indicator on stderr
        if line.startswith("#"):
            continue  # skip comment lines
        words = line.rstrip().split()
        if words[1] == "NA":
            # no SNP defined on this line:
            write_NA_line(sys.stdout)
            continue
        chrom_name = words[0]
        chrom = chrom_dict[chrom_name]
        region_list = get_target_regions(args, chrom, words)
        snp_pos = int(words[1])
        snp_ref_base = words[3]
        snp_alt_base = words[4]
        # TODO: check that SNP ref/alt match?
        # single-base region covering the test SNP itself
        snp_region = genome.coord.Coord(chrom, snp_pos, snp_pos)
        # pull out all of the SNPs in the target region(s)
        region_snps = get_region_snps(dt, region_list, ind_idx)
        # pull out test SNP
        test_snp_list = get_region_snps(dt, [snp_region], ind_idx)
        if len(test_snp_list) != 1:
            test_snp = None
            sys.stderr.write("WARNING: could not find test SNP at "
                             "position %s:%d\n" % (chrom.name, snp_pos))
            het_snps = []
        else:
            test_snp = test_snp_list[0]
            # pull out haplotype counts from linked heterozygous SNPs
            het_snps = get_het_snps(region_snps)
            set_snp_counts(dt, region_list, het_snps, test_snp, args)
        region_read_counts = get_region_read_counts(dt, region_list)
        write_output(sys.stdout, region_list, het_snps, test_snp, snp_pos,
                     region_read_counts, genomewide_read_counts)
    sys.stderr.write("\n")
    f.close()
    dt.close()
if __name__ == "__main__":
    # guard so importing this module (e.g. for testing) does not trigger a run
    main()
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import copy
from typing import Any, List
from rest_framework.exceptions import ValidationError
from rest_framework.settings import api_settings
from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList
def stringify_validation_error(error: ValidationError) -> List[str]:
    """Transform DRF's ValidationError into a list of error strings

    >>> stringify_validation_error(ValidationError({'foo': ErrorDetail('err')}))
    ['foo: err']
    """
    results: List[str] = []

    def walk(detail: Any, path: List[str]) -> None:
        """Depth-first traversal collecting 'dotted.path: message' strings."""
        if isinstance(detail, (ReturnList, list, tuple)):
            for item in detail:
                walk(item, path)
        elif isinstance(detail, (ReturnDict, dict)):
            for key, item in detail.items():
                # NON_FIELD_ERRORS_KEY is an implementation detail of DRF;
                # keep it out of the dotted path
                if key == api_settings.NON_FIELD_ERRORS_KEY:
                    walk(item, path)
                else:
                    # build a fresh path list so sibling keys stay independent
                    walk(item, path + [str(key)])
        elif path:
            results.append('{}: {}'.format('.'.join(path), str(detail)))
        else:
            results.append(str(detail))

    walk(error.detail, [])
    return sorted(results)
| StarcoderdataPython |
import argparse
import sys
import time
import numpy as np
import pyqtgraph as pg
from sensapex import UMP
from sensapex.sensapex import LIBUM_DEF_BCAST_ADDRESS
from sensapex.utils import bytes_str
# Command-line interface: one positional device ID plus connection, axis
# selection, motion, and sampling options (see each help string).
parser = argparse.ArgumentParser(
    description="Test for sensapex devices; perform a series of random moves while rapidly polling the device position and state."
)
parser.add_argument("device", type=int, help="Device ID to test")
parser.add_argument(
    "--library-path", "-l", type=str, dest="library_path", default=None, help="Folder containing the umsdk library"
)
parser.add_argument("--address", "-a", type=bytes_str, default=LIBUM_DEF_BCAST_ADDRESS, help="Device network address")
parser.add_argument("--debug", "-d", action="store_true", help="Turn on debug logging")
parser.add_argument("--group", "-g", type=int, default=0, help="Device group number")
parser.add_argument(
    "--x", action="store_true", default=False, dest="x", help="True = Random X axis values. False = keep start position"
)
parser.add_argument(
    "--y", action="store_true", default=False, dest="y", help="True = Random Y axis values. False = keep start position"
)
parser.add_argument(
    "--z", action="store_true", default=False, dest="z", help="True = Random Z axis values. False = keep start position"
)
parser.add_argument("--speed", type=int, default=1000, help="Movement speed in um/sec")
parser.add_argument(
    "--distance", type=int, default=10, help="Max distance to travel in um (relative to current position)"
)
parser.add_argument("--iter", type=int, default=10, help="Number of positions to test")
parser.add_argument("--acceleration", type=int, default=0, help="Max speed acceleration")
parser.add_argument(
    "--high-res",
    action="store_true",
    default=False,
    dest="high_res",
    help="Use high-resolution time sampling rather than poller's schedule",
)
parser.add_argument(
    "--start-pos",
    type=str,
    default=None,
    dest="start_pos",
    help="x,y,z starting position (by default, the current position is used)",
)
parser.add_argument(
    "--test-pos",
    type=str,
    default=None,
    dest="test_pos",
    help="x,y,z position to test (by default, random steps from the starting position are used)",
)
args = parser.parse_args()
# Resolve the SDK library location and connect to the device group.
UMP.set_library_path(args.library_path)
ump = UMP.get_ump(address=args.address, group=args.group)
if args.debug:
    try:
        ump.set_debug_mode(True)
    except Exception as e:
        print(f"Could not enable Sensapex debug mode: {e}")
# NOTE(review): presumably gives the poller time to discover devices -- confirm
time.sleep(2)
devids = ump.list_devices()
devs = {i: ump.get_device(i) for i in devids}
print("SDK version:", ump.sdk_version())
print("Found device IDs:", devids)
dev = devs[args.device]
# Plot window: top row shows per-axis position traces, bottom row per-axis
# position error; columns are x/y/z.
app = pg.mkQApp()
win = pg.GraphicsLayoutWidget()
win.show()
plots = [
    win.addPlot(labels={"left": ("x position", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("y position", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("z position", "m"), "bottom": ("time", "s")}),
]
# link axes so all three position plots pan/zoom together
plots[1].setYLink(plots[0])
plots[2].setYLink(plots[0])
plots[1].setXLink(plots[0])
plots[2].setXLink(plots[0])
win.nextRow()
errplots = [
    win.addPlot(labels={"left": ("x error", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("y error", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("z error", "m"), "bottom": ("time", "s")}),
]
errplots[1].setYLink(errplots[0])
errplots[2].setYLink(errplots[0])
errplots[0].setXLink(plots[0])
errplots[1].setXLink(plots[0])
errplots[2].setXLink(plots[0])
# Sample buffers appended to by update(); all share the common time base.
start = pg.ptime.time()
pos = [[], [], []]  # measured positions per axis (relative to start_pos)
tgt = [[], [], []]  # commanded targets per axis (relative to start_pos)
err = [[], [], []]  # position - target per axis (NaN when not sampled)
bus = []            # device 'busy' flag per sample
mov = []            # move-request 'not finished' flag per sample
times = []          # sample timestamps (seconds since start)
lastupdate = pg.ptime.time()
def update(update_error=False):
    """Poll the device once and append one sample to the shared buffers.

    :param update_error: when True, the position/target difference is
        recorded in ``err``; otherwise NaN is stored so the error curve has a
        gap while the stage is still moving.

    Fix: removed the ``global lastupdate`` declaration — ``lastupdate`` was
    never read or assigned inside this function.
    """
    # timeout=-1 requests a fresh high-resolution position read; 0 returns
    # the poller's cached value.
    timeout = -1 if args.high_res else 0
    p = dev.get_pos(timeout=timeout)
    s = dev.is_busy()
    m = not move_req.finished
    bus.append(int(s))
    mov.append(int(m))
    now = pg.ptime.time() - start
    times.append(now)
    for i in range(3):
        # Store positions relative to start_pos; the 1e-6 factor converts
        # device units to meters (plots are labeled in m).
        pos[i].append((p[i] - start_pos[i]) * 1e-6)
        tgt[i].append((target[i] - start_pos[i]) * 1e-6)
        if update_error:
            err[i].append(pos[i][-1] - tgt[i][-1])
        else:
            err[i].append(np.nan)
def update_plots():
    """Redraw position curves, busy/moving indicator bands, and error curves."""
    for axis in range(3):
        plot = plots[axis]
        plot.clear()
        # Shaded step bands show when the device reported busy (green) and
        # when the move request was still active (red). stepMode=True needs
        # len(x) == len(y) + 1, hence the [:-1] slices.
        busy_band = pg.PlotCurveItem(times, bus[:-1], stepMode=True, pen=None, brush=(0, 255, 0, 40), fillLevel=0)
        plot.addItem(busy_band, ignoreBounds=True)
        moving_band = pg.PlotCurveItem(times, mov[:-1], stepMode=True, pen=None, brush=(255, 0, 0, 40), fillLevel=0)
        plot.addItem(moving_band, ignoreBounds=True)
        plot.plot(times, tgt[axis], pen="r")
        plot.plot(times, pos[axis], symbol="o", symbolSize=5)
        # connect="finite" leaves gaps where the error is NaN (during moves).
        errplots[axis].plot(times, err[axis], clear=True, connect="finite")
# --- Determine the starting position ---
if args.start_pos is None:
    start_pos = dev.get_pos()
else:
    start_pos = np.array(list(map(float, args.start_pos.split(","))))
print(start_pos)

diffs = []      # per-move final error vector
errs = []       # per-move final error magnitude
positions = []  # per-move final reported position (device units)

# --- Build the list of target positions ---
if args.test_pos is None:
    # Random displacements from the start position on each enabled axis.
    # NOTE(review): the three empty-list initializers below are immediately
    # overwritten and could be removed.
    xmoves = []
    ymoves = []
    zmoves = []
    if args.x:
        xmoves = (np.random.random(size=(args.iter, 1)) * args.distance).astype(int)
    else:
        xmoves = np.zeros(args.iter)
    if args.y:
        ymoves = (np.random.random(size=(args.iter, 1)) * args.distance).astype(int)
    else:
        ymoves = np.zeros(args.iter)
    if args.z:
        zmoves = (np.random.random(size=(args.iter, 1)) * args.distance).astype(int)
    else:
        zmoves = np.zeros(args.iter)
    moves = np.column_stack((xmoves, ymoves, zmoves))
    # moves = (np.random.random(size=(args.iter, 3)) * args.distance*1000).astype(int)
    targets = np.array(start_pos)[np.newaxis, :] + moves
    print(moves)
    print(targets)
else:
    # just move back and forth between start and test position
    test_pos = np.array(list(map(float, args.test_pos.split(","))))
    targets = np.zeros((args.iter, 3))
    targets[::2] = start_pos[None, :]
    targets[1::2] = test_pos[None, :]
speeds = [args.speed] * args.iter
# targets = np.array([[15431718, 7349832, 17269820], [15432068, 7349816, 17249852]] * 5)
# speeds = [100, 2] * args.iter
# targets = np.array([[13073580, 13482162, 17228380], [9280157.0, 9121206.0, 12198605.]] * 5)
# speeds = [1000] * args.iter
# targets = np.array([[9335078, 10085446, 12197238], [14793665.0, 11658668.0, 17168934.]] * 5)
# speeds = [1000] * args.iter

dev.stop()
# --- Execute the moves, sampling position continuously ---
for i in range(args.iter):
    target = targets[i]
    move_req = dev.goto_pos(target, speed=speeds[i], linear=False, max_acceleration=args.acceleration)
    # Sample while the move is in flight; the error column stays NaN here.
    while not move_req.finished:
        update(update_error=False)
        time.sleep(0.002)
    # Keep sampling for one second after the move to watch settling,
    # now recording the error against the target.
    waitstart = pg.ptime.time()
    while pg.ptime.time() - waitstart < 1.0:
        update(update_error=True)
        time.sleep(0.002)
    # time.sleep(0.05)
    p2 = dev.get_pos(timeout=200)
    positions.append(p2)
    # Convert device units to meters (plots are labeled in m).
    diff = (p2 - target) * 1e-6
    diffs.append(diff)
    errs.append(np.linalg.norm(diff))
    print(i, diff, errs[-1])
    update_plots()

# Return to the starting position and report aggregate accuracy.
dev.goto_pos(start_pos, args.speed)
print("mean:", np.mean(errs), " max:", np.max(errs))

# Enter the Qt event loop unless running interactively (python -i).
if sys.flags.interactive == 0:
    app.exec_()
| StarcoderdataPython |
3223832 | <filename>python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cdn/apis/SetReferRequest.py
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SetReferRequest(JDCloudRequest):
    """
    Set the referer (hotlink-protection) configuration for a CDN domain.
    POSTs to /domain/{domain}/refer.
    """

    def __init__(self, parameters, header=None, version="v1"):
        super(SetReferRequest, self).__init__(
            '/domain/{domain}/refer', 'POST', header, version)
        # SetReferParameters instance carrying the domain and refer settings.
        self.parameters = parameters
class SetReferParameters(object):
    """Parameter container for :class:`SetReferRequest`."""

    def __init__(self, domain, ):
        """
        :param domain: the user's domain name
        """
        self.domain = domain
        # Optional fields start unset; populate them through the setters below.
        for optional_field in ("referType", "referList", "allowNoReferHeader", "allowNullReferHeader"):
            setattr(self, optional_field, None)

    def setReferType(self, referType):
        """
        :param referType: (Optional) referer list type: "block" (blacklist) or "allow" (whitelist); defaults to "block"
        """
        self.referType = referType

    def setReferList(self, referList):
        """
        :param referList: (Optional) comma-separated domain list; passing an empty value deletes the configuration
        """
        self.referList = referList

    def setAllowNoReferHeader(self, allowNoReferHeader):
        """
        :param allowNoReferHeader: (Optional) whether requests without a Referer header are allowed; defaults to "on"
        """
        self.allowNoReferHeader = allowNoReferHeader

    def setAllowNullReferHeader(self, allowNullReferHeader):
        """
        :param allowNullReferHeader: (Optional) whether requests with an empty Referer are allowed; defaults to "on"
            (NOTE(review): the generated source described this as "no UA" — confirm against the API docs)
        """
        self.allowNullReferHeader = allowNullReferHeader
| StarcoderdataPython |
4809266 | <reponame>dmontoya1/cajas
from django.db import models
from enumfields import EnumField
from enumfields import Enum
class ConceptType(Enum):
    """How a concept is applied: simple entry, double entry, or either."""
    # Two-character codes are what EnumField persists in the database.
    SIMPLE = 'SM'
    DOUBLE = 'DB'
    SIMPLEDOUBLE = 'SD'

    class Labels:
        # Human-readable Spanish labels consumed by django-enumfields.
        SIMPLE = 'Simple'
        DOUBLE = 'Doble'
        SIMPLEDOUBLE = 'Simple y doble'
class CrossoverType(Enum):
    """Counterpart of a crossover movement: a direct partner or an office."""
    PARTNER = 'PA'
    OFFICE = 'OF'

    class Labels:
        # Human-readable Spanish labels consumed by django-enumfields.
        PARTNER = 'Socio directo'
        OFFICE = 'Oficina'
class Relationship(Enum):
    """Kind of entity a movement is related to."""
    UNIT = 'UNIT'
    PERSON = 'PERS'
    COUNTRY = 'COUNTRY'
    LOAN = 'PREST'
    CHAIN = 'CHAIN'
    OFFICE = 'OF'
    BETWEEN_OFFICE = 'BTWN'

    class Labels:
        # Human-readable Spanish labels consumed by django-enumfields.
        UNIT = 'Unidad'
        PERSON = 'Persona'
        COUNTRY = 'País'
        LOAN = 'Préstamo'
        CHAIN = 'Cadena'
        OFFICE = 'Oficina'
        BETWEEN_OFFICE = 'Entre oficinas'
class Concept(models.Model):
    """
    A bookkeeping concept: names a kind of cash-box movement and describes
    how it behaves (simple/double entry, crossover counterpart, relation).
    """

    name = models.CharField(
        'Nombre',
        max_length=255,
    )
    description = models.TextField(
        'Descripción'
    )
    # Whether the concept produces a simple entry, a double entry, or both.
    concept_type = EnumField(
        ConceptType,
        verbose_name='Tipo de concepto',
        max_length=2,
    )
    # Only meaningful for crossover concepts (direct partner vs. office).
    crossover_type = EnumField(
        CrossoverType,
        verbose_name='Tipo de cruce',
        max_length=2,
        blank=True, null=True
    )
    # Self-reference: the counterpart concept used when this one is a crossover.
    counterpart = models.ForeignKey(
        'self',
        verbose_name='Concepto Contrapartida',
        help_text='Concepto de contrapartida cuando el concepto es de cruce.',
        blank=True, null=True,
        on_delete=models.SET_NULL
    )
    relationship = EnumField(
        Relationship,
        max_length=7,
        verbose_name='Relación del movimiento',
        blank=True, null=True
    )
    # False for concepts (e.g. handing over cash) that must not move the boxes.
    movement_type = models.BooleanField(
        'El concepto genera movimiento en la caja?',
        default=True,
        help_text='Indicar si el movimiento genera o no un movimiento en las cajas. Como el caso de ENTREGA DE DINERO. '
                  'Éste no génera movimiento en la caja'
    )
    is_active = models.BooleanField(
        'Concepto activo?',
        default=True
    )

    def __str__(self):
        # E.g. "Préstamo de tipo Simple" (Spanish UI string).
        return '%s de tipo %s' % (self.name, self.get_concept_type_display())

    class Meta:
        verbose_name = 'Concepto'
        ordering = ['name']
3345045 | <gh_stars>1-10
import os
import math
import json
import logging
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from ..utils.util import ensure_dir
class BaseTrainer:
    """
    Base class for all trainers.

    Handles device selection, optimizer/LR-scheduler construction,
    checkpointing, tracking of a monitored validation metric, and
    TensorBoard summary writing. Subclasses implement :meth:`_train_epoch`.
    """
    def __init__(self, model, loss, metrics, resume, config, train_logger=None):
        """
        :param model: model exposing ``optimize``, ``parallelize`` and the
            usual module interface (``to``, ``state_dict`` ...)
        :param loss: loss function (stored for subclasses)
        :param metrics: list of metric callables; their ``__name__`` is used
            as the log key
        :param resume: checkpoint path to resume from, or a falsy value
        :param config: parsed configuration dictionary
        :param train_logger: optional logger object persisted in checkpoints
        """
        self.config = config
        self.logger = logging.getLogger(self.__class__.__name__)
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.name = config['name']
        self.epochs = config['trainer']['epochs']
        self.save_freq = config['trainer']['save_freq']
        self.verbosity = config['trainer']['verbosity']
        # NOTE(review): attribute keeps the original (misspelled) name
        # "summyWriter" for backward compatibility with subclasses.
        self.summyWriter = SummaryWriter()
        if torch.cuda.is_available():
            if config['cuda']:
                self.with_cuda = True
                self.gpus = {i: item for i, item in enumerate(self.config['gpus'])}
                device = 'cuda'
                # Only parallelize when both the machine and the config
                # provide more than one GPU.
                if torch.cuda.device_count() > 1 and len(self.gpus) > 1:
                    self.model.parallelize()
                torch.cuda.empty_cache()
            else:
                self.with_cuda = False
                device = 'cpu'
        else:
            self.logger.warning('Warning: There\'s no CUDA support on this machine, '
                                'training is performed on CPU.')
            self.with_cuda = False
            device = 'cpu'

        self.device = torch.device(device)
        self.model.to(self.device)
        self.logger.debug('Model is initialized.')
        self._log_memory_useage()

        self.train_logger = train_logger
        self.optimizer = self.model.optimize(config['optimizer_type'], config['optimizer'])
        self.lr_scheduler = getattr(
            optim.lr_scheduler,
            config['lr_scheduler_type'], None)
        if self.lr_scheduler:
            self.lr_scheduler = self.lr_scheduler(self.optimizer, **config['lr_scheduler'])
            self.lr_scheduler_freq = config['lr_scheduler_freq']
        # Monitored metric: the log key compared across epochs to find the
        # best checkpoint, in 'min' or 'max' mode.
        self.monitor = config['trainer']['monitor']
        self.monitor_mode = config['trainer']['monitor_mode']
        assert self.monitor_mode == 'min' or self.monitor_mode == 'max'
        self.monitor_best = math.inf if self.monitor_mode == 'min' else -math.inf
        self.start_epoch = 1
        self.checkpoint_dir = os.path.join(config['trainer']['save_dir'], self.name)
        ensure_dir(self.checkpoint_dir)
        # Fix: write the config through a context manager so the file handle
        # is closed deterministically (the original leaked it).
        with open(os.path.join(self.checkpoint_dir, 'config.json'), 'w') as config_file:
            json.dump(config, config_file, indent=4, sort_keys=False)
        if resume:
            self._resume_checkpoint(resume)

    def train(self):
        """
        Full training logic: run epochs, flatten results into a log dict,
        track the monitored metric, save checkpoints, and step the scheduler.
        """
        print(self.epochs)
        for epoch in range(self.start_epoch, self.epochs + 1):
            try:
                result = self._train_epoch(epoch)
            except torch.cuda.CudaError:
                # Fix: the original swallowed the error and then crashed with
                # a NameError on the undefined `result`. Log memory usage for
                # diagnosis, then re-raise the original exception.
                self._log_memory_useage()
                raise

            # Flatten the epoch result into a flat log dict; metric lists are
            # expanded using each metric function's __name__.
            log = {'epoch': epoch}
            for key, value in result.items():
                if key == 'metrics':
                    for i, metric in enumerate(self.metrics):
                        log[metric.__name__] = result['metrics'][i]
                elif key == 'val_metrics':
                    for i, metric in enumerate(self.metrics):
                        log['val_' + metric.__name__] = result['val_metrics'][i]
                else:
                    log[key] = value

            if self.train_logger is not None:
                self.train_logger.add_entry(log)
            if self.verbosity >= 1:
                for key, value in log.items():
                    self.logger.info(' {:15s}: {}'.format(str(key), value))

            # Save a "best" checkpoint whenever the monitored value improves,
            # and a periodic checkpoint every save_freq epochs.
            if (self.monitor_mode == 'min' and log[self.monitor] < self.monitor_best)\
                    or (self.monitor_mode == 'max' and log[self.monitor] > self.monitor_best):
                self.monitor_best = log[self.monitor]
                self._save_checkpoint(epoch, log, save_best=True)
            if epoch % self.save_freq == 0:
                self._save_checkpoint(epoch, log)
            if self.lr_scheduler:
                self.lr_scheduler.step()
                lr = self.lr_scheduler.get_lr()[0]
                self.logger.info('New Learning Rate: {:.8f}'.format(lr))

            # NOTE(review): both curves are fed result[self.monitor]; the
            # 'val_' curve probably should use result['val_' + self.monitor]
            # when the epoch result provides it — confirm before changing.
            self.summyWriter.add_scalars('Train', {'train_' + self.monitor: result[self.monitor],
                                                   'val_' + self.monitor: result[self.monitor]}, epoch)
        self.summyWriter.close()

    def _log_memory_useage(self):
        """Log per-GPU allocated/cached memory; a no-op without CUDA."""
        if not self.with_cuda:
            return

        template = """Memory Usage: \n{}"""
        usage = []
        for deviceID, device in self.gpus.items():
            deviceID = int(deviceID)
            allocated = torch.cuda.memory_allocated(deviceID) / (1024 * 1024)
            cached = torch.cuda.memory_cached(deviceID) / (1024 * 1024)
            usage.append(' CUDA: {} Allocated: {} MB Cached: {} MB \n'.format(device, allocated, cached))

        content = ''.join(usage)
        content = template.format(content)
        self.logger.debug(content)

    def _train_epoch(self, epoch):
        """
        Training logic for an epoch; must return a dict of results
        (including the monitored key and optionally 'metrics'/'val_metrics').

        :param epoch: Current epoch number
        """
        raise NotImplementedError

    def _save_checkpoint(self, epoch, log, save_best=False):
        """
        Saving checkpoints

        :param epoch: current epoch number
        :param log: logging information of the epoch
        :param save_best: if True, rename the saved checkpoint to 'model_best.pth.tar'
        """
        arch = type(self.model).__name__
        state = {
            'arch': arch,
            'epoch': epoch,
            'logger': self.train_logger,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'monitor_best': self.monitor_best,
            'config': self.config
        }
        filename = os.path.join(self.checkpoint_dir, 'checkpoint-epoch{:03d}-loss-{:.4f}.pth.tar'
                                .format(epoch, log['loss']))
        torch.save(state, filename)
        if save_best:
            os.rename(filename, os.path.join(self.checkpoint_dir, 'model_best.pth.tar'))
            self.logger.info("Saving current best: {} ...".format('model_best.pth.tar'))
        else:
            self.logger.info("Saving checkpoint: {} ...".format(filename))

    def _resume_checkpoint(self, resume_path):
        """
        Resume from saved checkpoints

        :param resume_path: Checkpoint path to be resumed
        """
        self.logger.info("Loading checkpoint: {} ...".format(resume_path))
        checkpoint = torch.load(resume_path)
        self.start_epoch = checkpoint['epoch'] + 1
        self.monitor_best = checkpoint['monitor_best']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        if self.with_cuda:
            # Move optimizer state tensors back onto the GPU after loading.
            for state in self.optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda(torch.device('cuda'))
        self.train_logger = checkpoint['logger']
        # self.config = checkpoint['config']
        self.logger.info("Checkpoint '{}' (epoch {}) loaded".format(resume_path, self.start_epoch))
| StarcoderdataPython |
1716660 | <gh_stars>1-10
class Solution:
    def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
        """Return the K points closest to the origin, nearest first."""
        def squared_distance(point):
            return point[0] * point[0] + point[1] * point[1]

        ordered = sorted(points, key=squared_distance)
        return ordered[:K]
| StarcoderdataPython |
114935 | <filename>src/pass.py
import os
import threading
import hashlib
import ast
import keypirinha as kp
import keypirinha_util as kpu
class Pass(kp.Plugin):
    """
    Provides an interface to a [password store](https://www.passwordstore.org/).
    """

    # Custom catalog item categories:
    # CAT_FILE            - a whole pass file; target is the pass name.
    # CAT_FILE_LINE       - a line shown in clear; target is str((line, index)).
    # CAT_FILE_LINE_INDEX - a hidden line; target is str((pass_name, index)) so
    #                       the secret never appears in item targets/logs.
    CAT_FILE = kp.ItemCategory.USER_BASE + 1
    CAT_FILE_LINE = kp.ItemCategory.USER_BASE + 2
    CAT_FILE_LINE_INDEX = kp.ItemCategory.USER_BASE + 3

    # Configuration defaults (overridable in the package settings).
    DEFAULT_CLIP_TIME = 45            # seconds before the clipboard is restored
    DEFAULT_SHOW_SECRETS = False      # show full line contents in suggestions
    DEFAULT_SAFE_KEYS = ["URL", "Username"]  # keys safe to display even when hidden
    DEFAULT_SAVE_HISTORY = True

    def __init__(self):
        super().__init__()

    def _read_config(self):
        """Load plugin settings and instantiate the configured backend."""
        settings = self.load_settings()

        backend = settings.get('backend', 'main', fallback='wsl')
        # Backends are imported lazily so only the selected one is loaded.
        if backend.lower() == 'wsl':
            from .backends.wsl import WslBackend
            self.backend = WslBackend()
        elif backend.lower() == 'gpg4win':
            from .backends.gpg4win import Gpg4WinBackend
            self.backend = Gpg4WinBackend()
        else:
            raise ValueError("Unknown backend: {}".format(backend))

        pass_store = settings.get('path', 'pass',
                                  fallback=self.backend.password_store)
        self.backend.set_password_store(pass_store)
        self.log("Password store: {} -> {}".format(pass_store, self.backend.password_store))

        # NOTE(review): settings.get may return a string; threading.Timer
        # expects a number for its interval — confirm the value is numeric.
        self.CLIP_TIME = settings.get('clip_time', 'pass',
                                      fallback=self.DEFAULT_CLIP_TIME)
        self._clip_timer = None

        self.SHOW_SECRETS = settings.get_bool('show_secrets', 'main',
                                              fallback=self.DEFAULT_SHOW_SECRETS)
        safe_keys = settings.get('safe_keys', 'main',
                                 fallback=None)
        if safe_keys is None:
            safe_keys = self.DEFAULT_SAFE_KEYS
        else:
            # The setting is a Python-literal list, e.g. ["URL", "Username"].
            safe_keys = ast.literal_eval(safe_keys)
        # Keys are matched case-insensitively.
        self.SAFE_KEYS = [x.lower() for x in safe_keys]

        self.SAVE_HISTORY = settings.get_bool('save_history', 'main',
                                              fallback=self.DEFAULT_SAVE_HISTORY)

    def on_start(self):
        self._read_config()

    def on_events(self, flags):
        # Re-read settings and rebuild the catalog when the package config changes.
        if flags & kp.Events.PACKCONFIG:
            self._read_config()
            self.on_catalog()

    def on_catalog(self):
        # Refresh list of names in password-store
        self.names = self.backend.get_pass_list()
        self.log("Found {} files in password store".format(len(self.names)))

        # Add pass command to catalog
        catalog = []
        catalog.append(
            self.create_item(
                category=kp.ItemCategory.KEYWORD,
                label="Password Store",
                short_desc="Show password store",
                target="pass",
                args_hint=kp.ItemArgsHint.ACCEPTED,
                hit_hint=kp.ItemHitHint.IGNORE
            )
        )
        self.set_catalog(catalog)

    def on_suggest(self, user_input, items_chain):
        """Suggest pass files under the root item, or file lines under a file."""
        if not items_chain:
            return

        items = []
        if items_chain[-1].target() == 'pass':
            # Root level: list every file in the password store.
            if self.SAVE_HISTORY:
                hit_hint = kp.ItemHitHint.NOARGS
            else:
                hit_hint = kp.ItemHitHint.IGNORE
            # Display list of pass files
            for name in self.names:
                items.append(self.create_item(
                    category=self.CAT_FILE,
                    label=name,
                    short_desc="",
                    target=name,
                    args_hint=kp.ItemArgsHint.ACCEPTED,
                    hit_hint=hit_hint,
                    loop_on_suggest=True  # tab will show contents of file
                ))
            self.set_suggestions(items, kp.Match.FUZZY, kp.Sort.SCORE_DESC)
        else:
            # User pressed tab on a pass file, show its contents
            pass_name = items_chain[-1].target()

            # Display pass file contents
            lines = self.backend.get_pass_contents(pass_name).split('\n')
            for i,l in enumerate(lines):
                # Skip empty lines
                if not l:
                    continue
                # Show full line if SHOW_SECRETS or a safe key
                k = None
                if i > 0:
                    # Don't kv split the first line
                    # (by convention it holds the password itself).
                    k,_ = self._pass_kv_split(l)
                if self.SHOW_SECRETS or (k is not None and k.lower() in self.SAFE_KEYS):
                    shown = l
                    cat = self.CAT_FILE_LINE
                    # Store index of line so we can decide whether or not to kv split it
                    target = str((l,i))
                else:
                    # Otherwise, display only KEY if it exists, otherwise asterisks
                    shown = '*'*8 if k is None else k
                    cat = self.CAT_FILE_LINE_INDEX
                    # Store index of line so we can get the full value later
                    # index also helps us decide whether or not to kv split the line
                    # This helps us keep secrets out of the log, too, if a user
                    # uses the "show item properties" shortcut
                    target = str((pass_name,i))
                items.append(self.create_item(
                    category=cat,
                    label=shown,
                    short_desc="",
                    target=target,
                    args_hint=kp.ItemArgsHint.FORBIDDEN,
                    hit_hint=kp.ItemHitHint.IGNORE
                ))
            self.set_suggestions(items, kp.Match.FUZZY, kp.Sort.NONE)

    def on_execute(self, item, action):
        """Copy the selected secret (or line value) to the clipboard."""
        data = None
        if item.category() == self.CAT_FILE:
            # User selected file, put password in clipboard
            data = self.backend.get_password(item.target())
        elif item.category() == self.CAT_FILE_LINE:
            # User selected a line from the pass file
            tuple_val = ast.literal_eval(item.target())
            data,lineno = tuple_val
        elif item.category() == self.CAT_FILE_LINE_INDEX:
            # User selected a line from the pass file
            # (hidden variant: re-read the file to recover the line content).
            tuple_val = ast.literal_eval(item.target())
            pass_name,lineno = tuple_val
            data = self.backend.get_pass_contents(pass_name).split('\n')[int(lineno)]
        if item.category() in [self.CAT_FILE_LINE, self.CAT_FILE_LINE_INDEX]:
            # Don't kv split the first line
            if lineno > 0:
                # If it is a 'Key: Value' format, put Value in clipboard
                # Otherwise, put full line in clipboard
                data = self._pass_kv_split(data)[1]
        if data is not None:
            self._put_data_in_clipboard(data)

    def _put_data_in_clipboard(self, data):
        """Copy *data* and schedule restoration of the previous clipboard."""
        # XXX: This only works with text clipboard data, not fancy Windows objects (files, office content, etc)
        orig_clip = kpu.get_clipboard()
        # Only a hash of the secret is retained for the later comparison.
        pass_hash = hashlib.md5(data.encode()).digest()

        # keypirinha.delay isn't implemented yet, so a timer will do
        kwargs = {'orig_clip': orig_clip, 'pass_hash': pass_hash}
        self._clip_timer = threading.Timer(self.CLIP_TIME, self._timer_reset_clipboard, kwargs=kwargs)

        kpu.set_clipboard(data)
        self._clip_timer.start()

    @staticmethod
    def _timer_reset_clipboard(orig_clip=None, pass_hash=None):
        """Restore *orig_clip* if the clipboard still holds the copied secret."""
        if orig_clip is None or pass_hash is None:
            return

        # Only reset clip if clipboard still contains pass
        cur_clip = kpu.get_clipboard()
        clip_hash = hashlib.md5(cur_clip.encode()).digest()
        if clip_hash == pass_hash:
            kpu.set_clipboard(orig_clip)

    @staticmethod
    def _pass_kv_split(line):
        """Splits key value pair from a pass file.

        Returns a two-element sequence: a list when the line contains
        ': ' (the result of str.split), otherwise the tuple (None, line).
        Always returns a value; key is None if it does not exist.

        Example:
        >>> _pass_kv_split("URL: *.example.com/*")
        ['URL', '*.example.com/*']
        """
        if ': ' in line:
            return line.split(': ', 1)
        return None,line
| StarcoderdataPython |
76692 | <filename>aiotdlib/api/functions/delete_revoked_chat_invite_link.py
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class DeleteRevokedChatInviteLink(BaseObject):
    """
    Deletes revoked chat invite links. Requires administrator privileges and can_invite_users right in the chat for own links and owner privileges for other links

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param invite_link: Invite link to revoke
    :type invite_link: :class:`str`

    """

    # TDLib type discriminator; serialized as the "@type" field.
    ID: str = Field("deleteRevokedChatInviteLink", alias="@type")
    chat_id: int
    invite_link: str

    @staticmethod
    def read(q: dict) -> DeleteRevokedChatInviteLink:
        # construct() bypasses pydantic validation; the payload originates
        # from TDLib and is trusted to match the declared schema.
        return DeleteRevokedChatInviteLink.construct(**q)
| StarcoderdataPython |
76331 | <reponame>fuckseer/Refactoring
from PIL import Image
import numpy as np
import doctest
def convert_image_to_mosaic(image, size, gradation_step):
    """Pixelate *image* in place into ``size``-by-``size`` gray mosaic blocks.

    Each block is replaced by its mean gray level (channel sum / 3, averaged
    over ``size ** 2`` pixels — including truncated edge blocks, matching the
    original arithmetic) snapped down to the nearest multiple of
    ``gradation_step``. The mutated array is also returned.

    :param image: numpy array of shape (H, W, 3)
    :param size: mosaic block side length in pixels
    :param gradation_step: gray quantization step
    :return: the same array, modified in place
    """
    height = len(image)
    width = len(image[0])
    for row in range(0, height, size):
        for col in range(0, width, size):
            block = image[row:row + size, col:col + size]
            mean_gray = (block.sum() / 3) // size ** 2
            quantized = int(mean_gray // gradation_step) * gradation_step
            image[row:row + size, col:col + size] = quantized
    return image
def get_average_brightness(block, size, gradation_step):
    """Quantized mean gray level of a mosaic block.

    The per-channel sum is divided by 3 (gray level) and averaged over
    ``size ** 2`` pixels — even for truncated edge blocks, preserving the
    original mosaic arithmetic — then snapped down to the nearest multiple
    of ``gradation_step``.

    >>> import numpy as np
    >>> get_average_brightness(np.ones((3, 3, 3)) * 200, 2, 15)
    195
    >>> get_average_brightness(np.ones((10, 10, 3)) * 100, 6, 6)
    96
    """
    gray_total = block[:size, :size].sum() / 3
    mean_per_pixel = gray_total // size ** 2
    return int(mean_per_pixel // gradation_step) * gradation_step
def main():
    """Interactively read an image, convert it to a mosaic, and save it.

    All prompts are in Russian: source filename, block size, number of gray
    gradations, and output filename.
    """
    image_file = Image.open(input("Введите имя файла, которое хотите конвертировать: "))
    block_size = int(input("Введите размер блока: "))
    gradations_count = int(input("Введите количество градаций серого: "))
    image = np.array(image_file)
    # Convert gradation count into the quantization step over the 0-255 range.
    gradation_step = 255 // gradations_count
    res = Image.fromarray(convert_image_to_mosaic(image, block_size, gradation_step))
    res.save(input("Введите имя файла, в которой хотите сохранить результат: "))
# Run the interactive converter only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
99861 | <filename>2020/days/friday/bfs.py
from collections import deque
def neighbors(v):
    """Return the neighbors of vertex *v*. Not implemented yet."""
    pass  # TODO: implement graph adjacency lookup
def bfs(start, end, neighbors_fn=None):
    """Breadth-first search for the shortest path from *start* to *end*.

    :param start: start vertex
    :param end: goal vertex
    :param neighbors_fn: optional callable returning an iterable of
        neighbors for a vertex; defaults to the module-level ``neighbors``.
    :return: the shortest path as a list of vertices (including both
        endpoints), or None when *end* is unreachable.

    Fixes over the original: neighbors are enqueued individually instead of
    appending the whole list as one element; discovered vertices are tracked
    (the old ``beenTo`` set was never used, so cyclic graphs looped forever);
    the parent of each vertex is recorded when it is discovered (the old code
    mapped a vertex to itself); and the reconstructed path is returned.
    """
    if neighbors_fn is None:
        neighbors_fn = neighbors
    queue = deque([start])
    # Maps each discovered vertex to its BFS parent; doubles as the visited set.
    parent = {start: None}
    while queue:
        v = queue.popleft()
        if v == end:
            # Walk parents back to the start, then reverse into start->end order.
            path = [v]
            while parent[v] is not None:
                v = parent[v]
                path.append(v)
            path.reverse()
            return path
        for w in neighbors_fn(v):
            if w not in parent:
                parent[w] = v
                queue.append(w)
    return None
4823161 | <reponame>Taymindis/kubernetes-ingress<filename>tests/suite/test_app_protect_watch_namespace.py<gh_stars>1000+
import requests
import pytest
import time
from settings import TEST_DATA, DEPLOYMENTS
from suite.ap_resources_utils import (
create_ap_logconf_from_yaml,
create_ap_policy_from_yaml,
delete_ap_policy,
delete_ap_logconf,
)
from suite.resources_utils import (
wait_before_test,
create_example_app,
wait_until_all_pods_are_ready,
create_items_from_yaml,
delete_items_from_yaml,
delete_common_app,
delete_namespace,
ensure_connection_to_public_endpoint,
create_ingress_with_ap_annotations,
create_namespace_with_name_from_yaml,
ensure_response_from_backend,
wait_before_test,
)
from suite.yaml_utils import get_first_ingress_host_from_yaml
# This test shows that a policy outside of the namespace test_namespace is not picked up by IC.
timestamp = round(time.time() * 1000)
test_namespace = f"test-namespace-{str(timestamp)}"
policy_namespace = f"policy-test-namespace-{str(timestamp)}"
valid_resp_body = "Server name:"
invalid_resp_body = "The requested URL was rejected. Please consult with your administrator."
reload_times = {}
class BackendSetup:
    """
    Encapsulate the example details.

    Attributes:
        req_url (str): URL of the first backend route (/backend1)
        req_url_2 (str): URL of the second backend route (/backend2)
        metrics_url (str): Prometheus metrics endpoint URL
        ingress_host (str): host header value used to reach the ingress
    """

    def __init__(self, req_url, req_url_2, metrics_url, ingress_host):
        self.req_url = req_url
        self.req_url_2 = req_url_2
        self.metrics_url = metrics_url
        self.ingress_host = ingress_host
@pytest.fixture(scope="class")
def backend_setup(request, kube_apis, ingress_controller_endpoint) -> BackendSetup:
    """
    Deploy a simple application and AppProtect manifests.

    The AppProtect policy is deliberately created in a *different* namespace
    (policy_namespace) than the application and ingress (test_namespace),
    so the watch-namespace behavior can be observed by the test classes.

    :param request: pytest fixture
    :param kube_apis: client apis
    :param ingress_controller_endpoint: public endpoint
    :return: BackendSetup
    """
    policy = "file-block"
    create_namespace_with_name_from_yaml(kube_apis.v1, test_namespace, f"{TEST_DATA}/common/ns.yaml")
    print("------------------------- Deploy backend application -------------------------")
    create_example_app(kube_apis, "simple", test_namespace)
    req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend1"
    req_url_2 = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend2"
    metrics_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.metrics_port}/metrics"
    wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
    ensure_connection_to_public_endpoint(
        ingress_controller_endpoint.public_ip,
        ingress_controller_endpoint.port,
        ingress_controller_endpoint.port_ssl,
    )

    print("------------------------- Deploy Secret -----------------------------")
    src_sec_yaml = f"{TEST_DATA}/appprotect/appprotect-secret.yaml"
    create_items_from_yaml(kube_apis, src_sec_yaml, test_namespace)

    print("------------------------- Deploy logconf -----------------------------")
    src_log_yaml = f"{TEST_DATA}/appprotect/logconf.yaml"
    log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)

    print(f"------------------------- Deploy namespace: {policy_namespace} ---------------------------")
    create_namespace_with_name_from_yaml(kube_apis.v1, policy_namespace, f"{TEST_DATA}/common/ns.yaml")

    print(f"------------------------- Deploy appolicy: {policy} ---------------------------")
    src_pol_yaml = f"{TEST_DATA}/appprotect/{policy}.yaml"
    pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, policy_namespace)

    print("------------------------- Deploy ingress -----------------------------")
    # NOTE(review): initialized as a dict but immediately reassigned to a
    # string below; the initializer looks like leftover code.
    ingress_host = {}
    src_ing_yaml = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml"
    # The ingress annotation references the policy cross-namespace ("<ns>/<policy>").
    create_ingress_with_ap_annotations(
        kube_apis, src_ing_yaml, test_namespace, f"{policy_namespace}/{policy}", "True", "True", "127.0.0.1:514"
    )
    ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)
    wait_before_test()

    def fin():
        # Tear everything down in reverse order of creation.
        print("Clean up:")
        src_ing_yaml = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml"
        delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)
        delete_ap_policy(kube_apis.custom_objects, pol_name, policy_namespace)
        delete_namespace(kube_apis.v1, policy_namespace)
        delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)
        delete_common_app(kube_apis, "simple", test_namespace)
        src_sec_yaml = f"{TEST_DATA}/appprotect/appprotect-secret.yaml"
        delete_items_from_yaml(kube_apis, src_sec_yaml, test_namespace)
        delete_namespace(kube_apis.v1, test_namespace)

    request.addfinalizer(fin)

    return BackendSetup(req_url, req_url_2, metrics_url, ingress_host)
# the first case does not set "-watch-namespace" so the policy is configured on the ingress.
# This causes the traffic to be blocked
@pytest.mark.skip_for_nginx_oss
@pytest.mark.appprotect
@pytest.mark.parametrize(
    "crd_ingress_controller_with_ap",
    [
        {
            "extra_args": [
                f"-enable-custom-resources",
                f"-enable-app-protect",
                f"-enable-prometheus-metrics"
            ]
        }
    ],
    indirect=True,
)
class TestAppProtectWatchNamespaceDisabled:
    def test_responses(
        self, request, kube_apis, crd_ingress_controller_with_ap, backend_setup
    ):
        """
        Test file_block AppProtect policy without -watch-namespace
        """
        print("------------- Run test for AP policy: file-block --------------")
        print(f"Request URL: {backend_setup.req_url} and Host: {backend_setup.ingress_host}")

        ensure_response_from_backend(
            backend_setup.req_url, backend_setup.ingress_host, check404=True
        )

        print("----------------------- Send request ----------------------")
        resp = requests.get(
            f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False
        )
        print(resp.text)
        # Without -watch-namespace the IC sees the policy in policy_namespace,
        # so the .bat request is rejected (AppProtect block page, HTTP 200).
        assert invalid_resp_body in resp.text
        assert resp.status_code == 200
# In this test case the "-watch-namespace" param is set so the policy in policy_namespace
# Is not configured on the ingress -> NAP uses the default policy which will not block the same request.
@pytest.mark.skip_for_nginx_oss
@pytest.mark.appprotect
@pytest.mark.parametrize(
    "crd_ingress_controller_with_ap",
    [
        {
            "extra_args": [
                f"-enable-custom-resources",
                f"-enable-app-protect",
                f"-enable-prometheus-metrics",
                f"-watch-namespace={test_namespace}"
            ]
        }
    ],
    indirect=True,
)
class TestAppProtectWatchNamespaceEnabled:
    def test_responses(
        self, request, kube_apis, crd_ingress_controller_with_ap, backend_setup, test_namespace
    ):
        """
        Test file-block AppProtect policy with -watch-namespace
        """
        print("------------- Run test for AP policy: file-block --------------")
        print(f"Request URL: {backend_setup.req_url} and Host: {backend_setup.ingress_host}")

        ensure_response_from_backend(
            backend_setup.req_url, backend_setup.ingress_host, check404=True
        )

        print("----------------------- Send request ----------------------")
        resp = requests.get(
            f"{backend_setup.req_url}/test.bat", headers={"host": backend_setup.ingress_host}, verify=False
        )
        print(resp.text)
        # With -watch-namespace limited to test_namespace, the policy in
        # policy_namespace is invisible, so the same request is allowed.
        assert valid_resp_body in resp.text
        assert resp.status_code == 200
194647 | <reponame>Mariatta/batavia
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class PrintTests(TranspileTestCase):
    """Transpile-and-run checks for print()'s output handling in Batavia."""

    def test_buffering(self):
        # Newlines embedded in arguments must buffer/flush as in CPython.
        self.assertCodeExecution("""
            print('1: hello', ' world')
            print('2: hello\\n', 'world')
            print('3: hello', ' world\\n')
            print('4: hello\\nworld')
            print('5: hello\\nworld\\n')
            print('Done.')
            """)

    def test_fileobj(self):
        # print(file=...) must call write() on any file-like object.
        self.assertCodeExecution("""
            class FileLikeObject:
                def __init__(self):
                    self.buffer = ''

                def write(self, content):
                    self.buffer = self.buffer + (content * 2)

            out = FileLikeObject()

            print('hello', 'world', file=out)
            print('goodbye', 'world', file=out)
            print('---')
            print(out.buffer)
            print('Done.')
            """)

    def test_sep(self):
        # Custom separator between positional arguments.
        self.assertCodeExecution("""
            print('hello world', 'goodbye world', sep='-')
            print('Done.')
            """)

    def test_end(self):
        # Custom line terminator.
        self.assertCodeExecution("""
            print('hello world', 'goodbye world', end='-')
            print('Done.')
            """)

    def test_flush(self):
        # flush=True must be accepted (and not change the output).
        self.assertCodeExecution("""
            print('hello world', 'goodbye world', flush=True)
            print('Done.')
            """)

    def test_empty(self):
        # print() with no arguments emits a bare newline.
        self.assertCodeExecution("""
            print()
            print('Done.')
            """)

    def test_combined(self):
        # sep/end/file/flush used together; file objects may provide flush().
        self.assertCodeExecution("""
            class FileLikeObject:
                def __init__(self):
                    self.buffer = ''

                def write(self, content):
                    self.buffer = self.buffer + (content * 2)

                def flush(self):
                    self.buffer = self.buffer + '<<<'

            out = FileLikeObject()

            print('hello', 'world', sep='*', end='-', file=out, flush=True)
            print('goodbye', 'world', file=out, sep='-', end='*')
            print('---')
            print(out.buffer)
            print('Done.')
            """)
""")
class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    """Runs the shared builtin-function test matrix against print()."""
    functions = ["print"]

    # Generated cases known to fail; excluded until implemented.
    not_implemented = [
        'test_class',
        'test_NotImplemented',
        'test_slice',
    ]
| StarcoderdataPython |
1605200 | import random
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
from groovebot.core.models import Album, Music, Abbreviation, Strike
from groovebot.core.utils import (
read_file,
failure_message,
success_message,
config,
text_to_neuropol,
)
class MusicCog(commands.Cog):
    """Admin commands for managing music entries attached to albums."""

    def __init__(self, bot):
        self.bot = bot

    @has_permissions(manage_messages=True)
    @commands.command(name="createmusic")
    async def create_music(self, ctx, album_acronym, acronym, title, url):
        """Create a music entry under an existing album."""
        album = await Album.filter(acronym=album_acronym.upper()).first()
        if album is None:
            await failure_message(ctx, "No album with passed acronym exists.")
            return
        music = await Music.create(
            album=album, acronym=acronym.upper(), value=title, url=url
        )
        await success_message(ctx, "Music added to database!", music)

    @has_permissions(manage_messages=True)
    @commands.command(name="deletemusic")
    async def delete_music(self, ctx, acronym):
        """Delete a music entry identified by its acronym."""
        deleted_count = await Music.filter(acronym=acronym.upper()).delete()
        if deleted_count == 1:
            await success_message(ctx, "Music successfully deleted from database.")
        else:
            await failure_message(ctx, "Music has not been deleted successfully.")
class AlbumCog(commands.Cog):
    """Commands for listing and managing albums."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="getalbums")
    async def get_albums(self, ctx):
        """Send an embed listing every album acronym and its title."""
        albums = await Album.all()
        if albums:
            embed = discord.Embed(colour=discord.Colour.purple())
            embed.set_author(name="Here's a guide to all of the album abbreviations!")
            for album in albums:
                embed.add_field(name=album.acronym, value=album.value, inline=True)
            await success_message(ctx, "Albums retrieved!", embed=embed)
        else:
            await failure_message(ctx, "No albums have been created.")

    @has_permissions(manage_messages=True)
    @commands.command(name="createalbum")
    async def create_album(self, ctx, acronym, title, description):
        """Create a new album from an acronym, title, and description."""
        album = await Album.create(
            acronym=acronym.upper(), value=title, description=description
        )
        await success_message(ctx, "Album added to database.", album)

    @has_permissions(manage_messages=True)
    @commands.command(name="deletealbum")
    async def delete_album(self, ctx, acronym):
        """Delete an album and all music attached to it.

        Fix: the original deleted the album first and only then fetched it to
        remove its music — ``first()`` returned None after deletion, so the
        music rows were orphaned. Fetch the album before deleting anything.
        """
        album = await Album.filter(acronym=acronym.upper()).first()
        if album is None:
            await failure_message(ctx, "No album with passed acronym exists.")
            return
        await Music.filter(album=album).delete()
        await Album.filter(acronym=acronym.upper()).delete()
        await success_message(ctx, "Album deleted from database!")
class AbbreviationCog(commands.Cog):
    """Commands for listing, creating and deleting server abbreviations."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="getabbreviations")
    async def get_abbreviations(self, ctx):
        """Send an embed listing every stored abbreviation."""
        abbreviations = await Abbreviation.all()
        if abbreviations:
            embed = discord.Embed(colour=discord.Colour.red())
            embed.set_author(
                name="Here's a guide to some of the server's abbreviations!"
            )
            for abbreviation in abbreviations:
                embed.add_field(
                    name=abbreviation.acronym, value=abbreviation.value, inline=True
                )
            await success_message(ctx, "Abbreviations retrieved!", embed=embed)
        else:
            await failure_message(ctx, "No abbreviations have been created.")

    @has_permissions(manage_messages=True)
    @commands.command(name="createabbreviation")
    async def create_abbreviation(self, ctx, acronym, description):
        """Store a new abbreviation; the acronym is normalised to upper case."""
        abbreviation = await Abbreviation.create(
            acronym=acronym.upper(), value=description
        )
        await success_message(ctx, "Abbreviation added to database!", abbreviation)

    @has_permissions(manage_messages=True)
    @commands.command(name="deleteabbreviation")
    async def delete_abbreviation(self, ctx, acronym):
        """Delete an abbreviation by acronym; reports whether a row matched."""
        if await Abbreviation.filter(acronym=acronym.upper()).delete() == 1:
            await success_message(ctx, "Abbreviation deleted from database!")
        else:
            await failure_message(ctx, "No abbreviation with passed acronym exists.")
class MiscCog(commands.Cog):
    """Assorted utility commands: random facts, help text, neuropol images."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def fact(self, ctx):
        """Send one random line from facts.txt."""
        await ctx.send(random.choice(await read_file("facts.txt", True)))

    @commands.command()
    async def help(self, ctx):
        """Send the static help text."""
        await ctx.send(await read_file("help.txt"))

    @commands.command()
    async def neuropol(self, ctx, *args):
        """Render the message in the neuropol font and post it as an image.

        First attempt treats the last argument as an extra option for
        ``text_to_neuropol`` (presumably a style/size flag — confirm against
        that helper); if it raises ValueError, all args are retried as plain
        text.  A second ValueError means the text is too long.
        """
        neuropol_img_file = "neuropol.png"
        try:
            await text_to_neuropol(" ".join(args[:-1]), args[-1])
            await ctx.send(file=discord.File(neuropol_img_file))
        except ValueError:
            try:
                await text_to_neuropol(" ".join(args))
                await ctx.send(file=discord.File(neuropol_img_file))
            except ValueError:
                await failure_message(ctx, "Message cannot be over 35 characters.")

    @has_permissions(manage_messages=True)
    @commands.command(name="welcometest")
    async def welcome_test(self, ctx):
        """Preview the welcome message (moderators only)."""
        await ctx.send(await read_file("welcome.txt"))
class ModerationCog(commands.Cog):
    """Moderation commands: bans, strikes and member verification."""

    def __init__(self, bot):
        self.bot = bot

    @has_permissions(manage_messages=True)
    @commands.command(name="modhelp")
    async def mod_help(self, ctx):
        """Send the moderator help text."""
        await ctx.send(await read_file("modhelp.txt"))

    @has_permissions(ban_members=True)
    @commands.command()
    async def ban(self, ctx, member: discord.Member, reason):
        """Ban *member*, DM them the appeal link, and confirm in channel."""
        await member.ban(reason=reason)
        await member.send(
            f"You have been banned from the Animusic server. If you would like to submit an appeal, "
            f"you can click here: https://forms.gle/FmkxeXaXSsUpS6Vv7 \nReason: {reason}"
        )
        await success_message(
            ctx,
            f"Successfully banned user {member.mention} ({member}) for reason: {reason}.",
        )

    @has_permissions(manage_messages=True)
    @commands.command()
    async def strike(self, ctx, member: discord.Member, reason, proof="Not provided."):
        """Record a strike against *member* and DM them the reason."""
        # NOTE(review): attachment URLs are appended to the default
        # "Not provided." text rather than replacing it — confirm intended.
        if ctx.message.attachments:
            for attachment in ctx.message.attachments:
                proof += f" {attachment.url}\n"
        strike = await Strike.create(member_id=member.id, reason=reason, proof=proof)
        await success_message(
            ctx, f"Strike against {member.mention} added to database!", strike
        )
        await member.send(
            f"You have incurred a strike against you! Please follow the rules. **Reason:** {strike.reason}"
        )

    @has_permissions(manage_messages=True)
    @commands.command(name="getstrikes")
    async def get_strikes(self, ctx, member: discord.Member):
        """List all strikes recorded against *member* in an embed."""
        strikes = await Strike.filter(member_id=member.id).all()
        if strikes:
            embed = discord.Embed(colour=discord.Colour.red())
            embed.set_author(
                name=f"Here's a list of all of the strikes against {member.display_name}!"
            )
            for strike in strikes:
                embed.add_field(
                    name=f"ID: {strike.id}", value=strike.reason, inline=True
                )
            await success_message(ctx, "Strikes retrieved!", embed=embed)
        else:
            await failure_message(
                ctx, f"No strikes associated with {member.mention} could be found!"
            )

    @has_permissions(manage_messages=True)
    @commands.command(name="deletestrike")
    async def delete_strike(self, ctx, number):
        """Delete a strike by database id; reports whether a row matched."""
        if await Strike.filter(id=number).delete() == 1:
            await success_message(ctx, f"Strike id {number} deleted from database!")
        else:
            await failure_message(ctx, f"Could not find strike with id {number}.")

    @commands.command()
    async def verify(self, ctx):
        """Grant the verified role to members with no roles yet
        (``<= 1`` because @everyone always counts as one role)."""
        if len(ctx.author.roles) <= 1:
            role = ctx.guild.get_role(int(config["GROOVE"]["verified_role_id"]))
            await ctx.author.add_roles(role)
class RetrievalCog(commands.Cog):
    """Resolves an acronym to an album, song, abbreviation or strike id."""

    def __init__(self, bot):
        self.bot = bot

    async def _get_album(self, ctx, album):
        """Send an embed listing every song of *album*, or a failure message."""
        music = await Music.filter(album=album).all()
        if music:
            embed = discord.Embed(colour=discord.Colour.blue())
            embed.set_author(name="Here's a guide to all of the music abbreviations!")
            for song in music:
                embed.add_field(name=song.acronym, value=song.value, inline=True)
            await success_message(ctx, "Album retrieved!", album, embed)
        else:
            await failure_message(ctx, f"Album {album} contains no music.")

    @commands.command()
    async def get(self, ctx, acronym):
        """Look the acronym up in albums, then music, then abbreviations,
        then (moderators only) numeric strike ids.

        Each candidate is fetched once with ``first()`` and checked for
        None, instead of the original ``exists()`` + ``first()`` pair,
        halving the number of database queries per lookup.
        """
        acronym_upper = acronym.upper()
        album = await Album.filter(acronym=acronym_upper).first()
        if album is not None:
            await self._get_album(ctx, album)
            return
        music = (
            await Music.filter(acronym=acronym_upper)
            .prefetch_related("album")
            .first()
        )
        if music is not None:
            await success_message(ctx, "Music retrieved!", music)
            return
        abbreviation = await Abbreviation.filter(acronym=acronym_upper).first()
        if abbreviation is not None:
            await success_message(ctx, "Abbreviation retrieved!", abbreviation)
            return
        # Strike lookup is moderator-only and requires a numeric id.
        if (
            ctx.channel.permissions_for(ctx.author).manage_messages
            and acronym.isnumeric()
        ):
            strike = await Strike.filter(id=acronym).first()
            if strike is not None:
                await success_message(ctx, "Strike retrieved!", strike)
                return
        await failure_message(ctx, "Please try again with a different acronym.")
| StarcoderdataPython |
3371757 | <gh_stars>1-10
try:
import rest_framework
except ImportError:
import unittest
raise unittest.SkipTest("djangorestframework is not installed")
from decimal import Decimal
from typing import Optional
from typing import Set
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Permission
from django.db import transaction
from django.test import Client
from django.test import modify_settings
from django.test import override_settings
from django.test import TestCase
from django.urls import reverse
from rest_framework import viewsets
from rest_framework.test import APIClient
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from allianceutils.api.permissions import GenericDjangoViewsetPermissions
from test_allianceutils.tests.profile_auth.models import User
from test_allianceutils.tests.viewset_permissions.models import NinjaTurtleModel
from test_allianceutils.tests.viewset_permissions.models import SenseiRatModel
from test_allianceutils.tests.viewset_permissions.views import NinjaTurtleSerializer
USER_EMAIL = "<EMAIL>"
USER_PASS = "password"
class IgnoreObjectsBackend:
"""
Django's ModelBackend will reject any object permission check with an object;
we want to simply ignore objects instead.
This should come last in the authentication backends list
"""
def has_perm(self, user_obj, perm, obj=None):
if obj is not None:
return ModelBackend().has_perm(user_obj, perm, None)
return None
@override_settings(
MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',
),
)
class ViewsetPermissionsTestCase(TestCase):
def setUp(self):
self.user = User.objects.create(email=USER_EMAIL)
self.user.set_password(<PASSWORD>_<PASSWORD>)
self.user.save()
self.turtle = NinjaTurtleModel.objects.create(
name="leonardo",
color="red",
shell_size=Decimal("12.1"),
)
def grant_permission(self, codename: str, model=NinjaTurtleModel):
perm = Permission.objects.get_by_natural_key(
codename=codename,
app_label=model._meta.app_label,
model=model._meta.model_name
)
self.user.user_permissions.add(perm)
# refresh_from_db() doesn't refresh the user permissions cache, need to reload from scratch
# self.user = User.objects.get(pk=self.user.pk)
try:
del self.user._user_perm_cache
except AttributeError:
pass
def test_simple_permission_viewset(self):
"""
Test SimpleDjangoObjectPermissions
"""
client = Client()
client.force_login(self.user)
# request should not succeed
response = client.get(reverse('permissions:simple-list'))
self.assertEqual(response.status_code, 403)
response = client.get(reverse('permissions:simple-detail', kwargs={"pk": self.turtle.id}))
self.assertEqual(response.status_code, 403)
# once you add the permission to that user it should now pass
self.grant_permission("can_eat_pizza")
response = client.get(reverse('permissions:simple-list'))
self.assertEqual(response.status_code, 200)
response = client.get(reverse('permissions:simple-detail', kwargs={"pk": self.turtle.id}))
self.assertEqual(response.status_code, 200)
@modify_settings(
AUTHENTICATION_BACKENDS={
"append": 'test_allianceutils.tests.viewset_permissions.tests.IgnoreObjectsBackend',
}
)
def test_model_permission_viewset(self):
"""
Test GenericDjangoViewsetPermissions
"""
client = APIClient()
client.force_login(self.user)
def test_methods(with_grant_permission: Optional[str] = None, should_succeed: Set = set()):
turtle_id = self.turtle.id
with transaction.atomic():
if with_grant_permission:
self.grant_permission(with_grant_permission)
response = client.get(reverse('permissions:model-list'))
self.assertEqual(response.status_code, 200 if "view" in should_succeed else 403)
response = client.get(reverse('permissions:model-detail', kwargs={"pk": turtle_id}))
self.assertEqual(response.status_code, 200 if "view" in should_succeed else 403)
response = client.post(reverse('permissions:model-list'), data={"name": "leonardo", "color": "blue", "shell_size": "13.0"})
self.assertEqual(response.status_code, 201 if "add" in should_succeed else 403)
response = client.patch(reverse('permissions:model-detail', kwargs={"pk": turtle_id}), data={"name": "michaelangelo"})
self.assertEqual(response.status_code, 200 if "change" in should_succeed else 403)
response = client.delete(reverse('permissions:model-detail', kwargs={"pk": turtle_id}))
self.assertEqual(response.status_code, 204 if "delete" in should_succeed else 403)
transaction.set_rollback(True)
test_methods()
test_methods("view_ninjaturtlemodel", {"view"})
test_methods("add_ninjaturtlemodel", {"add"})
test_methods("change_ninjaturtlemodel", {"change"})
test_methods("delete_ninjaturtlemodel", {"delete"})
@modify_settings(
AUTHENTICATION_BACKENDS={
"append": 'test_allianceutils.tests.viewset_permissions.tests.IgnoreObjectsBackend',
}
)
def test_get_model_permission_viewset(self):
factory = APIRequestFactory()
# These two ViewSets are equivalent; they just use different hooks to
# return the model to use for permission checks
class NinjaTurtleGetModelPermission1(GenericDjangoViewsetPermissions):
def get_model(self, view):
return SenseiRatModel
class NinjaTurtleGetModelViewSet1(viewsets.ModelViewSet):
queryset = NinjaTurtleModel.objects.all()
serializer_class = NinjaTurtleSerializer
permission_classes = [NinjaTurtleGetModelPermission1]
class NinjaTurtleGetModelViewSet2(viewsets.ModelViewSet):
queryset = NinjaTurtleModel.objects.all()
serializer_class = NinjaTurtleSerializer
permission_classes = [GenericDjangoViewsetPermissions]
def get_permission_model(self):
return SenseiRatModel
def test_methods(with_grant_permission: Optional[str] = None, grant_model=NinjaTurtleModel,
should_succeed: Set = set()):
turtle_id = self.turtle.id
for viewset in (NinjaTurtleGetModelViewSet1, NinjaTurtleGetModelViewSet2):
with self.subTest(f"{viewset.__name__} with_grant_permission={with_grant_permission} grant_model={grant_model.__name__} should_success={should_succeed}"):
with transaction.atomic():
if with_grant_permission:
self.grant_permission(with_grant_permission, grant_model)
view = viewset.as_view({"get": "list"})
# Refetch user... something was caching permissions
user = User.objects.get(pk=self.user.pk)
request = factory.get("")
force_authenticate(request, user=user)
response = view(request).render()
self.assertEqual(response.status_code, 200 if "view" in should_succeed else 403)
view = viewset.as_view({"get": "retrieve"})
request = factory.get("")
force_authenticate(request, user=user)
response = view(request, pk=turtle_id).render()
self.assertEqual(response.status_code, 200 if "view" in should_succeed else 403)
transaction.set_rollback(True)
test_methods()
test_methods("view_ninjaturtlemodel") # should fail; perm lookup would hit SenseiRat who is going to say no
test_methods("view_senseiratmodel", SenseiRatModel, {"view"}) # should succeed with permission from SenseiRat
| StarcoderdataPython |
1692423 | <reponame>oxigenocc/oxigeno.cc<filename>mysite/equipos/management/commands/equipos_db_migration.py
from django.core.management.base import BaseCommand
from oxigeno.models import Tanque, Concentrador
from equipos.models import Tanque as Tan
from equipos.models import Concentrador as Conc
def _copy_all(queryset, target_model, label):
    """Clone every instance in *queryset* into *target_model*.

    The source row's field dict is reused directly as kwargs; the
    Django-internal ``_state`` attribute must be stripped first.
    ``get_or_create`` keeps the copy idempotent, and errors are reported
    per row so one bad record does not abort the whole run.
    """
    for obj in queryset:
        fields = obj.__dict__
        fields.pop('_state')
        try:
            target_model.objects.get_or_create(**fields)
        except Exception as e:
            print(str(e))
            print("error en la migracion de {} {}".format(
                label, fields['id']))


def migration():
    """Copy every Tanque and Concentrador row from the legacy oxigeno
    models into the new equipos models (v3 architecture)."""
    _copy_all(Tanque.objects.all(), Tan, "Tanque")
    print('Migracion de Tanques terminada')
    _copy_all(Concentrador.objects.all(), Conc, "concentrador")
    print('Migracion de Conentradores terminada')
class Command(BaseCommand):
    # Entry point: ``python manage.py equipos_db_migration``.
    help = 'Migracion de modelos de tanque, concentrador a arquitectura v3'

    def handle(self, *args, **options):
        """Run the one-off data migration."""
        migration()
139528 | from spacy.tokens import Doc
def convert(cols, matched_token_idx):
    """Apply change_head() to *cols* for every matched token pair.

    ``matched_token_idx`` holds 0-based index pairs; CoNLL-U ids are
    1-based strings, so each index is shifted and stringified before the
    rewiring is applied.
    """
    one_based = [[str(idx + 1) for idx in pair] for pair in matched_token_idx]
    for pair in one_based:
        cols = change_head(pair, cols)
    return cols
def change_head(matched, cols):
    """Rewire one matched (complement, copula) pair in CoNLL-U rows.

    ``matched`` holds two 1-based token ids as strings: the complement
    token and the "be" token.  Dependents of the complement are re-attached
    to "be", the "be" token takes over the complement's head and relation,
    and the complement becomes a dependent of "be" with "be"'s old
    relation.  Rows are mutated in place and the row list is returned.
    """
    comp, be = matched
    comp_row = cols[int(comp) - 1]
    be_row = cols[int(be) - 1]
    # Capture the pre-rewiring attachment of both tokens.
    comp_head, comp_dep = comp_row[6], comp_row[7]
    be_dep = be_row[7]
    rewired = []
    for idx, row in enumerate(cols, start=1):
        token_id = str(idx)
        if row[6] == comp:
            row[6] = be
        if token_id == be:
            row[6], row[7] = comp_head, comp_dep
        elif token_id == comp:
            row[6], row[7] = be, be_dep
        rewired.append(row)
    return rewired
def conll_list_to_doc(vocab, conll):
    """Build a tagged + parsed spaCy ``Doc`` from one sentence of CoNLL-U rows.

    ``conll`` is a list of 10-column rows, e.g.
    ``['12', '31', '31', 'NUM', 'CD', 'NumType=Card', '13', 'nummod', '_',
    'start_char=242|end_char=244']``.  Multi-word ("1-2") and empty ("1.1")
    token ids are skipped; heads are converted from 1-based CoNLL ids to
    0-based token indices.
    """
    words, spaces, tags, poses, morphs, lemmas = [], [], [], [], [], []
    heads, deps = [], []
    for i in range(len(conll)):
        line = conll[i]
        parts = line
        id_, word, lemma, pos, tag, morph, head, dep, _, misc = parts
        if "." in id_ or "-" in id_:
            continue
        if "SpaceAfter=No" in misc:
            spaces.append(False)
        else:
            spaces.append(True)
        id_ = int(id_) - 1
        # Root ("0") and missing ("_") heads are mapped to the token itself.
        head = (int(head) - 1) if head not in ("0", "_") else id_
        tag = pos if tag == "_" else tag
        morph = morph if morph != "_" else ""
        dep = "ROOT" if dep == "root" else dep
        words.append(word)
        lemmas.append(lemma)
        poses.append(pos)
        tags.append(tag)
        morphs.append(morph)
        heads.append(head)
        deps.append(dep)
    doc = Doc(vocab, words=words, spaces=spaces)
    for i in range(len(doc)):
        doc[i].tag_ = tags[i]
        doc[i].pos_ = poses[i]
        doc[i].dep_ = deps[i]
        doc[i].lemma_ = lemmas[i]
        doc[i].head = doc[heads[i]]
    # Mark the doc as annotated so downstream spaCy components accept it.
    doc.is_parsed = True
    doc.is_tagged = True
    return doc
'''
##legacy
def process(doc):
text = str(doc)
doc.is_parsed = True
doc.is_tagged = True
token_list = [token.text for token in doc]
return text, doc, token_list
''' | StarcoderdataPython |
1633549 | # views.py
from datetime import datetime
from flask import Flask
from flask import render_template
from flask import flash
from flask import redirect
from flask import request
from flask_sqlalchemy import SQLAlchemy
from flask_login import login_user
from flask_login import logout_user
from flask_login import current_user
from flask_login import login_required
from app import app
from app import db
from app import lm
from app import init
from .forms import SignUpForm
from .models import User
from .models import Monitor
from .models import MonitorLog
from .models import AlertLog
# init
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page (served at both '/' and '/index')."""
    return render_template('index.html')
@app.route('/dashboard')
def dashboard():
    """Dashboard placeholder; currently just returns init.test_print()."""
    return init.test_print()
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Show the sign-up form and create a User on a valid POST.

    On success the new user is committed and the client is redirected to
    the dashboard; otherwise the form is re-rendered.
    """
    form = SignUpForm()
    if form.validate_on_submit():
        user = User(request.form['email_id'], request.form['password'],
                    request.form['name'], request.form['phone'])
        db.session.add(user)
        db.session.commit()
        # Bug fix: ``form.name`` is the WTForms field object (renders as
        # HTML markup); the submitted value lives in ``form.name.data``.
        flash('Hello, %s, Wait seconds... ' % form.name.data)
        flash('User has been saved.')
        return redirect('/dashboard')
    return render_template('signup.html', title='Sign Up', form=form)
@app.route('/signin')
def signin():
    """Sign-in placeholder — returns static text; real auth not implemented."""
    return 'sign in'
| StarcoderdataPython |
3294600 | # -*- coding: utf-8 -*-
from spiders.BeiJing import BeijingSpider
from model.config import DBSession
from model.rule import Rule
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from scrapy.crawler import Crawler
from twisted.internet import reactor
from scrapy import signals
RUNNING_CRAWLERS = []
def spider_closing(spider):
    """Activates on spider closed signal"""
    # log.msg("Spider closed: %s" % spider, level=log.INFO)
    # Remove the finished spider from the running set and stop the Twisted
    # reactor once the last one is done, so the script exits cleanly.
    RUNNING_CRAWLERS.remove(spider)
    if not RUNNING_CRAWLERS:
        reactor.stop()
# Crawl settings shared by every crawler instance.
settings = Settings()
settings.set("USER_AGENT", "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36")
settings.set("ITEM_PIPELINES" , {
    # 'pipelines.DuplicatesPipeline': 200,
    # 'pipelines.CountDropPipline': 100,
    'pipelines.SeprojectPipeline': 300
})
# settings.set("LOG_LEVEL","INFO")
# process = CrawlerProcess(settings)

# Start one crawler per enabled rule; each rule configures its own
# BeijingSpider instance.
db = DBSession()
rules = db.query(Rule).filter(Rule.enable == 1)
for rule in rules:
    crawler = Crawler(settings)
    spider = BeijingSpider(rule)  # instantiate every spider using rule
    RUNNING_CRAWLERS.append(spider)
    # stop reactor when spider closes
    crawler.signals.connect(spider_closing, signal=signals.spider_closed)
    crawler.configure()
    crawler.crawl(spider)
    crawler.start()

# blocks process so always keep as the last statement
reactor.run()
# print(rule.starturl)
# print(rule.nextpagerule)
# process.crawl(BeijingSpider,rule)
# process.start()
178244 | <gh_stars>1-10
#!/usr/bin/env python
"""Plot a 40-row window of comparison scores from ``seq.txt`` as a heatmap
with diagonal reference guide lines, next to a one-column "temporality"
weight strip.

Fix: the original used Python-2-only ``xrange``; ``range`` behaves
identically here and keeps the script runnable on Python 3 as well.
"""
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
from math import pow, exp
from scipy.stats import norm

# 133 0.5
# Load the similarity matrix, take a 40-row window around row 125,
# flip it vertically, and min-max normalise to [0, 1].
data = np.genfromtxt("seq.txt", delimiter=', ')
data = data[125-20:125+20,]
data = data[::-1]
# data = data.reshape((20,3,20,-1)).mean(axis=3).mean(1)
data = (data - data.min()) * (1. / (data.max() - data.min()))
#rows, cols = np.indices((60,30))

# Create new colormap, with white for zero
# (can also take RGB values, like (255,255,255):
colors = [('white')] + [(cm.jet(i)) for i in range(1, 256)]
new_map = matplotlib.colors.LinearSegmentedColormap.from_list('new_map', colors, N=256)

plt.subplots_adjust(wspace=.0)
gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
ax0 = plt.subplot(gs[0])
plt.pcolor(data, cmap=new_map)
#plt.gca().set_aspect('equal', adjustable='box')
ax0.set_aspect('equal', adjustable='box')

# Dashed guide lines with their slope labels, fanned out from (0, 27).
ax0.text(25, 38, "-0.50", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 30], [27, 39], 'r--', lw=1, color='black')
ax0.text(25, 33, "-0.25", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 30], [27, 32], 'r--', lw=1, color='black')
ax0.text(25, 28, "0.00", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 30], [27, 27], 'r--', lw=1, color='black')
ax0.text(25, 23, "0.25", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 30], [27, 20], 'r--', lw=1, color='black')
ax0.text(25, 16, "0.50", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 30], [27, 12], 'k-', lw=1.5, color='black')
ax0.text(25, 10, "0.75", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 30], [27, 5], 'r--', lw=1, color='black')
ax0.text(25, 3, "1.00", color='white', bbox={'facecolor':'black', 'alpha':0.75, 'pad':2})
ax0.plot([0, 27], [27, 0], 'r--', lw=1, color='black')
plt.title("Comparisons")
plt.xlabel("Current Observations", fontsize=16)
plt.ylabel("Memory", fontsize=16)

# Right-hand strip: a Gaussian temporality profile, normalised to [0, 1].
ax1 = plt.subplot(gs[1], sharey = ax0)
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.title("Temporallity")
tempo = np.asarray([[exp(-(pow((i-12.), 2.0)) / (2.0 * 80))] for i in range(40)])
# tempo = np.concatenate((np.asarray([[(1. + norm.pdf(i, 22., 10.)) / 2.] for i in range(0, 23)]), np.asarray([[(1. + 2. * norm.pdf(i, 22., 10.)) / 3.] for i in range(24, 60)])), axis=1)
tempo = (tempo - tempo.min()) * (1. / (tempo.max() - tempo.min()))
plt.pcolor(tempo, cmap=new_map)
y = np.linspace(0, 40, 40)
plt.plot(tempo, y, 'k-', linewidth=1)
plt.colorbar()
#savefig('map.png')
show()
3320798 | import os
import random
import numpy as np
from itertools import chain, combinations
import torch
from torchvision import transforms
import torch.optim as optim
import PIL.Image as Image
from sklearn.metrics import accuracy_score
#from utils.BaseExperiment import BaseExperiment
from PIL import ImageFont
from modalities.CMNIST import CMNIST
# from mnistsvhntext.SVHNMNISTDataset import SVHNMNIST
from mmnist.MMNISTDataset import MMNISTDataset
# from mmnist.networks.VAEtrimodalSVHNMNIST import VAEtrimodalSVHNMNIST
from mmnist.networks.VAEMMNIST import VAEMMNIST
# from mmnist.networks.ConvNetworkImgClfSVHN import ClfImgSVHN
# from mmnist.networks.ConvNetworkTextClf import ClfText as ClfText
from mmnist.networks.ConvNetworkImgClfCMNIST import ClfImg as ClfImgCMNIST
from mmnist.networks.ConvNetworksImgCMNIST import EncoderImg, DecoderImg
# from mmnist.networks.ConvNetworksImgSVHN import EncoderSVHN, DecoderSVHN
# from mmnist.networks.ConvNetworksTextMNIST import EncoderText, DecoderText
from utils.BaseExperiment import BaseExperiment
class MMNISTExperiment(BaseExperiment):
    """Experiment wiring the MMNIST dataset, multimodal VAE, per-modality
    digit classifiers and evaluation utilities together."""

    def __init__(self, flags, alphabet):
        super().__init__(flags)
        # self.flags = flags
        # self.name = flags.name
        # self.dataset_name = flags.dataset
        self.num_modalities = flags.num_mods
        self.alphabet = alphabet
        self.plot_img_size = torch.Size((3, 28, 28))
        self.font = ImageFont.truetype('FreeSerif.ttf', 38)
        self.flags.num_features = len(alphabet)
        # NOTE: ordering matters — modalities/subsets must exist before the
        # model, dataset before classifiers and test samples.
        self.modalities = self.set_modalities()
        self.subsets = self.set_subsets()
        self.dataset_train = None
        self.dataset_test = None
        self.set_dataset()
        self.mm_vae = self.set_model()
        self.clfs = self.set_clfs()
        self.optimizer = None
        self.rec_weights = self.set_rec_weights()
        self.style_weights = self.set_style_weights()
        self.test_samples = self.get_test_samples()
        self.eval_metric = accuracy_score
        self.paths_fid = self.set_paths_fid()
        self.labels = ['digit']

    def set_model(self):
        """Instantiate the multimodal VAE on the configured device."""
        model = VAEMMNIST(self.flags, self.modalities, self.subsets)
        model = model.to(self.flags.device)
        return model

    def set_modalities(self):
        """Build one CMNIST modality per channel ("m0".."m{n-1}"), each with
        its own encoder/decoder pair; returned as a name->modality dict."""
        mods = [CMNIST("m%d" % m, EncoderImg(self.flags),
                       DecoderImg(self.flags), self.flags.class_dim,
                       self.flags.style_dim, self.flags.likelihood) for m in range(self.num_modalities)]
        mods_dict = {m.name: m for m in mods}
        return mods_dict

    def set_dataset(self):
        """Load the train/test MMNIST splits from the paths given in flags."""
        transform = transforms.Compose([transforms.ToTensor()])
        train = MMNISTDataset(self.flags.unimodal_datapaths_train, transform=transform)
        test = MMNISTDataset(self.flags.unimodal_datapaths_test, transform=transform)
        self.dataset_train = train
        self.dataset_test = test

    def set_clfs(self):
        """Load one pretrained digit classifier per modality when enabled.

        Raises ValueError if any modality is left without a classifier,
        since evaluation depends on all of them being available.
        """
        clfs = {"m%d" % m: None for m in range(self.num_modalities)}
        if self.flags.use_clf:
            for m, fp in enumerate(self.flags.pretrained_classifier_paths):
                model_clf = ClfImgCMNIST()
                model_clf.load_state_dict(torch.load(fp))
                model_clf = model_clf.to(self.flags.device)
                clfs["m%d" % m] = model_clf
            for m, clf in clfs.items():
                if clf is None:
                    raise ValueError("Classifier is 'None' for modality %s" % str(m))
        return clfs

    def set_optimizer(self):
        # optimizer definition
        total_params = sum(p.numel() for p in self.mm_vae.parameters())
        params = list(self.mm_vae.parameters());
        print('num parameters: ' + str(total_params))
        optimizer = optim.Adam(params,
                               lr=self.flags.initial_learning_rate,
                               betas=(self.flags.beta_1,
                                      self.flags.beta_2))
        self.optimizer = optimizer

    def set_rec_weights(self):
        """Per-modality reconstruction-loss weights (uniform 1.0 here)."""
        rec_weights = dict()
        for k, m_key in enumerate(self.modalities.keys()):
            mod = self.modalities[m_key]
            numel_mod = mod.data_size.numel()  # NOTE(review): computed but unused
            rec_weights[mod.name] = 1.0
        return rec_weights

    def set_style_weights(self):
        """Per-modality style-KL weights, all set to flags.beta_style."""
        weights = {"m%d" % m: self.flags.beta_style for m in range(self.num_modalities)}
        return weights

    def get_transform_mmnist(self):
        # transform_mnist = transforms.Compose([transforms.ToTensor(),
        #                                       transforms.ToPILImage(),
        #                                       transforms.Resize(size=(28, 28), interpolation=Image.BICUBIC),
        #                                       transforms.ToTensor()])
        transform_mnist = transforms.Compose([transforms.ToTensor()])
        return transform_mnist

    def get_test_samples(self, num_images=10):
        """Randomly pick one test sample per digit label 0..num_images-1,
        with all modalities moved to the experiment device (used for
        qualitative plotting)."""
        n_test = len(self.dataset_test)
        samples = []
        for i in range(num_images):
            while True:
                ix = random.randint(0, n_test-1)
                sample, target = self.dataset_test[ix]
                if target == i:
                    for k, key in enumerate(sample):
                        sample[key] = sample[key].to(self.flags.device)
                    samples.append(sample)
                    break
        return samples

    def mean_eval_metric(self, values):
        """Average a list of metric values."""
        return np.mean(np.array(values))

    def get_prediction_from_attr(self, attr, index=None):
        """Argmax over class scores -> integer predictions; *index* unused."""
        pred = np.argmax(attr, axis=1).astype(int)
        return pred

    def eval_label(self, values, labels, index):
        """Score predicted class scores against ground-truth labels."""
        pred = self.get_prediction_from_attr(values)
        return self.eval_metric(labels, pred)
| StarcoderdataPython |
4826832 | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import torch.nn.init as init
from sklearn.preprocessing import MinMaxScaler
import sys
from tqdm import tqdm
def weights_init(m):
    """Initializer hook for ``Module.apply``: on any module whose class
    name contains 'Linear', draw weights from N(0, 0.02) and zero the
    bias; every other module type is left untouched."""
    if m.__class__.__name__.find('Linear') != -1:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
class CLASSIFIER:
# train_Y is interger
def __init__(self, _train_X, _train_Y, data_loader, _nclass, _cuda, _lr=0.001, _beta1=0.5, _nepoch=20,
_batch_size=100, generalized=True, MCA=True):
self.train_X = _train_X
self.train_Y = _train_Y
self.test_seen_feature = data_loader.test_seen_feature
self.test_seen_label = data_loader.test_seen_label
self.test_unseen_feature = data_loader.test_unseen_feature
self.test_unseen_label = data_loader.test_unseen_label
self.seenclasses = data_loader.seenclasses
self.unseenclasses = data_loader.unseenclasses
self.ntrain_class = data_loader.ntrain_class
self.batch_size = _batch_size
self.nepoch = _nepoch
self.nclass = _nclass
self.input_dim = _train_X.shape[1]
self.cuda = _cuda
self.MCA = MCA
self.model = LINEAR_LOGSOFTMAX(self.input_dim, self.nclass)
self.model.apply(weights_init)
self.criterion = nn.NLLLoss()
self.input = torch.FloatTensor(_batch_size, self.input_dim)
self.label = torch.LongTensor(_batch_size)
self.lr = _lr
self.beta1 = _beta1
# setup optimizer
self.optimizer = optim.Adam(self.model.parameters(), lr=_lr, betas=(_beta1, 0.999))
if self.cuda:
self.model.cuda()
self.criterion.cuda()
self.input = self.input.cuda()
self.label = self.label.cuda()
self.index_in_epoch = 0
self.epochs_completed = 0
self.ntrain = self.train_X.shape[0]
if generalized:
self.acc_seen, self.acc_unseen, self.H = self.fit()
#print('Final: acc_seen=%.4f, acc_unseen=%.4f, h=%.4f' % (self.acc_seen, self.acc_unseen, self.H))
else:
self.acc = self.fit_zsl()
#print('acc=%.4f' % (self.acc))
def fit_zsl(self):
best_acc = 0
mean_loss = 0
last_loss_epoch = 1e8
for epoch in tqdm(range(self.nepoch)):
for i in range(0, self.ntrain, self.batch_size):
self.model.zero_grad()
batch_input, batch_label = self.next_batch(self.batch_size)
self.input.copy_(batch_input)
self.label.copy_(batch_label)
inputv = Variable(self.input)
labelv = Variable(self.label)
output = self.model(inputv)
loss = self.criterion(output, labelv)
mean_loss += loss.item()
loss.backward()
self.optimizer.step()
#print('Training classifier loss= ', loss.data[0])
acc = self.val(self.test_unseen_feature, self.test_unseen_label, self.unseenclasses)
#print('acc %.4f' % (acc))
if acc > best_acc:
best_acc = acc
return best_acc * 100
def fit(self):
best_H = 0
best_seen = 0
best_unseen = 0
for epoch in tqdm(range(self.nepoch)):
for i in range(0, self.ntrain, self.batch_size):
self.model.zero_grad()
batch_input, batch_label = self.next_batch(self.batch_size)
self.input.copy_(batch_input)
self.label.copy_(batch_label)
inputv = Variable(self.input)
labelv = Variable(self.label)
output = self.model(inputv)
loss = self.criterion(output, labelv)
loss.backward()
self.optimizer.step()
#print('Training classifier loss= ', loss.data[0])
acc_seen = 0
acc_unseen = 0
acc_seen = self.val_gzsl(self.test_seen_feature, self.test_seen_label)
acc_unseen = self.val_gzsl(self.test_unseen_feature, self.test_unseen_label+self.ntrain_class)
H = 2*acc_seen*acc_unseen / (acc_seen+acc_unseen)
#print('acc_seen=%.4f, acc_unseen=%.4f, h=%.4f' % (acc_seen, acc_unseen, H))
if H > best_H:
best_seen = acc_seen
best_unseen = acc_unseen
best_H = H
return best_seen * 100, best_unseen * 100, best_H *100
def next_batch(self, batch_size):
    """Return the next (X, Y) minibatch, reshuffling at epoch boundaries.

    Mirrors the classic TensorFlow Dataset.next_batch idiom: the data is
    shuffled once at the very start and again whenever an epoch wraps; a
    wrapping batch is stitched from the tail of one epoch and the head of
    the next.

    # NOTE(review): assumes batch_size <= self.ntrain — a larger batch
    # would make index_in_epoch exceed ntrain on the wrap branch.
    """
    start = self.index_in_epoch
    # shuffle the data at the first epoch
    if self.epochs_completed == 0 and start == 0:
        perm = torch.randperm(self.ntrain)
        self.train_X = self.train_X[perm]
        self.train_Y = self.train_Y[perm]
    # the last batch
    if start + batch_size > self.ntrain:
        self.epochs_completed += 1
        rest_num_examples = self.ntrain - start
        if rest_num_examples > 0:
            # Grab the leftover tail of the current (pre-shuffle) ordering.
            X_rest_part = self.train_X[start:self.ntrain]
            Y_rest_part = self.train_Y[start:self.ntrain]
        # shuffle the data
        perm = torch.randperm(self.ntrain)
        self.train_X = self.train_X[perm]
        self.train_Y = self.train_Y[perm]
        # start next epoch
        start = 0
        self.index_in_epoch = batch_size - rest_num_examples
        end = self.index_in_epoch
        X_new_part = self.train_X[start:end]
        Y_new_part = self.train_Y[start:end]
        #print(start, end)
        if rest_num_examples > 0:
            return torch.cat((X_rest_part, X_new_part), 0) , torch.cat((Y_rest_part, Y_new_part), 0)
        else:
            return X_new_part, Y_new_part
    else:
        self.index_in_epoch += batch_size
        end = self.index_in_epoch
        #print(start, end)
        # from index start to index end-1
        return self.train_X[start:end], self.train_Y[start:end]
def val_gzsl(self, test_X, test_label):
    """Evaluate accuracy over the full (seen + unseen) label space.

    Predictions are the argmax over all classifier outputs; accuracy is
    either mean-class accuracy (self.MCA) or plain per-sample accuracy.
    """
    start = 0
    ntest = test_X.size()[0]
    predicted_label = torch.LongTensor(test_label.size())
    with torch.no_grad():
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start+self.batch_size)
            if self.cuda:
                output = self.model(test_X[start:end].cuda())
            else:
                output = self.model(test_X[start:end])
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end
    if self.MCA:
        acc = self.eval_MCA(predicted_label.numpy(), test_label.numpy())
    else:
        acc = (predicted_label.numpy() == test_label.numpy()).mean()
    return acc
def eval_MCA(self, preds, y):
    """Mean class accuracy: average the per-class accuracy over every
    class that actually occurs in the ground-truth array *y*."""
    per_class = [(preds[y == cls] == cls).mean() for cls in np.unique(y)]
    return np.asarray(per_class).mean()
def compute_per_class_acc_gzsl(self, test_label, predicted_label, target_classes):
    """Average per-class accuracy over an explicit tensor of target classes.

    Each class contributes (#correct / #samples of that class); the sum is
    divided by the number of target classes.
    """
    acc_sum = 0
    for cls in target_classes:
        mask = (test_label == cls)
        n_correct = torch.sum(test_label[mask] == predicted_label[mask]).float()
        acc_sum += n_correct / torch.sum(mask)
    return acc_sum / target_classes.size(0)
# test_label is integer
def val(self, test_X, test_label, target_classes):
    """Evaluate classification accuracy on a held-out split.

    NOTE: this is currently identical to val_gzsl and the `target_classes`
    parameter is unused (kept for interface compatibility with callers).
    """
    start = 0
    ntest = test_X.size()[0]
    predicted_label = torch.LongTensor(test_label.size())
    with torch.no_grad():
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start+self.batch_size)
            if self.cuda:
                output = self.model(test_X[start:end].cuda())
            else:
                output = self.model(test_X[start:end])
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end
    if self.MCA:
        acc = self.eval_MCA(predicted_label.numpy(), test_label.numpy())
    else:
        acc = (predicted_label.numpy() == test_label.numpy()).mean()
    return acc
def compute_per_class_acc(self, test_label, predicted_label, nclass):
    """Mean per-class accuracy over class indices 0..nclass-1.

    A class with no test samples contributes NaN (0/0), matching the
    historical behavior.
    """
    per_class = torch.FloatTensor(nclass).fill_(0)
    for cls in range(nclass):
        mask = (test_label == cls)
        n_correct = torch.sum(test_label[mask] == predicted_label[mask]).float()
        per_class[cls] = n_correct / torch.sum(mask)
    return per_class.mean()
class LINEAR_LOGSOFTMAX(nn.Module):
    """A single linear layer followed by a log-softmax over classes."""

    def __init__(self, input_dim, nclass):
        super(LINEAR_LOGSOFTMAX, self).__init__()
        # Keep the historical attribute names (`fc`, `logic`) so existing
        # checkpoints and optimizers keep working.
        self.fc = nn.Linear(input_dim, nclass)
        self.logic = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return log class probabilities of shape (batch, nclass)."""
        return self.logic(self.fc(x))
| StarcoderdataPython |
107338 | <filename>shared.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# =============================================================================
#
# This file contains a simple PyTorch based implementation of multi-party
# computation (MPC) using additive sharing with uniform noise.
#
# =============================================================================
# dependencies:
import collections
import math
import torch
import approximations
import mpc_communicator as comm
SENTINEL = -1
def sample_uniform(shape, device):
    """Draw a float64 tensor of *shape* uniform on [-width/2, width/2].

    The width is read from the global SharedTensor.config at call time, so
    temporarily rebinding config.width (as the Beaver helpers do) changes
    the noise scale of subsequently generated shares.
    """
    return torch.empty(
        size=shape, dtype=torch.float64, device=device).uniform_(
        -SharedTensor.config.width / 2, SharedTensor.config.width / 2)
def share(tensor, num_parties):
    """Additively split *tensor* into `num_parties` shares that sum to it.

    With a single party there is nothing to hide and the tensor itself is
    returned; otherwise the result is a list of `num_parties` tensors: the
    first n-1 are uniform noise and the last is the residual.
    """
    if num_parties == 1:
        return tensor
    shares = [sample_uniform(tensor.size(), tensor.device)
              for _ in range(num_parties - 1)]
    # TODO: Use Ruiyu Zhu's trick when num_parties > 2
    shares.append(tensor - sum(shares))
    return shares
def beaver_triple(size1, size2, binary_op, device):
    """Generate a Beaver triple (a, b, c) with c = binary_op(a, b).

    a and b are fresh uniform secrets; c is shared with squared noise width
    because its magnitude is on the order of width**2.
    """
    a = sample_uniform(size1, device)
    b = sample_uniform(size2, device)
    c = binary_op(a, b)
    a, b = SharedTensor(a), SharedTensor(b)
    # Use w^2 noise for c since c = a*b
    w = SharedTensor.config.width
    SharedTensor.config.width = w**2
    c = SharedTensor(c)
    # Restore the global noise width for subsequent shares.
    SharedTensor.config.width = w
    return a, b, c
def beaver_protocol(x, y, binary_op):
    """Compute binary_op(x, y) on shared tensors via Beaver's trick.

    Opens epsilon = x - a and delta = y - b (both uniformly masked, hence
    safe to reveal), then reconstructs the product share-locally as
    c + eps*b + a*delta (+ eps*delta at rank 0).  Returns a raw share
    tensor, not a SharedTensor.
    """
    a, b, c = beaver_triple(x.shape, y.shape, binary_op, x.device)
    epsilon = x.sub(a).get_plain_text()
    delta = y.sub(b).get_plain_text()
    result = c._tensor
    result += binary_op(epsilon, b._tensor) + binary_op(a._tensor, delta)
    # Aggregate in all-reduce only at one of the parties
    # (the party with rank == 0), instead of downweighting
    # by the total number of parties
    if x._rank == 0:
        result += binary_op(epsilon, delta)
    return result
def beaver_square(x):
    """Compute x**2 on a shared tensor using a (a, a*a) Beaver pair.

    Squaring needs only one random secret, since
    x^2 = c + 2*eps*a (+ eps^2 at rank 0) with eps = x - a.
    Returns a raw share tensor, not a SharedTensor.
    """
    a = sample_uniform(x.shape, x.device)
    c = a * a
    a = SharedTensor(a)
    # Use w^2 noise for c since c = a^2
    w = SharedTensor.config.width
    SharedTensor.config.width = w**2
    c = SharedTensor(c)
    SharedTensor.config.width = w
    a = a.to(device=x.device)
    c = c.to(device=x.device)
    epsilon = x.sub(a).get_plain_text()
    result = c._tensor + 2 * epsilon * a._tensor
    # Aggregate in all-reduce only at one of the parties
    # (the party with rank == 0), instead of downweighting
    # by the total number of parties
    if x._rank == 0:
        result += epsilon * epsilon
    return result
def _broadcast(a, b):
if torch.is_tensor(b):
return torch.broadcast_tensors(a, b)[0].clone()
return a.clone()
class SharedConfig:
    """
    A configuration object for the SharedTensor class.
    The object stores key properties of the sharing scheme,
    including the width of the uniform noise being added
    and the parameters used by approximating functions
    (sign, exp, tanh, erf, etc.)
    """
    # (maxval, terms, coeffs) bundle for a Chebyshev series approximation.
    ChebyshevConfig = collections.namedtuple(
        "ChebyshevConfig", "maxval terms coeffs")

    def __init__(self):
        # Width of the uniform masking noise added to every share.
        self.width = 1000
        self.set_tanh()
        self.set_erf()
        # Iteration counts for the Newton-style approximations below.
        self.exp_scale = 20
        self.sign_iters = 60
        self.invsqrt_iters = 26
        self.inv8root_iters = 26
        self.reciprocal_iters = 30

    def set_tanh(self, maxval=20, terms=120):
        """Precompute Chebyshev coefficients for tanh on [-maxval, maxval]."""
        self.tanh = SharedConfig.ChebyshevConfig(
            maxval=maxval,
            terms=terms,
            coeffs=approximations.chebseries(math.tanh, maxval, terms))

    def set_erf(self, maxval=20, terms=100):
        """Precompute Chebyshev coefficients for erf on [-maxval, maxval]."""
        self.erf = SharedConfig.ChebyshevConfig(
            maxval=maxval,
            terms=terms,
            coeffs=approximations.chebseries(math.erf, maxval, terms))
# Arithmetically Shared Real Tensor
class SharedTensor:
    """
    Encrypted tensor type that is private.

    Each process holds one additive share (`self._tensor`); the plaintext is
    the all-reduce sum across parties.  Arithmetic with other SharedTensors
    uses Beaver triples; arithmetic with public scalars/tensors is applied
    share-locally (additive constants at rank 0 only).
    """
    # Process-global sharing configuration (noise width, approximation params).
    config = SharedConfig()
    # constructors:
    def __init__(
        self,
        tensor=None,
        shares=None,
        size=None,
        src=0,
    ):
        # src == SENTINEL builds an empty shell without touching the
        # communicator (used by clone()/shallow_copy()).
        if src == SENTINEL:
            return
        comm.initialize()
        # _rank is the rank of the current processes
        # _src is the rank of the source process that will provide data shares
        self._rank = comm.get_rank()
        self._src = src
        if self._rank == self._src:
            # encrypt tensor into private pair:
            if tensor is not None:
                assert torch.is_tensor(tensor), 'input must be a tensor'
                shares = share(tensor.type(torch.float64).contiguous(),
                               num_parties=comm.get_world_size())
            assert shares is not None, 'inputting some tensor is necessary'
            self._tensor = comm.scatter(shares, src, dtype=torch.float64)
        else:
            # TODO: Remove this line & adapt tests to use size arg
            if tensor is not None:
                size = tensor.size()
                device = tensor.device
            else:
                size = shares[0].size()
                device = shares[0].device
            self._tensor = comm.scatter(
                None, src, size=size, dtype=torch.float64, device=device)

    @staticmethod
    def init_comm(**kwargs):
        """Explicitly initialize the communicator (otherwise done lazily)."""
        comm.initialize(**kwargs)

    @property
    def shape(self):
        return self._tensor.shape

    def size(self, *args):
        """Return tensor's size (shape)"""
        return self._tensor.size(*args)

    def dim(self):
        """Return number of dimensions in the tensor"""
        return len(self.size())

    def nelement(self):
        """Return number of elements in the tensor"""
        return self._tensor.nelement()

    def __len__(self):
        """Return length of the tensor"""
        return self.size(0)

    def view(self, *args):
        """Resize the tensor"""
        result = self.shallow_copy()
        result._tensor = self._tensor.view(*args)
        return result

    def unsqueeze(self, *args, **kwargs):
        """Insert a dimension of size one (share-local, shape-only op)."""
        result = self.shallow_copy()
        result._tensor = self._tensor.unsqueeze(*args, **kwargs)
        return result

    def squeeze(self, *args, **kwargs):
        """Remove dimensions of size one (share-local, shape-only op)."""
        result = self.shallow_copy()
        result._tensor = self._tensor.squeeze(*args, **kwargs)
        return result

    def __getitem__(self, index):
        """Index into tensor"""
        result = self.shallow_copy()
        result._tensor = self._tensor[index]
        return result

    def __setitem__(self, index, value):
        """Set tensor values by index"""
        if torch.is_tensor(value):
            value = SharedTensor(value)
        assert isinstance(value, SharedTensor), \
            'Unsupported input type %s for __setitem__' % type(value)
        self._tensor.__setitem__(index, value._tensor)

    def get_plain_text(self):
        """Decrypt the tensor"""
        return comm.all_reduce(self._tensor.clone())

    def get_plain_text_async(self):
        """Decrypt the tensor asynchronously"""
        tensor = self._tensor.clone()
        return tensor, comm.dist.all_reduce(tensor, async_op=True)

    def clone(self):
        """Create a deepcopy"""
        result = SharedTensor(src=SENTINEL)
        result._rank = self._rank
        result._src = self._src
        result._tensor = self._tensor.clone()
        return result

    def add_(self, y):
        """Perform element-wise addition"""
        return self.add(y, self)

    def add(self, y, out=None):
        """Perform element-wise addition"""
        if out is None:
            out = self.shallow_copy()
        other = y._tensor if isinstance(y, SharedTensor) else y
        # Public addends are applied at rank 0 only so the all-reduce in
        # get_plain_text() counts them exactly once.
        if self._rank == 0 or isinstance(y, SharedTensor):
            torch.add(self._tensor, other=other, out=out._tensor)
        elif out._tensor.numel() == 0:
            # Non-zero ranks keep their share unchanged; still broadcast to
            # the output shape so all parties agree on it.
            out._tensor = _broadcast(self._tensor, other)
        return out

    def __iadd__(self, y):
        return self.add_(y)

    def __add__(self, y):
        return self.add(y)

    def __radd__(self, y):
        return self.add(y)

    def sub_(self, y):
        """Perform element-wise subtraction"""
        return self.sub(y, self)

    def sub(self, y, out=None):
        """Perform element-wise subtraction"""
        if out is None:
            out = self.shallow_copy()
        other = y._tensor if isinstance(y, SharedTensor) else y
        # Same rank-0 convention as add() for public subtrahends.
        if self._rank == 0 or isinstance(y, SharedTensor):
            torch.sub(self._tensor, other=other, out=out._tensor)
        elif out._tensor.numel() == 0:
            out._tensor = _broadcast(self._tensor, other)
        return out

    def __isub__(self, y):
        return self.sub_(y)

    def __sub__(self, y):
        return self.sub(y)

    def __rsub__(self, y):
        return -self.sub(y)

    def shallow_copy(self):
        """Copy metadata but allocate an empty share buffer."""
        result = SharedTensor(src=SENTINEL)
        result._rank = self._rank
        result._src = self._src
        if self._tensor.is_cuda:
            result._tensor = torch.cuda.DoubleTensor()
        else:
            result._tensor = torch.DoubleTensor()
        return result

    def mul(self, y, out=None):
        """Multiply by a SharedTensor (Beaver protocol) or a public value."""
        if out is None:
            out = self.shallow_copy()
        if isinstance(y, SharedTensor):
            result = beaver_protocol(self, y, torch.mul)
            out._tensor.resize_(result.shape)
            out._tensor.copy_(result)
        else:
            # Public multipliers scale every share equally.
            if isinstance(y, (int, float)):
                y = torch.tensor(data=y)
            torch.mul(self._tensor, other=y, out=out._tensor)
        return out

    def mul_(self, y):
        return self.mul(y, out=self)

    def __imul__(self, y):
        return self.mul_(y)

    def __mul__(self, y):
        return self.mul(y)

    def __rmul__(self, y):
        return self.mul(y)

    def div(self, y, out=None, scale=2):
        """Perform division by private nos. in [1, scale] or any public nos."""
        if out is None:
            out = self.shallow_copy()
        if isinstance(y, SharedTensor):
            # Private divisor: multiply by its Newton-iterated reciprocal.
            self.mul(y.reciprocal(scale=scale), out=out)
        else:
            if isinstance(y, (int, float)):
                y = torch.tensor(data=y)
            torch.div(self._tensor, other=y, out=out._tensor)
        return out

    def div_(self, y, scale=2):
        return self.div(y, out=self, scale=scale)

    def __truediv__(self, y, scale=2):
        return self.div(y, scale=scale)

    def matmul(self, y):
        """Matrix product; uses the Beaver protocol for private operands."""
        out = self.shallow_copy()
        if isinstance(y, SharedTensor):
            result = beaver_protocol(self, y, torch.matmul)
            out._tensor = result
        else:
            out._tensor = self._tensor.matmul(y)
        return out

    def conv2d(self, kernel, **kwargs):
        """Perform a 2D convolution using the given kernel"""
        def _conv2d(input, weight):
            return torch.nn.functional.conv2d(input, weight, **kwargs)
        out = self.shallow_copy()
        if isinstance(kernel, SharedTensor):
            result = beaver_protocol(self, kernel, _conv2d)
            out._tensor = result
        else:
            out._tensor = _conv2d(self._tensor, kernel)
        return out

    def avg_pool2d(self, kernel_size, **kwargs):
        """Applied 2D average-pooling."""
        out = self.shallow_copy()
        out._tensor = torch.nn.functional.avg_pool2d(
            self._tensor, kernel_size, **kwargs)
        return out

    def square(self):
        return self.clone().square_()

    def square_(self):
        """Square in place via the one-secret Beaver variant."""
        result = beaver_square(self)
        self._tensor.copy_(result)
        return self

    def abs(self):
        return self.clone().abs_()

    def abs_(self):
        # |x| = x * sign(x)
        return self.mul_(self.sign())

    def __abs__(self):
        return self.abs()

    def exp(self):
        return self.clone().exp_()

    def exp_(self):
        """Approximate exp via (1 + x/2^n)^(2^n), squaring n times.

        The noise width is shrunk by 2^n up front and doubled after each
        squaring (capped at its original value) to track the value scale.
        """
        w = SharedTensor.config.width
        n = SharedTensor.config.exp_scale
        SharedTensor.config.width = w / 2**n
        self.div_(2**n).add_(1)
        for _ in range(n):
            self.square_()
            SharedTensor.config.width = min(w, SharedTensor.config.width * 2)
        return self

    def softmax(self, axis):
        """Perform the softmax transformation"""
        return self.clone().softmax_(axis)

    def softmax_(self, axis):
        """Perform the softmax transformation"""
        # Subtract the sum of the numbers that are positive to ensure that
        # all numbers are non-positive. Subtracting the max is more common,
        # but hard to calculate with polynomials. Approximating the max
        # with a p-norm, where p is sufficiently large, is also possible,
        # yet risks leaking information if any intermediate results are large.
        x = self.clone()
        x.relu_()
        x = x.sum(axis, keepdim=True)
        self.sub_(x)
        # Exponentiate each entry
        self.exp_()
        # Normalize
        return self.mul_(
            self.sum(axis, keepdim=True).reciprocal_(
                scale=2 * self.shape[axis]))

    def sigmoid(self):
        return self.clone().sigmoid_()

    def sigmoid_(self):
        # sigmoid(x) = (tanh(x/2) + 1) / 2
        return self.div_(2).tanh_().div_(2).add_(0.5)

    def tanh_(self):
        """Chebyshev-series approximation of tanh on the configured range."""
        coeffs = SharedTensor.config.tanh.coeffs
        t = approximations.chebyshev(
            SharedTensor.config.tanh.terms,
            self / SharedTensor.config.tanh.maxval)
        self._tensor.fill_(0)
        # tanh is odd, so only odd-order terms contribute.
        for c, x in zip(coeffs[1::2], t):
            self.add_(x.mul_(c))
        return self

    def tanh(self):
        return self.clone().tanh_()

    def erf_(self):
        """Chebyshev-series approximation of erf on the configured range."""
        coeffs = SharedTensor.config.erf.coeffs
        t = approximations.chebyshev(
            SharedTensor.config.erf.terms,
            self / SharedTensor.config.erf.maxval)
        self._tensor.fill_(0)
        # erf is odd, so only odd-order terms contribute.
        for c, x in zip(coeffs[1::2], t):
            self.add_(x.mul_(c))
        return self

    def erf(self):
        return self.clone().erf_()

    def sign(self):
        return self.clone().sign_()

    def sign_(self):
        """Approximate sign(x) via Newton-Schulz iteration x <- x(3 - x^2)/2."""
        w = SharedTensor.config.width
        # Divide by 1e5, assuming that the absval. of the input is at most 1e4
        self.div_(1e5)
        # Reduce the width of the added noise in accordance with 1e5/1e4 = 10
        SharedTensor.config.width /= 10
        n_iter = SharedTensor.config.sign_iters
        for _ in range(n_iter):
            self.mul_(self.square().neg_().add_(3).div_(2))
        SharedTensor.config.width = w
        return self

    def reciprocal(self, scale=2):
        """
        Reciprocal on the range [1, scale].
        """
        return self.clone().reciprocal_(scale=scale)

    def reciprocal_(self, scale=2):
        """
        Reciprocal on the range [1, scale].
        """
        # Normalize into (0, 1], iterate y <- y(2 - x*y), then undo the scale.
        self.mul_(1 / scale)
        x = self.clone()
        self.neg_().add_(2)
        n_iter = SharedTensor.config.reciprocal_iters
        for _ in range(n_iter):
            self.mul_(self.mul(x).neg_().add_(2))
        self.mul_(1 / scale)
        return self

    def invsqrt(self):
        return self.clone().invsqrt_()

    def invsqrt_(self):
        """Approximate x^(-1/2) via y <- y(3 - x*y^2)/2 (Newton's method)."""
        x = self.clone()
        self.neg_().add_(3).div_(2)
        n_iter = SharedTensor.config.invsqrt_iters
        for _ in range(n_iter):
            self.mul_(self.square().mul(x).neg_().add_(3)).div_(2)
        return self

    def inv8root(self):
        return self.clone().inv8root_()

    def inv8root_(self):
        """Approximate x^(-1/8) via y <- y(9 - x*y^8)/8 (Newton's method)."""
        x = self.clone()
        self.neg_().add_(9).div_(8)
        n_iter = SharedTensor.config.inv8root_iters
        for _ in range(n_iter):
            y = self.clone()
            # y^8 via three in-place squarings.
            y.square_()
            y.square_()
            y.square_()
            self.mul_(y.mul(x).neg_().add_(9)).div_(8)
        return self

    def relu(self):
        return self.clone().relu_()

    def relu_(self):
        # relu(x) = (x + |x|) / 2
        return self.add_(self.abs()).div_(2)

    def neg_(self):
        self._tensor.neg_()
        return self

    def neg(self):
        return self.clone().neg_()

    def __neg__(self):
        return self.neg()

    def sum(self, *args, **kwargs):
        """Sum the entries of a tensor, along a given dimension if given"""
        out = self.shallow_copy()
        out._tensor = self._tensor.sum(*args, **kwargs)
        return out

    def mean(self, *args, **kwargs):
        """Average the entries of a tensor, along a given dimension if given"""
        out = self.shallow_copy()
        out._tensor = self._tensor.mean(*args, **kwargs)
        return out

    def t(self):
        return self.clone().t_()

    def t_(self):
        return self.transpose_(0, 1)

    def transpose(self, dim0, dim1):
        return self.clone().transpose_(dim0, dim1)

    def transpose_(self, dim0, dim1):
        self._tensor.transpose_(dim0, dim1)
        return self

    @property
    def T(self):
        return self.clone().transpose_(0, 1)

    @property
    def device(self):
        return self._tensor.device

    def to(self, device=None, copy=False, non_blocking=False):
        """Move/copy the underlying share to another device."""
        result = self.shallow_copy()
        result._tensor = self._tensor.to(
            device=device, copy=copy, non_blocking=non_blocking)
        return result

    def cpu(self):
        result = self.shallow_copy()
        result._tensor = self._tensor.cpu()
        return result

    def cuda(self, *args, **kwargs):
        result = self.shallow_copy()
        result._tensor = self._tensor.cuda(*args, **kwargs)
        return result
| StarcoderdataPython |
3270570 | import sys
import time
import subprocess
import os
import signal
# Watchdog for a srcds (Garry's Mod) game server: the server appends its
# current UNIX time to a heartbeat file every second; if that clock falls
# behind the system clock for 120 consecutive seconds the server is assumed
# frozen and its process is killed (an external supervisor restarts it).
# sys.argv[1] is the server's host port.
wait = False   # True after we have killed the server, until it recovers
timeout = 0    # consecutive seconds the server heartbeat has lagged
srvfil = open("./antifreeze/time_" + sys.argv[1] + ".txt", "r")
while True:
    srvtime = 0
    time.sleep(1)
    srvfil.seek(0)
    try:
        srvtime = int(srvfil.readlines()[0])
    except IndexError as e:
        # Heartbeat file momentarily empty (server mid-write); retry.
        print(e)
        continue
    systime = round(time.time())
    if srvtime >= systime - 2:
        # Heartbeat is fresh again; reset the lag counter.
        if timeout != 0:
            wait = False
            print("Server caught back up!")
            timeout = 0
    elif not wait:
        # BUG FIX: the kill check is now guarded by `not wait`, so a frozen
        # server receives exactly one SIGTERM instead of one per second
        # while we wait for it to come back.
        timeout = timeout + 1
        print("Server is behind! (" + str(timeout) + ")")
        if timeout == 120:
            # Find the PID of the srcds instance bound to this port.
            PID = int(subprocess.Popen("PID= ps -x | grep -v grep | grep \"srcds_linux -game garrysmod +hostport " + sys.argv[1] + "\" | awk '{print $1}'", shell=True, stdout=subprocess.PIPE).stdout.read().strip().decode())
            print("Server Frozen! killing process...")
            os.kill(PID, signal.SIGTERM)
            wait = True
            print("Process Killed! waiting for server to catch back up...")
1764020 | <gh_stars>0
import sys
import argparse
import torch
from genre.util.util_print import str_warning
from genre.datasets import get_dataset
from genre.models import get_model
def add_general_arguments(parser):
    """Register the training options shared by every experiment.

    Returns:
        (parser, unique_params): the parser with arguments added, and the
        set of option names that must NOT be overwritten when resuming
        from a checkpoint (see overwrite()).
    """
    # Parameters that will NOT be overwritten when resuming
    unique_params = {'gpu', 'resume', 'epoch', 'workers', 'batch_size', 'save_net', 'epoch_batches', 'logdir'}
    parser.add_argument('--gpu', default='1', type=str,
                        help='gpu to use')
    parser.add_argument('--manual_seed', type=int, default=15,
                        help='manual seed for randomness')
    parser.add_argument('--resume', type=int, default=0,
                        help='resume training by loading checkpoint.pt or best.pt. Use 0 for training from scratch, -1 for last and -2 for previous best. Use positive number for a specific epoch. \
Most options will be overwritten to resume training with exactly same environment')
    parser.add_argument(
        '--suffix', default='logs', type=str,
        help="Suffix for `logdir` that will be formatted with `opt`, e.g., '{classes}_lr{lr}'"
    )
    parser.add_argument('--epoch', type=int, default=0,
                        help='number of epochs to train')
    # Dataset IO
    parser.add_argument('--dataset', type=str, default='shapenet_4_channel',
                        help='dataset to use')
    parser.add_argument('--workers', type=int, default=0,
                        help='number of data loading workers')
    parser.add_argument('--classes', default='chair', type=str,
                        help='class to use')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='training batch size')
    parser.add_argument('--epoch_batches', default=1000, type=int, help='number of batches used per epoch')
    parser.add_argument('--eval_batches', default=200,
                        type=int, help='max number of batches used for evaluation per epoch')
    parser.add_argument('--eval_at_start', action='store_true',
                        help='run evaluation before starting to train')
    parser.add_argument('--log_time', action='store_true', help='adding time log')
    #parser.add_argument('--dataset_root', type=str, default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/cluttered_datasets/')#
    parser.add_argument('--shapenet_root', type=str, default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/')
    parser.add_argument('--dataset_root', type=str, default='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/cluttered_datasets/')
    parser.add_argument('--eval_mode', type=str, default='vali')
    # Network name
    parser.add_argument('--net', type=str, required=False,
                        help='network type to use', default='genre_given_depth')
    parser.add_argument('--baseline_remove_seen', type=int, default=0,
                        help='run unoccupied voxel removal baseline')
    parser.add_argument('--compute_chamfer_dist', type=int, default=0,
                        help='run unoccupied voxel removal baseline')
    parser.add_argument('--stability_loss', type=float, default=1e-7)
    parser.add_argument('--connectivity_loss', type=float, default=1e-10)
    # Optimizer
    parser.add_argument('--optim', type=str, default='adam',
                        help='optimizer to use')
    parser.add_argument('--lr', type=float, default=1.5*1e-4,
                        help='learning rate')
    parser.add_argument('--adam_beta1', type=float, default=0.5,
                        help='beta1 of adam')
    parser.add_argument('--adam_beta2', type=float, default=0.9,
                        help='beta2 of adam')
    parser.add_argument('--sgd_momentum', type=float, default=0.9,
                        help="momentum factor of SGD")
    parser.add_argument('--sgd_dampening', type=float, default=0,
                        help="dampening for momentum of SGD")
    parser.add_argument('--wdecay', type=float, default=0.0,
                        help='weight decay')
    # Logging and visualization
    parser.add_argument('--logdir', type=str, default='/home/willie/workspace/GenRe-ShapeHD/logs',
                        help='Root directory for logging. Actual dir is [logdir]/[net_classes_dataset]/[expr_id]')
    parser.add_argument('--log_batch', action='store_true',
                        help='Log batch loss')
    parser.add_argument('--expr_id', type=int, default=0,
                        help='Experiment index. non-positive ones are overwritten by default. Use 0 for code test. ')
    parser.add_argument('--save_net', type=int, default=1,
                        help='Period of saving network weights')
    parser.add_argument('--save_net_opt', action='store_true',
                        help='Save optimizer state in regular network saving')
    parser.add_argument('--vis_every_vali', default=1, type=int,
                        help="Visualize every N epochs during validation")
    parser.add_argument('--vis_every_train', default=1, type=int,
                        help="Visualize every N epochs during training")
    parser.add_argument('--vis_batches_vali', type=int, default=10,
                        help="# batches to visualize during validation")
    parser.add_argument('--vis_batches_train', type=int, default=10,
                        help="# batches to visualize during training")
    parser.add_argument('--tensorboard', action='store_true',
                        help='Use tensorboard for logging. If enabled, the output log will be at [logdir]/[tensorboard]/[net_classes_dataset]/[expr_id]')
    parser.add_argument('--vis_workers', default=1, type=int, help="# workers for the visualizer")
    parser.add_argument('--vis_param_f', default=None, type=str,
                        help="Parameter file read by the visualizer on every batch; defaults to 'visualize/config.json'")
    return parser, unique_params
def overwrite(opt, opt_f_old, unique_params):
    """Merge options saved at *opt_f_old* (a torch.save'd dict) into *opt*.

    Saved values replace current ones unless the option is listed in
    *unique_params* (kept as-is for the resumed run) or already equal.
    Saved keys that no longer exist are reported and dropped.
    """
    current = vars(opt)
    saved = torch.load(opt_f_old)
    for key, old_val in saved.items():
        if key not in current:
            print(str_warning, "Ignoring %s, an old option that no longer exists" % key)
            continue
        if key in unique_params or current[key] == old_val:
            continue
        print(str_warning, "Overwriting %s for resuming training: %s -> %s"
              % (key, str(current[key]), str(old_val)))
        current[key] = old_val
    return argparse.Namespace(**current)
def parse(add_additional_arguments=None):
    """Build the full option set: general, dataset-specific and model-specific.

    Args:
        add_additional_arguments: optional callable(parser) -> (parser, set)
            that registers extra options and returns their unique-param names.

    Returns:
        (opt, unique_params): parsed argparse.Namespace and the union of all
        option names protected from overwrite() on resume.
    """
    parser = argparse.ArgumentParser()
    parser, unique_params = add_general_arguments(parser)
    if add_additional_arguments is not None:
        parser, unique_params_additional = add_additional_arguments(parser)
        unique_params = unique_params.union(unique_params_additional)
    # First pass: only read --dataset / --net so the right sub-parsers can
    # be attached before the final parse.
    opt_general, _ = parser.parse_known_args()
    dataset_name, net_name = opt_general.dataset, opt_general.net
    del opt_general
    # NOTE(review): hard-codes the dataset module regardless of --dataset;
    # presumably a temporary override — confirm before relying on --dataset.
    dataset_name='shapenet'
    # Add parsers depending on dataset and models
    parser, unique_params_dataset = get_dataset(dataset_name).add_arguments(parser)
    parser, unique_params_model = get_model(net_name).add_arguments(parser)
    # Manually add '-h' after adding all parser arguments
    if '--printhelp' in sys.argv:
        sys.argv.append('-h')
    opt = parser.parse_args()
    unique_params = unique_params.union(unique_params_dataset)
    unique_params = unique_params.union(unique_params_model)
    return opt, unique_params
| StarcoderdataPython |
1718855 |
import itertools
import time
from testbase import cur
# NOTE(review): Python 2 syntax (print statement). `cur` is a database
# cursor imported from testbase. Re-runs the same query forever, printing
# the iteration number, row count and wall-clock time — apparently a probe
# for observing whether the connection sees concurrent updates to `foo`.
for num in itertools.count():
    cur.execute("select * from foo")
    foovals = cur.fetchall()
    print num, 'I fetched', len(foovals), 'values.', time.ctime()
| StarcoderdataPython |
3338347 | from contextlib import contextmanager
import json
from pprint import PrettyPrinter
from toolspy import merge
def run_interactive_shell(app, db):
    """Drop into an interactive console preloaded with Flask test helpers.

    Pushes a request context, disables CSRF, and exposes a grab-bag of
    locals (HTTP verbs, JSON helpers, a login context manager, SQLAlchemy
    session shortcuts, and the app's route map) to an IPython shell, or a
    plain `code` console if IPython is unavailable.
    """
    app.config['WTF_CSRF_ENABLED'] = False
    # Needed for making the console work in app request context
    ctx = app.test_request_context()
    ctx.push()
    # app.preprocess_request()
    # The test client. You can do .get and .post on all endpoints
    client = app.test_client()
    get = client.get
    post = client.post
    put = client.put
    patch = client.patch
    delete = client.delete
    # Helper method for sending JSON POST.
    def load_json(resp):
        return json.loads(resp.data)
    def jpost(url, data, raw=False):
        # POST *data* as JSON; return the parsed body, or the raw response
        # object when raw=True.
        response = client.post(
            url, data=json.dumps(data),
            content_type="application/json")
        if raw:
            return response
        return load_json(response)
    def jput(url, data, raw=False):
        # PUT counterpart of jpost.
        response = client.put(
            url, data=json.dumps(data),
            content_type="application/json")
        if raw:
            return response
        return load_json(response)
    def jpatch(url, data, raw=False):
        # PATCH counterpart of jpost.
        response = client.patch(
            url, data=json.dumps(data),
            content_type="application/json")
        if raw:
            return response
        return load_json(response)
    def jget(url, **kwargs):
        # GET returning the parsed JSON body.
        return load_json(get(url, **kwargs))
    # Use this in your code as `with login() as c:` and you can use
    # all the methods defined on `app.test_client`
    @contextmanager
    def login(email=None, password=None):
        client.post('/login', data={'email': email, 'password': password})
        yield
        client.get('/logout', follow_redirects=True)
    # SQLAlchemy session shortcuts.
    q = db.session.query
    add = db.session.add
    addall = db.session.add_all
    commit = db.session.commit
    # NOTE(review): rebinding `delete` here shadows the HTTP DELETE helper
    # bound above — only the DB-session delete is available in the shell.
    delete = db.session.delete
    sitemap = app.url_map._rules_by_endpoint
    # Bidirectional route <-> endpoint lookup tables for convenience.
    routes = {}
    endpoints = {}
    pprint = PrettyPrinter(indent=4).pprint
    for rule in app.url_map._rules:
        routes[rule.rule] = rule.endpoint
        endpoints[rule.endpoint] = rule.rule
    try:
        import IPython
        IPython.embed()
    except:
        # IPython not installed (or failed): fall back to the stdlib REPL.
        import code
        code.interact(local=merge(locals(), globals()))
| StarcoderdataPython |
3383884 | <filename>vidbench/visualize.py
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
from typing import List, Optional
import glob
def make_video_table(
    data_dir: str,
    youtube_ids: List[str],
    ground_truth_labels: List[str],
    predictions: Optional[List[str]] = None,
) -> str:
    """
    Make an HTML table where each cell contains a video and metadata.

    Inputs:
      data_dir: directory containing the Kinetics videos, laid out as
                data_dir/<class>/<youtube_id>_*.mp4
      youtube_ids: list of strings of YouTube ids, one for each video to display
        these videos should be part of the Kinetics dataset
      ground_truth_labels: list of strings of ground truth labels, one for each video
      predictions: [optional] list of strings of model predictions, one for each video
        (rendered black when it matches the ground truth, red otherwise)

    Outputs:
      video_html: an HTML string building a table; to be rendered with
      IPython.display.HTML

    Raises:
      FileNotFoundError: if no video file is found for a given id.

    Example:
      from IPython.display import HTML
      HTML(make_video_table(DATA_DIR, YOUTUBE_IDS, TRUE_LABELS_STR))
    """
    VIDEOS_PER_ROW = 4
    WIDTH = 210
    HEIGHT = WIDTH * 2 // 3
    # For videos to display in a notebook, paths must be relative to the
    # notebook directory: strip any absolute prefix up to the "data" folder.
    # BUG FIX: the old code sliced at find()'s result unconditionally, so a
    # path without "data" was truncated to its last character (-1 index).
    data_idx = data_dir.find("data")
    if data_idx != -1:
        data_dir = data_dir[data_idx:]
    filepaths = []
    for youtube_id in youtube_ids:
        matches = glob.glob(f"{data_dir}/*/{youtube_id}_*.mp4")
        if not matches:
            raise FileNotFoundError(
                f"no video found for id {youtube_id!r} under {data_dir!r}")
        filepaths.append(matches[0])
    # make html video table
    video_html = ["<table><tr>"]
    for i, filepath in enumerate(filepaths):
        prediction_par = ""
        if predictions is not None:
            # Correct predictions render black, incorrect ones red.
            color = "black" if predictions[i] == ground_truth_labels[i] else "red"
            prediction_par = f"<p style='color:{color};'>{predictions[i]}</p>"
        video_html.append(
            f"""
            <td><h2>{i}</h2><p>{ground_truth_labels[i]}</p><video width="{WIDTH}" height="{HEIGHT}" controls>
                  <source src="{filepath}" type="video/mp4">
                </video>{prediction_par}</td>"""
        )
        # Start a new table row after every VIDEOS_PER_ROW cells.
        if (i + 1) % VIDEOS_PER_ROW == 0:
            video_html.append("</tr><tr>")
    video_html.append("</tr></table>")
    return "".join(video_html)
| StarcoderdataPython |
3271665 | <reponame>Illumina/SMNCopyNumberCaller
from charts.colors import color_arr
from charts.scale import scale, y_scale
class SvgElement:
    """Minimal builder for one SVG element.

    Attributes are stored as a list of (name, value) tuples; `value` is
    either None (element rendered without a closing tag), a string, or a
    list of strings / child SvgElements.
    """

    def __init__(self, name, attrs, value=None):
        self.name = name
        self.value = value
        self.attrs = attrs

    def to_string(self):
        """Serialize to SVG markup.

        Note: an element with value=None is emitted without a closing tag
        (historical behavior relied on by the chart builders).
        """
        result = "<%s" % self.name
        for attr in self.attrs:
            result += " %s=\"%s\"" % (attr[0], attr[1])
        result += ">"
        if self.value is not None:
            for v in self.value:
                if type(v) is str:
                    result += v
                else:
                    result += v.to_string()
            result += "</%s>" % self.name
        return result

    def add_attr(self, attr_tuple):
        """Append a new (name, value) attribute tuple."""
        self.attrs.append(attr_tuple)

    def modify_attr(self, attr_tuple):
        """Replace the value of every attribute matching attr_tuple[0].

        BUG FIX: attributes are immutable tuples, so the previous
        `attr[1] = ...` raised TypeError; replace the whole tuple instead.
        """
        for i, attr in enumerate(self.attrs):
            if attr[0] == attr_tuple[0]:
                self.attrs[i] = attr_tuple
def line(x1, y1, x2, y2, color="black", thickness=1, opacity=1.0, dashes=0):
    """Build an SVG <line> element between (x1, y1) and (x2, y2)."""
    attrs = [
        ("x1", x1),
        ("x2", x2),
        ("y1", y1),
        ("y2", y2),
        ("stroke", color),
        ("stroke-width", thickness),
        ("opacity", opacity),
        ("stroke-dasharray", dashes),
    ]
    return SvgElement("line", attrs)
def rect(x, y, width, height, border_color="black", fill_color="black", border_width=1, corner_radius=0, opacity=1.0):
    """Build an SVG <rect> element with the given geometry and styling."""
    attrs = [
        ("x", x),
        ("y", y),
        ("width", width),
        ("height", height),
        ("stroke", border_color),
        ("stroke-width", border_width),
        ("fill", fill_color),
        ("rx", corner_radius),
        ("opacity", opacity),
    ]
    return SvgElement("rect", attrs)
def circle(x, y, r, fill_color="black", border_color="black", border_width=1):
    """Build an SVG <circle> element centered at (x, y) with radius r.

    BUG FIX: border_width previously defaulted to "black", emitting the
    invalid attribute stroke-width="black"; it now defaults to 1.
    """
    return SvgElement("circle", [
        ("cx", x),
        ("cy", y),
        ("r", r),
        ("stroke", border_color),
        ("stroke-width", border_width),
        ("fill", fill_color)
    ])
def text(x, y, txt, style="font: 13px sans-serif", transform=""):
    """Build an SVG <text> element with *txt* as its content."""
    attrs = [
        ("x", x),
        ("y", y),
        ("style", style),
        ("transform", transform),
    ]
    return SvgElement("text", attrs, txt)
def path(points, color="black", thickness=1, dashes=0, opacity=1.0):
    """Build an SVG <polyline> through *points* (iterable of "x,y" strings)."""
    attrs = [
        ("points", " ".join(points)),
        ("stroke", color),
        ("fill", "none"),
        ("stroke-width", thickness),
        ("opacity", opacity),
        ("stroke-dasharray", dashes),
    ]
    return SvgElement("polyline", attrs)
def add_star_to_13(x_axis, y_axis):
    """Place a "*" marker just below the x-axis at x == 13.

    # NOTE(review): 13 presumably marks a special position in this chart
    # (e.g. a flagged copy-number value) — confirm against the callers.
    """
    x = scale(13, x_axis) - 10
    y = y_scale(y_axis["min"], y_axis) + 22
    return [text(x, y, "*", style="font: 18px sans-serif")]
def add_tooltip(element, txt):
    """Attach an SVG <title> child (rendered as a hover tooltip) to *element*.

    BUG FIX: the old code did `element.value += title_el` for non-None
    values, which raises TypeError (an SvgElement is not iterable, so it
    cannot be concatenated to a list or string). The title is now appended
    to a list value, or the existing scalar value is wrapped in a list.
    """
    title_el = SvgElement("title", [], txt)
    if element.value is None:
        element.value = [title_el]
    elif isinstance(element.value, list):
        element.value.append(title_el)
    else:
        # value is a bare string (e.g. from text()); wrap both so
        # to_string() renders the text followed by the tooltip.
        element.value = [element.value, title_el]
    return element
def headers(height=None):
    """Create the root <svg> element, optionally fixing its height via a
    style attribute."""
    root = SvgElement("svg", [
        ("xmlns", "http://www.w3.org/2000/svg"),
        ("xmlns:xlink", "http://www.w3.org/1999/xlink"),
    ])
    if height is not None:
        root.add_attr(("style", "height: %s; padding-top: 50" % height))
    return root
def x_axis_lines(x_axis, y_axis):
    """Vertical grid lines, one per x-axis tic."""
    y_bottom = y_scale(y_axis["min"], y_axis)
    y_top = y_scale(y_axis["max"], y_axis)
    return [
        line(scale(tic, x_axis), y_bottom, scale(tic, x_axis), y_top,
             opacity=0.5, dashes=3)
        for tic in x_axis["tics"]
    ]
def right_axis_line(x_axis, y_axis):
    """The solid vertical line closing the plot area on the right."""
    x_right = scale(x_axis["max"], x_axis)
    return [
        line(x_right,
             y_scale(y_axis["min"], y_axis),
             x_right,
             y_scale(y_axis["max"], y_axis))
    ]
def x_axis_tics(x_axis, y_axis):
    """The left axis line plus one small tic mark below each x-axis tic."""
    x_left = scale(x_axis["min"], x_axis)
    y_bottom = y_scale(y_axis["min"], y_axis)
    # First element: the vertical axis line at the left edge of the plot.
    tics = [line(x_left, y_bottom, x_left, y_scale(y_axis["max"], y_axis))]
    # Then a 6px downward tick under each labeled position.
    tics.extend(
        line(scale(tic, x_axis), y_bottom, scale(tic, x_axis), y_bottom + 6)
        for tic in x_axis["tics"]
    )
    return tics
def x_axis_text(x_axis, y_axis):
    """Numeric labels under each x-axis tic."""
    y_label = y_scale(y_axis["min"], y_axis) + 18
    return [
        text(scale(tic, x_axis) - 4, y_label, "%s" % tic)
        for tic in x_axis["tics"]
    ]
def x_axis_title(x_axis, y_axis):
    """The x-axis title, roughly centered under the plot."""
    midpoint = ((x_axis["max"] - x_axis["min"]) / 2) + x_axis["min"]
    # Shift left by ~6px per character to approximate centering.
    x_pos = scale(midpoint, x_axis) - (len(x_axis["title"]) * 6)
    y_pos = y_scale(y_axis["min"], y_axis) + 34
    return [text(x_pos, y_pos, x_axis["title"], style="font: 18px sans-serif")]
def y_axis_lines(x_axis, y_axis):
    """Horizontal grid lines, one per y-axis tic."""
    x_left = scale(x_axis["min"], x_axis)
    x_right = scale(x_axis["max"], x_axis)
    return [
        line(x_left, y_scale(tic, y_axis), x_right, y_scale(tic, y_axis),
             opacity=0.5, dashes=3)
        for tic in y_axis["tics"]
    ]
def y_axis_tics(x_axis, y_axis, side):
    """The bottom axis line plus tic marks on the chosen side.

    ``side`` is "left" for outward ticks at the left edge; anything else
    places them at the right edge.
    """
    if side == "left":
        x_start = scale(x_axis["min"], x_axis) - 6
        x_end = scale(x_axis["min"], x_axis)
    else:
        x_start = scale(x_axis["max"], x_axis)
        x_end = scale(x_axis["max"], x_axis) + 4
    y_bottom = y_scale(y_axis["min"], y_axis)
    # First element: the horizontal axis line along the bottom of the plot.
    tics = [
        line(scale(x_axis["min"], x_axis), y_bottom,
             scale(x_axis["max"], x_axis), y_bottom)
    ]
    tics.extend(
        line(x_start, y_scale(tic, y_axis), x_end, y_scale(tic, y_axis))
        for tic in y_axis["tics"]
    )
    return tics
def zeroes(value, result=1):
    """Return the number of decimal digits of ``value`` (1 for values < 10).

    ``result`` is the starting count; the recursive original threaded it
    through as an accumulator, this version simply iterates.
    """
    while value >= 10:
        value = value / 10
        result = result + 1
    return result
def y_axis_text(x_axis, y_axis, side):
    """Numeric labels beside each y-axis tic on the chosen side."""
    if side == "left":
        base_x = scale(x_axis["min"], x_axis) - 8
    else:
        base_x = scale(x_axis["max"], x_axis) + 6
    labels = []
    for tic in y_axis["tics"]:
        x_pos = base_x
        if side == "left":
            # Shift left-side labels outward by ~8px per digit so they
            # right-align against the axis.
            x_pos = base_x - (zeroes(tic) * 8)
        labels.append(text(x_pos, y_scale(tic, y_axis) + 3, "%s" % tic))
    return labels
def y_axis_title(x_axis, y_axis, side):
    """The rotated y-axis title on the chosen ("left"/other) side.

    Coordinates are expressed in the rotated frame, hence the swapped /
    negated values.
    """
    if side == "left":
        rotation = "rotate(270)"
        x_pos = -(y_scale(y_axis["min"], y_axis))
        y_pos = 45
    else:
        rotation = "rotate(90)"
        x_pos = y_scale(y_axis["max"], y_axis)
        y_pos = -35 - scale(x_axis["max"], x_axis)
    return [
        text(x_pos, y_pos, y_axis["title"],
             style="font: 18px sans-serif", transform=rotation)
    ]
def title(txt, x_axis):
    """The chart title, anchored at the left edge of the plot area."""
    x_pos = scale(x_axis["min"], x_axis)
    return [text(x_pos, 30, txt, style="font: 22px sans-serif")]
def get_keys(key_items, x_axis, y_axis, element_type="line"):
    """Legend entries (symbol + label) placed to the right of the plot.

    Colors cycle through the module-level ``color_arr``; labels use the
    part of each key before the first underscore.
    """
    x_pos = scale(x_axis["max"], x_axis) + 60
    y_base = y_scale(y_axis["max"], y_axis)
    entries = []
    for idx, key in enumerate(key_items):
        color = color_arr[idx % len(color_arr)]
        y_pos = y_base + (15 * (idx + 1))
        entries.append(get_key_symbol(x_pos, y_pos, element_type, color))
        entries.append(text(x_pos + 20, y_pos, key.split('_')[0]))
    return entries
def get_key_symbol(x, y, element_type, color):
    """Legend symbol for ``element_type`` ("line", "circle" or "rect").

    Raises KeyError for an unknown element_type, matching the previous
    dict-lookup behavior. The old implementation built all three SVG
    elements eagerly and discarded two of them; dispatch lazily instead.
    """
    builders = {
        "line": lambda: line(x, y - 6, x + 15, y - 6, color=color, thickness=3),
        "circle": lambda: circle(x + 10, y - 4, 4, fill_color=color, border_color=color),
        "rect": lambda: rect(x + 6, y - 8, 8, 8, fill_color=color, border_color=color),
    }
    return builders[element_type]()
def add_chart_to_page(page, chart):
    """Append ``chart`` to the page element's children, creating the child
    list first if the page has none, and return the page."""
    if page.value is None:
        page.value = []
    page.value.append(chart)
    return page
| StarcoderdataPython |
3377991 | # Relational
# Copyright (C) 2008 Salvo "LtWorf" Tomaselli
#
# Relational is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# author Salvo "LtWorf" Tomaselli <<EMAIL>>
#
# This module provides a classes to represent relations and to perform
# relational operations on them.
import csv
from collections import deque
from itertools import chain, repeat
# from relational.rtypes import *
class Relation(object):
    '''
    This object defines a relation (as a group of consistent tuples) and operations.

    A relation is a particular kind of set, which has a number of named attributes and
    a number of tuples, which must express a value for every attribute.

    Set operations like union, intersection and difference are restricted and can only be
    performed on relations which share the same set of named attributes.

    The constructor optionally accepts a filename and then it will load the relation from
    that file.
    If no parameter is supplied an empty relation is created.
    Files need to be comma separated as described in RFC4180.
    The first line need to contain the attributes of the relation while the
    following lines contain the tuples of the relation.

    An empty relation needs a header, and can be filled using the insert()
    method.
    '''

    # Relations are mutable, so they must not be hashable.
    __hash__ = None

    def __init__(self, filename=""):
        # When True, self.content is shared with another relation and must
        # be copied before any in-place modification (see _make_writable()).
        self._readonly = False
        self.content = set()

        if len(filename) == 0:  # Empty relation
            self.header = Header([])
            return
        with open(filename) as fp:
            reader = csv.reader(fp)  # Creating a csv reader
            self.header = Header(next(reader))  # read 1st line
            # Drain the generator with a zero-length deque: insert() is
            # called purely for its side effect.
            iterator = ((self.insert(i) for i in reader))
            deque(iterator, maxlen=0)

    def _make_duplicate(self, copy):
        '''Flag that the relation "copy" is pointing
        to the same set as this relation.'''
        self._readonly = True
        copy._readonly = True

    def _make_writable(self, copy_content=True):
        '''If this relation is marked as readonly, this
        method will copy the content to make it writable too.

        If copy_content is set to False, the caller must
        separately copy the content.'''
        if self._readonly:
            self._readonly = False
            if copy_content:
                self.content = set(self.content)

    def __iter__(self):
        return iter(self.content)

    def __contains__(self, key):
        return key in self.content

    def save(self, filename):
        '''
        Saves the relation in a file. Will save using the csv
        format as defined in RFC4180.
        '''
        with open(filename, 'w') as fp:
            writer = csv.writer(fp)  # Creating csv writer
            # It wants an iterable containing iterables
            head = (self.header,)
            writer.writerows(head)
            # Writing content, already in the correct format
            writer.writerows(self.content)

    def _rearrange(self, other):
        '''If two relations share the same attributes in a different order, this method
        will use projection to make them have the same attributes' order.
        It is not exactly related to relational algebra. Just a method used
        internally.
        Will raise an exception if they don't share the same attributes.'''
        if not isinstance(other, relation):
            raise TypeError('Expected an instance of the same class')
        elif self.header == other.header:
            return other
        elif self.header.sharedAttributes(other.header) == len(self.header):
            return other.projection(self.header)
        raise TypeError('Relations differ: [%s] [%s]' % (
            ','.join(self.header), ','.join(other.header)
        ))

    def selection(self, expr):
        '''
        Selection, expr must be a valid Python expression; can contain field names.

        WARNING: expr is compiled and eval()'d — never pass untrusted input.
        '''
        newt = relation()
        newt.header = Header(self.header)
        c_expr = compile(expr, 'selection', 'eval')

        for i in self.content:
            # Fills the attributes dictionary with the values of the tuple
            attributes = {attr: i[j].autocast()
                          for j, attr in enumerate(self.header)
                          }
            try:
                if eval(c_expr, attributes):
                    newt.content.add(i)
            except Exception as e:
                raise Exception(
                    "Failed to evaluate %s\n%s" % (expr, e.__str__()))
        return newt

    def product(self, other):
        '''
        Cartesian product. Attributes of the relations must differ.
        '''
        if (not isinstance(other, relation)):
            raise Exception('Operand must be a relation')
        if self.header.sharedAttributes(other.header) != 0:
            raise Exception(
                'Unable to perform product on relations with colliding attributes'
            )
        newt = relation()
        newt.header = Header(self.header + other.header)

        for i in self.content:
            for j in other.content:
                newt.content.add(i + j)
        return newt

    def projection(self, *attributes):
        '''
        Can be called in two different ways:
        a.projection('field1','field2')
        or
        a.projection(['field1','field2'])

        The cardinality of the result might be less than the cardinality
        of the original object.
        '''
        # Robustness fix: a call with no attributes used to raise a bare
        # IndexError below; report it as an invalid projection instead.
        if len(attributes) == 0:
            raise Exception('Invalid attributes for projection')
        # Parameters are supplied in a list, instead with multiple parameters
        if not isinstance(attributes[0], str):
            attributes = attributes[0]
        ids = self.header.getAttributesId(attributes)

        if len(ids) == 0:
            raise Exception('Invalid attributes for projection')
        newt = relation()
        # Create the header
        h = (self.header[i] for i in ids)
        newt.header = Header(h)

        # Create the body
        for i in self.content:
            row = (i[j] for j in ids)
            newt.content.add(tuple(row))
        return newt

    def rename(self, params):
        '''
        Takes a dictionary.

        Will replace the field name as the key with its value.
        For example if you want to rename a to b, call
        rel.rename({'a':'b'})
        '''
        newt = relation()
        newt.header = self.header.rename(params)

        # The content is shared (not copied); mark both as read-only.
        newt.content = self.content
        self._make_duplicate(newt)
        return newt

    def intersection(self, other):
        '''
        Intersection operation. The result will contain items present in both
        operands.
        Will return an empty one if there are no common items.
        '''
        other = self._rearrange(other)  # Rearranges attributes' order
        newt = relation()
        newt.header = Header(self.header)

        newt.content = self.content.intersection(other.content)
        return newt

    def difference(self, other):
        '''Difference operation. The result will contain items present in first
        operand but not in second one.
        '''
        other = self._rearrange(other)  # Rearranges attributes' order
        newt = relation()
        newt.header = Header(self.header)
        newt.content = self.content.difference(other.content)
        return newt

    def division(self, other):
        '''Division operator

        The division is a binary operation that is written as R ÷ S. The
        result consists of the restrictions of tuples in R to the
        attribute names unique to R, i.e., in the header of R but not in the
        header of S, for which it holds that all their combinations with tuples
        in S are present in R.
        '''
        # d_headers are the headers from self that aren't also headers in other
        d_headers = tuple(set(self.header) - set(other.header))

        # Wikipedia defines the division as follows:
        # a1,....,an are the d_headers
        # T := πa1,...,an(R) × S
        # U := T - R
        # V := πa1,...,an(U)
        # W := πa1,...,an(R) - V
        # W is the result that we want
        t = self.projection(d_headers).product(other)
        return self.projection(d_headers).difference(t.difference(self).projection(d_headers))

    def union(self, other):
        '''Union operation. The result will contain items present in first
        and second operands.
        '''
        other = self._rearrange(other)  # Rearranges attributes' order
        newt = relation()
        newt.header = Header(self.header)
        newt.content = self.content.union(other.content)
        return newt

    def thetajoin(self, other, expr):
        '''Defined as product and then selection with the given expression.'''
        return self.product(other).selection(expr)

    def outer(self, other):
        '''Does a left and a right outer join and returns their union.'''
        a = self.outer_right(other)
        b = self.outer_left(other)

        return a.union(b)

    def outer_right(self, other):
        '''
        Outer right join. Considers self as left and param as right. If the
        tuple has no correspondence, empty attributes are filled with a "---"
        string. This is due to the fact that the None token would cause
        problems when saving and reloading the relation.
        Just like natural join, it works considering shared attributes.
        '''
        return other.outer_left(self)

    def outer_left(self, other, swap=False):
        '''
        See documentation for outer_right
        '''
        shared = self.header.intersection(other.header)

        newt = relation()  # Creates the new relation

        # Creating the header with all the fields, done like that because order is
        # needed
        h = (i for i in other.header if i not in shared)
        newt.header = Header(chain(self.header, h))

        # Shared ids of self
        sid = self.header.getAttributesId(shared)
        # Shared ids of the other relation
        oid = other.header.getAttributesId(shared)
        # Non shared ids of the other relation
        noid = [i for i in range(len(other.header)) if i not in oid]

        for i in self.content:
            # Did the tuple participate in the join?
            added = False
            for j in other.content:
                match = True
                for k in range(len(sid)):
                    match = match and (i[sid[k]] == j[oid[k]])

                if match:
                    item = chain(i, (j[l] for l in noid))
                    newt.content.add(tuple(item))
                    added = True
            # If it didn't participate, adds it padded with "---"
            if not added:
                item = chain(i, repeat(rstring('---'), len(noid)))
                newt.content.add(tuple(item))

        return newt

    def join(self, other):
        '''
        Natural join, joins on shared attributes (one or more). If there are no
        shared attributes, it will behave as the cartesian product.
        '''
        # List of attributes in common between the relations
        shared = self.header.intersection(other.header)

        newt = relation()  # Creates the new relation

        # Creating the header with all the fields, done like that because order is
        # needed
        h = (i for i in other.header if i not in shared)
        newt.header = Header(chain(self.header, h))

        # Shared ids of self
        sid = self.header.getAttributesId(shared)
        # Shared ids of the other relation
        oid = other.header.getAttributesId(shared)
        # Non shared ids of the other relation
        noid = [i for i in range(len(other.header)) if i not in oid]

        for i in self.content:
            for j in other.content:
                match = True
                for k in range(len(sid)):
                    match = match and (i[sid[k]] == j[oid[k]])

                if match:
                    item = chain(i, (j[l] for l in noid))
                    newt.content.add(tuple(item))

        return newt

    def __eq__(self, other):
        if not isinstance(other, relation):
            return False

        if len(self.content) != len(other.content):
            return False

        if set(self.header) != set(other.header):
            return False

        # Rearranges attributes' order so can compare tuples directly
        other = self._rearrange(other)

        # comparing content
        return self.content == other.content

    def __len__(self):
        return len(self.content)

    def __str__(self):
        m_len = [len(i) for i in self.header]  # Maximum length per column

        for f in self.content:
            for col, i in enumerate(f):
                if len(i) > m_len[col]:
                    m_len[col] = len(i)

        res = ""
        for f, attr in enumerate(self.header):
            res += "%s" % (attr.ljust(2 + m_len[f]))

        for r in self.content:
            res += "\n"
            for col, i in enumerate(r):
                res += "%s" % (i.ljust(2 + m_len[col]))

        return res

    def update(self, expr, dic):
        '''
        Updates certain values of a relation.

        expr must be a valid Python expression that can contain field names.

        This operation will change the relation itself instead of generating a new one,
        updating all the tuples where expr evaluates as True.

        Dic must be a dictionary that has the form "field name":"new value". Every kind of value
        will be converted into a string.

        Returns the number of affected rows.
        '''
        self._make_writable(copy_content=False)
        affected = self.selection(expr)
        not_affected = self.difference(affected)

        new_values = tuple(
            zip(self.header.getAttributesId(dic.keys()), dic.values())
        )

        for i in set(affected.content):
            i = list(i)

            for column, value in new_values:
                i[column] = value
            # insert() re-wraps the values (rstring) and deduplicates.
            not_affected.insert(i)

        self.content = not_affected.content
        return len(affected)

    def insert(self, values):
        '''
        Inserts a tuple in the relation.
        This function will not insert duplicate tuples.
        All the values will be converted in string.
        Will return the number of inserted rows.

        Will fail if the tuple has the wrong amount of items.
        '''
        if len(self.header) != len(values):
            raise Exception(
                'Tuple has the wrong size. Expected %d, got %d' % (
                    len(self.header),
                    len(values)
                )
            )
        self._make_writable()
        prevlen = len(self.content)
        self.content.add(tuple(map(rstring, values)))
        return len(self.content) - prevlen

    def delete(self, expr):
        '''
        Delete, expr must be a valid Python expression; can contain field names.

        This operation will change the relation itself instead of generating a new one,
        deleting all the tuples where expr evaluates as True.

        Returns the number of affected rows.'''
        l = len(self.content)
        self._make_writable(copy_content=False)
        self.content = self.difference(self.selection(expr)).content
        # Bug fix: the number of removed tuples is before - after; the old
        # code returned after - before, i.e. a non-positive value, which
        # contradicts the documented "number of affected rows".
        return l - len(self.content)
class Header(tuple):
    '''This class defines the header of a relation.
    It is used within relations to know if requested operations are accepted.'''

    # Since a Header is a tuple subclass, the field list is the tuple
    # content itself; __new__ freezes it, __init__ only validates.
    def __new__(cls, fields):
        return super(Header, cls).__new__(cls, tuple(fields))

    def __init__(self, *args, **kwargs):
        '''Accepts a list with attributes' names. Names MUST be unique.'''
        for i in self:
            if not is_valid_relation_name(i):
                raise Exception('"%s" is not a valid attribute name' % i)
        if len(self) != len(set(self)):
            raise Exception('Attribute names must be unique')

    def __repr__(self):
        return "Header(%s)" % super(Header, self).__repr__()

    def rename(self, params):
        '''Returns a new header, with renamed fields.

        params is a dictionary of {old:new} names
        '''
        attrs = list(self)
        for old, new in params.items():
            if not is_valid_relation_name(new):
                raise Exception('%s is not a valid attribute name' % new)
            try:
                id_ = attrs.index(old)
            except ValueError:
                # Bug fix: this was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit; only a missing field name
                # (ValueError from list.index) is expected here.
                raise Exception('Field not found: %s' % old)
            attrs[id_] = new
        return Header(attrs)

    def sharedAttributes(self, other):
        '''Returns how many attributes this header has in common with a given one'''
        return len(set(self).intersection(set(other)))

    def union(self, other):
        '''Returns the union of the sets of attributes with another header.'''
        return set(self).union(set(other))

    def intersection(self, other):
        '''Returns the set of common attributes with another header.'''
        return set(self).intersection(set(other))

    def getAttributesId(self, param):
        '''Returns a list with numeric index corresponding to field's name'''
        return [self.index(i) for i in param]
# Backwards compatibility
# Lowercase aliases: older code (and this module itself, e.g. selection())
# refers to the classes by their original lowercase names.
relation = Relation
header = Header
| StarcoderdataPython |
1607328 | import os
import sys
import gensim
import logging.config
from gensim.models import KeyedVectors
from typing import List, Any, Tuple
from tqdm import tqdm
from kbc_rdf2vec.dataset import DataSet
from kbc_rdf2vec.prediction import PredictionFunctionEnum
logconf_file = os.path.join(os.path.dirname(__file__), "log.conf")
logging.config.fileConfig(fname=logconf_file, disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class Rdf2vecKbc:
    """Knowledge-base-completion evaluator for RDF2Vec embeddings.

    Loads a gensim model, predicts head/tail candidates for every triple of
    the dataset's test split, and writes the ranked proposals to a file.
    """

    def __init__(
        self,
        model_path: str,
        data_set: DataSet,
        n: Any = 10,
        prediction_function: PredictionFunctionEnum = PredictionFunctionEnum.MOST_SIMILAR,
        file_for_predicate_exclusion: str = None,
        is_print_confidences: bool = False,
        is_reflexive_match_allowed: bool = False,
    ):
        """Constructor

        Parameters
        ----------
        model_path : str
            A path to the gensim model file. The file can also be a keyed vector file with ending ".kv".
        data_set : DataSet
            The dataset for which the prediction shall be performed.
        n : Any
            The number of predictions to make for each triple. If you want all predictions, set n to None.
        prediction_function : PredictionFunctionEnum
            The prediction strategy used to rank candidate heads/tails.
        file_for_predicate_exclusion : str
            The RDF2Vec model learns embeddings for h,l,t but cannot differentiate between them afterwards. Hence,
            when doing predictions for h and t, it may also predict l. If the file used to train the embedding is given
            here, such relations will be removed from the proposal set.
        is_print_confidences : bool
            True if confidences shall be printed into the evaluation file. Default: False.
        is_reflexive_match_allowed : bool
            True if it is allowed to predict H in a <H, L, ?> task and T in a <?, L, T> task.
        """
        if not os.path.isfile(model_path):
            logger.error(
                f"Cannot find file: {model_path}\nCurrent working directory: {os.getcwd()}"
            )
            # Bug fix: execution used to continue and fail later inside
            # gensim with a less informative error; fail fast instead.
            raise FileNotFoundError(f"Cannot find file: {model_path}")

        if model_path.endswith(".kv"):
            logger.info("Gensim vector file detected.")
            self._vectors = KeyedVectors.load(model_path, mmap="r")
        else:
            self._vectors = gensim.models.Word2Vec.load(model_path).wv

        self.n = n
        self.data_set = data_set
        self.test_set = self.data_set.test_set()
        self.is_print_confidences = is_print_confidences

        # Predicates seen in the training file; any predicted concept that
        # is a known predicate is filtered out (see _remove_predicates).
        self._predicates = set()
        if file_for_predicate_exclusion is not None and os.path.isfile(
            file_for_predicate_exclusion
        ):
            self._predicates = self._read_predicates(file_for_predicate_exclusion)

        self._prediction_function = prediction_function.get_instance(
            keyed_vectors=self._vectors,
            data_set=self.data_set,
            is_reflexive_match_allowed=is_reflexive_match_allowed,
        )

    def _read_predicates(self, file_for_predicate_exclusion: str) -> set:
        """Obtain predicates from the given nt file.

        Parameters
        ----------
        file_for_predicate_exclusion : str
            The NT file which shall be checked for predicates.

        Returns
        -------
        set
            A set of predicates (str).
        """
        with open(file_for_predicate_exclusion, "r", encoding="utf8") as f:
            result_set = set()
            for line in f:
                # NT lines are "<s> <p> <o> ."; token 1 is the predicate.
                tokens = line.split(sep=" ")
                result_set.add(self.remove_tags(tokens[1]))
            return result_set

    @staticmethod
    def remove_tags(string_to_process: str) -> str:
        """Removes tags around a string. Space-trimming is also applied.

        Parameters
        ----------
        string_to_process : str
            The string for which tags shall be removed.

        Returns
        -------
        str
            Given string without tags.
        """
        string_to_process = string_to_process.strip(" ")
        if string_to_process.startswith("<"):
            string_to_process = string_to_process[1:]
        if string_to_process.endswith(">"):
            string_to_process = string_to_process[: len(string_to_process) - 1]
        return string_to_process

    def predict(self, file_to_write: str) -> None:
        """Performs the actual predictions. A file will be generated.

        Parameters
        ----------
        file_to_write : str
            File that shall be written for further evaluation.
        """
        # "w" (not "w+"): the file is only written, never read back here.
        with open(file_to_write, "w", encoding="utf8") as f:
            erroneous_triples = 0
            print("Predicting Tails and Heads")
            # Only validate the predicate's presence in the vector space if
            # the prediction function actually uses predicates.
            is_skip_predicate = not self._prediction_function.requires_predicates
            with tqdm(total=len(self.test_set), file=sys.stdout) as pbar:
                for triple in self.test_set:
                    logger.debug(f"Processing triple: {triple}")
                    if self._check_triple(triple, is_skip_predicate=is_skip_predicate):
                        f.write(f"{triple[0]} {triple[1]} {triple[2]}\n")
                        heads = self._predict_heads(triple)
                        tails = self._predict_tails(triple)
                        f.write(f"\tHeads: {self._prediction_to_string(heads)}\n")
                        f.write(f"\tTails: {self._prediction_to_string(tails)}\n")
                    else:
                        logger.error(f"Could not process the triple: {triple}")
                        erroneous_triples += 1
                    pbar.update(1)

        # logging output for the user
        if erroneous_triples == 0:
            logger.info("Erroneous Triples: " + str(erroneous_triples))
        else:
            logger.error("Erroneous Triples: " + str(erroneous_triples))

    def _prediction_to_string(self, concepts_with_scores) -> str:
        """Transform a prediction to a string.

        Parameters
        ----------
        concepts_with_scores
            The predicted concepts with scores in a list.

        Returns
        -------
        str
            Space-separated concepts; confidences are appended as
            "_{score}" when the class is configured to print them.
        """
        if self.is_print_confidences:
            tokens = [f"{c}_{{{s}}}" for c, s in concepts_with_scores]
        else:
            tokens = [c for c, s in concepts_with_scores]
        return " ".join(tokens)

    def _predict_heads(self, triple: List[str]) -> List:
        """Predicts n heads given a triple.

        Parameters
        ----------
        triple : List[str]
            The triple for which n heads shall be predicted.

        Returns
        -------
        List
            A list of predicted concepts with confidences.
        """
        result_with_confidence = self._prediction_function.predict_heads(triple, self.n)
        return self._remove_predicates(result_with_confidence)

    def _predict_tails(self, triple: List[str]) -> List:
        """Predicts n tails given a triple.

        Parameters
        ----------
        triple: List[str]
            The triple for which n tails shall be predicted.

        Returns
        -------
        List
            A list of predicted concepts with confidences.
        """
        result_with_confidence = self._prediction_function.predict_tails(triple, self.n)
        return self._remove_predicates(result_with_confidence)

    def _remove_predicates(self, list_to_process: List) -> List[Tuple[str, float]]:
        """From the result list, all known predicates are removed and the new
        list is returned.

        Parameters
        ----------
        list_to_process : List
            List of (concept, score) pairs to filter.

        Returns
        -------
        List
            New list without entries whose concept is a known predicate.
        """
        return [entry for entry in list_to_process
                if entry[0] not in self._predicates]

    def _check_triple(self, triple: List[str], is_skip_predicate: bool = True) -> bool:
        """Triples can only be processed if all three elements are available
        in the vector space. This method checks for exactly this.

        Parameters
        ----------
        triple : List[str]
            The triple that shall be checked.
        is_skip_predicate : bool
            If True, the predicate will not be checked.

        Returns
        -------
        bool
            True if all (checked) elements of the triple exist in the given
            vector space, else False.
        """
        try:
            self._vectors.get_vector(triple[0])
            if not is_skip_predicate:
                self._vectors.get_vector(triple[1])
            self._vectors.get_vector(triple[2])
            return True
        except KeyError:
            return False
| StarcoderdataPython |
3262097 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_log import log
from keystoneauth1.exceptions import http as ks_exceptions
from keystoneauth1 import loading
from keystoneauth1 import session
from watcher._i18n import _
from watcher.common import clients
from watcher.common import exception
from watcher import conf
CONF = conf.CONF
LOG = log.getLogger(__name__)
class KeystoneHelper(object):
    """Convenience wrapper around the keystone client.

    Resolves roles/users/projects/domains by ID or unambiguous name and
    creates users/sessions on their behalf. The four get_* lookups are kept
    as separate methods (rather than one parameterized helper) so the
    ``_()`` message literals stay inline for gettext extraction.
    """

    def __init__(self, osc=None):
        """:param osc: an OpenStackClients instance"""
        self.osc = osc if osc else clients.OpenStackClients()
        self.keystone = self.osc.keystone()

    def get_role(self, name_or_id):
        """Return the role matching name_or_id.

        Raises exception.Invalid if no role or more than one role matches.
        """
        try:
            role = self.keystone.roles.get(name_or_id)
            return role
        except ks_exceptions.NotFound:
            # Not an ID: fall back to a (unique) name lookup.
            roles = self.keystone.roles.list(name=name_or_id)
            if len(roles) == 0:
                raise exception.Invalid(
                    message=(_("Role not Found: %s") % name_or_id))
            if len(roles) > 1:
                raise exception.Invalid(
                    message=(_("Role name seems ambiguous: %s") % name_or_id))
            return roles[0]

    def get_user(self, name_or_id):
        """Return the user matching name_or_id.

        Raises exception.Invalid if no user or more than one user matches.
        """
        try:
            user = self.keystone.users.get(name_or_id)
            return user
        except ks_exceptions.NotFound:
            users = self.keystone.users.list(name=name_or_id)
            if len(users) == 0:
                raise exception.Invalid(
                    message=(_("User not Found: %s") % name_or_id))
            if len(users) > 1:
                raise exception.Invalid(
                    message=(_("User name seems ambiguous: %s") % name_or_id))
            return users[0]

    def get_project(self, name_or_id):
        """Return the project matching name_or_id.

        Raises exception.Invalid if no project or more than one project
        matches.
        """
        try:
            project = self.keystone.projects.get(name_or_id)
            return project
        except ks_exceptions.NotFound:
            projects = self.keystone.projects.list(name=name_or_id)
            if len(projects) == 0:
                raise exception.Invalid(
                    message=(_("Project not Found: %s") % name_or_id))
            if len(projects) > 1:
                # Bug fix: the kwarg was misspelled "messsage", so the
                # intended ambiguity message never reached the exception.
                raise exception.Invalid(
                    message=(_("Project name seems ambiguous: %s") %
                             name_or_id))
            return projects[0]

    def get_domain(self, name_or_id):
        """Return the domain matching name_or_id.

        Raises exception.Invalid if no domain or more than one domain
        matches.
        """
        try:
            domain = self.keystone.domains.get(name_or_id)
            return domain
        except ks_exceptions.NotFound:
            domains = self.keystone.domains.list(name=name_or_id)
            if len(domains) == 0:
                raise exception.Invalid(
                    message=(_("Domain not Found: %s") % name_or_id))
            if len(domains) > 1:
                raise exception.Invalid(
                    message=(_("Domain name seems ambiguous: %s") %
                             name_or_id))
            return domains[0]

    def create_session(self, user_id, password):
        """Build a keystoneauth session authenticated as the given user,
        scoped to the user's default project."""
        user = self.get_user(user_id)
        loader = loading.get_plugin_loader('password')
        auth = loader.load_from_options(
            auth_url=CONF.watcher_clients_auth.auth_url,
            password=password,
            user_id=user_id,
            project_id=user.default_project_id)
        return session.Session(auth=auth)

    def create_user(self, user):
        """Create a keystone user from a dict with keys
        name/password/project/domain/roles, granting each role on the
        project, and return the created user."""
        project = self.get_project(user['project'])
        domain = self.get_domain(user['domain'])
        _user = self.keystone.users.create(
            user['name'],
            password=user['password'],
            domain=domain,
            project=project,
        )
        for role in user['roles']:
            role = self.get_role(role)
            self.keystone.roles.grant(
                role.id, user=_user.id, project=project.id)
        return _user

    def delete_user(self, user):
        """Delete a user by name or id; a missing (or ambiguous) user is
        treated as a no-op."""
        try:
            found = self.get_user(user)
            self.keystone.users.delete(found)
        except exception.Invalid:
            pass
| StarcoderdataPython |
71706 | import os
import yaml
def _replace_desdata(pth, desdata):
"""Replace the NERSC DESDATA path if needed.
Parameters
----------
pth : str
The path string on which to do replacement.
desdata : str
The desired DESDATA. If None, then the path is simply returned as is.
Returns
-------
pth : str
The path, possible with DESDATA in the path replaced with the desired
one.
"""
if desdata is None:
return pth
nersc_desdata = '/global/project/projectdirs/des/y3-image-sims'
if (nersc_desdata in pth and
os.path.normpath(desdata) != os.path.normpath(nersc_desdata)):
return pth.replace(nersc_desdata, desdata)
else:
return pth
def get_bkg_path(image_path, desdata=None):
    """Get the background image path from the image path.

    Parameters
    ----------
    image_path : str
        The path to the image.
    desdata : str, optional
        The path to the local DESDATA dir.

    Returns
    -------
    bkg_path : str
        The path to the background image.
    """
    # The "bkg" directory is a sibling of the image's parent directory.
    bkg_dir = os.path.join(
        os.path.dirname(os.path.dirname(image_path)), "bkg")
    # Keep the first four "_"-separated tokens of the image filename.
    stem = "_".join(os.path.basename(image_path).split("_")[:4])
    pth = os.path.join(bkg_dir, stem + "_bkg.fits.fz")
    return _replace_desdata(pth, desdata)
def get_piff_path(image_path):
    """Get the Piff path from the image path.

    Parameters
    ----------
    image_path : str
        The path to the SE image.

    Returns
    -------
    piff_path : str
        The path to the Piff model.

    Raises
    ------
    ValueError
        If the PIFF_DATA_DIR / PIFF_RUN environment variables are not set.
    """
    image_name = os.path.basename(image_path)
    piff_name = image_name.replace(
        '.fz', ''
    ).replace(
        'immasked.fits', 'piff.fits')
    # Exposure number: leading "D<expnum>" token without the "D".
    exposure_number = int(piff_name.split('_')[0][1:])

    if "PIFF_DATA_DIR" not in os.environ or "PIFF_RUN" not in os.environ:
        raise ValueError(
            "You must define the env vars PIFF_DATA_DIR and PIFF_RUN to "
            "use Piff PSFs!")

    return os.path.join(
        os.environ['PIFF_DATA_DIR'],
        os.environ['PIFF_RUN'],
        str(exposure_number),
        piff_name)
def get_psfex_path(image_path, desdata=None):
    """Get the PSFEx path from the image path.

    Parameters
    ----------
    image_path : str
        The path to the image.
    desdata : str, optional
        The path to the local DESDATA dir.

    Returns
    -------
    psfex_path : str
        The path to the psfex model.
    """
    # "psf" lives three directory levels above the image file.
    base_dir = os.path.dirname(
        os.path.dirname(os.path.dirname(image_path)))
    # Drop the trailing "_immasked.fits.fz"-style token from the filename.
    stem = "_".join(os.path.basename(image_path).split("_")[:-1])
    pth = os.path.join(base_dir, "psf", "%s_psfexcat.psf" % stem)
    return _replace_desdata(pth, desdata)
def get_psfex_path_coadd(coadd_path, desdata=None):
    """Get the coadd PSFEx path from the coadd image path.

    Parameters
    ----------
    coadd_path : str
        The path to the coadd image.
    desdata : str, optional
        The path to the local DESDATA dir.

    Returns
    -------
    coadd_psfex_path : str
        The path to the coadd psfex model.
    """
    # "psf" is a sibling of the coadd image's parent directory.
    psf_dir = os.path.join(os.path.dirname(os.path.dirname(coadd_path)), "psf")
    stem = os.path.basename(coadd_path).split(".")[0]
    pth = os.path.join(psf_dir, "%s_psfcat.psf" % stem)
    return _replace_desdata(pth, desdata)
def get_orig_coadd_file(desdata, desrun, tilename, band):
    """Get the path to the original coadd file.

    NOTE: This function will replace the NERSC DESDATA path with the input
    path if they are not the same. This special case is useful for people
    who copy the simulation data off of the NERSC filesystem to another
    location.

    Parameters
    ----------
    desdata : str
        The path to the local DESDATA dir.
    desrun : str
        The DES run name.
    tilename : str
        The name of the coadd tile.
    band : str
        The desired band (e.g., 'r').

    Returns
    -------
    coadd_image_path : str
        The path to the original coadd image.
    """
    fileconf = os.path.join(
        desdata, desrun, tilename, "lists",
        "%s_%s_fileconf-%s.yaml" % (tilename, band, desrun))
    with open(fileconf, "rb") as fp:
        # new pyyaml syntax requires an explicit Loader
        tile_data = yaml.load(fp, Loader=yaml.Loader)
    # special case here since sometimes we pull data from nersc and I cannot
    # seem to find code to remake the tile lists
    return _replace_desdata(tile_data["coadd_image_url"], desdata)
| StarcoderdataPython |
3371212 | <filename>tests/test_package.py
from importlib import util
def test_package():
    """The fastapi_profile package must be discoverable on the import path."""
    assert util.find_spec("fastapi_profile") is not None
| StarcoderdataPython |
1780470 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_options_tab.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_options(object):
    """Auto-generated (PyQt5 uic) widget layout for the options tab.

    NOTE(review): this file is generated from ui_options_tab.ui — edit the
    .ui file and regenerate rather than changing this code by hand.
    """
    def setupUi(self, options):
        """Create and lay out all child widgets of the options tab."""
        options.setObjectName("options")
        options.resize(761, 737)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(options.sizePolicy().hasHeightForWidth())
        options.setSizePolicy(sizePolicy)
        # Header label and separator line.
        self.label_2 = QtWidgets.QLabel(options)
        self.label_2.setGeometry(QtCore.QRect(10, 10, 118, 29))
        self.label_2.setObjectName("label_2")
        self.line = QtWidgets.QFrame(options)
        self.line.setGeometry(QtCore.QRect(0, 50, 741, 21))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.label = QtWidgets.QLabel(options)
        self.label.setGeometry(QtCore.QRect(20, 20, 16, 29))
        self.label.setText("")
        self.label.setObjectName("label")
        # Grid of flip/reverse checkboxes per orientation
        # (rows: flip horizontal / flip vertical / reverse slice ordering;
        #  columns: sagittal / coronal / axial).
        self.widgetFlipButtons = QtWidgets.QWidget(options)
        self.widgetFlipButtons.setGeometry(QtCore.QRect(10, 150, 344, 161))
        self.widgetFlipButtons.setObjectName("widgetFlipButtons")
        self.label_3 = QtWidgets.QLabel(self.widgetFlipButtons)
        self.label_3.setGeometry(QtCore.QRect(9, 9, 85, 17))
        self.label_3.setObjectName("label_3")
        self.layoutWidget = QtWidgets.QWidget(self.widgetFlipButtons)
        self.layoutWidget.setGeometry(QtCore.QRect(10, 30, 306, 100))
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 0, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 0, 2, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.layoutWidget)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 0, 3, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.layoutWidget)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 1, 0, 1, 1)
        self.checkBoxSagittalFlipX = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxSagittalFlipX.setText("")
        self.checkBoxSagittalFlipX.setObjectName("checkBoxSagittalFlipX")
        self.gridLayout.addWidget(self.checkBoxSagittalFlipX, 1, 1, 1, 1)
        self.checkBoxCoronalFlipX = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxCoronalFlipX.setText("")
        self.checkBoxCoronalFlipX.setObjectName("checkBoxCoronalFlipX")
        self.gridLayout.addWidget(self.checkBoxCoronalFlipX, 1, 2, 1, 1)
        self.checkBoxAxialFlipx = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxAxialFlipx.setText("")
        self.checkBoxAxialFlipx.setObjectName("checkBoxAxialFlipx")
        self.gridLayout.addWidget(self.checkBoxAxialFlipx, 1, 3, 1, 1)
        self.checkBoxSagittalFlipY = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxSagittalFlipY.setText("")
        self.checkBoxSagittalFlipY.setObjectName("checkBoxSagittalFlipY")
        self.gridLayout.addWidget(self.checkBoxSagittalFlipY, 2, 1, 1, 1)
        self.checkBoxCoronalFlipY = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxCoronalFlipY.setText("")
        self.checkBoxCoronalFlipY.setObjectName("checkBoxCoronalFlipY")
        self.gridLayout.addWidget(self.checkBoxCoronalFlipY, 2, 2, 1, 1)
        self.checkBoxAxialFlipY = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxAxialFlipY.setText("")
        self.checkBoxAxialFlipY.setObjectName("checkBoxAxialFlipY")
        self.gridLayout.addWidget(self.checkBoxAxialFlipY, 2, 3, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.layoutWidget)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 3, 0, 1, 1)
        self.checkBoxSagittalFlipZ = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxSagittalFlipZ.setText("")
        self.checkBoxSagittalFlipZ.setObjectName("checkBoxSagittalFlipZ")
        self.gridLayout.addWidget(self.checkBoxSagittalFlipZ, 3, 1, 1, 1)
        self.checkBoxCoronalFlipZ = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxCoronalFlipZ.setText("")
        self.checkBoxCoronalFlipZ.setObjectName("checkBoxCoronalFlipZ")
        self.gridLayout.addWidget(self.checkBoxCoronalFlipZ, 3, 2, 1, 1)
        self.checkBoxAxialFlipZ = QtWidgets.QCheckBox(self.layoutWidget)
        self.checkBoxAxialFlipZ.setText("")
        self.checkBoxAxialFlipZ.setObjectName("checkBoxAxialFlipZ")
        self.gridLayout.addWidget(self.checkBoxAxialFlipZ, 3, 3, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.layoutWidget)
        self.label_10.setObjectName("label_10")
        self.gridLayout.addWidget(self.label_10, 2, 0, 1, 1)
        # IMPC-view checkbox plus the explanatory rich-text browser.
        self.checkBoxImpcView = QtWidgets.QCheckBox(options)
        self.checkBoxImpcView.setGeometry(QtCore.QRect(20, 350, 62, 20))
        self.checkBoxImpcView.setText("")
        self.checkBoxImpcView.setObjectName("checkBoxImpcView")
        self.textBrowser = QtWidgets.QTextBrowser(options)
        self.textBrowser.setGeometry(QtCore.QRect(50, 350, 256, 151))
        self.textBrowser.setObjectName("textBrowser")
        self.pushButtonFilterLabels = QtWidgets.QPushButton(options)
        self.pushButtonFilterLabels.setGeometry(QtCore.QRect(20, 80, 181, 25))
        self.pushButtonFilterLabels.setObjectName("pushButtonFilterLabels")
        self.retranslateUi(options)
        QtCore.QMetaObject.connectSlotsByName(options)
    def retranslateUi(self, options):
        """Assign all translatable display strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        options.setWindowTitle(_translate("options", "Form"))
        self.label_2.setText(_translate("options", "Options"))
        self.label_3.setText(_translate("options", "Orientations"))
        self.label_4.setText(_translate("options", "Sagittal"))
        self.label_5.setText(_translate("options", "coronal"))
        self.label_6.setText(_translate("options", "axial"))
        self.label_7.setText(_translate("options", "Flip horizontal"))
        self.label_8.setText(_translate("options", "Reverse slice ordering"))
        self.label_10.setText(_translate("options", "flip vertical"))
        self.textBrowser.setHtml(_translate("options", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">IMPC view. </span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Checking this button with ensure that data downloaded from the DCC will be in the agreed orientation. For other data you may have to use the individual flip options.</p></body></html>"))
        self.pushButtonFilterLabels.setText(_translate("options", "Label filter (f)"))
| StarcoderdataPython |
3325805 | <filename>pyinsteon/handlers/from_device/broadcast_command.py
"""Base class to handle Broadcast messages from devices."""
from datetime import datetime
from ...constants import MessageFlagType
from ..inbound_base import InboundHandlerBase
class BroadcastCommandHandlerBase(InboundHandlerBase):
    """Base class to handle inbound Broadcast messages."""

    def __init__(self, topic, address, group=None):
        """Init the broadcast_handlerBase class."""
        self._group = group
        super().__init__(
            topic=topic,
            address=address,
            group=group,
            message_type=MessageFlagType.ALL_LINK_BROADCAST,
        )
        # Sentinel "long ago" timestamp so the very first message is never
        # treated as a duplicate.
        self._last_command = datetime(1, 1, 1)
        self._last_hops_left = None

    def is_first_message(self, target, hops_left):
        """Test if the message is a duplicate.

        Returns True when this broadcast should be processed as a new
        message, False when it looks like a retransmission of the last one
        (same remaining hop count, arriving within the dedup window).
        """
        curr_time = datetime.now()
        # BUGFIX: the original used timedelta.seconds, which (a) truncates
        # sub-second precision to an int and (b) only counts the seconds
        # component within the day, wrapping after 24h.  The comparisons
        # below use fractional thresholds (0.7s), so total_seconds() is the
        # value the logic actually needs.
        elapsed = (curr_time - self._last_command).total_seconds()
        self._last_command = curr_time
        if (
            target.middle != 0
            and hops_left == self._last_hops_left
            and elapsed < 1
        ):
            # Same hop count arriving within a second of the last message:
            # treat as a retransmitted duplicate.
            return False
        if (
            self._last_hops_left is None
            or (hops_left == self._last_hops_left and elapsed > 0.7)
            or hops_left > self._last_hops_left
            or elapsed >= 2
        ):
            self._last_hops_left = hops_left
            return True
        return False
| StarcoderdataPython |
1789071 | import os
import struct
import binascii
import socket
import threading
import datetime
from time import time, sleep
from json import load, loads, dumps
from src.utils import *
from src.db_worker import *
from src.logs.log_config import logger
from src.protocols.Teltonika.crc import crc16
class Teltonika:
    """Handles one Teltonika tracker TCP session: IMEI handshake, AVL data
    decoding (codecs 8 / 8E / 16) and command round-trips (codecs 12/13/14).
    """

    BASE_PATH = 'tracker_receiver/src/protocols/Teltonika/'
    NAME = 'Teltonika'
    # All currently-connected tracker instances (class-level registry).
    TRACKERS = set()

    def __init__(self, sock, addr, model):
        self.sock = sock
        self.addr = addr
        self.model = model
        self.command_response = {}

    def start(self):
        """Perform the IMEI handshake, load per-device config and spawn the
        receive loop thread."""
        self.imei = self.handle_imei()
        logger.info(f'Teltonika{self.model} {self.imei} подключен [{self.addr[0]}:{self.addr[1]}]')
        Teltonika.TRACKERS.add(self)
        self.assign = get_configuration(self.NAME, self.imei, self.model)
        self.decoder = self.get_decoder(self.model)
        self.ign_v = get_ignition_v(self.imei)
        self.lock = threading.Lock()
        self.stop = False
        main_th = threading.Thread(target=self.handle_packet)
        main_th.start()

    def get_decoder(self, model):
        """Load the AVL id -> parameter-name mapping for this tracker model."""
        if model:
            # BUGFIX: use a context manager so the JSON file handle is closed
            # (the original leaked it via load(open(...))).
            with open(self.BASE_PATH + f'avl_ids/{model.lower()}.json', 'r') as fd:
                decoder = load(fd)
            logger.debug(f"[Teltonika] для {self.imei} выбрана модель {model.lower()}\n")
            return decoder
        else:
            logger.critical(f"Teltonika для imei {self.imei} модель не найдена\n")
            raise ValueError('Unknown tracker')

    def handle_imei(self):
        """Read the initial IMEI frame and acknowledge it with a single 0x01."""
        try:
            packet = binascii.hexlify(self.sock.recv(34))
            length = int(packet[:4], 16)
            packet = unpack_from_bytes(f'{length}s', packet[4:])[0]
            imei = packet.decode('ascii')
            logger.debug(f'[Teltonika] imei получен {imei}\n')
            self.sock.send(struct.pack('!B', 1))
            logger.debug(f'[Teltonika] ответ на сообщение с imei отправлен\n')
        except Exception as e:
            self.sock.close()
            # BUGFIX: at this point the tracker has NOT been added to TRACKERS
            # yet (start() adds it only after handle_imei returns), so
            # remove() would raise KeyError and mask the original error.
            # discard() is a safe no-op when the element is absent.
            Teltonika.TRACKERS.discard(self)
            self.stop = True
            raise e
        return imei

    def handle_packet(self):
        """Main receive loop: hexlify incoming frames and dispatch by codec."""
        while not self.stop:
            try:
                packet = binascii.hexlify(self.sock.recv(4096))
            except Exception:
                self.sock.close()
                self.stop = True
                Teltonika.TRACKERS.remove(self)
                logger.debug(f'[Teltonika{self.model}] {self.imei} отключен [{self.addr[0]}:{self.addr[1]}]')
                break
            self.lock.acquire()
            # BUGFIX: the original released the lock only on the happy path.
            # A keep-alive packet (b'ff') hit ``continue`` with the lock still
            # held, deadlocking the next iteration (threading.Lock is not
            # reentrant).  try/finally guarantees release on every exit path
            # (continue, break or exception).
            try:
                if len(packet) < 8:
                    if packet == b'\xff' or packet == b'' or packet == b'ff':
                        # keep-alive / empty frame — nothing to do
                        continue
                    else:
                        logger.error(f'[Teltonika] непонятный пакет: {packet}')
                        self.sock.close()
                        self.stop = True
                        Teltonika.TRACKERS.remove(self)
                        logger.debug(f'[Teltonika{self.model}] {self.imei} отключен [{self.addr[0]}:{self.addr[1]}]')
                        break
                logger.debug(f'[Teltonika] получен пакет:\n{packet}\n')
                try:
                    packet, z = extract_int(packet)  # preamble zero bytes
                    assert z == 0, 'Not teltonika packet'
                    packet, data_len = extract_uint(packet)
                    packet, self.codec = extract_ubyte(packet)
                    packet, self.count = extract_ubyte(packet)
                    logger.debug(f'[Teltonika] codec={self.codec} rec_count={self.count}\n')
                except Exception as e:
                    # NOTE(review): on a parse failure self.codec keeps its
                    # previous value and processing continues below — confirm
                    # this best-effort behaviour is intended.
                    with open('tracker_receiver/src/logs/errors.log', 'a') as fd:
                        fd.write(f'Ошибка в распаковке {packet}\n{e}\n')
                if self.codec in (8, 142, 16):
                    # AVL geo data: decode, normalise and store, then ack the
                    # number of accepted records.
                    self.data = self.handle_data(packet)
                    self.data = prepare_geo(self.data)
                    count = insert_geo(self.data)
                    logger.info(f'Teltonika{self.model} {self.imei} принято {count}/{len(self.data)} записей')
                    self.sock.send(struct.pack("!I", count))
                elif self.codec in (12, 13, 14):
                    # Command response from the device.
                    result = self.handle_command(packet)
                    resp = {"action": "response", "result": result}
                    resp = dumps(resp)
                    self.command_response = resp
                    logger.debug(f'[Teltonika] ответ на команду принят\n{result}\n')
                else:
                    logger.critical(f"Teltonika неизвестный кодек {self.codec}")
                    raise ValueError('Unknown codec')
            finally:
                self.lock.release()
            sleep(2)

    def send_command(self, codec, command):
        """Build and send a GPRS command packet (codec 12 or 14) to the device.

        Returns an error string for unsupported codecs, otherwise None.
        """
        result = ''
        codec = int(codec)
        if codec == 12:
            com_length = len(command)
            length = 8 + com_length
        elif codec == 14:
            # codec 14 prepends the zero-padded 16-char IMEI to the command
            com_length = 8 + len(command)
            length = 8 + com_length
        elif codec == 13:
            result = 'Сервер не может отправлять команду по кодеку 13!'
            return result
        else:
            result = f'Неизвестный кодек - {codec}'
            return result
        packet = ''
        packet = add_int(packet, 0)           # preamble
        packet = add_uint(packet, length)     # data size
        packet = add_ubyte(packet, codec)
        packet = add_ubyte(packet, 1)         # command quantity 1
        packet = add_ubyte(packet, 5)         # type: command
        packet = add_uint(packet, com_length)
        if codec == 14:
            packet = add_str(packet, self.imei.rjust(16, '0'))
        packet = add_str(packet, command)
        packet = add_ubyte(packet, 1)         # command quantity 2
        # CRC16 is computed over everything after the 8-byte preamble+size.
        crc16_pack = struct.unpack(f'{len(packet[16:])//2}s', binascii.a2b_hex(packet[16:].encode('ascii')))[0]
        packet = add_uint(packet, crc16(crc16_pack))
        logger.debug(f'[Teltonika] командный пакет сформирован:\n{packet}\n')
        packet = pack(packet)
        self.sock.send(packet)
        logger.debug(f'[Teltonika] команда отправлена\n')

    def handle_data(self, packet):
        """Decode ``self.count`` AVL records from ``packet`` using the codec
        selected in handle_packet; returns a list of record dicts."""
        all_data = []
        codec_func = None
        # codec 8
        if self.codec == 8:
            codec_func = self.codec_8
        # codec 8 extended
        elif self.codec == 142:
            codec_func = self.codec_8
        # codec 16
        elif self.codec == 16:
            codec_func = self.codec_16
        else:
            logger.critical(f"Teltonika неизвестный кодек {self.codec}")
            raise ValueError('Unknown codec')
        for rec in range(self.count):
            data = {
                'imei': self.imei,
                'ts': datetime.datetime.utcfromtimestamp(int(time()))
            }
            packet, codecdata = codec_func(packet)
            data.update(codecdata)
            # Derive ignition from supply voltage when a threshold is set.
            if 'voltage' in data['iodata'].keys():
                if self.ign_v is not None:
                    if data['iodata']['voltage'] > self.ign_v:
                        data['iodata']['ignition'] = 1
                    else:
                        data['iodata']['ignition'] = 0
            all_data.append(data)
            logger.debug(f"[Teltonika] #{len(all_data)}:\n{data}\n")
        logger.debug(f'[Teltonika] data:\n{all_data}\n')
        return all_data

    def handle_command(self, packet):
        """Unpack a command-response packet (codec 12/13/14) and return the
        ASCII response text."""
        packet, _ = extract_ubyte(packet)           # response quantity
        packet, length = extract_uint(packet)
        if self.codec == 14:
            packet, imei = extract_str(packet, 8)
            length -= 8
        elif self.codec == 13:
            packet, ts = extract(packet, 8)
            # BUGFIX: the original referenced an undefined name ``timestamp``
            # here, raising NameError on every codec-13 response.
            ts = int(b'0x' + ts, 16)
            ts /= 1000
        packet, response = extract_str(packet, length)
        packet, _ = extract_ubyte(packet)
        packet, _ = extract_uint(packet)
        logger.debug(f'[Teltonika] пакет с ответом на команду распакован\n')
        return response.decode('ascii')

    def codec_8(self, packet):
        """Decode one codec 8 / 8-extended / 16 AVL record (GPS + IO part)."""
        logger.debug(f'[Teltonika] CODEC {self.codec} AVL Data packet:\n{packet}\n')
        packet, timestamp = extract(packet, 8)
        timestamp = b'0x' + timestamp
        timestamp = int(timestamp, 16)
        timestamp /= 1000   # device sends milliseconds
        packet, _ = extract_ubyte(packet)  # priority
        packet, lon = extract_int(packet)
        lon /= 10000000
        packet, lat = extract_int(packet)
        lat /= 10000000
        packet, alt = extract_ushort(packet)
        packet, dr = extract_ushort(packet)
        dr = dr // 2
        packet, sat_num = extract_ubyte(packet)
        packet, speed = extract_ushort(packet)
        dt = datetime.datetime.utcfromtimestamp(timestamp)
        data = {
            "datetime": dt,
            "lon": lon,
            "lat": lat,
            "alt": alt,
            "direction": dr,
            "sat_num": sat_num,
            "speed": speed
        }
        logger.debug(f'[Teltonika] AVL Data обработана:\n{data}\n')
        # Event/IO header widths differ per codec.
        if self.codec == 8:
            packet, EventIO = extract_ubyte(packet)
            packet, NumOfIO = extract_ubyte(packet)
        elif self.codec == 142:
            packet, EventIO = extract_ushort(packet)
            packet, NumOfIO = extract_ushort(packet)
        elif self.codec == 16:
            packet, EventIO = extract_ushort(packet)
            packet, Generation_type = extract_ubyte(packet)
            packet, NumOfIO = extract_ubyte(packet)
        else:
            logger.critical(f"Teltonika неизвестный кодек {self.codec}\n")
            raise ValueError('Unknown codec')
        # Event id 385 carries a BLE beacon payload instead of regular IO.
        if EventIO == 385:
            packet, iodata = self.handle_beacon(packet)
        else:
            packet, iodata = self.handle_io(packet)
        data.update({"iodata": iodata})
        logger.debug(f'[Teltonika] AVL IO Data обработана:\n{iodata}\n')
        return packet, data

    def codec_16(self, packet):
        """Codec 16 shares the codec 8 record layout (header widths handled
        inside codec_8 via self.codec)."""
        return self.codec_8(packet)

    def handle_io(self, packet):
        """Decode the IO element groups (1/2/4/8-byte values, then the
        variable-length group for codec 8E) into a name -> value dict,
        applying per-device renames and scale factors from self.assign."""
        data = {}
        # Fixed-width groups: 1, 2, 4 and 8 byte values, in that order.
        for extract_func in [extract_byte, extract_short, extract_int, extract_longlong]:
            if self.codec == 8 or self.codec == 16:
                packet, count = extract_ubyte(packet)
            elif self.codec == 142:
                packet, count = extract_ushort(packet)
            else:
                logger.critical(f"Teltonika неизвестный кодек {self.codec}\n")
                raise ValueError('Unknown codec')
            iodata = {}
            for _ in range(count):
                if self.codec == 8:
                    packet, io_id = extract_ubyte(packet)
                elif self.codec == 142 or self.codec == 16:
                    packet, io_id = extract_ushort(packet)
                else:
                    logger.critical(f"Teltonika неизвестный кодек {self.codec}\n")
                    raise ValueError('Unknown codec')
                packet, io_val = extract_func(packet)
                if str(io_id) not in self.decoder.keys():
                    logger.error(f'[Teltonika] Неизвестный AVL IO ID {io_id}\n')
                else:
                    # self.assign may remap by raw id or by decoded name and
                    # may carry a "name*factor" scale suffix.
                    if str(io_id) in self.assign.keys():
                        ikey = self.assign[str(io_id)]
                        if '*' in ikey:
                            spl = ikey.split('*')
                            ikey, k = spl[0], spl[1]
                            io_val = round(io_val * float(k), 4)
                        iodata.update({ikey: io_val})
                    elif self.decoder[str(io_id)] in self.assign.keys():
                        ikey = self.assign[self.decoder[str(io_id)]]
                        if '*' in ikey:
                            spl = ikey.split('*')
                            ikey, k = spl[0], spl[1]
                            io_val = round(io_val * float(k), 4)
                        iodata.update({ikey: io_val})
                    else:
                        iodata.update({self.decoder[str(io_id)]: io_val})
            data.update(iodata)
        # Codec 8 extended adds a trailing variable-length IO group.
        if self.codec == 142:
            packet, count = extract_ushort(packet)
            iodata = {}
            for _ in range(count):
                packet, io_id = extract_ushort(packet)
                packet, length = extract_ushort(packet)
                if length > 8:
                    packet, io_val = extract(packet, length)
                else:
                    packet, io_val = extract_x(packet, 'q', length)
                if str(io_id) not in self.decoder.keys():
                    logger.error(f'[Teltonika] Неизвестный AVL IO ID {io_id}\n')
                else:
                    if str(io_id) in self.assign.keys():
                        ikey = self.assign[str(io_id)]
                        if '*' in ikey:
                            spl = ikey.split('*')
                            ikey, k = spl[0], spl[1]
                            io_val = round(io_val * float(k), 4)
                        iodata.update({ikey: io_val})
                    elif self.decoder[str(io_id)] in self.assign.keys():
                        ikey = self.assign[self.decoder[str(io_id)]]
                        if '*' in ikey:
                            spl = ikey.split('*')
                            ikey, k = spl[0], spl[1]
                            io_val = round(io_val * float(k), 4)
                        iodata.update({ikey: io_val})
                    else:
                        iodata.update({self.decoder[str(io_id)]: io_val})
            data.update(iodata)
        return packet, data

    def handle_beacon(self, packet):
        """Decode an event-385 record: skip five reserved shorts, then read
        the length-prefixed beacon payload."""
        packet, _ = extract_short(packet)
        packet, _ = extract_short(packet)
        packet, _ = extract_short(packet)
        packet, _ = extract_short(packet)
        packet, _ = extract_short(packet)
        packet, length = extract_short(packet)
        packet, beacon = extract(packet, length)
        return packet, {"Beacon": beacon}

    @staticmethod
    def get_tracker(imei):
        """Return the connected tracker instance with the given imei, or None."""
        for t in Teltonika.TRACKERS:
            if str(t.imei) == str(imei):
                return t
        return None
def prepare_geo(records):
    """Flatten decoded Teltonika records into geo rows ready for insertion.

    Note: intentionally mutates each record's ``iodata`` dict by adding the
    ``sat_num`` key, mirroring the decoder's downstream expectations.
    """
    geo_rows = []
    for rec in records:
        io = rec['iodata']
        io.update({"sat_num": rec['sat_num']})
        # Compact pseudo-JSON rendering of the IO payload with braces stripped.
        reserve = str(io).replace("'", '"').replace(' ', '')[1:-1]
        geo_rows.append({
            'imei': rec['imei'],
            'lat': float('{:.6f}'.format(rec['lat'])),
            'lon': float('{:.6f}'.format(rec['lon'])),
            'datetime': rec['datetime'],
            'type': 0,
            'speed': rec['speed'],
            'direction': rec['direction'],
            'bat': 0,
            'fuel': 0,
            'ignition': io.get('ignition', 0),
            'sensor': io.get('sensor', 0),
            'reserve': reserve,
            'ts': rec['ts'],
        })
    return geo_rows
55707 | # FinSim
# Copyright 2018 <NAME>. All Rights Reserved.
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
### ---
# @file resources.py
# @brief endpoints to interact with finsim-trans server
## @list of endpoints
# ListAccounts (frontend)
# GetAccountInfo (frontend)
# ListTransactions (frontend)
# FlagAsFraud
# ProcessCard
# BankWithdrawal
# BankDeposit
# AccountCreation
# UserRegistration
# UserLogin
# TokenRefresh
### ---
from finsim_trans.helper import bnk_login, check_login_list
from flask_restful import Resource, reqparse, marshal_with, fields
from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType
from flask_jwt_extended import ( create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt )
from flask import jsonify
from decimal import Decimal
import requests
# Cached configuration for this bank component; presumably populated lazily by
# check_login_list() on first card transaction (see ProcessCard) — TODO confirm
# the tuple layout (indices 3/4/5 are used as credentials, peer-bank list and
# account-number prefix).
current_component = []
#Used to control what exactly gets returned by resources as a response
#account_fields used by ListAccounts and GetAccountInfo resources
account_fields = { 'id': fields.String, 'account_number': fields.String, 'balance': fields.String }
#transaction fields used by ListTransactions resource
transaction_fields = { 'id': fields.String, 'amount': fields.String, 'type': fields.String, 'account_id': fields.String, 'account': fields.String, 'counterpart_name': fields.String, 'counterpart_acct': fields.String }
# These nearly identical parsers pull arguments out of the request and return an error if anything is missing
#parser is used in UserRegistration
parser = reqparse.RequestParser()
parser.add_argument( 'username', help = 'This field cannot be blank', required = True )
parser.add_argument( 'password', help = 'This field cannot be blank', required = True )
parser.add_argument( 'role', help = 'This field cannot be blank', required = True )
#parserLogin is used in UserLogin resource
parserLogin = reqparse.RequestParser()
parserLogin.add_argument( 'username', help = 'This field cannot be blank', required = True )
parserLogin.add_argument( 'password', help = 'This field cannot be blank', required = True )
#accountParser is used in GetAccountInfo, ListTransactions, and AccountCreation resources
accountParser = reqparse.RequestParser()
accountParser.add_argument( 'number', help = 'This field cannot be blank', required = True )
#depositParser is used in BankDeposit resource
depositParser = reqparse.RequestParser()
depositParser.add_argument( 'amount', help = 'This field cannot be blank', required = True )
depositParser.add_argument( 'account', help = 'This field cannot be blank', required = True )
depositParser.add_argument( 'counterpart_name', help = 'This field cannot be blank', required = True )
# counterpart_acct is optional for deposits only
depositParser.add_argument( 'counterpart_acct', help = 'This field cannot be blank', required = False )
#withdrawalParser is used in BankWithdrawal resource
withdrawalParser = reqparse.RequestParser()
withdrawalParser.add_argument( 'amount', help = 'This field cannot be blank', required = True )
withdrawalParser.add_argument( 'account', help = 'This field cannot be blank', required = True )
withdrawalParser.add_argument( 'counterpart_name', help = 'This field cannot be blank', required = True )
withdrawalParser.add_argument( 'counterpart_acct', help = 'This field cannot be blank', required = True )
#cardParser is used in ProcessCard resource
cardParser = reqparse.RequestParser()
cardParser.add_argument( 'source', help = 'This field cannot be blank', required = True )
cardParser.add_argument( 'destination', help = 'This field cannot be blank', required = True )
cardParser.add_argument( 'amount', help = 'This field cannot be blank', required = True )
#fraudParser is used in FlagAsFraud resource
fraudParser = reqparse.RequestParser()
fraudParser.add_argument( 'account', help = 'This field cannot be blank', required = True )
fraudParser.add_argument( 'transaction', help = 'This field cannot be blank', required = True )
## ---
# @brief ListAccounts returns every account owned by the authenticated user.
# Primarily used by the finsim-web Angular bank frontend.
# Requires a valid JWT (@jwt_required); the identity's username selects the user.
# @return list of accounts, marshalled via account_fields
## ---
class ListAccounts(Resource):
    @jwt_required
    @marshal_with(account_fields)
    def get(self):
        """Return all accounts belonging to the logged-in user."""
        owner = UserModel.find_by_username(get_jwt_identity()['username'])
        accounts = AccountModel.find_all_by_id(owner.id)
        for acct in accounts:
            print(acct.id, acct.account_number, acct.balance)
        print(type(accounts))
        return accounts
## ---
# @brief GetAccountInfo returns the info for one bank account owned by the logged in user.
# Primarily used by the finsim-web Angular bank frontend.
# @input number - account number the user wants to access
# Requires a valid JWT (@jwt_required).
# @return id, number and balance for the requested account IF the user owns it
## ---
class GetAccountInfo(Resource):
    @jwt_required
    @marshal_with(account_fields)
    def get(self):
        """Return id/number/balance for one account the user owns."""
        args = accountParser.parse_args()
        owner = UserModel.find_by_username(get_jwt_identity()['username'])
        owns_it = AccountModel.account_exists(owner.id, args['number'])
        if owns_it == True:
            acct = AccountModel.find_by_account_number(args['number'])
            return {'id': acct.id, 'account_number': acct.account_number, 'balance': acct.balance}
        return {'message': 'Account does not exist or is not owned by current user.'}
## ---
# @brief ListTransactions returns all transactions involving the given account.
# Primarily used by the finsim-web Angular bank frontend.
# @input number - account number whose transaction history is requested
# Requires a valid JWT (@jwt_required).
# @return list of transactions, marshalled via transaction_fields
## ---
class ListTransactions(Resource):
    @jwt_required
    @marshal_with(transaction_fields)
    def get(self):
        """Return the transaction history for one account the user owns."""
        args = accountParser.parse_args()
        owner = UserModel.find_by_username(get_jwt_identity()['username'])
        if AccountModel.account_exists(owner.id, args['number']) == True:
            acct = AccountModel.find_by_account_number(args['number'])
            history = TransactionModel.find_all_by_account_id(acct.id)
            for tx in history:
                print(tx.id, tx.amount, tx.type, tx.account_id)
            return history
        return {'message': 'Account does not exist or is not owned by current user.'}
## ---
# @brief FlagAsFraud flags a transaction as fraudulent.
# @input account, transaction - account number and the transaction id to flag
# Requires a valid JWT; the account must belong to the logged-in user.
# @return message specifying success or failure of the flagging
## ---
class FlagAsFraud(Resource):
    @jwt_required
    def post(self):
        """Mark one of the caller's transactions with FraudType.FLAG."""
        args = fraudParser.parse_args()
        owner = UserModel.find_by_username(get_jwt_identity()['username'])
        # Guard: the account must exist and belong to the caller.
        if AccountModel.account_exists(owner.id, args['account']) != True:
            return {'message': 'Account does not exist or is not owned by current user.'}
        acct = AccountModel.find_by_account_number(args['account'])
        tx = TransactionModel.find_by_id(acct.id, args['transaction'])
        if tx is None:
            return {'message': 'Transaction does not exist or is not made by the specified account.'}
        tx.fraud_flag = FraudType.FLAG
        tx.save_to_db()
        print(tx.fraud_flag)
        return {'message': 'Selected transaction has been flagged as fraud.'}
## ---
# @brief ProcessCard processes a card payment from a source account to a destination account
# Primarily used by finsim-cc. finsim-trans responding to this web request is always the bank for the destination account
# If source account is with another bank, this bank will make login and make a web request to the other bank's /bank/withdrawal endpoint
# @input source, destination, amount - source is the user account, destination is the merchant account, amount is what will be transferred from user to destination accounts
# finsim-cc component must be logged into finsim-trans
# @return message specifiying success or failure with processing the transaction
## ---
class ProcessCard( Resource ):
    @jwt_required
    def post( self ):
        global current_component
        #Always start by checking whether current_component is filled out or not
        if len(current_component) == 0:
            current_component = check_login_list()
            if len(current_component) == 0:
                return { 'message': 'There was a problem with the current component functionality.' }
        data = cardParser.parse_args()
        if get_jwt_identity()['role'] != 'CC': #maybe compare this using the enum instead of a string?
            return { 'message': 'Only a credit card processor can initiate a card processing transaction.' }
        # NOTE(review): `user` (the CC processor's own user row) is looked up but
        # never used below — confirm whether it can be removed.
        user = UserModel.find_by_username( get_jwt_identity()['username'] )
        destAccount = AccountModel.find_by_account_number( data['destination'] )
        destUser = UserModel.find_by_id( destAccount.owner_id )
        # Credit leg for the merchant; only the last 4 digits of the source
        # account are stored as the counterpart reference.
        depositTrans = TransactionModel( amount = Decimal( data['amount'] ), type = TxType.CREDIT, account_id = destAccount.id, counterpart_acct = data['source'][-4:], fraud_flag = FraudType.NONE )
        # First we have to confirm that the destination account resides at this bank
        # If not, send an error message.
        # (current_component[5] is treated as this bank's account-number prefix.)
        if not destAccount.account_number.startswith( str(current_component[5]) ):
            return { 'message': 'Cannot process a card transaction that does not terminate at this bank.' }
        # Next, figure out if this transaction takes place within this bank or across different banks
        if not data['source'].startswith( str(current_component[5]) ):
            # For a back-to-bank transaction
            # NOTE(review): if no entry in current_component[4] matches the
            # source prefix, this method falls through and returns None
            # (an empty HTTP response body) — confirm that is intended.
            for bank in current_component[4]:
                if len(bank) != 1:
                    if data['source'].startswith( str(bank[3]) ):
                        #bnk_login is a helper function in helper.py used to take care of logging into the correct bank
                        token = bnk_login(bank[1], current_component[3], bank[2])
                        header = { "Authorization": "Bearer " + token['access_token'], "Content-Type": "application/json" }
                        # Make the request to the remote bank
                        bankResponse = requests.post( bank[1] + "/bank/withdrawal", json = { 'amount': data['amount'], 'account': data['source'], 'counterpart_name': destUser.username, 'counterpart_acct': destAccount.account_number[-4:] }, headers = header, verify=False ).json()
                        print( "Withdrawal request response: " )
                        print( bankResponse['message'] )
                        if bankResponse['message'] == "Withdrawal successful.":
                            # Remote debit succeeded: apply the local credit leg.
                            depositTrans.counterpart_name = bankResponse['counterpart_name']
                            destAccount.applyTransaction( depositTrans )
                            depositTrans.save_to_db()
                            destAccount.save_to_db()
                            return { 'message': 'Card processed successfully 2.' }
                        else:
                            return { 'message': 'Insufficient funds in source account 2.' }
                        # NOTE(review): unreachable — both branches above return;
                        # this break was likely meant to sit at the loop level.
                        break
        else:
            # For a local transaction
            srcAccount = AccountModel.find_by_account_number( data['source'] )
            withdrawalTrans = TransactionModel( amount = Decimal( data['amount'] ), type = TxType.DEBIT, account_id = srcAccount.id, counterpart_acct = destAccount.account_number[-4:], fraud_flag = FraudType.NONE )
            try:
                withdrawalTrans.counterpart_name = destUser.username
                # applyTransaction returns False when the debit would overdraw.
                debitOk = srcAccount.applyTransaction( withdrawalTrans )
                if debitOk:
                    srcUser = UserModel.find_by_id( srcAccount.owner_id )
                    depositTrans.counterpart_name = srcUser.username
                    destAccount.applyTransaction( depositTrans )
                    depositTrans.save_to_db()
                    withdrawalTrans.save_to_db()
                    srcAccount.save_to_db()
                    destAccount.save_to_db()
                    return { 'message': 'Card processed successfully.' }
                else:
                    return { 'message': 'Insufficient funds in source account.' }
            except Exception as e:
                print( e )
                return { 'message': 'There was a problem processing this card transaction.' }
## ---
# @brief BankWithdrawal withdraws a specified amount from the bank account
# @input amount, account, counterpart_name, counterpart_acct - account is the account that will potentially have the amount taken out of it's balance. Counterpart refers to who initiated the withdrawal
# A BANK user must be logged in in order to initiate a withdrawal. No other users may do so
# @return id, balance, owner, message, counterpart - counterpart indicates who initiated the withdrawal
## ---
class BankWithdrawal( Resource ):
    """Endpoint used by a BANK user to debit (withdraw from) an account.

    POST body: amount, account, counterpart_name, counterpart_acct — the
    counterpart fields identify who initiated the withdrawal. On success the
    response carries the account id/balance/owner plus the owner's username;
    otherwise an explanatory message is returned.
    """
    @jwt_required
    def post( self ):
        data = withdrawalParser.parse_args()
        # Only BANK users may initiate withdrawals.
        # TODO: compare against the RoleType enum instead of a raw string.
        if get_jwt_identity()['role'] != 'BANK':
            return { 'message': 'Only a bank can initiate a withdrawal.' }
        # NOTE: the previous unused UserModel.find_by_username(get_jwt_identity())
        # lookup was removed — it passed the identity dict where a username
        # string is expected.
        try:
            account = AccountModel.find_by_account_number( data['account'] )
            trans = TransactionModel( amount = data['amount'], type = TxType.DEBIT, account_id = account.id, counterpart_name = data['counterpart_name'], counterpart_acct = data['counterpart_acct'], fraud_flag = FraudType.NONE )
            acctUser = UserModel.find_by_id( account.owner_id )
            # applyTransaction() returns False when the debit would drive the
            # balance negative (same contract as the local-transaction path).
            if not account.applyTransaction( trans ):
                return { 'message': 'Insufficient funds in source account.' }
            trans.save_to_db()
            account.save_to_db()
            return { 'id': account.id, 'balance': str(account.balance), 'owner': account.owner_id, 'message': 'Withdrawal successful.', 'counterpart_name': acctUser.username}
        except Exception as e:
            print( e )
            return { 'message': 'There was a problem processing the withdrawal transaction.' }
## ---
# @brief BankDeposit deposits a specified amount into the bank account
# @input amount, account, counterpart_name, counterpart_acct - account is the account that will potentially have the amount added to its balance. Counterpart refers to who initiated the deposit
# A user must be logged in in order to initiate a deposit
# @return id, balance, owner - counterpart indicates who initiated the deposit
## ---
class BankDeposit( Resource ):
    """Endpoint that credits (deposits) a specified amount into an account.

    POST body: amount, account, counterpart_name, counterpart_acct — the
    counterpart fields identify who initiated the deposit. Any logged-in
    user may deposit. Returns the account id, balance and owner.
    """
    @jwt_required
    def post( self ):
        data = depositParser.parse_args()
        # NOTE: the previous unused UserModel.find_by_username(get_jwt_identity())
        # lookup was removed — it passed the identity dict where a username
        # string is expected.
        try:
            account = AccountModel.find_by_account_number( data['account'] )
            trans = TransactionModel( amount = data['amount'], type = TxType.CREDIT, account_id = account.id, counterpart_name = data['counterpart_name'], counterpart_acct = data['counterpart_acct'], fraud_flag = FraudType.NONE )
            trans.save_to_db()
            account.applyTransaction( trans )
            account.save_to_db()
            return jsonify( { 'id': account.id, 'balance': str(account.balance), 'owner': account.owner_id } )
        except Exception as e:
            print( e )
            return { 'message': 'There was a problem processing the credit transaction.' }
## ---
# @brief AccountCreation creates a bank account for the logged in user
# @input number - account number to create
# A user must be logged in in order to create an account
# @return message - indicates result of account creation
## ---
class AccountCreation( Resource ):
    """Endpoint that creates a bank account (identified by its number) for
    the currently logged-in user."""

    @jwt_required
    def post( self ):
        # TODO maybe restrict this feature to certain roles (admin only)
        args = accountParser.parse_args()
        identity = get_jwt_identity()
        print( identity['role'] )
        owner = UserModel.find_by_username( identity['username'] )
        print( "Creating account for {}".format( owner.username ) )
        account = AccountModel( balance = 0.0, owner_id = owner.id, account_number = args['number'] )
        try:
            account.save_to_db()
            return { 'message': 'Account created successfully' }
        except Exception as err:
            print( err )
            return { 'message': 'Could not create account' }
## ---
# @brief UserRegistration is the endpoint used to register users to finsim-trans
# @input username, password, role
# @return message, access_token, refresh_token - message indicates successful registration
# or failure message
## ---
class UserRegistration( Resource ):
    """Registration endpoint: creates a user record and returns a fresh
    access/refresh token pair on success."""

    def post( self ):
        data = parser.parse_args()
        username = data['username']
        # Reject duplicate usernames up front.
        if UserModel.find_by_username( username ):
            return { 'message': 'User {} already exists.'.format( username ) }
        new_user = UserModel(
            username = username,
            password = UserModel.generate_hash( data['password'] ),
            role = RoleType[data['role']],
        )
        try:
            new_user.save_to_db()
            identity = { 'username': new_user.username, 'role': new_user.role.name }
            return {
                'message': 'User {} was created.'.format( username ),
                'access_token': create_access_token( identity = identity ),
                'refresh_token': create_refresh_token( identity = identity ),
            }
        except Exception as err:
            print( err )
            return { 'message': 'Something went wrong.' }, 500
## ---
# @brief UserLogin is used by users to connect to finsim-trans
# @input username, password
# @return message, access_token, refresh_token - message indicates successful login
# or failure message
## ---
class UserLogin( Resource ):
    """Login endpoint: verifies the supplied credentials and returns an
    access/refresh token pair, or a 401 on failure."""

    def post( self ):
        creds = parserLogin.parse_args()
        user = UserModel.find_by_username( creds['username'] )
        # Guard clauses: unknown user, then bad password — both 401.
        if not user:
            return {'message':'User {} does not exist.'.format(creds['username']) }, 401
        if not UserModel.verify_hash( creds['password'], user.password ):
            return {'message': 'Wrong credentials'}, 401
        identity = { 'username': creds['username'], 'role': user.role.name }
        return {
            'message': 'Logged in as {}.'.format( user.username ),
            'access_token': create_access_token( identity = identity ),
            'refresh_token': create_refresh_token( identity = identity ),
        }
## ---
# @brief TokenRefresh endpoint to provide a new access_token
# @return access_token
## ---
class TokenRefresh( Resource ):
    """Endpoint that issues a new access token from a valid refresh token."""

    @jwt_refresh_token_required
    def post( self ):
        identity = get_jwt_identity()
        return { 'access_token': create_access_token( identity = identity ) }
| StarcoderdataPython |
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
import logging
from ...geni.util.urn_util import is_valid_urn, URN, string_to_urn_format
from .framework_pg import Framework as pg_framework
class Framework(pg_framework):
    """Framework to talk to a PG-style clearinghouse implemented using
    GCF, for use by GENI-in-a-Box."""
    def __init__(self, config, opts):
        pg_framework.__init__(self, config, opts)
        self.logger = logging.getLogger("omni.gib")
    def list_my_slices(self, user):
        '''List slices owned by the user (name or hrn or URN) provided, returning a list of slice URNs.'''
        userhrn = self.user_name_to_hrn(user)
        return self._list_my_slices(userhrn)
    def user_name_to_hrn(self, name):
        '''Convert a username to an HRN. Accept an HRN or URN though. Authority
        is taken from the SA hostname.'''
        if name is None or name.strip() == '':
            raise Exception('Empty user name')
        # If the name is a URN, convert it
        if is_valid_urn(name):
            # NOTE(review): urn_to_hrn is not imported at the top of this
            # module — confirm it is exported by ...geni.util.urn_util and add
            # it to that import; otherwise this branch raises NameError.
            (hrn, urn_type) = urn_to_hrn(name)
            if urn_type != 'user':
                raise Exception("Not a user! %s is of type %s" % (name, urn_type))
            name = hrn
            self.logger.debug("Treating name %s as a URN, with hrn %s", name, hrn)
        # Otherwise, construct the hrn (or maybe this is one)
        # 'in' instead of the Python2-only dict.has_key().
        if 'authority' not in self.config:
            raise Exception("Invalid configuration: no authority name defined")
        auth = self.config['authority']
        # Assume for now that if the name looks like auth.name, then it is a valid hrn
        if name.startswith(auth + '.'):
            self.logger.debug("Treating %s as an hrn", name)
            return name
        hrn = auth + '.' + name
        self.logger.debug("Treating %s as just the name, with full hrn %s", name, hrn)
        return hrn
    # Use an 'authority' field from the omni_config to set the
    # authority part of the URN
    def slice_name_to_urn(self, name):
        """Convert a slice name to a slice urn."""
        if name is None or name.strip() == '':
            raise Exception('Empty slice name')
        if is_valid_urn(name):
            urn = URN(None, None, None, name)
            if not urn.getType() == "slice":
                raise Exception("Invalid Slice name: got a non Slice URN %s", name)
            # if config has an authority, make sure it matches
            if 'authority' in self.config:
                auth = self.config['authority']
                urn_fmt_auth = string_to_urn_format(urn.getAuthority())
                if urn_fmt_auth != auth:
                    self.logger.warn("CAREFUL: slice' authority (%s) doesn't match current configured authority (%s)" % (urn_fmt_auth, auth))
                    self.logger.info("This may be OK though if you are using delegated slice credentials...")
                    # raise Exception("Invalid slice name: slice' authority (%s) doesn't match current configured authority (%s)" % (urn_fmt_auth, auth))
            return name
        if 'authority' not in self.config:
            raise Exception("Invalid configuration: no authority defined")
        auth = self.config['authority']
        return URN(auth, "slice", name).urn_string()
    # Auto recreate slices whenever the user asks for a slice
    # Note any slice renewal from last time will be gone
    def get_slice_cred(self, urn):
        return self.create_slice(urn)
| StarcoderdataPython |
from __future__ import print_function
import unittest
import nifty
import nifty.graph
nlmc = nifty.graph.lifted_multicut
import numpy
import random
class TestLiftedGraphFeatures(unittest.TestCase):
def generateGrid(self, gridSize):
def nid(x, y):
return x*gridSize[1] + y
G = nifty.graph.UndirectedGraph
g = G(gridSize[0] * gridSize[1])
for x in range(gridSize[0]):
for y in range(gridSize[1]):
u = nid(x,y)
if x + 1 < gridSize[0]:
v = nid(x+1, y)
g.insertEdge(u, v)
if y + 1 < gridSize[1]:
v = nid(x, y+1)
g.insertEdge(u, v)
return g, nid
def gridLiftedModel(self, gridSize = [3,2], bfsRadius=2, weightRange = [-1,1]):
g,nid = self.generateGrid(gridSize)
obj = nlmc.liftedMulticutObjective(g)
graph = obj.graph
liftedGraph = obj.liftedGraph
# this should add edges
obj.insertLiftedEdgesBfs(bfsRadius)
postEdges = liftedGraph.numberOfEdges
for edge in liftedGraph.edges():
u,v = liftedGraph.uv(edge)
w = random.uniform(weightRange[0],weightRange[1])
obj.setCost(u, v, w)
return obj,nid
def testLiftedUcFeatures(self):
obj,nid = self.gridLiftedModel(gridSize=[100,100],bfsRadius=3)
graph = obj.graph
edgeIndicators = numpy.random.rand(graph.edgeIdUpperBound + 1) + 2.0
edgeSizes = numpy.random.rand(graph.edgeIdUpperBound + 1) + 0.5
nodeSizes = numpy.random.rand(graph.nodeIdUpperBound + 1) + 0.5
features = nlmc.liftedUcmFeatures(
objective=obj,
edgeIndicators=edgeIndicators,
edgeSizes=edgeSizes,
nodeSizes=nodeSizes,
sizeRegularizers=[0.01, 0.02, 0.3, 0.4]
)
featuresReg = features[::2,:]
featuresRaw = features[1::2,:]
self.assertEqual(featuresReg.shape[1], obj.numberOfLiftedEdges)
self.assertEqual(featuresReg.shape[0], 4)
self.assertEqual(featuresRaw.shape[0], 4)
self.assertTrue(numpy.all(numpy.isfinite(featuresReg)))
self.assertTrue(numpy.all(numpy.isfinite(featuresRaw)))
self.assertGreaterEqual(featuresRaw.min(), 2.0)
self.assertLessEqual(featuresRaw.max(), 3.0)
self.assertGreaterEqual(featuresReg.min(), 0.0) | StarcoderdataPython |
"""Repository rule for def file filter autoconfiguration.
This repository reuses Bazel's VC detect mechanism to find undname.exe,
which is a tool used in def_file_filter.py.
def_file_filter.py is for filtering the DEF file for TensorFlow on Windows.
On Windows, we use a DEF file generated by Bazel to export symbols from the
tensorflow dynamic library(_pywrap_tensorflow.dll). The maximum number of
symbols that can be exported per DLL is 64K, so we have to filter some useless
symbols through this python script.
`def_file_filter_config` depends on the following environment variables:
* `BAZEL_VC`
* `BAZEL_VS`
* `VS90COMNTOOLS`
* `VS100COMNTOOLS`
* `VS110COMNTOOLS`
* `VS120COMNTOOLS`
* `VS140COMNTOOLS`
"""
load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_vc_path")
load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_msvc_tool")
load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "auto_configure_fail")
def _def_file_filter_configure_impl(repository_ctx):
    # On non-Windows hosts the filter is a no-op: still emit the BUILD file
    # and an empty def_file_filter.py so the generated repository is valid.
    if repository_ctx.os.name.lower().find("windows") == -1:
        repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
        repository_ctx.file("def_file_filter.py", "")
        return
    # Reuse Bazel's Visual C++ autodetection to locate the MSVC installation.
    vc_path = find_vc_path(repository_ctx)
    if vc_path == "visual-studio-not-found":
        auto_configure_fail("Visual C++ build tools not found on your machine")
    undname = find_msvc_tool(repository_ctx, vc_path, "undname.exe")
    if undname == None:
        auto_configure_fail("Couldn't find undname.exe under %s, please check your VC installation and set BAZEL_VC environment variable correctly." % vc_path)
    # Double the backslashes so the Windows path survives substitution into
    # a Python string literal inside the template.
    undname_bin_path = undname.replace("\\", "\\\\")
    repository_ctx.template(
        "def_file_filter.py",
        Label("//tensorflow/tools/def_file_filter:def_file_filter.py.tpl"),
        {
            "%{undname_bin_path}": undname_bin_path,
        })
    repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
# Repository rule wiring: listing the VC-related environment variables in
# `environ` makes Bazel re-run the configuration whenever one of them
# changes, since they influence Visual C++ path detection.
def_file_filter_configure = repository_rule(
    implementation = _def_file_filter_configure_impl,
    environ = [
        "BAZEL_VC",
        "BAZEL_VS",
        "VS90COMNTOOLS",
        "VS100COMNTOOLS",
        "VS110COMNTOOLS",
        "VS120COMNTOOLS",
        "VS140COMNTOOLS"
    ],
)
| StarcoderdataPython |
116213 | <reponame>Jumpscale/jumpscale_core8
"""
Test JSCuisine (core)
"""
import unittest
from unittest import mock
from JumpScale import j
from JumpScale.tools.cuisine.JSCuisine import JSCuisine
import JumpScale
from JumpScale.tools.cuisine.ProcessManagerFactory import ProcessManagerFactory
class TestJSCuisine(unittest.TestCase):
    """Smoke tests for JSCuisine: a freshly constructed instance must expose
    every expected sub-component as a non-None attribute.

    The ~20 previously duplicated test bodies are factored into the
    _assertAttrsNotNone() helper; every original test method name and skip
    decorator is preserved so test discovery/reporting is unchanged.
    """
    def setUp(self):
        self._local_executor = j.tools.executor.getLocal()
        self.JSCuisine = JSCuisine(self._local_executor)
    def tearDown(self):
        pass
    def _assertAttrsNotNone(self, *paths):
        """Assert each dotted attribute path on self.JSCuisine is non-None."""
        for dotted in paths:
            value = self.JSCuisine
            for part in dotted.split('.'):
                value = getattr(value, part)
            self.assertIsNotNone(value)
    def test_create_cuisine2(self):
        """Test creating an instance"""
        self._assertAttrsNotNone(
            'core', 'tools.sshreflector', 'solutions.proxyclassic',
            'tools.bootmediainstaller', 'solutions.vrouter', 'tmux',
            'pnode', 'tools.stor')
    def test_create_cuisine2_platformtype(self):
        """Test accessing platformtype property"""
        self._assertAttrsNotNone('platformtype')
    def test_create_cuisine2_id(self):
        """Test accessing id property"""
        self._assertAttrsNotNone('id')
    def test_create_cuisine2_btrfs(self):
        """Test accessing btrfs property"""
        self._assertAttrsNotNone('btrfs')
    def test_create_cuisine2_package(self):
        """Test accessing package property"""
        self._assertAttrsNotNone('package')
    def test_create_cuisine2_process(self):
        """Test accessing process property"""
        self._assertAttrsNotNone('process')
    def test_create_cuisine2_pip_is_not_None(self):
        """Test accessing pip property"""
        self._assertAttrsNotNone('development.pip')
    def test_create_cuisine2_fw(self):
        """Test accessing fw property"""
        self._assertAttrsNotNone('systemservices.ufw')
    def test_create_cuisine2_golang(self):
        """Test accessing golang property"""
        self._assertAttrsNotNone('development.golang')
    def test_create_cuisine2_geodns(self):
        """Test accessing geodns property"""
        self._assertAttrsNotNone('apps.geodns')
    def test_create_cuisine2_apps(self):
        """Test accessing apps property"""
        self._assertAttrsNotNone('apps')
    @unittest.skip("Builder is removed while writing this")
    def test_create_cuisine2_builder(self):
        """Test accessing builder property"""
        self._assertAttrsNotNone('builder')
    def test_create_cuisine2_ns(self):
        """Test accessing ns property"""
        self._assertAttrsNotNone('ns')
    def test_create_cuisine2_docker(self):
        """Test accessing docker property"""
        self._assertAttrsNotNone('systemservices.docker')
    def test_create_cuisine2_ssh(self):
        """Test accessing ssh property"""
        self._assertAttrsNotNone('ssh')
    @unittest.skip("couldn't find avahi")
    def test_create_cuisine2_avahi(self):
        """Test accessing avahi property"""
        self._assertAttrsNotNone('avahi')
    def test_create_cuisine2_bash(self):
        """Test accessing bash property"""
        self._assertAttrsNotNone('bash')
    def test_create_cuisine2_net(self):
        """Test accessing net property"""
        self._assertAttrsNotNone('net')
    def test_create_cuisine2_user_is_not_None(self):
        """Test accessing user property"""
        self._assertAttrsNotNone('user')
    def test_create_cuisine2_group(self):
        """Test accessing group property"""
        self._assertAttrsNotNone('group')
    def test_create_cuisine2_git(self):
        """Test accessing git property"""
        self._assertAttrsNotNone('development.git')
    @mock.patch('JumpScale.tools.cuisine.ProcessManagerFactory.ProcessManagerFactory')
    def test_create_cuisine2_processmanager(self, processmanager_mock):
        """Test accessing processmanager property"""
        processmanager_mock.get.return_value = ProcessManagerFactory(self.JSCuisine)
        self.assertIsNotNone(self.JSCuisine.processmanager)
| StarcoderdataPython |
57231 | <filename>test/test_exec.py<gh_stars>0
from unittest import TestCase
from easy_exec import exec
class TestExec(TestCase):
    """Tests for easy_exec.exec: stdout/stderr capture and the error flag."""
    def test_stdout(self):
        # echo writes only to stdout and exits cleanly.
        stdout, stderr, has_error = exec('echo "hello world"')
        self.assertEqual('hello world\n', stdout)
        self.assertEqual('', stderr)
        self.assertFalse(has_error)
    def test_stderr(self):
        # logger -s echoes the message to stderr; the exact prefix varies by
        # platform, hence assertIn rather than an exact comparison.
        stdout, stderr, has_error = exec('logger -s error')
        self.assertEqual('', stdout)
        self.assertIn('error', stderr)
        self.assertFalse(has_error)
    def test_has_error(self):
        # Running a nonexistent module makes python3 exit non-zero,
        # which must surface as has_error=True.
        stdout, stderr, has_error = exec('python3 -m asdfljasldkjfaöldkf')
        self.assertEqual('', stdout)
        self.assertTrue(has_error)
| StarcoderdataPython |
from django.apps import AppConfig
class GoodscfConfig(AppConfig):
    """Django application configuration for the ``goodscf`` app."""
    name = 'goodscf'
| StarcoderdataPython |
4828527 | <reponame>Snehakri022/HackerrankPractice
# Problem: https://www.hackerrank.com/challenges/new-year-chaos/problem
# Score: 40
def minimum_bribes(queue):
    """Return the minimum number of bribes that produced *queue*, or None.

    HackerRank "New Year Chaos": each person may bribe (swap with) the person
    directly in front of them at most twice. Two right-to-left bubble passes
    therefore suffice to restore a reachable queue; the number of swaps is the
    number of bribes. If the queue is still unsorted afterwards, some person
    moved more than two places and the state is unreachable ("Too chaotic"),
    signalled by returning None. The input list is not mutated.
    """
    arr = list(queue)
    count = 0
    for _ in range(2):
        for j in range(len(arr) - 1, 0, -1):
            if arr[j] < arr[j - 1]:
                arr[j], arr[j - 1] = arr[j - 1], arr[j]
                count += 1
    return count if arr == sorted(arr) else None


if __name__ == '__main__':
    t = int(input())
    for _ in range(t):
        input()  # queue length; implied by the line that follows
        queue = list(map(int, input().split()))
        bribes = minimum_bribes(queue)
        print(bribes if bribes is not None else 'Too chaotic')
| StarcoderdataPython |
3236501 | <filename>embiggen/node_label_prediction/node_label_prediction_model.py<gh_stars>1-10
"""Module providing abstract node label prediction model."""
from typing import Optional, Union, List, Dict, Any, Tuple
import pandas as pd
import numpy as np
import warnings
from ensmallen import Graph
from embiggen.utils.abstract_models import AbstractClassifierModel, AbstractEmbeddingModel, abstract_class, format_list
@abstract_class
class AbstractNodeLabelPredictionModel(AbstractClassifierModel):
    """Class defining an abstract node label prediction model."""
    def __init__(self, random_state: Optional[int] = None):
        """Create new abstract node-label prediction model.

        Parameters
        ---------------
        random_state: Optional[int] = None
            The random state to use if the model is stochastic.
        """
        # Both task flags stay None until fit() inspects the training graph.
        self._is_binary_prediction_task = None
        self._is_multilabel_prediction_task = None
        super().__init__(random_state=random_state)
    @staticmethod
    def requires_node_types() -> bool:
        """Returns whether this method requires node types."""
        return True
    @staticmethod
    def task_name() -> str:
        """Returns name of the task this model is used for."""
        return "Node Label Prediction"
    @staticmethod
    def is_topological() -> bool:
        """Returns whether the model is topological."""
        return False
    @staticmethod
    def get_available_evaluation_schemas() -> List[str]:
        """Returns available evaluation schemas for this task."""
        return [
            "Stratified Monte Carlo",
            "Stratified Kfold",
            "Monte Carlo",
            "Kfold",
        ]
    def is_binary_prediction_task(self) -> bool:
        """Returns whether the model was fit on a binary prediction task."""
        return self._is_binary_prediction_task
    def is_multilabel_prediction_task(self) -> bool:
        """Returns whether the model was fit on a multilabel prediction task."""
        return self._is_multilabel_prediction_task
    @classmethod
    def split_graph_following_evaluation_schema(
        cls,
        graph: Graph,
        evaluation_schema: str,
        random_state: int,
        holdout_number: int,
        number_of_holdouts: int,
        **holdouts_kwargs: Dict
    ) -> Tuple[Graph]:
        """Return train and test graphs tuple following the provided evaluation schema.

        Parameters
        ----------------------
        graph: Graph
            The graph to split.
        evaluation_schema: str
            The evaluation schema to follow.
        random_state: int
            The random state for the evaluation
        holdout_number: int
            The current holdout number.
        number_of_holdouts: int
            The total number of holdouts.
        holdouts_kwargs: Dict[str, Any]
            The kwargs to be forwarded to the holdout method.
        """
        if evaluation_schema in ("Stratified Monte Carlo", "Monte Carlo"):
            return graph.get_node_label_holdout_graphs(
                **holdouts_kwargs,
                use_stratification="Stratified" in evaluation_schema,
                # Offset the seed so each holdout gets a distinct split.
                random_state=random_state+holdout_number,
            )
        if evaluation_schema in ("Kfold", "Stratified Kfold"):
            return graph.get_node_label_kfold(
                k=number_of_holdouts,
                k_index=holdout_number,
                use_stratification="Stratified" in evaluation_schema,
                random_state=random_state,
            )
        raise ValueError(
            f"The requested evaluation schema `{evaluation_schema}` "
            "is not available. The available evaluation schemas "
            f"are: {format_list(cls.get_available_evaluation_schemas())}."
        )
    @classmethod
    def _prepare_evaluation(
        cls,
        graph: Graph,
        train: Graph,
        test: Graph,
        support: Optional[Graph] = None,
        subgraph_of_interest: Optional[Graph] = None,
        random_state: int = 42,
        verbose: bool = True,
        **kwargs: Dict
    ) -> Dict[str, Any]:
        """Return additional custom parameters for the current holdout."""
        # Node-label prediction needs no extra per-holdout parameters.
        return {}
    def _evaluate(
        self,
        graph: Graph,
        train: Graph,
        test: Graph,
        support: Optional[Graph] = None,
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[str, pd.DataFrame, np.ndarray]]]] = None,
        node_type_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[str, pd.DataFrame, np.ndarray]]]] = None,
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[str, pd.DataFrame, np.ndarray]]]] = None,
        subgraph_of_interest: Optional[Graph] = None,
        random_state: int = 42,
        verbose: bool = True,
    ) -> List[Dict[str, Any]]:
        """Return model evaluation on the provided graphs."""
        # Fraction of labelled nodes that ended up in the training split.
        train_size = train.get_known_node_types_number() / graph.get_known_node_types_number()
        # Encode ground-truth labels to match the prediction format below.
        if self.is_multilabel_prediction_task():
            labels = graph.get_one_hot_encoded_node_types()
        elif self.is_binary_prediction_task():
            labels = graph.get_boolean_node_type_ids()
        else:
            labels = graph.get_single_label_node_type_ids()
        performance = []
        for evaluation_mode, evaluation_graph in (
            ("train", train),
            ("test", test),
        ):
            prediction_probabilities = self.predict_proba(
                evaluation_graph,
                support=support,
                node_features=node_features,
                node_type_features=node_type_features,
                edge_features=edge_features
            )
            # Turn probabilities into hard predictions according to the task.
            if self.is_binary_prediction_task():
                predictions = prediction_probabilities
            elif self.is_multilabel_prediction_task():
                predictions = prediction_probabilities > 0.5
            else:
                predictions = prediction_probabilities.argmax(axis=-1)
            # Evaluate only on nodes whose type is known.
            mask = evaluation_graph.get_known_node_types_mask()
            prediction_probabilities = prediction_probabilities[mask]
            predictions = predictions[mask]
            labels_subset = labels[mask]
            performance.append({
                "evaluation_mode": evaluation_mode,
                "train_size": train_size,
                "known_nodes_number": evaluation_graph.get_known_node_types_number(),
                **self.evaluate_predictions(
                    labels_subset,
                    predictions,
                ),
                **self.evaluate_prediction_probabilities(
                    labels_subset,
                    prediction_probabilities,
                ),
            })
        return performance
    def predict(
        self,
        graph: Graph,
        support: Optional[Graph] = None,
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
        node_type_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
    ) -> np.ndarray:
        """Execute predictions on the provided graph.

        Parameters
        --------------------
        graph: Graph
            The graph to run predictions on.
        support: Optional[Graph] = None
            The graph describing the topological structure that
            includes also the above graph. This parameter
            is mostly useful for topological classifiers
            such as Graph Convolutional Networks.
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The node features to use.
        node_type_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The node type features to use.
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The edge features to use.
        """
        if edge_features is not None:
            raise NotImplementedError(
                "Currently edge features are not supported in node-label prediction models."
            )
        if node_type_features is not None:
            raise NotImplementedError(
                "Support for node type features is not currently available for any "
                "of the node-label prediction models."
            )
        return super().predict(graph, support=support, node_features=node_features)
    def predict_proba(
        self,
        graph: Graph,
        support: Optional[Graph] = None,
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
        node_type_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
    ) -> np.ndarray:
        """Execute predictions on the provided graph.

        Parameters
        --------------------
        graph: Graph
            The graph to run predictions on.
        support: Optional[Graph] = None
            The graph describing the topological structure that
            includes also the above graph. This parameter
            is mostly useful for topological classifiers
            such as Graph Convolutional Networks.
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The node features to use.
        node_type_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The node type features to use.
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The edge features to use.
        """
        if edge_features is not None:
            raise NotImplementedError(
                "Currently edge features are not supported in node-label prediction models."
            )
        if node_type_features is not None:
            raise NotImplementedError(
                "Support for node type features is not currently available for any "
                "of the node-label prediction models."
            )
        return super().predict_proba(graph, support=support, node_features=node_features)
    def fit(
        self,
        graph: Graph,
        support: Optional[Graph] = None,
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
        node_type_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None,
    ):
        """Fit the model on the provided graph.

        Parameters
        --------------------
        graph: Graph
            The graph to run predictions on.
        support: Optional[Graph] = None
            The graph describing the topological structure that
            includes also the above graph. This parameter
            is mostly useful for topological classifiers
            such as Graph Convolutional Networks.
        node_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The node features to use.
        node_type_features: Optional[Union[str, pd.DataFrame, np.ndarray, AbstractEmbeddingModel, List[Union[str, pd.DataFrame, np.ndarray, AbstractEmbeddingModel]]]] = None
            The node type features to use.
        edge_features: Optional[Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]] = None
            The edge features to use.
        """
        if edge_features is not None:
            raise NotImplementedError(
                "Currently edge features are not supported in node-label prediction models."
            )
        if node_type_features is not None:
            raise NotImplementedError(
                "Support for node type features is not currently available for any "
                "of the node-label prediction models."
            )
        # Record the task type so predict()/evaluation can decode outputs.
        self._is_binary_prediction_task = graph.get_node_types_number() == 2
        self._is_multilabel_prediction_task = graph.has_multilabel_node_types()
        node_type_counts = graph.get_node_type_names_counts_hashmap()
        most_common_node_type_name, most_common_count = max(
            node_type_counts.items(),
            key=lambda x: x[1]
        )
        least_common_node_type_name, least_common_count = min(
            node_type_counts.items(),
            key=lambda x: x[1]
        )
        # Heuristic: warn when the majority class outnumbers the minority 20x.
        if most_common_count > least_common_count * 20:
            warnings.warn(
                (
                    "Please do be advised that this graph defines "
                    "an unbalanced node-label prediction task, with the "
                    "most common node type `{}` appearing {} times, "
                    "while the least common one, `{}`, appears only `{}` times. "
                    "Do take this into account when designing the node-label prediction model."
                ).format(
                    most_common_node_type_name, most_common_count,
                    least_common_node_type_name, least_common_count
                )
            )
        super().fit(
            graph=graph,
            support=support,
            node_features=node_features,
            edge_features=None,
        )
    @staticmethod
    def can_use_node_types() -> bool:
        """Returns whether the model can optionally use node types."""
        return True
    def is_using_node_types(self) -> bool:
        """Returns whether the model is parametrized to use node types."""
        return True
    @staticmethod
    def task_involves_edge_weights() -> bool:
        """Returns whether the model task involves edge weights."""
        return False
    @staticmethod
    def task_involves_edge_types() -> bool:
        """Returns whether the model task involves edge types."""
        return False
    @staticmethod
    def task_involves_node_types() -> bool:
        """Returns whether the model task involves node types."""
        return True
    @staticmethod
    def task_involves_topology() -> bool:
        """Returns whether the model task involves topology."""
        return False
| StarcoderdataPython |
60404 | <filename>test/sysl/test_sysldata.py<gh_stars>1-10
from sysl.core import syslloader, sysldata
import unittest
import re
import os
import sys
from os import path
import traceback
import tempfile
import argparse as ap
class TestSetOf(unittest.TestCase):
    """End-to-end tests for sysl data-view rendering and attribute parsing."""
    def setUp(self):
        # Diagram output is written into the system temp directory.
        self.outpath = tempfile.gettempdir()
    def test_set_of(self):
        """A "set of" relation must render as a `0..*` association."""
        try:
            (module, _, _) = syslloader.load('/test/data/test_data', True, '.')
            d = {
                'project': 'TestData :: Data Views',
                'output': path.join(self.outpath, 'test_set_of-data.png'),
                'plantuml': '',
                'verbose': '',
                'filter': ''}
            args = ap.Namespace(**d)
            out = sysldata.dataviews(module, args)
            setof_re = re.compile(r'_\d+\s+\*-- "0\.\.\*"\s+_\d+')
            self.assertTrue(setof_re.search(out[0]))
        # The former `except (IOError, Exception)` was redundant:
        # IOError is a subclass of Exception.
        except Exception:
            self.fail(traceback.format_exc())
    def test_at_prefixed_attr(self):
        """@-prefixed attributes must be parsed into app/endpoint/type attrs."""
        try:
            (module, _, _) = syslloader.load(
                '/test/data/test_at_prefixed_attr', True, '.')
            val_set = set(
                elt.s for elt in module.apps['TestData :: Top Level App'].endpoints['Second Level App'].attrs['bracketed_array_attr'].a.elt)
            self.assertTrue({'bval1', 'bval2'} & val_set)
            self.assertTrue('sla_attribute string' ==
                            module.apps['TestData :: Top Level App'].endpoints['Second Level App'].attrs['sla_attribute'].s)
            self.assertTrue(
                'test id' == module.apps['TestData :: Top Level App'].types['TestType'].attrs['id'].s)
        except Exception:
            self.fail(traceback.format_exc())
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4826105 | <gh_stars>0
from django import template
from django.db.models import Count, F, Q

from shop.models import Category
register = template.Library()


@register.simple_tag()
def get_categories():
    """Return categories having at least one published product, ordered by name.

    The per-category count of published products uses a conditional
    aggregate; the ``filter`` argument of ``Count`` must be a ``Q`` object
    (the previous ``F('product__is_published')`` is not a valid condition).
    """
    return Category.objects.annotate(
        cnt=Count('product', filter=Q(product__is_published=True))
    ).filter(cnt__gt=0).order_by('name')
| StarcoderdataPython |
3355222 | from clinicadl.utils.network.autoencoder.cnn_transformer import CNN_Transformer
from clinicadl.utils.network.cnn.models import Conv4_FC3, Conv5_FC3, resnet18
from clinicadl.utils.network.sub_network import AutoEncoder
class AE_Conv5_FC3(AutoEncoder):
    """Autoencoder whose encoder/decoder mirror the convolutional part of Conv5_FC3."""

    def __init__(self, input_size, use_cpu=False):
        # fmt: off
        backbone = Conv5_FC3(input_size=input_size, use_cpu=use_cpu)
        transformed = CNN_Transformer(backbone)
        # fmt: on
        super().__init__(
            encoder=transformed.encoder,
            decoder=transformed.decoder,
            use_cpu=use_cpu,
        )
class AE_Conv4_FC3(AutoEncoder):
    """Autoencoder whose encoder/decoder mirror the convolutional part of Conv4_FC3."""

    def __init__(self, input_size, use_cpu=False):
        # fmt: off
        backbone = Conv4_FC3(input_size=input_size, use_cpu=use_cpu)
        transformed = CNN_Transformer(backbone)
        # fmt: on
        super().__init__(
            encoder=transformed.encoder,
            decoder=transformed.decoder,
            use_cpu=use_cpu,
        )
| StarcoderdataPython |
3370962 | """Plotting methods."""
from collections import Counter
from itertools import cycle
from itertools import islice
import os
import pickle
import sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import LinearLocator
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import AutoMinorLocator
import numpy as np
import pandas as pd
import seaborn as sns
import six
from .helpers import identify_peaks
from .helpers import load_pickle
from .helpers import millify
from .helpers import round_to_nearest
from .helpers import set_xrotation
# Per-frame bar colors (ColorBrewer palette). The original file assigned a
# first palette and immediately overwrote it; the dead assignment is kept
# only as a comment for reference.
# __FRAME_COLORS__ = ["#1b9e77", "#d95f02", "#7570b3"]
__FRAME_COLORS__ = ["#fc8d62", "#66c2a5", "#8da0cb"]
# Resolution (dots per inch) used when saving figures.
DPI = 300
def setup_plot():
    """Apply project-wide matplotlib rcParams and the seaborn theme."""
    plt.rcParams.update({
        "savefig.dpi": 120,
        "figure.dpi": 120,
        "figure.autolayout": False,
        "figure.figsize": (12, 8),
        "axes.labelsize": 18,
        "axes.titlesize": 20,
        "font.size": 10,
        "lines.linewidth": 2.0,
        "lines.markersize": 8,
        "legend.fontsize": 14,
    })
    sns.set_style("white")
    sns.set_context("paper", font_scale=2)
def setup_axis(ax, axis="x", majorticks=5, minorticks=1, xrotation=45, yrotation=0):
    """Setup axes defaults
    Parameters
    ----------
    ax : matplotlib.Axes
    axis : str
        Setup 'x' or 'y' axis; 'both' applies the locators to both axes
    majorticks : int
        Length of interval between two major ticks
    minorticks : int
        Length of interval between two minor ticks
    xrotation : int
        Rotate x axis labels by xrotation degrees
    yrotation : int
        Rotate y axis labels by yrotation degrees
    """
    # Major ticks every `majorticks` units, labelled as integers; minor ticks
    # every `minorticks` units, unlabelled.
    major_locator = MultipleLocator(majorticks)
    major_formatter = FormatStrFormatter("%d")
    minor_locator = MultipleLocator(minorticks)
    if axis == "x":
        ax.xaxis.set_major_locator(major_locator)
        ax.xaxis.set_major_formatter(major_formatter)
        ax.xaxis.set_minor_locator(minor_locator)
    elif axis == "y":
        ax.yaxis.set_major_locator(major_locator)
        ax.yaxis.set_major_formatter(major_formatter)
        ax.yaxis.set_minor_locator(minor_locator)
    elif axis == "both":
        # Recurse once per axis; the shared styling below still runs for the
        # outer call afterwards.
        setup_axis(ax, "x", majorticks, minorticks, xrotation, yrotation)
        setup_axis(ax, "y", majorticks, minorticks, xrotation, yrotation)
    # NOTE(review): this unconditionally replaces the y-axis major locator
    # chosen above with an integer-only MaxNLocator -- confirm intended.
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    # ax.yaxis.set_minor_locator(AutoMinorLocator())#integer=True))
    ax.tick_params(which="major", width=2, length=10)
    ax.tick_params(which="minor", width=1, length=6)
    ax.tick_params(axis="x", labelrotation=xrotation)
    ax.tick_params(axis="y", labelrotation=yrotation)
    # ax.yaxis.set_major_locator(LinearLocator(10))
    # ax.yaxis.set_minor_locator(LinearLocator(10))
    # set_xrotation(ax, xrotation)
def plot_read_length_dist(
    read_lengths,
    ax=None,
    millify_labels=True,
    input_is_stream=False,
    title=None,
    saveto=None,
    ascii=False,
    **kwargs
):
    """Plot read length distribution.
    Parameters
    ----------
    read_lengths : array_like
        Array of read lengths; also accepts a Counter/Series of
        length -> count, a pickle path, or a tab-separated file path
    ax : matplotlib.Axes
        Axis object
    millify_labels : bool
        True if labels should be formatted to
        read millions/trillions etc
    input_is_stream : bool
        True if input is sent through stdin (lines of "<length>\\t<count>")
    title : str
        Optional title prepended to the "Total reads" line
    saveto : str
        Path to save output file to (<filename>.png/<filename>.pdf)
    ascii : bool
        If True, also render the histogram on stdout via gnuplotlib
    **kwargs
        Forwarded to ``setup_axis`` (majorticks/minorticks/xrotation/...)
    """
    # Normalize the many accepted input forms into length -> count data.
    if input_is_stream:
        counter = {}
        for line in read_lengths:
            splitted = list([int(x) for x in line.strip().split("\t")])
            counter[splitted[0]] = splitted[1]
        read_lengths = Counter(counter)
    elif isinstance(read_lengths, six.string_types):
        if ".pickle" in str(read_lengths):
            # Try opening as a pickle first
            read_lengths = load_pickle(read_lengths)
        elif isinstance(read_lengths, pd.Series):
            # NOTE(review): unreachable -- read_lengths is a string in this
            # branch, so it can never be a pd.Series here.
            pass
        else:
            # Some random encoding error
            try:
                read_lengths = pd.read_table(read_lengths)
                read_lengths = pd.Series(
                    read_lengths["count"].tolist(),
                    index=read_lengths.read_length.tolist(),
                )
            except KeyError:
                pass
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # Axis-styling defaults; the caller's kwargs take precedence.
    if "majorticks" not in kwargs:
        kwargs["majorticks"] = 5
    if "minorticks" not in kwargs:
        kwargs["minorticks"] = 1
    if "xrotation" not in kwargs:
        kwargs["xrotation"] = 0
    if isinstance(read_lengths, Counter) or isinstance(read_lengths, pd.Series):
        # Already length -> count pairs.
        read_lengths = pd.Series(read_lengths)
        read_lengths_counts = read_lengths.values
    else:
        # Raw list of lengths: histogram it.
        read_lengths = pd.Series(read_lengths)
        read_lengths_counts = read_lengths.value_counts().sort_index()
    ax.set_ylim(
        min(read_lengths_counts), round_to_nearest(max(read_lengths_counts), 5) + 0.5
    )
    ax.set_xlim(
        min(read_lengths.index) - 0.5,
        round_to_nearest(max(read_lengths.index), 10) + 0.5,
    )
    ax.bar(read_lengths.index, read_lengths_counts)
    setup_axis(ax, **kwargs)
    reads_total = millify(read_lengths_counts.sum())
    if title:
        ax.set_title("{}\n Total reads = {}".format(title, reads_total))
    else:
        ax.set_title("Total reads = {}".format(reads_total))
    if millify_labels:
        ax.set_yticklabels(list([millify(x) for x in ax.get_yticks()]))
    # sns.despine(trim=True, offset=20)
    if saveto:
        fig.tight_layout()
        if ".dat" in saveto:
            fig.savefig(saveto, format="png", dpi=DPI)
        else:
            fig.savefig(saveto, dpi=DPI)
    if ascii:
        # gnuplotlib is only needed for terminal rendering; import lazily.
        import gnuplotlib as gp
        sys.stdout.write(os.linesep)
        gp.plot(
            (read_lengths.index, read_lengths.values, {"with": "boxes"}),
            terminal="dumb 160, 40",
            unset="grid",
        )
        sys.stdout.write(os.linesep)
    return ax, fig
def plot_framewise_counts(
    counts,
    frames_to_plot="all",
    ax=None,
    title=None,
    millify_labels=False,
    position_range=None,
    saveto=None,
    ascii=False,
    input_is_stream=False,
    **kwargs
):
    """Plot framewise distribution of reads.
    Parameters
    ----------
    counts : Series
        A series with position as index and value as counts; also accepts
        a Counter, a pickle path, or stdin lines when ``input_is_stream``
    frames_to_plot : str or range
        A comma separated list of frames to highlight or a range
    ax : matplotlib.Axes
        Default none
    title : str
        Optional axes title
    millify_labels : bool
        Format y tick labels as millions/trillions etc
    position_range : str or range
        "start:stop" (inclusive) subset of positions to plot
    saveto : str
        Path to save output file to (<filename>.png/<filename>.pdf)
    ascii : bool
        If True, also render the bars on stdout via gnuplotlib
    """
    # setup_plot()
    if input_is_stream:
        # stdin lines are "<position>\t<count>".
        counts_counter = {}
        for line in counts:
            splitted = list([int(x) for x in line.strip().split("\t")])
            counts_counter[splitted[0]] = splitted[1]
        counts = Counter(counts_counter)
    elif isinstance(counts, six.string_types):
        try:
            # Try opening as a pickle first
            counts = load_pickle(counts)
        except KeyError:
            pass
    if isinstance(counts, Counter):
        counts = pd.Series(counts)
    # TODO
    # NOTE(review): frames_to_plot is parsed here but never used below.
    if isinstance(frames_to_plot, six.string_types) and frames_to_plot != "all":
        frames_to_plot = list([int(x) for x in frames_to_plot.rstrip().split(",")])
    if isinstance(position_range, six.string_types):
        splitted = list([int(x) for x in position_range.strip().split(":")])
        position_range = list(range(splitted[0], splitted[1] + 1))
    if position_range:
        counts = counts[list(position_range)]
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # Axis-styling defaults; the caller's kwargs take precedence.
    if "majorticks" not in kwargs:
        kwargs["majorticks"] = 10
    if "minorticks" not in kwargs:
        kwargs["minorticks"] = 5
    if "xrotation" not in kwargs:
        kwargs["xrotation"] = 90
    setup_axis(ax, **kwargs)
    ax.set_ylabel("Number of reads")
    # ax.set_xlim(
    # min(counts.index) - 0.6,
    # round_to_nearest(max(counts.index), 10) + 0.6)
    barlist = ax.bar(counts.index, counts.values)
    # Cycle the three frame colors across consecutive positions.
    barplot_colors = list(islice(cycle(__FRAME_COLORS__), None, len(counts.index)))
    for index, cbar in enumerate(barlist):
        cbar.set_color(barplot_colors[index])
    # NOTE(review): assumes at least 3 bars; raises IndexError otherwise.
    ax.legend(
        (barlist[0], barlist[1], barlist[2]),
        ("Frame 1", "Frame 2", "Frame 3"),
        bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
        loc=3,
        ncol=3,
        mode="expand",
        borderaxespad=0.0,
    )
    if title:
        ax.set_title(title)
    if millify_labels:
        ax.set_yticklabels(list([millify(x) for x in ax.get_yticks()]))
    if ascii:
        sys.stdout.write(os.linesep)
        import gnuplotlib as gp
        gp.plot(
            np.array(counts.index.tolist()),
            np.array(counts.values.tolist()),
            _with="boxes",  # 'points pointtype 0',
            terminal="dumb 200,40",
            unset="grid",
        )
        sys.stdout.write(os.linesep)
    set_xrotation(ax, kwargs["xrotation"])
    fig.tight_layout()
    if saveto:
        fig.tight_layout()
        fig.savefig(saveto, dpi=DPI)
    return ax
def plot_read_counts(
    counts,
    ax=None,
    marker=None,
    color="royalblue",
    title=None,
    label=None,
    millify_labels=False,
    identify_peak=True,
    saveto=None,
    position_range=None,
    ascii=False,
    input_is_stream=False,
    ylabel="Normalized RPF density",
    **kwargs
):
    """Plot RPF density around start/stop codons.
    Parameters
    ----------
    counts : Series/Counter
        A series with coordinates as index and counts as values; also
        accepts a pickle path, a tab-separated file path, or stdin lines
    ax : matplotlib.Axes
        Axis to create object on
    marker : string
        'o'/'x' (treated as a flag; the plotted marker is always 'o')
    color : string
        Line color
    title : string
        Optional axes title
    label : string
        Label (useful only if plotting multiple objects on same axes)
    millify_labels : bool
        True if labels should be formatted to
        read millions/trillions etc
    identify_peak : bool
        Mark the detected peak position with a dashed vertical line
    saveto : str
        Path to save output file to (<filename>.png/<filename>.pdf)
    position_range : str or array_like
        "start:stop" (inclusive) subset of coordinates to plot
    ylabel : str
        Y-axis label; skipped when falsy
    """
    # setup_plot()
    if input_is_stream:
        # stdin lines are "<position>\t<count>".
        counts_counter = {}
        for line in counts:
            splitted = list([int(x) for x in line.strip().split("\t")])
            counts_counter[splitted[0]] = splitted[1]
        counts = Counter(counts_counter)
    elif isinstance(counts, six.string_types):
        try:
            # Try opening as a pickle first
            counts = load_pickle(counts)
        except IndexError:
            # Not a pickle: fall back to a tab-separated position/count file.
            counts_pd = pd.read_table(counts)
            counts = pd.Series(
                counts_pd["count"].tolist(), index=counts_pd["position"].tolist()
            )
        except KeyError:
            pass
    if not isinstance(counts, pd.Series):
        counts = pd.Series(counts)
    if isinstance(position_range, six.string_types):
        splitted = list([int(x) for x in position_range.strip().split(":")])
        position_range = np.arange(splitted[0], splitted[1] + 1)
    if position_range is not None:
        counts = counts[position_range]
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # Axis-styling defaults; the caller's kwargs take precedence.
    if "majorticks" not in kwargs:
        kwargs["majorticks"] = 10
    if "minorticks" not in kwargs:
        kwargs["minorticks"] = 5
    if "xrotation" not in kwargs:
        kwargs["xrotation"] = 0
    if "yrotation" not in kwargs:
        kwargs["yrotation"] = 0
    if not marker:
        ax.plot(
            counts.index,
            counts.values,
            color=color,
            linewidth=1,
            markersize=1.5,
            label=label,
        )
    else:
        # NOTE(review): the marker argument's value is ignored; any truthy
        # value plots with marker "o".
        ax.plot(
            counts.index,
            counts.values,
            color=color,
            marker="o",
            linewidth=1,
            markersize=1.5,
            label=label,
        )
    # ax.set_xlim(round_to_nearest(ax.get_xlim()[0], 50) - 0.6,
    # round_to_nearest(ax.get_xlim()[1], 50) + 0.6)
    peak = None
    if identify_peak:
        # Mark the dominant peak with a dashed line and its coordinate.
        peak = identify_peaks(counts)
        ax.axvline(x=peak, color="r", linestyle="dashed")
        ax.text(peak + 0.5, ax.get_ylim()[1] * 0.9, "{}".format(peak), color="r")
    if millify_labels:
        ax.set_yticklabels(list([millify(x) for x in ax.get_yticks()]))
    setup_axis(ax, **kwargs)
    ax.set_xlim(
        round_to_nearest(min(counts.index), 10) - 1,
        round_to_nearest(max(counts.index), 10) + 1,
    )
    if ylabel:
        ax.set_ylabel(ylabel)
    if title:
        ax.set_title(title)
    # sns.despine(trim=True, offset=10)
    if saveto:
        fig.tight_layout()
        fig.savefig(saveto, dpi=DPI)
    if ascii:
        sys.stdout.write(os.linesep)
        import gnuplotlib as gp
        gp.plot(
            np.array(counts.index.tolist()),
            np.array(counts.values.tolist()),
            _with="lines",  # 'points pointtype 0',
            terminal="dumb 200,40",
            unset="grid",
        )
        sys.stdout.write(os.linesep)
    return ax, fig, peak
def plot_featurewise_barplot(
    utr5_counts, cds_counts, utr3_counts, ax=None, saveto=None, **kwargs
):
    """Plot barplots for 5'UTR/CDS/3'UTR counts.
    Parameters
    ----------
    utr5_counts : int or dict
        Total number of reads in 5'UTR region
        or alternatively a dictionary/series with
        genes as key and 5'UTR counts as values
    cds_counts : int or dict
        Total number of reads in CDs region
        or alternatively a dictionary/series with
        genes as key and CDS counts as values
    utr3_counts : int or dict
        Total number of reads in 3'UTR region
        or alternatively a dictionary/series with
        genes as key and 3'UTR counts as values
    ax : matplotlib.Axes
        Axis to draw on; a new figure is created when None
    saveto : str
        Path to save output file to (<filename>.png/<filename>.pdf)
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    barlist = ax.bar([0, 1, 2], [utr5_counts, cds_counts, utr3_counts])
    barlist[0].set_color("#1b9e77")
    barlist[1].set_color("#d95f02")
    barlist[2].set_color("#7570b3")
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(["5'UTR", "CDS", "3'UTR"])
    max_counts = np.max(np.hstack([utr5_counts, cds_counts, utr3_counts]))
    # Bug fix: guard the tick spacing with max(..., 1). For max_counts < 10
    # the previous integer division produced MultipleLocator(0), which breaks
    # matplotlib's tick generation.
    setup_axis(
        ax=ax,
        axis="y",
        majorticks=max(max_counts // 10, 1),
        minorticks=max(max_counts // 20, 1),
    )
    ax.set_ylabel("# RPFs")
    # sns.despine(trim=True, offset=10)
    if saveto:
        fig.tight_layout()
        fig.savefig(saveto, dpi=DPI)
    return ax, fig
def create_wavelet(data, ax):
    """Draw a Morlet continuous-wavelet power spectrum of *data* on *ax*.

    Parameters
    ----------
    data : pd.Series
        Signal indexed by position; unit spacing is assumed (dt = 1)
    ax : matplotlib.Axes
        Axes the spectrum is drawn on

    Returns
    -------
    tuple
        (iwave, period, power, sig95, coi): inverse transform, periods,
        scale-rectified power, 95% significance ratio, cone of influence
    """
    # pycwt is a third-party dependency, imported lazily.
    import pycwt as wavelet
    t = data.index
    N = len(data.index)
    # Remove the linear trend and normalize to unit variance.
    p = np.polyfit(data.index, data, 1)
    data_notrend = data - np.polyval(p, data.index)
    std = data_notrend.std()  # Standard deviation
    var = std ** 2  # Variance
    data_normalized = data_notrend / std  # Normalized dataset
    mother = wavelet.Morlet(6)
    dt = 1
    s0 = 2 * dt  # Starting scale: twice the sampling interval
    dj = 1 / 12  # Twelve sub-octaves per octaves
    J = 7 / dj  # Seven powers of two with dj sub-octaves
    alpha, _, _ = wavelet.ar1(data)  # Lag-1 autocorrelation for red noise
    wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
        data_normalized, dt=dt, dj=dj, s0=s0, J=J, wavelet=mother
    )
    iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std
    power = (np.abs(wave)) ** 2
    # NOTE(review): fft_power, glbl_power and glbl_signif are computed but
    # never used or returned -- confirm whether they were meant to be plotted.
    fft_power = np.abs(fft) ** 2
    period = 1 / freqs
    # Rectify power by scale so peaks at different scales are comparable.
    power /= scales[:, None]
    signif, fft_theor = wavelet.significance(
        1.0, dt, scales, 0, alpha, significance_level=0.95, wavelet=mother
    )
    sig95 = np.ones([1, N]) * signif[:, None]
    sig95 = power / sig95
    glbl_power = power.mean(axis=1)
    dof = N - scales  # Correction for padding at edges
    glbl_signif, tmp = wavelet.significance(
        var, dt, scales, 1, alpha, significance_level=0.95, dof=dof, wavelet=mother
    )
    # Filled contours of log2(power) over log2(period).
    levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
    ax.contourf(
        t,
        np.log2(period),
        np.log2(power),
        np.log2(levels),
        extend="both",
        cmap=plt.cm.viridis,
    )
    extent = [t.min(), t.max(), 0, max(period)]
    # Outline the 95% significance regions.
    ax.contour(
        t, np.log2(period), sig95, [-99, 1], colors="k", linewidths=2, extent=extent
    )
    # Shade the cone of influence, where edge effects dominate.
    ax.fill(
        np.concatenate([t, t[-1:] + dt, t[-1:] + dt, t[:1] - dt, t[:1] - dt]),
        np.concatenate(
            [np.log2(coi), [1e-9], np.log2(period[-1:]), np.log2(period[-1:]), [1e-9]]
        ),
        "k",
        alpha=0.3,
        hatch="x",
    )
    ax.set_title("Wavelet Power Spectrum")
    ax.set_ylabel("Frequency")
    # Power-of-two period ticks, labelled as frequencies (1/period).
    Yticks = 2 ** np.arange(0, np.ceil(np.log2(period.max())))
    ax.set_yticks(np.log2(Yticks))
    ax.set_yticklabels(np.round(1 / Yticks, 3))
    return (iwave, period, power, sig95, coi)
def plot_periodicity_df(df, saveto, cbar=False, figsize=(8, 8)):
    """Render per-fragment-length periodicity scores as an annotated heatmap.

    Parameters
    ----------------
    df: string
        Path to dataframe containing fragment length specific periodicities
    saveto: string
        Path to output plot file
    cbar: bool
        Whether to plot cbar or not
    figsize: tuple
        Figure width and height in inches
    """
    periodicities = pd.read_table(df, index_col=0)
    fig, ax = plt.subplots(figsize=figsize)
    sns.heatmap(
        periodicities.T,
        cmap="Blues",
        square=True,
        annot=True,
        cbar=cbar,
        ax=ax,
    )
    fig.tight_layout()
    fig.savefig(saveto)
| StarcoderdataPython |
1674144 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Orchestration GAE App."""
from cStringIO import StringIO
from datetime import datetime
import json
import logging
import os
import jinja2
from PIL import Image
import webapp2
import yaml
import cloudstorage as gcs
from models import Bitdoc
from google.appengine.api import images
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
# Set up the Jinja templating environment.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        os.path.dirname(__file__)))
# Load configuration options from settings.cfg file.
# safe_load constructs only plain Python objects (yaml.load without a Loader
# can execute arbitrary tags), and the 'with' block closes the file handle.
with open('settings.cfg', 'r') as config_file:
  config = yaml.safe_load(config_file)
MAIN_BUCKET = config['GCS']['MAIN_BUCKET']
BIT_BUCKET = config['GCS']['BIT_BUCKET']
TASKQUEUE = 'imagetasks'
# Get Task Queue.
processing_task_queue = taskqueue.Queue(TASKQUEUE)
#############
# Helper Functions
def GetGreeting(user, request):
  """Builds the login/logout HTML snippet shown in the page header."""
  if user:
    logged_in = ('Logged in as %s. <a href="%s"'
                 ' class="button">Log Out</a>')
    return logged_in % (user.nickname(), users.create_logout_url('/'))
  return ('<a href="%s" class="button">'
          'Please log in.</a>' % users.create_login_url(request.uri))
def GetImageLinkFromBucket(bucket_name, object_name):
  """Returns a public serving URL for a GCS object, or a placeholder URL.

  Args:
    bucket_name: Name of the GCS bucket containing the object.
    object_name: Name of the object within the bucket.
  """
  # Get Image location in GCS.
  gcs_image_location = '/gs/%s/%s' % (bucket_name, object_name)
  logging.info('Trying to get image link for "%s"', gcs_image_location)
  blob_key = blobstore.create_gs_key(gcs_image_location)
  # Try to get public image URL for entry creation.
  try:
    image_link = images.get_serving_url(blob_key, secure_url=True)
    logging.info('Image link: %s', image_link)
    return image_link
  except images.ObjectNotFoundError:
    # Object missing: fall back to the generic "no image" placeholder.
    logging.error('Could not find image link for %s.',
                  gcs_image_location)
    return 'http://commondatastorage.googleapis.com/8bit-images%2Fnoimage.gif'
  except Exception, e:
    # Broad catch keeps page rendering alive on any other failure.
    logging.error('Exception getting image link from bucket: %s', e)
    return ('http://commondatastorage.googleapis.com/8bit-images'
            '%2Fbucket_missing_image.png')
#############
# Request Handlers
class MainPage(webapp2.RequestHandler):
  """Main page and OCN handler."""

  def get(self):  # pylint: disable=g-bad-name
    """Renders the front page listing the 100 most recent bitdocs."""
    user = users.get_current_user()
    greeting = GetGreeting(user, self.request)
    # The dead `bitdocs = []` assignment that preceded this query was removed;
    # the query result is iterated directly by the template.
    bitdocs = db.GqlQuery('SELECT * FROM Bitdoc ORDER BY timestamp '
                          'DESC LIMIT 100')
    template = jinja_environment.get_template('index.html')
    template_data = {
        'greeting': greeting,
        'bitdocs': bitdocs
    }
    self.response.write(template.render(template_data))
class UploadPage(webapp2.RequestHandler):
  """Upload page handles uploading new images to cloud storage."""

  def get(self):  # pylint: disable=g-bad-name
    """Returns basic upload form."""
    user = users.get_current_user()
    greeting = GetGreeting(user, self.request)
    upload_url = '/upload'
    template = jinja_environment.get_template('upload.html')
    template_data = {
        'greeting': greeting,
        'upload_url': upload_url
    }
    self.response.write(template.render(template_data))

  def post(self):  # pylint: disable=g-bad-name
    """Handles image upload form post: validates the image, writes to GCS."""
    file_img = self.request.get('file')
    if not file_img:
      logging.error('No image uploaded.')
      self.error(400)
      # Bug fix: abort the handler; previously execution fell through and
      # crashed on the missing image data.
      return
    img_str = StringIO(file_img)
    contents = img_str.getvalue()
    try:
      img = Image.open(img_str)
    except IOError as e:
      logging.error('%s', e)
      self.error(404)
      # Bug fix: stop instead of continuing with an unbound `img`.
      return
    logging.info('FORMAT: %s', img.format)
    if img.format == 'JPEG':
      content_type = 'image/jpeg'
    elif img.format == 'PNG':
      content_type = 'image/png'
    else:
      logging.error('Unknown format: %s', img.format)
      content_type = 'text/plain'
    image_name = self.request.params['file'].filename
    logging.info('Uploading file "%s"...', image_name)
    # Strip the extension if present. Bug fix: the old check
    # `if image_name.find('.')` was truthy for -1 (no dot), which chopped
    # the last character off extension-less names.
    if '.' in image_name:
      image_name = image_name[:image_name.index('.')]
    # NOTE(review): '%Y_%M_%d_...' uses %M (minute) where %m (month) looks
    # intended; kept as-is since existing object names depend on it.
    filename = '/%s/%s_%s' % (MAIN_BUCKET,
                              image_name,
                              datetime.strftime(datetime.now(),
                                                '%Y_%M_%d_%H_%M_%S_%s'))
    user = users.get_current_user()
    if user:
      owner = user.nickname()
    else:
      owner = 'Anonymous'
    # Create new image file, tagging the uploader in object metadata.
    gcs_file = gcs.open(filename,
                        'w',
                        content_type=content_type,
                        options={'x-goog-meta-owner': owner})
    gcs_file.write(contents)
    gcs_file.close()
    logging.info('Uploaded file %s as %s in the cloud.', image_name, filename)
    self.redirect('/')
class UpdateWithBitifiedPic(webapp2.RequestHandler):
  """Handler for GCE callback with updated 8-bit image data."""

  def post(self):  # pylint: disable=g-bad-name
    """GCE callback: stores the converted 8-bit image link on the Bitdoc."""
    logging.debug(
        '%s\n\n%s',
        '\n'.join(['%s: %s' % x for x in self.request.headers.iteritems()]),
        self.request.body)
    bitdoc_id = self.request.get('id')
    # Update existing Bitdoc with image link and timestamp.
    bitdoc = db.get(bitdoc_id)
    if not bitdoc:
      logging.error('No Bitdoc found for id: %s', bitdoc_id)
      self.error(404)
      # Bug fix: return here; the original fell through and raised
      # AttributeError dereferencing the None bitdoc below.
      return
    status = self.request.get('status') == 'True'
    image_8bit_name = self.request.get('image_8bit_name')
    if status and image_8bit_name:
      bitdoc.image_8bit_link = GetImageLinkFromBucket(BIT_BUCKET,
                                                      image_8bit_name)
    else:
      # Conversion failed upstream: record the placeholder image.
      bitdoc.image_8bit_link = ('http://commondatastorage.googleapis.com/'
                                '8bit-images%2Fbucket_missing_image.png')
    bitdoc.timestamp_8bit = datetime.now()
    bitdoc.put()
    logging.info('Successfully updated Bitdoc %s with link %s',
                 bitdoc_id, bitdoc.image_8bit_link)
class ObjectChangeNotification(webapp2.RequestHandler):
  """Object Change Notification (OCN) handler for cloud storage upload."""

  def post(self):  # pylint: disable=g-bad-name
    """Handles Object Change Notifications.

    'sync' messages are the subscription handshake; 'exists' messages
    announce a new upload, for which a Bitdoc entry is created and a
    conversion task is queued.
    """
    logging.debug(
        '%s\n\n%s',
        '\n'.join(['%s: %s' % x for x in self.request.headers.iteritems()]),
        self.request.body)
    resource_state = self.request.headers['X-Goog-Resource-State']
    if resource_state == 'sync':
      logging.info('Sync OCN message received.')
    elif resource_state == 'exists':
      logging.info('New file upload OCN message received.')
      data = json.loads(self.request.body)
      bucket = data['bucket']
      object_name = data['name']
      # Get Image location in GCS.
      gcs_image_location = '/gs/%s/%s' % (bucket, object_name)
      blob_key = blobstore.create_gs_key(gcs_image_location)
      # Try and get username from metadata.
      if data.has_key('metadata') and data['metadata'].has_key('owner'):
        owner = data['metadata']['owner']
      else:
        owner = data['owner']['entity']
      # Try to get public image URL for entry creation.
      image_link = None
      try:
        image_link = images.get_serving_url(blob_key, secure_url=True)
      except images.ObjectNotFoundError:
        logging.error('Could not find image link for %s.',
                      gcs_image_location)
      except images.TransformationError:
        logging.error('Could not convert link to image: %s.',
                      gcs_image_location)
      if image_link:
        # Record the upload, then queue it for 8-bit conversion by the
        # worker that pulls from processing_task_queue.
        bitdoc = Bitdoc(user=owner,
                        image_link=image_link,
                        file_name=object_name)
        logging.info('Creating Entry... %s - %s',
                     bitdoc.user,
                     bitdoc.image_link)
        # timestamp auto.
        bitdoc.put()
        # Add Task to pull queue.
        info = {'key': unicode(bitdoc.key()),
                'image_link': unicode(image_link)}
        processing_task_queue.add(taskqueue.Task(payload=json.dumps(info),
                                                 method='PULL'))
class Delete(webapp2.RequestHandler):
  """Bitdoc removal handler."""

  def post(self):  # pylint: disable=g-bad-name
    """Deletes the bitdoc named by the 'id' parameter, then goes home."""
    entry = db.get(self.request.get('id'))
    if entry:
      entry.delete()
    self.redirect('/')
class GetEntry(webapp2.RequestHandler):
  """Bitdoc single entry handler."""

  def get(self, key):  # pylint: disable=g-bad-name
    """Returns single entry in html or json format."""
    bitdoc = db.get(key)
    if not bitdoc:
      self.error(404)
      # Bug fix: return here; the original fell through and dereferenced
      # the None bitdoc below.
      return
    mode = self.request.get('mode')
    if mode == 'json':
      data = {
          'user': bitdoc.user,
          'timestamp': bitdoc.timestamp_strsafe,
          'image_link': bitdoc.image_link,
          'image_8bit_link': bitdoc.image_8bit_link,
          'timestamp_8bit': bitdoc.timestamp_8bit_strsafe,
          'key': str(bitdoc.key())
      }
      self.response.headers['Content-Type'] = 'application/json'
      # Bug fix: serialize to real JSON. Writing the dict directly emitted
      # its Python repr (single quotes), which is not valid JSON despite
      # the Content-Type header.
      self.response.write(json.dumps(data))
    else:
      # Get Logged in user data.
      user = users.get_current_user()
      greeting = GetGreeting(user, self.request)
      template = jinja_environment.get_template('single_entry.html')
      template_data = {
          'greeting': greeting,
          'bitdoc': bitdoc
      }
      self.response.write(template.render(template_data))
class GetImage(webapp2.RequestHandler):
  """Bitdoc single image handler."""

  def get(self, key):  # pylint: disable=g-bad-name
    """Redirects to the processed (default) or original image of an entry."""
    bitdoc = db.get(key)
    if not bitdoc:
      self.error(404)
      # Bug fix: stop here instead of dereferencing the None bitdoc below.
      return
    mode = self.request.get('mode')
    if mode == 'original':
      self.redirect(str(bitdoc.image_link))
    else:
      self.redirect(str(bitdoc.image_8bit_link))
# URL routing table. Order matters: '/get/image/(.*)' must precede the more
# general '/get/(.*)' so image requests are not swallowed by GetEntry.
application = webapp2.WSGIApplication([('/', MainPage),
                                       ('/ocn', ObjectChangeNotification),
                                       ('/delete', Delete),
                                       ('/get/image/(.*)', GetImage),
                                       ('/get/(.*)', GetEntry),
                                       ('/update', UpdateWithBitifiedPic),
                                       ('/upload', UploadPage)
                                       ],
                                      debug=True)
| StarcoderdataPython |
1685837 | from datetime import datetime, timedelta
from calelib import Constants
def get_deadline(deadline_string):
    """Convert a human-entered deadline into a future ``datetime``.

    :param deadline_string: text like ``"12 March"`` or ``"12 March, 18:30"``
    :return: the matching ``datetime``; rolled to next year if the moment
        has already passed this year. A date-only deadline means "end of
        that day", i.e. midnight of the following day.
    """
    pieces = deadline_string.split(',')
    if len(pieces) > 1:
        date_part, time_part = (piece.strip() for piece in pieces)
    else:
        date_part, time_part = deadline_string, None
    now = datetime.now()
    deadline = datetime.strptime(date_part + str(now.year), '%d %B%Y')
    if time_part:
        hour, minutes = (int(piece) for piece in time_part.split(':'))
        deadline = deadline.replace(hour=hour, minute=minutes)
    else:
        deadline += timedelta(days=1)
    if deadline < now:
        return deadline.replace(year=now.year + 1)
    return deadline
def parse_iso_pretty(date_iso):
    """Format a datetime as a short human-readable date.

    :param date_iso: a ``datetime``/``date`` object
    :return: string formatted as ``"DD Mon YYYY"``, e.g. ``"05 Mar 2021"``
    """
    return date_iso.strftime('%d %b %Y')
def get_first_weekday(month, year):
    """
    Get the weekday of the first day of a month.

    :param month: month to show (1..12)
    :param year: year to show
    :return: int value [1..7], where 1 is Monday and 7 is Sunday
    """
    # Bug fix: build the date directly instead of round-tripping through
    # strptime. Parsing '1' + str(month) + str(year) with '%d%m%Y' is
    # ambiguous for months 11 and 12 -- e.g. '1122021' was greedily parsed
    # as day=11, month=2, giving the wrong weekday.
    first_day = datetime(int(year), int(month), 1)
    return first_day.weekday() + 1
def get_month_number(str_month):
    """Map an English month name (any case, first 3 letters used) to 1..12.

    :raises ValueError: if the prefix is not a month abbreviation
    """
    abbreviations = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                     'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
    return abbreviations.index(str_month[:3].lower()) + 1
def get_month_word(number):
    """Return the lowercase English name of month *number* (1..12)."""
    names = ('january', 'february', 'march', 'april', 'may', 'june',
             'july', 'august', 'september', 'october', 'november', 'december')
    return names[number - 1]
def get_weekday_number(str_weekday):
    """
    Map an English weekday name (any case, first 3 letters used) to a number.

    :param str_weekday: weekday name, e.g. 'mon' or 'Monday'
    :return: integer in [0..6], where 0 is Monday and 6 is Sunday
    """
    abbreviations = ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
    return abbreviations.index(str_weekday[:3].lower())
def get_weekday_word(number):
    """
    Return the lowercase English weekday name for an index.

    :param number: weekday index in [0..6], 0 being Monday
    :return: full weekday name, e.g. 'monday'
    """
    names = ('monday', 'tuesday', 'wednesday', 'thursday',
             'friday', 'saturday', 'sunday')
    return names[number]
def parse_period(period_type, period_value):
    """
    Normalize a user-entered repetition spec into plan-readable data.

    :param period_type: one of 'day', 'week', 'month', 'year'
    :param period_value: the period description for that type
    :return: tuple (repeat_type constant, period dict); (None, None) for an
        unknown period_type
    """
    repeat_type = None
    period = None
    if period_type == 'day':
        repeat_type = Constants.REPEAT_DAY
        period = {'day': int(period_value)}
    elif period_type == 'week':
        # Whitespace-separated weekday names, deduplicated.
        day_names = period_value.strip().split()
        period = {'days': list({get_weekday_number(name) for name in day_names})}
        repeat_type = Constants.REPEAT_WEEKDAY
    elif period_type == 'month':
        # "<day> <month> [<month> ...]", months deduplicated.
        tokens = period_value.strip().split()
        month_numbers = {get_month_number(name) for name in tokens[1:]}
        period = {
            'months': list(month_numbers),
            'day': int(tokens[0])
        }
        repeat_type = Constants.REPEAT_MONTH
    elif period_type == 'year':
        # "<day> <month>".
        tokens = period_value.strip().split()
        repeat_type = Constants.REPEAT_YEAR
        period = {
            'day': int(tokens[0]),
            'month': get_month_number(tokens[1])
        }
    return repeat_type, period
def parse_time(string_time):
    """
    Parse a plan time given as ``"HH:MM"`` or just ``"HH"``.

    :param string_time: time string, e.g. ``"8"`` or ``"08:30"``
    :return: dict with keys ``hour``, ``minutes`` and ``with_minutes``
    :raises ValueError: if the hour or minutes are out of range
    """
    hm_time = {'hour': None,
               'minutes': None,
               'with_minutes': None
               }
    if ':' in string_time:
        parts = string_time.split(':')
        hm_time['hour'] = int(parts[0])
        hm_time['minutes'] = int(parts[1])
        hm_time['with_minutes'] = True
    else:
        hm_time['hour'] = int(string_time)
        hm_time['with_minutes'] = False
    # Bug fix: validate in both branches. Previously an hour-only input
    # like "99" was accepted silently because the range check only ran
    # for the HH:MM form.
    if hm_time['hour'] > 24 or hm_time['hour'] < 0:
        raise ValueError
    if hm_time['with_minutes'] and (
            hm_time['minutes'] > 60 or hm_time['minutes'] < 0):
        raise ValueError
    return hm_time
def parse_remind_type(string_type):
    """Translate a remind-unit keyword into the matching Constants value.

    Any keyword other than 'min', 'hour' or 'day' maps to months, matching
    the original catch-all behavior.
    """
    mapping = {
        'min': Constants.REMIND_MINUTES,
        'hour': Constants.REMIND_HOURS,
        'day': Constants.REMIND_DAYS,
    }
    return mapping.get(string_type, Constants.REMIND_MONTHS)
| StarcoderdataPython |
4838795 | <gh_stars>1-10
# Exercise 1
# Напишете програма, която дава възможност на потребителя да въвежда неограничен брой цели числа.
# Да се изведе какъв е процента на числата, които са кратни на 7, резултата да се закръгли до втория
# знак след десетичната запетая. Да се изведе каква е сумата на числата, които не са кратни на 7.
# Да се изведе максималното число измежду всички числа. Да се намери средноаритметичното на числата
# кратни на седем, резултата да се закръгли до втория знак след десетичната запетая.
from sys import maxsize
# Exercise 1: read integers until "stop", then report the percentage of
# inputs divisible by 7, the sum of the rest, the maximum, and the average
# of the multiples of 7.
sevens_sum = 0
sevens_count = 0
max_num = - maxsize  # smallest possible starting maximum
not_sevens_sum = 0;
total_numbers_count = 0
while True:
    number = input()
    if number.lower() != "stop":
        number = int(number)
        total_numbers_count += 1
        if number % 7 == 0:
            # Multiple of 7: accumulate separately for the average.
            sevens_sum += number
            sevens_count += 1
            if number > max_num:
                max_num = number
        else:
            not_sevens_sum += number
            if number > max_num:
                max_num = number
    else:
        break
# NOTE(review): raises ZeroDivisionError when no numbers were entered, and
# again below when no multiple of 7 was entered -- confirm intended.
percent_divided_by_seven = sevens_count / total_numbers_count * 100
average_numbers_divided_by_seven = sevens_sum / sevens_count
print(f"{percent_divided_by_seven:.2f}")
print(f"{not_sevens_sum}")
print(f"{max_num}")
print(F"{average_numbers_divided_by_seven:.2f}")
# Exercise 2
# Да се напише програма, която реализира играта „познай числото“ : потребителя въвежда цяло число в
# интервала от 1 до 10. Програмата също генерира едно случайно число. Ако потребителя е въвел същото
# число, което е генерирано от програмата се принтира съобщение : „Позна числото“, в противен случай
# се принтира съобщение : „Съжалявам не позна“. И така продължава този процес на въвеждане на число
# докато потребителя не въведе 0, нулата е знак за край на играта. След като приключи играта на екрана
# да се принтира общия брой опити на потребителя, както и броя на познатите числа.
import random
# Exercise 2: "guess the number" game. Each round a random number in [1, 10]
# is generated; the user's guess is compared against it. Entering 0 quits,
# after which the total attempts and correct guesses are printed.
total_numbers_count = 0
guessed_numbers_count = 0
while True:
    generated_num = random.randrange(1, 11)  # random integer in [1, 10]
    number = int(input("Enter number between 1 - 10 or 0 to quit: "))
    # NOTE(review): the quitting "0" entry is also counted as an attempt.
    total_numbers_count += 1
    if number == 0:
        break
    else:
        if number == generated_num:
            guessed_numbers_count += 1
            print("Позна числото")
        else:
            print("Съжалявам не позна")
print(f"Брой опити: {total_numbers_count}")
print(f"Брой познати числа: {guessed_numbers_count}")
# Exercise 3
# Сезона за каране на ски е вече тук. Сформира се голяма група приятели, които възнамеряват да отидат на ски почивка.
# Някои от тях се нуждаят от подновяване на екипировката си. Ето защо първоначално трябва да се прочете броя на скиорите,
# които ще купят екипировка. След това на отделни редове получаваме броя на якета, каски и ски обувките, които ще
# бъдат закупени от един скиор. Трябва да се има предвид следния ценоразпис:
# • Якета - 120 лв.
# • Каски – 75 лв.
# • Комплект обувки – 299,90 лв.
# Към крайната сума се начислява допълнително 20% ДДС. Да се напише програма, която изчислява общата сумата,
# която скиорите трябва да заплатят( парите, които скиорите трябва да платят, форматирани до втория знак след десетичната запетая) .
# Забележка: Един скиор съвсем спокойно може да закупи повече от 1 яке, каска или комплект обувки
# Exercise 3: ski equipment bill. Per-skier quantities are multiplied by the
# number of skiers and by the unit prices (jacket 120 lv, helmet 75 lv,
# boots 299.90 lv); 20% VAT is added to the total.
skier_equipment_amount = int(input("Enter the amount of skiers willing to buy equipment: "))
jacket_for_one_skier = int(input("Enter the amount of jackets: "))
helmet_for_one_skier = int(input("Enter the amount of helmets: "))
boots_for_one_skier = int(input("Enter the amount of boots: "))
jacket_price = skier_equipment_amount * jacket_for_one_skier * 120
helmet_price = skier_equipment_amount * helmet_for_one_skier * 75
boots_price = skier_equipment_amount * boots_for_one_skier * 299.90
total_price = jacket_price + helmet_price + boots_price
total_price_plus_dds = total_price + total_price * 0.2  # add 20% VAT
print(f"{total_price_plus_dds:.2f}")
# Exercise 4
# Напишете програма меню със следните опции:
# - добавяне на студент
# - търсене на студент по език
# - извеждане на информация за всички студенти
# - изход
# За целта създайте клас Student със полета: номер на студент, име на студент и език за програмиране.
# Дефинирайте и методи: get_id(self), get_name(self), get_language(self) , които връщат стойностите на полетата.
# Информацията за студентите се пази в текстов файл.
# Дефинирайте функция Add_student(student),която се използва за добавяне на информация за нов студент в текстов файл.
# Дефинирайте функция Search(language) за търсене на студент по език.
# Дефинирайте функция Display() за принтиране на информация за всички студенти .
class Student:
    """A student record: ID, name and programming language of study."""

    def __init__(self, id, name, programming_language):
        self.id = id
        self.name = name
        self.programming_language = programming_language

    def get_id(self):
        """Return a printable description of the student's ID."""
        return f"Student's ID: {self.id}"

    def get_name(self):
        """Return a printable description of the student's name."""
        return f"Student's name: {self.name}"

    def get_language(self):
        """Return a printable description of the student's language."""
        # Fixed typo in the user-facing string: "Stuedent's" -> "Student's".
        return f"Student's programming language: {self.programming_language}"
def Add_student(student):
    """Append one student's record to doc.txt as 'id, name, language'."""
    # Use a context manager so the handle is closed even on error, and end
    # the record with a newline so entries stay one-per-line (the original
    # ran all records together on a single line).
    with open("doc.txt", "a+") as f:
        f.write(f"{student.id}, {student.name}, {student.programming_language}\n")
# Demo: create a student, print its details and persist it to doc.txt.
student1 = Student(123123123, "Ivan", "Python")  # fixed data typo "Pyhton"
print(student1.get_id())
print(student1.get_name())
print(student1.get_language())
Add_student(student1)
| StarcoderdataPython |
"""Tests for ``from_datatimes`` generator class method."""
import math
import random
import numpy as np
from waves import Sound
def test_from_datatimes_mono(mono_ttf_gen):
    """A mono Sound built from a time->frame function is lazy and unbounded."""
    # mono_ttf_gen is a pytest fixture (defined in the test suite's conftest)
    # that builds a time->frame callable -- presumably a 16-bit sine
    # generator; confirm against the fixture definition.
    fps, frequency, volume = (44100, 110, 0.5)
    time_to_frame = mono_ttf_gen(fps=fps, frequency=frequency, volume=volume)
    sound = Sound.from_datatimes(time_to_frame, fps=fps)
    # Function-backed sounds have 2-byte samples and no frame count: the
    # stream is conceptually infinite.
    assert sound.n_bytes == 2
    assert sound.n_frames is None
    assert sound.duration == math.inf
    # The sound must delegate frame generation to the function it wraps.
    times = [random.uniform(0, 5) for i in range(10)]
    for t in times:
        assert time_to_frame(t) == sound.time_to_frame(t)
def test_from_datatimes_stereo():
    """A stereo Sound built from per-channel time->frame functions is lazy."""
    fps, frequencies, volume = (44100, (110, 440), 0.5)
    amplitude = np.iinfo(np.int16).max * volume

    # PEP 8 (E731): named functions instead of lambda assignments; each
    # channel is a 16-bit sine at its own frequency.
    def time_to_frame_left(t):
        return (np.sin(frequencies[0] * 2 * np.pi * t) * amplitude).astype(np.int16)

    def time_to_frame_right(t):
        return (np.sin(frequencies[1] * 2 * np.pi * t) * amplitude).astype(np.int16)

    sound = Sound.from_datatimes(
        lambda t: [time_to_frame_left(t), time_to_frame_right(t)], fps=fps
    )
    # Function-backed sounds: 2-byte samples, no frame count, infinite.
    assert sound.n_bytes == 2
    assert sound.n_frames is None
    assert sound.duration == math.inf
    # Each channel must delegate to its own generator function.
    times = [random.uniform(0, 5) for i in range(10)]
    for t in times:
        frame = sound.time_to_frame(t)
        assert frame[0] == time_to_frame_left(t)
        assert frame[1] == time_to_frame_right(t)
| StarcoderdataPython |
import sys
import numpy as np
import pandas as pd
#py 101903371.py 101903371-data.csv 1,1,1,2,2 +,-,+,-,+ 101903371-result.csv
def topsis(filename, weights, impacts, output_filename):
    """Rank the rows of a decision matrix with the TOPSIS method.

    Reads ``filename`` (first column: alternative names; remaining columns:
    numeric criteria), vector-normalises and weights the matrix, measures
    each row's Euclidean distance to the ideal best/worst alternatives, and
    writes the input data plus 'Topsis Score' and 'Ranks' columns to
    ``output_filename``.  Exits via ``sys.exit`` with a message on invalid
    input.

    Fixes vs. the original: a stray ``\\`` line continuation before the rank
    loop made the module a SyntaxError; the bare ``except:`` swallowed every
    exception (including the ``SystemExit`` raised by the validations) and
    misreported it as "File Not Found"; weight/impact length validation was
    commented out.
    """
    # Both paths must name .csv files.
    if filename.split(".")[-1] != "csv":
        sys.exit("Error: Please enter a valid input csv file path.")
    if output_filename.split(".")[-1] != "csv":
        sys.exit("Error: Please enter a valid output csv file path.")
    # Only a missing input file is reported here; other errors propagate.
    try:
        df = pd.read_csv(filename)
    except FileNotFoundError:
        sys.exit("File Not Found: You need to specify the file path. Incorrect file path detected. Recheck the full file path.")
    if len(df.columns) < 3:
        sys.exit("Error: Input file must contain three or more columns.")
    # All criteria columns must be numeric and free of NaNs.
    try:
        matrix = df.iloc[:, 1:].to_numpy(dtype=float)
    except (TypeError, ValueError):
        sys.exit("Error: From 2nd to last columns must contain numeric values only.")
    if np.isnan(matrix).any():
        sys.exit("Error: From 2nd to last columns must contain numeric values only.")
    nrows, ncols = matrix.shape
    # Validate weights/impacts against the number of criteria columns.
    if len(weights) != ncols:
        sys.exit("Error: Number of weights should be equal to number of columns.")
    if len(impacts) != ncols:
        sys.exit("Error: Number of impacts should be equal to number of columns.")
    for i in impacts:
        if i not in ['+', '-']:
            sys.exit("Error: Impacts should contain '+' or '-' signs only ")
    # Vector-normalise each column (divide by its root-sum-square), weight.
    weighted = matrix / np.sqrt((matrix ** 2).sum(axis=0)) * np.asarray(weights, dtype=float)
    # Ideal best/worst per column: max is best for '+' (benefit) criteria,
    # min is best for '-' (cost) criteria.
    is_benefit = np.asarray(impacts) == '+'
    ideal_best = np.where(is_benefit, weighted.max(axis=0), weighted.min(axis=0))
    ideal_worst = np.where(is_benefit, weighted.min(axis=0), weighted.max(axis=0))
    # Euclidean distances to the ideals and the TOPSIS performance score.
    dist_best = np.sqrt(((weighted - ideal_best) ** 2).sum(axis=1))
    dist_worst = np.sqrt(((weighted - ideal_worst) ** 2).sum(axis=1))
    scores = dist_worst / (dist_worst + dist_best)
    df["Topsis Score"] = scores
    # Highest score gets rank 1; ties share the lower rank, matching the
    # original sorted(...).index approach.
    df["Ranks"] = pd.Series(scores).rank(ascending=False, method="min").astype(int).to_numpy()
    df.to_csv(output_filename)
if __name__ == "__main__":
| StarcoderdataPython |
3385723 | <reponame>jimnarey/alu_auto_builder
from shared import configs, help_messages
import runners
# ---------------------------------------------------------------------------
# Option descriptors shared by the CLI and GUI front-ends.
#
# Each *_opt dict describes one option: its long name, one-letter CLI flag
# ('cli_short'), whether the GUI treats it as required, the widget/input type
# ('file_open' / 'file_save' / 'dir' / 'text' / 'bool') and its help text.
# A 'selections' key, when present, restricts the value to a fixed set.
# ---------------------------------------------------------------------------
input_path_opt = {
    'name': 'input_path',
    'cli_short': 'i',
    'gui_required': True,
    'type': 'file_open',
    'help': help_messages.INPUT_PATH
}
input_dir_opt = {
    'name': 'input_dir',
    'cli_short': 'i',
    'gui_required': True,
    'type': 'dir',
    'help': help_messages.INPUT_DIR
}
output_path_opt = {
    'name': 'output_path',
    'cli_short': 'o',
    'gui_required': False,
    'type': 'file_save',
    'help': help_messages.OUTPUT_PATH
}
output_dir_opt = {
    'name': 'output_dir',
    'cli_short': 'o',
    'gui_required': False,
    'type': 'dir',
    'help': help_messages.OUTPUT_DIR
}
# Options specific to building UCEs: the emulator core and optional BIOS dir.
extra_build_opts = (
    {
        'name': 'core_path',
        'cli_short': 'c',
        'gui_required': True,
        'type': 'file_open',
        'help': help_messages.CORE_PATH
    },
    {
        'name': 'bios_dir',
        'cli_short': 'b',
        'gui_required': False,
        'type': 'dir',
        'help': help_messages.BIOS_DIR
    }
)
# Target platform; valid values come from shared.configs.PLATFORMS.
platform_opt = {
    'name': 'platform',
    'cli_short': 'p',
    'gui_required': True,
    'type': 'text',
    'help': help_messages.PLATFORM,
    'selections': configs.PLATFORMS.keys()
}
# Scraper configuration: module choice, credentials and scrape toggles.
other_scrape_opts = (
    {
        'name': 'scrape_module',
        'cli_short': 's',
        'gui_required': False,
        'type': 'text',
        'help': help_messages.SCRAPE_MODULE,
        'selections': configs.SCRAPING_MODULES
    },
    {
        'name': 'user_name',
        'cli_short': 'u',
        'gui_required': False,
        'type': 'text',
        'help': help_messages.USER_NAME
    },
    {
        'name': 'password',
        'cli_short': 'q',
        'gui_required': False,
        'type': 'text',
        'help': help_messages.PASSWORD
    },
    {
        'name': 'scrape_videos',
        'cli_short': 'V',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.SCRAPE_VIDEOS
    },
    {
        'name': 'refresh_rom_data',
        'cli_short': 'R',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.REFRESH_ROM_DATA
    }
)
# Toggles for exporting CoinOpsX assets and BitPixel marquees.
export_assets_opts = (
    {
        'name': 'export_cox_assets',
        'cli_short': 'C',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.EXPORT_COX_ASSETS
    },
    {
        'name': 'export_bitpixel_marquees',
        'cli_short': 'Q',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.EXPORT_BITPIXEL_MARQUEES
    }
)
# Single boolean toggles.  NOTE(review): the 'scape' spelling below is kept
# as-is because the name is referenced throughout the operations table.
do_bezel_scape_opt = {
    'name': 'do_bezel_scrape',
    'cli_short': 'B',
    'gui_required': False,
    'type': 'bool',
    'help': help_messages.DO_BEZEL_SCRAPE
}
do_summarise_gamelist_opt = {
    'name': 'do_summarise_gamelist',
    'cli_short': 'S',
    'gui_required': False,
    'type': 'bool',
    'help': help_messages.DO_SUMMARISE_GAMELIST
}
# Tuning options for matching bezels against gamelist entries.
add_bezels_to_gamelist_opts = (
    {
        'name': 'min_match_score',
        'cli_short': 'm',
        'gui_required': False,
        'type': 'text',
        'help': help_messages.MIN_MATCH_SCORE
    },
    {
        'name': 'compare_filename',
        'cli_short': 'F',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.COMPARE_FILENAME
    },
    {
        'name': 'filter_unsupported_regions',
        'cli_short': 'U',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.FILTER_UNSUPPORTED_REGIONS
    }
)
# Save-partition manipulation options.
replace_save_part_opt = {
    'name': 'part_path',
    'cli_short': 'p',
    'gui_required': True,
    'type': 'file_open',
    'help': help_messages.PART_PATH
}
edit_save_part_opts = (
    {
        'name': 'mount_method',
        'cli_short': 'M',
        'gui_required': False,
        'type': 'bool',
        'help': help_messages.MOUNT_METHOD
    },
    {
        'name': 'file_manager',
        'cli_short': 'f',
        'gui_required': False,
        'type': 'text',
        'help': help_messages.FILE_MANAGER}
)
backup_save_part_opt = {
    'name': 'backup_uce',
    'cli_short': 'B',
    'gui_required': False,
    'type': 'bool',
    'help': help_messages.BACKUP_UCE
}
# Composite option sets reused by several operations below.
scrape_and_build_opts = (input_dir_opt, output_dir_opt, *extra_build_opts, platform_opt, *other_scrape_opts)
build_from_game_list_opts = (input_path_opt, output_dir_opt, *extra_build_opts)
# Maps each sub-command name to: the options it accepts, the runner callable
# that implements it, its help text, and whether the GUI must ask the user to
# confirm before proceeding ('gui_user_continue_check').
operations = {
    'scrape_to_uces': { #
        'options': (*scrape_and_build_opts, do_summarise_gamelist_opt, do_bezel_scape_opt, *add_bezels_to_gamelist_opts, *export_assets_opts),
        'runner': runners.scrape_and_build_uces,
        'help': help_messages.SCRAPE_TO_UCES,
        'gui_user_continue_check': False
    },
    'scrape_to_recipes': { #
        'options': (*scrape_and_build_opts, do_summarise_gamelist_opt, do_bezel_scape_opt, *add_bezels_to_gamelist_opts, *export_assets_opts),
        'runner': runners.scrape_and_make_recipes,
        'help': help_messages.SCRAPE_TO_RECIPES,
        'gui_user_continue_check': False
    },
    'scrape_to_gamelist': { #
        'options': (input_dir_opt, output_dir_opt, platform_opt, *other_scrape_opts, do_summarise_gamelist_opt, do_bezel_scape_opt, *add_bezels_to_gamelist_opts,),
        'runner': runners.scrape_and_make_gamelist,
        'help': help_messages.SCRAPE_TO_GAMELIST,
        'gui_user_continue_check': False
    },
    'gamelist_to_uces': {
        'options': (*build_from_game_list_opts, *export_assets_opts, do_summarise_gamelist_opt),
        'runner': runners.build_uces_from_gamelist,
        'help': help_messages.GAMELIST_TO_UCES,
        'gui_user_continue_check': False
    },
    'gamelist_to_recipes': {
        'options': (*build_from_game_list_opts, *export_assets_opts, do_summarise_gamelist_opt),
        'runner': runners.build_recipes_from_gamelist,
        'help': help_messages.GAMELIST_TO_RECIPES,
        'gui_user_continue_check': False
    },
    'export_gamelist_assets': {
        'options': (input_path_opt, output_dir_opt, *export_assets_opts, do_summarise_gamelist_opt),
        'runner': runners.export_assets_from_gamelist,
        'help': help_messages.EXPORT_GAMELIST_ASSETS,
        'gui_user_continue_check': False
    },
    'add_bezels_to_gamelist': { #
        'options': (input_path_opt, platform_opt, *add_bezels_to_gamelist_opts, do_summarise_gamelist_opt),
        'runner': runners.add_bezels_to_existing_gamelist,
        'help': help_messages.ADD_BEZELS_TO_GAMELIST,
        'gui_user_continue_check': False
    },
    'summarise_gamelist': {
        'options': (input_path_opt, output_dir_opt),
        'runner': runners.create_summary_of_gamelist,
        'help': help_messages.SUMMARISE_GAMELIST,
        'gui_user_continue_check': False
    },
    'recipes_to_uces': {
        'options': (input_dir_opt, output_dir_opt),
        'runner': runners.build_uces_from_recipes,
        'help': help_messages.RECIPES_TO_UCES,
        'gui_user_continue_check': False
    },
    'recipe_to_uce': {
        'options': (input_dir_opt, output_path_opt),
        'runner': runners.build_single_uce_from_recipe,
        'help': help_messages.RECIPE_TO_UCE,
        'gui_user_continue_check': False
    },
    'edit_save_partition': {
        'options': (input_path_opt, *edit_save_part_opts, backup_save_part_opt),
        'runner': runners.edit_uce_save_partition,
        'help': help_messages.EDIT_SAVE_PARTITION,
        'gui_user_continue_check': True
    },
    'extract_save_partition': {
        'options': (input_path_opt, output_path_opt),
        'runner': runners.extract_uce_save_partition,
        'help': help_messages.EXTRACT_SAVE_PARTITION,
        'gui_user_continue_check': False
    },
    'replace_save_partition': {
        'options': (input_path_opt, replace_save_part_opt, backup_save_part_opt),
        'runner': runners.replace_uce_save_partition,
        'help': help_messages.REPLACE_SAVE_PARTITION,
        'gui_user_continue_check': False
    }
}
| StarcoderdataPython |
1607367 | <reponame>kissingurami/python_twisted<filename>multi_thread/RLock_example.py
# -*- coding: utf-8 -*-
'''
Difference between Lock and RLock:
Within the same thread, calling acquire() on an RLock multiple times does not
block.  Each thread here runs f(); f() takes the lock and then calls g(),
which needs the same lock again.  With a plain Lock this nested acquisition
would deadlock.  Because the code uses RLock, re-acquiring the lock from the
same thread succeeds immediately.
'''
import threading
# A re-entrant lock: the SAME thread may acquire it repeatedly without
# blocking (each acquire must be matched by a release).  The nested
# acquisitions in f() -> g() -> h() would deadlock with a plain Lock.
rlock = threading.RLock()
def f():
    # Holds the lock while calling g() and h(), which re-acquire it.
    with rlock:
        g()
        h()
def g():
    # Second acquisition by the same thread -- succeeds because rlock is
    # re-entrant; h() below acquires it a third time.
    with rlock:
        h()
        do_something1()
def h():
    # Deepest (third) acquisition of the same RLock.
    with rlock:
        do_something2()
def do_something1():
    print('do_something1')
def do_something2():
    print('do_something2')
# Create and run threads as follows; each thread runs the full f/g/h chain.
try:
    threading.Thread(target=f).start()
    threading.Thread(target=f).start()
    threading.Thread(target=f).start()
except Exception as e:
    print("Error: unable to start thread")
| StarcoderdataPython |
"""Use pika with the Tornado IOLoop
"""
import logging
from tornado import ioloop
from pika.adapters.utils import nbio_interface, selector_ioloop_adapter
from pika.adapters import base_connection
LOGGER = logging.getLogger(__name__)
class TornadoConnection(base_connection.BaseConnection):
    """The TornadoConnection runs on the Tornado IOLoop.
    """

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new instance of the TornadoConnection class, connecting
        to RabbitMQ automatically

        :param pika.connection.Parameters parameters: Connection parameters
        :param on_open_callback: The method to call when the connection is open
        :type on_open_callback: method
        :param None | method on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`: on_open_error_callback(Connection, exception).
        :param None | method on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the cause
            of connection failure.
        :param None | ioloop.IOLoop |
            nbio_interface.AbstractIOServices custom_ioloop:
                Override using the global IOLoop in Tornado
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory.

        """
        # Accept either a ready-made AbstractIOServices implementation, or
        # wrap a Tornado IOLoop (defaulting to the process-wide instance) in
        # the selector-IOLoop adapter.
        if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
            nbio = custom_ioloop
        else:
            nbio = (
                selector_ioloop_adapter.SelectorIOServicesAdapter(
                    custom_ioloop or ioloop.IOLoop.instance()))
        super(TornadoConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.

        """
        # Externally-managed workflow: one shared adapter drives every
        # connection attempt produced by the factory below.
        nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
            custom_ioloop or ioloop.IOLoop.instance())

        def connection_factory(params):
            """Connection factory."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            # internal_connection_workflow=False: the workflow object, not
            # the connection itself, manages establishment.
            return cls(
                parameters=params,
                custom_ioloop=nbio,
                internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)
| StarcoderdataPython |
72625 | <filename>fairseq/models/hubert/hubert.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2 import (
ConvFeatureExtractionModel,
TransformerEncoder,
)
from fairseq.modules import GradMultiply, LayerNorm
from fairseq.tasks.hubert_pretraining import (
HubertPretrainingConfig,
HubertPretrainingTask,
)
from omegaconf import II
logger = logging.getLogger(__name__)
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class HubertConfig(FairseqDataclass):
    """All hyper-parameters for HuBERT pre-training.

    Grouped below: feature extractor, transformer encoder sizes, dropouts,
    projection/loss options, time masking, channel masking, positional
    embeddings and loss computation.  Per-field documentation lives in each
    field's ``metadata["help"]``.
    """

    label_rate: int = II("task.label_rate")

    extractor_mode: EXTRACTOR_MODE_CHOICES = field(
        default="default",
        metadata={
            "help": "mode for feature extractor. default has a single group "
            "norm with d groups in the first conv block, whereas layer_norm "
            "has layer norms in every block (meant to use with normalize=True)"
        },
    )
    encoder_layers: int = field(
        default=12, metadata={"help": "num encoder layers in the transformer"}
    )
    encoder_embed_dim: int = field(
        default=768, metadata={"help": "encoder embedding dimension"}
    )
    encoder_ffn_embed_dim: int = field(
        default=3072, metadata={"help": "encoder embedding dimension for FFN"}
    )
    encoder_attention_heads: int = field(
        default=12, metadata={"help": "num encoder attention heads"}
    )
    activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
        default="gelu", metadata={"help": "activation function to use"}
    )

    # dropouts
    dropout: float = field(
        default=0.1,
        metadata={"help": "dropout probability for the transformer"},
    )
    attention_dropout: float = field(
        default=0.1,
        metadata={"help": "dropout probability for attention weights"},
    )
    activation_dropout: float = field(
        default=0.0,
        metadata={"help": "dropout probability after activation in FFN"},
    )
    encoder_layerdrop: float = field(
        default=0.0,
        metadata={"help": "probability of dropping a tarnsformer layer"},
    )
    dropout_input: float = field(
        default=0.0,
        metadata={"help": "dropout to apply to the input (after feat extr)"},
    )
    dropout_features: float = field(
        default=0.0,
        metadata={"help": "dropout to apply to the features (after feat extr)"},
    )

    final_dim: int = field(
        default=0,
        metadata={
            "help": "project final representations and targets to this many "
            "dimensions. set to encoder_embed_dim is <= 0"
        },
    )
    untie_final_proj: bool = field(
        default=False,
        metadata={"help": "use separate projection for each target"},
    )
    layer_norm_first: bool = field(
        default=False,
        metadata={"help": "apply layernorm first in the transformer"},
    )
    conv_feature_layers: str = field(
        default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2",
        metadata={
            "help": "string describing convolutional feature extraction "
            "layers in form of a python list that contains "
            "[(dim, kernel_size, stride), ...]"
        },
    )
    conv_bias: bool = field(
        default=False, metadata={"help": "include bias in conv encoder"}
    )
    logit_temp: float = field(
        default=0.1, metadata={"help": "temperature to divide logits by"}
    )
    target_glu: bool = field(
        default=False, metadata={"help": "adds projection + glu to targets"}
    )
    feature_grad_mult: float = field(
        default=1.0,
        metadata={"help": "multiply feature extractor var grads by this"},
    )

    # masking
    mask_length: int = field(default=10, metadata={"help": "mask length"})
    mask_prob: float = field(
        default=0.65,
        metadata={"help": "probability of replacing a token with mask"},
    )
    mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
        default="static", metadata={"help": "how to choose mask length"}
    )
    mask_other: float = field(
        default=0,
        metadata={
            "help": "secondary mask argument "
            "(used for more complex distributions), "
            "see help in compute_mask_indicesh"
        },
    )
    no_mask_overlap: bool = field(
        default=False, metadata={"help": "whether to allow masks to overlap"}
    )
    mask_min_space: int = field(
        default=1,
        metadata={"help": "min space between spans (if no overlap is enabled)"},
    )

    # channel masking
    mask_channel_length: int = field(
        default=10,
        metadata={"help": "length of the mask for features (channels)"},
    )
    mask_channel_prob: float = field(
        default=0.0,
        metadata={"help": "probability of replacing a feature with 0"},
    )
    mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
        default="static",
        metadata={"help": "how to choose mask length for channel masking"},
    )
    mask_channel_other: float = field(
        default=0,
        metadata={
            "help": "secondary mask argument "
            "(used for more complex distributions), "
            "see help in compute_mask_indicesh"
        },
    )
    no_mask_channel_overlap: bool = field(
        default=False,
        metadata={"help": "whether to allow channel masks to overlap"},
    )
    mask_channel_min_space: int = field(
        default=1,
        metadata={"help": "min space between spans (if no overlap is enabled)"},
    )

    # positional embeddings
    conv_pos: int = field(
        default=128,
        metadata={"help": "number of filters for convolutional positional embeddings"},
    )
    conv_pos_groups: int = field(
        default=16,
        metadata={"help": "number of groups for convolutional positional embedding"},
    )

    latent_temp: Tuple[float, float, float] = field(
        default=(2, 0.5, 0.999995),
        metadata={"help": "legacy (to be removed)"},
    )

    # loss computation
    skip_masked: bool = field(
        default=False,
        metadata={"help": "skip computing losses over masked frames"},
    )
    skip_nomask: bool = field(
        default=False,
        metadata={"help": "skip computing losses over unmasked frames"},
    )
@register_model("hubert", dataclass=HubertConfig)
class HubertModel(BaseFairseqModel):
    """HuBERT: self-supervised speech model trained by masked prediction.

    A convolutional feature extractor turns raw waveforms into frames; a
    transformer encoder contextualises them; masked frames are trained to
    predict discrete target labels via cosine-similarity logits against
    learned label embeddings (one label set per dictionary).
    """

    def __init__(
        self,
        cfg: HubertConfig,
        task_cfg: HubertPretrainingConfig,
        dictionaries: List[Dictionary],
    ) -> None:
        super().__init__()
        logger.info(f"HubertModel Config: {cfg}")

        feature_enc_layers = eval(cfg.conv_feature_layers)  # noqa
        self.embed = feature_enc_layers[-1][0]

        self.feature_extractor = ConvFeatureExtractionModel(
            conv_layers=feature_enc_layers,
            dropout=0.0,
            mode=cfg.extractor_mode,
            conv_bias=cfg.conv_bias,
        )
        # Overall downsampling of the conv stack = product of the strides;
        # feat2tar_ratio converts feature-frame indices to label indices.
        feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
        self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate

        self.post_extract_proj = (
            nn.Linear(self.embed, cfg.encoder_embed_dim)
            if self.embed != cfg.encoder_embed_dim
            else None
        )

        self.mask_prob = cfg.mask_prob
        self.mask_selection = cfg.mask_selection
        self.mask_other = cfg.mask_other
        self.mask_length = cfg.mask_length
        self.no_mask_overlap = cfg.no_mask_overlap
        self.mask_min_space = cfg.mask_min_space

        self.mask_channel_prob = cfg.mask_channel_prob
        self.mask_channel_selection = cfg.mask_channel_selection
        self.mask_channel_other = cfg.mask_channel_other
        self.mask_channel_length = cfg.mask_channel_length
        self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
        self.mask_channel_min_space = cfg.mask_channel_min_space

        self.dropout_input = nn.Dropout(cfg.dropout_input)
        self.dropout_features = nn.Dropout(cfg.dropout_features)

        self.feature_grad_mult = cfg.feature_grad_mult
        self.logit_temp = cfg.logit_temp
        self.skip_masked = cfg.skip_masked
        self.skip_nomask = cfg.skip_nomask

        final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim

        # Learned embedding substituted at masked time steps.
        self.mask_emb = nn.Parameter(
            torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
        )

        self.encoder = TransformerEncoder(cfg)
        self.layer_norm = LayerNorm(self.embed)

        self.target_glu = None
        if cfg.target_glu:
            self.target_glu = nn.Sequential(
                nn.Linear(final_dim, final_dim * 2), nn.GLU()
            )

        self.untie_final_proj = cfg.untie_final_proj
        if self.untie_final_proj:
            self.final_proj = nn.Linear(
                cfg.encoder_embed_dim, final_dim * len(dictionaries)
            )
        else:
            self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)

        # modules below are not needed during fine-tuning
        if any([d is None for d in dictionaries]):
            logger.info("cannot find dictionary. assume will be used for fine-tuning")
        else:
            self.num_classes = [len(d) for d in dictionaries]
            # One embedding per label, all label sets concatenated; split by
            # num_classes when computing per-dictionary logits.
            self.label_embs_concat = nn.Parameter(
                torch.FloatTensor(sum(self.num_classes), final_dim)
            )
            nn.init.uniform_(self.label_embs_concat)

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""

        super().upgrade_state_dict_named(state_dict, name)
        return state_dict

    @classmethod
    def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask):
        """Build a new model instance."""

        model = HubertModel(cfg, task.cfg, task.dictionaries)
        return model

    def apply_mask(self, x, padding_mask, target_list):
        """Apply time and channel masking to x in place.

        Returns (x, mask_indices) where mask_indices is a (B, T) bool tensor
        of masked time steps, or None when time masking is disabled.
        """
        B, T, C = x.shape
        if self.mask_prob > 0:
            mask_indices = compute_mask_indices(
                (B, T),
                padding_mask,
                self.mask_prob,
                self.mask_length,
                self.mask_selection,
                self.mask_other,
                min_masks=2,
                no_overlap=self.no_mask_overlap,
                min_space=self.mask_min_space,
            )
            mask_indices = torch.from_numpy(mask_indices).to(x.device)
            # Masked time steps are replaced by the learned mask embedding.
            x[mask_indices] = self.mask_emb
        else:
            mask_indices = None

        if self.mask_channel_prob > 0:
            mask_channel_indices = compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = (
                torch.from_numpy(mask_channel_indices)
                .to(x.device)
                .unsqueeze(1)
                .expand(-1, T, -1)
            )
            # Masked feature channels are zeroed across all time steps.
            x[mask_channel_indices] = 0

        return x, mask_indices

    def compute_nce(self, x, pos, negs):
        """Cosine-similarity logits of x against positive + negative targets.

        Row 0 of the candidate set is the positive; negatives identical to
        the positive are excluded by setting their logits to -inf.
        """
        neg_is_pos = (pos == negs).all(-1)
        pos = pos.unsqueeze(0)
        targets = torch.cat([pos, negs], dim=0)

        logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
        logits /= self.logit_temp
        if neg_is_pos.any():
            logits[1:][neg_is_pos] = float("-inf")
        logits = logits.transpose(0, 1)  # (num_x, num_cls+1)
        return logits

    def forward_features(self, source: torch.Tensor) -> torch.Tensor:
        """Run the conv extractor, optionally scaling (or stopping) its grads."""
        if self.feature_grad_mult > 0:
            features = self.feature_extractor(source)
            if self.feature_grad_mult != 1.0:
                features = GradMultiply.apply(features, self.feature_grad_mult)
        else:
            with torch.no_grad():
                features = self.feature_extractor(source)
        return features

    def forward_targets(
        self,
        features: torch.Tensor,
        target_list: List[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Trim features to ensure labels exist and then get aligned labels
        feat_tsz = features.size(2)
        targ_tsz = min([t.size(1) for t in target_list])
        if self.feat2tar_ratio * feat_tsz > targ_tsz:
            feat_tsz = int(targ_tsz / self.feat2tar_ratio)
            features = features[..., :feat_tsz]
        target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
        target_list = [t[:, target_inds.long()] for t in target_list]
        return features, target_list

    def forward_padding_mask(
        self,
        features: torch.Tensor,
        padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Downsample the sample-level padding mask to feature-frame rate."""
        extra = padding_mask.size(1) % features.size(1)
        if extra > 0:
            padding_mask = padding_mask[:, :-extra]
        padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1)
        # A frame is padding only if all samples it covers are padding.
        padding_mask = padding_mask.all(-1)
        return padding_mask

    def forward(
        self,
        source: torch.Tensor,
        target_list: Optional[List[torch.Tensor]] = None,
        padding_mask: Optional[torch.Tensor] = None,
        mask: bool = True,
        features_only: bool = False,
        output_layer: Optional[int] = None,
    ) -> Dict[str, torch.Tensor]:
        """output layer is 1-based"""
        features = self.forward_features(source)
        if target_list is not None:
            features, target_list = self.forward_targets(features, target_list)

        # L2 penalty on pre-norm features (returned as an extra loss).
        features_pen = features.float().pow(2).mean()

        features = features.transpose(1, 2)
        features = self.layer_norm(features)
        unmasked_features = features.clone()

        if padding_mask is not None:
            padding_mask = self.forward_padding_mask(features, padding_mask)

        if self.post_extract_proj is not None:
            features = self.post_extract_proj(features)

        features = self.dropout_input(features)
        unmasked_features = self.dropout_features(unmasked_features)

        if mask:
            x, mask_indices = self.apply_mask(features, padding_mask, target_list)
        else:
            x = features
            mask_indices = None

        # feature: (B, T, D), float
        # target: (B, T), long
        # x: (B, T, D), float
        # padding_mask: (B, T), bool
        # mask_indices: (B, T), bool
        x, _ = self.encoder(
            x,
            padding_mask=padding_mask,
            layer=None if output_layer is None else output_layer - 1,
        )

        if features_only:
            return {"x": x, "padding_mask": padding_mask, "features": features}

        def compute_pred(proj_x, target, label_embs):
            # compute logits for the i-th label set
            y = torch.index_select(label_embs, 0, target.long())
            negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)
            if self.target_glu:
                y = self.target_glu(y)
                negs = self.target_glu(negs)
            # proj_x: (S, D)
            # y: (S, D)
            # negs: (Neg, S, D)
            return self.compute_nce(proj_x, y, negs)

        label_embs_list = self.label_embs_concat.split(self.num_classes, 0)

        # Masked-frame logits (one list entry per label dictionary).
        if not self.skip_masked:
            masked_indices = torch.logical_and(~padding_mask, mask_indices)
            proj_x_m = self.final_proj(x[masked_indices])
            if self.untie_final_proj:
                proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)
            else:
                proj_x_m_list = [proj_x_m for _ in range(len(target_list))]
            logit_m_list = [
                compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])
                for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list))
            ]
        else:
            logit_m_list = [None for _ in target_list]

        # Unmasked-frame logits, computed the same way.
        if not self.skip_nomask:
            nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)
            proj_x_u = self.final_proj(x[nomask_indices])
            if self.untie_final_proj:
                proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1)
            else:
                proj_x_u_list = [proj_x_u for _ in range(len(target_list))]

            logit_u_list = [
                compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i])
                for i, (proj_x_u, t) in enumerate(zip(proj_x_u_list, target_list))
            ]
        else:
            logit_u_list = [None for _ in target_list]

        result = {
            "logit_m_list": logit_m_list,
            "logit_u_list": logit_u_list,
            "padding_mask": padding_mask,
            "features_pen": features_pen,
        }
        return result

    def extract_features(
        self,
        source: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        mask: bool = False,
        ret_conv: bool = False,
        output_layer: Optional[int] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (features, padding_mask) without computing prediction logits."""
        res = self.forward(
            source,
            padding_mask=padding_mask,
            mask=mask,
            features_only=True,
            output_layer=output_layer,
        )
        feature = res["features"] if ret_conv else res["x"]
        return feature, res["padding_mask"]

    def get_logits(self, net_output, is_masked=True):
        """Return the non-None logits for masked or unmasked frames, as float."""
        if is_masked:
            logits_list = net_output["logit_m_list"]
        else:
            logits_list = net_output["logit_u_list"]
        logits_list = [x.float() for x in logits_list if x is not None]
        return logits_list

    def get_targets(self, net_output, is_masked=True):
        """NCE targets: the positive candidate is always at index 0."""
        logits_list = self.get_logits(net_output, is_masked)
        targets_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list]
        return targets_list

    def get_extra_losses(self, net_output):
        """Return ([loss_tensors], [names]) for auxiliary losses (feature penalty)."""
        extra_losses = []
        names = []

        if "features_pen" in net_output:
            extra_losses.append(net_output["features_pen"])
            names.append("features_pen")

        return extra_losses, names

    def remove_pretraining_modules(self):
        """Drop pre-training-only modules before fine-tuning."""
        self.target_glu = None
        self.final_proj = None
| StarcoderdataPython |
3262708 | <filename>lazysort.py
import typing
from random import randint
from heapq import merge
from itertools import chain, tee
def lazysort(l: list) -> typing.Iterator:
    """Lazily sort *l*: split it into ascending runs, then heap-merge them."""
    # Stage 1: repeatedly dropsort the remaining elements.  Each pass yields
    # a lazily-sorted run (pushed on the stack) and a surplus iterator of the
    # dropped elements, which becomes the input of the next pass.
    stack = []
    current_list = iter(l)
    sentinel = object()
    first = next(current_list, sentinel)
    while first is not sentinel:
        sortedish, surplus = dropsort(chain((first,), current_list))
        stack.append(sortedish)
        current_list = surplus
        first = next(current_list, sentinel)
    # Stage 2: merge all runs pairwise with heapq.merge (each merge is lazy).
    if len(stack) < 2:  # the case where the list `l` is already sorted
        return iter(l)
    cur = merge(stack.pop(), stack.pop())
    while stack:
        cur = merge(cur, stack.pop())
    return cur
def dropsort(s: typing.Iterable):
    """Split *s* into a pair of lazy iterators (sorted_run, surplus).

    ``sorted_run`` yields the non-decreasing subsequence obtained by keeping
    each element that is >= the last kept one ("dropsort"); ``surplus``
    yields the elements that were dropped.  Both views consume independent
    tees of *s*.
    """
    def result_iterator(seq: typing.Iterator):
        # PEP 479 fix: a StopIteration escaping a generator body becomes
        # RuntimeError on Python 3.7+, so exhaustion of ``seq`` must be
        # caught explicitly instead of letting bare next() raise through.
        try:
            last_element = next(seq)
            yield last_element
            while True:
                current_element = next(seq)
                if current_element >= last_element:
                    last_element = current_element
                    yield last_element
        except StopIteration:
            return

    def surplus_iterator(seq: typing.Iterator):
        # Same PEP 479 fix as above.
        try:
            last_element = next(seq)
            while True:
                current_element = next(seq)
                if current_element >= last_element:
                    last_element = current_element
                else:
                    yield current_element
        except StopIteration:
            return

    it1, it2 = tee(s, 2)
    return result_iterator(it1), surplus_iterator(it2)
def main():
    """Demo: lazily sort 100 random integers and print the result."""
    values = [randint(1, 255) for _ in range(100)]
    is_correct = list(lazysort(values)) == sorted(values)
    print("lazysort is working eh?", is_correct)
    for item in lazysort(values):
        print("{:3}".format(item), end=" ")


if __name__ == '__main__':
    main()
from lib.base_controller import CommandController
class CollectinfoCommandController(CommandController):
    """Base controller for commands that operate on a collectinfo log.

    The log handler is stored on the class, not the instance, so every
    controller derived from this class shares the most recently supplied
    handler.
    """

    log_handler = None  # shared, class-level handler

    def __init__(self, log_handler):
        # Deliberately assigns to the class attribute (not ``self``) so
        # sibling controllers see the same handler.
        CollectinfoCommandController.log_handler = log_handler
| StarcoderdataPython |
3382304 | # -*- coding: utf-8 -*-
from django.urls import re_path
from layouter.views import ToggleGridView
# URL namespace for this app (e.g. reverse('layouter:toggle-grid')).
app_name = 'layouter'

urlpatterns = [
    # Note: the pattern is unanchored at the end (no trailing '$'), so any
    # path beginning with 'toggle-grid/' matches.
    re_path(r'^toggle-grid/', ToggleGridView.as_view(), name='toggle-grid')
]
| StarcoderdataPython |
3247218 | from __future__ import absolute_import, division, print_function
# Public names of the package.  NOTE(review): "chachies" here names the
# package itself rather than a submodule -- confirm the intended exports.
__all__ = ["chachifuncs","descriptors","chachies", "version"]
from chachies import chachifuncs
from chachies import descriptors
from chachies.version import __version__
| StarcoderdataPython |
125206 | <filename>python-flask/tests/test_api.py<gh_stars>0
# Copyright (c) 2016 <NAME>
# All rights reserved.
import unittest
from app import app
class APITestCase(unittest.TestCase):
    """HTTP-level tests for the Flask application's endpoints."""

    def setUp(self):
        # No per-test fixtures needed: each test builds its own client.
        pass

    def tearDown(self):
        pass

    def test_index(self):
        """The index route should answer with the greeting."""
        with app.test_client() as client:
            resp = client.get('/')
            # resp.data is *bytes* under Python 3 Flask, so the expected
            # value must be a bytes literal; comparing against a str would
            # never be equal (b'...' is also valid and equal on Python 2).
            self.assertEqual(b'Hello, world!', resp.data,
                             msg='unexpected index response')
| StarcoderdataPython |
1732750 | <reponame>tokenchain/tronpytool
#!/usr/bin/env python
# coding: utf-8
from tronpytool import Tron
# Demo: convert TRON addresses between base58 and hex representations,
# using a client pointed at the 'nile' test network.
tron = Tron().setNetwork('nile')

st1 = tron.address.to_hex('TT67rPNwgmpeimvHUMVzFfKsjL9GZ1wGw8')  # '41'-prefixed hex
st2 = tron.address.to_hex_0x('TT67rPNwgmpeimvHUMVzFfKsjL9GZ1wGw8')  # '0x'-prefixed, no '41'
st22 = tron.address.to_hex_0x_41('TT67rPNwgmpeimvHUMVzFfKsjL9GZ1wGw8')  # '0x41'-prefixed
st3 = tron.address.from_hex('41BBC8C05F1B09839E72DB044A6AA57E2A5D414A10')  # back to base58

print(st1)
# 41BBC8C05F1B09839E72DB044A6AA57E2A5D414A10
print(st2)
# 0xBBC8C05F1B09839E72DB044A6AA57E2A5D414A10
print(st22)
# 0x41BBC8C05F1B09839E72DB044A6AA57E2A5D414A10
print(st3)
# TT67rPNwgmpeimvHUMVzFfKsjL9GZ1wGw8
| StarcoderdataPython |
3367294 | # general imports
import shutil
import pytest
from pathlib import Path
# AHA imports
import fault
import magma as m
# DragonPHY-specific imports
from dragonphy import get_deps_cpu_sim
THIS_DIR = Path(__file__).parent.resolve()
BUILD_DIR = THIS_DIR / 'build'
def list_head(lis, n=25):
    """Render the first *n* items of *lis* as a string.

    When the list is longer than *n*, an ``'...'`` entry is appended so
    the truncation is visible in the output.
    """
    head = lis[:n]
    if len(lis) > n:
        head = head + ['...']
    return str(head)
@pytest.mark.parametrize('ndiv', list(range(16)))
def test_sim(simulator_name, ndiv, N=4, nper=5):
    """Simulate avg_pulse_gen and check that, after reset is released, its
    output pulses high for one sample every 2**ndiv samples.

    N is the bit width of the divider input; nper is how many output
    periods to observe.
    """
    # set defaults
    if simulator_name is None:
        if shutil.which('iverilog'):
            simulator_name = 'iverilog'
        else:
            simulator_name = 'ncsim'
    # declare circuit
    class dut(m.Circuit):
        name = 'avg_pulse_gen'
        io = m.IO(
            clk=m.ClockIn,
            rstb=m.BitIn,
            ndiv=m.In(m.Bits[N]),
            out=m.BitOut
        )
    # create tester
    t = fault.Tester(dut, dut.clk)
    # initialize
    t.zero_inputs()
    t.poke(dut.ndiv, ndiv)
    # run a few cycles, then release the (active-low) reset
    t.step(10)
    t.poke(dut.rstb, 1)
    # capture output: one sample per full clock cycle (two half-steps)
    results = []
    for k in range(nper*(2**ndiv)):
        results.append(t.get_value(dut.out))
        t.step(2)
    ##################
    # run simulation #
    ##################
    t.compile_and_run(
        target='system-verilog',
        simulator=simulator_name,
        ext_srcs=get_deps_cpu_sim(cell_name='avg_pulse_gen'),
        parameters={
            'N': N,
        },
        ext_model_file=True,
        disp_type='realtime',
        dump_waveforms=False,
        directory=BUILD_DIR,
        num_cycles=1e12
    )
    ##################
    # convert values #
    ##################
    # get_value returns placeholders during stimulus; .value is filled in
    # once the simulation has run.
    results = [elem.value for elem in results]
    ################
    # check values #
    ################
    # trim initial zeros (align to the first observed pulse)
    results = results[results.index(1):]
    # print the beginning of the list
    print('results', list_head(results))
    # trim to an integer number of periods
    nper_meas = len(results) // (2**ndiv)
    assert nper_meas >= (nper-1), 'Not enough output periods observed.'
    results = results[:nper_meas * (2**ndiv)]
    # check that results match expectations: a single 1 followed by
    # (2**ndiv - 1) zeros, repeated for every measured period
    expct = []
    for _ in range(nper_meas):
        expct += [1]
        expct += [0]*((2**ndiv)-1)
    # compare observed and expected outputs
    assert results == expct, 'Mismatch in measured vs. expected outputs.'
| StarcoderdataPython |
1643419 | from unittest import TestCase
from salesdataanalyzer.analyzer import analyze_data
from salesdataanalyzer.helpers import Salesman, Customer, Sale, SaleItem, \
DataSummary
class AnalyzerTest(TestCase):
    """Unit tests for analyze_data."""

    def test_analyze_data(self):
        """analyze_data should report entity counts, the id of the most
        expensive sale, and the name of the worst salesman."""
        salesmen = [
            Salesman('12312312312', '<NAME>', 192000.00),
            Salesman('45645645645', '<NAME>', 167500.00),
        ]
        customers = [
            Customer('12312312312312', '<NAME>', 'Farming'),
            Customer('45645645645645', '<NAME>', 'Automotive Retail'),
        ]
        # Sale 1 has the single highest total (1 x 110500.99).
        sales = [
            Sale(1, [SaleItem(1, 1, 110500.99)], '<NAME>'),
            Sale(2, [SaleItem(2, 25, 230.45)], '<NAME>'),
            Sale(4, [SaleItem(4, 1, 65198.90)], '<NAME>'),
            Sale(5, [SaleItem(5, 5, 470.10)], '<NAME>'),
        ]
        data_summary: DataSummary = analyze_data({
            'salesmen': salesmen,
            'customers': customers,
            'sales': sales,
        })
        expected_data_summary: DataSummary = {
            'customers_amount': 2,
            'salesmen_amount': 2,
            'most_expensive_sale_id': 1,
            'worst_salesman_name': '<NAME>',
        }
        self.assertDictEqual(expected_data_summary, data_summary)
| StarcoderdataPython |
1759228 | <gh_stars>1-10
import fulfillment
# Demo of the fulfillment client: exercises the Product CRUD endpoints.
fulfillment.core.api_key = 'YOUR_API_KEY_GOES_HERE'

# set debug to true to get print json
fulfillment.core.Debug = True

# Create a product.  Dimension/weight/price values are sent as
# {'value': ...} objects; units are presumably the API defaults -- confirm.
fulfillment.Product.create(
    title='example product',
    barcode='123456789',
    type='merchandise',
    origin_country='US',
    hs_code='1234.56.78',
    requires_serial_tracking=False,
    length={'value': 1},
    width={'value': 2},
    height={'value': 3},
    weight={'value': 4},
    price={'value': 5}
)

# Fetch a single product by id.
fulfillment.Product.retrieve('prod_123456789abcdefghijklmnopqrstuvwxyz')

# Paged listing.
fulfillment.Product.retrieveAll(
    options={
        'limit': 3,
        'offset': 0,
        'page': 0,
        'per_page': 2
    }
)

# Update, then delete, the same product.
fulfillment.Product.update(
    'prod_123456789abcdefghijklmnopqrstuvwxyz',
    title='updated example product'
)

fulfillment.Product.delete('prod_123456789abcdefghijklmnopqrstuvwxyz')
| StarcoderdataPython |
3367658 | <reponame>awsa2ron/aws-doc-sdk-examples
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for find_running_models.py.
"""
import pytest
from botocore.exceptions import ClientError
import datetime
import boto3
import models
from boto3.session import Session
from find_running_models import find_running_models, find_running_models_in_project
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_list_models'),
    ('TestException', 'stub_describe_model'),
])
def test_find_models_in_project(make_stubber, stub_runner, error_code, stop_on_method):
    """Test find_running_models_in_project against stubbed Lookout for
    Vision calls, both on the success path and when each stubbed call
    raises *error_code*."""
    lookoutvision_client = boto3.client('lookoutvision')
    lookoutvision_stubber = make_stubber(lookoutvision_client)
    # Canned data returned by the stubbed list_models/describe_model calls.
    project_name = 'test-project_name'
    model = 'test-model'
    model_version = 'test-model'
    model_arn = 'test-arn'
    description = 'test description'
    status = 'HOSTED'
    message = 'Test message!'
    created = datetime.datetime.now()
    trained = created + datetime.timedelta(minutes=10)
    recall = .3
    precision = .5
    f1 = .7
    out_buck = 'doc-example-bucket'
    out_folder = 'test-folder'

    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(lookoutvision_stubber.stub_list_models,
                   project_name, [model])
        runner.add(lookoutvision_stubber.stub_describe_model,
                   project_name, model_version, model_arn, status, {
                       'description': description, 'message': message, 'created': created,
                       'trained': trained, 'recall': recall, 'precision': precision, 'f1': f1,
                       'out_bucket': out_buck, 'out_folder': out_folder
                   })

    if error_code is None:
        # One model was stubbed as HOSTED, so exactly one should be found.
        running_models = find_running_models_in_project(
            lookoutvision_client, project_name)
        assert len(running_models) == 1
    else:
        with pytest.raises(ClientError) as exc_info:
            find_running_models_in_project(lookoutvision_client, project_name)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_list_projects'),
    ('TestException', 'stub_list_models'),
    ('TestException', 'stub_describe_model'),
])
def test_find_running_models(make_stubber, stub_runner, monkeypatch,
                             error_code, stop_on_method):
    """Test find_running_models across (patched) AWS Regions, on the
    success path and when each stubbed call raises *error_code*."""
    lookoutvision_client = boto3.client('lookoutvision')
    lookoutvision_stubber = make_stubber(lookoutvision_client)
    # Canned data returned by the stubbed calls.
    project_name = 'test-project_name'
    model = 'test-model'
    model_version = 'test-model'
    model_arn = 'test-arn'
    description = 'test description'
    status = 'HOSTED'
    message = 'Test message!'
    created = datetime.datetime.now()
    trained = created + datetime.timedelta(minutes=10)
    recall = .3
    precision = .5
    f1 = .7
    out_buck = 'doc-example-bucket'
    out_folder = 'test-folder'
    project_arn = 'test-arn'
    region = 'us-east-1'

    def region_list(*args, **kwargs):
        # Patches call to Session.get_available_regions.
        # Returns a single AWS Region list.
        return [region]

    def get_boto_entity(
            client, region_name=None, aws_session_token=None):
        # Patches the lookoutvision client factory.
        # Needed as clients are created for multiple AWS Regions.
        # Returns the previously created, and stubbed, lookoutvision client.
        return lookoutvision_client

    # Patch AWS Region list
    monkeypatch.setattr(Session, 'get_available_regions', region_list)
    # Patch lookoutvision client to manage multiple AWS Region clients.
    monkeypatch.setattr(boto3, 'client', get_boto_entity)

    # Set up stubbed calls needed to mock getting running models.
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            lookoutvision_stubber.stub_list_projects, [project_name],
            [{'arn': project_arn, 'created': created}])
        runner.add(lookoutvision_stubber.stub_list_models,
                   project_name, [model])
        runner.add(lookoutvision_stubber.stub_describe_model,
                   project_name, model_version, model_arn, status, {
                       'description': description, 'message': message, 'created': created,
                       'trained': trained, 'recall': recall, 'precision': precision, 'f1': f1,
                       'out_bucket': out_buck, 'out_folder': out_folder
                   })

    if error_code is None:
        running_models = find_running_models()
        assert len(running_models) == 1
    else:
        with pytest.raises(ClientError) as exc_info:
            find_running_models()
        assert exc_info.value.response['Error']['Code'] == error_code
| StarcoderdataPython |
93114 | import pprint
import re
from typing import Any, Dict
import numpy as np
import pytest
from qcelemental.molutil import compute_scramble
from qcengine.programs.tests.standard_suite_contracts import (
contractual_accsd_prt_pr,
contractual_ccd,
contractual_ccsd,
contractual_ccsd_prt_pr,
contractual_ccsdpt_prccsd_pr,
contractual_ccsdt,
contractual_ccsdt1a,
contractual_ccsdt1b,
contractual_ccsdt2,
contractual_ccsdt3,
contractual_ccsdt_prq_pr,
contractual_ccsdtq,
contractual_cisd,
contractual_current,
contractual_dft_current,
contractual_fci,
contractual_hf,
contractual_lccd,
contractual_lccsd,
contractual_mp2,
contractual_mp2p5,
contractual_mp3,
contractual_mp4,
contractual_mp4_prsdq_pr,
contractual_qcisd,
contractual_qcisd_prt_pr,
query_has_qcvar,
query_qcvar,
)
from qcengine.programs.tests.standard_suite_ref import answer_hash, std_suite
from qcengine.programs.util import mill_qcvars
from .utils import compare, compare_values
pp = pprint.PrettyPrinter(width=120)
def runner_asserter(inp, ref_subject, method, basis, tnm, scramble, frame):
qcprog = inp["qc_module"].split("-")[0]
qc_module_in = inp["qc_module"] # returns "<qcprog>"|"<qcprog>-<module>" # input-specified routing
qc_module_xptd = (
(qcprog + "-" + inp["xptd"]["qc_module"]) if inp.get("xptd", {}).get("qc_module", None) else None
) # expected routing
driver = inp["driver"]
reference = inp["reference"]
fcae = inp["fcae"]
mode_options = inp.get("cfg", {})
if qc_module_in == "nwchem-tce" and basis == "cc-pvdz":
pytest.skip(
f"TCE throwing 'non-Abelian symmetry not permitted' for HF molecule when not C1. fix this a different way than setting C1."
)
# <<< Molecule >>>
# 1. ref mol: `ref_subject` nicely oriented mol taken from standard_suite_ref.py
ref_subject.update_geometry()
min_nonzero_coords = np.count_nonzero(np.abs(ref_subject.geometry(np_out=True)) > 1.0e-10)
# print(
# "MOL 1/REF: ref_subject",
# ref_subject.com_fixed(),
# ref_subject.orientation_fixed(),
# ref_subject.symmetry_from_input(),
# )
# with np.printoptions(precision=3, suppress=True):
# print(ref_subject.geometry(np_out=True))
if scramble is None:
subject = ref_subject
ref2in_mill = compute_scramble(
subject.natom(), do_resort=False, do_shift=False, do_rotate=False, do_mirror=False
) # identity AlignmentMill
else:
subject, scramble_data = ref_subject.scramble(**scramble, do_test=False, fix_mode="copy")
ref2in_mill = scramble_data["mill"]
# with np.printoptions(precision=12, suppress=True):
# print(f"ref2in scramble mill= {ref2in_mill}")
# print("MOL 2/IN: subject", subject.com_fixed(), subject.orientation_fixed(), subject.symmetry_from_input())
# with np.printoptions(precision=3, suppress=True):
# print(subject.geometry(np_out=True))
# 2. input mol: `subject` now ready for `atin.molecule`. may have been scrambled away from nice ref orientation
# <<< Reference Values >>>
# ? precedence on next two
mp2_type = inp.get("corl_type", inp["keywords"].get("mp2_type", "df")) # hard-code of read_options.cc MP2_TYPE
mp_type = inp.get("corl_type", inp["keywords"].get("mp_type", "conv")) # hard-code of read_options.cc MP_TYPE
ci_type = inp.get("corl_type", inp["keywords"].get("ci_type", "conv")) # hard-code of read_options.cc CI_TYPE
cc_type = inp.get("corl_type", inp["keywords"].get("cc_type", "conv")) # hard-code of read_options.cc CC_TYPE
corl_natural_values = {
"hf": "conv", # dummy to assure df/cd/conv scf_type refs available
"mp2": mp2_type,
"mp3": mp_type,
"mp4(sdq)": mp_type,
"mp4": mp_type,
"cisd": ci_type,
"qcisd": ci_type,
"qcisd(t)": ci_type,
"fci": ci_type,
"lccd": cc_type,
"lccsd": cc_type,
"ccd": cc_type,
"ccsd": cc_type,
"ccsd+t(ccsd)": cc_type,
"ccsd(t)": cc_type,
"a-ccsd(t)": cc_type,
"ccsdt-1a": cc_type,
"ccsdt-1b": cc_type,
"ccsdt-2": cc_type,
"ccsdt-3": cc_type,
"ccsdt": cc_type,
"ccsdt(q)": cc_type,
"ccsdtq": cc_type,
"pbe": "conv",
"b3lyp": "conv",
"b3lyp5": "conv",
"mrccsdt-1a": cc_type,
"mrccsdt-1b": cc_type,
"mrccsdt-2": cc_type,
"mrccsdt-3": cc_type,
}
corl_type = corl_natural_values[method]
natural_ref = {"conv": "pk", "df": "df", "cd": "cd"}
scf_type = inp["keywords"].get("scf_type", natural_ref[corl_type])
natural_values = {"pk": "pk", "direct": "pk", "df": "df", "mem_df": "df", "disk_df": "df", "cd": "cd"}
scf_type = natural_values[scf_type]
is_dft = method in ["pbe", "b3lyp", "b3lyp5"]
# * absolute and relative tolerances function approx as `or` operation. see https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
# * can't go lower on atol_e because hit digit limits accessible for reference values
# * dz gradients tend to be less accurate than larger basis sets/mols
# * analytic Hessian very loose to catch gms/nwc HF Hessian
atol_e, rtol_e = 2.0e-7, 1.0e-16
atol_g, rtol_g = 5.0e-7, 2.0e-5
atol_h, rtol_h = 1.0e-5, 2.0e-5
if is_dft:
atol_g = 6.0e-6
using_fd = "xptd" in inp and "fd" in inp["xptd"] # T/F: notate fd vs. anal for docs table
loose_fd = inp.get("xptd", {}).get("fd", False) # T/F: relax conv crit for 3-pt internal findif fd
if loose_fd:
if basis == "cc-pvdz":
atol_g = 1.0e-4
atol_h, rtol_h = 1.0e-4, 5.0e-4
else:
atol_g = 2.0e-5
atol_h, rtol_h = 5.0e-5, 2.0e-4
# VIEW atol_e, atol_g, atol_h, rtol_e, rtol_g, rtol_h = 1.e-9, 1.e-9, 1.e-9, 1.e-16, 1.e-16, 1.e-16
chash = answer_hash(
system=subject.name(),
basis=basis,
fcae=fcae,
scf_type=scf_type,
reference=reference,
corl_type=corl_type,
)
ref_block = std_suite[chash]
# check all calcs against conventional reference to looser tolerance
atol_conv = 1.0e-4
rtol_conv = 1.0e-3
chash_conv = answer_hash(
system=subject.name(),
basis=basis,
fcae=fcae,
reference=reference,
corl_type="conv",
scf_type="pk",
)
ref_block_conv = std_suite[chash_conv]
# <<< Prepare Calculation and Call API >>>
import qcdb
driver_call = {"energy": qcdb.energy, "gradient": qcdb.gradient, "hessian": qcdb.hessian}
# local_options = {"nnodes": 1, "ncores": 2, "scratch_messy": False, "memory": 4}
local_options = {"nnodes": 1, "ncores": 1, "scratch_messy": False, "memory": 10}
qcdb.set_options(
{
# "guess": "sad",
# "e_convergence": 8,
# "d_convergence": 7,
# "r_convergence": 7,
"e_convergence": 10,
"d_convergence": 9,
# "r_convergence": 9,
# "points": 5,
}
)
extra_kwargs = inp["keywords"].pop("function_kwargs", {})
qcdb.set_options(inp["keywords"])
if "error" in inp:
errtype, errmatch, reason = inp["error"]
with pytest.raises(errtype) as e:
driver_call[driver](inp["call"], molecule=subject, local_options=local_options, **extra_kwargs)
assert re.search(errmatch, str(e.value)), f"Not found: {errtype} '{errmatch}' in {e.value}"
_recorder(qcprog, qc_module_in, driver, method, reference, fcae, scf_type, corl_type, "error", "nyi: " + reason)
return
ret, wfn = driver_call[driver](
inp["call"], molecule=subject, return_wfn=True, local_options=local_options, mode_options=mode_options, **extra_kwargs
)
print("WFN")
pp.pprint(wfn)
qc_module_out = wfn["provenance"]["creator"].lower()
if "module" in wfn["provenance"]:
qc_module_out += "-" + wfn["provenance"]["module"] # returns "<qcprog>-<module>"
# assert 0, f"{qc_module_xptd=} {qc_module_in=} {qc_module_out=}" # debug
# 3. output mol: `wfn.molecule` after calc. orientation for nonscalar quantities may be different from `subject` if fix_=False
wfn_molecule = qcdb.Molecule.from_schema(wfn["molecule"])
# print(
# "MOL 3/WFN: wfn.mol",
# wfn_molecule.com_fixed(),
# wfn_molecule.orientation_fixed(),
# wfn_molecule.symmetry_from_input(),
# )
# with np.printoptions(precision=3, suppress=True):
# print(wfn_molecule.geometry(np_out=True))
_, ref2out_mill, _ = ref_subject.B787(wfn_molecule, atoms_map=False, mols_align=True, fix_mode="true", verbose=0)
# print(f"{ref2out_mill=}")
# print("PREE REF")
# print(ref_block["HF TOTAL GRADIENT"])
if subject.com_fixed() and subject.orientation_fixed():
assert frame == "fixed"
with np.printoptions(precision=3, suppress=True):
assert compare_values(
subject.geometry(), wfn_molecule.geometry(), atol=5.0e-8
), f"coords: atres ({wfn_molecule.geometry(np_out=True)}) != atin ({subject.geometry(np_out=True)})" # 10 too much
assert (
ref_subject.com_fixed()
and ref_subject.orientation_fixed()
and subject.com_fixed()
and subject.orientation_fixed()
and wfn_molecule.com_fixed()
and wfn_molecule.orientation_fixed()
), f"fixed, so all T: {ref_subject.com_fixed()} {ref_subject.orientation_fixed()} {subject.com_fixed()} {subject.orientation_fixed()} {wfn_molecule.com_fixed()} {wfn_molecule.orientation_fixed()}"
ref_block = mill_qcvars(ref2in_mill, ref_block)
ref_block_conv = mill_qcvars(ref2in_mill, ref_block_conv)
else:
assert frame == "free" or frame == "" # "": direct from standard_suite_ref.std_molecules
with np.printoptions(precision=3, suppress=True):
assert compare(
min_nonzero_coords,
np.count_nonzero(np.abs(wfn_molecule.geometry(np_out=True)) > 1.0e-10),
tnm + " !0 coords wfn",
), f"ncoords {wfn_molecule.geometry(np_out=True)} != {min_nonzero_coords}"
assert (
(not ref_subject.com_fixed())
and (not ref_subject.orientation_fixed())
and (not subject.com_fixed())
and (not subject.orientation_fixed())
and (not wfn_molecule.com_fixed())
and (not wfn_molecule.orientation_fixed())
), f"free, so all F: {ref_subject.com_fixed()} {ref_subject.orientation_fixed()} {subject.com_fixed()} {subject.orientation_fixed()} {wfn_molecule.com_fixed()} {wfn_molecule.orientation_fixed()}"
if scramble is None:
# wfn exactly matches ref_subject and ref_block
with np.printoptions(precision=3, suppress=True):
assert compare_values(
ref_subject.geometry(), wfn_molecule.geometry(), atol=5.0e-8
), f"coords: atres ({wfn_molecule.geometry(np_out=True)}) != atin ({ref_subject.geometry(np_out=True)})"
else:
# wfn is "pretty" (max zeros) but likely not exactly ref_block (by axis exchange, phasing, atom shuffling) since Psi4 ref frame is not unique
ref_block = mill_qcvars(ref2out_mill, ref_block)
ref_block_conv = mill_qcvars(ref2out_mill, ref_block_conv)
# print("POST REF")
# print(ref_block["HF TOTAL GRADIENT"])
# <<< Comparison Tests >>>
assert wfn["success"] is True
assert (
wfn["provenance"]["creator"].lower() == qcprog
), f'ENGINE used ({ wfn["provenance"]["creator"].lower()}) != requested ({qcprog})'
# qcvars
contractual_args = [
qc_module_out,
driver,
reference,
method,
corl_type,
fcae,
]
asserter_args = [
[qcdb, wfn["qcvars"]],
ref_block,
[atol_e, atol_g, atol_h],
[rtol_e, rtol_g, rtol_h],
ref_block_conv,
atol_conv,
rtol_conv,
tnm,
]
def qcvar_assertions():
print("BLOCK", chash, contractual_args)
if method == "hf":
_asserter(asserter_args, contractual_args, contractual_hf)
elif method == "mp2":
_asserter(asserter_args, contractual_args, contractual_mp2)
elif method == "mp3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
elif method == "mp4(sdq)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
elif method == "mp4":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
_asserter(asserter_args, contractual_args, contractual_mp4)
elif method == "cisd":
_asserter(asserter_args, contractual_args, contractual_cisd)
elif method == "qcisd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
elif method == "qcisd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
_asserter(asserter_args, contractual_args, contractual_qcisd_prt_pr)
elif method == "fci":
_asserter(asserter_args, contractual_args, contractual_fci)
elif method == "lccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccd)
elif method == "lccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccsd)
elif method == "ccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccd)
elif method == "ccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
elif method == "ccsd+t(ccsd)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsdpt_prccsd_pr)
elif method == "ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsd_prt_pr)
elif method == "a-ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_accsd_prt_pr)
elif method == "ccsdt-1a":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1a)
elif method == "ccsdt-1b":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1b)
elif method == "ccsdt-2":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt2)
elif method == "ccsdt-3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt3)
elif method == "ccsdt":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
elif method == "ccsdt(q)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
_asserter(asserter_args, contractual_args, contractual_ccsdt_prq_pr)
elif method == "ccsdtq":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdtq)
# separations here for DFT appropriate when qcvars are labeled by functional
if "wrong" in inp:
if basis == "cc-pvdz" and contractual_args in [
["cfour-ecc", "gradient", "rhf", mtd, "conv", "fc"]
for mtd in ["ccsdt-1a", "ccsdt-1b", "ccsdt-2", "ccsdt-3"]
]:
# these four tests have pass/fail too close for dz to "get it right" with general tolerances
pass
else:
errmatch, reason = inp["wrong"]
with pytest.raises(AssertionError) as e:
qcvar_assertions()
assert errmatch in str(e.value), f"Not found: AssertionError '{errmatch}' for '{reason}' in {e.value}"
_recorder(
qcprog,
qc_module_out,
driver,
method,
reference,
fcae,
scf_type,
corl_type,
"wrong",
reason + f" First wrong at `{errmatch}`.",
)
pytest.xfail(reason)
# primary label checks
qcvar_assertions()
# routing checks
if qc_module_in != qcprog:
assert qc_module_out == qc_module_in, f"QC_MODULE used ({qc_module_out}) != requested ({qc_module_in})"
if qc_module_xptd:
assert qc_module_out == qc_module_xptd, f"QC_MODULE used ({qc_module_out}) != expected ({qc_module_xptd})"
# aliases checks
if is_dft:
_asserter(asserter_args, contractual_args, contractual_dft_current)
else:
_asserter(asserter_args, contractual_args, contractual_current)
# returns checks
if driver == "energy":
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"], wfn["return_result"], tnm + " wfn", atol=atol_e, rtol=rtol_e
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn["properties"]["return_energy"],
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
)
assert compare_values(ref_block[f"{method.upper()} TOTAL ENERGY"], ret, tnm + " return")
elif driver == "gradient":
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn["return_result"],
tnm + " grad wfn",
atol=atol_g,
rtol=rtol_g,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn["properties"]["return_energy"],
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn["properties"]["return_gradient"],
tnm + " grad prop",
atol=atol_g,
rtol=rtol_g,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"], ret, tnm + " grad return", atol=atol_g, rtol=rtol_g
)
elif driver == "hessian":
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn["return_result"],
tnm + " hess wfn",
atol=atol_h,
rtol=rtol_h,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn["properties"]["return_energy"],
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
)
# assert compare_values(ref_block[f"{method.upper()} TOTAL GRADIENT"], wfn["properties"]["return_gradient"], tnm + " grad prop", atol=atol_g, rtol=rtol_g)
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn["properties"]["return_hessian"],
tnm + " hess prop",
atol=atol_h,
rtol=rtol_h,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"], ret, tnm + " hess return", atol=atol_h, rtol=rtol_h
)
# generics checks
# yapf: disable
assert compare(ref_block["N BASIS FUNCTIONS"], wfn["properties"]["calcinfo_nbasis"], tnm + " nbasis wfn"), f"nbasis {wfn['properties']['calcinfo_nbasis']} != {ref_block['N BASIS FUNCTIONS']}"
assert compare(ref_block["N MOLECULAR ORBITALS"], wfn["properties"]["calcinfo_nmo"], tnm + " nmo wfn"), f"nmo {wfn['properties']['calcinfo_nmo']} != {ref_block['N MOLECULAR ORBITALS']}"
assert compare(ref_block["N ALPHA ELECTRONS"], wfn["properties"]["calcinfo_nalpha"], tnm + " nalpha wfn"), f"nalpha {wfn['properties']['calcinfo_nalpha']} != {ref_block['N ALPHA ELECTRONS']}"
assert compare(ref_block["N BETA ELECTRONS"], wfn["properties"]["calcinfo_nbeta"], tnm + " nbeta wfn"), f"nbeta {wfn['properties']['calcinfo_nbeta']} != {ref_block['N BETA ELECTRONS']}"
# yapf: enable
# record
_recorder(
qcprog, qc_module_out, driver, method, reference, fcae, scf_type, corl_type, "fd" if using_fd else "pass", ""
)
# assert 0
def _asserter(asserter_args, contractual_args, contractual_fn):
    """For expectations in `contractual_fn`, check that the QCVars are present in P::e.globals and wfn and match expected ref_block."""
    qcvar_stores, ref_block, atol_egh, rtol_egh, ref_block_conv, atol_conv, rtol_conv, tnm = asserter_args

    for obj in qcvar_stores:
        for rpv, pv, present in contractual_fn(*contractual_args):
            label = tnm + " " + pv
            # Pick the Energy/Gradient/Hessian tolerance from the qcvar
            # name: the first letter of its last word is E, G or H.
            atol = atol_egh["EGH".index(rpv.split()[-1][0])]
            rtol = rtol_egh["EGH".index(rpv.split()[-1][0])]

            if present:
                # verify exact match to method (may be df) and near match to conventional (non-df) method
                tf, errmsg = compare_values(
                    ref_block[rpv], query_qcvar(obj, pv), label, atol=atol, rtol=rtol, return_message=True, quiet=True
                )
                assert compare_values(ref_block[rpv], query_qcvar(obj, pv), label, atol=atol, rtol=rtol), errmsg

                tf, errmsg = compare_values(
                    ref_block_conv[rpv],
                    query_qcvar(obj, pv),
                    label,
                    atol=atol_conv,
                    rtol=rtol_conv,
                    return_message=True,
                    quiet=True,
                )
                assert compare_values(
                    ref_block_conv[rpv], query_qcvar(obj, pv), label, atol=atol_conv, rtol=rtol_conv
                ), errmsg

                # Note that the double compare_values lines are to collect the errmsg in the first for assertion in the second.
                # If the errmsg isn't present in the assert, the string isn't accessible through `e.value`.
                # If a plain bool is compared in the assert, the printed message will show booleans and not numbers.
            else:
                # verify and forgive known contract violations
                assert compare(False, query_has_qcvar(obj, pv), label + " SKIP"), f"{label} wrongly present"
def _recorder(engine, module, driver, method, reference, fcae, scf_type, corl_type, status, note):
with open("stdsuite_qcng.txt", "a") as fp:
stuff = {
"module": module,
"driver": driver,
"method": method,
"reference": reference,
"fcae": fcae,
"scf_type": scf_type,
"corl_type": corl_type,
"status": status,
"note": note,
}
fp.write(f"{stuff!r}\n")
| StarcoderdataPython |
1764088 | <reponame>sonecabr/konker-platform
import xml.etree.ElementTree as ET
from .gateway import *
CARTS_RESOURCE = "http://ec2-52-4-244-64.compute-1.amazonaws.com/konkershop/api/carts/"
FIND_LAST_CART_TEMPLATE = CARTS_RESOURCE + "?filter[id_customer]=%s&sort=[id_DESC]&limit=1"
GET_LAST_CART_TEMPLATE = CARTS_RESOURCE + "%s"
def add_to_cart(customer_code, product_code, quantity):
    """Add *quantity* units of *product_code* to the customer's cart.

    Reuses the customer's most recent cart when it is still open and does
    not already contain the product; otherwise a new cart is created.

    :param customer_code: id of the customer whose cart is targeted.
    :param product_code: id of the product to add.
    :param quantity: number of units to add.
    :return: the gateway response from the save operation, or ``None``
        when the open cart already contains the product (no-op).
    """

    def find_last():
        """Return the id of the customer's most recent cart, or None."""
        carts_xml = parse(get(FIND_LAST_CART_TEMPLATE % customer_code))
        if carts_xml is not None:
            carts = carts_xml.find(".//carts")
            if len(carts) > 0:
                return carts[0].attrib['id']
        return None

    def get_last_as_xml(cart_id):
        """Fetch a cart by id as parsed XML, or None when id is None."""
        if cart_id is not None:
            return parse(get(GET_LAST_CART_TEMPLATE % cart_id))
        return None

    def new_cart():
        """Build a new cart XML document from the blank template."""
        # Context manager ensures the template file is closed (the
        # original leaked the file handle).
        with open("files/blank_cart.xml") as template:
            xml = parse(template.read())
        xml.find(".//id_customer").text = str(customer_code)
        xml.find(".//id_product").text = str(product_code)
        xml.find(".//quantity").text = str(quantity)
        return xml

    def is_open(cart):
        # Carrier id 0 is treated as "not yet checked out" -- presumably
        # the shop platform's convention; confirm against the API docs.
        return int(cart.find(".//id_carrier").text) == 0

    def is_product_missing(cart):
        """True when the cart has no row for this product."""
        return len(cart.findall(".//cart_row[id_product='%s']" % product_code)) == 0

    def append_to(cart):
        """Append a new cart_row for the product to the cart XML."""
        def new_element(name, value):
            el = ET.Element(name)
            el.text = str(value)
            return el

        new_row = ET.Element("cart_row")
        new_row.append(new_element("id_product", product_code))
        new_row.append(new_element("id_product_attribute", 0))
        new_row.append(new_element("id_address_delivery", 0))
        new_row.append(new_element("quantity", quantity))
        cart.find(".//cart_rows").append(new_row)
        return cart

    def save(cart, new=False):
        """POST a brand-new cart; PUT an update to an existing one."""
        if new:
            return post(CARTS_RESOURCE, ET.tostring(cart))
        return put(CARTS_RESOURCE, ET.tostring(cart))

    last_cart_id = find_last()
    if last_cart_id is None:
        return save(new_cart(), new=True)

    last_cart = get_last_as_xml(last_cart_id)
    if is_open(last_cart):
        if is_product_missing(last_cart):
            return save(append_to(last_cart))
        # Product already in the open cart: deliberately a no-op.
        # NOTE(review): should this increment the row's quantity instead?
        return None
    return save(new_cart(), new=True)
1794341 | {
"targets": [
{
"target_name": "addon",
"sources": [
"src/addon.cc",
"src/blake2.cc",
"lib/BLAKE2/sse/blake2b.c",
"lib/BLAKE2/sse/blake2bp.c",
"lib/BLAKE2/sse/blake2s.c",
"lib/BLAKE2/sse/blake2sp.c"
],
"include_dirs" : [
"lib/BLAKE2/sse"
],
"cflags_c": [ "-std=c99" ]
}
]
}
| StarcoderdataPython |
1671700 | """
Contains classes and functions pertaining to database serialization.
"""
import os
from breakdb.io.export.voc import VOCDatabaseEntryExporter
from breakdb.io.export.yolo import YOLODatabaseEntryExporter
from breakdb.io.reading import CsvDatabaseReader, ExcelDatabaseReader, \
JsonDatabaseReader
from breakdb.io.writing import CsvDatabaseWriter, ExcelDatabaseWriter, \
JsonDatabaseWriter
# Registered per-entry exporters, keyed by output format name.
_EXPORTERS = {
    "voc": VOCDatabaseEntryExporter(),
    "yolov3": YOLODatabaseEntryExporter()
}
# Database readers, keyed by (lower-case) file extension.
_READERS = {
    ".csv": CsvDatabaseReader(),
    ".xlsx": ExcelDatabaseReader(),
    ".json": JsonDatabaseReader()
}
# Database writers, keyed by (lower-case) file extension.
_WRITERS = {
    ".csv": CsvDatabaseWriter(),
    ".xlsx": ExcelDatabaseWriter(),
    ".json": JsonDatabaseWriter()
}
# Column order of a serialized database; one row per image entry.
COLUMN_NAMES = [
    "ID",  # The SOP instance UID.
    "Series",  # The series UID.
    "Study",  # The study UID.
    "Classification",  # Whether or not a fracture is present (ground-truth).
    "Body Part",  # The type of body part imaged.
    "Width",  # The image width (in pixels).
    "Height",  # The image height (in pixels).
    "File Path",  # Location of image file on disk.
    "Scaling",  # Whether there is any scaling information.
    "Windowing",  # Whether or not there is any windowing information.
    "Annotation"  # All discovered annotations.
]
def filter_files(paths, extensions=None, relative=False):
    """
    Searches for all files in the specified collection of paths and filters
    them by the specified collection of admissible extensions.

    Note: this is a generator, so the ValueError for a missing extension
    collection is raised on first iteration, not at call time.

    :param paths: A collection of directories to search.
    :param extensions: A collection of extensions (or a single extension
    string) to filter by.
    :param relative: Whether or not to use relative paths.
    :return: A generator over a collection of filtered files.
    """
    if not extensions:
        raise ValueError("At least one extension must be provided.")
    if isinstance(extensions, str):
        extensions = [extensions]
    # str.endswith accepts a tuple of suffixes; this also guarantees each
    # file is yielded at most once (the previous per-extension loop could
    # yield the same file several times when multiple suffixes matched).
    suffixes = tuple(extensions)
    resolver = os.path.abspath if not relative else os.path.relpath
    for file_path in paths:
        for root, _, files in os.walk(file_path, topdown=False):
            root = resolver(root)
            for file in files:
                if file.endswith(suffixes):
                    yield os.path.join(root, file)
def get_entry_exporter(format_name):
    """
    Return the database entry exporter registered for the given format.

    :param format_name: The file format to use.
    :return: A database entry exporter.
    :raises KeyError: If no exporter is registered for the format.
    """
    exporter = _EXPORTERS.get(format_name)
    if exporter is None:
        raise KeyError(f"Cannot find exporter - unknown format: "
                       f"{format_name}.")
    return exporter
def read_database(file_path):
    """
    Reads a database located from the specified file on disk.

    :param file_path: The file to read a database from.
    :return: A database.
    :raises KeyError: If a reader cannot be found for a particular file path.
    """
    extension = os.path.splitext(file_path)[1]
    reader = _READERS.get(extension)
    if reader is None:
        raise KeyError(f"Cannot read database - unknown file extension:"
                       f" {file_path}.")
    with open(file_path, "r") as stream:
        return reader.read(stream)
def write_database(db, file_path):
    """
    Writes the specified database to the specified file on disk.

    :param db: The database to write.
    :param file_path: The file to write the database to.
    :raises KeyError: If a writer cannot be found for a particular file path.
    """
    extension = os.path.splitext(file_path)[1]
    writer = _WRITERS.get(extension)
    if writer is None:
        raise KeyError(f"Cannot write database - unknown file extension: "
                       f"{file_path}")
    with open(file_path, "w") as stream:
        writer.write(db, stream)
| StarcoderdataPython |
1668384 | <filename>workbench/invoices/test_projected_invoices.py<gh_stars>10-100
from decimal import Decimal
from django.core import mail
from django.test import TestCase
from django.utils.translation import deactivate_all
from time_machine import travel
from workbench import factories
from workbench.invoices.tasks import send_unsent_projected_invoices_reminders
from workbench.reporting.key_data import projected_invoices
class ProjectedInvoicesTest(TestCase):
    """Tests for projected-invoice reporting and reminder mails."""
    def setUp(self):
        # Disable translations so asserted strings/structures are stable.
        deactivate_all()
    @travel("2021-11-30")
    def test_projected_invoices(self):
        """Functionality around projected invoices and reminder mails"""
        # One projected invoice of 1000 for the current month (Nov 2021).
        obj = factories.ProjectedInvoiceFactory.create(gross_margin=Decimal(1000))
        pi = projected_invoices()
        self.assertEqual(pi["monthly_overall"][(2021, 11)], Decimal(1000))
        self.assertEqual(
            pi["projects"],
            [
                {
                    "delta": Decimal("1000.00"),
                    "gross_margin": Decimal("0.00"),
                    "invoiced": [],
                    "monthly": {(2021, 11): Decimal("1000.00")},
                    "project": obj.project,
                    "projected": [obj],
                    "projected_total": Decimal("1000.00"),
                }
            ],
        )
        # No reminder is sent on 2021-11-30 ...
        send_unsent_projected_invoices_reminders()
        self.assertEqual(len(mail.outbox), 0)
        # ... nor on the first of the following month ...
        with travel("2021-12-01"):
            send_unsent_projected_invoices_reminders()
            self.assertEqual(len(mail.outbox), 0)
        # ... but one goes out on 2021-11-28; presumably the task's
        # configured reminder day near month end — confirm in
        # send_unsent_projected_invoices_reminders().
        with travel("2021-11-28"):
            send_unsent_projected_invoices_reminders()
            self.assertEqual(len(mail.outbox), 1)
        # print(mail.outbox[0].__dict__)
| StarcoderdataPython |
125527 | import json
import pickle
import ast
import math
import operator
def get_lang_prior(pomdp_to_map_fp, lang_dict_fp):
    """
    Convert the language prior dictionary into a POMDP-usable form.

    Returns ``{object_id: {pomdp_cell_tuple: probability}}`` where every
    inner distribution sums to 1 and cells with no observation receive a
    small floor probability (1e-6 before normalization).
    """
    with open(pomdp_to_map_fp, 'r') as fin:
        pomdp_to_map = json.load(fin)
    with open(lang_dict_fp, 'rb') as fin:
        lang_dict = pickle.load(fin)
    # Invert the mapping: map index -> POMDP cell (a stringified tuple).
    # https://stackoverflow.com/questions/483666/reverse-invert-a-dictionary-mapping
    map_to_pomdp = {map_idx: cell for cell, map_idx in pomdp_to_map.items()}
    priors = {}
    for objid, observations in lang_dict.items():
        cell_probs = {}
        for map_idx, cell_str in map_to_pomdp.items():
            cell = ast.literal_eval(cell_str)
            # Cells missing from the prior get a tiny floor probability.
            prob = observations.get(map_idx, 1e-6)
            cell_probs[cell] = max(prob, 1e-6)
        # Normalize so the per-object distribution sums to 1.
        # https://stackoverflow.com/questions/16417916/normalizing-dictionary-values
        scale = 1.0 / math.fsum(cell_probs.values())
        priors[objid] = {cell: p * scale for cell, p in cell_probs.items()}
    return priors
def get_center_latlon(cell_idx, pomdp_to_map_fp, idx_to_cell_fp):
    """
    Arguments:
        - cell_idx: Tuple (row, col) representing a POMDP grid cell index
        - pomdp_to_map_fp: String filepath to the pomdp_to_map JSON
        - idx_to_cell_fp: String filepath to the idx_to_cell JSON for this map
    Returns:
        Tuple (lat, lon) of the center of that cell — the midpoint of the
        cell's "nw" and "se" corners.
    """
    with open(pomdp_to_map_fp, 'r') as fin:
        pomdp_to_map = json.load(fin)
    with open(idx_to_cell_fp, 'r') as fin:
        idx_to_cell = json.load(fin)
    corners = idx_to_cell[str(pomdp_to_map[str(cell_idx)])]
    # TODO: Is the order of lat lon correct?
    nw, se = corners["nw"], corners["se"]
    return ((nw[0] + se[0]) / 2, (nw[1] + se[1]) / 2)
def latlon_to_pomdp_cell(lat, lon, pomdp_to_map_fp, idx_to_cell_fp):
    """Map a (lat, lon) point to the POMDP cell index containing it.

    Returns the POMDP cell key (a string) or the literal string "None"
    when the point falls outside every known cell.
    """
    with open(pomdp_to_map_fp, 'r') as fin:
        pomdp_to_map = json.load(fin)
    with open(idx_to_cell_fp, 'r') as fin:
        idx_to_cell = json.load(fin)
    # Find the map cell containing the point; as in the original scan,
    # the last matching cell wins.
    containing_idx = None
    for map_idx, cell in idx_to_cell.items():
        if cell_contains(cell, lat, lon):
            containing_idx = map_idx
    # Translate the map index back into its POMDP index.
    for pomdp_idx, map_idx in pomdp_to_map.items():
        if str(map_idx) == containing_idx:
            return pomdp_idx
    return "None"
def cell_contains(cell_dict, lat, lon):
    """Return True when (lat, lon) lies inside the cell's bounding box.

    An exact corner comparison is tried first; when it fails, the check
    is repeated with the corner coordinates rounded to 5 decimal places,
    so points on a slightly imprecise boundary still count as inside.
    """
    sw = cell_dict["sw"]
    ne = cell_dict["ne"]
    # Exact bounds check.
    if sw[0] <= lat <= ne[0] and sw[1] <= lon <= ne[1]:
        return True
    # Tolerant re-check against corners rounded to 5 decimals.
    within_first = round(sw[0], 5) <= lat <= round(ne[0], 5)
    within_second = round(sw[1], 5) <= lon <= round(ne[1], 5)
    return within_first and within_second
| StarcoderdataPython |
3358772 | """
##################################################################################################
# Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved.
# Filename : builder.py
# Abstract :
# Current Version: 1.0.0
# Date : 2020-05-31
##################################################################################################
"""
from mmcv.utils import Registry, build_from_cfg
# Registry that converter classes register themselves into.
CONVERTER = Registry('converter')
def build_converter(cfg):
    """
    Build a converter instance from its configuration.

    :param cfg: converter config; must contain a string ``type`` key
        naming a class registered in ``CONVERTER``.
    :return: the constructed converter instance.
    """
    assert 'type' in cfg and isinstance(cfg['type'], str)
    return build_from_cfg(cfg, CONVERTER)
| StarcoderdataPython |
3256032 | from .webgme import WebGME
from .pluginbase import PluginBase
from .exceptions import CoreIllegalArgumentError, CoreIllegalOperationError, CoreInternalError, JSError
name = "webgme_bindings"
| StarcoderdataPython |
1667169 | <filename>gammapy/spectrum/sherpa_models.py<gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sherpa spectral models
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from sherpa.models import ArithmeticModel, Parameter, modelCacher1d
__all__ = [
'SherpaExponentialCutoffPowerLaw',
]
# Partly copied from https://github.com/zblz/naima/blob/master/naima/sherpa_models.py#L33
class SherpaExponentialCutoffPowerLaw(ArithmeticModel):
    """Exponential CutoffPowerLaw
    Note that the cutoff is given in units '1/TeV' in order to bring the Sherpa
    optimizers into a valid range. All other parameters still have units 'keV'
    and 'cm2'.
    """
    def __init__(self, name='ecpl'):
        # Spectral index, reference energy (frozen), amplitude, and the
        # exponential cutoff in 1/TeV (see class docstring).
        self.gamma = Parameter(name, 'gamma', 2, min=-10, max=10)
        self.ref = Parameter(name, 'ref', 1, frozen=True)
        self.ampl = Parameter(name, 'ampl', 1, min=0)
        self.cutoff = Parameter(name, 'cutoff', 1, min=0, units='1/TeV')
        ArithmeticModel.__init__(self, name, (self.gamma, self.ref, self.ampl,
                                              self.cutoff))
        # Enable Sherpa's 1D model caching used by the @modelCacher1d
        # decorator on calc() below.
        self._use_caching = True
        self.cache = 10
    @modelCacher1d
    def calc(self, p, x, xhi=None):
        """Evaluate the model.

        :param p: parameter values in order (gamma, ref, ampl, cutoff).
        :param x: energies (low bin edges when ``xhi`` is given).
        :param xhi: optional high bin edges; when present the model is
            integrated over each bin instead of point-evaluated.
        """
        from .models import ExponentialCutoffPowerLaw
        # The cutoff parameter is fit in 1/TeV; convert to the 1/keV
        # scale expected by lambda_ (1 keV = 1e-9 TeV).
        kev_to_tev = 1e-9
        model = ExponentialCutoffPowerLaw(index=p[0],
                                          reference=p[1],
                                          amplitude=p[2],
                                          lambda_=p[3] * kev_to_tev)
        if xhi is None:
            val = model(x)
        else:
            val = model.integral(x, xhi, intervals=True)
        return val
| StarcoderdataPython |
# Widget/element type identifier.
CONTAINER = "container"
# UI accent colors (hex RGB).
BLUE = "#33aeff"
GREEN = "#a4c639"
| StarcoderdataPython |
3299188 | '''
Given a string S, consider all duplicated substrings: (contiguous) substrings of S that occur 2 or more times. (The occurrences may overlap.)
Return any duplicated substring that has the longest possible length. (If S does not have a duplicated substring, the answer is "".)
Example 1:
Input: "banana"
Output: "ana"
Example 2:
Input: "abcd"
Output: ""
Note:
2 <= S.length <= 10^5
S consists of lowercase English letters.
'''
class Solution:
    """Longest duplicated substring via binary search + Rabin-Karp."""

    def longestDupSubstring(self, s: str) -> str:
        """Return a longest substring of ``s`` occurring 2+ times ('' if none).

        Binary-searches the answer's length (a duplicate of length L
        implies duplicates of every shorter length), checking each
        candidate length with a rolling hash in O(n) expected time.
        """
        # BUG FIX: the lower bound was previously 2, so a length-1
        # duplicate (e.g. "aa" -> "a") could never be found.
        low, high = 1, len(s) - 1
        best = ''
        while low <= high:
            mid = low + (high - low) // 2
            found = self.find_duplicate_substr_of_len_k(s, mid)
            if found != '':
                best = found
                low = mid + 1
            else:
                high = mid - 1
        return best

    def find_duplicate_substr_of_len_k(self, s, k):
        """Return any length-``k`` substring occurring twice in ``s``, else ''."""
        MOD = (1 << 61) - 1  # large Mersenne prime keeps collisions rare
        BASE = 26
        D = pow(BASE, k - 1, MOD)  # weight of the character leaving the window
        chash = 0
        # rolling hash -> end indices of windows with that hash; a plain
        # dict (with setdefault) avoids the unimported collections module.
        seen = {}
        for i in range(len(s)):
            if i >= k:
                # Remove the contribution of the character sliding out.
                l_chval = ord(s[i - k]) - ord('a')
                chash = (chash - l_chval * D) % MOD
            chash = (chash * BASE + (ord(s[i]) - ord('a'))) % MOD
            if i >= k - 1:
                if chash in seen:
                    # Compare the actual text to rule out hash collisions.
                    substr_i = s[i - k + 1:i + 1]
                    for j in seen[chash]:
                        if s[j - k + 1:j + 1] == substr_i:
                            return substr_i
                seen.setdefault(chash, []).append(i)
        return ''
| StarcoderdataPython |
79755 | """
The Yahoo finance component.
https://github.com/iprak/yahoofinance
"""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Final, Union
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
import voluptuous as vol
from custom_components.yahoofinance.coordinator import YahooSymbolUpdateCoordinator
from .const import (
CONF_DECIMAL_PLACES,
CONF_INCLUDE_FIFTY_DAY_VALUES,
CONF_INCLUDE_POST_VALUES,
CONF_INCLUDE_PRE_VALUES,
CONF_INCLUDE_TWO_HUNDRED_DAY_VALUES,
CONF_SHOW_TRENDING_ICON,
CONF_SYMBOLS,
CONF_TARGET_CURRENCY,
DEFAULT_CONF_DECIMAL_PLACES,
DEFAULT_CONF_INCLUDE_FIFTY_DAY_VALUES,
DEFAULT_CONF_INCLUDE_POST_VALUES,
DEFAULT_CONF_INCLUDE_PRE_VALUES,
DEFAULT_CONF_INCLUDE_TWO_HUNDRED_DAY_VALUES,
DEFAULT_CONF_SHOW_TRENDING_ICON,
DOMAIN,
HASS_DATA_CONFIG,
HASS_DATA_COORDINATOR,
SERVICE_REFRESH,
)
_LOGGER = logging.getLogger(__name__)
# Poll Yahoo every 6 hours by default; never allow faster than 30s.
DEFAULT_SCAN_INTERVAL: Final = timedelta(hours=6)
MINIMUM_SCAN_INTERVAL: Final = timedelta(seconds=30)
# A symbol may be given as a bare string (upper-cased automatically) ...
BASIC_SYMBOL_SCHEMA = vol.All(cv.string, vol.Upper)
# ... or as a dict with an optional per-symbol target currency.
COMPLEX_SYMBOL_SCHEMA = vol.All(
    dict,
    vol.Schema(
        {
            vol.Required("symbol"): BASIC_SYMBOL_SCHEMA,
            vol.Optional(CONF_TARGET_CURRENCY): BASIC_SYMBOL_SCHEMA,
        }
    ),
)
# Schema for the component's section of configuration.yaml.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_SYMBOLS): vol.All(
                    cv.ensure_list,
                    [vol.Any(BASIC_SYMBOL_SCHEMA, COMPLEX_SYMBOL_SCHEMA)],
                ),
                # "none"/"None" disables periodic updates entirely.
                vol.Optional(
                    CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
                ): vol.Any("none", "None", cv.positive_time_period),
                vol.Optional(CONF_TARGET_CURRENCY): vol.All(cv.string, vol.Upper),
                vol.Optional(
                    CONF_SHOW_TRENDING_ICON, default=DEFAULT_CONF_SHOW_TRENDING_ICON
                ): cv.boolean,
                vol.Optional(
                    CONF_DECIMAL_PLACES, default=DEFAULT_CONF_DECIMAL_PLACES
                ): vol.Coerce(int),
                vol.Optional(
                    CONF_INCLUDE_FIFTY_DAY_VALUES,
                    default=DEFAULT_CONF_INCLUDE_FIFTY_DAY_VALUES,
                ): cv.boolean,
                vol.Optional(
                    CONF_INCLUDE_POST_VALUES, default=DEFAULT_CONF_INCLUDE_POST_VALUES
                ): cv.boolean,
                vol.Optional(
                    CONF_INCLUDE_PRE_VALUES, default=DEFAULT_CONF_INCLUDE_PRE_VALUES
                ): cv.boolean,
                vol.Optional(
                    CONF_INCLUDE_TWO_HUNDRED_DAY_VALUES,
                    default=DEFAULT_CONF_INCLUDE_TWO_HUNDRED_DAY_VALUES,
                ): cv.boolean,
            }
        )
    },
    # The complete HA configuration is passed down to`async_setup`, allow the extra keys.
    extra=vol.ALLOW_EXTRA,
)
class SymbolDefinition:
    """A stock symbol paired with an optional conversion currency."""
    symbol: str
    target_currency: str
    def __init__(self, symbol: str, target_currency: Union[str, None] = None) -> None:
        """Store the symbol and its optional target currency."""
        self.symbol = symbol
        self.target_currency = target_currency
    def __repr__(self) -> str:
        """Return the compact 'symbol,currency' form."""
        return "{},{}".format(self.symbol, self.target_currency)
    def __eq__(self, other: any) -> bool:
        """Two definitions are equal when both fields match."""
        if not isinstance(other, SymbolDefinition):
            return False
        return (self.symbol, self.target_currency) == (
            other.symbol,
            other.target_currency,
        )
    def __hash__(self) -> int:
        """Hash over the same fields used for equality."""
        return hash((self.symbol, self.target_currency))
def parse_scan_interval(scan_interval: Union[timedelta, str]) -> timedelta:
    """Parse and validate scan_interval.

    Accepts a timedelta, or the string "none"/"None" (any case) which
    disables periodic updates by returning None.

    :raises vol.Invalid: for any other string, or for an interval below
        MINIMUM_SCAN_INTERVAL.
    """
    if isinstance(scan_interval, str):
        # BUG FIX: the isinstance(str) check was duplicated in a
        # redundant nested if; a single check suffices.
        if scan_interval.lower() == "none":
            return None
        raise vol.Invalid(
            f"Invalid {CONF_SCAN_INTERVAL} specified: {scan_interval}"
        )
    if scan_interval < MINIMUM_SCAN_INTERVAL:
        raise vol.Invalid("Scan interval should be at least 30 seconds.")
    return scan_interval
def normalize_input(defined_symbols: list) -> tuple[list[str], list[SymbolDefinition]]:
    """Normalize input and remove duplicates.

    Accepts a mix of bare symbol strings and {"symbol": ..., optional
    target currency} dicts; the first occurrence of each symbol wins.
    """
    seen = set()
    definitions: list[SymbolDefinition] = []
    for entry in defined_symbols:
        if isinstance(entry, str):
            symbol, currency = entry, None
        else:
            symbol, currency = entry["symbol"], entry.get(CONF_TARGET_CURRENCY)
        if symbol not in seen:
            seen.add(symbol)
            definitions.append(SymbolDefinition(symbol, currency))
    return (list(seen), definitions)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the component.

    Normalizes the configured symbols, creates the shared update
    coordinator, performs an initial refresh, registers the refresh
    service and schedules loading of the sensor platform.
    """
    domain_config = config.get(DOMAIN, {})
    defined_symbols = domain_config.get(CONF_SYMBOLS, [])
    symbols, symbol_definitions = normalize_input(defined_symbols)
    # Replace the raw config entries with normalized SymbolDefinitions.
    domain_config[CONF_SYMBOLS] = symbol_definitions
    scan_interval = parse_scan_interval(domain_config.get(CONF_SCAN_INTERVAL))
    # Populate parsed value into domain_config
    domain_config[CONF_SCAN_INTERVAL] = scan_interval
    coordinator = YahooSymbolUpdateCoordinator(symbols, hass, scan_interval)
    # Refresh coordinator to get initial symbol data
    _LOGGER.info(
        "Requesting data from coordinator with update interval of %s.", scan_interval
    )
    await coordinator.async_refresh()
    # Pass down the coordinator and config to platforms.
    hass.data[DOMAIN] = {
        HASS_DATA_COORDINATOR: coordinator,
        HASS_DATA_CONFIG: domain_config,
    }
    async def handle_refresh_symbols(_call) -> None:
        """Refresh symbol data."""
        _LOGGER.info("Processing refresh_symbols")
        await coordinator.async_request_refresh()
    hass.services.async_register(
        DOMAIN,
        SERVICE_REFRESH,
        handle_refresh_symbols,
    )
    if not coordinator.last_update_success:
        # The initial refresh failed; schedule another attempt in the
        # background rather than failing setup.
        _LOGGER.debug("Coordinator did not report any data, requesting async_refresh")
        hass.async_create_task(coordinator.async_request_refresh())
    hass.async_create_task(
        discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
    )
    return True
def convert_to_float(value) -> float | None:
"""Convert specified value to float."""
try:
return float(value)
except: # noqa: E722 pylint: disable=bare-except
return None
| StarcoderdataPython |
2562 | <gh_stars>0
import logging
import pathlib
# Configure root logging at INFO level for the whole process.
logging.basicConfig(level=logging.INFO)
# Dirs
ROOT_DIR = pathlib.Path(__file__).parent.absolute()  # directory containing this file
DUMP_DIR = ROOT_DIR / 'dumps'  # where dump files are stored
| StarcoderdataPython |
3234211 | <reponame>JalajaTR/cQube
import csv
import os
import re
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from filenames import file_extention
from get_dir import pwd
from reuse_func import GetData
class test_course_based_on_timeperiods():
    """Validates Diksha 'course' (textbook collection) play counts for the
    last-30-days, last-7-days and last-day filters by comparing the
    on-screen total with the total from the downloaded CSV report."""
    def __init__(self, driver):
        self.driver = driver

    def _check_timeperiod(self, index, label):
        """Select the time-period dropdown entry at ``index`` and verify
        the displayed play count matches the downloaded CSV.

        BUG FIXES over the three copy-pasted originals: the 30-day copy
        read ``driver.page_sourse`` (typo -> AttributeError), the 7-day
        and 1-day copies printed "last 30 days" in their messages, the
        regex used a non-raw '\\D', and os.remove() could run even when
        the CSV was never downloaded.

        :param index: index of the option in the ``timePeriod`` select.
        :param label: human-readable period name used in messages.
        :return: number of mismatches found (0 or 1).
        """
        self.data = GetData()
        self.p = pwd()
        self.msg = file_extention()
        count = 0
        self.driver.find_element_by_xpath(Data.hyper_link).click()
        self.data.page_loading(self.driver)
        timeperiod = Select(self.driver.find_element_by_name('timePeriod'))
        timeperiod.select_by_index(index)
        self.data.page_loading(self.driver)
        if self.msg.no_data_available() in self.driver.page_source:
            print(f"{label} does not having data")
        else:
            self.driver.find_element_by_id('download').click()
            time.sleep(3)
            self.filename = self.p.get_download_dir() + "/collectionType_textbook_data.csv"
            if not os.path.isfile(self.filename):
                print(f"Diksha course type of {label.lower()} data csv file not downloaded")
            else:
                with open(self.filename) as fin:
                    csv_reader = csv.reader(fin, delimiter=',')
                    next(csv_reader)  # skip the header row
                    contentplays = sum(int(row[0]) for row in csv_reader)
                play_count = self.driver.find_element_by_id('totalCount').text
                pc = re.sub(r'\D', "", play_count)
                if int(pc) != int(contentplays):
                    print(f"Course type of {label.lower()} has difference between screen count value and csv file count ")
                    count = count + 1
                self.data.page_loading(self.driver)
                # Only remove the CSV when it was actually downloaded.
                os.remove(self.filename)
        return count

    def test_last30_days(self):
        """Verify the 'Last 30 Days' filter (dropdown index 3)."""
        return self._check_timeperiod(3, "Last 30 days")

    def test_last7_days(self):
        """Verify the 'Last 7 Days' filter (dropdown index 2)."""
        return self._check_timeperiod(2, "Last 7 days")

    def test_last_day(self):
        """Verify the 'Last day' filter (dropdown index 1)."""
        return self._check_timeperiod(1, "Last day")
| StarcoderdataPython |
__all__ = ['retry', 'retry_call']
import logging
from .api import retry, retry_call
from .compat import NullHandler
log = logging.getLogger(__name__)
# Attach a NullHandler so the library stays silent unless the host
# application configures logging itself (standard library practice).
log.addHandler(NullHandler())
| StarcoderdataPython |
1614365 | <reponame>Time0o/advent-of-code
#!/usr/bin/env python3
import json
def add_numbers(data, ignore_red: bool = False) -> int:
    """Recursively sum every number in a parsed JSON structure.

    :param data: any JSON value (dict, list, int, str, ...).
    :param ignore_red: when True, skip any dict (and everything inside
        it) that has "red" among its *values*, per Advent of Code 2015
        day 12 part two.  (The previous version also skipped dicts with
        a "red" *key*, which the puzzle does not ask for.)
    """
    if isinstance(data, dict):
        if ignore_red and 'red' in data.values():
            return 0
        # JSON object keys are strings and so contribute 0, but summing
        # them keeps the function safe for non-JSON dicts too.
        return sum(add_numbers(k, ignore_red) + add_numbers(v, ignore_red)
                   for k, v in data.items())
    if isinstance(data, list):
        return sum(add_numbers(e, ignore_red) for e in data)
    if isinstance(data, int):
        return data
    return 0
if __name__ == '__main__':
    # Parse one line of JSON from stdin and report both sums.
    document = json.loads(input())
    total = add_numbers(document)
    total_without_red = add_numbers(document, ignore_red=True)
    print("Sum of all numbers is {}.".format(total))
    print("Sum of all numbers is {} (without dictionaries containing 'red').".format(total_without_red))
| StarcoderdataPython |
3282822 | from .block_body import BlockBodyFactory # noqa: F401
from .block_hash import BlockHashFactory, Hash32Factory # noqa: F401
from .chain_context import ChainContextFactory # noqa: F401
from .db import ( # noqa: F401
MemoryDBFactory,
AtomicDBFactory,
HeaderDBFactory,
AsyncHeaderDBFactory,
)
from .les.proto import ( # noqa: F401
LESV1HandshakerFactory,
LESV2HandshakerFactory,
LESV1PeerPairFactory,
LESV2PeerPairFactory,
)
from .eth.proto import ( # noqa: F401
ETHHandshakerFactory,
ETHV65PeerPairFactory,
ETHV63PeerPairFactory,
ETHV64PeerPairFactory,
LatestETHPeerPairFactory,
ALL_PEER_PAIR_FACTORIES,
)
from .headers import BlockHeaderFactory # noqa: F401
from .receipts import UninterpretedReceiptFactory # noqa: F401
from .transactions import ( # noqa: F401
UninterpretedTransactionFactory,
)
| StarcoderdataPython |
138541 | <filename>supbot/statemanager/state.py
from enum import Enum
from typing import Tuple, cast
from abc import ABC
from supbot import g
from supbot.results import GotoStateResult
class State(Enum):
    """
    Represents different states of the gui in whatsapp app
    """
    # BUG FIX: MAIN and CHAT previously ended with a stray trailing
    # comma, making their values the tuples (0,) and (1,) while SEARCH
    # was the plain int 2; all three are now consistent ints.
    MAIN = 0
    CHAT = 1
    SEARCH = 2
class GUIState(ABC):
    """Base class for one screen ("state") of the WhatsApp GUI.

    NOTE(review): ``check`` and ``_to_state`` are de-facto abstract
    (their bodies are ``...``) but are not decorated with
    ``@abstractmethod``, so a subclass that forgets to override them
    fails only at call time — confirm whether they should be abstract.
    """
    def __init__(self, state: State):
        # The State enum member this GUI screen represents.
        self.state = state
    def check(self) -> bool:
        """
        Checks if the gui is on this state or not
        :return: False if state is not same as the gui
        """
        ...
    def _to_state(self, target: 'GUIState') -> Tuple[GotoStateResult, 'GUIState']:
        """
        takes in a state and takes ONLY ONE step towards the target state,
        if the step is taken, success is returned
        :param target: the state to move towards
        :return: result of the step and the state actually reached
        """
        ...
    def to(self, target: 'GUIState') -> Tuple[GotoStateResult, 'GUIState']:
        """Take one step towards ``target`` and verify the GUI arrived.

        Returns CHECK_FAILED when the step reported success but the GUI
        does not actually show the reached state.
        """
        result, current = self._to_state(target)
        if result == GotoStateResult.SUCCESS:
            if current.check():
                return result, current
            else:
                return GotoStateResult.CHECK_FAILED, current
        return result, current
class MainState(GUIState):
    """The WhatsApp home screen (chat list)."""
    def __init__(self):
        super().__init__(State.MAIN)
        # True while the chat list has been scrolled away from the top.
        self.scrolling = False
    def scroll_to_top(self):
        """Scroll the chat list back up to its top position."""
        while not g.driver.check_scroll_top():
            g.driver.scroll_chat(reverse=True)
        self.scrolling = False
    def scroll_down(self):
        """Scroll the chat list down one step."""
        self.scrolling = True
        g.driver.scroll_chat()
    def check(self):
        # The floating action button identifies the main screen.
        return g.driver.check_fab()
    def _to_state(self, target: 'GUIState') -> Tuple[GotoStateResult, 'GUIState']:
        if target.state == State.MAIN:
            if self.scrolling:
                self.scroll_to_top()
            return GotoStateResult.SUCCESS, target
        elif target.state == State.SEARCH:
            if g.driver.click_search():
                return GotoStateResult.SUCCESS, target
        elif target.state == State.CHAT:
            if g.driver.click_on_chat(cast(ChatState, target).contact):
                return GotoStateResult.SUCCESS, target
        # BUG FIX: previously this was an `else` on the elif chain, so a
        # failed click above fell through and returned None, crashing
        # callers that unpack the (result, state) tuple.
        return GotoStateResult.ELEMENT_NOT_FOUND, self
class SearchState(GUIState):
    """The search screen reached from the main chat list."""
    def __init__(self):
        super().__init__(State.SEARCH)
    def check(self):
        # The search text input only exists on this screen.
        return g.driver.check_search_input()
    def _to_state(self, target: 'GUIState') -> Tuple[GotoStateResult, 'GUIState']:
        if target.state == State.SEARCH:
            # Already where we want to be.
            return GotoStateResult.SUCCESS, target
        if target.state == State.MAIN:
            # Backing out of search lands directly on the main screen.
            g.driver.press_search_back()
            return GotoStateResult.SUCCESS, target
        # Remaining case: target.state == State.CHAT — type the contact
        # name into the search box and open the matching chat.
        contact = cast(ChatState, target).contact
        opened = g.driver.type_in_search(contact) and g.driver.click_on_chat(contact, True)
        if opened:
            return GotoStateResult.SUCCESS, target
        return GotoStateResult.ELEMENT_NOT_FOUND, self
class ChatState(GUIState):
    """An open conversation with ``contact``."""
    def __init__(self, contact):
        super().__init__(State.CHAT)
        # Name of the contact/group whose conversation this state shows.
        self.contact = contact
    def check(self):
        # Verify the open conversation actually belongs to this contact.
        return g.driver.check_chat(self.contact)
    def _to_state(self, target: 'GUIState') -> Tuple[GotoStateResult, 'GUIState']:
        if target.state == State.MAIN:
            g.driver.press_chat_back()
            return GotoStateResult.SUCCESS, target
        elif target.state == State.SEARCH:
            # NOTE(review): this presses the *search* back button while a
            # chat is on screen, whereas the MAIN branch uses
            # press_chat_back() — confirm this is not a typo.
            g.driver.press_search_back()
            return GotoStateResult.SUCCESS, main_state
        elif target.state == State.CHAT:
            if cast(ChatState, target).contact == self.contact:
                # Already in the requested conversation.
                return GotoStateResult.SUCCESS, self
            else:
                # Different chat: step back to the main screen first.
                g.driver.press_chat_back()
                return GotoStateResult.SUCCESS, main_state
# Shared singleton state objects used by the state machine.
main_state = MainState()
search_state = SearchState()
# Pseudo-chat used internally; "!temp" is presumably a scratch group
# name — confirm against the rest of supbot.
temp_group = ChatState("!temp")
| StarcoderdataPython |
3332701 | <gh_stars>0
# class for generating command line, and configuration files:
# a replacement for arparser, it performas all the same functions with several key differences
# 1: can use an editable configuration file to set all or some arguments, allowing cmdline entries to take precedence if present
# 2: it can generate Namespace args: a single entry in parser_args() return that hold multiple argument values. To create, im add_argument() use 'param="group name"'
# 3: arguments not available at the cmdline can be added to the config file.
# 4: the config file can run arbitrary python code as needed to generate arguments and their values
import os
import re
import sys
import datetime
from argparse import ArgumentParser, Namespace
# check arguments for type
def check_arg(arg, req_type, def_value=None, item_type=None, caller_name=None, item_test=None):
    """Validate ``arg`` against ``req_type`` and return it (or a default).

    :param arg: the value to validate; ``None`` yields ``def_value``.
    :param req_type: type (or tuple of types) ``arg`` must be an instance of.
    :param def_value: value returned when ``arg`` is None.
    :param item_type: for list/tuple args, the required type of every item.
        When None, item types are not checked (previously this case
        raised a confusing TypeError from isinstance() itself).
    :param caller_name: name used to attribute error messages.
    :param item_test: currently unused; kept for interface compatibility.
    :raises TypeError: when arg (or one of its items) has the wrong type.
    """
    if arg is None:
        return def_value
    name = caller_name if caller_name is not None else 'check_arg()'
    if not isinstance(arg, req_type):
        raise TypeError(f"{name}: arg must be {req_type}")
    if isinstance(arg, (list, tuple)) and item_type is not None:
        if not all(isinstance(v, item_type) for v in arg):
            raise TypeError(f"{name}: args items must all be {item_type}")
    return arg
# recursively determine longest key length with prefix
def rec_key_len(d, prefix_len=0, maxlen=0):
if type(d) is Namespace:
d = vars(d)
for k in d:
t = type(d[k])
if t is dict or t is Namespace:
dlen = rec_key_len(d[k], prefix_len+4, maxlen)
maxlen = dlen if dlen > maxlen else maxlen
else:
maxlen = len(k) + prefix_len if len(k) > maxlen else maxlen
return maxlen
# recursive pretty printing of dictionaries and Namespaces
def rec_print(d, prefix='', maxlen=0, sep=' : ', sep_len=3):
    """Pretty-print a nested dict/Namespace with values in one column.

    :param d: dict or argparse.Namespace to print (may be nested).
    :param prefix: indentation accumulated during recursion.
    :param maxlen: minimum key-column width.
    :param sep: separator between key and value.
    :param sep_len: display width of ``sep``; NOTE(review): callers who
        pass a custom ``sep`` must pass a matching ``sep_len`` or the
        value column will misalign — confirm whether one should be
        derived from the other.
    """
    # Namespaces are printed through their attribute dictionary.
    if d and type(d) is Namespace:
        d = vars(d)
    if d:
        # Widen the value column to fit the longest (nested) key.
        maxlen = rec_key_len(d, len(prefix), maxlen)
        for k in d:
            t = type(d[k])
            if t is dict or t is Namespace:
                # Nested mapping: print the key as a section header, then
                # recurse with a deeper prefix.
                print(f"{prefix}{k} :\n")
                rec_print(d[k], prefix+' ', maxlen)
            else:
                # Quote string values so they read as literals.
                v = d[k] if type(d[k]) != str else f"\'{d[k]}\'"
                print(f"{prefix}{k}{sep}{' '*(maxlen+sep_len-len(k)-len(prefix))}{v}")
        print()
# identical in call signature to ArgumentParser
# create needed dictionaries and lists, call super
class ArgParseDirector(ArgumentParser):
    def __init__(self, *args, **kwargs):
        """Create the director; accepts the same arguments as ArgumentParser."""
        # must create the dictionary add_argument uses here, because super calls add_argument()
        self.__def_dict = {}
        super(ArgParseDirector, self).__init__(*args, **kwargs)
        # parse_args() populates these from the config file
        self.__config_args_d = {} # dictionary of configuration args
        self.__config_grps_d = {} # dictionary of group lists
        self.__config_rqrd_d = {} # dictionary of required/positional args
        # add_argument() tracks group names and required arguments in these
        self.__group_names_l = [] # list of group names
        self.__reqrd_args_d = {} # list of required command line args
        # store the config file name if used - only so user can get it with get_config_file_name()
        self.__config_file_name = None
    # superset of ArgumentParser.add_argument(), accepts argument arg_group - create/add to arg group
    def add_argument(self, *args, default=None, arg_group=None, **kwargs):
        """Register an argument, optionally assigning it to one or more groups.

        :param args: option strings, exactly as for ArgumentParser.
        :param default: default value (run through ``type`` when given).
        :param arg_group: group name or tuple of group names; each named
            group becomes an instance attribute listing its member args.
        """
        # for each arg
        #   if positional
        #       save arg and type
        #   else
        #       get default and process with type func if available, save in __def_dict
        #   if arg_group present: create inst var and save group name if not already done
        #       add arg to inst var
        for a in args:
            if a!='--help' and a!='-h':
                argtype = kwargs.get('type', None)
                # _has_prefix/_remove_prefix are defined elsewhere in this
                # class; presumably they test for / strip the option prefix
                # characters — confirm in the full source.
                if not self._has_prefix(a):
                    self.__reqrd_args_d[a] = argtype
                else:
                    v = default if argtype is None else argtype(default) if default is not None else None
                    self.__def_dict[self._remove_prefix(a)] = v
                # if arg_group
                #   if tuple
                #       for each entry
                #           create inst var if needed, add name to names list
                #   else
                #       create inst var if needed, add name to names list
                if arg_group is not None:
                    if type(arg_group) is tuple:
                        for g in arg_group:
                            if not hasattr(self, g):
                                self.__dict__[g] = []
                                self.__group_names_l.append(g)
                            self.__dict__[g].append(self._remove_prefix(a))
                    else:
                        if not hasattr(self, arg_group):
                            self.__dict__[arg_group] = []
                            self.__group_names_l.append(arg_group)
                        self.__dict__[arg_group].append(self._remove_prefix(a))
        # let argparse do the rest
        # NOTE(review): ``a`` below is the last option string from the loop
        # above (NameError if args is empty), and the default passed down
        # is None for everything except -h/--help — confirm this is intended.
        super().add_argument(*args, default=None if a!='--help' and a!='-h' else default, **kwargs)
# parse arguments --
# get config file if it exists
# exec() it, get dictionaries
# if required_args, manipulate command line to put things in place for super
# remove --config_path from command line
# merge config dictionaries and returned unaltered Namespace from super()
def parse_args(self, *args, config_path=None, **kwargs):
# need config_path BEFORE we call super parse.args to get required positional arguments - go directly into sys.argv
# try to get --config_path from command line
# if there but ill formed exit
# if not there, use config_path
config_name = config_path
idx = 0
try:
idx = sys.argv.index('--config_path')
except ValueError:
# config_path not on command line
pass
# idx will index --config_path if it was found ...
# if --config_path found
# if argv holds at least one more item
# get it
# if it doesn't name an exiting file, exit
# set config_name to the file
# else no value for --config_path, exit
if idx>0:
if len(sys.argv)>idx+1:
fname = sys.argv[idx+1]
if not os.path.exists(fname):
exit(f"config_path {fname} does not exist - exiting()")
config_name = fname
else:
exit(f"config_path file name not specified - exiting()")
# open file, read int str, add code at bottom to move dictionaries to locals()
# exec() str, get dictionaries for configured args, groups, required/positional args
if config_name is not None:
# save file name
self.__config_file_name = config_name
with open(config_name, 'r') as config_file:
config_str = config_file.read()
config_str += "\nlocals()['config_args_d']=config_args if 'config_groups' in locals() else {}\n" +\
"locals()['config_grps_d']=config_groups if 'config_groups' in locals() else {}\n" +\
"locals()['config_rqrd_d']=config_required if 'config_required' in locals() else {}\n"
exec(config_str, globals(), locals())
self.__config_args_d = locals()['config_args_d']
self.__config_grps_d = locals()['config_grps_d']
self.__config_rqrd_d = locals()['config_rqrd_d']
# required args are positional and MUST be inserted into sys.argv
# for each key in positional args set with add_argument()
# if at end of cmdline or within and not --config_path
# if config_rqrd_d has any entries
# if entry is a list, add each item from list to sys.argv one at a time
# else insert value from config_required/self.__config_rgrd_d
j = 1
for k in self.__reqrd_args_d:
if j==len(sys.argv) or sys.argv[j]=='--config_path':
if len(self.__config_rqrd_d)>0:
if type(self.__config_rqrd_d[k]) == list:
for e in self.__config_rqrd_d[k]:
v = e if type(e)==str else f"{e}"
sys.argv.insert(j, v)
j += 1
else:
v = self.__config_rqrd_d[k] if type(self.__config_rqrd_d[k])==str else f"{self.__config_rqrd_d[k]}"
sys.argv.insert(j, v)
j += 1
else:
j += 1
# find --config_path if it exists and remove it and file name from cmd line
# NOTE: if code gets here '--config_path' either isn't present or is well formed
try:
idx = sys.argv.index('--config_path')
del sys.argv[idx+1]
del sys.argv[idx]
except ValueError:
pass
# get super to do all the hard work, merge args dictionary with config dicitionaries, convert to Namespace, and return it
args_d = vars(super().parse_args(*args, **kwargs))
merged_args = self.merge_args(args_d)
return merged_args
# merge argument dictionary with config file and command line
# NOTE: positional args were processed by super().parse_args()
def merge_args(self, args_d):
# process group names from configuration file, creating inst vars and adding to names as needed
for g in self.__config_grps_d:
if not hasattr(self, g):
self.__dict__[g] = []
self.__group_names_l.append(g)
for k in self.__config_grps_d[g]:
if k not in self.__dict__[g]:
self.__dict__[g].append(k)
# create new_args dict, if any __group names, create name dict as entry in new_args
new_args = {}
for g in self.__group_names_l:
new_args[g] = {}
# create set of keys, combining args_d, config_d:
keyset = set()
if args_d is not None:
for k in args_d:
keyset.add(k)
for k in self.__config_args_d:
keyset.add(k)
# set v: cmdline 1st, config 2nd, default 3rd:
for k in keyset:
v = None
if args_d and k in args_d and args_d[k] is not None:
v = args_d[k]
# elif self.__config_args_d and k in self.__config_args_d:
elif k in self.__config_args_d:
v = self.__config_args_d[k]
# elif self.__def_dict and k in self.__def_dict:
elif k in self.__def_dict:
v = self.__def_dict[k]
# add v to new_args, and if in a group, to the group
# for each group in __group_names
# if k in list, add to __group, else to new_args
new_args[k] = v
for g in self.__group_names_l:
if k in self.__dict__[g]:
new_args[g][k] = v
# convert __group_names to Namespaces
for g in self.__group_names_l:
new_args[g] = Namespace(**new_args[g])
# return new_args Namepsace
return Namespace(**new_args)
# generates a config file
# generates a valid configuration file named by argument fname
# will contain all arguments added with add_argument(). those with group names will also be added to group lists.
def gen_config_file(self , fname, prefix=' '):
# if anything in the default dictionary built be add_argument()
# if fname exists, warn user
# process dictionaries
if len(self.__def_dict) > 0:
if os.path.exists(fname):
if input(f"\t{fname} exists: replace? [y/n]: ")!='y':
print(f'exiting: please rename \'{fname}\' or change target config file name')
# get longest key so config file can be easy to read
# open file
# write preamble and entries for each dictionary
maxlen = rec_key_len(self.__def_dict, len(prefix)) + 2 # add length of quotes around key strings
with open(fname, 'w') as config_file:
config_file.write(f"# ArgParseDirector configuration file '{fname}': original generated {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
config_file.write(f"# '{fname}' may be edited to:\n#\tadd/change entries and values in config_args,\n#\tadd/change group names and keys in config_groups,\n#\tchange values for positional args in config_required\n#\tor add python code as needed")
config_file.write(f"\n\n# config_args: original generated from all args added with add_argument()\n")
config_file.write(f"# change values to override defaults, add keys to create arguments that will be returned from parse_args() but not available from command line\n")
config_file.write("# do not change dictionary name 'config_args'\n")
config_file.write("config_args = {\n")
for k in self.__def_dict:
v = f"\'{self.__def_dict[k]}\'" if type(self.__def_dict[k])==str else None if type(self.__def_dict[k]) is type(None) else self.__def_dict[k]
config_file.write(f"{prefix}\'{k}\' : {' '*(maxlen-len(k)-len(prefix))}{v},\n")
config_file.write("}\n")
config_file.write(f"\n# config_groups: from all args added with add_argument() call that includes 'arg_group = 'some_group_name' or ('group1', 'group2')\n")
config_file.write(f"# add names to lists, or create new groups\n")
config_file.write("# do not change dictionary name 'config_groups'\n")
config_file.write("config_groups = {\n")
for k in self.__group_names_l:
config_file.write(f"{prefix}\'{k}\' : [\n")
for n in self.__dict__[k]:
config_file.write(f"{prefix*2}\'{n}\',\n")
config_file.write(f"{prefix}],\n")
config_file.write("}\n")
config_file.write("\n# required arguments: from all args added as positional with add_argument()\n")
config_file.write("# change values, do NOT remove or add any entries\n")
config_file.write("# do not change dictionary name 'config_required'\n")
config_file.write("config_required = {\n")
for k in self.__reqrd_args_d:
config_file.write(f"{prefix}\'{k}\' : {' '*(maxlen-len(k)-len(prefix))}'{self.__reqrd_args_d[k]} VALUE NEEDED HERE',\n")
config_file.write("}\n")
    # return full path and name of config file if one was used
    def get_config_file_name(self):
        """Return the config file name recorded by parse_args(), or None if no config file was used."""
        return self.__config_file_name
# test if arg has a prefix: no prefix signals required option
def _has_prefix(self, arg):
return re.match(f'[{self.prefix_chars}]', arg) is not None
# remove any prefix characeters from arg
def _remove_prefix(self, arg):
return re.sub(f'[{self.prefix_chars}]', '', arg)
| StarcoderdataPython |
89461 | <reponame>Allain18/pimontecarlo
"""Calcule pi grace à la méthode de monte Carlo"""
import random
import argparse
import matplotlib.pyplot as plt
def compute_pi(iteration, show_plot=False):
    """Estimate pi with the Monte Carlo method.

    Draws `iteration` uniform random points in the square [-1, 1] x [-1, 1]
    and counts how many land inside the unit circle; that ratio approximates
    pi / 4.

    Args:
        iteration: number of random points to draw (must be > 0).
        show_plot: when True, display the sampled points with matplotlib
            instead of printing the estimate.

    Returns:
        The pi approximation as a float.

    Raises:
        ValueError: if iteration is not positive.
    """
    if iteration <= 0:
        # previously iteration == 0 crashed with ZeroDivisionError below
        raise ValueError("iteration must be a positive integer")
    inside = 0
    x_inside = []
    y_inside = []
    x_outside = []
    y_outside = []
    for _ in range(iteration):
        x_coord = random.random() * 2 - 1
        y_coord = random.random() * 2 - 1
        # abs() is redundant when squaring: compare squared distance to 1
        if x_coord ** 2 + y_coord ** 2 < 1:
            x_inside.append(x_coord)
            y_inside.append(y_coord)
            inside += 1
        else:
            x_outside.append(x_coord)
            y_outside.append(y_coord)
    pi_aprox = inside / iteration * 4
    if show_plot:
        fig = plt.figure(num="Pi")
        axe = fig.add_subplot(1, 1, 1)
        axe.set_aspect(aspect=1)
        axe.set_xlim(-1, 1)
        axe.set_ylim(-1, 1)
        axe.plot(x_inside, y_inside, "bo")
        axe.plot(x_outside, y_outside, "ro")
        circle = plt.Circle((0, 0), 1, color="g")
        axe.add_artist(circle)
        axe.set_xlabel("pi = {}".format(pi_aprox))
        try:
            # window maximisation via .window.state() is Tk-backend specific;
            # other matplotlib backends (Qt, macOS, ...) raise here, so a
            # failure to maximise must not abort the plot
            mng = plt.get_current_fig_manager()
            mng.window.state('zoomed')
        except Exception:
            pass
        plt.show()
    else:
        print(pi_aprox)
    return pi_aprox
def get_argument():
    """Parse and return the command line arguments.

    Positional `iteration` gives the number of Monte Carlo samples;
    optional `-p`/`--plot` toggles the scatter-plot display.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "iteration", help="number of iterations to compute pi", type=int)
    arg_parser.add_argument(
        "-p", "--plot", help="Show the position of the point", action="store_true")
    return arg_parser.parse_args()
def main():
    """Entry point for command line: parse the arguments and run the estimate."""
    options = get_argument()
    compute_pi(options.iteration, options.plot)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1623048 | #Programa creado por <NAME>
def foreign_exchange_calculator(ammount, rate=145.97):
    """Convert an amount of Mexican pesos to Colombian pesos.

    Args:
        ammount: amount in Mexican pesos.
        rate: Colombian pesos per Mexican peso. Defaults to the hard-coded
            historical rate the script originally used; passing a current
            rate generalizes the function without changing existing callers.

    Returns:
        The equivalent amount in Colombian pesos.
    """
    return rate * ammount
def run():
    """Interactive driver: prompt for an amount in Mexican pesos and print its Colombian peso equivalent."""
    print('CALCULADORA DE DIVISAS')
    print('Convierte pesos mexicanos a persos colombianos.')
    print('')
    pesos_mxn = float(input('ingresa la cantidad de pesos mexicanos que quieres convertir '))
    pesos_cop = foreign_exchange_calculator(pesos_mxn)
    print(f'${pesos_mxn} pesos mexicanos son ${pesos_cop} pesos colombianos')
    print('')
if __name__=='__main__':
    run()
1682369 | <reponame>lrei/text-classification
import math
import torch
from tqdm import tqdm
def train_eval(model, criterion, eval_iter, rnn_out=False):
model.eval()
acc = 0.0
n_total = 0
n_correct = 0
test_loss = 0.0
for x, y in eval_iter:
with torch.no_grad():
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
output = None
if rnn_out:
output, _ = model(x)
else:
output = model(x)
loss = criterion(output, y)
n_correct += (output.argmax(1) == y).sum().item()
n_total += len(y)
test_loss += loss.item() / len(y)
test_loss /= len(eval_iter)
acc = 100. * (n_correct / n_total)
print(f'Test Accuracy: {acc:.2f}\tTest Loss (avg): {test_loss}')
def train(model, optim, criterion, train_iter, epochs, clip=0,
          eval_iter=None, eval_every=50):
    """Train `model` for `epochs` epochs over `train_iter`.

    Gradient norms are clipped when `clip > 0`. Every `eval_every` epochs
    the average train loss is printed and, when `eval_iter` is supplied,
    train_eval() is run on it.
    """
    for epoch in tqdm(range(1, epochs + 1)):
        model.train()
        epoch_loss = 0.0
        for batch_x, batch_y in train_iter:
            if torch.cuda.is_available():
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()
            # forward pass
            logits = model(batch_x)
            # backward pass + parameter update
            optim.zero_grad()
            batch_loss = criterion(logits, batch_y)
            batch_loss.backward()
            if clip > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optim.step()
            loss_value = batch_loss.item()
            if math.isnan(loss_value):
                print('loss = nan')
            else:
                epoch_loss += loss_value / len(batch_y)
        # average the accumulated per-sample losses over the epoch's batches
        epoch_loss /= len(train_iter)
        # periodic progress report + optional held-out evaluation
        if eval_iter and epoch % eval_every == 0:
            print(f'Epoch: {epoch}\tTrain Loss (avg): {epoch_loss}')
            train_eval(model, criterion, eval_iter)
def train_reg(model, optim, criterion, train_iter, epochs, clip=0,
              ar=0, tar=0, eval_iter=None, eval_every=50):
    """Train a recurrent `model` with optional activation regularization.

    Like train(), but the model must return a (logits, rnn_output) pair.
    `ar` weights an L2 penalty on the RNN activations and `tar` an L2
    penalty on the difference between consecutive slices along dim 0
    (presumably the time axis — TODO confirm with the model's output layout).
    """
    for epoch in tqdm(range(1, epochs + 1)):
        model.train()
        total_epoch_loss = 0.0
        for x, y in train_iter:
            if torch.cuda.is_available():
                x = x.cuda()
                y = y.cuda()
            # forward: model returns (logits, rnn activations)
            output, rnn_out = model(x)
            # backward
            optim.zero_grad()
            loss = criterion(output, y)
            # NOTE(review): loss_val is captured BEFORE the regularization
            # terms are added, so the nan check and the epoch-loss statistics
            # exclude them — confirm that is intended
            loss_val = loss.item()
            # Activation Regularization
            if ar:
                loss += ar * rnn_out.pow(2).mean()
            # Temporal Activation Regularization (slowness)
            if tar:
                loss += tar * (rnn_out[1:] - rnn_out[:-1]).pow(2).mean()
            # Backprop
            loss.backward()
            if clip > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optim.step()
            if math.isnan(loss_val):
                print('loss = nan')
            else:
                total_epoch_loss += loss_val / len(y)
        # display epoch stats
        total_epoch_loss /= len(train_iter)
        # eval (rnn_out=True so train_eval unpacks the (logits, rnn) pair)
        if eval_iter and epoch % eval_every == 0:
            print(f'Epoch: {epoch}\tTrain Loss (avg): {total_epoch_loss}')
            train_eval(model, criterion, eval_iter, True)
| StarcoderdataPython |
1621805 | <reponame>ArenaNetworks/dto-digitalmarketplace-supplier-frontend
# coding: utf-8
from __future__ import unicode_literals
import urllib2
from app.main.helpers.users import generate_supplier_invitation_token
from dmapiclient import HTTPError
from dmapiclient.audit import AuditTypes
from dmutils.email import generate_token, EmailError
from dmutils.forms import FakeCsrf
from ..helpers import BaseApplicationTest, csrf_only_request
import mock
# Expected user-facing strings asserted against HTML responses by the tests
# below; kept in one place so UI copy changes only need updating here.
EMAIL_EMPTY_ERROR = "Email address must be provided"
EMAIL_INVALID_ERROR = "Please enter a valid email address"
EMAIL_SENT_MESSAGE = "If the email address you've entered belongs to a Digital Marketplace account, we'll send a link to reset the password." # noqa
PASSWORD_EMPTY_ERROR = "Please enter your password"
PASSWORD_INVALID_ERROR = "Passwords must be between 10 and 50 characters"
PASSWORD_MISMATCH_ERROR = "The passwords you entered do not match"
NEW_PASSWORD_EMPTY_ERROR = "Please enter a new password"
NEW_PASSWORD_CONFIRM_EMPTY_ERROR = "Please confirm your new password"
TOKEN_CREATED_BEFORE_PASSWORD_LAST_CHANGED_ERROR = "This password reset link is invalid."
USER_LINK_EXPIRED_ERROR = "Check you’ve entered the correct link or ask the person who invited you to send a new invitation." # noqa
class TestSupplierRoleRequired(BaseApplicationTest):
    """Checks that supplier-only pages reject users holding other roles."""
    def test_buyer_cannot_access_supplier_dashboard(self):
        # a logged-in buyer hitting the supplier dashboard must be redirected
        # to login with a role-error flash rather than served the page
        with self.app.app_context():
            self.login_as_buyer()
            dashboard_url = self.url_for('main.dashboard')
            res = self.client.get(dashboard_url)
            assert res.status_code == 302
            assert res.location == self.get_login_redirect_url(dashboard_url)
            self.assert_flashes('supplier-role-required', expected_category='error')
class TestInviteUser(BaseApplicationTest):
    """Tests for the 'invite a team member' flow: form validation, token
    generation, invitation email dispatch and audit logging."""
    def test_should_be_an_error_for_invalid_email(self):
        with self.app.app_context():
            self.login()
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': 'invalid'
                }
            )
            assert EMAIL_INVALID_ERROR in res.get_data(as_text=True)
            assert res.status_code == 400
    def test_should_be_an_error_for_missing_email(self):
        with self.app.app_context():
            self.login()
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data=csrf_only_request
            )
            assert EMAIL_EMPTY_ERROR in res.get_data(as_text=True)
            assert res.status_code == 400
    # NOTE(review): despite the name, this asserts a 200 response, not a
    # redirect — confirm whether the view or the test name is out of date
    @mock.patch('app.main.views.login.data_api_client')
    @mock.patch('app.main.views.login.send_email')
    def test_should_redirect_to_list_users_on_success_invite(self, send_email, data_api_client):
        with self.app.app_context():
            self.login()
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': '<EMAIL>',
                }
            )
            assert res.status_code == 200
    @mock.patch('app.main.views.login.data_api_client')
    @mock.patch('app.main.views.login.send_email')
    def test_should_strip_whitespace_surrounding_invite_user_email_address_field(self, send_email, data_api_client):
        with self.app.app_context():
            self.login()
            self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': ' <EMAIL> ',
                }
            )
            # the email must be trimmed before it reaches send_email
            send_email.assert_called_once_with(
                '<EMAIL>',
                mock.ANY,
                mock.ANY,
                mock.ANY,
                mock.ANY,
            )
    @mock.patch('app.main.views.login.data_api_client')
    @mock.patch('app.main.views.login.generate_supplier_invitation_token')
    @mock.patch('app.main.views.login.send_email')
    def test_should_call_generate_token_with_correct_params(self, send_email, supplier_token_mock, data_api_client):
        with self.app.app_context():
            self.login()
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': '<EMAIL>',
                })
            assert res.status_code == 200
            # supplier details come from the logged-in session fixture
            supplier_token_mock.assert_called_once_with(
                name='',
                email_address='<EMAIL>',
                supplier_code=1234,
                supplier_name='Supplier Name',
            )
    # NOTE(review): mock.patch decorators apply bottom-up, so the first mock
    # argument here is actually the generate_token patch and the second the
    # send_email patch — the parameter names are swapped. Harmless while the
    # test only asserts neither was called, but worth fixing.
    @mock.patch('app.main.views.login.send_email')
    @mock.patch('app.main.views.login.generate_token')
    def test_should_not_generate_token_or_send_email_if_invalid_email(self, send_email, generate_token):
        with self.app.app_context():
            self.login()
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': 'total rubbish',
                })
            assert res.status_code == 400
            assert not send_email.called
            assert not generate_token.called
    @mock.patch('app.main.views.login.send_email')
    def test_should_be_an_error_if_send_invitation_email_fails(self, send_email):
        with self.app.app_context():
            self.login()
            # email backend outage should surface as 503, not a crash
            send_email.side_effect = EmailError(Exception('API is down'))
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': '<EMAIL>',
                    'name': 'valid',
                }
            )
            assert res.status_code == 503
    @mock.patch('app.main.views.login.data_api_client')
    @mock.patch('app.main.views.login.send_email')
    def test_should_call_send_invitation_email_with_correct_params(self, send_email, data_api_client):
        with self.app.app_context():
            self.login()
            self.app.config['INVITE_EMAIL_FROM'] = "EMAIL FROM"
            self.app.config['INVITE_EMAIL_NAME'] = "EMAIL NAME"
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': '<EMAIL>',
                    'name': 'valid',
                }
            )
            assert res.status_code == 200
            send_email.assert_called_once_with(
                "<EMAIL>",
                mock.ANY,
                'Invitation to join Supplier Name as a team member',
                "EMAIL FROM",
                "EMAIL NAME",
            )
    @mock.patch('app.main.views.login.data_api_client')
    @mock.patch('app.main.views.login.send_email')
    def test_should_create_audit_event(self, send_email, data_api_client):
        with self.app.app_context():
            self.login()
            res = self.client.post(
                self.url_for('main.send_invite_user'),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'email_address': '<EMAIL>',
                    'name': 'valid',
                }
            )
            assert res.status_code == 200
            # each successful invite must be recorded against the supplier
            data_api_client.create_audit_event.assert_called_once_with(
                audit_type=AuditTypes.invite_user,
                user='<EMAIL>',
                object_type='suppliers',
                object_id=mock.ANY,
                data={'invitedEmail': '<EMAIL>'})
class TestCreateUser(BaseApplicationTest):
    """Tests for the invitation-token 'create user' flow: token validation,
    form validation, conflicts with existing accounts and account creation."""
    # helper: build a real invitation token matching what send_invite_user issues
    def _generate_token(self, supplier_code=1234, supplier_name='Supplier Name', name='Me', email_address='<EMAIL>'): # noqa
        with self.app.app_context():
            return generate_supplier_invitation_token(
                name=name,
                email_address=email_address,
                supplier_code=supplier_code,
                supplier_name=supplier_name,
            )
    # helper: stub the API so create_user returns a plausible new supplier user
    def create_user_setup(self, data_api_client):
        data_api_client.create_user.return_value = self.user(123, '<EMAIL>', 'Supplier', 0, 'valid name')
    def test_should_be_an_error_for_invalid_token(self):
        token = "<PASSWORD>"
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 404
    def test_should_be_an_error_for_missing_token(self):
        res = self.client.get('/suppliers/create-user')
        assert res.status_code == 404
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_be_an_error_for_invalid_token_contents(self, data_api_client):
        # a structurally valid token whose payload lacks the expected fields
        token = generate_token(
            {
                'this_is_not_expected': 1234
            },
            self.app.config['SECRET_KEY'],
            self.app.config['SUPPLIER_INVITE_TOKEN_SALT']
        )
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 404
        assert data_api_client.get_user.called is False
        assert data_api_client.get_supplier.called is False
    def test_should_be_a_bad_request_if_token_expired(self):
        res = self.client.get(
            self.url_for('main.create_user', token=12345)
        )
        assert res.status_code == 404
        assert USER_LINK_EXPIRED_ERROR in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_render_create_user_page_if_user_does_not_exist(self, data_api_client):
        data_api_client.get_user.return_value = None
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 200
        # whitespace-insensitive comparison: templates reformat markup freely
        page_text = res.get_data(as_text=True).replace(' ', '')
        for message in [
            "Supplier Name",
            "<EMAIL>",
            '<input type="submit" class="button-save"',
            urllib2.quote(token),
        ]:
            assert message.replace(' ', '') in page_text
    def test_should_be_an_error_if_invalid_token_on_submit(self):
        res = self.client.post(
            self.url_for('main.submit_create_user', token='invalidtoken'),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': '<PASSWORD>',
                'name': 'name',
                'email_address': '<EMAIL>',
                'accept_terms': 'y',
            }
        )
        assert res.status_code == 404
        assert USER_LINK_EXPIRED_ERROR in res.get_data(as_text=True)
        # the submit form must not be re-rendered for a dead token
        assert (
            '<input type="submit" class="button-save"'
            not in res.get_data(as_text=True)
        )
    def test_should_be_an_error_if_missing_name_and_password(self):
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data=csrf_only_request
        )
        assert res.status_code == 400
        for message in [
            "Please enter a name",
            "Please enter a password"
        ]:
            assert message in res.get_data(as_text=True)
    def test_should_be_an_error_if_too_short_name_and_password(self):
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': "<PASSWORD>",
                'name': '',
                'accept_terms': 'y',
            }
        )
        assert res.status_code == 400
        for message in [
            "Please enter a name",
            "Passwords must be between 10 and 50 characters"
        ]:
            assert message in res.get_data(as_text=True)
    def test_should_be_an_error_if_too_long_name_and_password(self):
        with self.app.app_context():
            token = self._generate_token()
            # one past each documented maximum (255-char name, 50-char password)
            twofiftysix = "a" * 256
            fiftyone = "a" * 51
            res = self.client.post(
                self.url_for('main.submit_create_user', token=token),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'password': <PASSWORD>,
                    'name': twofiftysix,
                    'accept_terms': 'y',
                }
            )
            assert res.status_code == 400
            for message in [
                'Names must be between 1 and 255 characters',
                'Passwords must be between 10 and 50 characters',
                'Create',
                '<EMAIL>'
            ]:
                assert message in res.get_data(as_text=True)
    def test_require_acceptance_of_terms(self):
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': '<PASSWORD>!!!',
                'name': 'Person',
                # no accept_terms
            }
        )
        assert res.status_code == 400
        assert 'must accept the terms' in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_exists_and_is_a_buyer(self, data_api_client):
        data_api_client.get_user.return_value = self.user(123, '<EMAIL>', None, None, 'Users name')
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        # NOTE(review): leftover debug print — safe to delete
        print("RESPONSE: {}".format(res.get_data(as_text=True)))
        assert "Account already exists" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_admin_message_if_user_is_an_admin(self, data_api_client):
        data_api_client.get_user.return_value = self.user(123, '<EMAIL>', None, None, 'Users name', role='admin')
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_locked_message_if_user_is_locked(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123,
            '<EMAIL>',
            1234,
            'Supplier Name',
            'Users name',
            locked=True
        )
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        assert "Your account has been locked" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_inactive_message_if_user_is_not_active(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123,
            '<EMAIL>',
            1234,
            'Supplier Name',
            'Users name',
            active=False
        )
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        assert "Your account has been deactivated" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_wrong_supplier_message_if_invited_by_wrong_supplier(self, data_api_client): # noqa
        # existing account belongs to supplier 1234, invite is from 9999
        data_api_client.get_user.return_value = self.user(
            123,
            '<EMAIL>',
            1234,
            'Supplier Name',
            'Users name'
        )
        token = self._generate_token(
            supplier_code=9999,
            supplier_name='Different Supplier Name',
            email_address='<EMAIL>'
        )
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        assert u"You can only use your existing account with one company." in res.get_data(as_text=True)
        assert u"You already have an existing account with Supplier Name" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_is_already_a_supplier(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123,
            '<EMAIL>',
            1234,
            'Supplier Name',
            'Users name'
        )
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token),
            follow_redirects=True
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_logged_in_user_is_not_invited_user(self, data_api_client):
        self.login()
        data_api_client.get_user.return_value = self.user(
            999,
            '<EMAIL>',
            1234,
            'Supplier Name',
            'Different users name'
        )
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_is_already_logged_in(self, data_api_client):
        self.login()
        data_api_client.get_user.return_value = self.user(
            123,
            '<EMAIL>',
            1234,
            'Supplier Name',
            'Users name'
        )
        token = self._generate_token()
        res = self.client.get(
            self.url_for('main.create_user', token=token)
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_create_user_if_user_does_not_exist(self, data_api_client):
        # happy path: token valid, no clashing account, terms accepted
        data_api_client.get_user.return_value = None
        self.create_user_setup(data_api_client)
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': '<PASSWORD>',
                'name': 'valid name',
                'accept_terms': 'y',
            }
        )
        data_api_client.create_user.assert_called_once_with({
            'role': 'supplier',
            'password': '<PASSWORD>',
            'emailAddress': '<EMAIL>',
            'name': 'valid name',
            'supplierCode': 1234
        })
        assert res.status_code == 302
        assert res.location == self.url_for('main.dashboard', _external=True)
        self.assert_flashes('account-created', 'flag')
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_exists(self, data_api_client):
        # 409 from the API (duplicate user) should be surfaced as a 400
        data_api_client.create_user.side_effect = HTTPError(mock.Mock(status_code=409))
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': '<PASSWORD>',
                'name': 'valid name',
                'accept_terms': 'y',
            }
        )
        assert res.status_code == 400
        data_api_client.create_user.assert_called_once_with({
            'role': 'supplier',
            'password': '<PASSWORD>',
            'emailAddress': '<EMAIL>',
            'name': 'valid name',
            'supplierCode': 1234
        })
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_strip_whitespace_surrounding_create_user_name_field(self, data_api_client):
        data_api_client.get_user.return_value = None
        self.create_user_setup(data_api_client)
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': '<PASSWORD>',
                'name': '  valid name  ',
                'accept_terms': 'y',
            }
        )
        assert res.status_code == 302
        data_api_client.create_user.assert_called_once_with({
            'role': mock.ANY,
            'password': '<PASSWORD>',
            'emailAddress': mock.ANY,
            'name': 'valid name',
            'supplierCode': mock.ANY
        })
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_not_strip_whitespace_surrounding_create_user_password_field(self, data_api_client):
        # passwords are significant byte-for-byte; surrounding spaces must survive
        data_api_client.get_user.return_value = None
        self.create_user_setup(data_api_client)
        token = self._generate_token()
        res = self.client.post(
            self.url_for('main.submit_create_user', token=token),
            data={
                'csrf_token': FakeCsrf.valid_token,
                'password': '  <PASSWORD>  ',
                'name': 'valid name  ',
                'accept_terms': 'y',
            }
        )
        assert res.status_code == 302
        data_api_client.create_user.assert_called_once_with({
            'role': mock.ANY,
            'password': '  <PASSWORD>  ',
            'emailAddress': mock.ANY,
            'name': 'valid name',
            'supplierCode': mock.ANY
        })
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_be_a_503_if_api_fails(self, data_api_client):
        with self.app.app_context():
            data_api_client.create_user.side_effect = HTTPError("bad email")
            token = self._generate_token()
            res = self.client.post(
                self.url_for('main.submit_create_user', token=token),
                data={
                    'csrf_token': FakeCsrf.valid_token,
                    'password': '<PASSWORD>',
                    'name': 'valid name',
                    'accept_terms': 'y',
                }
            )
            assert res.status_code == 503
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.