id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4887036 | <filename>Normalization/NLTKNormalization.py
#!/usr/bin/python3
# Created by MikBac on 2020
import unicodedata
try:
from .NGram.NGram import getNGramTuples
except:
from NGram.NGram import getNGramTuples
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, WhitespaceTokenizer
# Module-level NLP singletons, built once at import time and shared by
# getNLTKNormalization below.
stop_words = set(stopwords.words('english'))  # set gives O(1) membership tests
blankline_tokenizer = WhitespaceTokenizer()
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
def getNLTKNormalization(d, ngram=1):
    """Normalize a document with NLTK and return its n-gram tuples.

    Pipeline: strip accents to ASCII -> tokenize -> keep alphabetic tokens,
    lowercase, drop stopwords -> lemmatize (verbs, then nouns) -> stem ->
    build n-grams.

    Args:
        d (str): raw document text.
        ngram (int): n-gram size passed to getNGramTuples (default 1).

    Returns:
        The n-gram tuples produced by getNGramTuples.
    """
    # Strip accents/diacritics down to plain ASCII.
    d = unicodedata.normalize('NFKD', d).encode('ascii', 'ignore').decode('utf-8', 'ignore')
    words = word_tokenize(d)
    # Keep alphabetic tokens only and lowercase them; the previous version also
    # ran blankline_tokenizer.tokenize() in a loop but discarded the result
    # (dead code) and applied the isalpha filter twice.
    words = [word.lower() for word in words if word.isalpha()]
    # The stopword corpus is lowercase, so a single filter after lowercasing
    # (using the module-level set, not a fresh list per call) removes the same
    # tokens as the old pre- and post-lowercase double filtering.
    words = [word for word in words if word not in stop_words]
    # Lemmatize verbs first, then nouns, then stem.
    words = [lemmatizer.lemmatize(word, pos="v") for word in words]
    words = [lemmatizer.lemmatize(word, pos="n") for word in words]
    words = [stemmer.stem(word) for word in words]
    return getNGramTuples(words, ngram)
| StarcoderdataPython |
5149116 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django migration: introduces the Invitation model used to
    # invite a user (by email) to collaborate on a contacts Book.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contacts', '0003_auto_20150904_2018'),
    ]

    operations = [
        migrations.CreateModel(
            name='Invitation',
            fields=[
                # Standard auto-created primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('changed', models.DateTimeField(auto_now=True)),
                # Invitation lifecycle state; starts as 'pending'.
                ('status', models.CharField(choices=[('pending', 'pending'), ('processing', 'processing'), ('error', 'error')], max_length=100, default='pending')),
                ('email', models.EmailField(max_length=254)),
                # Timestamp of the invitation email; null until dispatched.
                ('sent', models.DateTimeField(null=True)),
                # Unique token identifying the invitation link.
                ('key', models.CharField(unique=True, max_length=32)),
                ('book', models.ForeignKey(to='contacts.Book', null=True, blank=True)),
                ('sender', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, blank=True)),
            ],
        ),
    ]
| StarcoderdataPython |
5136581 | <filename>WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_Rounding_Decimals__fix.txt.py
import numpy as np

# np.fix rounds each element toward zero (truncates the fractional part):
# -3.1666 -> -3.0 and 3.6667 -> 3.0.
samples = [-3.1666, 3.6667]
arr = np.fix(samples)
print(arr)
| StarcoderdataPython |
1647480 | <filename>src/utils/pyutils.py<gh_stars>1-10
"""
Python utils, reusable chunks etc.
"""
from typing import List, Union
import sys
import os
import time
import numpy as np
import logging
def rprint(value):
    """Write *value* to stdout, overwriting the current terminal line.

    Emits a carriage return (no newline) first, so successive calls redraw
    the same line — this is how the progress helpers below animate.

    Parameters
    ----------
    value
        Value to print.
    """
    line = f'\r{value}'
    sys.stdout.write(line)
    sys.stdout.flush()
def print_progress(i: int, n_total: int, prefix: str = '') -> None:
    """Print progress bar.

    E.g. with ``prefix`` = 'training':

    training:  97% |||||||||||||||||||||||||||||||||||||||||||||||> |

    Parameters
    ----------
    i
        Current step (zero-based; the bar shows i + 1 of n_total).
    n_total
        Total number of steps.
    prefix
        Printed in front of progress bar, padded/aligned to 20 characters
        (longer prefixes are not truncated).
    """
    perc = np.floor((i + 1) / n_total * 100)
    n_print = 50  # total width of the bar in characters
    n_done = int(np.floor(perc / 100 * n_print))
    n_to_go = n_print - n_done
    if perc != 100:
        # Reserve one column for the '>' head of the bar while in progress.
        n_to_go = n_to_go-1
        msg = f'{perc:3.0f}% |{"|"*n_done}>{" "*n_to_go}' + '|'
    else:
        msg = f'{perc:3.0f}% |{"|"*n_done}{" "*n_to_go}' + '|'
    rprint(f'{prefix:20s} ' + msg)
    if perc == 100:
        # Leave the finished bar on screen and move to a fresh line.
        print('\n')
class ProgressBar(object):
    """Stateful progress display with elapsed/remaining time estimates.

    Call ``step()`` once per completed unit of work; output is redrawn in
    place on one terminal line via ``rprint``.
    """

    def __init__(self, n):
        # i: steps completed so far; n: total number of steps.
        self.i = 0
        self.n = n
        self.t_start = time.time()
        self.print()

    def step(self):
        """Advance by one step and refresh the display."""
        self.i += 1
        if self.i <= self.n:
            self.print()
            if self.i == self.n:
                # Finished: keep the bar and move to a fresh line.
                print('\n')
        elif self.i > self.n:
            # Steps beyond n are silently ignored.
            pass

    def print(self):
        """Render the current progress line (intentionally shadows builtin print)."""
        perc = np.floor(self.i / self.n * 100)
        elapsed = time.time() - self.t_start
        # max(1, i) guards against division by zero before the first step.
        remaining = np.ceil((elapsed / max(1, self.i)) * (self.n - self.i))
        rprint(f'{self.i:6} of {self.n} -- {perc:3.0f}% -- elapsed: {np.floor(elapsed):5.0f} s -- remaining: {remaining:5.0f} s')
def exit_if_exists(file: str, overwrite: bool = False):
    """Stop the script if ``file`` exists and ``overwrite`` is False.

    Parameters
    ----------
    file
        File path.
    overwrite (default: False)
        Whether to overwrite an existing file (logs a warning and removes it)
        or not (logs and exits the process with status 0 if it exists).
    """
    if os.path.exists(file):
        if overwrite:
            # logging.warn is deprecated; logging.warning is the supported API.
            logging.warning('Overwriting existing file as overwrite is True.')
            os.remove(file)
        else:
            logging.info(
                'File exists and will not be overwritten as overwrite is False.')
            sys.exit(0)
def rm_existing(files: Union[str, List[str]]):
    """Remove file(s) if they exist.

    Parameters
    ----------
    files
        A single file path or a list of file paths to remove. Missing files
        are skipped; removal errors are reported but do not raise.
    """
    if isinstance(files, str):
        files = [files]
    for f in files:
        if os.path.exists(f):
            try:
                os.remove(f)
            except OSError as e:
                # Bug fix: OSError has no attribute 'f'; the path lives in
                # e.filename.
                print("Error: %s - %s." % (e.filename, e.strerror))
| StarcoderdataPython |
3229948 | <filename>PycharmProjects/Cursoemvideo/exe082.py<gh_stars>0
numeros = []
par = []
impar = []

# Collect integers until the user answers 'N' at the continue prompt.
while True:
    numeros.append(int(input('DIGITE UM NUMERO: ')))
    resp = str(input('DESEJA CONTINUAR [S/N]: ')).upper()
    if resp == 'N':
        break

print(f'A LISTA COMPLETA É {numeros}')

# Split the collected numbers into even (par) and odd (impar) lists.
for valor in numeros:
    destino = par if valor % 2 == 0 else impar
    destino.append(valor)

print('=-'*30)
print(f'OS NUMEROS PARES DIGITADOS FORAM {par}')
print('=-'*30)
print(f'OS NUMEROS IMPARES DIGITADOS FORAM {impar}')
1903035 | <filename>demo/demo_poc.py
"""
The intention of this script is to provide a demonstration of how ConFlowGen is supposed to be used as a library.
It is, by design, a stateful library that persists all input in an SQL database format to enable reproducibility.
The intention of this demo is further explained in the logs it generated.
"""
import datetime
import sys
try:
import conflowgen
print(f"Importing ConFlowGen version {conflowgen.__version__}")
except ImportError:
print("Please first install conflowgen as a library")
sys.exit()
from conflowgen import ContainerFlowGenerationManager
from conflowgen import ModeOfTransport
from conflowgen import PortCallManager
from conflowgen import ExportFileFormat
from conflowgen import ExportContainerFlowManager
from conflowgen import DatabaseChooser
from conflowgen import setup_logger
from conflowgen import InboundAndOutboundVehicleCapacityPreviewReport
from conflowgen import ContainerFlowByVehicleTypePreviewReport
from conflowgen import VehicleCapacityExceededPreviewReport
from conflowgen import ModalSplitPreviewReport
from conflowgen import InboundAndOutboundVehicleCapacityAnalysisReport
from conflowgen import ContainerFlowByVehicleTypeAnalysisReport
from conflowgen import ModalSplitAnalysisReport
from conflowgen import ContainerFlowAdjustmentByVehicleTypeAnalysisReport
from conflowgen import ContainerFlowAdjustmentByVehicleTypeAnalysisSummaryReport
# Start logging
logger = setup_logger()

logger.info("""####
## Demo Proof of Concept
####
This demo is based on some example data and is meant to show the basic functionality. For a slightly more realistic
example, please check out `demo_DEHAM_CTA.py`. However, computing those numbers also takes longer than quickly running
this small example.
""")

# Pick database: reuse the demo SQLite file if it already exists, otherwise
# create a fresh one (the library persists all input for reproducibility).
database_chooser = DatabaseChooser()
demo_file_name = "demo_poc.sqlite"
if demo_file_name in database_chooser.list_all_sqlite_databases():
    database_chooser.load_existing_sqlite_database(demo_file_name)
else:
    database_chooser.create_new_sqlite_database(demo_file_name)

# Set settings: generate a three-week container flow starting today.
container_flow_generation_manager = ContainerFlowGenerationManager()
container_flow_generation_manager.set_properties(
    name="Demo file",
    start_date=datetime.datetime.now().date(),
    end_date=datetime.datetime.now().date() + datetime.timedelta(days=21)
)
# Add vehicles that frequently visit the terminal.
port_call_manager = PortCallManager()

feeder_service_name = "LX050"
if not port_call_manager.has_schedule(feeder_service_name, vehicle_type=ModeOfTransport.feeder):
    logger.info(f"Add feeder service '{feeder_service_name}' to database")
    port_call_manager.add_large_scheduled_vehicle(
        vehicle_type=ModeOfTransport.feeder,
        service_name=feeder_service_name,
        vehicle_arrives_at=datetime.date(2021, 7, 9),
        vehicle_arrives_at_time=datetime.time(11),
        average_vehicle_capacity=800,
        average_moved_capacity=100,
        next_destinations=[
            ("DEBRV", 0.4),  # 40% of the containers go here...
            ("RULED", 0.6)  # and the other 60% of the containers go here.
        ]
    )
else:
    logger.info(f"Feeder service '{feeder_service_name}' already exists")

train_service_name = "JR03A"
if not port_call_manager.has_schedule(train_service_name, vehicle_type=ModeOfTransport.train):
    logger.info(f"Add train service '{train_service_name}' to database")
    port_call_manager.add_large_scheduled_vehicle(
        vehicle_type=ModeOfTransport.train,
        service_name=train_service_name,
        vehicle_arrives_at=datetime.date(2021, 7, 12),
        vehicle_arrives_at_time=datetime.time(17),
        average_vehicle_capacity=90,
        average_moved_capacity=90,
        next_destinations=None  # Here we don't have containers that need to be grouped by destination
    )
else:
    logger.info(f"Train service '{train_service_name}' already exists")

# NOTE(review): this deep-sea service re-uses the feeder service's name
# "LX050"; the two schedules are distinguished only by vehicle_type — confirm
# this duplication is intended.
deep_sea_service_name = "LX050"
if not port_call_manager.has_schedule(deep_sea_service_name, vehicle_type=ModeOfTransport.deep_sea_vessel):
    logger.info(f"Add deep sea vessel service '{deep_sea_service_name}' to database")
    port_call_manager.add_large_scheduled_vehicle(
        vehicle_type=ModeOfTransport.deep_sea_vessel,
        service_name=deep_sea_service_name,
        vehicle_arrives_at=datetime.date(2021, 7, 10),
        vehicle_arrives_at_time=datetime.time(19),
        average_vehicle_capacity=16000,
        average_moved_capacity=150,  # for faster demo
        next_destinations=[
            ("ZADUR", 0.3),  # 30% of the containers go here...
            ("CNSHG", 0.7)  # and the other 70% of the containers go here.
        ]
    )
else:
    logger.info(f"Deep sea service '{deep_sea_service_name}' already exists")
logger.info("Generating reports on the input data (preview of container flow to generate)")

# Previews are computed from the schedules/distributions alone, before any
# container flow has actually been generated.
inbound_and_outbound_vehicle_capacity_preview_report = InboundAndOutboundVehicleCapacityPreviewReport()
report = inbound_and_outbound_vehicle_capacity_preview_report.get_report_as_text()
logger.info("Inbound and outbound traffic: ")
logger.info(report)

container_flow_by_vehicle_type_preview_report = ContainerFlowByVehicleTypePreviewReport()
report = container_flow_by_vehicle_type_preview_report.get_report_as_text()
logger.info("Container flow between vehicle types as defined by schedules and distributions: ")
logger.info(report)

modal_split_preview_report = ModalSplitPreviewReport()
report = modal_split_preview_report.get_report_as_text()
logger.info("The same container flow expressed in terms of transshipment and modal split for the hinterland: ")
logger.info(report)

vehicle_capacity_exceeded_preview_report = VehicleCapacityExceededPreviewReport()
report = vehicle_capacity_exceeded_preview_report.get_report_as_text()
logger.info("Consequences of container flow for outgoing vehicles: ")
logger.info(report)

logger.info("All reports on the input data have been generated")
# Generate all fleets with all vehicles. This is the core of the whole code.
container_flow_generation_manager.generate()

logger.info("The container flow data have been generated, run posthoc analyses on them")

logger.info("Analyze the amount of containers being delivered at the terminal and being picked by by mode of transport")
inbound_and_outbound_vehicle_capacity_analysis_report = InboundAndOutboundVehicleCapacityAnalysisReport()
report = inbound_and_outbound_vehicle_capacity_analysis_report.get_report_as_text()
logger.info(report)

logger.info("Analyze the amount of containers being delivered by one vehicle and being picked up by another vehicle "
            "(by vehicle type)")
container_flow_by_vehicle_type_analysis_report = ContainerFlowByVehicleTypeAnalysisReport()
report = container_flow_by_vehicle_type_analysis_report.get_report_as_text()
logger.info(report)

logger.info("Reformat same data to show the transshipment share and modal split in the hinterland")
modal_split_analysis_report = ModalSplitAnalysisReport()
report = modal_split_analysis_report.get_report_as_text()
logger.info(report)

logger.info("Analyze the amount of containers which require an adjustment in mode of transport because they could not "
            "leave the container terminal within the maximum container dwell time otherwise. If the initial type "
            "and the adjusted type are identical, no adjustment has taken place. These numbers are just reported "
            "for reference.")
container_flow_adjustment_by_vehicle_type_analysis_report = ContainerFlowAdjustmentByVehicleTypeAnalysisReport()
report = container_flow_adjustment_by_vehicle_type_analysis_report.get_report_as_text()
logger.info(report)

logger.info("Summarize the previous figures of how containers have been redirected to other vehicle types")
container_flow_adjustment_by_vehicle_type_analysis_summary = ContainerFlowAdjustmentByVehicleTypeAnalysisSummaryReport()
report = container_flow_adjustment_by_vehicle_type_analysis_summary.get_report_as_text()
logger.info(report)

logger.info("All posthoc analyses have been run.")
logger.info("For a better understanding of the data, it is advised to study the logs and compare the preview with the "
            "posthoc analysis results")
logger.info("Start data export...")

# Export important entries from SQL to CSV so that it can be further processed, e.g. by a simulation software
export_container_flow_manager = ExportContainerFlowManager()
# Folder name is timestamped (colons/spaces replaced, sub-second part dropped)
# so repeated runs do not collide.
export_folder_name = "demo-poc--" + str(datetime.datetime.now()).replace(":", "-").replace(" ", "--").split(".")[0]
export_container_flow_manager.export(
    folder_name=export_folder_name + "-csv",
    file_format=ExportFileFormat.csv
)
export_container_flow_manager.export(
    folder_name=export_folder_name + "-xlsx",
    file_format=ExportFileFormat.xlsx
)

# Gracefully close everything
database_chooser.close_current_connection()
logger.info("Demo 'demo_poc' finished successfully.")
| StarcoderdataPython |
1811382 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SingleDynamicData import SingleDynamicData
class AlipaySocialBaseMessageDynamicicondataModifyModel(object):
    """Request model for modifying dynamic icon data (Alipay social base API).

    Serializable fields: ``biz_id``, ``op_data`` (a SingleDynamicData),
    ``op_type`` and ``user_id``.
    """

    # Names of the serializable fields, in wire order.
    _FIELDS = ('biz_id', 'op_data', 'op_type', 'user_id')

    def __init__(self):
        self._biz_id = None
        self._op_data = None
        self._op_type = None
        self._user_id = None

    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value

    @property
    def op_data(self):
        return self._op_data

    @op_data.setter
    def op_data(self, value):
        # Accept either a ready-made SingleDynamicData or a raw dict to convert.
        if isinstance(value, SingleDynamicData):
            self._op_data = value
        else:
            self._op_data = SingleDynamicData.from_alipay_dict(value)

    @property
    def op_type(self):
        return self._op_type

    @op_type.setter
    def op_type(self, value):
        self._op_type = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                # Falsy fields are omitted, matching the generated-model style.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipaySocialBaseMessageDynamicicondataModifyModel()
        for field in AlipaySocialBaseMessageDynamicicondataModifyModel._FIELDS:
            if field in d:
                # Assignment goes through the property setters, so op_data is
                # converted to a SingleDynamicData automatically.
                setattr(o, field, d[field])
        return o
| StarcoderdataPython |
56392 | <reponame>deepest-stack/graph2tensor
# coding=utf-8
import numpy as np
from setuptools import find_packages, setup, Extension
# C++ extension implementing the neighbour sampler; compiled with C++11 and
# OpenMP for parallel sampling, with the NumPy headers on the include path.
sampler = Extension('graph2tensor.client.sampler',
                    include_dirs=[np.get_include(), ],
                    extra_compile_args=['-std=c++11', '-fopenmp'],
                    extra_link_args=['-fopenmp'],
                    sources=['graph2tensor/client/sampler.cpp'])

setup(
    name='graph2tensor',
    version='0.2.0',
    description='Graph learning framework based on neighbour sampling',
    packages=find_packages(exclude=("test",)),
    ext_modules=[sampler, ]
)
| StarcoderdataPython |
11386860 | # coding: utf-8
import numpy as np
import pysptk
from nose.tools import raises
from pysptk.util import mcepalpha
from pysptk.util import apply_along_last_axis, automatic_type_conversion
def test_assert_gamma():
    """Out-of-range gamma values must raise ValueError."""
    def __check(gamma):
        pysptk.util.assert_gamma(gamma)
    expect_value_error = raises(ValueError)(__check)
    for bad_gamma in (-2.0, 0.1):
        yield expect_value_error, bad_gamma
def test_assert_pade():
    """Unsupported Pade orders must raise ValueError."""
    def __check(pade):
        pysptk.util.assert_pade(pade)
    expect_value_error = raises(ValueError)(__check)
    for bad_pade in (3, 8):
        yield expect_value_error, bad_pade
def test_assert_fftlen():
    """Non-power-of-two FFT lengths must raise ValueError."""
    def __check(fftlen):
        pysptk.util.assert_fftlen(fftlen)
    expect_value_error = raises(ValueError)(__check)
    for bad_fftlen in (255, 257):
        yield expect_value_error, bad_fftlen
def test_phidf():
    """phidf must leave the delay line finite for a range of orders/alphas."""
    def run_case(order, alpha):
        np.random.seed(98765)
        samples = np.random.rand(64)
        delay_line = np.zeros(order + 1)
        for sample in samples:
            pysptk.phidf(sample, order, alpha, delay_line)
        assert np.all(np.isfinite(delay_line))
    for order in (15, 20, 25, 30):
        for alpha in (0.35, 0.41, 0.5):
            yield run_case, order, alpha
def test_lspcheck():
    """lspcheck should run without error on random LSP vectors."""
    def run_case(order):
        np.random.seed(98765)
        pysptk.lspcheck(np.random.rand(order + 1))
        # TODO: valid check
    for order in (15, 20, 25, 30):
        yield run_case, order
def test_example_audio_file():
    """The bundled example audio file must exist on disk."""
    from os.path import exists
    assert exists(pysptk.util.example_audio_file())
def test_mcepalpha():
    """mcepalpha must map common sample rates to their known alpha values."""
    expected = {
        8000: 0.312,
        11025: 0.357,
        16000: 0.41,
        22050: 0.455,
        44100: 0.544,
        48000: 0.554,
    }
    for fs, alpha in expected.items():
        assert np.isclose(mcepalpha(fs), alpha)
def test_automatic_type_conversion():
    """The decorator must preserve the input dtype for positional and kwarg calls."""
    @automatic_type_conversion
    def passthrough(x):
        return x

    for dtype in (np.float32, np.float16, np.float64):
        arr = np.ones(10, dtype=dtype)
        assert passthrough(arr).dtype == arr.dtype
        assert passthrough(x=arr).dtype == arr.dtype
def test_apply_along_last_axis():
    @apply_along_last_axis
    def f(x):
        # The decorator should always hand the wrapped function a 1-d slice.
        assert x.ndim == 1
        return x[:len(x) // 2] + np.arange(len(x) // 2)

    # f halves the last axis; all leading axes must be preserved, for both
    # positional and keyword-argument call paths.
    for shape in [(10,), (2, 10), (2, 2, 10)]:
        x = np.ones(shape)
        y = f(x)
        xshape = x.shape
        yshape = y.shape
        assert len(xshape) == len(yshape)
        assert xshape[-1] // 2 == yshape[-1]
        y = f(x=x)
        yshape = y.shape
        assert len(xshape) == len(yshape)
        assert xshape[-1] // 2 == yshape[-1]

    # manually expand 1-loop: decorated call must equal a per-row loop
    x = np.ones((2, 10), dtype=np.float64)
    y = np.empty((2, 5), dtype=np.float64)
    for i in range(len(x)):
        y[i] = f(x[i])
    yhat = f(x)
    assert np.allclose(yhat, y)

    # expand 2-loop: same check for a 3-d input
    x = np.ones((2, 2, 10), dtype=np.float64)
    y = np.empty((2, 2, 5), dtype=np.float64)
    for i in range(len(x)):
        for j in range(len(x[i])):
            y[i][j] = f(x[i][j])
    yhat = f(x)
    assert np.allclose(yhat, y)
def test_multiple_decorators():
    # Stacking apply_along_last_axis over automatic_type_conversion must
    # preserve both the leading axes and the input dtype.
    @apply_along_last_axis
    @automatic_type_conversion
    def half_vec(x):
        assert x.ndim == 1
        return x[:len(x) // 2]

    for shape in [(10,), (2, 10), (2, 2, 10)]:
        for dtype in [np.float32, np.float16, np.float64]:
            x = np.ones(shape, dtype=dtype)
            y = half_vec(x)
            xshape = x.shape
            yshape = y.shape
            assert len(xshape) == len(yshape)
            assert xshape[-1] // 2 == yshape[-1]
            assert x.dtype == y.dtype
| StarcoderdataPython |
9623493 | # coding: utf-8
from jiraprompt.wrapper import IssueFields, JiraWrapper
from jirasync.utils import echo
class MyJiraWrapper(JiraWrapper):
    """JiraWrapper subclass adding issue creation/update helpers for this project."""

    def create_issue(
        self,
        summary,
        details=None,
        component=None,
        labels=None,
        assignee=None,
        sprint=None,
        issuetype="Task",
    ):
        """
        Create an issue (by default, a Task) in the agile sprint.

        Args:
            summary (str): issue title/summary
            details (str): detailed issue description
            component (str): component name
            labels (list of str): labels
            assignee (str): user id of assignee
            sprint (str): sprint name, sprint number, or 'backlog'
                Default is current sprint
            issuetype (str): issue type, default is "Task",
                you likely won't change this.

        Returns:
            The newly created JIRA.Issue resource
        """
        if labels and not isinstance(labels, list):
            raise TypeError("labels must be a list")
        if not sprint:
            sprint_id = self.current_sprint_id
        elif sprint != "backlog":
            _, sprint_id = self.find_sprint(sprint)
        f = IssueFields()
        # Resolve the component against the server-side spelling first.
        comp_name_server_side, _ = self.find_component(component)
        f.summary(summary).description(details).component(
            comp_name_server_side
        ).labels(labels).project(id=self.project_id).issuetype(issuetype)
        new_issue = self.jira.create_issue(**f.kwarg)
        if assignee:
            self.jira.assign_issue(new_issue.key, assignee)
        try:
            if sprint == "backlog":
                self.jira.move_to_backlog([new_issue.key])
            else:
                self.jira.add_issues_to_sprint(sprint_id, [new_issue.key])
        except IndexError:
            # Some Jira installations have no Scrum plugin so no backlog exists
            pass
        return new_issue

    def change_status(self, issue_name, new_status_name):
        """Transition *issue_name* to the workflow status named *new_status_name*."""
        issue = self.jira.issue(issue_name)
        avail_statuses = self.get_avail_statuses(issue)
        # Jira transitions are addressed by id, so map the name first.
        new_status_id = self.get_avail_status_id(
            avail_statuses, new_status_name
        )
        self.jira.transition_issue(issue, new_status_id)

    def change_assignee(self, issue_name, new_assignee):
        """Reassign *issue_name* to *new_assignee*."""
        issue = self.jira.issue(issue_name)
        f = IssueFields().assignee(new_assignee)
        issue.update(**f.kwarg)

    def change_title(self, issue_name, new_summary):
        """Replace the summary (title) of *issue_name*."""
        issue = self.jira.issue(issue_name)
        f = IssueFields().summary(new_summary)
        issue.update(**f.kwarg)

    def update_sprint(self, issue_name):
        """Move *issue_name* into the current sprint, if one exists."""
        issue = self.jira.issue(issue_name)
        current_sprint_id = self.current_sprint_id
        if None not in (current_sprint_id, issue.key):
            self.jira.add_issues_to_sprint(current_sprint_id, issue.key)

    def search_existing_task(self, issue_text, assignee=None, only_open=False):
        """Search the project for tasks whose summary matches *issue_text*.

        Args:
            issue_text (str): text matched against the summary field.
            assignee (str): if given, restrict results to this assignee.
            only_open (bool): if True, exclude issues whose status is Done.

        Returns:
            The matching issues from jira.search_issues.
        """
        # check if it already exists
        # NOTE(review): "\u0023" is just "#", so this replace is a no-op;
        # presumably '#' was meant to be escaped for JQL — confirm.
        search_query = ('project = {} AND summary ~ \\"{}\\"').format(
            self.project_id, issue_text.replace("#", "\u0023")
        )
        if assignee is not None:
            search_query += " AND assignee = {}".format(assignee)
        if only_open:
            search_query += " AND status != Done "
        echo(
            "Searching Jira for {0} using query [{1}]".format(
                issue_text, search_query
            )
        )
        tasks = self.jira.search_issues(search_query)
        return tasks
| StarcoderdataPython |
9751929 | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SingleSignOnTokenRequest(object):
    """Swagger-generated model for a single-sign-on token request.

    Carries the ``code`` received on the redirect URI after customer approval
    and the ``grant_type`` (expected value: ``simple_key``).

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'code': 'str',
        'grant_type': 'str'
    }

    attribute_map = {
        'code': 'code',
        'grant_type': 'grant_type'
    }

    def __init__(self, code=None, grant_type=None):  # noqa: E501
        """Initialize the model; both fields are optional."""
        self._code = None
        self._grant_type = None
        self.discriminator = None
        # Route through the property setters only for values actually given.
        if code is not None:
            self.code = code
        if grant_type is not None:
            self.grant_type = grant_type

    @property
    def code(self):
        """str: the code received on the redirect URI after the customer
        approved the single sign on request."""
        return self._code

    @code.setter
    def code(self, code):
        self._code = code

    @property
    def grant_type(self):
        """str: grant type; the value should be simple_key."""
        return self._grant_type

    @grant_type.setter
    def grant_type(self, grant_type):
        self._grant_type = grant_type

    def to_dict(self):
        """Return the model's properties as a dict."""
        def _unpack(value):
            # Recursively convert nested swagger models inside lists/dicts.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        result = {name: _unpack(getattr(self, name))
                  for name in self.swagger_types}
        # Generated-code boilerplate: merge dict items if the model subclasses
        # dict (it does not here, so this branch is inert).
        if issubclass(SingleSignOnTokenRequest, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they share the type and the same state."""
        if not isinstance(other, SingleSignOnTokenRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| StarcoderdataPython |
136208 | import os
import tensorflow as tf
import numpy as np
import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import argparse
from numpy import linalg as LA
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
class Extractor:
    """Image feature extractor based on VGG16's 'fc2' layer output."""

    def __init__(self):
        # weights: 'imagenet'
        # pooling: 'max' or 'avg'
        # input_shape: (width, height, 3), width and height should >= 48
        self.input_shape = (224, 224, 3)
        self.weight = 'imagenet'
        self.pooling = 'max'
        self.model = VGG16(weights = self.weight, input_shape = (self.input_shape[0], self.input_shape[1], self.input_shape[2]), pooling = self.pooling, include_top = True)
        # Truncate at the 'fc2' layer so predict() returns that layer's
        # activation as the feature vector.
        self.model = Model(inputs=self.model.inputs, outputs=self.model.get_layer('fc2').output)
        # Warm-up prediction so the first real extraction is not slowed by
        # lazy graph construction.
        self.model.predict(np.zeros((1, 224, 224 , 3)))
    '''
    Use vgg16 model to extract features
    Output normalized feature vector
    '''
    def extract_feat(self, image):
        # Expects an RGB ndarray already resized to exactly (224, 224, 3);
        # anything else raises ValueError.
        if(isinstance(image, np.ndarray)):
            if(image.shape == (224,224,3)):
                img = np.asarray(image, dtype=np.float64) # This is an int array!
                img = np.expand_dims(img, axis=0)
                img = preprocess_input(img)
                feat = self.model.predict(img)
                # L2-normalize so downstream dot/cosine comparisons are scale-free.
                norm_feat = feat[0]/LA.norm(feat[0])
                return norm_feat
            else:
                raise ValueError("Input shape is incorrect")
        else:
            raise ValueError("Input is incorrect")
        # NOTE(review): unreachable — every branch above returns or raises.
        return None

    def save_extracted_feat_as_image(self, extracted, save_dir):
        # NOTE(review): plots onto the current global pyplot figure without
        # clearing it, so repeated calls accumulate curves — confirm intended.
        plt.plot(extracted)
        plt.savefig(os.path.join(save_dir,'extracted.png'))
| StarcoderdataPython |
9614620 | # -*- coding: utf-8 -*-
class quantization(object):
"""docstring for quantization"""
def __init__(self, nbit=8):
super(quantization, self).__init__()
self.__nbit = nbit
def quantify(self, w):
with tf.variable_scope("Conv2D"):
| StarcoderdataPython |
338311 | <gh_stars>0
from tkinter import *
import mysql.connector
import tkinter.messagebox as tmsg
def pack1(rootname, text):
    """Render an orange banner label with *text* across the top of *rootname*."""
    banner = Label(
        rootname,
        text=text,
        bg="#eb9234",
        fg="white",
        padx=5,
        pady=5,
        font=("REVAMPED", 25, "bold"),
        borderwidth=20,
        relief=RIDGE,
    )
    banner.pack(side=TOP, fill="x")
def place(rootname, text, y):
    """Put a bold caption label with *text* on *rootname* at x=5, the given *y*."""
    caption = Label(
        rootname,
        text=text,
        padx=5,
        pady=5,
        font=("REVAMPED", 25, "bold"),
    )
    caption.place(x=5, y=y)
def toot(a):
    """Apply the shared window geometry, size limits and title to Tk window *a*."""
    a.geometry("650x434+120+120")
    a.minsize(600, 300)
    a.maxsize(1200, 600)
    a.title("Your passwords")
def apple(mycursor1):
global root4
root4 = Tk()
toot(root4)
u = {}
for k, c in mycursor1:
u[k] = c
f6 = Frame(root4, width=650, height=384, bg="blue")
f7 = Frame(root4, width=650, height=50, bg="green")
lt = Label(f6, text="USERNAME : PASSWORD", fg="black", font=("REVAMPED", 15, "bold"))
lt.pack(side=TOP, anchor=NW, padx=9, pady=9)
scrollbar = Scrollbar(f6) # Scroll bar on root assign to scrollbar variable
scrollbar.pack(side=RIGHT, fill=Y)
list1 = Listbox(f6, height=10, font=("REVAMPED", 15, "bold"), yscrollcommand=scrollbar.set)
n = 0
for i, j in u.items():
n += 1
list1.insert(END, f"{n}){i} : {j}")
list1.pack(side=TOP, fill=BOTH, expand=True)
scrollbar.config(command=list1.yview)
Button(f7, text="Back", command=upper, font=("REVAMPED", 13, "bold")).pack(side=TOP, anchor=NW, padx=9, pady=9)
f6.pack(side=TOP, fill=BOTH, expand=True)
f7.pack(side=BOTTOM, fill=BOTH, expand=False)
root4.mainloop()
def getval1():
    """Save the username/password typed into the 'add password' window.

    Reads the global Tk StringVars set up by value(), inserts a row into
    sub_password and clears both entry boxes afterwards.
    """
    # NOTE(review): "<PASSWORD>" looks like a redacted placeholder for the real
    # MySQL root password — confirm before deploying.
    mydb1 = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>", database="password_saver")
    mycursor1 = mydb1.cursor()
    sqlform = "insert into sub_password(username, password) values(%s, %s)"
    employees = [(uservalue.get(), passvalue.get())]
    mycursor1.executemany(sqlform, employees)
    mydb1.commit()
    tmsg.showinfo('Done', 'Successfully Saved')
    # Clear both entry boxes for the next credential.
    uservalue.set('')
    uservalue1.update()
    passvalue.set('')
    passvalue1.update()
def value(rootname, getvalname):
    """Add username/password Entry widgets and a Submit button to *rootname*.

    Side effects: rebinds the module-level globals uservalue/passvalue (the
    StringVars) and uservalue1/passvalue1 (the Entry widgets) that the
    submit callbacks (getval1/getvals2/getvals3) read.
    """
    global uservalue, passvalue, uservalue1, passvalue1
    uservalue = StringVar()
    passvalue = StringVar()
    uservalue1 = Entry(rootname, textvariable=uservalue, font=("REVAMPED", 15, "bold"))
    passvalue1 = Entry(rootname, textvariable=passvalue, font=("REVAMPED", 15, "bold"))
    uservalue1.place(x=250, y=170)
    passvalue1.place(x=250, y=218)
    Button(rootname, text="Submit", command=getvalname, font=("REVAMPED", 13, "bold")).place(x=300, y=250)
def retrieve():
    """Fetch all stored credentials and open the password-list window."""
    mydb1 = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>", database="password_saver")
    mycursor1 = mydb1.cursor()
    mycursor1.execute("select * from sub_password")
    # Close the 'add password' window before handing the cursor to apple().
    root1.destroy()
    apple(mycursor1)
def sutar():
    """Show the 'Add New Password' window (the main screen after login)."""
    global root1
    root1 = Tk()
    toot(root1)
    pack1(root1, """Add New Password""")
    place(root1, "Username : ", 150)
    place(root1, "Password : ", 200)
    value(root1, getval1)
    Button(root1, text="Open my Passwords", command=retrieve, font=("REVAMPED", 13, "bold")).place(x=250, y=290)
    root1.mainloop()
def getvals2():
    """Validate the master credentials; on success open the password manager.

    Reads the entered username/password from the global Tk StringVars (set up
    by value()) and checks them against the main_password table.
    """
    mydb1 = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>", database="password_saver")
    mycursor1 = mydb1.cursor()
    # Look up the single matching row with a parameterized query. The previous
    # version scanned every username, popped an error box per non-matching
    # row, and re-executed a second query on the same cursor while still
    # iterating it (which invalidates the outer iteration).
    mycursor1.execute(
        "select password from main_password where username = %s",
        (uservalue.get(),)
    )
    row = mycursor1.fetchone()
    if row is None:
        tmsg.showerror('Error', 'Username not correct')
    elif row[0] != passvalue.get():
        tmsg.showerror('Error', 'Password not correct')
    else:
        # Credentials match: close the login window and open the manager.
        root.destroy()
        sutar()
def omkar():
    """Show the login window asking for the master username/password."""
    global root
    root = Tk()
    toot(root)
    pack1(root, """Login ID Password""")
    place(root, "Username : ", 150)
    place(root, "Password : ", 200)
    value(root, getvals2)
    root.mainloop()
def getvals3():
    """Persist the newly created master credentials into main_password."""
    connection = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>", database="password_saver")
    cursor = connection.cursor()
    insert_stmt = "insert into main_password(Username, Password) values(%s, %s)"
    credentials = [(uservalue.get(), passvalue.get())]
    cursor.executemany(insert_stmt, credentials)
    connection.commit()
    tmsg.showinfo('Done', 'Saved Successfully')
try:
    # First run: create the database and both credential tables, then show
    # the account-creation window followed by the login window.
    mydb = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>")
    mycursor = mydb.cursor()
    mycursor.execute("create database password_saver")
    mycursor.execute("use password_saver")
    mycursor.execute("CREATE TABLE main_password("
                     "Username varchar (40) NOT NULL,"
                     "Password varchar (40) NOT NULL);")
    mycursor.execute("CREATE TABLE sub_password("
                     "Username varchar (40) NOT NULL,"
                     "Password varchar (40) NOT NULL);")
    root3 = Tk()
    toot(root3)
    pack1(root3, """welcome
Create your account""")
    place(root3, "Username : ", 150)
    place(root3, "Password : ", 200)
    value(root3, getvals3)
    root3.mainloop()
    omkar()
except Exception:
    # Any failure (most likely: database already exists on a subsequent
    # run) sends the user straight to the login window.
    omkar()
| StarcoderdataPython |
8049063 | <gh_stars>0
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
'''
Note - the pbs module isn't very pythonic, so you'll see things like
value = job.Resource_List["attribute"] or 100
instead of
value = job.Resource_List.get("attribute", 100)
That is because Resource_List is a metaclass, not a dict.
Also, place and select objects use repr() to convert to a parseable string,
so you'll see guards against repr(None) (combined with the above).
Quick start:
qmgr -c "create hook cycle_sub_hook"
qmgr -c "set hook cycle_sub_hook event = queuejob"
qmgr -c "create hook cycle_sub_periodic_hook"
qmgr -c "set hook cycle_sub_periodic_hook event = periodic"
# reload source / config
qmgr -c "import hook cycle_sub_hook application/x-python default submit_hook.py"
qmgr -c "import hook cycle_sub_hook application/x-config default submit_hook.json"
qmgr -c "import hook cycle_sub_periodic_hook application/x-python default submit_hook.py"
qmgr -c "import hook cycle_sub_periodic_hook application/x-config default submit_hook.json"
Queue setup
qmgr -c "set queue <queue_name> resources_default.slot_type = <queue_name>"
qmgr -c "set queue <queue_name> resources_default.ungrouped = false"
qmgr -c "set queue <queue_name> default_chunk.slot_type = <queue_name>"
qmgr -c "set queue <queue_name> default_chunk.ungrouped = false"
See PBS Professional Programmers Guide for detailed information.
See /var/spool/pbs/server_logs/* for log messages
'''
from collections import OrderedDict
import json
import sys
import traceback
import os
import subprocess
import json
try:
import pbs
except ImportError:
import mockpbs as pbs
def validate_groupid_placement(job):
    '''
    Return True when the job's placement groups by group_id, applying the
    rewritten place specification to the job when one is produced.
    '''
    current_place = repr(job.Resource_List["place"]) if job.Resource_List["place"] else ""
    ok, rewritten = get_groupid_placement(current_place)
    if rewritten is not None:
        job.Resource_List["place"] = pbs.place(rewritten)
    return ok
def get_groupid_placement(place):
    '''
    Inspect a ':'-separated place specification for a group= setting.

    Returns a two-element list [ok, new_place]:
      - no group given      -> [True, place + ":group=group_id"]
      - group is group_id   -> [True, None] (nothing to change)
      - any other grouping  -> [False, None] (leave the job alone)
    '''
    debug("Get groupid placement: %s" % place)
    grouping = None
    for token in place.split(":"):
        # NOTE(review): grouping is reset on every iteration, so only a
        # "group=" found in the *last* token survives the loop - confirm
        # whether that is intended.
        grouping = None
        if "=" in token:
            key, val = [part.lower().strip() for part in token.split("=", 1)]
            if key == "group":
                grouping = val
    if grouping is None:
        debug("The user didn't specify place=group, setting group=group_id")
        separator = ":" if place else ""
        return [True, place + separator + "group=group_id"]
    if grouping == "group_id":
        return [True, None]
    debug("User specified a placement group that is not group_id - skipping.")
    return [False, None]
def parse_select(job, select_str=None):
    """Split a select expression like '3:ncpus=2:slot_type=x' into its
    chunk count and an OrderedDict of key/value attributes.

    NOTE(review): *select_str* is accepted but currently unused - confirm
    whether callers rely on it.
    """
    tokens = get_select_expr(job).split(":")
    chunk_count = int(tokens[0])
    attributes = OrderedDict([tok.split("=", 1) for tok in tokens[1:]])
    return chunk_count, attributes
def get_select(job):
    """Return the job's raw select resource object (may be None)."""
    debug("Get select: %s" %job.Resource_List["select"])
    return job.Resource_List["select"]
def get_select_expr(job):
    """Return the job's select specification as a parseable string
    (pbs objects convert via repr, not str - see module docstring)."""
    return repr(get_select(job))
def append_select_expr(job, key, value):
    """Append key=value to the job's ':'-separated select expression."""
    current = get_select_expr(job)
    separator = ":" if current else ""
    job.Resource_List["select"] = pbs.select(current + "%s%s=%s" % (separator, key, value))
def set_select_key(job, key, value):
    """Overwrite key=value inside the select expression, appending the pair
    when the key is not present yet."""
    parts = get_select_expr(job).split(":")
    replaced = False
    # parts[0] is the chunk count, so only the key=value tokens are scanned.
    for index in range(1, len(parts)):
        token_key, _ = parts[index].split("=", 1)
        if token_key == key:
            replaced = True
            parts[index] = "%s=%s" % (key, value)
    if replaced:
        job.Resource_List["select"] = pbs.select(":".join(parts))
    else:
        append_select_expr(job, key, value)
def placement_hook(hook_config, job):
    """Queuejob hook body: ensure jobs are grouped by group_id.

    Jobs without a select statement are put on an "so" hold for the
    periodic hook to fix up later; jobs with one get ungrouped=false and
    their slot_type propagated into the select chunks.
    """
    if not get_select(job):
        # pbs 18 seems to treat host booleans as strings, which is causing this very annoying workaround.
        #job.Resource_List["ungrouped"] = "true"
        if job.Resource_List["slot_type"]:
            job.Resource_List["slot_type"] = job.Resource_List["slot_type"]

        # Check to see if job is interactive
        if job.interactive:
            debug("Job is interactive")
            return

        debug("The job doesn't have a select statement, it doesn't have any placement requirements.")
        debug("Place a hold on the job")
        # "so" holds are picked up later by the PERIODIC branch below.
        job.Hold_Types = pbs.hold_types("so")
        return

    if validate_groupid_placement(job):
        _, select_dict = parse_select(job)
        if "ungrouped" not in select_dict:
            set_select_key(job, "ungrouped", "false")
        slot_type = select_dict.get("slot_type")
        if slot_type:
            set_select_key(job, "slot_type", slot_type)
            debug("Using the grouped slot_type as a resource (%s)." % slot_type)
def debug(msg):
    """Emit *msg* to the PBS server log at DEBUG3 verbosity."""
    pbs.logmsg(pbs.EVENT_DEBUG3, "cycle_sub_hook - %s" % msg)
def error(msg):
    """Emit *msg* to the PBS server log at ERROR severity."""
    pbs.logmsg(pbs.EVENT_ERROR, "cycle_sub_hook - %s" % msg)
def run_cmd(cmd):
    """Run *cmd* (an argv list), log it, and return (stdout, stderr) bytes."""
    debug("Cmd: %s" % cmd)
    process = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    out, err = process.communicate()
    if process.returncode != 0:
        # Only log on failure; callers receive both streams either way.
        debug('cmd failed!\n\tstdout="%s"\n\tstderr="%s"' % (out, err))
    return out, err
# another non-pythonic thing - this can't be behind a __name__ == '__main__',
# as the hook code has to be executable at the load module step.
hook_config = {}

if pbs.hook_config_filename:
    with open(pbs.hook_config_filename) as fr:
        hook_config.update(json.load(fr))

try:
    e = pbs.event()

    if e.type == pbs.QUEUEJOB:
        j = e.job
        placement_hook(hook_config, j)
    elif e.type == pbs.PERIODIC:
        # Defined paths to PBS commands
        qselect_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qselect')
        qstat_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qstat')
        qalter_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qalter')
        qrls_cmd = os.path.join(pbs.pbs_conf['PBS_EXEC'], 'bin', 'qrls')

        # Get the jobs in an "so" hold state
        cmd = [qselect_cmd, "-h", "so"]
        stdout, stderr = run_cmd(cmd)
        jobs = stdout.split()
        debug("Jobs: %s" % jobs)

        # Get the job information
        if not jobs:
            debug("No jobs to evaluate")
            e.accept()

        # Get Queue defaults information
        cmd = [qstat_cmd, "-Qf", "-F", "json"]
        stdout, stderr = run_cmd(cmd)
        qstat_Qf_json = json.loads(stdout)

        # Get job information (first 25 held jobs per pass)
        cmd = [qstat_cmd, "-f", "-F", "json"] + jobs[:25]
        stdout, stderr = run_cmd(cmd)
        qstat_json = json.loads(stdout)
        jobs = qstat_json["Jobs"]
        # FIX: was jobs.iteritems(), which only exists on Python 2;
        # .items() behaves the same on both Python 2 and 3.
        for key, value in jobs.items():
            # Reevaluate each held job
            debug("Key: %s\nValue: %s" % (key, value))
            j_queue = jobs[key]["queue"]
            j_place = jobs[key]["Resource_List"]["place"]
            j_select = jobs[key]["Resource_List"]["select"]

            # Check the groupid placement
            mj_place = "group=group_id"
            # Assign default placement from queue. If none, assign group=group_id
            if j_queue in qstat_Qf_json["Queue"]:
                if "resources_default" in qstat_Qf_json["Queue"][j_queue]:
                    if "place" in qstat_Qf_json["Queue"][j_queue]["resources_default"]:
                        mj_place = qstat_Qf_json["Queue"][j_queue]["resources_default"]["place"]

            # Qalter the job
            cmd = [qalter_cmd]
            if mj_place != None:
                debug("New place statement: %s" % mj_place)
                cmd.append("-lselect=%s" % j_select)
                cmd.append("-lplace=%s" % mj_place)
            debug("qalter the job")
            cmd.append(key)
            stdout, stderr = run_cmd(cmd)

            # Release the hold on the job
            cmd = [qrls_cmd, "-h", "so", key]
            debug("Release the hold on the job")
            stdout, stderr = run_cmd(cmd)
except SystemExit:
    debug("Exited with SystemExit")
except:
    # Broad catch is deliberate at this top-level hook boundary: log the
    # traceback into the server log, then re-raise so PBS sees the failure.
    error(traceback.format_exc())
    raise
| StarcoderdataPython |
8094093 | import dash
from dash.dependencies import Output, Event
import dash_core_components as dcc
import dash_html_components as html
import plotly
import random
import plotly.graph_objs as go
from collections import deque
# Bounded history for the live plot: only the 20 most recent points are kept.
X = deque(maxlen=20)
Y = deque(maxlen=20)
X.append(1)
Y.append(1)

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Graph(id='live-graph', animate=True),
    # Interval fires every 1000 ms and drives the update_graph callback.
    dcc.Interval(
        id='graph-update',
        interval=1000
    )
])
@app.callback(Output('live-graph', 'figure'),
              events=[Event('graph-update', 'interval')])
def update_graph():
    """Extend the random-walk series by one point and rebuild the figure."""
    next_x = X[-1] + 1
    X.append(next_x)
    # The new y value drifts from the previous one by up to +/-10%.
    Y.append(next_x + Y[-1] * random.uniform(-0.1, 0.1))
    trace = go.Scatter(
        x=list(X),
        y=list(Y),
        name='Scatter',
        mode='lines+markers'
    )
    layout = go.Layout(xaxis=dict(range=[min(X), max(X)]),
                       yaxis=dict(range=[min(Y), max(Y)]))
    return {'data': [trace], 'layout': layout}
if __name__=='__main__':
    # Dash development server with hot reload; not for production use.
    app.run_server(debug=True)
6478141 | import json
from functools import wraps
from flask import Response
class Json:
    """Small Flask extension that attaches a json() route decorator to an app."""

    def __init__(self, app=None):
        # Mirrors the usual Flask extension pattern: allow deferred init.
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Bind json_route to *app* as a bound app.json(...) method."""
        from types import MethodType
        app.json = MethodType(json_route, app)
        return self
class _Encoder(json.JSONEncoder):
    """JSON encoder aware of __json__ hooks, __jsonfields__ specs and
    externally registered (type, serializer) pairs (see jsonEncode)."""

    # Registered (type, serializer) pairs; newest registrations take priority.
    bytype = []

    def default(self, obj):
        """Serialize *obj* using the first matching strategy."""
        for registered_type, serializer in _Encoder.bytype:
            if isinstance(obj, registered_type):
                return serializer(obj)
        if hasattr(obj, '__jsonfields__'):
            return self._jsonfields(obj, obj.__jsonfields__)
        if hasattr(obj, '__json__'):
            return obj.__json__()
        return super(_Encoder, self).default(obj)

    def _jsonfields(self, obj, fields):
        """Build a dict from a __jsonfields__ spec: either a list of
        attribute names, or a mapping of key -> literal/callable with an
        optional '__auto__' name list."""
        data = {}

        def copy_attributes(names):
            for name in names:
                data[name] = getattr(obj, name)

        if not isinstance(fields, dict):
            copy_attributes(fields)
            return data
        for key, val in fields.items():
            if key == '__auto__':
                copy_attributes(val)
            elif callable(val):
                data[key] = val(obj)
            else:
                data[key] = val
        return data
def jsonEncode(ty):
    """Decorator factory registering a serializer for *ty* with _Encoder.

    Usage: @jsonEncode(SomeType) above a function taking the instance.
    """
    if not isinstance(ty, type):
        raise TypeError("Must call the jsonEncode decorator with a type.")

    def decorate(__json__):
        # Prepend so later registrations win over earlier ones.
        _Encoder.bytype.insert(0, (ty, __json__))
        return __json__
    return decorate
def dumps(obj):
    """json.dumps using the extension-aware _Encoder."""
    return json.dumps(obj, cls=_Encoder)
def json_route(app, route, **kwargs):
    """Register *route* on *app*, serializing the handler's return value
    to an application/json Response."""
    def decorate(handler):
        @wraps(handler)
        def inner(*args, **kwargs):
            # TODO check accept header
            result = handler(*args, **kwargs)
            # TODO when an error occurs, provide JSON error pages
            payload = dumps(result)
            return Response(payload, mimetype='application/json')
        app.route(route, **kwargs)(inner)
        return inner
    return decorate
| StarcoderdataPython |
3463018 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 00:53:34 2020
@author: <NAME>
"""
| StarcoderdataPython |
154729 | <filename>calculations.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 09:44:59 2018
@author: simonk
"""
import math
# ref for tangents
# https://en.wikipedia.org/wiki/Tangent_lines_to_circles
def CalcTangents(p1, r1, p2, r2):
    """Return an outer tangent line of two circles as a closed quad.

    ref: https://en.wikipedia.org/wiki/Tangent_lines_to_circles
    The upper tangent points are mirrored across the x axis to close
    the polygon; the first point is repeated at the end.
    """
    x1, y1 = p1
    x2, y2 = p2
    half_pi = math.pi / 2.0

    gamma = -math.atan((y2 - y1) / (x2 - x1))
    beta = math.asin((r2 - r1) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
    alpha = gamma - beta

    # Tangency points on each circle.
    x3 = x1 + r1 * math.cos(half_pi - alpha)
    y3 = y1 + r1 * math.sin(half_pi - alpha)
    x4 = x2 + r2 * math.cos(half_pi - alpha)
    y4 = y2 + r2 * math.sin(half_pi - alpha)

    start = (x3, y3)
    end = (x4, y4)
    return [start, (start[0], -start[1]), (end[0], -end[1]), end, start]
| StarcoderdataPython |
3268908 | from pyworks import Task, State
class BaseState(State):
    """Shared fallbacks for every state of StateTask.

    State, log() and state_set() come from the pyworks framework -
    presumably log writes to the task's logger; confirm against pyworks docs.
    """

    def pw_timeout(self):
        # Default timeout handling: just record it.
        self.log("Timeout")

    def worker_done(self, msg):
        # worker_done is only meaningful in TimeoutState; log the misuse.
        self.log("worker_done(): Wrong state: %s" % self)
class InitialState(BaseState):
    """Start state: the first timeout moves the task to TimeoutState."""

    def pw_timeout(self):
        self.log("timeout(): Going to TimeoutState")
        self.state_set(TimeoutState)
class TimeoutState(BaseState):
    """State entered after a timeout; waits for a worker to finish."""

    def pw_timeout(self):
        # Further timeouts in this state are expected and ignored.
        # self.log("timeout in TimeoutState")
        pass

    def worker_done(self, msg):
        # Count completed workers and return to the initial state.
        self.task.count += 1
        self.log("Worker done at %d, set InitialState" % self.task.count)
        self.state_set(InitialState)

    def pw_close(self):
        # Forward shutdown to the owning actor.
        self.actor.pw_close()
class StateTask(Task):
    """pyworks Task driven by the state classes above."""

    def pw_initialized(self):
        # Start in InitialState with a zeroed completion counter.
        self.state_set(InitialState)
        self.count = 0
        self.log("StateTask init")

    def pw_configured(self):
        # Subscribe to "worker" events so worker_done gets dispatched.
        self.observe("worker")
| StarcoderdataPython |
6609507 | from django.db.models.fields import BigIntegerField
class SequenceField(BigIntegerField):
    """64-bit integer field intended to be backed by a database sequence.

    Currently behaves exactly like BigIntegerField.
    """
    # TODO(dcramer): implement sequences based on Instagram's work
    pass
| StarcoderdataPython |
5055615 | <reponame>allannjuguna/allannjuguna.github.io
#1 /usr/bin/python3
import sys
arguments=sys.argv
def main(filename):
    """Print each line of *filename* wrapped in <code>...</code><br/> markup.

    Exits the process with status -1 when the file cannot be read.
    """
    try:
        with open(filename, 'r') as r:
            contents = r.readlines()
        # The `with` block closes the file; the old explicit r.close() was
        # redundant.
    except OSError:
        # Only file-system errors mean "unable to read"; the old bare
        # except also swallowed unrelated bugs.
        print("Unable to read file")
        sys.exit(-1)
    # Plain loop instead of a comprehension built purely for side effects.
    for line in contents:
        if line:
            print("<code>" + line.strip() + "</code><br/>")
try:
    filename = arguments[1]
    main(filename)
except IndexError:
    # Only a missing argv entry should show usage. The old bare `except:`
    # also caught the SystemExit raised by main() on read errors and
    # misreported them as a usage problem.
    print("Usage : highlighter.py filename.txt")
    sys.exit(-1)
1899684 | <reponame>MeirKriheli/Open-Knesset
# encoding: UTF-8
from django.conf.urls import url, patterns
from ok_tag.views import suggest_tag_post, add_tag_to_object, remove_tag_from_object, \
add_tag_synonym, TagList, TagDetail, CommitteeMeetingMoreView, untagged_objects
# URL routes for the tagging app; views are imported from ok_tag.views.
ok_tag_patterns = patterns('',
    # Attach a tag to an arbitrary object, addressed by app/model/pk.
    url(r'^tags/(?P<app>\w+)/(?P<object_type>\w+)/(?P<object_id>\d+)/add-tag/$',
        add_tag_to_object,
        name='add-tag-to-object'),
    url(r'^tags/(?P<app>\w+)/(?P<object_type>\w+)/(?P<object_id>\d+)/remove-tag/$',
        remove_tag_from_object),
    # disabled for now, because we don't want users to add more tags.
    # will be added back in the future, but for editors only.
    # url(r'^tags/(?P<app>\w+)/(?P<object_type>\w+)/(?P<object_id>\d+)/create-tag/$', create_tag_and_add_to_item, name='create-tag'),
    url(r'^add_tag_synonym/(?P<parent_tag_id>\d+)/(?P<synonym_tag_id>\d+)/$', add_tag_synonym),
    url(r'^tags/$', TagList.as_view(), name='tags-list'),
    url(r'^tags/(?P<slug>.*?)/$', TagDetail.as_view(), name='tag-detail'),
    url(r'^tags_cms/(?P<pk>\d+)/$', CommitteeMeetingMoreView.as_view(),
        name='tag-detail-more-committees'),
    url(r'^suggest-tag-post/$', suggest_tag_post, name='suggest-tag-post'),
    url(r'^untagged/$', untagged_objects, name="untagged-objects"),
)
| StarcoderdataPython |
1943050 | import argparse
import boto3
from gbs3.backup import backup
from gbs3.restore import restore
import gbs3.settings
from gbs3.settings import *
from gbs3.util import eprint
def verify_conf(conf_name):
    """Exit(1) with an error message when the named setting is unset or falsy."""
    if not getattr(gbs3.settings, conf_name):
        eprint('missing config option {}'.format(conf_name))
        exit(1)
def main():
    """CLI entry point: parse 'backup'/'restore' commands and run them
    against an S3 client built from the module settings."""
    argparser = argparse.ArgumentParser(
        description='Backups Grafana dashboards and uploads them to S3 bucket.')
    sp = argparser.add_subparsers(dest='command', title='actions')
    sp.add_parser('backup',
                  description='Performs backup.',
                  help='performs backup')
    sp_restore = sp.add_parser('restore',
                               description='Restores latest backup',
                               help='restores latest backup')
    sp_restore.add_argument('object_name', type=str,
                            help='S3 backup object name to restore')
    args = argparser.parse_args()

    # Fail fast on missing credentials before touching the network.
    eprint('verifying config')
    verify_conf('S3_ACCESS_KEY_ID')
    verify_conf('S3_SECRET_ACCESS_KEY')
    verify_conf('GRAFANA_URL')
    verify_conf('GRAFANA_TOKEN')

    eprint('creating s3 client')
    s3 = boto3.resource('s3',
                        region_name=S3_REGION_NAME,
                        endpoint_url=S3_ENDPOINT_URL,
                        aws_access_key_id=S3_ACCESS_KEY_ID,
                        aws_secret_access_key=S3_SECRET_ACCESS_KEY)

    if args.command == 'backup':
        backup(s3)
    elif args.command == 'restore':
        restore(s3, args.object_name)
| StarcoderdataPython |
41999 | <reponame>hung0913208/Base
from threading import Lock
from time import sleep
from .logger import Logger, DEBUG
from .utils import update_variable_safety
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
import subprocess
import sys
import os
class Performer(object):
def __init__(self, manager, silence=False, **kwargs):
    """Create a runner bound to *manager*.

    silence=True suppresses per-command console output.
    """
    super(Performer, self).__init__()
    self._events = {}        # event name -> {True: [on_pass], False: [on_fail]}
    self._manager = manager  # owner; provides _lock/_keep/found_bug/...
    self._consumer = 0       # bootstrap counter used to assign thread roles
    self._running = 0        # tasks currently executing
    self._online = 0         # payload threads currently alive
    self._count = 0          # tasks queued but not yet finished
    self._outside = Lock()   # guards _consumer
    self._inside = Lock()    # the master blocks on this until payloads finish
    self._lock = Lock()      # guards the counters above
    self._jobs = Queue()     # (executor, command, expected, event) tuples
    self._pipe = Queue()     # control messages produced by apply()/signal()
    self._silence = silence
@property
def type(self):
    """Runner-kind tag consumed by the owning manager."""
    return 'Performer'
@property
def online(self):
self._lock.acquire()
result = self._online
self._lock.release()
return result
def reset(self):
    """Re-arm the performer: unblock the master and clear bootstrap state."""
    # Release the master's wait lock if a previous run left it held.
    if self._inside.locked():
        self._inside.release()
    self._manager._keep = True
    self._consumer = 0
def apply(self, commands):
self._manager._lock.acquire()
if self._manager._keep is True:
self._pipe.put({
'type': 'implement',
'commands': commands
})
self._manager._lock.release()
def signal(self, callback):
if isinstance(callback, list) or isinstance(callback, tuple):
self._pipe.put({
'type': 'signal',
'commands': callback
})
elif callable(callback):
self._pipe.put({
'type': 'signal',
'commands': [callback]
})
def pending(self):
self._lock.acquire()
if self._online == 0:
result = True
elif self._running <= self._online:
result = self._online - self._running < self._count
else:
result = False
self._lock.release()
return result
@property
def running(self):
self._lock.acquire()
result = self._running
self._lock.release()
return result
@property
def consumer(self):
self._outside.acquire()
consumer = self._consumer
self._outside.release()
return consumer
@consumer.setter
def consumer(self, value):
self._outside.acquire()
self._consumer = value
self._outside.release()
@property
def is_keeping(self):
self._manager._lock.acquire()
keep = self._manager._keep
self._manager._lock.release()
return keep
@staticmethod
def pretty(string, max_collumn):
result = string
for i in range(len(result), max_collumn):
result += ' '
return result
def print_command(self, executor, expected):
self._manager._lock.acquire()
expected = expected.split('/')[-1]
# @NOTE: make output look pretty and simple to understand
if not executor.lower() in ['link', 'test']:
expected = '.'.join(expected.split('.')[:-1])
if self._silence is False:
print(' %s %s' % (Performer.pretty(executor.upper(), 6), expected))
self._manager._lock.release()
def print_output(self, command):
    """Placeholder for echoing a command's console output.

    Currently a no-op: the lock is taken and released but nothing is
    printed even when not silenced.
    """
    self._manager._lock.acquire()
    if self._silence is False:
        pass
    self._manager._lock.release()
def perform_on_multi_thread(self, timeout=1):
    """Bootstrap helper for multi-threaded execution.

    The first caller becomes the 'master' (moves command bundles from
    self._pipe into self._jobs and waits on self._inside for payloads to
    drain them); every later caller becomes a 'payload' (pops jobs and
    runs them via subprocess). Returns a (role_name, callable) pair the
    calling thread is expected to run.
    """
    def stop_running():
        # Tell everyone to stop and unblock a master waiting on _inside.
        self._manager._keep = False
        if self._inside.locked():
            self._inside.release()

    def payload():
        is_okey = True
        do_nothing = False

        if self.pending() is False:
            return

        self._lock.acquire()
        self._online += 1
        self._lock.release()

        while self.is_keeping is True and (self._jobs.qsize() > 0 or self._pipe.qsize() > 0):
            finish_task = False
            do_nothing = False

            self._lock.acquire()
            if self._count == 0 and self._running > 0:
                keep = False
            else:
                keep = True
            self._lock.release()

            if keep is False:
                Logger.debug('we have nothing to do, depath now')
                break

            # @NOTE: catch an instruction and perform on payloads
            try:
                executor, command, expected, event = \
                    self._jobs.get(timeout=timeout)

                # @TODO: make command more color and look beautifull
                self._lock.acquire()
                self._running += 1
                self._lock.release()

                env = os.environ.copy()
                env['LIBC_FATAL_STDERR_'] = '1'

                build = subprocess.Popen(command.split(' '), env=env,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                error_console = build.stderr.read()
                output_console = build.stdout.read()
                build.wait()

                # @NOTE: we will show what we have done instead of what we
                # have to do
                self._lock.acquire()
                self.print_command(executor, expected)
                self._lock.release()

                if not build.returncode is None and build.returncode != 0:
                    is_okey = False
                # elif len(error_console) > 0:
                #     is_okey = False
                elif os.path.exists(expected) is False and \
                        not event in ['invoking', 'testing']:
                    is_okey = False

                # @TODO: make output more color and more highlight
                # @NOTE: check exit code
                if is_okey is False:
                    update_variable_safety(self._manager._lock, stop_running)

                    # @NOTE: since Python2 usually return to str but Python3
                    # would prefer bytes
                    if len(error_console) == 0:
                        error_console = output_console
                    if isinstance(error_console, bytes):
                        error_console = error_console.decode('utf-8')
                    if sys.version_info < (3, 0) and isinstance(error_console, unicode):
                        error_console = error_console.encode('ascii', 'ignore')

                    # @NOTE: perform event when perform a command fail
                    if event in self._events:
                        for callback in self._events[event][False]:
                            callback(expected, build.returncode)

                    # Return code -6 is SIGABRT -> reported as a crash.
                    if build.returncode != -6:
                        self._manager.found_bug( \
                            AssertionError(u'error {} when runing command \'{}\': \n\n{}\n'
                                           .format(build.returncode, command, error_console)))
                    else:
                        self._manager.found_bug( \
                            AssertionError(u'crash when runing command \'{}\''.format(command)))
                else:
                    finish_task = True

                    # @NOTE: perform event when perform a command pass
                    if event in self._events:
                        for callback in self._events[event][True]:
                            callback(expected)
                    Logger.debug('finish task %s %s' % (executor, expected))
            except Exception as error:
                if isinstance(error, Empty):
                    do_nothing = True

                    if self.pending() is False:
                        break
                else:
                    update_variable_safety(self._manager._lock, stop_running)

                    # NOTE(review): error_console may be unbound here when
                    # Popen itself raised - confirm intended behavior.
                    if sys.version_info < (3, 0) and isinstance(error_console, unicode):
                        error_console = error_console.encode('ascii', 'ignore')
                    self._manager.found_bug(
                        AssertionError(u'error when runing command \'{}\': \n\n{}\n'
                                       .format(command, error_console)))
                    break
            finally:
                if do_nothing is False:
                    self._lock.acquire()
                    self._count -= 1

                    if finish_task:
                        self._running -= 1

                    # @NOTE: everything done on good condition, release master
                    if self._jobs.qsize() == 0 and self._running <= 0 and finish_task:
                        if self._inside.locked():
                            Logger.debug('Release master when counter is %d '
                                         'and running is %d' % (self._count, self._running))
                            self._inside.release()

                    Logger.debug('Finish a task, counter=%d, running=%d' % (self._count, self._running))
                    self._lock.release()
                else:
                    sleep(timeout)
                    finish_task = False
        else:
            # @NOTE: when on fall cases, the lastest payload must release
            # master before closing
            if self._online > 1:
                Logger.debug('counter show %d tasks on pending' % self._count)

            try:
                if self.is_keeping is False and self._inside.locked() and self.online == 1:
                    self._inside.release()
            except RuntimeError:
                pass

        # @NOTE: auto update status of payloads to optimize performance
        self._lock.acquire()
        self._online -= 1
        self._lock.release()

    def master():
        while self.is_keeping is True and self._pipe.qsize() > 0:
            try:
                job = self._pipe.get(timeout=timeout)
                need_to_wait = Logger.get_level() == DEBUG
                error_message = None

                if job['type'] == 'implement':
                    self._count = 0

                    # @NOTE: parse command structure and instruct payloads
                    for command in job['commands']:
                        pattern = command['pattern']
                        event = command['event']

                        if not command['output'] is None:
                            workspace, output_file = command['output']

                        # @NOTE: check and join inputs
                        if isinstance(command['input'], str):
                            if os.path.exists(command['input']) is False:
                                error_message = 'missing %s while it has ' \
                                    'been required by %s' % (command['input'], command['output'])
                            else:
                                input_path = command['input']
                        else:
                            for item in command['input']:
                                if os.path.exists(item) is False:
                                    error_message = 'missing %s while it has ' \
                                        'been required by %s' % (command['input'], command['output'])
                                else:
                                    input_path = ' '.join(command['input'])

                        if not error_message is None:
                            update_variable_safety(self._manager._lock, stop_running, error_message)

                        if not command['output'] is None:
                            # @NOTE: check workspace and create if it's not existed
                            if command['executor'].lower() in ['ar']:
                                instruct = pattern % (workspace, output_file, input_path,)
                            else:
                                instruct = pattern % (input_path, workspace, output_file)
                            expected = '%s/%s' % (workspace, output_file)

                            if os.path.exists(workspace) is False:
                                os.mkdir(workspace, 0o777)
                            elif os.path.isfile(workspace) is True:
                                os.remove(workspace)
                                os.mkdir(workspace, 0o777)

                            # @NOTE: prepare output dir if it needs
                            current_dir = workspace
                            for dir_name in output_file.split('/')[:-1]:
                                # @NOTE: it seems on MacOS, python don't allow
                                # to create dir with '//' in path
                                if dir_name == '/' or len(dir_name) == 0:
                                    continue
                                elif current_dir[-1] == '/':
                                    current_dir = '%s%s' % (current_dir, dir_name)
                                else:
                                    current_dir = '%s/%s' % (current_dir, dir_name)

                                if os.path.exists(current_dir) is False:
                                    os.mkdir(current_dir, 0o777)

                            if os.path.exists(expected) is True:
                                continue
                            else:
                                self._jobs.put((command['executor'], instruct,
                                                expected, event))
                                self._count += 1
                                need_to_wait = True
                        elif not self._manager.backends['config'].os in ['Window', 'Drawin']:
                            if 'executor' in command:
                                self._jobs.put((command['executor'],
                                                pattern % input_path,
                                                input_path.split('/')[-1],
                                                event))
                            else:
                                self._jobs.put(('TEST',
                                                pattern % input_path,
                                                input_path.split('/')[-1],
                                                event))
                            self._count += 1
                    else:
                        Logger.debug('finish adding a bundle of tasks, '
                                     'count=%d, running=%d' % (self._count, self._running))

                        if need_to_wait is False:
                            continue

                        if self._manager.count_payload == 0:
                            # @TODO: in many case this would mean payloads have done completely
                            # and no pending tasks here now, so we must exit on safe way now
                            # However, we not sure about fail cases so we must be carefull
                            # checking before annouce any decision
                            Logger.debug("when count_payload == 0 we have %d jobs" % self._jobs.qsize())

                            if self._jobs.qsize() == 0:
                                Logger.debug('Finish jobs now, going to stop everything from now')
                                update_variable_safety(self._manager._lock, stop_running)
                            else:
                                Logger.debug("wait payload(s) join(s) to finish %d jobs" % self._jobs.qsize())
                                self._inside.acquire()
                        elif self._count > 0:
                            Logger.debug("wait %d finish %d jobs" % (self._manager.count_payload, self._jobs.qsize()))
                            self._inside.acquire()
                        else:
                            Logger.debug('going to add new task without wait payload, '
                                         'count=%d, running=%d' % (self._count, self._running))
                elif job['type'] == 'signal':
                    for callback in job['commands']:
                        callback()
            except Empty:
                continue

    # @NOTE: we will use bootstrap as a specific way to choose which role would be
    # perform to the current thread
    self._lock.acquire()
    current = self.consumer
    self.consumer += 1
    self._lock.release()

    if current == 0:
        self._inside.acquire()
        return 'master', master
    else:
        return 'payload', payload
def perform_on_single_thread(self, timeout=1):
    """Bootstrap helper for single-threaded execution.

    The first call returns the 'master' callable (drains self._pipe into
    self._jobs); later calls return the 'payload' callable (runs queued
    jobs via subprocess). Each callable returns True on a clean pass and
    False when a command failed.
    """
    self.consumer += 1

    def stop_running():
        self._manager._keep = False

    def master():
        while self.is_keeping is True and self._pipe.qsize() > 0:
            try:
                job = self._pipe.get(timeout=timeout)
                error_message = None

                if job['type'] == 'implement':
                    # @NOTE: parse command structure and instruct payloads
                    for command in job['commands']:
                        pattern = command['pattern']
                        event = command['event']

                        if not command['output'] is None:
                            workspace, output_file = command['output']

                        # @NOTE: check and join inputs
                        if isinstance(command['input'], str):
                            if os.path.exists(command['input']) is False:
                                error_message = 'missing %s' % command['input']
                            else:
                                input_path = command['input']
                        else:
                            for item in command['input']:
                                if os.path.exists(item) is False:
                                    error_message = AssertionError('missing %s' % item)
                                else:
                                    input_path = ' '.join(command['input'])

                        if not error_message is None:
                            update_variable_safety(None, stop_running, error_message)

                        if not command['output'] is None:
                            # @NOTE: check workspace and create if it's not existed
                            if command['executor'].lower() in ['ar']:
                                instruct = pattern % (workspace, output_file, input_path,)
                            else:
                                instruct = pattern % (input_path, workspace, output_file)
                            expected = '%s/%s' % (workspace, output_file)

                            if os.path.exists(workspace) is False:
                                os.mkdir(workspace, 0o777)
                            elif os.path.isfile(workspace) is True:
                                os.remove(workspace)
                                os.mkdir(workspace, 0o777)

                            # @NOTE: prepare output dir if it needs
                            current_dir = workspace
                            for dir_name in output_file.split('/')[:-1]:
                                # @NOTE: it seems on MacOS, python don't allow
                                # to create dir with '//' in path
                                if dir_name == '/' or len(dir_name) == 0:
                                    continue
                                elif current_dir[-1] == '/':
                                    current_dir = '%s%s' % (current_dir, dir_name)
                                else:
                                    current_dir = '%s/%s' % (current_dir, dir_name)

                                if os.path.exists(current_dir) is False:
                                    os.mkdir(current_dir, 0o777)

                            if os.path.exists(expected) is True:
                                continue
                            else:
                                self._jobs.put((command['executor'], instruct,
                                                expected, event))
                                self._count += 1
                        elif not self._manager.backends['config'].os in ['Window', 'Drawin']:
                            if 'executor' in command:
                                self._jobs.put((command['executor'],
                                                pattern % input_path,
                                                input_path.split('/')[-1],
                                                event))
                            else:
                                self._jobs.put(('TEST',
                                                pattern % input_path,
                                                input_path.split('/')[-1],
                                                event))
                        else:
                            return True
                elif job['type'] == 'signal':
                    for callback in job['commands']:
                        callback()
            except Empty:
                return True

    def payload():
        error_console = None
        output_console = None

        while self.is_keeping is True and self._jobs.qsize() > 0:
            # @NOTE: catch an instruction and perform on payloads
            try:
                executor, command, expected, event = \
                    self._jobs.get(timeout=timeout)

                # @TODO: make command more color and look beautifull
                self.print_command(executor, expected)

                env = os.environ.copy()
                env['LIBC_FATAL_STDERR_'] = '1'

                build = subprocess.Popen(command.split(' '), env=env,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                error_console = build.stderr.read()
                output_console = build.stdout.read()
                build.wait()

                if not build.returncode is None and build.returncode != 0:
                    is_okey = False
                elif os.path.exists(expected) is False and \
                        not event in ['invoking', 'testing']:
                    print(expected)
                    is_okey = False
                else:
                    is_okey = True

                # @TODO: make output more color and more highlight
                # @NOTE: check exit code
                if is_okey is False:
                    update_variable_safety(None, stop_running)

                    # @NOTE: since Python2 usually return to str but Python3
                    # would prefer bytes
                    if len(error_console) == 0:
                        error_console = output_console
                    if isinstance(error_console, bytes):
                        error_console = error_console.decode('utf-8')
                    if sys.version_info < (3, 0) and isinstance(error_console, unicode):
                        error_console = error_console.encode('ascii', 'ignore')

                    # @NOTE: perform event when perform a command fail
                    if event in self._events:
                        for callback in self._events[event][False]:
                            callback(expected, build.returncode)

                    # Return code -6 is SIGABRT -> reported as a crash.
                    if build.returncode != -6:
                        self._manager.found_bug( \
                            AssertionError('error when {} runing command \'{}\': \n\n{}\n'
                                           .format(build.returncode, command, error_console)),
                            no_lock=True)
                    else:
                        self._manager.found_bug( \
                            AssertionError('crash when runing command \'{}\': \n\n{}\n'
                                           .format(command)),
                            no_lock=True)
                else:
                    # @NOTE: perform event when perform a command pass
                    if event in self._events:
                        for callback in self._events[event][True]:
                            callback(expected)
            except Exception as error:
                if isinstance(error, Empty):
                    return True
                else:
                    update_variable_safety(None, stop_running)

                    if sys.version_info < (3, 0) and isinstance(error_console, unicode):
                        error_console = error_console.encode('ascii', 'ignore')
                    self._manager.found_bug(
                        AssertionError('error when runing command \'{}\': \n\n{}\n'
                                       .format(command, error_console)),
                        no_lock=True)
                    return False
        else:
            return True

    if self.consumer == 1:
        return master
    else:
        return payload
def install_event(self, command, passing, callback):
if not command in self._events:
self._events[command] = {True: [], False: []}
self._events[command][passing].append(callback)
| StarcoderdataPython |
142306 | # -*- coding: utf-8 -*-
"""
Using TorchScript to serialize and deploy model
===============================================
Models in TorchANI's model zoo support TorchScript. TorchScript is a way to create
serializable and optimizable models from PyTorch code. It allows users to saved their
models from a Python process and loaded in a process where there is no Python dependency.
"""
###############################################################################
# To begin with, let's first import the modules we will use:
import torch
import torchani
###############################################################################
# Let's now load the built-in ANI-1ccx models. The builtin ANI-1ccx contains 8
# models trained with diffrent initialization.
model = torchani.models.ANI1ccx()
###############################################################################
# It is very easy to compile and save the model using `torch.jit`.
compiled_model = torch.jit.script(model)
torch.jit.save(compiled_model, 'compiled_model.pt')
###############################################################################
# Besides compiling the ensemble, it is also possible to compile a single network
compiled_model0 = torch.jit.script(model[0])
torch.jit.save(compiled_model0, 'compiled_model0.pt')
###############################################################################
# For testing purposes, we will now load the models we just saved and see if they
# produces the same output as the original model:
loaded_compiled_model = torch.jit.load('compiled_model.pt')
loaded_compiled_model0 = torch.jit.load('compiled_model0.pt')
###############################################################################
# We use the molecule below to test:
coordinates = torch.tensor([[[0.03192167, 0.00638559, 0.01301679],
[-0.83140486, 0.39370209, -0.26395324],
[-0.66518241, -0.84461308, 0.20759389],
[0.45554739, 0.54289633, 0.81170881],
[0.66091919, -0.16799635, -0.91037834]]])
species = model.species_to_tensor('CHHHH').unsqueeze(0)
###############################################################################
# And here is the result:
energies_ensemble = model((species, coordinates)).energies
energies_single = model[0]((species, coordinates)).energies
energies_ensemble_jit = loaded_compiled_model((species, coordinates)).energies
energies_single_jit = loaded_compiled_model0((species, coordinates)).energies
print('Ensemble energy, eager mode vs loaded jit:', energies_ensemble.item(), energies_ensemble_jit.item())
print('Single network energy, eager mode vs loaded jit:', energies_single.item(), energies_single_jit.item())
| StarcoderdataPython |
3489960 | <gh_stars>0
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from django.contrib import messages
from django.http import Http404
from django.views import generic
from django.contrib.auth import get_user_model
User = get_user_model()
from . import models
from . import forms
from category.models import Category
from shopping_cart.models import Order
from django.db.models import Q
# Create your views here.
def Product_list(request):
    """Render the product list page.

    For authenticated users the context additionally carries the products
    already sitting in their open (not yet ordered) cart, so the template
    can mark them. NOTE(review): the name is not snake_case, but renaming
    would break URLconf references.
    """
    context = {
        'products': models.Product.objects.all(),
        'categories': Category.objects.all(),
    }
    if request.user.is_authenticated:
        open_orders = Order.objects.filter(
            is_ordered=False, owner=request.user.user_profile)
        in_cart = []
        if open_orders.exists():
            in_cart = [item.product for item in open_orders[0].items.all()]
        context['current_order_products'] = in_cart
    return render(request, 'products/product-list.html', context)
class ProductDetailView(generic.DetailView):
    """Product detail page, with sibling products from the same category."""
    model = models.Product
    template_name = 'products/product_detail.html'
    # BUG FIX: ('category') is just the string 'category' — a one-element
    # tuple needs a trailing comma. NOTE(review): Django's DetailView does
    # not consume a `select_related` attribute by itself; confirm a project
    # mixin reads it, otherwise it is dead configuration.
    select_related = ('category',)

    def get_context_data(self, **kwargs):
        """Add products of the same category for the 'related items' strip."""
        context = super(ProductDetailView, self).get_context_data(**kwargs)
        context['products'] = models.Product.objects.filter(
            category__name=self.object.category.name).order_by('created_at')
        return context
class DeleteProduct(LoginRequiredMixin, generic.DeleteView):
    """Let an authenticated user delete one of their own products."""
    model = models.Product
    success_url = reverse_lazy('products:all')

    def get_queryset(self):
        # Only the owner may delete: restrict to products of the requester.
        owned = super().get_queryset()
        return owned.filter(user__id=self.request.user.id)

    def delete(self, *args, **kwargs):
        # Flash a confirmation before delegating the actual deletion.
        messages.success(self.request, 'Product deleted successfully')
        return super().delete(*args, **kwargs)
@login_required
def sell_product(request):
    """Create a new product listing for the logged-in user.

    GET renders an empty form; POST validates and saves the product,
    attaching the uploaded image when one was provided.
    """
    if request.method == 'POST':
        form = forms.ProductCreateForm(request.POST, request.FILES)
        if form.is_valid():
            product = form.save(commit=False)
            product.user = request.user
            # check if an image is provided
            if 'image' in request.FILES:
                product.ad_image = request.FILES['image']
            product.save()
            # BUG FIX: this success notice was previously emitted with
            # messages.error, so it rendered at the wrong (error) level.
            messages.success(request, 'Your product has been successfully submitted.')
            return redirect('home')
        messages.error(request, 'Invalid form, please fill the necessary fields')
        return redirect('create')
    form = forms.ProductCreateForm()
    return render(request, 'products/product_form.html', {'form': form})
def search_product(request):
    """Search products whose title or price contains the query substring.

    Tolerates a missing ``search`` GET parameter (treated as an empty
    query) instead of crashing on ``icontains=None``. The leftover debug
    ``print`` (with its stray semicolon) has been removed.
    """
    query = request.GET.get('search') or ''
    results = models.Product.objects.filter(
        Q(title__icontains=query) | Q(price__icontains=query)
    ).distinct()
    return render(request, 'products/search.html', {'results': results})
| StarcoderdataPython |
1969757 | import json
from plynx.db.node import Node
from plynx.base import hub
from plynx.utils.common import parse_search_string
def _enhance_list_item(raw_item):
    """Validate a hub list item; Group items are processed recursively."""
    if raw_item['_type'] == 'Group':
        # TODO proper checking
        raw_item['items'] = [_enhance_list_item(sub) for sub in raw_item['items']]
        return raw_item
    # Round-trip through Node to make sure the item is a valid node dict.
    return Node.from_dict(raw_item).to_dict()
class StaticListHub(hub.BaseHub):
    """Hub backed by a static JSON file of node (and group) definitions."""

    def __init__(self, filename):
        super(StaticListHub, self).__init__()
        with open(filename) as fh:
            raw_items = json.load(fh)
        # Validate every entry up front so search() can serve them as-is.
        self.list_of_nodes = [_enhance_list_item(item) for item in raw_items]

    def search(self, query):
        """Return nodes whose title contains the (case-insensitive) query."""
        # TODO use search_parameters
        # TODO should parse_search_string be removed from nodes_collection?
        _, needle = parse_search_string(query.search)
        needle_upper = needle.upper()
        matches = [node for node in self.list_of_nodes
                   if not needle or needle_upper in node['title'].upper()]
        return {
            'list': matches,
            'metadata': [
                {
                    'total': len(matches)
                },
            ],
        }
| StarcoderdataPython |
6425759 | <filename>tests/test_all.py<gh_stars>1-10
import pytest
from werkzeug.routing import RequestRedirect
from flask import Flask, abort, Response, Blueprint
from werkzeug.datastructures import WWWAuthenticate
from datetime import datetime
from flask_errors_handler import (
ErrorHandler, SubdomainDispatcher, URLPrefixDispatcher
)
# Module-level handler instance; bound to the Flask app inside the `app` fixture.
error = ErrorHandler()
@pytest.fixture
def app():
    """Flask app wired with API, web and subdomain blueprints plus error handlers.

    Every route deliberately aborts with a specific HTTP error so the tests
    below can check how flask-errors-handler renders each case per blueprint
    kind. NOTE: registration order is significant — the last dispatcher
    installed via init_app wins as the default.
    """
    _app = Flask(__name__)
    _app.config['ERROR_PAGE'] = 'error.html'
    _app.config['SERVER_NAME'] = 'flask.dev:5000'
    api = Blueprint('api', __name__)
    web = Blueprint('web', __name__, url_prefix='/web')
    custom = Blueprint('custom', __name__, subdomain='api', url_prefix='/custom')
    error.init_app(_app, dispatcher='notfound')
    error.init_app(_app, dispatcher=SubdomainDispatcher)
    error.api_register(api)
    error.web_register(web)
    error.failure_register(_app)

    @_app.route('/not-allowed', methods=['GET'])
    def test_not_allowed():
        return 'Not allowed'

    # Custom error handler for the subdomain blueprint: plain-text body,
    # 404 status and a marker header the tests assert on.
    @error.register(custom)
    def error_handler(exc):
        return str(exc), 404, {'Content-Type': 'text/html', 'custom': 'test'}

    @api.route('/api')
    def index():
        abort(500, 'Error from app')

    @api.route('/api/unauthorized')
    def unauthorized():
        auth = WWWAuthenticate()
        auth.set_basic()
        abort(401, www_authenticate=auth)

    @api.route('/api/retry')
    def retry():
        abort(429, retry_after=datetime(year=2000, month=3, day=1))

    @api.route('/api/response')
    def response():
        abort(500, response=Response("response"))

    @api.route('/api/range')
    def ranger():
        abort(416, length=10)

    @api.route('/permanent/')
    def permanent():
        return 'redirected'

    @api.route('/api/error')
    def api_error():
        raise NameError('exception from app')

    @api.route('/methodnotallowed/option')
    def method_not_allowed_option():
        abort(405, valid_methods=['GET', 'POST'])

    @api.route('/methodnotallowed')
    def method_not_allowed_without_option():
        abort(405)

    @web.route('/web')
    def index():
        abort(500, 'Error from web blueprint')

    @web.route('/redirect')
    def redirect():
        raise RequestRedirect("/web")

    @web.route('/web/error')
    def web_error():
        _app.config['ERROR_PAGE'] = None
        abort(500, 'Error from web blueprint')

    @custom.route('/custom')
    def index():
        abort(500, 'Error from custom blueprint')

    _app.register_blueprint(api)
    _app.register_blueprint(custom, url_prefix='/custom')
    _app.register_blueprint(web, url_prefix='/web')
    _app.testing = True
    return _app
@pytest.fixture
def client(app):
    """A Werkzeug test client bound to the configured application."""
    return app.test_client()
def test_app_runs(client):
res = client.get('/')
assert res.status_code == 404
assert res.get_json()['type'] == 'https://httpstatuses.com/404'
def test_method_not_allowed(client):
res = client.post('/api')
assert res.status_code == 405
assert 'Allow' in res.headers
assert 'allowed' in res.get_json()['response']
assert res.get_json()['type'] == 'https://httpstatuses.com/405'
def test_api(client):
res = client.get('/api')
assert res.status_code == 500
assert res.headers.get('Content-Type') == 'application/problem+json'
data = res.get_json()
assert data['type'] == 'https://httpstatuses.com/500'
assert data['title'] == 'Internal Server Error'
assert data['detail'] is not None
assert data['status'] == 500
assert data['instance'] == 'about:blank'
assert data['response'] is None
def test_api_error(client):
res = client.get('/api/error')
assert res.status_code == 500
assert res.headers.get('Content-Type') == 'application/problem+json'
def test_web(client):
res = client.get('/web/web')
assert res.status_code == 500
assert res.headers.get('Content-Type') == 'text/html; charset=utf-8'
def test_web_redirect(client):
res = client.get('/web/redirect')
assert res.status_code == 308
assert res.headers.get('Content-Type') == 'text/html; charset=utf-8'
assert res.headers.get('Location').endswith('/web')
def test_web_xhr(client):
res = client.get('/web/web', headers={'X-Requested-With': 'XMLHttpRequest'})
assert res.status_code == 500
assert res.headers.get('Content-Type') == 'application/problem+json'
def test_web_error(client):
res = client.get('/web/web/error')
assert res.status_code == 500
assert res.headers.get('Content-Type') == 'text/html; charset=utf-8'
def method_not_allowed(client):
    # NOTE(review): this function lacks the `test_` prefix, so pytest never
    # collects or runs it. Renaming (e.g. `test_method_not_allowed_options`)
    # would activate it — `test_method_not_allowed` is already taken above.
    # NOTE(review): it requests '/methodnotallowed/options' but the fixture
    # registers the route as '/methodnotallowed/option' — confirm intent; as
    # written the second half would 404, not 405, if the test ever ran.
    res = client.get('/methodnotallowed')
    assert res.status_code == 405
    assert res.headers.get('Allow') is None
    res = client.get('/methodnotallowed/options')
    assert res.status_code == 405
    assert res.headers['Allow'] == 'GET, POST'
    assert res.get_json()['response']['Allow'] == ['GET', 'POST']
def test_custom(client, app):
res = client.get('/custom/custom', base_url='http://api.' + app.config['SERVER_NAME'])
assert res.status_code == 404
assert res.headers.get('Content-Type') == 'text/html'
def test_dispatch_error_web(client, app):
    """With the URL-prefix dispatcher, web 404s render as HTML."""
    error.register_dispatcher(app, URLPrefixDispatcher)
    resp = client.get('/web/web/page-not-found')
    assert resp.status_code == 404
    assert 'text/html' in resp.headers['Content-Type']
def test_dispatch_error_api(client, app):
res = client.get('/api-not-found', base_url='http://api.' + app.config['SERVER_NAME'])
assert res.status_code == 404
assert 'text/html' in res.headers['Content-Type']
assert 'test' in res.headers['custom']
def test_dispatch_default(client, app):
    """Re-registering the default dispatcher serves HTML error pages."""
    error.register_dispatcher(app, dispatcher='default')
    resp = client.get('/not-found')
    assert resp.status_code == 404
    assert 'text/html' in resp.headers['Content-Type']
    assert 'https://httpstatuses.com/404' in resp.data.decode()
    resp = client.post('/not-allowed')
    assert resp.status_code == 405
    assert 'text/html' in resp.headers['Content-Type']
    assert 'https://httpstatuses.com/405' in resp.data.decode()
def test_permanent_redirect(client):
res = client.get('/permanent')
assert res.status_code in (301, 308)
assert 'Location' in res.headers
def test_response(client):
res = client.get('/api/response')
assert res.status_code == 500
assert res.data == b'response'
def test_unauthorized(client):
res = client.get('/api/unauthorized')
assert res.status_code == 401
assert res.headers['WWW-Authenticate'] == 'Basic realm="authentication required"'
auth = res.get_json()['response']['authenticate'][0]
assert auth['auth_type'] == 'basic'
assert auth['realm'] == 'authentication required'
def test_retry_after(client):
date = 'Wed, 01 Mar 2000 00:00:00 GMT'
res = client.get('/api/retry')
assert res.status_code == 429
assert res.headers['Retry-After'] == date
assert res.get_json()['response']['retry_after'] == date
def test_range(client):
res = client.get('/api/range')
assert res.status_code == 416
assert res.headers['Content-Range'] == f"bytes */10"
data = res.get_json()['response']
assert data['length'] == 10
assert data['units'] == 'bytes'
| StarcoderdataPython |
1704815 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Smoke test for astylo.iolib: exercises every reader/writer pair
# (FITS, HDF5, ASCII, CSV) against files in ./dat, writing output to ./out.

import sys, os, logging
# logging.disable(sys.maxsize)

## Set dir
testdir = os.path.dirname(os.path.abspath(__file__))
datdir = testdir+'/dat/'
outdir = testdir+'/out/'
if not os.path.exists(outdir):
    os.makedirs(outdir)

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

## Local
sys.path.insert(0, testdir+'/..') ## astylo path
from astylo.iolib import (fclean, read_fits, write_fits,
                          read_hdf5, write_hdf5,
                          read_ascii, write_ascii,
                          read_csv, write_csv, )

print('\n TEST read_fits ')
print('----------------')
get_fdata = read_fits(datdir+'M83', datdir+'M83_unc')
# print('header \n', get_fdata.header)
# print('header of TAB \n', get_fdata.header_w)
print('data (shape) \n', get_fdata.data.shape)
print('wave (shape) \n', get_fdata.wave.shape)
print('uncertainty (shape) \n', get_fdata.unc.shape)

print('\n TEST write_fits ')
print('----------------')
write_fits(outdir+'M83_copy',
           get_fdata.header, get_fdata.data, get_fdata.wave)
write_fits(outdir+'M83_irs_data_format_copy',
           get_fdata.header, get_fdata.data, get_fdata.wave, wmod=1)

print('\n TEST write_hdf5 ')
print('-----------------')
label = [['alpha', 'beta', 'gamma'], [1,2,3]]
data1 = np.arange(5, 23, 2).reshape((3,3))
# data2 = [1,2,3]
# data2 = [[1,2,3]]
data2 = np.arange(4).reshape((2,2))
write_hdf5(outdir+'test_iolib', 'Label', label)
write_hdf5(outdir+'test_iolib', 'Data', data1, append=True)
# Partial writes into an existing dataset via index ranges:
# write_hdf5(outdir+'test_iolib', 'Data', data2, ind1=[2,5], append=True)
# write_hdf5(outdir+'test_iolib', 'Data', data2, ind1=1, ind2=[0,3], append=True)
write_hdf5(outdir+'test_iolib', 'Data', data2, ind1=[0,2], ind2=[1,3], append=True)
write_hdf5(outdir+'test_iolib_h5grp', 'Grp data', data1, group='/test_grp', verbose=True)
write_hdf5(outdir+'test_iolib_h5grp', 'Grp data', data2, group='/test_grp',
           ind1=[0,2], ind2=[1,3], append=True, verbose=True)
print('See ./out [Done]')

print('\n TEST read_hdf5 ')
print('----------------')
get_label = read_hdf5(outdir+'test_iolib', 'Label')
print('Label \n', get_label)
get_data = read_hdf5(outdir+'test_iolib', 'Data')
print('Data \n', get_data)
get_grp = read_hdf5(outdir+'test_iolib_h5grp', 'Grp data', group='test_grp')
print('Grp data \n', get_grp)

print('\n TEST write_ascii ')
print('------------------')
lab = ['col1', 'col2']
arr = np.arange(12).reshape((6,2))
arr_trans = np.arange(0, 12000, 1000).reshape((2,6))
write_ascii(outdir+'test_iolib', comment='Empty file!')
write_ascii(outdir+'test_iolib', lab, arr, comment='Data begins from line 3')
write_ascii(outdir+'test_iolib', dset=arr_trans, trans=True, append=True)
# write_ascii(outdir+'ascii_csv', lab, arr, ascext='.csv') # not compatible
print('See ./out [Done]')

print('\n TEST read_ascii ')
print('-----------------')
get_arr = read_ascii(outdir+'test_iolib', dtype=float)
print('col1 \n', get_arr['col1'])
print('col2 \n', get_arr['col2'])
get_adata = read_ascii(datdir+'filt_IRAC1', start_header=2)
print('col1 - Wave (microns) \n', get_adata['Wave'][:10])
print('col2 - Spectral Response (electrons/photon) \n', get_adata['col2'][:10])

print('\n TEST write_csv ')
print('----------------')
write_csv(outdir+'test_iolib', lab, arr)
print('See ./out [Done]')

print('\n TEST read_csv ')
print('---------------')
get_cdata = read_csv(outdir+'test_iolib', 'col2', 'col1')
print('col2: \n', get_cdata['col2'])
print('col1: \n', get_cdata['col1'])

# Interactive cleanup of ./out; only runs when the user answers 'y'.
if input('Test fclean (y/n): ')=='y':
    print('\n TEST fclean ')
    print('-------------')
    info = './out is empty! [Done]'
    fclean('out', info)
| StarcoderdataPython |
4902021 | <filename>tests/test_post_train.py
import math
import numpy as np
import pytest
from helpers.utils import get_features_labels, get_test_case, time_predict
@pytest.mark.parametrize(
    "kwargs",
    [
        # 1. Having a house built later should not impact the SalePrice
        ({"key": "YearBuilt", "add": 1}),
    ],
)
def test_invariance_tests(kwargs, model, dummy_house):
    """
    Keeping all except one feature at a time the same, perturbing that
    feature slightly should not result in a noticeable difference in the
    model's prediction.
    """
    changed_score, unchanged_score = get_test_case(
        dummy_house,
        model,
        **kwargs,
    )
    # Allow at most ~$5k absolute difference between the perturbed and
    # baseline predictions. BUG FIX: the original passed rel_tol=5e3, which
    # makes math.isclose true for virtually ANY pair of finite prices (a
    # relative tolerance of 5000x); an absolute dollar tolerance is abs_tol.
    assert math.isclose(
        changed_score,
        unchanged_score,
        abs_tol=5e3,
    )
# TODO: Add in argument to parametrize to handle cases where a feature should
# negatively impact price
@pytest.mark.parametrize(
    "kwargs",
    [
        # 1. Increasing overall quality should increase the SalePrice
        ({"key": "LotArea", "multiply": 1.5}),
        # 2. Having a garage with a bigger capacity should increase the SalePrice
        ({"key": "GarageCars", "add": 2}),
        # 3. Better OverallCond should increase the SalePrice
        ({"key": "OverallCond", "add": 5}),
    ],
)
def test_directional_expectation_tests(
    kwargs,
    model,
    dummy_house,
):
    """
    Keeping all except one feature at a time the same,
    changing these features a bit should result in a noticeable
    (directional) increase in the predicted price.
    """
    changed_score, unchanged_score = get_test_case(
        dummy_house,
        model,
        **kwargs,
    )
    assert changed_score > unchanged_score
def test_model_inference_times(request, dataset, model):
    """p99 prediction latency must stay under the configured budget."""
    X, _ = get_features_labels(dataset, target_column="SalePrice")
    # Sample the latency distribution over 100 repeated predictions.
    samples = np.array([time_predict(model, X) for _ in range(100)])
    p99 = np.quantile(samples, 0.99)
    budget = float(request.config.getini("inference_time"))
    assert p99 < budget, (
        f"Prediction time at the 99th percentile should be < {budget} but was {p99}"
    )
def test_model_metric(request, model_metrics):
current_score = model_metrics.get("rmse")
rmse = request.config.getini("rmse")
assert int(current_score) <= int(rmse)
| StarcoderdataPython |
5135987 | # print method is to show text to console
print("life is short,I use python!")
print('--<-<-<@')
print('good good study,\nday day up')
print('AB'*3+'CD') | StarcoderdataPython |
170715 | import unittest
import tock
from tock.grammars import *
from tock.syntax import String
class TestGrammar(unittest.TestCase):
    """Unit tests for tock.grammars.Grammar construction and CFG analyses."""

    def test_init(self):
        # Build a grammar programmatically and check rules, nonterminals
        # and the human-readable str() rendering (uses → and ε).
        g = Grammar()
        g.set_start_nonterminal('S')
        g.add_nonterminal('T')
        g.add_rule('S', 'a S b')
        g.add_rule('S', 'T')
        g.add_rule('T', 'c T d')
        g.add_rule('T', '&')
        self.assertEqual(g.nonterminals, {'S', 'T'})
        self.assertEqual(set(g.rules), {(String('S'), String('a S b')),
                                        (String('S'), String('T')),
                                        (String('T'), String('c T d')),
                                        (String('T'), String('&'))})
        self.assertEqual(str(g), 'nonterminals: {S,T}\nstart: S\nS → a S b\nS → T\nT → c T d\nT → ε')

    def test_from_lines(self):
        # The text form ('X -> y z', with & for epsilon) parses to the
        # same structures as programmatic construction.
        g = Grammar.from_lines([
            'S -> a S b',
            'S -> &'
        ])
        self.assertEqual(g.nonterminals, {'S'})
        self.assertEqual(set(g.rules), {(String('S'), String('a S b')),
                                        (String('S'), String('&'))})
        self.assertEqual(str(g), 'nonterminals: {S}\nstart: S\nS → a S b\nS → ε')

    def test_is(self):
        # Classification predicates for right-linear grammars...
        g = Grammar.from_lines([
            'S -> a S',
            'S -> &'
        ])
        self.assertFalse(g.is_leftlinear())
        self.assertTrue(g.is_rightlinear())
        self.assertTrue(g.is_contextfree())
        self.assertFalse(g.is_contextsensitive())
        self.assertFalse(g.is_noncontracting())
        self.assertTrue(g.is_unrestricted())
        # ...left-linear grammars...
        g = Grammar.from_lines([
            'S -> S b',
            'S -> b'
        ])
        self.assertTrue(g.is_leftlinear())
        self.assertFalse(g.is_rightlinear())
        self.assertTrue(g.is_contextfree())
        self.assertTrue(g.is_contextsensitive())
        self.assertTrue(g.is_noncontracting())
        self.assertTrue(g.is_unrestricted())
        # ...and a context-free grammar that is neither left- nor right-linear.
        g = Grammar.from_lines([
            "S' -> &",
            "S' -> S",
            'S -> a S b',
            'S -> a b'
        ])
        self.assertFalse(g.is_leftlinear())
        self.assertFalse(g.is_rightlinear())
        self.assertTrue(g.is_contextfree())
        self.assertTrue(g.is_contextsensitive())
        self.assertTrue(g.is_noncontracting())
        self.assertTrue(g.is_unrestricted())

    def test_ll(self):
        # Nullable / FIRST / FOLLOW computations used for LL parsing;
        # ⊣ denotes the end-of-input marker.
        self.maxDiff = None
        g = Grammar.from_lines(["S -> a S c",
                                "S -> T",
                                "T -> b T",
                                "T -> &"])
        nullable = g.compute_nullable()
        self.assertEqual(nullable, set(map(String,
                                           ['S', 'T', '&'])))
        first = g.compute_first(nullable)
        first_correct = dict([(String(k), set(v)) for (k, v) in [
            ('S', ['a', 'b']),
            ('T', ['b']),
            ('a', ['a']),
            ('b', ['b']),
            ('c', ['c']),
            ('&', []),
            ('S c', ['a', 'b', 'c']),
            ('a S c', ['a']),
            ('b T', ['b']),
        ]])
        self.assertEqual(first, first_correct)
        follow = g.compute_follow(nullable, first)
        follow_correct = {'S': {'c', '⊣'},
                          'T': {'c', '⊣'}}
        self.assertEqual(follow, follow_correct)
| StarcoderdataPython |
195174 | <reponame>maikenp/galaxy
# for rgenetics - lped to pbed
# where to stop with converters
# pbed might be central
# eg lped/eigen/fbat/snpmatrix all to pbed
# and pbed to lped/eigen/fbat/snpmatrix ?
# that's a lot of converters
import os
import subprocess
import sys
import time
# Script name (used in the generated HTML's "generator" meta tag).
prog = os.path.split(sys.argv[0])[-1]
myversion = 'Oct 10 2009'

# XHTML boilerplate for Galaxy composite-datatype output pages;
# the single %s is filled with the tool name.
galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://getgalaxy.org" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="document">
"""
def timenow():
    """Return the current local time formatted as dd/mm/YYYY HH:MM:SS."""
    # time.localtime() with no argument defaults to the current time.
    return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime())
def rgConv(inpedfilepath, outhtmlname, outfilepath, plink):
    """Run plink to recode a binary PED dataset into LPED format.

    Output files are written under *outfilepath* using the input's base name.
    Raises CalledProcessError if plink exits non-zero.
    """
    base = os.path.basename(inpedfilepath)
    out_root = os.path.join(outfilepath, base)
    cmd = [plink, '--noweb', '--bfile', inpedfilepath, '--recode', '--out', out_root]
    subprocess.check_call(cmd, cwd=outfilepath)
def main():
    """
    Convert a PBED composite datatype to LPED via plink.

    Works with rgenetics composite datatypes, so in and out are html files
    with data in the extra-files path. Expected CLI arguments (after the
    script name):
        1. input ped file path ($input1/$input1.metadata.base_name)
        2. output html file name
        3. output extra-files path
        4. path to the plink executable
    """
    nparm = 4
    # BUG FIX: argv[0] is the script itself, so a valid call needs
    # nparm + 1 entries; the original `len(sys.argv) < nparm` let a
    # 4-element argv through and then crashed on sys.argv[4].
    if len(sys.argv) < nparm + 1:
        sys.exit('PBED to LPED converter called with %s - needs %d parameters \n' % (sys.argv, nparm))
    inpedfilepath = sys.argv[1]
    outhtmlname = sys.argv[2]
    outfilepath = sys.argv[3]
    plink = sys.argv[4]
    # exist_ok replaces the old blanket try/except: an already-existing
    # directory is fine, but real failures (e.g. permissions) now surface.
    os.makedirs(outfilepath, exist_ok=True)
    rgConv(inpedfilepath, outhtmlname, outfilepath, plink)
    flist = os.listdir(outfilepath)
    with open(outhtmlname, 'w') as f:
        f.write(galhtmlprefix % prog)
        s = f'## Rgenetics: http://bitbucket.org/rgalaxy Galaxy Tools {prog} {timenow()}'  # becomes info
        print(s)
        f.write('<div>%s\n<ol>' % (s))
        for data in flist:
            f.write('<li><a href="{}">{}</a></li>\n'.format(os.path.split(data)[-1], os.path.split(data)[-1]))
        f.write("</ol></div></div></body></html>")


if __name__ == "__main__":
    main()
1600228 | <reponame>ruchirjain86/professional-services
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud.functions.context import Context
import logging
import os
import abc
from google.cloud.iam_credentials_v1 import IAMCredentialsClient
class NotConfiguredException(Exception):
    """Raised when a required configuration value is missing."""
    pass
class NoCredentialsException(Exception):
    """Raised when no service account is available for token minting."""
    pass
class Output:
    """Abstract base for output handlers.

    Holds the processed message data, the originating event/context and the
    configuration needed to emit it; subclasses implement output().
    """
    # All populated by __init__; declared here for discoverability.
    config = None
    output_config = None
    data = None
    event = None
    context: Context
    jinja_environment = None
    logger = None

    def __init__(self, config, output_config, jinja_environment, data, event,
                 context: Context):
        self.config = config
        self.output_config = output_config
        self.jinja_environment = jinja_environment
        self.data = data
        self.event = event
        self.context = context
        self.logger = logging.getLogger('pubsub2inbox')

    def get_token_for_scopes(self, scopes, service_account=None):
        """Mint an access token for *service_account* limited to *scopes*.

        Falls back to the SERVICE_ACCOUNT environment variable when no
        account is given; raises NoCredentialsException if neither is set.
        """
        sa_email = service_account or os.getenv('SERVICE_ACCOUNT')
        if not sa_email:
            raise NoCredentialsException(
                'You need to specify a service account for Directory API credentials, either through SERVICE_ACCOUNT environment variable or serviceAccountEmail parameter.'
            )
        client = IAMCredentialsClient()
        resource = 'projects/-/serviceAccounts/%s' % sa_email
        token_response = client.generate_access_token(name=resource, scope=scopes)
        return token_response.access_token

    @abc.abstractmethod
    def output(self):
        pass
3347868 | <reponame>KT12/capstone_in_progress
# -*- coding: utf-8 -*-
from gensim.parsing import PorterStemmer
global_stemmer = PorterStemmer()
class StemmingHelper(object):
    """
    Helper for the stemming process - maps a word to its stemmed form,
    and back again.

    The 'original' form of a stemmed word is the surface form that has
    been seen most often in the text.
    """
    # Reverse lookup: stem -> {surface form -> occurrence count}.
    word_lookup = {}

    @classmethod
    def stem(cls, word):
        """Stem *word* and record its surface form for reverse lookup."""
        stemmed = global_stemmer.stem(word)
        counts = cls.word_lookup.setdefault(stemmed, {})
        counts[word] = counts.get(word, 0) + 1
        return stemmed

    @classmethod
    def original_form(cls, word):
        """Return the most frequent surface form of a stemmed *word*.

        Unknown stems are returned unchanged.
        """
        if word in cls.word_lookup:
            counts = cls.word_lookup[word]
            return max(counts, key=counts.get)
        return word
355761 | <filename>alti_discord/bot_database.py<gh_stars>1-10
import sqlite3
class BotDatabase:
    """SQLite-backed mapping between Altitude vapor IDs and Discord IDs."""

    def __init__(self):
        # Opens (creating on first use) the local database file and makes
        # sure the mapping table exists.
        self.connection = sqlite3.connect('alti_discord.database')
        self.cursor = self.connection.cursor()
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS AltiDiscord(vaporId TEXT, discordId TEXT)")
        self.connection.commit()

    def add(self, vapor_id, discord_id):
        """Persist a new vapor-id/discord-id pair."""
        self.cursor.execute("INSERT INTO AltiDiscord VALUES(?, ?)",
                            (vapor_id, discord_id))
        self.connection.commit()

    def check_if_registered(self, vapor_id):
        """Return True when *vapor_id* has at least one stored mapping."""
        rows = self.cursor.execute(
            "SELECT 1 FROM AltiDiscord WHERE vaporId = ?", (vapor_id,))
        return any(row == (1,) for row in rows)

    def check_if_in_channel(self, vapor_id, discord_ids):
        """Return the Discord id when *vapor_id* maps into *discord_ids*, else None."""
        self.cursor.execute("SELECT vaporId, discordId FROM AltiDiscord")
        for vapor, discord in self.cursor.fetchall():
            if vapor == vapor_id and discord in discord_ids:
                return discord

    def get_in_channel(self, vapor_ids, discord_ids):
        """Return the vapor ids of all registered users present in the channel."""
        self.cursor.execute("SELECT vaporId, discordId FROM AltiDiscord")
        return [vapor for vapor, discord in self.cursor.fetchall()
                if vapor in vapor_ids and discord in discord_ids]
| StarcoderdataPython |
9681438 | <reponame>mfassler/python3-quanergyM8
#!/usr/bin/env python3
import sys
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
import time
import socket
import struct
import numpy as np
import matplotlib.pyplot as plt
import cv2
from quanergyM8 import Quanergy_M8_Parser, MAGIC_SIGNATURE
# Require exactly one CLI argument: the LiDAR sensor's IP address.
if len(sys.argv) != 2:
    print('usage: %s ip_address' % (sys.argv[0]))
    sys.exit(1)

# Quanergy M8 sensors stream packets on TCP port 4141.
lidar_address = (sys.argv[1], 4141)
qparse = Quanergy_M8_Parser()
# 256-entry RGB Jet colormap (alpha channel dropped); currently only used
# by the commented-out intensity-coloring code in the callback.
jet_colormap = plt.cm.jet(np.linspace(0,1,256))[:, :3]
# Debug handle to the parser instance captured inside the callback.
_self = None
def pointcloud_callback(self):
    """Render a top-down occupancy image of the horizontal-laser points.

    Assigned onto the parser as `qparse.pointcloud_callback`, so *self* is
    presumably the Quanergy_M8_Parser instance that invokes it — confirm in
    the quanergyM8 module.
    """
    # NOTE(review): `ax` and `pcd` are declared global but never defined or
    # used anywhere in this file — likely leftovers from a plotting variant.
    global ax
    global pcd
    global _self
    _self = self  # keep a debug handle to the parser
    pointcloud = self.pointclouds[self.pc_idx]
    intensities = self.intensities[self.pc_idx]
    numpoints = self.num_points[self.pc_idx]
    print(" ******* NEW Pointcloud! %0.03f %d pts" % (time.time(), numpoints))
    # These are all the points from just the horizontal laser:
    horizIdxs = np.where(pointcloud[:numpoints, 2] == 0)
    horizGrid = pointcloud[horizIdxs]
    # Flip the Y axis:
    horizGrid[:,1] = -horizGrid[:,1]
    width_px = 800
    height_px = 600
    pixels_per_meter = 50
    img = np.zeros((height_px, width_px), np.uint8)
    # center the coords:
    horizGrid[:,0] += (width_px / 2) / pixels_per_meter
    horizGrid[:,1] += (height_px / 2) / pixels_per_meter
    # convert to pixel coordinates:
    coords = np.round(horizGrid * pixels_per_meter).astype(np.uint32)
    # remove out-of-bound coordinates:
    coords = coords[ coords[:, 0] >= 0]
    coords = coords[ coords[:, 1] >= 0]
    coords = coords[ coords[:, 0] < width_px]
    coords = coords[ coords[:, 1] < height_px]
    # Remember: Y coords become "row index" and X coords become "column index":
    img[ coords[:, 1], coords[:, 0] ] = 255
    # Show the frame; waitKey(1) lets the OpenCV window refresh.
    cv2.imshow('asdf', img)
    cv2.waitKey(1)
    # Convert the intensities into a Jet colormap (disabled for now):
    #ii = intensities[:numpoints]
    #colors = jet_colormap[ii]
# Hook the visualization callback into the parser.
qparse.pointcloud_callback = pointcloud_callback

# Offline-replay variant kept for reference:
#f = open('/data/Lidar_Capture/quanergy_capture-1.raw', 'rb')
#allData = f.read()
#f.close()
#print("expecting about ~%d packets" % (len(allData) / 6632))

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("0.0.0.0", 0))
sock.connect(lidar_address)

# Scan the TCP stream byte-by-byte for the 4-byte magic signature, then
# read the 16-byte big-endian header and dispatch the payload it describes.
while True:
    ch0 = ord(sock.recv(1))
    if ch0 == MAGIC_SIGNATURE[0]:
        ch1 = ord(sock.recv(1))
        if ch1 == MAGIC_SIGNATURE[1]:
            ch2 = ord(sock.recv(1))
            if ch2 == MAGIC_SIGNATURE[2]:
                ch3 = ord(sock.recv(1))
                if ch3 == MAGIC_SIGNATURE[3]:
                    header = sock.recv(16, socket.MSG_WAITALL)
                    size, seconds, nanoseconds, \
                        version_major, version_minor, version_patch, \
                        packet_type = struct.unpack('>IIIBBBB', header)
                    # size is either 6632 or 2224
                    # packet_type is either 0 or 4
                    if packet_type == 0 and size == 6632:
                        # 6612 = 6632 total minus 4 magic bytes and 16 header bytes.
                        pkt = sock.recv(6612, socket.MSG_WAITALL)
                        qparse.parse_00(pkt)
                    else:
                        print('unsupported packet type: %d, %d bytes' % (packet_type, size))
| StarcoderdataPython |
338866 | import torch, math
from .CosineSimilarity import CosineSimilarity
DEFAULT = CosineSimilarity(dim=2)
class PrototypeSimilarity(torch.nn.Module):
    """Learnable per-class prototype vectors compared to inputs via a similarity kernel."""

    def __init__(self, features, classes, similarity=DEFAULT):
        super().__init__()
        self.C = classes
        self.D = features
        # One prototype of length `features` per class, with a leading
        # broadcast dimension for the batch.
        self.weight = torch.nn.Parameter(torch.zeros(1, classes, features))
        self.similarity = similarity
        self.reset_parameters()

    def forward(self, X):
        '''
        Input:
            X - torch Tensor of shape (N, D, *), input features.
        Output:
            X' - torch Tensor of shape (N, C, *), the similarity between
                 the feature vector and each of the C prototypes.
        '''
        X = X.unsqueeze(1)
        # Pad the prototypes with trailing singleton dims so they broadcast
        # against any extra spatial dimensions of X.
        extra_dims = X.dim() - self.weight.dim()
        P = self.weight.view(*self.weight.shape, *([1] * extra_dims))
        return self.similarity(X, P)

    def reset_parameters(self):
        # Same Kaiming-uniform init as torch.nn.Linear uses for its weight.
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
| StarcoderdataPython |
1902676 | <reponame>VictorMello1993/CursoPythonUdemy<gh_stars>0
# %% [markdown]
## Criando uma classe Data
class Data:
    """A calendar date whose dia/mes/ano attributes are set by the caller."""

    def __str__(self):
        # Magic method: invoked implicitly by print()/str() on the instance.
        return f'{self.dia}/{self.mes}/{self.ano}'
# Instantiate a Data object and fill in its attributes.
d1 = Data()
d1.dia = 3
d1.mes = 3
d1.ano = 2020
'''
Em Python, toda a criação dos métodos de uma classe deve
sempre começar com o objeto self como primeiro parâmetro,
que representa um objeto que está sendo referenciado
ao realizar a chamada de um método'''
# (Above: in Python, every method definition starts with `self`, which
# refers to the instance the method is called on.)
# Instantiate a second Data object.
d2 = Data()
d2.dia = 15
d2.mes = 12
d2.ano = 1993
# Explicit method calls (old toStr variant, now removed):
# d1.toStr() # here `self` would point to d1
# d2.toStr() # here `self` would point to d2
# Invoking the magic method implicitly: print() calls __str__.
print(d1)
print(d2)
5103769 | #!/usr/bin/env python
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from ccsTools import ccsProducer, CcsRaftSetup
# Run the ts8_ready_acq harnessed job via the CCS Jython script, using the
# raft-level CCS setup; presumably IANDTJOBSDIR points at the jobs checkout
# whose python/ directory is added to sys.path — confirm in the deploy docs.
ccsProducer('ts8_ready_acq', 'ccs_ts8_ready_acq.py',
            ccs_setup_class=CcsRaftSetup,
            sys_paths=(os.path.join(os.environ['IANDTJOBSDIR'], 'python'),))
| StarcoderdataPython |
3394766 | import torch
from torch.nn.parameter import Parameter
import numbers
import numpy as np
from scipy.special import factorial
from . import point_process
from . import distributions as dist
from . import base
class count_model(base._likelihood):
    """
    Count likelihood base class.

    Subclasses implement ``nll`` for a specific count distribution. An optional
    ``dispersion_mapping`` module provides input-dependent dispersion parameters
    (e.g. for heteroscedastic ZIP/NB/CMP likelihoods).
    """
    def __init__(self, tbin, neurons, dispersion_mapping, inv_link, tensor_type):
        super().__init__(tbin, neurons, neurons, inv_link, tensor_type)
        self.strict_likelihood = True  # include count normalization constants by default
        if dispersion_mapping is not None:
            # register as a submodule so its parameters are part of the model
            self.add_module('dispersion_mapping', dispersion_mapping)
        else:
            self.dispersion_mapping = None
    def set_params(self, strict_likelihood=None):
        """
        :param bool strict_likelihood: flag for whether to compute the count probability (involves
                                       constants to be loaded into memory); None leaves the
                                       current setting unchanged
        """
        if strict_likelihood is not None:
            self.strict_likelihood = strict_likelihood
    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Get all the activity into batches useable format for quick log-likelihood evaluation
        Tensor shapes: self.spikes (neuron_dim, batch_dim)
        tfact is the log of time_bin times the spike count
        lfact is log(spike count!), the log-factorial normalization term
        """
        super().set_Y(spikes, batch_size, filter_len=filter_len)
        self.lfact = []
        self.tfact = []
        self.totspik = []
        for b in range(self.batches):
            # drop the initial history segment used by GLM filter couplings
            spikes = self.spikes[b][..., self.filter_len-1:]
            self.totspik.append(spikes.sum(-1))
            self.tfact.append(spikes*torch.log(self.tbin.cpu()))
            self.lfact.append(torch.lgamma(spikes+1.))
    def KL_prior(self):
        """
        KL divergence contribution from the dispersion mapping prior; 0 if absent.
        """
        if self.dispersion_mapping is not None:
            return self.dispersion_mapping.KL_prior()
        else:
            return 0
    def sample_helper(self, h, b, neuron, samples):
        """
        NLL helper function for sample evaluation. Note that spikes is batched including history
        when the model uses history couplings, hence we sample the spike batches without the
        history segments from this function.
        """
        rates = self.f(h) # watch out for underflow or overflow here
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        if self.trials != 1 and samples > 1 and self.trials < h.shape[0]: # cannot rely on broadcasting
            spikes = spikes.repeat(samples, 1, 1) # MCxtrials
        if self.inv_link == 'exp': # spike count times log rate
            l_rates = (spikes*h)
        else:
            l_rates = (spikes*torch.log(rates+1e-12))
        return rates, l_rates, spikes
    def eval_dispersion_mapping(self, XZ, samples, neuron):
        """
        Posterior predictive mean of the dispersion model.
        """
        disp, disp_var = self.dispersion_mapping.compute_F(XZ)
        dh = self.mc_gen(disp, disp_var, samples, neuron)
        return self.dispersion_mapping.f(dh).mean(0) # watch out for underflow or overflow here
    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        """
        Computes the terms for variational expectation :math:`\mathbb{E}_{q(f)q(z)}[]`, which
        can be used to compute different likelihood objectives.
        The returned tensor will have sample dimension as MC over :math:`q(z)`, depending
        on the evaluation mode will be MC or GH or exact over the likelihood samples. This
        is all combined in to the same dimension to be summed over. The weights :math:`w_s`
        are the quadrature weights or equal weights for MC, with appropriate normalization.
        :param int samples: number of MC samples or GH points (exact will ignore and give 1)
        :returns: negative likelihood term of shape (samples, timesteps), sample weights (samples, 1)
        :rtype: tuple of torch.tensors
        """
        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, neuron) # h has only observed neurons
            rates, l_rates, spikes = self.sample_helper(h, b, neuron, samples)
            ws = torch.tensor(1./rates.shape[0])
        elif mode == 'GH':
            h, ws = self.gh_gen(F_mu, F_var, samples, neuron)
            rates, l_rates, spikes = self.sample_helper(h, b, neuron, samples)
            ws = ws[:, None]
        else:
            raise NotImplementedError
        if self.dispersion_mapping is None:
            disper_param = None
        else: # MC sampling
            disper_param = self.eval_dispersion_mapping(XZ, samples, neuron)
        return self.nll(b, rates, l_rates, spikes, neuron, disper_param), ws
class renewal_model(base._likelihood):
    """
    Renewal model base class.

    Spike trains are converted to spike-time indices and the likelihood is
    evaluated on time-rescaled inter-spike intervals (ISIs); subclasses supply
    the renewal density via ``nll``.
    """
    def __init__(self, tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize):
        super().__init__(tbin, neurons, neurons, inv_link, tensor_type)
        self.allow_duplicate = allow_duplicate  # allow counts > 1 per time bin
        self.dequant = dequantize  # randomize spike times within a bin during rescaling
    def train_to_ind(self, train):
        """
        Convert a count train (one neuron) to sorted spike-time indices.

        :returns: (sorted index tensor, bool flag for duplicate spikes in a bin)
        """
        if self.allow_duplicate:
            duplicate = False
            spike_ind = train.nonzero().flatten()
            bigger = torch.where(train > 1)[0]
            add_on = (spike_ind,)
            for b in bigger:
                # repeat the bin index for each extra spike in that bin
                add_on += (b*torch.ones(int(train[b])-1, device=train.device, dtype=int),)
            if len(add_on) > 1:
                duplicate = True
            spike_ind = torch.cat(add_on)
            return torch.sort(spike_ind)[0], duplicate
        else:
            return torch.nonzero(train).flatten(), False
    def ind_to_train(self, ind, timesteps):
        """
        Inverse of train_to_ind: scatter spike indices back into a count train.
        """
        train = torch.zeros((timesteps))
        train[ind] += 1
        return train
    def rate_rescale(self, neuron, spike_ind, rates, duplicate, minimum=1e-8):
        """
        Rate rescaling with option to dequantize, which will be random per sample.
        :param torch.tensor rates: input rates of shape (trials, neurons, timesteps)
        :returns: list of rescaled ISIs, list index over neurons, elements of shape (trials, ISIs)
        :rtype: list
        """
        # cumulative intensity Lambda(t); ISIs of a renewal process are unit-mean
        # under this time change
        rtime = torch.cumsum(rates, dim=-1)*self.tbin
        samples = rtime.shape[0]
        rISI = []
        for tr in range(self.trials):
            isis = []
            for en, n in enumerate(neuron):
                # NOTE(review): spike_ind/duplicate are indexed by the raw neuron id n
                # while rates use the enumerated position en — verify callers pass
                # consistent structures when neuron is a strict subset.
                if len(spike_ind[tr][n]) > 1:
                    if self.dequant:
                        deqn = torch.rand(
                            samples,
                            *spike_ind[tr][n].shape,
                            device=rates.device
                        )*rates[tr::self.trials, en, spike_ind[tr][n]]*self.tbin # assume spike at 0
                        tau = rtime[tr::self.trials, en, spike_ind[tr][n]] - deqn
                        if duplicate[n]: # re-order in case of duplicate spike_ind
                            tau = torch.sort(tau, dim=-1)[0]
                    else:
                        tau = rtime[tr::self.trials, en, spike_ind[tr][n]]
                    a = tau[:, 1:]-tau[:, :-1]
                    a[a < minimum] = minimum # don't allow near zero ISI
                    isis.append(a) # samples, order
                else:
                    isis.append([])
            rISI.append(isis)
        return rISI
    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Get all the activity into batches useable format for quick log-likelihood evaluation
        Tensor shapes: self.act [neuron_dim, batch_dim]
        """
        if self.allow_duplicate is False and spikes.max() > 1: # only binary trains
            raise ValueError('Only binary spike trains are accepted in set_Y() here')
        super().set_Y(spikes, batch_size, filter_len=filter_len)
        self.spiketimes = []
        self.intervals = torch.empty((self.batches, self.trials, self.neurons))
        self.duplicate = np.empty((self.batches, self.trials, self.neurons), dtype=bool)
        for b, spk in enumerate(self.spikes):
            spiketimes = []
            for tr in range(self.trials):
                cont = []
                for k in range(self.neurons):
                    s, self.duplicate[b, tr, k] = self.train_to_ind(spk[tr, k])
                    cont.append(s)
                    self.intervals[b, tr, k] = len(s)-1
                spiketimes.append(cont)
            self.spiketimes.append(spiketimes) # batch list of trial list of spike times list over neurons
    def sample_helper(self, h, b, neuron, scale, samples):
        """
        MC estimator for NLL function.
        :param torch.tensor scale: additional scaling of the rate rescaling to preserve the ISI mean
        :returns: tuple of rates, spikes*log(rates*scale), rescaled ISIs
        :rtype: tuple
        """
        scale = scale.expand(1, self.F_dims)[:, neuron, None] # rescale to get mean 1 in renewal distribution
        rates = self.f(h)*scale
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        if self.trials != 1 and samples > 1 and self.trials < h.shape[0]: # cannot rely on broadcasting
            spikes = spikes.repeat(samples, 1, 1) # trial blocks are preserved, concatenated in first dim
        if self.inv_link == 'exp': # bit masking seems faster than integer indexing using spiketimes
            l_rates = (spikes*(h+torch.log(scale))).sum(-1)
        else:
            l_rates = (spikes*torch.log(rates+1e-12)).sum(-1) # rates include scaling
        spiketimes = [[s.to(self.tbin.device) for s in ss] for ss in self.spiketimes[b]]
        # NOTE(review): self.duplicate[b] has shape (trials, neurons); rate_rescale
        # indexes it as duplicate[n] inside its trial loop — confirm intended.
        rISI = self.rate_rescale(neuron, spiketimes, rates, self.duplicate[b])
        return rates, l_rates, rISI
    def objective(self, F_mu, F_var, XZ, b, neuron, scale, samples=10, mode='MC'):
        """
        :param torch.tensor F_mu: model output F mean values of shape (samplesxtrials, neurons, time)
        :returns: negative likelihood term of shape (samples, timesteps), sample weights (samples, 1)
        :rtype: tuple of torch.tensors
        """
        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, neuron)
            rates, l_rates, rISI = self.sample_helper(h, b, neuron, scale, samples)
            ws = torch.tensor(1./rates.shape[0])
        else:
            raise NotImplementedError
        return self.nll(l_rates, rISI, neuron), ws
    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample spike trains from the modulated renewal process.
        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        neuron = self._validate_neuron(neuron)
        spiketimes = point_process.gen_IRP(self.ISI_dist(neuron), rate[:, neuron, :], self.tbin.item())
        tr_t_spike = []
        for sp in spiketimes:
            tr_t_spike.append(self.ind_to_train(torch.tensor(sp), rate.shape[-1]).numpy())
        return np.array(tr_t_spike).reshape(rate.shape[0], -1, rate.shape[-1])
# Special cases
class Spike_phase(base._likelihood):
    """
    Spike-phase likelihood (appears to be work in progress).

    NOTE(review): this class looks unfinished/broken — several methods reference
    undefined names or attributes that are never set (see inline notes). Verify
    before use.
    """
    def __init__(self, tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize):
        # NOTE(review): allow_duplicate and dequantize are accepted but never stored.
        super().__init__(tbin, neurons, neurons, inv_link, tensor_type)
    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Assumes at time zero, we start at global phase zero. At the end after the last spike, we do
        not increment the phase here.
        """
        assert spikes.max() < 2 # only binary trains
        super().set_Y(spikes, batch_size, filter_len=filter_len)
        phases = [] # list of spike phases
        for spiketrain in self.all_spikes: # loop over trials
            cont = []
            dphase = torch.zeros(*spiketrain.shape, dtype=self.tensor_type)
            for k in range(self.neurons):
                # NOTE(review): uses the whole spiketrain, not row k — likely
                # should be spiketrain[k]; also `cur` is never advanced inside
                # the loop below and `cont` is unused.
                locs = torch.nonzero(spiketrain).flatten()
                dlocs = torch.cat((locs[0:1]+1, locs[1:]-locs[:-1]))
                cur = 0
                for dd in dlocs:
                    dphase[k, cur:cur+dd] = 1./dd
            phases.append(torch.cumsum(dphase, dim=-1)) # global spike phase
        self.phases = torch.stack(phases) # tr, n, time
    def geodesic(x, y):
        """
        Returns the geodesic displacement between x and y, (x-y), on the circle
        parameterized over [0, 1).

        NOTE(review): missing `self` — as written, `self` binds to x when called
        as an instance method.
        """
        xy = (x-y) % 1.
        xy[xy > 0.5] -= 1.
        return xy
    def sample_helper(self, h, b, neuron, scale, samples):
        """
        MC estimator for NLL function.
        :param torch.tensor scale: additional scaling of the rate rescaling to preserve the ISI mean
        :returns: tuple of rates, spikes*log(rates*scale), rescaled ISIs
        :rtype: tuple

        NOTE(review): returns only `rates`, but objective() unpacks three values;
        also references self.spiketimes/self.duplicate/self.rate_rescale which are
        not defined in this class.
        """
        rates = self.f(h)
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        if self.inv_link == 'exp': # bit masking seems faster than integer indexing using spiketimes
            l_rates = (spikes*(h+torch.log(scale))).sum(-1)
        else:
            l_rates = (spikes*torch.log(rates+1e-12)).sum(-1) # rates include scaling
        spiketimes = [s.to(self.tbin.device) for s in self.spiketimes[b]]
        rISI = self.rate_rescale(neuron, spiketimes, rates, self.duplicate[b])
        return rates
    def objective(self, F_mu, F_var, XZ, b, neuron, scale, samples=10, mode='MC'):
        """
        Variational objective over MC samples (see count_model.objective).
        """
        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, neuron)
            rates, l_rates, rISI = self.sample_helper(h, b, neuron, scale, samples)
            ws = torch.tensor(1./rates.shape[0])
        else:
            raise NotImplementedError
        return self.nll(l_rates, rISI, neuron), ws
    def nll(phase, tar_phase):
        """
        NOTE(review): dead/broken code — missing `self`; references undefined
        names (dev, x, theta, optimizer, iters, metric, lowest_loss unused) and
        returns the unbound name `nll`. Looks like an optimization snippet pasted
        in as a placeholder.
        """
        lowest_loss = np.inf
        shift = Parameter(torch.zeros(1, device=dev))
        a = Parameter(torch.zeros(1, device=dev))
        XX = torch.tensor(x, device=dev)
        HD = torch.tensor(theta, device=dev)
        losses = []
        for k in range(iters):
            optimizer.zero_grad()
            X_ = 2*np.pi*XX*a + shift
            loss = (metric(X_, HD, 'torus')**2).mean()
            loss.backward()
            optimizer.step()
            losses.append(loss.cpu().item())
            l_ = loss.cpu().item()
            print(l_)
        shift_ = shift.cpu().item()
        a_ = a.cpu().item()
        return nll
    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample spike trains from the modulated renewal process.
        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array

        NOTE(review): self.ISI_dist and self.ind_to_train are not defined in this
        class (they exist on renewal_model) — this will fail at runtime as-is.
        """
        neuron = self._validate_neuron(neuron)
        spiketimes = point_process.gen_IRP(self.ISI_dist(neuron), rate[:, neuron, :], self.tbin.item())
        tr_t_spike = []
        for sp in spiketimes:
            tr_t_spike.append(self.ind_to_train(torch.tensor(sp), rate.shape[-1]).numpy())
        return np.array(tr_t_spike).reshape(rate.shape[0], -1, rate.shape[-1])
class Universal(base._likelihood):
    """
    Universal count distribution with finite cutoff at max_count.

    Per-neuron count probabilities over {0, ..., max_count} are produced by
    ``mapping_net`` (N*C input features -> N*K logits) followed by a log-softmax
    over the count (event) dimension.
    """
    def __init__(self, neurons, C, inv_link, max_count, mapping_net, tensor_type=torch.float):
        super().__init__(1., neurons*C, neurons, inv_link, tensor_type) # dummy tbin
        self.K = max_count+1  # size of the event (count) dimension
        self.C = C  # feature channels per neuron
        self.neurons = neurons
        self.lsoftm = torch.nn.LogSoftmax(dim=-1)
        self.add_module('mapping_net', mapping_net) # maps from NxC to NxK
    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Get all the activity into batches useable format for fast log-likelihood evaluation.
        Batched spikes will be a list of tensors of shape (trials, neurons, time) with trials
        set to 1 if input has no trial dimension (e.g. continuous recording).
        :param np.array spikes: becomes a list of [neuron_dim, batch_dim]
        :param int/list batch_size:
        :param int filter_len: history length of the GLM couplings (1 indicates no history coupling)
        :raises ValueError: if any count reaches or exceeds the distribution cutoff K
        """
        if self.K <= spikes.max():
            raise ValueError('Maximum count is exceeded in the spike count data')
        super().set_Y(spikes, batch_size, filter_len)
    def onehot_to_counts(self, onehot):
        """
        Convert one-hot vector representation of counts. Assumes the event dimension is the last.
        :param torch.tensor onehot: one-hot vector representation of shape (..., event)
        :returns: spike counts
        :rtype: torch.tensor
        """
        counts = torch.zeros(*onehot.shape[:-1], device=onehot.device)
        inds = torch.where(onehot)
        counts[inds[:-1]] = inds[-1].float()  # last index tuple holds the count value
        return counts
    def counts_to_onehot(self, counts):
        """
        Convert counts to one-hot vector representation. Adds the event dimension at the end.
        :param torch.tensor counts: spike counts of some tensor shape
        :returns: one-hot representation of shape (counts.shape, event)
        :rtype: torch.tensor
        """
        onehot = torch.zeros(*counts.shape, self.K, device=counts.device)
        onehot_ = onehot.view(-1, self.K)
        g = onehot_.shape[0]
        onehot_[np.arange(g), counts.flatten()[np.arange(g)].long()] = 1
        return onehot_.view(*onehot.shape)
    def get_logp(self, F_mu, neuron):
        """
        Compute count probabilities from the rate model output.
        :param torch.tensor F_mu: the F_mu product output of the rate model (samples and/or trials, F_dims, time)
        :param list neuron: neuron indices passed through to mapping_net
        :returns: log probability tensor
        :rtype: tensor of shape (samples and/or trials, n, t, c)
        """
        T = F_mu.shape[-1]
        samples = F_mu.shape[0]
        # flatten (samples, time) so mapping_net sees one feature vector per step
        a = self.mapping_net(F_mu.permute(0, 2, 1).reshape(samples*T, -1), neuron) # samplesxtime, NxK
        log_probs = self.lsoftm(a.view(samples, T, -1, self.K).permute(0, 2, 1, 3))
        return log_probs
    def sample_helper(self, h, b, neuron, samples):
        """
        NLL helper function for sample evaluation. Note the F_mu dimensions here is equal to NxC.
        :returns: (log probabilities, one-hot spike targets)
        """
        logp = self.get_logp(h, neuron)
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        if self.trials != 1 and samples > 1 and self.trials < h.shape[0]: # cannot rely on broadcasting
            spikes = spikes.repeat(samples, 1, 1)
        tar = self.counts_to_onehot(spikes)
        return logp, tar
    def _neuron_to_F(self, neuron):
        """
        Access subset of neurons in expanded space (C channels per neuron).
        """
        neuron = self._validate_neuron(neuron)
        if len(neuron) == self.neurons:
            F_dims = list(range(self.F_dims))
        else: # access subset of neurons
            F_dims = list(np.concatenate([np.arange(n*self.C, (n+1)*self.C) for n in neuron]))
        return F_dims
    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        """
        Computes the terms for variational expectation :math:`\mathbb{E}_{q(f)q(z)}[]`, which
        can be used to compute different likelihood objectives.
        The returned tensor will have sample dimension as MC over :math:`q(z)`, depending
        on the evaluation mode will be MC or GH or exact over the likelihood samples. This
        is all combined in to the same dimension to be summed over. The weights :math:`w_s`
        are the quadrature weights or equal weights for MC, with appropriate normalization.
        :param int samples: number of MC samples or GH points (exact will ignore and give 1)
        :returns: negative likelihood term of shape (samples, timesteps), sample weights
        """
        F_dims = self._neuron_to_F(neuron)
        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, F_dims) # h has only observed neurons (from neuron)
            logp, tar = self.sample_helper(h, b, neuron, samples)
            ws = torch.tensor(1./logp.shape[0])
        elif mode == 'GH':
            h, ws = self.gh_gen(F_mu, F_var, samples, F_dims)
            logp, tar = self.sample_helper(h, b, neuron, samples)
            ws = ws[:, None]
        else:
            raise NotImplementedError
        nll = -(tar*logp).sum(-1)  # cross-entropy against the one-hot targets
        return nll.sum(1), ws
    def sample(self, F_mu, neuron, XZ=None):
        """
        Sample spike counts from the categorical count distribution.
        :param numpy.array F_mu: model F output of shape (trials, F_dims, timestep)
        :param list neuron: neuron indices to sample from
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        F_dims = self._neuron_to_F(neuron)
        # bug fix: get_logp() requires the neuron list (forwarded to mapping_net);
        # it was previously called without it
        log_probs = self.get_logp(torch.tensor(F_mu[:, F_dims, :], dtype=self.tensor_type), neuron)
        # bug fix: `mdl` was an undefined name; use torch's Categorical directly,
        # which accepts unnormalized logits
        c_dist = torch.distributions.Categorical(logits=log_probs)
        return c_dist.sample().numpy()
# count distributions
class Bernoulli(count_model):
    """
    Inhomogeneous Bernoulli likelihood; restricts spike trains to binary counts.
    """
    def __init__(self, tbin, neurons, inv_link, tensor_type=torch.float):
        super().__init__(tbin, neurons, None, inv_link, tensor_type)
    def set_Y(self, spikes, batch_size, filter_len=1):
        """
        Batch the activity for fast log-likelihood evaluation; rejects any
        train with counts above 1 (Bernoulli is binary by definition).
        """
        assert spikes.max() < 2
        super().set_Y(spikes, batch_size, filter_len=filter_len)
    def get_saved_factors(self, b, neuron, spikes):
        """
        Get saved factors for proper likelihood values and perform broadcasting when needed.
        """
        if not self.strict_likelihood:
            return 0
        tfact = self.tfact[b][:, neuron, :].to(self.tbin.device)
        broadcastable = (tfact.shape[0] == 1) or (tfact.shape[0] >= spikes.shape[0])
        if not broadcastable:  # expand trial blocks explicitly
            tfact = tfact.repeat(spikes.shape[0]//tfact.shape[0], 1, 1)
        return tfact
    def nll(self, b, rates, l_rates, spikes, neuron, disper_param=None):
        # Bernoulli NLL per bin: spike term (l_rates + tfact) plus the
        # no-spike term log(1 - p) with p = rate * tbin; summed over neurons.
        tfact = self.get_saved_factors(b, neuron, spikes)
        silent_term = (1-spikes)*torch.log(1-rates*self.tbin)
        ll = l_rates + tfact + silent_term
        return -ll.sum(1)
    def sample(self, rate, neuron=None, XZ=None):
        """
        Takes into account the quantization bias if we sample IPP with dilation factor.
        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        chosen = self._validate_neuron(neuron)
        probs = rate[:, chosen, :]*self.tbin.item()
        return point_process.gen_IBP(probs)
class Poisson(count_model):
    """
    Poisson count likelihood.
    """
    def __init__(self, tbin, neurons, inv_link, tensor_type=torch.float):
        super().__init__(tbin, neurons, None, inv_link, tensor_type)
    def get_saved_factors(self, b, neuron, spikes):
        """
        Get saved factors for proper likelihood values and perform broadcasting when needed.
        Returns (tfact, lfact), or (0, 0) when strict_likelihood is off (constants
        drop out of gradients).
        """
        if self.strict_likelihood:
            tfact = self.tfact[b][:, neuron, :].to(self.tbin.device)
            lfact = self.lfact[b][:, neuron, :].to(self.tbin.device)
            if tfact.shape[0] != 1 and tfact.shape[0] < spikes.shape[0]: # cannot rely on broadcasting
                tfact = tfact.repeat(spikes.shape[0]//tfact.shape[0], 1, 1)
                lfact = lfact.repeat(spikes.shape[0]//lfact.shape[0], 1, 1)
        else:
            tfact, lfact = 0, 0
        return tfact, lfact
    def nll(self, b, rates, l_rates, spikes, neuron, disper_param=None):
        # Poisson NLL: -n*log(lambda) + lambda*tbin + log(n!), summed over neurons
        tfact, lfact = self.get_saved_factors(b, neuron, spikes)
        T = rates*self.tbin
        nll = (-l_rates + T - tfact + lfact)
        return nll.sum(1)
    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        """
        The Poisson likelihood with the log Cox process has an exact variational likelihood term.
        For inv_link == 'exp', E[exp(F)] = exp(F_mu + F_var/2) in closed form; otherwise
        fall back to the sampling-based parent objective.
        """
        if self.inv_link == 'exp': # exact
            if isinstance(F_var, numbers.Number) is False and len(F_var.shape) == 4: # diagonalize
                F_var = F_var.view(*F_var.shape[:2], -1)[:, :, ::F_var.shape[-1]+1]
            spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
            rates = self.f(F_mu + F_var/2.)[:, neuron, :] # watch out for underflow or overflow here
            l_rates = (spikes*F_mu[:, neuron, :])
            tfact, lfact = self.get_saved_factors(b, neuron, spikes)
            T = rates*self.tbin
            nll = (-l_rates + T - tfact + lfact)
            ws = torch.tensor(1./rates.shape[0])
            return nll.sum(1), ws # first dimension is summed over later (MC over Z), hence divide by shape[0]
        else:
            return super().objective(F_mu, F_var, XZ, b, neuron,
                                     samples=samples, mode=mode)
    def sample(self, rate, neuron=None, XZ=None):
        """
        Takes into account the quantization bias if we sample IPP with dilation factor.
        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        neuron = self._validate_neuron(neuron)
        return torch.poisson(torch.tensor(rate[:, neuron, :]*self.tbin.item())).numpy()
class ZI_Poisson(count_model):
    """
    Zero-inflated Poisson (ZIP) count likelihood. [1]

    A Poisson distribution mixed with an extra point mass alpha at zero counts.

    References:
    [1] `Untethered firing fields and intermittent silences: Why grid-cell discharge is so variable`
        (author names stripped in this source copy)
    """
    def __init__(self, tbin, neurons, inv_link, alpha=None, tensor_type=torch.float, dispersion_mapping=None):
        super().__init__(tbin, neurons, dispersion_mapping, inv_link, tensor_type)
        if alpha is not None:
            self.register_parameter('alpha', Parameter(torch.tensor(alpha, dtype=self.tensor_type)))
        else:
            self.alpha = None  # alpha then comes from the dispersion mapping
    def set_params(self, alpha=None, strict_ll=None):
        super().set_params(strict_ll)
        # ZIP requires the exact count probability, not just unnormalized terms
        assert self.strict_likelihood is True
        if alpha is not None:
            self.alpha.data = torch.tensor(alpha, device=self.tbin.device, dtype=self.tensor_type)
    def constrain(self):
        # alpha is a mixture probability, keep it in [0, 1]
        if self.alpha is not None:
            self.alpha.data = torch.clamp(self.alpha.data, min=0., max=1.)
    def get_saved_factors(self, b, neuron, spikes):
        """
        Get saved factors for proper likelihood values and perform broadcasting when needed.
        """
        tfact = self.tfact[b][:, neuron, :].to(self.tbin.device)
        lfact = self.lfact[b][:, neuron, :].to(self.tbin.device)
        if tfact.shape[0] != 1 and tfact.shape[0] < spikes.shape[0]: # cannot rely on broadcasting
            tfact = tfact.repeat(spikes.shape[0]//tfact.shape[0], 1, 1)
            lfact = lfact.repeat(spikes.shape[0]//lfact.shape[0], 1, 1)
        return tfact, lfact
    def nll(self, b, rates, l_rates, spikes, neuron, disper_param=None):
        # ZIP NLL: zero bins use -log(alpha + (1-alpha)p(0)); nonzero bins use
        # -log((1-alpha)p(n)) with p the Poisson pmf.
        if disper_param is None:
            alpha_ = self.alpha.expand(1, self.neurons)[:, neuron, None]
        else:
            alpha_ = disper_param
        tfact, lfact = self.get_saved_factors(b, neuron, spikes)
        T = rates*self.tbin
        zero_spikes = (spikes == 0) # mask
        nll_ = (-l_rates + T - tfact + lfact - torch.log(1.-alpha_)) # -log (1-alpha)*p(N)
        p = torch.exp(-nll_) # stable as nll > 0
        nll_0 = -torch.log(alpha_ + p)
        nll = zero_spikes*nll_0 + (~zero_spikes)*nll_
        return nll.sum(1)
    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample from ZIP process.
        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        neuron = self._validate_neuron(neuron)
        rate_ = rate[:, neuron, :]
        if self.dispersion_mapping is None:
            alpha_ = self.alpha[None, :, None].expand(rate.shape[0], self.neurons,
                                                      rate_.shape[-1]).data.cpu().numpy()[:, neuron, :]
        else:
            samples = rate.shape[0]
            alpha_ = self.eval_dispersion_mapping(XZ, samples,
                                                  neuron)[None, ...].expand(rate.shape[0],
                                                                            *rate_.shape[1:]).data.cpu().numpy()
        # Bernoulli zero-inflation mask applied on top of Poisson counts
        zero_mask = point_process.gen_IBP(alpha_)
        return (1.-zero_mask)*torch.poisson(torch.tensor(rate_*self.tbin.item())).numpy()
class Negative_binomial(count_model):
    """
    Gamma-Poisson mixture.
    :param np.array r_inv: :math:`r^{-1}` parameter of the NB likelihood, if left to None this value is
                           expected to be provided by the heteroscedastic model in the inference class.
    """
    def __init__(self, tbin, neurons, inv_link, r_inv=None, tensor_type=torch.float, dispersion_mapping=None):
        super().__init__(tbin, neurons, dispersion_mapping, inv_link, tensor_type)
        if r_inv is not None:
            self.register_parameter('r_inv', Parameter(torch.tensor(r_inv, dtype=self.tensor_type)))
        else:
            self.r_inv = None  # dispersion then comes from dispersion_mapping
    def set_params(self, r_inv=None, strict_ll=None):
        super().set_params(strict_ll)
        if r_inv is not None:
            self.r_inv.data = torch.tensor(r_inv, device=self.tbin.device, dtype=self.tensor_type)
    def constrain(self):
        if self.r_inv is not None:
            self.r_inv.data = torch.clamp(self.r_inv.data, min=0.) # effective r_inv > 1e-6 stabilized in NLL
    def get_saved_factors(self, b, neuron, spikes):
        """
        Get saved factors for proper likelihood values and perform broadcasting when needed.
        """
        if self.strict_likelihood:
            tfact = self.tfact[b][:, neuron, :].to(self.tbin.device)
            lfact = self.lfact[b][:, neuron, :].to(self.tbin.device)
            if tfact.shape[0] != 1 and tfact.shape[0] < spikes.shape[0]: # cannot rely on broadcasting
                tfact = tfact.repeat(spikes.shape[0]//tfact.shape[0], 1, 1)
                lfact = lfact.repeat(spikes.shape[0]//lfact.shape[0], 1, 1)
        else:
            tfact, lfact = 0, 0
        return tfact, lfact
    def nll(self, b, rates, l_rates, spikes, neuron, disper_param=None):
        """
        The negative log likelihood function. Note that if disper_param is not None, it will use those values for
        the dispersion parameter rather than its own dispersion parameters.
        :param int b: batch index to evaluate
        :param torch.tensor rates: rates of shape (trial, neuron, time)
        :param torch.tensor l_rates: spikes*log(rates)
        :param torch.tensor spikes: spike counts of shape (trial, neuron, time)
        :param list neuron: list of neuron indices to evaluate
        :param torch.tensor disper_param: input for heteroscedastic NB likelihood of shape (trial, neuron, time),
                                          otherwise uses fixed :math:`r_{inv}`
        :returns: NLL of shape (trial, time)
        :rtype: torch.tensor
        """
        if disper_param is None:
            # 1e-6 floor prevents division by a zero-clamped r_inv
            r_ = 1./(self.r_inv.expand(1, self.neurons)[:, neuron, None] + 1e-6)
        else:
            r_ = 1./(disper_param + 1e-6)
        tfact, lfact = self.get_saved_factors(b, neuron, spikes)
        lambd = rates*self.tbin
        fac_lgamma = (-torch.lgamma(r_+spikes) + torch.lgamma(r_))
        fac_power = ((spikes+r_)*torch.log(r_+lambd) - r_*torch.log(r_))
        nll = (-l_rates + fac_power + fac_lgamma - tfact + lfact)
        return nll.sum(1)
    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample from the Gamma-Poisson mixture.
        :param numpy.array rate: input rate of shape (trials, neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        neuron = self._validate_neuron(neuron)
        rate_ = rate[:, neuron, :]
        if self.dispersion_mapping is None:
            r_ = 1./(self.r_inv[None, :, None].expand(rate.shape[0], self.neurons,
                                                      rate_.shape[-1]).data.cpu().numpy()
                     + 1e-6)[:, neuron, :]
        else:
            samples = rate.shape[0]
            disp = self.eval_dispersion_mapping(XZ, samples, neuron)[None, ...].expand(rate.shape[0],
                                                                                       *rate_.shape[1:])
            r_ = 1./(disp.data.cpu().numpy() + 1e-6)
        # Gamma-distributed rate (shape r, mean lambda) fed into a Poisson
        s = np.random.gamma(r_, rate_*self.tbin.item()/r_)
        return torch.poisson(torch.tensor(s)).numpy()
class COM_Poisson(count_model):
    """
    Conway-Maxwell-Poisson, as described in
    https://en.wikipedia.org/wiki/Conway%E2%80%93Maxwell%E2%80%93Poisson_distribution.

    The partition function is approximated by truncating its series at J+1 terms.
    """
    def __init__(self, tbin, neurons, inv_link, nu=None, tensor_type=torch.float, J=100, dispersion_mapping=None):
        super().__init__(tbin, neurons, dispersion_mapping, inv_link, tensor_type)
        if nu is not None:
            self.register_parameter('log_nu', Parameter(torch.tensor(nu, dtype=self.tensor_type)))
        else:
            self.log_nu = None  # nu then comes from the dispersion mapping
        self.J = J  # series truncation length for the partition function
        self.register_buffer('powers', torch.tensor(np.arange(self.J+1), dtype=self.tensor_type).to(self.tbin.device))
        self.register_buffer('jfact', torch.lgamma(self.powers+1.).to(self.tbin.device))
    def set_params(self, log_nu=None, strict_ll=None):
        super().set_params(strict_ll)
        if log_nu is not None:
            self.log_nu.data = torch.tensor(log_nu, device=self.tbin.device, dtype=self.tensor_type)
    def get_saved_factors(self, b, neuron, spikes):
        """
        Get saved factors for proper likelihood values and perform broadcasting when needed.
        Note lfact is always loaded here since it enters the NLL scaled by nu.
        """
        if self.strict_likelihood:
            tfact = self.tfact[b][:, neuron, :].to(self.tbin.device)
            if tfact.shape[0] != 1 and tfact.shape[0] < spikes.shape[0]: # cannot rely on broadcasting
                tfact = tfact.repeat(spikes.shape[0]//tfact.shape[0], 1, 1)
        else:
            tfact = 0
        lfact = self.lfact[b][:, neuron, :].to(self.tbin.device)
        if lfact.shape[0] != 1 and lfact.shape[0] < spikes.shape[0]: # cannot rely on broadcasting
            lfact = lfact.repeat(spikes.shape[0]//lfact.shape[0], 1, 1)
        return tfact, lfact
    def log_Z(self, log_lambda, nu):
        """
        Partition function.
        :param torch.tensor lambd: lambda of shape (samples, neurons, timesteps)
        :param torch.tensor nu: nu of shape (samples, neurons, timesteps)
        :returns: log Z of shape (samples, neurons, timesteps)
        :rtype: torch.tensor
        """
        #indx = torch.where((self.powers*lambd.max() - nu_.min()*self.j) < -1e1) # adaptive
        #if len(indx) == 0:
        #    indx = self.J+1
        # logsumexp over the truncated series j*log(lambda) - nu*log(j!)
        log_Z_term = (self.powers[:, None, None, None]*log_lambda[None, ...] - \
            nu[None, ...]*self.jfact[:, None, None, None])
        return torch.logsumexp(log_Z_term, dim=0)
    def nll(self, b, rates, l_rates, spikes, neuron, disper_param=None):
        """
        :param int b: batch index to evaluate
        :param torch.tensor rates: rates of shape (trial, neuron, time)
        :param torch.tensor l_rates: spikes*log(rates)
        :param torch.tensor spikes: spike counts of shape (neuron, time)
        :param list neuron: list of neuron indices to evaluate
        :param torch.tensor disper_param: input for heteroscedastic NB likelihood of shape (trial, neuron, time),
                                          otherwise uses fixed :math:`\nu`
        :returns: NLL of shape (trial, time)
        :rtype: torch.tensor
        """
        if disper_param is None:
            nu_ = torch.exp(self.log_nu).expand(1, self.neurons)[:, neuron, None]
        else:
            nu_ = torch.exp(disper_param) # nn.functional.softplus
        tfact, lfact = self.get_saved_factors(b, neuron, spikes)
        log_lambda = torch.log(rates*self.tbin+1e-12)
        l_Z = self.log_Z(log_lambda, nu_)
        nll = (-l_rates + l_Z - tfact + nu_*lfact)
        return nll.sum(1)
    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample from the CMP distribution.
        :param numpy.array rate: input rate of shape (neuron, timestep)
        :returns: spike train of shape (trials, neuron, timesteps)
        :rtype: np.array
        """
        neuron = self._validate_neuron(neuron)
        mu_ = rate[:, neuron, :]*self.tbin.item()
        if self.dispersion_mapping is None:
            nu_ = torch.exp(self.log_nu)[None, :, None].expand(
                rate.shape[0], self.neurons, mu_.shape[-1]).data.cpu().numpy()[:, neuron, :]
        else:
            samples = rate.shape[0]
            disp = self.eval_dispersion_mapping(XZ, samples, neuron)[None, ...].expand(rate.shape[0],
                                                                                       *mu_.shape[1:])
            nu_ = torch.exp(disp.data).cpu().numpy()
        return point_process.gen_CMP(mu_, nu_)
# renewal distributions
class Gamma(renewal_model):
    """
    Gamma renewal process.

    Inter-spike intervals are modelled as Gamma distributed after time
    rescaling by the rate; the ``shape`` parameter can be shared across all
    neurons or be per-neuron depending on its length.
    """
    def __init__(self, tbin, neurons, inv_link, shape, tensor_type=torch.float, allow_duplicate=True,
                 dequantize=True):
        """
        Renewal parameters shape can be shared for all neurons or independent.

        :param shape: initial Gamma shape parameter value(s)
        """
        super().__init__(tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize)
        self.register_parameter('shape', Parameter(torch.tensor(shape, dtype=self.tensor_type)))

    def set_params(self, shape=None):
        """Overwrite the Gamma shape parameter in place (no-op when None)."""
        if shape is not None:
            self.shape.data = torch.tensor(shape, device=self.shape.device, dtype=self.tensor_type)

    def constrain(self):
        # keep the shape parameter positive and bounded for numerical stability
        self.shape.data = torch.clamp(self.shape.data, min=1e-5, max=2.5)

    def nll(self, l_rates, rISI, neuron):
        """
        Gamma case, approximates the spiketrain NLL (takes tbin into account for NLL).

        :param torch.tensor l_rates: log-rate term at spike times with leading
            sample dimension (samples = ll_samples x cov_samples)
        :param rISI: rate-rescaled ISIs, nested per trial then per neuron
        :param np.array neuron: neuron indices that are used
        :returns: NLL summed over neurons, shape (samples, 1)
        :rtype: torch.tensor
        """
        samples_ = l_rates.shape[0] # ll_samplesxcov_samples, in case of trials trial_num=cov_samples
        shape_ = self.shape.expand(1, self.F_dims)[:, neuron]
        # Ignore the end points of the spike train
        # d_Lambda_i = rates[:self.spiketimes[0]].sum()*self.tbin
        # d_Lambda_f = rates[self.spiketimes[ii]:].sum()*self.tbin
        # l_start = torch.empty((len(neuron)), device=self.tbin.device)
        # l_end = torch.empty((len(neuron)), device=self.tbin.device)
        # l_start[n_enu] = torch.log(sps.gammaincc(self.shape.item(), d_Lambda_i))
        # l_end[n_enu] = torch.log(sps.gammaincc(self.shape.item(), d_Lambda_f))
        intervals = torch.zeros((samples_, len(neuron)), device=self.tbin.device)
        T = torch.empty((samples_, len(neuron)), device=self.tbin.device) # MC samples, neurons
        l_Lambda = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        # accumulate ISI statistics; the tr::self.trials stride assigns each
        # trial's values to all MC samples belonging to that trial
        for tr, isis in enumerate(rISI): # over trials
            for n_enu, isi in enumerate(isis): # over neurons
                if len(isi) > 0: # nonzero number of ISIs
                    intervals[tr::self.trials, n_enu] = isi.shape[-1]
                    T[tr::self.trials, n_enu] = isi.sum(-1)
                    l_Lambda[tr::self.trials, n_enu] = torch.log(isi+1e-12).sum(-1)
                else:
                    T[tr::self.trials, n_enu] = 0 # TODO: minibatching ISIs approximate due to b.c.
                    l_Lambda[tr::self.trials, n_enu] = 0

        # negative Gamma log density of the rescaled ISIs
        nll = -(shape_-1)*l_Lambda - l_rates + T + intervals[None, :]*torch.lgamma(shape_)
        return nll.sum(1, keepdims=True) # sum over neurons, keep as dummy time index

    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        # delegate to the renewal-model objective, passing the shape parameter
        return super().objective(F_mu, F_var, XZ, b, neuron,
                                 self.shape, samples=samples, mode=mode)

    def ISI_dist(self, n):
        """Return the unit-mean Gamma ISI distribution object for neuron n."""
        shape = self.shape[n].data.cpu().numpy()
        return point_process.ISI_gamma(shape, scale=1./shape)
class logNormal(renewal_model):
    """
    Log-normal ISI distribution.
    Ignores the end points of the spike train in each batch.
    """
    def __init__(self, tbin, neurons, inv_link, sigma, tensor_type=torch.float, allow_duplicate=True,
                 dequantize=True):
        """
        :param np.array sigma: :math:`\sigma` parameter which is > 0
        """
        super().__init__(tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize)
        #self.register_parameter('mu', Parameter(torch.tensor(mu, dtype=self.tensor_type)))
        self.register_parameter('sigma', Parameter(torch.tensor(sigma, dtype=self.tensor_type)))
        # precomputed normalization constant 0.5*log(2*pi)
        self.register_buffer('twopi_fact', 0.5*torch.tensor(2*np.pi, dtype=self.tensor_type).log())

    def set_params(self, sigma=None):
        """Overwrite sigma in place (no-op when None)."""
        if sigma is not None:
            self.sigma.data = torch.tensor(sigma, device=self.sigma.device)

    def constrain(self):
        # keep sigma strictly positive
        self.sigma.data = torch.clamp(self.sigma.data, min=1e-5)

    def set_Y(self, spikes, batch_size, filter_len=1):
        super().set_Y(spikes, batch_size, filter_len=filter_len)

    def nll(self, l_rates, rISI, neuron):
        """
        Negative log likelihood of rate-rescaled ISIs under a log-normal
        distribution (zero-mean in log space).

        :param torch.tensor l_rates: log rates at spike times (samples, neurons, timesteps)
        :param torch.tensor rISI: modified rate rescaled ISIs
        :param np.array neuron: neuron indices that are used
        :returns: NLL summed over neurons, shape (samples, 1)
        :rtype: torch.tensor
        """
        sigma_ = self.sigma.expand(1, self.F_dims)[:, neuron]
        samples_ = l_rates.shape[0]
        l_Lambda = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        quad_term = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        norm_term = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        # the tr::self.trials stride assigns each trial's statistics to all MC
        # samples belonging to that trial
        for tr, isis in enumerate(rISI):
            for n_enu, isi in enumerate(isis):
                if len(isi) > 0: # nonzero number of ISIs
                    intervals = isi.shape[1]
                    l_Lambda[tr::self.trials, n_enu] = torch.log(isi+1e-12).sum(-1)
                    quad_term[tr::self.trials, n_enu] = 0.5*((torch.log(isi+1e-12)/sigma_[:, n_enu:n_enu+1])**2).sum(-1) # -mu_[:, n_enu:n_enu+1]
                    norm_term[tr::self.trials, n_enu] = intervals*(torch.log(sigma_[0, n_enu]) + self.twopi_fact)
                else:
                    l_Lambda[tr::self.trials, n_enu] = 0
                    quad_term[tr::self.trials, n_enu] = 0
                    norm_term[tr::self.trials, n_enu] = 0

        nll = -l_rates + norm_term + l_Lambda + quad_term
        return nll.sum(1, keepdims=True)

    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        # the exp(-sigma^2/2) factor is the renewal-model rate modulation term
        return super().objective(F_mu, F_var, XZ, b, neuron,
                                 torch.exp(-self.sigma**2/2.), samples=samples, mode=mode)

    def ISI_dist(self, n):
        """Return the log-normal ISI distribution object for neuron n."""
        sigma = self.sigma[n].data.cpu().numpy()
        return point_process.ISI_logNormal(sigma, scale=np.exp(sigma**2/2.))
class invGaussian(renewal_model):
    """
    Inverse Gaussian ISI distribution.
    Ignores the end points of the spike train in each batch.
    """
    def __init__(self, tbin, neurons, inv_link, mu, tensor_type=torch.float, allow_duplicate=True,
                 dequantize=True):
        """
        :param np.array mu: :math:`\mu` parameter which is > 0
        """
        super().__init__(tbin, neurons, inv_link, tensor_type, allow_duplicate, dequantize)
        self.register_parameter('mu', Parameter(torch.tensor(mu, dtype=self.tensor_type)))
        #self.register_parameter('lambd', Parameter(torch.tensor(lambd, dtype=self.tensor_type)))
        # precomputed normalization constant 0.5*log(2*pi)
        self.register_buffer('twopi_fact', 0.5*torch.tensor(2*np.pi, dtype=self.tensor_type).log())

    def set_params(self, mu=None):
        """Overwrite mu in place (no-op when None)."""
        if mu is not None:
            self.mu.data = torch.tensor(mu, device=self.mu.device)

    def constrain(self):
        # keep mu strictly positive
        self.mu.data = torch.clamp(self.mu.data, min=1e-5)

    def set_Y(self, spikes, batch_size, filter_len=1):
        super().set_Y(spikes, batch_size, filter_len=filter_len)

    def nll(self, l_rates, rISI, neuron):
        """
        Negative log likelihood of rate-rescaled ISIs under an inverse
        Gaussian distribution (the lambda parameter is fixed, see the
        commented-out lambd terms).

        :param torch.tensor l_rates: log rates at spike times (samples, neurons, timesteps)
        :param rISI: modified rate rescaled ISIs, nested per trial then neuron
        :param np.array neuron: neuron indices that are used
        :returns: NLL summed over neurons, shape (samples, 1)
        :rtype: torch.tensor
        """
        mu_ = self.mu.expand(1, self.F_dims)[:, neuron]
        samples_ = l_rates.shape[0]
        l_Lambda = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        quad_term = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        norm_term = torch.empty((samples_, len(neuron)), device=self.tbin.device)
        # the tr::self.trials stride assigns each trial's statistics to all MC
        # samples belonging to that trial
        for tr, isis in enumerate(rISI):
            for n_enu, isi in enumerate(isis):
                if len(isi) > 0: # nonzero number of ISIs
                    intervals = isi.shape[1]
                    l_Lambda[tr::self.trials, n_enu] = torch.log(isi+1e-12).sum(-1)
                    quad_term[tr::self.trials, n_enu] = 0.5*(((isi - mu_[:, n_enu:n_enu+1])/ \
                        mu_[:, n_enu:n_enu+1])**2 / isi).sum(-1) # (lambd_[:, n_enu:n_enu+1])
                    norm_term[tr::self.trials, n_enu] = intervals*(self.twopi_fact) # - 0.5*torch.log(lambd_[0, n_enu])
                else:
                    l_Lambda[tr::self.trials, n_enu] = 0
                    quad_term[tr::self.trials, n_enu] = 0
                    norm_term[tr::self.trials, n_enu] = 0

        # 1.5*l_Lambda is the inverse Gaussian -(-3/2)*log(t) density term
        nll = -l_rates + norm_term + 1.5*l_Lambda + quad_term
        return nll.sum(1, keepdims=True)

    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        return super().objective(F_mu, F_var, XZ, b, neuron,
                                 1./self.mu, samples=samples, mode=mode)

    def ISI_dist(self, n):
        """
        Note the scale parameter here is the inverse of the scale parameter in nll(), as the scale
        parameter here is :math:`\tau/s` while in nll() is refers to :math:`d\tau = s*r(t) \, \mathrm{d}t`
        """
        # self.lambd[n].data.cpu().numpy()
        mu = self.mu[n].data.cpu().numpy()
        return point_process.ISI_invGauss(mu, scale=mu)
# noise distribution
class Gaussian(base._likelihood):
    """
    Gaussian noise likelihood.
    Analogous to Factor Analysis.
    """
    def __init__(self, neurons, inv_link, log_var, tensor_type=torch.float):
        """
        :param np.array log_var: log observation noise of shape (neuron,) or (1,) if parameters tied
        """
        super().__init__(1., neurons, inv_link, tensor_type)  # dummy tbin
        self.register_parameter('log_var', Parameter(torch.tensor(log_var, dtype=self.tensor_type)))

    def set_params(self, log_var=None):
        """Overwrite the log observation noise in place (no-op when None)."""
        if log_var is not None:
            # place new values on the parameter's own device, consistent with
            # the other likelihood classes (previously used self.tbin.device)
            self.log_var.data = torch.tensor(log_var, device=self.log_var.device)

    def sample_helper(self, h, b, neuron, samples):
        """
        NLL helper function for MC sample evaluation.

        :returns: tuple of rates f(h) and the matching observed activity slice
        """
        rates = self.f(h)  # watch out for underflow or overflow here
        spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
        return rates, spikes

    def nll(self, rates, spikes, noise_var):
        """
        Gaussian negative log likelihood for the activity train.

        :param torch.tensor rates: predicted means (samples, neuron, timesteps)
        :param torch.tensor spikes: observed activity, broadcastable to rates
        :param torch.tensor noise_var: observation noise variance
        :returns: NLL summed over the neuron dimension
        """
        nll = .5*(torch.log(noise_var) + ((spikes - rates)**2)/noise_var) + \
              .5*torch.log(torch.tensor(2*np.pi))
        return nll.sum(1)

    def objective(self, F_mu, F_var, XZ, b, neuron, samples=10, mode='MC'):
        """
        Computes the terms for variational expectation :math:`\mathbb{E}_{q(f)q(z)}[]`, which
        can be used to compute different likelihood objectives.
        The returned tensor will have sample dimension as MC over :math:`q(z)`, depending
        on the evaluation mode will be MC or GH or exact over the likelihood samples. This
        is all combined in to the same dimension to be summed over. The weights :math:`w_s`
        are the quadrature weights or equal weights for MC, with appropriate normalization.

        :param int samples: number of MC samples or GH points (exact will ignore and give 1)
        :returns: negative likelihood term of shape (samples, timesteps), sample weights (samples, 1)
        :rtype: tuple of torch.tensors
        """
        # BUGFIX: a previous revision branched on `disper`/`disper_var`/`disp_f`,
        # none of which exist in this method's scope, so every call raised
        # NameError. Only the homoscedastic path driven by the learned log_var
        # parameter is supported; broadcast it over neurons and time.
        if self.log_var.shape[0] == 1:
            log_var = self.log_var.expand(1, len(neuron))[..., None]
        else:
            log_var = self.log_var[None, neuron, None]

        if self.inv_link == 'identity':  # exact Gaussian marginalization, no sampling needed
            spikes = self.spikes[b][:, neuron, self.filter_len-1:].to(self.tbin.device)
            if isinstance(F_var, numbers.Number):
                F_var = 0
            else:
                F_var = F_var[:, neuron, :]
            noise_var = (torch.exp(log_var) + F_var)

            nll = .5*(torch.log(noise_var) + ((spikes - F_mu)**2)/noise_var + F_var/noise_var) + \
                  .5*torch.log(torch.tensor(2*np.pi))
            ws = torch.tensor(1/F_mu.shape[0])
            return nll.sum(1), ws
        #elif self.inv_link == 'exp' # exact

        if mode == 'MC':
            h = self.mc_gen(F_mu, F_var, samples, neuron)
            rates, spikes = self.sample_helper(h, b, neuron, samples)
            ws = torch.tensor(1./rates.shape[0])
        elif mode == 'GH':
            h, ws = self.gh_gen(F_mu, F_var, samples, neuron)
            rates, spikes = self.sample_helper(h, b, neuron, samples)
            ws = ws[:, None]
        else:
            raise NotImplementedError

        return self.nll(rates, spikes, torch.exp(log_var)), ws

    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample activity trains [trial, neuron, timestep].
        """
        neuron = self._validate_neuron(neuron)
        rate_ = rate[:, neuron, :]
        if self.log_var.shape[0] == 1:
            log_var = self.log_var.expand(1, len(neuron)).data[..., None].cpu().numpy()
        else:
            log_var = self.log_var.data[None, neuron, None].cpu().numpy()
        # BUGFIX: np.random.randn takes separate dimension ints, not a shape
        # tuple (that raised TypeError); the old expression also had a stray
        # ", rate_" which made `act` a 2-tuple instead of an array.
        act = rate_ + np.exp(log_var/2.)*np.random.randn(rate.shape[0], len(neuron), rate.shape[-1])
        return act
class MultivariateGaussian(base._likelihood):
    r"""
    Account for noise correlations as in [1]. The covariance over neuron dimension is introduced.

    [1] `Learning a latent manifold of odor representations from neural responses in piriform cortex`,
        <NAME>, <NAME>, <NAME>, <NAME>, 2018
    """
    def __init__(self, neurons, inv_link, log_var, tensor_type=torch.float):
        """
        log_var can be shared or independent for neurons depending on the shape.

        NOTE(review): unlike Gaussian, no dummy tbin is passed to the base
        class here — confirm base._likelihood accepts this argument list.
        """
        super().__init__(neurons, inv_link, tensor_type)
        self.register_parameter('log_var', Parameter(torch.tensor(log_var, dtype=self.tensor_type)))

    def set_params(self, log_var=None, jitter=1e-6):
        # jitter is currently unused; kept for interface compatibility
        if log_var is not None:
            self.log_var.data = torch.tensor(log_var, device=self.tbin.device)

    def objective(self, F_mu, F_var, XZ, b, neuron, samples, mode='MC'):
        """
        Gaussian likelihood for activity train.
        samples introduces a sample dimension from the left;
        F_mu has shape (samples, neuron, timesteps);
        if F_var = 0, we don't expand by samples in the sample dimension.
        """
        spikes = self.spikes[b][None, neuron, self.filter_len-1:].to(self.tbin.device)  # activity
        batch_size = F_mu.shape[-1]
        if self.inv_link == 'identity':  # exact
            # NOTE(review): self.L (Cholesky factor of the neuron covariance)
            # is never registered in __init__ — this branch raises
            # AttributeError until that parameter is added.
            noise_var = (self.L @ self.L.t())[None, neuron, None] + F_var[:, neuron, :]
            nll = .5*(torch.log(noise_var) + ((spikes - F_mu)**2)/noise_var + F_var/noise_var).sum(-1) + \
                  .5*torch.log(torch.tensor(2*np.pi))*batch_size
        else:
            if F_var != 0:  # MC samples
                h = dist.Rn_Normal(F_mu, F_var)((samples,)).view(-1, *F_mu.shape[1:])[:, neuron, :]
                F_var = F_var.repeat(samples, 1, 1)
            else:
                h = F_mu[:, neuron, :]
            rates = self.f(h)
            noise_var = (torch.exp(self.log_var)[None, neuron, None] + F_var[:, neuron, :])
            nll = .5*(torch.log(noise_var) + ((spikes - rates)**2)/noise_var).sum(-1) + \
                  .5*torch.log(torch.tensor(2*np.pi))*batch_size

        ws = torch.tensor(1./nll.shape[0])
        return nll.sum(1), ws

    def sample(self, rate, neuron=None, XZ=None):
        """
        Sample activity trains [trial, neuron, timestep].
        """
        neuron = self._validate_neuron(neuron)
        # BUGFIX: np.random.randn takes separate dimension ints, not a shape
        # tuple (passing a tuple raised TypeError)
        act = rate + torch.exp(self.log_var).data.sqrt().cpu().numpy()*np.random.randn(rate.shape[0], len(neuron), rate.shape[-1])
        return act
| StarcoderdataPython |
4910161 | import sys
import os
# Build a lookup table from the tab-separated mapping file (argv[3]):
# first column -> second column.
s2l = {}
with open(sys.argv[3], 'r') as fi:
    for line in fi:
        fields = line.rstrip('\n').split('\t')
        # BUGFIX: the value previously kept its trailing newline, which
        # produced blank lines in the output when written with an extra '\n'
        s2l[fields[0]] = fields[1]
#print(s2l)

# For each line of the input file (argv[1]), look up the token between the
# first pair of '::' separators and write the mapped label to argv[2];
# unknown keys map to '-1' and are echoed to stdout for inspection.
with open(sys.argv[1], 'r') as fi, open(sys.argv[2], 'w') as fo:
    for line in fi:
        s = line.split('::')[1]
        l = s2l.get(s, '-1')
        if l == '-1':
            # Python 3 print() call (the original used a py2 print statement)
            print(s)
        fo.write(l + '\n')
| StarcoderdataPython |
5060319 | from django.test import TestCase
from django.urls import reverse
from accounts.models import UserProfileInfo, User
from admin_app.models import Magazine, Truck, Route
from accounts.forms import UserForm, UserProfileInfoForm
from django.test import Client
from django.test import TestCase
from order_app.models import Checkout, OrderedProducts
from products_app.models import Product
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from django.test import LiveServerTestCase
class AdminAppTestCase(TestCase):
    """
    Shared fixture for the admin-app view tests: two client users with
    profiles, several checkouts, an admin account, a deliverer user with a
    product and an ordered product, plus magazine, truck and route records.
    """
    def setUp(self):
        self.user1 = User.objects.create_user(username="user1", first_name="Name1", last_name="Last1",
                                              email="<EMAIL>", password='<PASSWORD>')
        self.user1_info = UserProfileInfo.objects.create(user=self.user1, company_name="company 1",
                                                         phone_number="123456789",
                                                         longitude=50.064824, latitude=19.923944, is_client=True)
        self.user2= User.objects.create_user(username="user2", first_name="Name1", last_name="Last1",
                                             email="<EMAIL>", password='<PASSWORD>')
        self.user2_info = UserProfileInfo.objects.create(user=self.user2, company_name="company 1",
                                                         phone_number="123456789",
                                                         longitude=50.064824, latitude=19.923944, is_client=True)
        # NOTE(review): this unconfirmed checkout is created but self.checkout
        # is re-assigned to a confirmed one a few lines below; the record stays
        # in the test DB without an attribute reference — confirm intended.
        self.checkout=Checkout.objects.create(name_client=self.user1,price=0,weigth=150,route_client=False,
                                              date='2018-12-20',hour=1,magazine=False,confirmed=False)
        self.checkout2=Checkout.objects.create(name_client=self.user2,price=200,weigth=50,route_client=False,
                                               date='2018-12-25',hour=2,magazine=False,confirmed=False)
        self.checkout3 = Checkout.objects.create(name_client=self.user1, price=200, weigth=50, route_client=False,
                                                 date='2018-12-25', hour=2, magazine=False, confirmed=False)
        self.admin=User.objects.create_user(username='admin', first_name='Admin',last_name='Admin', email='<EMAIL>',
                                            password='<PASSWORD>',is_staff=True, is_superuser=True)
        # the single confirmed checkout; list views assert exactly one entry
        self.checkout = Checkout.objects.create(name_client=self.user1, price=0, weigth=150, route_client=False,
                                                date='2018-12-20', hour=1, magazine=False, confirmed=True)
        # user3 is a deliverer (is_client=False) who owns the test product
        self.user3 = User.objects.create_user(username="user3", first_name="Name1", last_name="Last1",
                                              email="<EMAIL>", password='<PASSWORD>')
        self.user3_info = UserProfileInfo.objects.create(user=self.user3, company_name="company 1",
                                                         phone_number="123456789",
                                                         longitude=50.064824, latitude=19.923944, is_client=False)
        self.product = Product.objects.create(name='jabłko', genre='nwm', name_deliver=self.user3, amount=100, price=10)
        self.ordered_product = OrderedProducts.objects.create(id_checkout=self.checkout, name_deliver=self.user3,
                                                              name_product=self.product, amount=20, route=False,
                                                              id_route=0, magazine=False)
        self.c = Client()
        self.magazine = Magazine.objects.create(id_magazine=1,
                                                latitude=200.5678,
                                                longitude=133.21,
                                                radius=12)
        self.truck = Truck.objects.create(id_truck=1,
                                          capacity=200,
                                          return_date='2018-01-01')
        self.route = Route.objects.create(id_route=1,
                                          products_list='[200,12,34,56,76]',
                                          date='2018-01-01',
                                          id_truck=self.truck
                                          )
        self.route2 = Route.objects.create(id_route=2,
                                           products_list='[200,14,4,56,76]',
                                           date='2018-01-01',
                                           id_truck=self.truck
                                           )
        # NOTE(review): Client() is instantiated twice (also above) — harmless
        # but redundant
        self.c = Client()
class AdminCheckoutListViewTest(AdminAppTestCase):
    """The admin order list shows only confirmed checkouts and is admin-only."""
    def test_list_get(self):
        # fixture contains exactly one confirmed checkout, so the list has 1 entry
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:order_list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'admin_app/order_list.html')
        self.assertEqual(len(response.context['checkout_list']),1)

    def test_list_not_confirmed(self):
        # adding one more unconfirmed checkout must not change the visible list
        self.checkout2 = Checkout.objects.create(name_client=self.user1, price=0, weigth=150, route_client=False,
                                                 date='2018-12-20', hour=1, magazine=False, confirmed=False)
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:order_list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'admin_app/order_list.html')
        self.assertEqual(len(response.context['checkout_list']), 1)

    def test_list_not_superuser(self):
        # non-admin users are redirected (302) away from the admin list
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:order_list'))
        self.assertEqual(response.status_code, 302)
class AdminCheckoutDetailViewTest(AdminAppTestCase):
    """Access-control and payload checks for the admin order-detail view."""

    def test_detail_get(self):
        # An admin can open the detail page of an existing checkout.
        self.c.login(username='admin', password='<PASSWORD>')
        detail_url = reverse('admin_app:order_detail', kwargs={'pk': self.checkout.id})
        resp = self.c.get(detail_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context['order_details'].id, self.checkout.id)

    def test_detail_not_superuser(self):
        # A regular client is redirected away from the admin detail page.
        self.c.login(username='user1', password='<PASSWORD>')
        detail_url = reverse('admin_app:order_detail', kwargs={'pk': self.checkout.id})
        resp = self.c.get(detail_url)
        self.assertEqual(resp.status_code, 302)
class AdminProductListViewTest(AdminAppTestCase):
    """Access-control and content checks for the admin product list view."""

    def test_product_list_get(self):
        # An admin sees the single fixture product in the listing.
        self.c.login(username='admin', password='<PASSWORD>')
        resp = self.c.get(reverse('admin_app:product_list'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.context['product_list']), 1)

    def test_product_list_not_superuser(self):
        # A regular client is redirected away from the admin product list.
        self.c.login(username='user1', password='<PASSWORD>')
        resp = self.c.get(reverse('admin_app:product_list'))
        self.assertEqual(resp.status_code, 302)
class IndexView20Test(AdminAppTestCase):
    """GET/POST checks for the index20 dashboard view (trucks + tomorrow's routes)."""
    def test_index_view20_get(self):
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index20'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']),1)
        self.assertEqual(len(response.context['routes_tomorrow']),0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index20'))
        self.assertEqual(response.status_code, 302)

    def test_algorithm(self):
        # NOTE(review): smoke test only — posts clustering parameters but makes
        # no assertions; `response` is unused. It only verifies the POST does
        # not raise. Consider asserting on the response.
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.post(reverse('admin_app:index20'), data={'date': '2018-12-18', 'claster': '1'})
class IndexView21Test(AdminAppTestCase):
    """GET checks for the index21 dashboard view (trucks + tomorrow's routes)."""
    def test_index_view20_get(self):
        # NOTE(review): method name says "view20" but it exercises index21
        # (copy-pasted name)
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index21'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']),1)
        self.assertEqual(len(response.context['routes_tomorrow']),0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index21'))
        self.assertEqual(response.status_code, 302)
class IndexView22Test(AdminAppTestCase):
    """GET checks for the index22 dashboard view (trucks + tomorrow's routes)."""
    def test_index_view20_get(self):
        # NOTE(review): method name says "view20" but it exercises index22
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index22'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']),1)
        self.assertEqual(len(response.context['routes_tomorrow']),0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index22'))
        self.assertEqual(response.status_code, 302)
class IndexView23Test(AdminAppTestCase):
    """GET checks for the index23 dashboard view (trucks + tomorrow's routes)."""
    def test_index_view20_get(self):
        # NOTE(review): method name says "view20" but it exercises index23
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index23'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']), 1)
        self.assertEqual(len(response.context['routes_tomorrow']), 0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index23'))
        self.assertEqual(response.status_code, 302)
class IndexViewTest(AdminAppTestCase):
    """GET checks for the main index view (trucks + today's routes)."""
    def test_index_view20_get(self):
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']), 1)
        self.assertEqual(len(response.context['routes_today']), 0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index'))
        self.assertEqual(response.status_code, 302)
class IndexView1Test(AdminAppTestCase):
    """GET checks for the index1 view (trucks + today's routes)."""
    def test_index_view1_get(self):
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index1'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']), 1)
        self.assertEqual(len(response.context['routes_today']), 0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index1'))
        self.assertEqual(response.status_code, 302)
class IndexView2Test(AdminAppTestCase):
    """GET checks for the index2 view (trucks + today's routes)."""
    def test_index_view1_get(self):
        # NOTE(review): method name says "view1" but it exercises index2
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index2'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']), 1)
        self.assertEqual(len(response.context['routes_today']), 0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index2'))
        self.assertEqual(response.status_code, 302)
class IndexView3Test(AdminAppTestCase):
    """GET checks for the index3 view (trucks + today's routes)."""
    def test_index_view1_get(self):
        # NOTE(review): method name says "view1" but it exercises index3
        self.c.login(username='admin', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index3'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['trucks']), 1)
        self.assertEqual(len(response.context['routes_today']), 0)

    def test_product_list_not_superuser(self):
        # non-admin users are redirected (302)
        self.c.login(username='user1', password='<PASSWORD>')
        response = self.c.get(reverse('admin_app:index3'))
        self.assertEqual(response.status_code, 302)
# views (uses selenium)
class LoginSetUp(TestCase):
    """
    Opens a real Firefox browser (requires geckodriver) and logs in as 'Maria'
    against a dev server hard-coded at http://127.0.0.1:8000 — the server must
    already be running for these selenium tests to work.
    """
    def setUp(self):
        self.selenium = webdriver.Firefox()
        self.selenium.get('http://127.0.0.1:8000/accounts/user_login/')
        self.selenium.find_element_by_name('username').send_keys('Maria')
        self.selenium.find_element_by_name('password').send_keys('<PASSWORD>')
        self.selenium.find_element_by_name('login').click()
class TestViews(LiveServerTestCase, LoginSetUp):
    """
    Selenium-driven navigation checks for the admin dashboard menu: each test
    clicks one nav element and asserts on the page content.
    """
    def setUp(self):
        super().setUp()

    def tearDown(self):
        # close the browser opened by LoginSetUp
        self.selenium.quit()
        super(TestViews, self).tearDown()

    def test_index_view(self):
        selenium = self.selenium
        selenium.get('http://127.0.0.1:8000/admin_app/dashboard/')
        selenium.find_element_by_name('index').click()
        assert 'LOGOUT' in selenium.page_source

    def test_dashboard_view(self):
        selenium = self.selenium
        selenium.get('http://127.0.0.1:8000/admin_app/dashboard/')
        selenium.find_element_by_name('dashboard').click()
        assert 'Admin\'s dashboard' in selenium.page_source

    def test_today_view(self):
        selenium = self.selenium
        selenium.get('http://127.0.0.1:8000/admin_app/dashboard/')
        selenium.find_element_by_name('today').click()
        assert 'Routes for date:' in selenium.page_source

    def test_tomorrow_view(self):
        selenium = self.selenium
        selenium.get('http://127.0.0.1:8000/admin_app/dashboard/')
        selenium.find_element_by_name('tomorrow').click()
        assert 'Admin\'s dashboard' in selenium.page_source

    def test_orders_view(self):
        selenium = self.selenium
        selenium.get('http://127.0.0.1:8000/admin_app/dashboard/')
        selenium.find_element_by_name('orders').click()
        assert 'Order id' in selenium.page_source

    def test_products_view(self):
        # BUGFIX: this method was previously also named test_orders_view,
        # which shadowed the definition above so the orders test never ran.
        selenium = self.selenium
        selenium.get('http://127.0.0.1:8000/admin_app/dashboard/')
        selenium.find_element_by_name('products').click()
        assert 'Available Products' in selenium.page_source
| StarcoderdataPython |
3350414 | import pandas as pd
import Core_Scripts.Custom_Functions.Functions_Formatting_Data as formatFunc
from Core_Scripts.Custom_Functions.Functions_General import append_df_to_excel
import os
from collections import Counter
import datetime
import csv
# Widen the pandas display width so printed DataFrames aren't truncated
# when the results are shown in the __main__ block below.
pd.options.display.width = None
# Working directory captured at import time (before the __main__ block chdirs).
cwd = os.getcwd()
def script_data_history_summary(df):
    """
    Summarize per-day markup activity from the PDF metadata table.

    For every calendar day between the oldest creation date and the newest
    creation/modification date, tallies how many drawings were stamped by an
    engineer ('Engineer_Revised'), how many were completed by BIM
    ('BIM_Completed'), and the running backlog ('Total_Outstanding').
    Leading and trailing all-zero days are trimmed, keeping one buffer day on
    each side.

    :param pandas.DataFrame df: PDF data with '/CreationDate', '/ModDate',
        'StampedBy', 'Engineer_Revised' and 'BIM_Completed' columns
    :returns: DataFrame with columns Date, Engineer_Revised, BIM_Completed,
        Total_Outstanding
    """
    print('Data Gathering: Collating data for summarizing')
    # the column names of the main date values to check for the date ranges
    datecolumn = '/CreationDate'
    datecolumnMod = '/ModDate'
    # in pdf data, the column names for the engineer date stamp and the bim date stamped
    engready = 'Engineer_Revised'
    bimready = 'BIM_Completed'
    # column name for new column in visualization data
    remaining = 'Total_Outstanding'
    # remove rows that have errored date values (errors or electronic signature/stamps that arent markups)
    # if there are any
    try:
        df_date_calc = df.drop(df[df[datecolumn] == 'Error'].index)
    except ValueError:
        df_date_calc = df
    # get the max and minimum dates from all drawings that we have (based on solely creation dates)
    # this assumes that at the deadline day there are no markups remaining, thus creation date can be
    # a good metric to find out the range of dates to make
    try:
        newestCreate = max(df_date_calc[datecolumn])
        # only keep the rows whose modification date is not 'None' (a drawing
        # that is fresh and has no edits)
        modified_list = df_date_calc[df_date_calc[datecolumnMod] != 'None']
        try:
            newestMod = max(modified_list[datecolumnMod])
        except TypeError:
            # BUGFIX: datetime.datetime(0, 0, 0, 0, 0, 0) raises ValueError
            # because MINYEAR is 1; datetime.min is the intended
            # "older than anything" sentinel.
            newestMod = datetime.datetime.min
        newest = max([newestMod, newestCreate])
    except ValueError:
        newest = datetime.datetime.today()
    oldest = min(df_date_calc[datecolumn])
    # create a list of dates starting from a known date and ending at another known date
    # (the removed `closed=None` keyword was the default and no longer exists
    # in pandas >= 2.0)
    daterange = pd.date_range(start=oldest.date(), end=newest.date())
    daterange = [dt.date() for dt in daterange]
    # simplify data by reducing the irrelevant rows to quicker process
    df_adjusted = df.drop(df[df['StampedBy'] == "None"].index)
    # Counter has a dict interface returning 0 for missing dates, so indexing
    # by a date yields that day's tally directly
    ready_ymd = [formatFunc.extract_ymd(dv, dtformat='%y-%m-%d %H:%M:%S') for dv in df_adjusted[engready]]
    ready_dict = Counter(ready_ymd)
    complete_ymd = [formatFunc.extract_ymd(dv, dtformat='%y-%m-%d %H:%M:%S') for dv in df_adjusted[bimready]]
    complete_dict = Counter(complete_ymd)
    ready = []
    completed = []
    outstanding = []
    for i, eachdate in enumerate(daterange):
        # tally of engineer stamps for the current date (zero if no match)
        newengstamped = ready_dict[eachdate]
        ready.append(newengstamped)
        # tally of BIM completions for the current date
        newbimstamped = complete_dict[eachdate]
        completed.append(newbimstamped)
        # this try block should only fail once (first index)
        try:
            previousoustanding = outstanding[i-1]
        except IndexError:
            previousoustanding = 0
        # new outstanding = engineer stamps minus bim stamps plus the backlog
        newoutstanding = newengstamped - newbimstamped + previousoustanding
        outstanding.append(newoutstanding)
    # create the dataframe to store all the resulting data
    time_df = pd.DataFrame()
    time_df['Date'] = daterange
    time_df[engready] = ready
    time_df[bimready] = completed
    time_df[remaining] = outstanding
    print('Data Gathering: History summary completed')
    # remove extra dates (long stream of zeros from start to first markup, and from last markup to end)
    # start at first entry, find where there is a submitted or completed drawing and save the index before it
    first_markup = 0
    for eachindex in range(0, len(time_df[remaining])):
        # True when the current day has no engineer or bim stamps
        all_zeroes = all([time_df[engready].iloc[eachindex] == 0,
                          time_df[bimready].iloc[eachindex] == 0])
        # when the above condition fails, all leading 0's have been identified
        if not all_zeroes:
            # BUGFIX: clamp at 0 — when the very first day already has markups
            # this used to become -1, and index[:-1] dropped every row but the
            # last instead of dropping nothing
            first_markup = max(eachindex - 1, 0)
            break
    # remove all rows that have no entries until just before the first markup is seen
    time_df.drop(time_df.index[:first_markup], inplace=True)
    # start at the maximum index, and go in reverse until the last submitted or completed drawing
    last_markup = len(time_df[remaining])
    for eachindex in range(len(time_df[remaining]) - 1, 0, -1):
        # same condition as for leading zeroes
        all_zeroes = all([time_df[engready].iloc[eachindex] == 0,
                          time_df[bimready].iloc[eachindex] == 0])
        if not all_zeroes:
            # +2 keeps one date AFTER the last markup (the slice start is the
            # first row to drop, so we go one past the buffer day)
            last_markup = eachindex + 2
            break
    time_df.drop(time_df.index[last_markup:], inplace=True)
    # reset indices for clarity
    time_df.reset_index(inplace=True, drop=True)
    return time_df
# run this if the script is ran standalone
if __name__ == '__main__':
configName = '../Local_Config.csv'
# get the config file
with open(configName) as config_file:
reader = csv.reader(config_file)
config = dict(reader)
db_name = config['DB_Filename']
xlPath = config['DB_Filepath']
os.chdir(xlPath)
datasheet = 'PDF_Data'
visualize_sheet = 'VisualizationData'
db_pdf_data = pd.read_excel(db_name, sheet_name=datasheet)
time_data = script_data_history_summary(db_pdf_data)
print('Data Gathering: Data result:')
print(time_data)
print('Data Gathering: Saving to database')
append_df_to_excel(db_name, time_data, visualize_sheet, startrow=0, truncate_sheet=True)
print('Data Gathering: Save complete')
| StarcoderdataPython |
233859 | from src.card import *
class TestCard:
    """Unit tests for Card: construction, string form, rotation, side getters."""
    # NOTE(review): a single Card instance is shared by ALL tests (class
    # attribute), as the author's comment notes, to avoid re-creation per
    # test. This makes the tests order-dependent: test_card_rotation mutates
    # the shared card and only leaves it unchanged because it performs a full
    # 4-rotation cycle — confirm this coupling is acceptable.
    card = Card(
        [7, 8, 3, 4]
    )  # we use an attributes to avoid recreate a card instance for each test

    def test_create_card(self):
        assert self.card.sides == [7, 8, 3, 4]

    def test_card_tostring(self):
        assert self.card.toString() == "7, 8, 3, 4"

    def test_card_rotation(self):
        # each rotation shifts the sides right by one and bumps rotation_count;
        # four rotations restore the initial ordering
        self.card.rotation()
        assert self.card.sides == [4, 7, 8, 3]
        assert self.card.rotation_count == 1
        self.card.rotation()
        assert self.card.sides == [3, 4, 7, 8]
        assert self.card.rotation_count == 2
        self.card.rotation()
        assert self.card.sides == [8, 3, 4, 7]
        assert self.card.rotation_count == 3
        self.card.rotation()
        assert self.card.sides == [7, 8, 3, 4]
        assert self.card.rotation_count == 4

    def test_card_getLeft(self):
        assert self.card.getLeft() == 7

    def test_card_getUp(self):
        assert self.card.getUp() == 8

    def test_card_getRight(self):
        assert self.card.getRight() == 3

    def test_card_getDown(self):
        assert self.card.getDown() == 4
| StarcoderdataPython |
84421 | <filename>testproject/settings.py
from django.conf import global_settings
# In-memory SQLite keeps the test suite self-contained and fast.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    },
}

DEBUG = True

# Test-only secret; never reuse outside this test project.
SECRET_KEY = 'secret'

ROOT_URLCONF = "testproject.urls"

INSTALLED_APPS = ["log_request_id"]

# Prepend the request-id middleware so every later layer sees the id.
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name.
MIDDLEWARE_CLASSES = [
    'log_request_id.middleware.RequestIDMiddleware',
    # ... other middleware goes here
] + list(global_settings.MIDDLEWARE_CLASSES)

# Route all log records through a mock handler (captures records for
# assertions) and inject %(request_id)s via the RequestIDFilter.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'request_id': {
            '()': 'log_request_id.filters.RequestIDFilter'
        }
    },
    'formatters': {
        'standard': {
            'format': '%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'mock': {
            'level': 'DEBUG',
            'class': 'testproject.handler.MockLoggingHandler',
            'filters': ['request_id'],
            'formatter': 'standard',
        },
    },
    'loggers': {
        'testproject': {
            'handlers': ['mock'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'log_request_id.middleware': {
            'handlers': ['mock'],
            'level': 'DEBUG',
            'propagate': False,
        },
    }
}
| StarcoderdataPython |
11385585 | from django.contrib.gis.db import models
class Admin1Code(models.Model):
    """First-level administrative division code (GeoNames admin1)."""

    code = models.CharField(max_length=10, unique=True)
    name = models.CharField(max_length=58)
    objects = models.GeoManager()

    def __str__(self):
        # Same "code: name" rendering as the original join-based version.
        return '%s: %s' % (self.code, self.name)
class Admin2Code(models.Model):
    """Second-level administrative division code (GeoNames admin2)."""

    code = models.CharField(max_length=32)
    name = models.CharField(max_length=46)
    objects = models.GeoManager()

    def __str__(self):
        # Same "code: name" rendering as the original join-based version.
        return '%s: %s' % (self.code, self.name)
class TimeZone(models.Model):
    """Time zone record: tz database id plus GMT/DST offsets in hours."""

    tzid = models.CharField(max_length=30)
    gmt_offset = models.FloatField()
    dst_offset = models.FloatField()
    objects = models.GeoManager()

    def __str__(self):
        return str(self.tzid)
class GeonameManager(models.GeoManager):
    """GeoManager with convenience filters keyed on GeoNames feature codes."""

    def _with_fcode(self, codes, args, kwargs):
        # Shared implementation: restrict by feature code, then apply any
        # extra caller-supplied filters.
        return self.filter(fcode__in=codes).filter(*args, **kwargs)

    def countries(self, *args, **kwargs):
        """Only country records (feature code PCLI)."""
        return self._with_fcode(['PCLI'], args, kwargs)

    def continents(self, *args, **kwargs):
        """Only continent records (feature code CONT)."""
        return self._with_fcode(['CONT'], args, kwargs)

    def cities(self, *args, **kwargs):
        """Only cities and other populated places (feature code PPL)."""
        return self._with_fcode(['PPL'], args, kwargs)
class Geoname(models.Model):
    """A single GeoNames gazetteer entry (place, country, continent, ...).

    Field layout mirrors the GeoNames main export; ``fclass``/``fcode`` are
    the GeoNames feature class/code used by GeonameManager's filters.
    """

    geonameid = models.PositiveIntegerField(primary_key=True, unique=True)
    name = models.CharField(max_length=200, db_index=True)
    asciiname = models.CharField(max_length=200, db_index=True)
    # Comma-separated alternate names blob as shipped by GeoNames.
    alternates = models.TextField(blank=True)
    fclass = models.CharField(max_length=1, db_index=True)
    fcode = models.CharField(max_length=10, db_index=True)
    # ISO-3166 two-letter country code ('' when not applicable).
    country = models.CharField(max_length=2, blank=True, db_index=True)
    cc2 = models.CharField('Alternate Country Code', max_length=100, blank=True)
    admin1 = models.CharField(max_length=20, blank=True, db_index=True)
    admin2 = models.CharField(max_length=80, blank=True, db_index=True)
    admin3 = models.CharField(max_length=20, blank=True, db_index=True)
    admin4 = models.CharField(max_length=20, blank=True, db_index=True)
    population = models.BigIntegerField(db_index=True)
    elevation = models.IntegerField(db_index=True)
    topo = models.IntegerField(db_index=True)
    timezone = models.CharField(max_length=40, blank=True)
    moddate = models.DateField('Date of Last Modification')
    point = models.PointField(null=True, geography=True, spatial_index=True)

    objects = GeonameManager()

    def __str__(self):
        return self.name

    def is_country(self):
        """True for independent political entities (GeoNames PCLI)."""
        return self.fcode == 'PCLI'

    def is_continent(self):
        return self.fcode == 'CONT'

    def is_populated(self):
        return self.fcode == 'PPL'

    def get_country(self):
        """Return the country record this entry belongs to.

        Returns self when this entry is itself a country, the matching PCLI
        record for its country code otherwise, or None when no such record
        exists.
        """
        if not self.is_country():
            try:
                return self.__class__.objects.get(
                    fcode='PCLI', country=self.country)
            except self.__class__.DoesNotExist:
                return None
        else:
            return self

    class Meta:
        ordering = ('name', 'country')
# class LocalName(models.Model):
# uuid = UUIDField(auto=True)
# language = models.CharField(max_length=20, db_index=True)
# name = models.CharField(max_length=255)
# value = models.TextField()
#
# def __unicode__(self):
# return self.name
class Alternate(models.Model):
    """Alternate name for a Geoname entry (one row per name variant)."""

    alternateid = models.PositiveIntegerField(primary_key=True, unique=True)
    geoname = models.ForeignKey(Geoname, related_name='alternate_names')
    # Language code of the variant (ISO-639 style, per GeoNames export).
    isolanguage = models.CharField(max_length=7)
    variant = models.CharField(max_length=200, db_index=True)
    # NOTE(review): BooleanField(default=None) — these flags come in as
    # nullable in the GeoNames dump; confirm whether NullBooleanField was
    # intended, since default=None triggers a Django warning on save.
    preferred = models.BooleanField(db_index=True, default=None)
    short = models.BooleanField(default=None)
    colloquial = models.BooleanField(default=None)
    historic = models.BooleanField(default=None)
    objects = models.GeoManager()

    class Meta:
        # Preferred variants sort first.
        ordering = ('-preferred',)

    def __str__(self):
        return self.geoname.name
class PostalCode(models.Model):
    """GeoNames postal-code record with its admin hierarchy and coordinates."""

    countrycode = models.CharField(max_length=2)
    postalcode = models.CharField(max_length=20)
    placename = models.CharField(max_length=200)
    admin1name = models.CharField(max_length=200)
    admin1code = models.CharField(max_length=20)
    admin2name = models.CharField(max_length=200)
    admin2code = models.CharField(max_length=80)
    admin3name = models.CharField(max_length=200)
    admin3code = models.CharField(max_length=20)
    latitude = models.FloatField()
    longitude = models.FloatField()
    # GeoNames accuracy indicator for the lat/lon pair.
    accuracy = models.SmallIntegerField()
    objects = models.GeoManager()

    def __str__(self):
        return self.placename
| StarcoderdataPython |
3578941 | from feather import read_dataframe as read_feather
import numpy as np
import pandas as pd
import pdb
import pydens
from zpylib import data_path as dp
def select_group(cols, n):
    """Randomly draw ``n`` distinct entries from ``cols`` (no replacement)."""
    chosen = np.random.choice(cols, size=n, replace=False)
    return chosen
def cols2density(df, cols):
    """Train a CADE density estimator on the given columns of *df*.

    Returns the trained pydens Cade model; prints its in-sample AUC as a
    training diagnostic.
    """
    cade = pydens.cade.Cade(
        sim_size=1000000,
        verbose=True)
    # .copy() protects the caller's frame from mutation during training.
    diagnostics = cade.train(
        df=df[cols].copy(),
        diagnostics=True
    )
    print('classifier in-sample AUC = ' + str(diagnostics['auc']))
    return cade
def densify(infile, outfile, groups, models=None):
    """Score a feather file with per-group density features and save it.

    infile/outfile: feather file paths.
    groups: list of column-name lists; one density model is trained (or
        reused from *models*) per group.
    models: optional pre-trained models, so the test set can reuse the
        models fit on the training set.
    Returns the list of models (trained here or passed through).
    """
    print("### Loading " + infile)
    df = read_feather(infile)
    if models is None:
        models = [cols2density(df, cols) for cols in groups]
    print("Scoring on all groups ...")
    # One new column per group: pydens_0, pydens_1, ...
    scores_df = pd.DataFrame({
        'pydens_' + str(k): models[k].density(df[groups[k]]) for k in range(len(groups))
    })
    print("Concatenating ...")
    new_df = pd.concat([df, scores_df], axis=1)
    print("Feathering ...")
    new_df.to_feather(outfile)
    return models
# Constants
np.random.seed(0)
ALLCOLS = pd.read_csv(dp('raw/train.csv'), nrows=2).columns.tolist()
DENSIFIABLE_COLS = [a for a in ALLCOLS
if ((a != 'HasDetections') and (a != 'MachineIdentifier'))]
N = 25 # number of features to densify per run
N_GROUPS = 10
GROUPS = [select_group(DENSIFIABLE_COLS, N).tolist() for k in range(N_GROUPS)]
# Main
models = densify(
infile=dp("refactored/train.feather"),
outfile=dp("refactored/densified_train.feather"),
groups=GROUPS
)
_ = densify(
infile=dp("refactored/test.feather"),
outfile=dp("refactored/densified_test.feather"),
groups=GROUPS,
models=models
) | StarcoderdataPython |
377235 | <gh_stars>100-1000
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.objectmodel import specialize
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.jit.backend.llsupport.descr import CallDescr
class UnsupportedKind(Exception):
pass
def get_call_descr_dynamic(cpu, cif_description, extrainfo):
    """Get a call descr from the given CIF_DESCRIPTION.

    Returns None when any argument or result type is not supported by
    this cpu backend (see get_ffi_type_kind).
    """
    ffi_result = cif_description.rtype
    try:
        reskind = get_ffi_type_kind(cpu, ffi_result)
        argkinds = [get_ffi_type_kind(cpu, cif_description.atypes[i])
                    for i in range(cif_description.nargs)]
    except UnsupportedKind:
        return None
    if reskind == 'v':
        # void result: no bytes to copy out.
        result_size = 0
    else:
        result_size = intmask(ffi_result.c_size)
    argkinds = ''.join(argkinds)
    return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result),
                     result_size, extrainfo, ffi_flags=cif_description.abi)
def get_ffi_type_kind(cpu, ffi_type):
    """Map an ffi type to the backend's one-letter kind code.

    Raises UnsupportedKind for kinds the cpu cannot handle ('f' float,
    'L' longlong, 'S' singlefloat when unsupported; '*' and '?' always).
    Unsigned ints ('u') are folded into 'i'.
    """
    from rpython.rlib.jit_libffi import types
    kind = types.getkind(ffi_type)
    if ((not cpu.supports_floats and kind == 'f') or
        (not cpu.supports_longlong and kind == 'L') or
        (not cpu.supports_singlefloats and kind == 'S') or
        kind == '*' or kind == '?'):
        raise UnsupportedKind("Unsupported kind '%s'" % kind)
    if kind == 'u':
        kind = 'i'
    return kind
def is_ffi_type_signed(ffi_type):
    """Return True unless *ffi_type* is an unsigned integer kind ('u')."""
    from rpython.rlib.jit_libffi import types
    return types.getkind(ffi_type) != 'u'
@specialize.memo()
def _get_ffi2descr_dict(cpu):
def entry(letter, TYPE):
return (letter, cpu.arraydescrof(rffi.CArray(TYPE)), rffi.sizeof(TYPE))
#
d = {('v', 0): ('v', None, 1)}
if cpu.supports_floats:
d[('f', 0)] = entry('f', lltype.Float)
if cpu.supports_singlefloats:
d[('S', 0)] = entry('i', lltype.SingleFloat)
for SIGNED_TYPE in [rffi.SIGNEDCHAR,
rffi.SHORT,
rffi.INT,
rffi.LONG,
rffi.LONGLONG]:
key = ('i', rffi.sizeof(SIGNED_TYPE))
kind = 'i'
if key[1] > rffi.sizeof(lltype.Signed):
if not cpu.supports_longlong:
continue
key = ('L', 0)
kind = 'f'
d[key] = entry(kind, SIGNED_TYPE)
for UNSIGNED_TYPE in [rffi.UCHAR,
rffi.USHORT,
rffi.UINT,
rffi.ULONG,
rffi.ULONGLONG]:
key = ('u', rffi.sizeof(UNSIGNED_TYPE))
if key[1] > rffi.sizeof(lltype.Signed):
continue
d[key] = entry('i', UNSIGNED_TYPE)
return d
def get_arg_descr(cpu, ffi_type):
    """Look up the (kind, arraydescr, size) entry for one argument type.

    Integer kinds are keyed by their byte size; all other kinds use size 0
    (matching how _get_ffi2descr_dict builds its keys).
    """
    from rpython.rlib.jit_libffi import types
    kind = types.getkind(ffi_type)
    if kind == 'i' or kind == 'u':
        size = rffi.getintfield(ffi_type, 'c_size')
    else:
        size = 0
    return _get_ffi2descr_dict(cpu)[kind, size]
def calldescr_dynamic_for_tests(cpu, atypes, rtype, abiname='FFI_DEFAULT_ABI'):
from rpython.rlib import clibffi
from rpython.rlib.jit_libffi import CIF_DESCRIPTION, FFI_TYPE_PP
from rpython.jit.codewriter.effectinfo import EffectInfo
#
p = lltype.malloc(CIF_DESCRIPTION, len(atypes),
flavor='raw', immortal=True)
p.abi = getattr(clibffi, abiname)
p.nargs = len(atypes)
p.rtype = rtype
p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes),
flavor='raw', immortal=True)
for i in range(len(atypes)):
p.atypes[i] = atypes[i]
return cpu.calldescrof_dynamic(p, EffectInfo.MOST_GENERAL)
| StarcoderdataPython |
1844548 | <filename>Lecture4/student/project.py
from VandyStudent import VandyStudent
from Student import add
if __name__ == '__main__':
    # Smoke-test the imported helpers and the VandyStudent class.
    print(add(1, 2))
    # NOTE(review): '<NAME>' placeholders come from dataset anonymization.
    student1 = VandyStudent('Ao', 'Qu', 'EBI', '<NAME>')
    student2 = VandyStudent('Xuhuan', 'Huang', 'Branscomb', '<NAME>')
    print(student1 > student2)
    student1.add_major('Math', 'Econ')
    print(student1.majors)
6432722 | <gh_stars>0
from .map import inc
| StarcoderdataPython |
3275189 | #!/usr/bin/env python
##
## video.py - recoding VNC to FLV.
##
## Copyright (c) 2009-2010 by <NAME>
##
import sys, zlib, re
from struct import pack, unpack
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from flvscreen import FlvScreen
def str2clip(s):
    """Parse a clipping spec 'WxH(+|-)X(+|-)Y'.

    Returns ((xsign, x), (ysign, y), w, h); raises ValueError on a
    malformed spec.
    """
    m = re.match(r'^(\d+)x(\d+)([\-\+])(\d+)([\-\+])(\d+)$', s)
    if not m:
        raise ValueError('Invalid clipping spec: %r' % s)
    w, h, xsign, x, ysign, y = m.groups()
    return ((xsign, int(x)), (ysign, int(y)), int(w), int(h))
def str2size(s):
    """Parse a size spec 'WxH' into an (int, int) tuple.

    Raises ValueError on a malformed spec.
    """
    m = re.match(r'^(\d+)x(\d+)$', s)
    if not m:
        raise ValueError('Invalid size spec: %r' % s)
    # Unpack the groups explicitly instead of indexing map(): under
    # Python 3 map() returns an iterator, so the original f[0]/f[1]
    # would raise TypeError.  This form works on both 2 and 3.
    w, h = m.groups()
    return (int(w), int(h))
class MultipleRange(object):
    """A sorted list of (offset, start, end) time ranges.

    Built either from a spec string like "10-20,30-" (open ends default to
    0 / sys.maxint) or from a list of (start, end) pairs.  The stored
    offset is the cumulative output time at which each range begins, so
    seekandmap() can map an input time onto the concatenated output
    timeline.  Python 2 code: uses basestring and sys.maxint.
    """

    def __init__(self, s):
        self.ranges = []
        if isinstance(s, basestring):
            t = 0
            for x in s.split(','):
                m = re.match(r'(\d+)?-(\d+)?', x)
                if not m:
                    raise ValueError('Invalid range spec: %r' % x)
                # Missing start means "from 0"; missing end means "to infinity".
                if m.group(1):
                    i1 = int(m.group(1))
                else:
                    i1 = 0
                if m.group(2):
                    i2 = int(m.group(2))
                else:
                    i2 = sys.maxint
                self.ranges.append((t, i1, i2))
                t += (i2 - i1)
        elif isinstance(s, list):
            t = 0
            for (i1, i2) in s:
                self.ranges.append((t, i1, i2))
                t += (i2 - i1)
        self.ranges.sort()
        # Cursor for seekandmap(); assumes monotonically increasing queries.
        self.pos = 0
        return

    def __iter__(self):
        return iter(self.ranges)

    def get_total(self, tmax):
        """Total output duration, clamping open-ended ranges to *tmax*."""
        t = 0
        for (_, i1, i2) in self.ranges:
            if i2 == sys.maxint:
                i2 = tmax
            t += (i2 - i1)
        return t

    def seekandmap(self, i):
        """Map input time *i* to output time, or -1 when i falls in a gap.

        Advances the internal cursor, so calls must come in increasing
        order of *i*.
        """
        while self.pos < len(self.ranges):
            (t, i1, i2) = self.ranges[self.pos]
            if i < i1: return -1
            if i <= i2: return (i - i1 + t)
            self.pos += 1
        return -1
## VideoSink
##
## edit by luoxiao:
## x、y : 在vnc界面中截取录像的偏移位置
##
## width、height : 截取录像的大小
##
class VideoSink(object):
    """Base class for video sinks: receives framebuffer and cursor updates
    from a VNC client.  This base implementation only logs events when
    debug is enabled; subclasses (e.g. FLVVideoSink) do the encoding.

    Debug output goes through sys.stderr.write so the class parses under
    both Python 2 and 3 (the original used the Python-2 print statement).
    """

    def __init__(self, clipping=None, debug=0):
        # clipping: ((xsign, x), (ysign, y), w, h) as produced by str2clip,
        # or None to record the whole remote screen.
        self.debug = debug
        self.clipping = clipping
        self.initialized = False
        return

    def init_screen(self, width, height, name=None):
        """Compute the recorded region for a remote screen of width x height.

        Returns (x, y, width, height).  With a clipping spec, a '-' sign
        anchors the offset to the right/bottom edge of the screen.
        """
        if self.debug:
            sys.stderr.write('init_screen: %dx%d, name=%r\n' % (width, height, name))
        if self.clipping:
            ((xs, x), (ys, y), w, h) = self.clipping
            if xs == '-':
                (x, width) = (width - w - x, w)
            else:
                (x, width) = (x, w)
            if ys == '-':
                # Fix: the original computed height - h - x here, using the
                # horizontal offset for the vertical anchor.
                (y, height) = (height - h - y, h)
            else:
                (y, height) = (y, h)
        else:
            (x, y) = (0, 0)
        self.initialized = True
        return (x, y, width, height)

    def convert_pixels(self, data):
        # Data is given as ARGB already; passthrough hook for subclasses.
        return data

    def convert_color1(self, data):
        # One 4-byte pixel -> (r, g, b); the pad byte is dropped.
        return unpack('BBBx', data)

    def update_cursor_image(self, width, height, data):
        if self.debug:
            sys.stderr.write('update_cursor_image: %dx%d\n' % (width, height))
        return

    def update_cursor_pos(self, x, y):
        if self.debug:
            sys.stderr.write('update_cursor_pos: (%d,%d)\n' % (x, y))
        return

    def update_screen_rgbabits(self, pos, size, data):
        # pos=(x, y), size=(width, height); plain parameters replace the
        # Python-2-only tuple-unpacking signature, same call pattern.
        if self.debug:
            (x, y) = pos
            (width, height) = size
            sys.stderr.write('update_screen_rgbabits: %dx%d at (%d,%d)\n'
                             % (width, height, x, y))
        return

    def update_screen_solidrect(self, pos, size, data):
        if self.debug:
            # Fix: the original debug line referenced undefined names
            # (width/height/x/y/color) and raised NameError when enabled.
            (x, y) = pos
            (w, h) = size
            sys.stderr.write('update_screen_solidrect: %dx%d at (%d,%d), color=%r\n'
                             % (w, h, x, y, data))
        return

    def flush(self, t):
        if self.debug:
            sys.stderr.write('flush ' + str(t) + '\n')
        return

    def close(self):
        if self.debug:
            sys.stderr.write('close\n')
        return
## FLVVideoSink
##
## add by luoxiao:
## width、height - 截取录像的大小
## x、y - 在vnc界面中截取录像的偏移位置
class FLVVideoSink(VideoSink):
def __init__(self, writer, blocksize=32, framerate=15, keyframe=0,
clipping=None, panwindow=None, panspeed=0, debug=0):
VideoSink.__init__(self, clipping=clipping, debug=debug)
self.writer = writer
self.blocksize = blocksize
self.framerate = framerate
self.keyframe = keyframe
self.panwindow = panwindow
self.panspeed = panspeed
self.screen = None
self.screenpos = (0, 0)
self.screensize = None
self.windowpos = (0, 0)
self.windowsize = None
self.curframe = 0
self.changes = []
self.newFBSizeChange = False # add by luoxiao
self.vnc_width = 0 # add by luoxiao
self.vnc_height = 0 # add by luoxiao
return
def init_screen(self, width, height, name=None):
self.vnc_width = width
self.vnc_height = height
(x, y, width, height) = VideoSink.init_screen(self, width, height, name=name)
bw = (width + self.blocksize - 1) / self.blocksize
bh = (height + self.blocksize - 1) / self.blocksize
self.screenpos = (x, y)
self.screensize = (bw, bh)
# 初始化flvscreen,blocksize默认为32,bw=width/blocksize=1024/32=32,bh=height/blocksize=768/32=24
self.screen = FlvScreen(self.blocksize, bw, bh)
if self.panwindow:
(w, h) = self.panwindow
self.windowsize = ((w + self.blocksize - 1) / self.blocksize,
(h + self.blocksize - 1) / self.blocksize)
else:
self.windowsize = (bw, bh)
if self.debug:
print >> sys.stderr, 'start: %d,%d (%dx%d)' % (x, y, width, height)
self.writer.set_screen_size(width, height)
# add by luoxiao
# import pdb
# pdb.set_trace()
return (x, y, width, height)
# 将图像写入flvscreen缓冲
def update_screen_rgbabits(self, (x, y), (w, h), data):
(x0, y0) = self.screenpos
# add by luoxiao , vnc实际屏幕大小 < 录像屏幕大小时使图像居中
(screenwidth, screenheight) = self.screensize
if screenwidth * self.blocksize > self.vnc_width:
x = x + (screenwidth * self.blocksize - self.vnc_width) / 2
if screenheight * self.blocksize > self.vnc_height:
y = y + (screenheight * self.blocksize - self.vnc_height) / 2
# import pdb
# pdb.set_trace()
# end add
self.screen.blit_rgba(x - x0, y - y0, w, h, data)
return
# 将flvscreen缓冲的图像写入flv文件
def flush(self, t):
# t must be >= 0
if not self.screen: return
while 1:
timestamp = self.curframe * 1000 / self.framerate
if t < timestamp: break
self.writer.write_video_frame(timestamp, self.get_update_frame()) # 获取更新的帧,并写入flv文件
self.curframe += 1
return
# add by luoxiao
def onNewFBSize(self, x, y, width, height):
self.screen.reset(1) # 清空缓存的帧像素及block
self.newFBSizeChange = True # 强制全部刷新
self.vnc_width = width
self.vnc_height = height
return
# write SCREENVIDEOPACKET tag
def get_update_frame(self):
changes = self.screen.changed()
# edit by luoxiao:
# self.screen.reset()
self.screen.reset(0)
(bw, bh) = self.windowsize
# edit by luoxiao: 跳过panding提高性能(录像20分钟后高cpu占用问题)
# (bx,by) = self.do_autopan(self.windowpos, changes)
bx = 0;
by = 0;
# edit end
key = ((bx, by) != self.windowpos or
(self.keyframe and (self.curframe % self.keyframe) == 0))
# edit by luoxiao
# if key:
if key or self.newFBSizeChange: # end edit
# update the entire screen if necessary.
self.windowpos = (bx, by)
changes = set((bx + x, by + y) for y in xrange(bh) for x in xrange(bw))
# add by luoxiao
self.newFBSizeChange = False
else:
changes = set(changes)
if self.debug:
print >> sys.stderr, 'update(%d): changes=%r' % (self.curframe, len(changes)), sorted(changes)
flags = 3 # screenvideo codec
if key:
flags |= 0x10
else:
flags |= 0x20
data = chr(flags)
w = bw * self.blocksize
h = bh * self.blocksize
data += chr((self.blocksize / 16 - 1) << 4 | w >> 8) + chr(w & 0xff)
data += chr((self.blocksize / 16 - 1) << 4 | h >> 8) + chr(h & 0xff)
n = 0
for y in xrange(bh, 0, -1):
y = by + y - 1
for x in xrange(bw):
x += bx
if (x, y) in changes:
# changed block
block = zlib.compress(self.screen.get(x, y)) # 从screen获取指定位置的block的像素图片
data += pack('>H', len(block)) + block
else:
# unchanged block
data += pack('>H', 0)
return data
# do paning.
def do_autopan(self, (wx, wy), changes):
if changes:
r = (min(x for (x, y) in changes),
min(y for (x, y) in changes),
max(x for (x, y) in changes) + 1,
max(y for (x, y) in changes) + 1)
self.changes.append(r)
elif self.changes:
self.changes.append(self.changes[-1])
self.changes = self.changes[-self.panspeed:]
cx0 = sum(x0 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
cy0 = sum(y0 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
cx1 = sum(x1 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
cy1 = sum(y1 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
(w, h) = self.windowsize
(bw, bh) = self.screensize
if w < cx1 - cx0:
wx = min(max(0, (cx0 + cx1 - w) / 2), bw - w)
elif cx0 < wx:
wx = cx0
elif wx < cx1 - w:
wx = cx1 - w
if h <= cy1 - cy0:
wy = min(max(0, (cy0 + cy1 - h) / 2), bh - h)
elif cy0 < wy:
wy = cy0
elif wy < cy1 - h:
wy = cy1 - h
return (wx, wy)
##
## FLVMovieProcessor
##
## :flvrec.py not use this class
##
class FLVMovieProcessor(object):
def __init__(self, writer=None, debug=0):
self.debug = debug
self.writer = writer
self.basetime = 0
return
def process_audio_tag(self, audiosink, data):
flags = ord(data[0])
# must be mp3 packet
if (flags & 0xf0) != 0x20: return
samplerate = (flags & 0x0c) >> 2
samplerate = [5500, 11025, 22050, 44100][samplerate]
samplesize = 8
if flags & 2:
samplesize = 16
samplestereo = flags & 1
audiosink.load(data[1:])
return
def process_video_tag(self, videosink, data):
import flvscreen, zlib
(frametype, codecid) = ord(data[0]) >> 4, ord(data[0]) & 0xf
# must be ScreenVideo
if codecid != 3: return
(blockwidth, imagewidth) = ord(data[1]) >> 4, (ord(data[1]) & 0xf) << 8 | ord(data[2])
(blockheight, imageheight) = ord(data[3]) >> 4, (ord(data[3]) & 0xf) << 8 | ord(data[4])
blockwidth = (blockwidth + 1) * 16
blockheight = (blockheight + 1) * 16
hblocks = (imagewidth + blockwidth - 1) / blockwidth
vblocks = (imageheight + blockheight - 1) / blockheight
if not videosink.initialized:
videosink.init_screen(imagewidth, imageheight)
fp = StringIO(data[5:])
changed = []
for y in xrange(vblocks):
for x in xrange(hblocks):
(length,) = unpack('>H', fp.read(2))
if not length: continue
data = fp.read(length)
x0 = x * blockwidth
y0 = imageheight - (y + 1) * blockheight
w = min(blockwidth, imagewidth - x0)
h = blockheight
if y0 < 0:
h += y0
y0 = 0
data = zlib.decompress(data)
data = flvscreen.flv2rgba(w, h, data)
changed.append((x, vblocks - y - 1))
videosink.update_screen_rgbabits((x0, y0), (w, h), data)
return
def process_flv(self, parser, audiosink=None, videosink=None, ranges=None):
timestamp = 0
for (i, (tag, _, timestamp, _, keyframe)) in enumerate(parser):
data = parser.get_data(i)
if tag == 9:
if videosink:
self.process_video_tag(videosink, data)
elif tag == 8:
if audiosink:
self.process_audio_tag(audiosink, data)
else:
self.writer.write_other_data(tag, data)
continue
if ranges:
timestamp = ranges.seekandmap(timestamp)
if timestamp < 0: continue
if videosink:
videosink.flush(timestamp)
if ranges:
timestamp = ranges.get_total(timestamp)
if audiosink:
if ranges:
for (t, s, e) in ranges:
audiosink.put(self.writer, s, e, t)
else:
audiosink.put(self.writer)
if videosink:
videosink.flush(timestamp)
self.writer.flush()
self.writer.add_basetime(timestamp)
return
# main
if __name__ == '__main__':
from flv import FLVWriter
from rfb import RFBNetworkClient
fp = file('out.flv', 'wb')
writer = FLVWriter(fp)
sink = FLVVideoSink(writer, debug=1)
client = RFBNetworkClient('127.0.0.1', 5900, sink)
client.open()
try:
while 1:
client.idle()
except KeyboardInterrupt:
pass
client.close()
writer.close()
fp.close()
| StarcoderdataPython |
3298398 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from yoti_python_sdk.tests.conftest import PEM_FILE_PATH, YOTI_CLIENT_SDK_ID
from yoti_python_sdk.tests.mocks import (
mocked_requests_post_share_url,
mocked_requests_post_share_url_app_not_found,
mocked_requests_post_share_url_invalid_json,
mocked_timestamp,
mocked_uuid4,
)
from yoti_python_sdk.dynamic_sharing_service import share_url
from yoti_python_sdk.dynamic_sharing_service.dynamic_scenario_builder import (
DynamicScenarioBuilder,
)
from yoti_python_sdk.client import Client
try:
from unittest import mock
except ImportError:
import mock
@mock.patch("requests.request", side_effect=mocked_requests_post_share_url)
@mock.patch("time.time", side_effect=mocked_timestamp)
@mock.patch("uuid.uuid4", side_effect=mocked_uuid4)
def test_create_share_url_with_correct_data(mock_uuid4, mock_time, mock_get):
yoti_client = Client(YOTI_CLIENT_SDK_ID, PEM_FILE_PATH)
dynamic_scenario = DynamicScenarioBuilder().build()
dynamic_share = share_url.create_share_url(yoti_client, dynamic_scenario)
assert (
dynamic_share.share_url == "https://code.yoti.com/forfhq3peurij4ihroiehg4jgiej"
)
assert dynamic_share.ref_id == "01aa2dea-d28b-11e6-bf26-cec0c932ce01"
@mock.patch("requests.request", side_effect=mocked_requests_post_share_url_invalid_json)
@mock.patch("time.time", side_effect=mocked_timestamp)
@mock.patch("uuid.uuid4", side_effect=mocked_uuid4)
def test_create_share_url_invalid_json(mock_uuid4, mock_time, mock_get):
yoti_client = Client(YOTI_CLIENT_SDK_ID, PEM_FILE_PATH)
dynamic_scenario = DynamicScenarioBuilder().build()
with pytest.raises(RuntimeError) as err:
share_url.create_share_url(yoti_client, dynamic_scenario)
assert share_url.INVALID_DATA in str(err.value)
@mock.patch(
"requests.request", side_effect=mocked_requests_post_share_url_app_not_found
)
@mock.patch("time.time", side_effect=mocked_timestamp)
@mock.patch("uuid.uuid4", side_effect=mocked_uuid4)
def test_create_share_url_app_not_found(mock_uuid4, mock_time, mock_get):
yoti_client = Client(YOTI_CLIENT_SDK_ID, PEM_FILE_PATH)
dynamic_scenario = DynamicScenarioBuilder().build()
with pytest.raises(RuntimeError) as err:
share_url.create_share_url(yoti_client, dynamic_scenario)
assert share_url.APPLICATION_NOT_FOUND in str(err.value)
| StarcoderdataPython |
49563 | import turtle
turtle.bgcolor("black")  # dark canvas so the white trail stands out
sq = turtle.Turtle()
# NOTE(review): turtle clamps speeds outside 0.5-10 to 0 ("fastest"),
# so speed(20) effectively disables animation delay.
sq.speed(20)
sq.color("white")
# Draw an expanding square-ish spiral: each segment is 1px longer and the
# 91-degree turn (vs 90) makes the whole pattern rotate slowly.
for i in range(500):
    sq.forward(i)
    sq.left(91)
9782062 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from flavor import *
from floating_ip_associate import *
from floating_ip import *
from instance import *
from keypair import *
from sec_group import *
from server_group import *
from volume_attach import *
from get_flavor import *
from get_keypair import *
| StarcoderdataPython |
8097032 | <gh_stars>1-10
# coding=utf-8
import os
import getopt
import sys
import platform
import pandas as pd
from datetime import datetime
from datetime import timedelta
from Sql.Connect import switch_amex_database, switch_nysdaq_database, switch_nyse_database, switch_zh_database
from Sql.StockBriefTable import StockBriefTable
from Sql.KLineBuyRecdTable import KLineBuyRecdTable, KLineBuyRecd
from Sql.CashFlowRecdTable import CashFlowRecdTable
from Sql.ProfitRecdTable import ProfitRecdTable
from Sql.KLineTable import KLineTable
from Analizer.StockAnalyzer import StockAnalyzer
from Analizer.KLineAnalyzer import KLineAnalyzer
import get_all_tickers.get_tickers as gt
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if platform.system() == 'Windows':
os.environ['PYTHONPATH'] = os.environ['PATH'] + BASE_DIR + ";"
else:
os.environ['PYTHONPATH'] = os.environ['PATH'] + ":" + BASE_DIR
def help_print():
    """Print command-line usage for the stock analysis tools."""
    usage_lines = (
        'get top chinese stock which has high net present value ',
        'args:',
        '-h\t\t\t show help',
        '--init\t\t run before --anpv, get stocks info from network',
        '--anpv [n]\t get top high net present value of stock, [n] means top count',
    )
    for line in usage_lines:
        print(line)
def init_us_database():
print('start init database...')
print('init nysdaq stock brief...')
switch_nysdaq_database()
gt.save_tickers(NYSE=False, NASDAQ=True, AMEX=False, filename="./data/nysdaq_stock_list.csv")
StockBriefTable.init_stock_brief_from_cvs("./data/nysdaq_stock_list.csv")
print('init NYSE stock brief...')
switch_nyse_database()
gt.save_tickers(NYSE=True, NASDAQ=False, AMEX=False, filename="./data/nyse_stock_list.csv")
StockBriefTable.init_stock_brief_from_cvs("./data/nyse_stock_list.csv")
print('init AMEX stock brief...')
switch_amex_database()
gt.save_tickers(NYSE=False, NASDAQ=False, AMEX=True, filename="./data/amex_stock_list.csv")
StockBriefTable.init_stock_brief_from_cvs("./data/amex_stock_list.csv")
print('\ninit finished')
def init_database():
print('start init database...')
print('init stock brief...')
StockBriefTable.clear_brief_table()
StockBriefTable.init_stock_brief_from_xl('./data/A_stock_list.xlsx')
print('\nusing spiders to get stock cash flow table...')
CashFlowRecdTable.clear_cash_flow_recd_table()
os.system("scrapy runspider Spiders/StockSpiders/CashFlowSpider.py --nolog")
print('\nusing spiders to get stock profit table...')
ProfitRecdTable.clear_profit_recd_table()
os.system("scrapy runspider Spiders/StockSpiders/StockProfitSpider.py --nolog")
print('\ninit finished')
def init_day_k_line():
print('start init day k line...')
switch_zh_database()
init_day_k_line_with_arg(0)
print('\ninit finished')
def init_day_k_line_with_arg(market_id=0):
KLineTable.clear_k_line_table(0)
os.system("scrapy runspider Spiders/StockSpiders/DayKLineSpider.py --nolog -a market_id=" + str(market_id))
def init_us_day_k_line():
print('start init day k line...')
print('init nysdaq...')
switch_nysdaq_database()
init_day_k_line_with_arg(105)
print('\ninit nyse...')
switch_nyse_database()
init_day_k_line_with_arg(106)
print('\ninit amex...')
switch_amex_database()
init_day_k_line_with_arg(107)
print('\ninit finished')
def get_rzrq():
switch_zh_database()
os.system("scrapy runspider Spiders/StockSpiders/RzRqSpider.py --nolog")
def init_week_k_line():
print('start init week k line...')
KLineTable.clear_k_line_table(1)
os.system("scrapy runspider Spiders/StockSpiders/WeekKLineSpider.py --nolog")
print('\ninit finished')
def init_month_k_line():
print('start init month k line...')
KLineTable.clear_k_line_table(2)
os.system("scrapy runspider Spiders/StockSpiders/MonthKLineSpider.py ")
print('\ninit finished')
def print_top_net_present_value_stock(top_cnt, season):
StockAnalyzer.get_top_net_present_value_stock(top_cnt, season)
def analyzer_day_cost(code_id):
analyzer = KLineAnalyzer()
analyzer.analyze_one(code_id, 0)
def analyzer_day_cost_all():
analyzer = KLineAnalyzer()
analyzer.analyze_all(0)
def analyzer_holding_info():
    """Walk day by day from 1991-04-01 to now, printing the total buy price,
    completed-trade win ratio and completed-trade count of the records held
    on each day, then print overall maxima/minima.
    """
    dt = datetime(1991, 4, 1)
    # dt = datetime(2020, 3, 18)
    max_price = 0.0
    # Start above any possible rate (rates are <= 1.0) so the first day wins.
    min_win_rate = 2.0
    max_hold_cnt = 0
    while dt < datetime.now() + timedelta(days=2):
        buy_recd_list = KLineBuyRecdTable.select_holding_buy_recd_list_by_date(dt)
        total_price = 0.0
        average_price = 0.0
        win_cnt = 0
        recd_cnt = 0
        for buy_recd in buy_recd_list:
            total_price += buy_recd.buy_price
            if buy_recd.sell_price > buy_recd.buy_price:
                win_cnt += 1
            # sell_price == 0.0 marks a still-open position: the -1/+1 pair
            # nets to zero, so recd_cnt counts only completed trades.
            if buy_recd.sell_price == 0.0:
                recd_cnt -= 1
            recd_cnt += 1
        if recd_cnt > 0:
            # NOTE(review): average_price is computed but never used below.
            average_price = total_price / recd_cnt
        if max_price < total_price:
            max_price = total_price
        # NOTE(review): divides by len(buy_recd_list) and (below) by recd_cnt
        # without a zero guard — raises ZeroDivisionError on a day with no
        # records / no completed trades.  Confirm the DB always has some.
        if min_win_rate > win_cnt * 1.0 / len(buy_recd_list):
            min_win_rate = win_cnt * 1.0 / len(buy_recd_list)
        if max_hold_cnt < len(buy_recd_list):
            max_hold_cnt = len(buy_recd_list)
        print(dt.strftime("%Y-%m-%d: ") + str(total_price) + " " + str(
            win_cnt * 1.0 / recd_cnt) + " " + str(recd_cnt))
        dt = dt + timedelta(days=1)
    print("max price: " + str(max_price))
    print("max hold cnt: " + str(max_hold_cnt))
    print("min win rate: " + str(min_win_rate))
def analyzer_us_day_cost_profit():
switch_nysdaq_database()
analyzer_day_cost_profit()
switch_amex_database()
analyzer_day_cost_profit()
switch_nyse_database()
analyzer_day_cost_profit()
def analyzer_day_cost_profit():
scode_list = StockBriefTable.get_stock_id_list()
analyzer = KLineAnalyzer()
code_cnt = len(scode_list)
win_cnt = 0
total_cnt = 0
growth = 1.0
take_days = 0
KLineBuyRecdTable.clear_table()
for index in range(0, code_cnt):
code_id = scode_list[index]
buy_record_list = analyzer.analyze_profit(code_id, 0)
has_holding = False
for buy_record in buy_record_list:
if buy_record.sell_date == 'None':
has_holding = True
continue
rate = (buy_record.sell_price - buy_record.buy_price) / buy_record.buy_price
growth = growth * (1 + rate)
total_cnt += 1
take_days += buy_record.days
if rate > 0.0:
win_cnt += 1
if has_holding:
KLineBuyRecdTable.insert_buy_recd_list(buy_record_list)
if len(buy_record_list) > 0 and total_cnt > 0:
print("[" + str(index) + "] WinRate: " + str(win_cnt * 1.0 / total_cnt) + " Growth: " + str(
pow(growth, 1 / take_days)) + " days: " + str(take_days / total_cnt))
def get_buy_recd_win_rate(buy_recd_list):
    """Fraction of completed trades that closed at a profit.

    A record with sell_price == 0.0 is a still-open position (the
    convention used elsewhere in this module, e.g. analyzer_holding_info)
    and is excluded.  Returns 0.5 when no trade has completed, matching
    the original's neutral default.

    Fix: the original counted wins over the whole list (including open
    positions) but divided by len(list) - 1, which assumed exactly one
    open record and miscounted otherwise.  Numerator and denominator now
    both cover only completed trades.
    """
    win_cnt = 0
    total_cnt = 0
    for buy_recd in buy_recd_list:
        if buy_recd.sell_price == 0.0:
            continue  # position still open; not a completed trade
        total_cnt += 1
        if buy_recd.sell_price > buy_recd.buy_price:
            win_cnt += 1
    if total_cnt <= 0:
        return 0.5
    return win_cnt / total_cnt
def get_buy_recd_list_sort_key(buy_recd_list):
    """Sort key for a code's trade history.

    Returns the completed-trade win percentage rounded to 2 decimal
    places, or 0.5 when no trade has completed yet (open positions are
    skipped via is_holding()).
    """
    completed = [r for r in buy_recd_list if not r.is_holding()]
    if not completed:
        return 0.5
    wins = sum(1 for r in completed if r.sell_price > r.buy_price)
    return round(wins / len(completed) * 100, 2)
def get_us_adv():
switch_nysdaq_database()
get_adv("nysdaq_recd")
switch_nyse_database()
get_adv("nyse_recd")
switch_amex_database()
get_adv("amex_recd")
def get_adv(dirname="zh_recd"):
    """Write an HTML report of currently-held codes ranked by win rate.

    For every code with an open position, re-runs the profit analysis and
    sorts the codes by get_buy_recd_list_sort_key (descending win rate),
    then writes one line per code to ./recd/<dirname>/recd<YYYY-MM-DD>.

    Fixes: the report file handle was never closed (now a 'with' block),
    and the loop rebound its own iteration variable 'recd_list' while
    iterating it (now separately named).
    """
    holding_recds = KLineBuyRecdTable.select_holding_code()
    analyzer = KLineAnalyzer()
    buy_recd_lists_group = []
    for recd in holding_recds:
        buy_recd_lists_group.append(analyzer.analyze_profit(recd.code_id, 0, False))
    buy_recd_lists_group.sort(key=get_buy_recd_list_sort_key, reverse=True)
    str_file_name = 'recd' + datetime.now().strftime("%Y-%m-%d")
    with open('./recd/' + dirname + '/' + str_file_name, 'w') as f:
        f.write('<html>\n<head>\n</head>\n<body>\n')
        for buy_recd_list in buy_recd_lists_group:
            if len(buy_recd_list) <= 0:
                continue
            win_rate = get_buy_recd_list_sort_key(buy_recd_list)
            # Last record is the open position, so completed count is len - 1.
            total_cnt = len(buy_recd_list) - 1
            code_id = buy_recd_list[0].code_id
            f.write('code: ' + code_id + ", win: " + str(win_rate) +
                    "%, total cnt: " + str(total_cnt) + "<br>\n")
        f.write('\n</body></html>')
def us_daily_run():
init_us_day_k_line()
analyzer_us_day_cost_profit()
get_us_adv()
def daily_run():
init_day_k_line()
analyzer_day_cost_profit()
get_adv()
get_rzrq()
def reg_test():
    """Regression-test stub: sweep one day at a time from 1991-04-01 to
    two days past now.

    The loop body is intentionally empty for now; per-day checks are
    meant to be added here.  Removed the unused ``max_price`` local and
    hoisted the loop-invariant end date (the original re-evaluated
    ``datetime.now()`` on every iteration).
    """
    dt = datetime(1991, 4, 1)
    # dt = datetime(2020, 3, 18)
    end = datetime.now() + timedelta(days=2)
    while dt < end:
        dt = dt + timedelta(days=1)
if __name__ == "__main__":
    # Parse the long-form command line options (see help_print() for usage).
    opts, args = getopt.getopt(sys.argv[1:], 'h', ['help',
                                                   'init',
                                                   'initus',
                                                   'anpv=',
                                                   'season=',
                                                   'initusdl',
                                                   'initdl',
                                                   'initwl',
                                                   'initml',
                                                   'adcost=',
                                                   'adcostall',
                                                   'adcostprofit',
                                                   'aduscostprofit',
                                                   'aholding',
                                                   'getadv',
                                                   'getusadv',
                                                   'dailyrun',
                                                   'usdailyrun',
                                                   'rzrq',
                                                   'regtest'])
    season = 4
    # First pass: pick up --season so the action handlers below can use it.
    for key, value in opts:
        if key in ['--season']:
            season = int(value)
            if season < 0 or season > 4:
                print("season value has to be one of 1 ,2, 3, 4\n")
                exit(0)
    # Second pass: dispatch each requested action in command-line order.
    for key, value in opts:
        if key in ['-h', '--help']:
            help_print()
        elif key in ['--init']:
            init_database()
        elif key in ['--initus']:
            init_us_database()
        elif key in ['--initdl']:
            init_day_k_line()
        elif key in ['--initwl']:
            init_week_k_line()
        elif key in ['--initml']:
            init_month_k_line()
        elif key in ['--anpv']:
            print_top_net_present_value_stock(int(value), season)
        elif key in ['--adcostall']:
            analyzer_day_cost_all()
        elif key in ['--adcost']:
            analyzer_day_cost(value)
        elif key in ["--adcostprofit"]:
            analyzer_day_cost_profit()
        elif key in ["--aduscostprofit"]:
            analyzer_us_day_cost_profit()
        elif key in ["--aholding"]:
            analyzer_holding_info()
        elif key in ["--getadv"]:
            get_adv()
        elif key in ["--getusadv"]:
            get_us_adv()
        elif key in ["--dailyrun"]:
            daily_run()
        elif key in ["--usdailyrun"]:
            us_daily_run()
        elif key in ["--regtest"]:
            reg_test()
        elif key in ['--initusdl']:
            init_us_day_k_line()
        elif key in ["--rzrq"]:
            get_rzrq()
| StarcoderdataPython |
3563423 | <filename>tests/common/test_ping.py<gh_stars>0
from .context import common
import pytest
import subprocess
# set timeout for all test functions to 1.2 sec
pytestmark = pytest.mark.timeout(1.2)
def test_is_target_reachable__loopback():
    # Pinging the IPv4 loopback address should always succeed.
    assert common.is_target_reachable("127.0.0.1")
def test_is_target_reachable__linklocal():
    # Assumption: this specific address is absent from the current
    # link-local net.  Windows ping then reports
    # "PING: Fehler bei der Uebertragung. Allgemeiner Fehler."
    # (transmit failure / general error).
    assert not common.is_target_reachable("169.254.0.1")
def test_is_target_reachable__private():
    # Assumption: this specific address is absent from this private net.
    # Windows ping then reports "Antwort von a.b.c.d: Zielnetz nicht
    # erreichbar." (reply: destination net unreachable).
    assert not common.is_target_reachable("192.168.212.212")
def test_is_target_reachable__notReachable():
    # Assumption: this specific address is not reachable.  Windows ping
    # reports "Zeitueberschreitung der Anforderung." (request timed out).
    assert not common.is_target_reachable("2.3.4.5")
def test_is_target_reachable__timeoutExpired():
    with pytest.raises(subprocess.TimeoutExpired):
        # Assumption: the address is not reachable ("request timed out").
        # timeout=0 makes the subprocess deadline expire before the ping
        # command can finish, so TimeoutExpired propagates.
        common.is_target_reachable("2.3.4.5", timeout=0)
| StarcoderdataPython |
11391344 | from django.contrib import admin
from blog.models import Post, Comment, Reaction
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Changelist columns for blog comments."""
    list_display = ('post', 'comment_text', 'user')


@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Changelist columns for blog posts."""
    list_display = ('post_title', 'user', 'updated', 'publish_status')


@admin.register(Reaction)
class ReactionAdmin(admin.ModelAdmin):
    """Changelist columns for post reactions."""
    list_display = ('post', 'user', 'react')
6510322 | # @title Define imports and utility functions.
import collections
import functools
import time
from pathlib import Path
import gin
import jax
import jax.numpy as jnp
import numpy as np
import PIL.Image
import skvideo.io
import tensorflow as tf
from absl import app, flags, logging
from flax import jax_utils
from flax import linen as nn
from flax import optim
from flax.training import checkpoints
from jax import random
from conerf import (
configs,
datasets,
evaluation,
gpath,
image_utils,
model_utils,
models,
schedules,
utils,
)
from conerf import visualization as viz
# Command-line flags: execution backend, output location, gin overrides,
# and the optional attribute / linear-regression rendering controls.
flags.DEFINE_enum(
    "mode",
    None,
    ["jax_cpu", "jax_gpu", "jax_tpu"],
    "Distributed strategy approach.",
)
flags.DEFINE_string("base_folder", None, "where to store ckpts and logs")
flags.mark_flag_as_required("base_folder")
flags.DEFINE_multi_string("gin_bindings", None, "Gin parameter bindings.")
flags.DEFINE_multi_string("gin_configs", (), "Gin config files.")
flags.DEFINE_string("betas", None, "Indices from the dataset to be used")
flags.DEFINE_bool(
    "use_lr", False, "Whether use linear regression to predict attributes"
)
FLAGS = flags.FLAGS
def main(argv):
    """Render novel-view videos from the latest ConeRF checkpoint.

    Loads gin configs and the datasource, rebuilds the NeRF model, then
    polls the checkpoint directory forever: whenever a newer checkpoint
    appears it renders every test camera (optionally sweeping attribute
    values and the ``--betas`` warp/appearance indices) and writes
    per-frame PNGs plus MP4s under ``<base_folder>/eval_renders``.
    """
    jax.config.parse_flags_with_absl()
    tf.config.experimental.set_visible_devices([], "GPU")
    del argv
    logging.info("*** Starting generating video")
    # Assume G3 path for config files when running locally.
    gin_configs = FLAGS.gin_configs
    logging.info("*** Loading Gin configs from: %s", str(gin_configs))
    gin.parse_config_files_and_bindings(
        config_files=gin_configs,
        bindings=FLAGS.gin_bindings,
        skip_unknown=True,
    )
    exp_config = configs.ExperimentConfig()
    train_config = configs.TrainConfig()
    eval_config = configs.EvalConfig()
    # Dummy model used only to query which metadata keys the real model needs.
    dummy_model = models.NerfModel({}, 0, 0, 0)
    # Get directory information.
    exp_dir = gpath.GPath(FLAGS.base_folder)
    if exp_config.subname:
        exp_dir = exp_dir / exp_config.subname
    checkpoint_dir = exp_dir / "checkpoints"
    # Log and create directories if this is the main process.
    if jax.process_index() == 0:
        logging.info("exp_dir = %s", exp_dir)
        if not exp_dir.exists():
            exp_dir.mkdir(parents=True, exist_ok=True)
        logging.info("checkpoint_dir = %s", checkpoint_dir)
        if not checkpoint_dir.exists():
            checkpoint_dir.mkdir(parents=True, exist_ok=True)
    logging.info(
        "Starting process %d. There are %d processes.",
        jax.process_index(),
        jax.process_count(),
    )
    logging.info(
        "Found %d accelerator devices: %s.",
        jax.local_device_count(),
        str(jax.local_devices()),
    )
    logging.info(
        "Found %d total devices: %s.", jax.device_count(), str(jax.devices())
    )
    datasource = exp_config.datasource_cls(
        image_scale=exp_config.image_scale,
        random_seed=exp_config.random_seed,
        # Enable metadata based on model needs.
        use_warp_id=dummy_model.use_warp,
        use_appearance_id=(
            dummy_model.nerf_embed_key == "appearance"
            or dummy_model.hyper_embed_key == "appearance"
        ),
        use_camera_id=dummy_model.nerf_embed_key == "camera",
        use_time=dummy_model.warp_embed_key == "time",
    )
    rng = random.PRNGKey(exp_config.random_seed)
    np.random.seed(exp_config.random_seed + jax.process_index())
    devices_to_use = jax.devices()
    # Schedules are evaluated at step 0 only to build the initial state.
    learning_rate_sched = schedules.from_config(train_config.lr_schedule)
    nerf_alpha_sched = schedules.from_config(train_config.nerf_alpha_schedule)
    warp_alpha_sched = schedules.from_config(train_config.warp_alpha_schedule)
    hyper_alpha_sched = schedules.from_config(
        train_config.hyper_alpha_schedule
    )
    hyper_sheet_alpha_sched = schedules.from_config(
        train_config.hyper_sheet_alpha_schedule
    )
    rng, key = random.split(rng)
    params = {}
    model, params["model"] = models.construct_nerf(
        key,
        batch_size=train_config.batch_size,
        embeddings_dict=datasource.embeddings_dict,
        near=datasource.near,
        far=datasource.far,
        num_attributes=datasource.num_attributes,
    )
    optimizer_def = optim.Adam(learning_rate_sched(0))
    optimizer = optimizer_def.create(params)
    state = model_utils.TrainState(
        optimizer=optimizer,
        nerf_alpha=nerf_alpha_sched(0),
        warp_alpha=warp_alpha_sched(0),
        hyper_alpha=hyper_alpha_sched(0),
        hyper_sheet_alpha=hyper_sheet_alpha_sched(0),
    )
    # A zero-LR optimizer: init_state is only a template for checkpoint restore.
    optimizer_def = optim.Adam(0.0)
    if train_config.use_weight_norm:
        optimizer_def = optim.WeightNorm(optimizer_def)
    optimizer = optimizer_def.create(params)
    init_state = model_utils.TrainState(optimizer=optimizer)
    del params
    # @title Define pmapped render function.
    devices = jax.devices()
    def _model_fn(key_0, key_1, params, rays_dict, extra_params):
        out = model.apply(
            {"params": params},
            rays_dict,
            extra_params=extra_params,
            rngs={"coarse": key_0, "fine": key_1},
            mutable=False,
            metadata_encoded=FLAGS.use_lr,
        )
        return jax.lax.all_gather(out, axis_name="batch")
    pmodel_fn = jax.pmap(
        # Note rng_keys are useless in eval mode since there's no randomness.
        _model_fn,
        in_axes=(0, 0, 0, 0, 0),  # Only distribute the data input.
        devices=devices_to_use,
        axis_name="batch",
    )
    render_fn = functools.partial(
        evaluation.render_image,
        model_fn=pmodel_fn,
        device_count=len(devices),
        chunk=eval_config.chunk,
    )
    # Load the fixed evaluation camera path shipped with the dataset.
    camera_path = Path("camera-paths") / "orbit-mild"
    camera_dir = Path(datasource.data_dir, camera_path)
    logging.info(f"Loading cameras from {camera_dir}")
    test_camera_paths = datasource.glob_cameras(camera_dir)
    test_cameras = utils.parallel_map(
        datasource.load_camera, test_camera_paths, show_pbar=True
    )
    rng = rng + jax.process_index()  # Make random seed separate across hosts.
    _ = random.split(rng, len(devices))
    # Optional linear-regression parameters mapping attributes to embeddings.
    warp_lr_params = None
    hyper_lr_params = None
    if (exp_dir / "warp_lr.npy").exists():
        warp_lr_params = jnp.load(exp_dir / "warp_lr.npy")
    if (exp_dir / "hyper_lr.npy").exists():
        hyper_lr_params = jnp.load(exp_dir / "hyper_lr.npy")
    last_step = 0
    # Poll for new checkpoints and render a full video set for each one.
    while True:
        if not checkpoint_dir.exists():
            logging.info("No checkpoints yet.")
            time.sleep(10)
            continue
        state = checkpoints.restore_checkpoint(checkpoint_dir, init_state)
        state = jax_utils.replicate(state, devices=devices_to_use)
        step = int(state.optimizer.state.step[0])
        if step <= last_step:
            logging.info("No new checkpoints (%d <= %d).", step, last_step)
            time.sleep(10)
            continue
        # NOTE(review): `results` accumulates every rendered frame in memory
        # and is never read -- confirm whether it can be dropped.
        results = []
        renders_dir = exp_dir / "eval_renders"
        if FLAGS.betas is not None:
            betas = map(int, FLAGS.betas.split(","))
        else:
            betas = [0]
        for beta in betas:
            if model.use_attribute_conditioning or FLAGS.use_lr:
                # Sweep every combination of attribute values in {-1, 0, 1}.
                num_attributes = datasource.num_attributes
                attribute_values = [-1.0, 0.0, 1.0]
                attributes_meshgrid = jnp.stack(
                    jnp.meshgrid(*([attribute_values] * num_attributes)),
                    axis=-1,
                ).reshape((-1, num_attributes))
                logging.info(
                    f"{len(attributes_meshgrid)} sequences to generate"
                )
                for attribute_set in attributes_meshgrid:
                    frames = collections.defaultdict(list)
                    for i in range(len(test_cameras)):
                        suffix = (
                            f"_beta_{beta}" if FLAGS.betas is not None else ""
                        )
                        out_dir = renders_dir / (
                            "_".join([f"{attr:.2f}" for attr in attribute_set])
                            + f"_camera_{i}"
                            + suffix
                        )
                        out_dir.mkdir(parents=True, exist_ok=True)
                        logging.info(
                            f"Rendering frame {i+1}/{len(test_cameras)}"
                        )
                        camera = test_cameras[i]
                        batch = datasets.camera_to_rays(camera)
                        # Broadcast the attribute vector over all ray pixels.
                        batch["metadata"] = {
                            "appearance": jnp.zeros_like(
                                batch["origins"][..., 0, jnp.newaxis],
                                jnp.uint32,
                            )
                            + beta,
                            "warp": jnp.zeros_like(
                                batch["origins"][..., 0, jnp.newaxis],
                                jnp.uint32,
                            )
                            + beta,
                            "hyper_embed": (
                                attribute_set[jnp.newaxis, jnp.newaxis]
                                .repeat(
                                    repeats=batch["origins"].shape[0], axis=0
                                )
                                .repeat(
                                    repeats=batch["origins"].shape[1], axis=1
                                )
                            ),
                        }
                        if FLAGS.use_lr:
                            # Pre-encode embeddings via the LR matrices.
                            encodings = {}
                            if warp_lr_params is not None:
                                encodings["encoded_warp"] = (
                                    batch["metadata"]["hyper_embed"]
                                    @ warp_lr_params
                                )
                            if hyper_lr_params is not None:
                                encodings["encoded_hyper"] = (
                                    batch["metadata"]["hyper_embed"]
                                    @ hyper_lr_params
                                )
                            elif model.hyper_use_warp_embed:
                                encodings["encoded_hyper"] = (
                                    batch["metadata"]["hyper_embed"]
                                    @ warp_lr_params
                                )
                            batch["metadata"] = encodings
                        render = render_fn(state, batch, rng=rng)
                        rgb = np.array(render["rgb"])
                        depth_med = np.array(render["med_depth"])
                        results.append((rgb, depth_med))
                        depth_viz = viz.colorize(
                            depth_med.squeeze(),
                            cmin=datasource.near,
                            cmax=datasource.far,
                            invert=True,
                        )
                        frames["rgb"].append(image_utils.image_to_uint8(rgb))
                        frames["depth"].append(
                            image_utils.image_to_uint8(depth_viz)
                        )
                        if "attribute_rgb" in render:
                            # Per-attribute masks plus an RGB overlay.
                            attribute = np.array(
                                nn.sigmoid(render["attribute_rgb"])
                            )
                            mask = np.concatenate(
                                np.split(
                                    attribute,
                                    indices_or_sections=num_attributes,
                                    axis=-1,
                                ),
                                axis=1,
                            ).repeat(repeats=3, axis=-1)
                            overlay = np.clip(
                                (
                                    attribute.max(axis=-1, keepdims=True) * 0.5
                                    + rgb * 0.5
                                ),
                                a_min=0.0,
                                a_max=1.0,
                            )
                            frames["mask"].append(
                                image_utils.image_to_uint8(mask)
                            )
                            frames["overlay"].append(
                                image_utils.image_to_uint8(overlay)
                            )
                        for key, vals in frames.items():
                            PIL.Image.fromarray(vals[-1]).save(
                                (
                                    out_dir / "{}_{:04d}.png".format(key, i)
                                ).as_posix()
                            )
                    fps = "30"
                    for key, vals in frames.items():
                        skvideo.io.vwrite(
                            (out_dir / f"{key}.mp4").as_posix(),
                            vals,
                            inputdict={"-r": fps},
                            outputdict={"-r": fps},
                        )
            else:
                # No attribute conditioning: render each camera once.
                frames = collections.defaultdict(list)
                suffix = f"_beta_{beta}" if FLAGS.betas is not None else ""
                for i in range(len(test_cameras)):
                    out_dir = renders_dir / (f"camera_{i}" + suffix)
                    out_dir.mkdir(parents=True, exist_ok=True)
                    logging.info(f"Rendering frame {i+1}/{len(test_cameras)}")
                    camera = test_cameras[i]
                    batch = datasets.camera_to_rays(camera)
                    # NOTE(review): 78 looks like a hard-coded embedding id
                    # specific to one dataset -- confirm.
                    batch["metadata"] = {
                        "appearance": jnp.zeros_like(
                            batch["origins"][..., 0, jnp.newaxis], jnp.uint32
                        )
                        + 78,
                        "warp": jnp.zeros_like(
                            batch["origins"][..., 0, jnp.newaxis], jnp.uint32
                        )
                        + 78,
                    }
                    render = render_fn(state, batch, rng=rng)
                    rgb = np.array(render["rgb"])
                    depth_med = np.array(render["med_depth"])
                    results.append((rgb, depth_med))
                    depth_viz = viz.colorize(
                        depth_med.squeeze(),
                        cmin=datasource.near,
                        cmax=datasource.far,
                        invert=True,
                    )
                    frames["rgb"].append(image_utils.image_to_uint8(rgb))
                    frames["depth"].append(
                        image_utils.image_to_uint8(depth_viz)
                    )
                    for key, vals in frames.items():
                        PIL.Image.fromarray(vals[-1]).save(
                            (
                                out_dir / "{}_{:04d}.png".format(key, i)
                            ).as_posix()
                        )
                fps = "30"
                for key, vals in frames.items():
                    skvideo.io.vwrite(
                        (out_dir / f"{key}.mp4").as_posix(),
                        vals,
                        inputdict={"-r": fps},
                        outputdict={"-r": fps},
                    )
        if step >= train_config.max_steps:
            break
        last_step = step
if __name__ == "__main__":
    # absl parses the flags before delegating to main().
    app.run(main)
| StarcoderdataPython |
from collections import deque
from queue import Queue

import numpy as np
def breath_first_search(graph, start=0):
    """Breadth-first traversal of *graph* starting at vertex *start*.

    Prints each vertex as it is visited (unchanged behaviour) and now
    also returns the visit order as a list, which is backward-compatible
    (the original returned None, which callers ignored).

    Improvements: a ``collections.deque`` replaces the thread-safe
    ``queue.Queue`` (no locking needed for a single-threaded traversal)
    and a plain ``set`` replaces the numpy visited array, so the graph
    no longer needs a ``numVertices`` attribute -- only
    ``get_adjacent_vertices``.
    """
    pending = deque([start])
    visited = set()
    order = []
    while pending:
        vertex = pending.popleft()
        if vertex in visited:
            continue
        print('Visit: ', vertex)
        visited.add(vertex)
        order.append(vertex)
        for v in graph.get_adjacent_vertices(vertex):
            if v not in visited:
                pending.append(v)
    return order
| StarcoderdataPython |
4873947 | import logging
from terra import util_terra
from terra.execute_type import (
_execute_type,
EXECUTE_TYPE_SWAP,
)
from .borrow import (
handle_deposit_borrow,
handle_repay_withdraw,
)
def handle(exporter, elem, txinfo, index):
    """Classify a Miaw contract execution and return its execute type.

    Messages containing an "execute_order" payload are treated as swaps;
    everything else falls through to the generic execute-type detection.
    NOTE(review): `exporter` is currently unused here -- presumably kept
    for signature parity with the other protocol handlers.
    """
    execute_msg = util_terra._execute_msg(elem, index)
    if "execute_order" in execute_msg:
        return EXECUTE_TYPE_SWAP
    execute_type = _execute_type(elem, txinfo, index)
    logging.debug("[miaw] General transaction type=%s txid=%s", execute_type, elem["txhash"])
    return execute_type
363737 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===========================================
Mitigation (:mod:`qiskit.ignis.mitigation`)
===========================================
.. currentmodule:: qiskit.ignis.mitigation
Measurement
===========
The measurement calibration is used to mitigate measurement errors.
The main idea is to prepare all :math:`2^n` basis input states and compute
the probability of measuring counts in the other basis states.
From these calibrations, it is possible to correct the average results
of another experiment of interest.
.. autosummary::
:toctree: ../stubs/
complete_meas_cal
tensored_meas_cal
MeasurementFilter
TensoredFilter
CompleteMeasFitter
TensoredMeasFitter
"""
from .measurement import (complete_meas_cal, tensored_meas_cal,
MeasurementFilter, TensoredFilter,
CompleteMeasFitter, TensoredMeasFitter)
| StarcoderdataPython |
3516436 | <reponame>mario21ic/python_curso
# -*- coding: utf-8 -*-
# Demonstrates Python's conditional (ternary) expression.
# Fixed: the original used Python-2-only `print` statements, which are a
# SyntaxError on Python 3; print() calls produce identical output on both.
print("#" * 3 + " Operador ternario " + "#" * 3)
var1 = 'valor1'
# var2 is 'valor2' when var1 == 'valor1', otherwise 'valor3'.
var2 = 'valor2' if var1 == 'valor1' else 'valor3'
print("var2:", var2)
| StarcoderdataPython |
235921 | # Copyright 2018 ParallelM, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import argparse
import numpy as ny
import os.path
import sys
import time
import pickle
from mnist_stream_input import MnistStreamInput
### MLOPS start
from parallelm.mlops import mlops
from parallelm.mlops.mlops_mode import MLOpsMode
from parallelm.mlops.predefined_stats import PredefinedStats
from parallelm.mlops.stats.table import Table
from parallelm.mlops.stats.bar_graph import BarGraph
### MLOPS end
def add_parameters(parser):
    """Attach all CLI options for the MNIST inference job to *parser*."""
    # Input configuration
    parser.add_argument("--randomize_input", dest="random", default=False, action='store_true')
    parser.add_argument("--total_records", type=int, dest="total_records", default=1000, required=False)
    parser.add_argument("--input_dir",
                        dest="input_dir",
                        type=str,
                        required=False,
                        help='Directory for caching input data',
                        default="/tmp/mnist_data")
    # Where to save predictions
    parser.add_argument("--output_file",
                        dest="output_file",
                        type=str,
                        required=False,
                        help='File for output predictions',
                        default="/tmp/mnist_predictions")
    # Model configuration
    parser.add_argument("--model_dir", dest="model_dir", type=str, required=True)
    # Stats arguments
    parser.add_argument("--stats_interval", dest="stats_interval", type=int, default=100, required=False)
    # Alerting configuration
    parser.add_argument("--conf_thresh", dest="conf_thresh", help='Confidence threshold for raising alerts',
                        type=int, default=50, required=False)
    parser.add_argument("--conf_percent", dest="conf_percent", help='Confidence percent for raising alerts',
                        type=int, default=10, required=False)
def infer_loop(model, input, output_file, stats_interval, conf_thresh, conf_percent):
    """Run predictions over the input stream and report distributions.

    Args:
        model: fitted classifier exposing ``predict``.
        input: stream whose ``get_next_input()`` raises EOFError at end.
        output_file: path that receives one prediction per line.
        stats_interval: predictions between console / MLOps reports.
        conf_thresh, conf_percent: accepted for CLI compatibility; the
            low-confidence alerting they were meant to configure is not
            implemented in this loop.

    Fixes over the original:
    * ``ny.int(...)`` -- the ``np.int`` alias was removed in NumPy 1.24,
      so this crashed on modern numpy; the builtin ``int`` is used.
    * the output file is managed by ``with`` so it is closed even when
      an unexpected exception escapes the loop.
    * the ``try`` now wraps only the call that can raise EOFError.
    * removed the unused ``low_confidence_predictions`` counter.
    """
    categories = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    total_predictions = 0
    prediction_hist = [0] * len(categories)

    ### MLOPS start
    # Create a bar graph and table for reporting prediction distributions
    infer_bar = BarGraph().name("Prediction Distribution Bar Graph").cols(categories)
    infer_tbl = Table().name("Prediction Distribution Table").cols(categories)
    ### MLOPS end

    with open(output_file, "w") as output:
        while True:
            try:
                sample, label = input.get_next_input()
            except EOFError:
                # End of stream: publish the final stats and finish.
                mlops.set_stat(infer_tbl)
                mlops.set_stat(infer_bar)
                mlops.done()
                break

            sample_np = ny.array(sample).reshape(1, -1)
            # The prediction is the class with the highest probability
            prediction = model.predict(sample_np)
            # Append the prediction to the output file
            output.write("{}\n".format(prediction))

            total_predictions += 1
            prediction_hist[int(prediction[0])] += 1

            # Report statistics periodically.
            if total_predictions % stats_interval == 0:
                for i in range(0, len(categories)):
                    print("category: {} predictions: {}".format(categories[i], prediction_hist[i]))
                # Show the prediction distribution as a table and bar graph.
                infer_tbl.add_row(str(total_predictions), prediction_hist)
                infer_bar.data(prediction_hist)
def main():
    """Entry point: parse args, load the pickled model, run inference.

    Fixes over the original: ``mlops.init()`` was called twice in a row
    (only one call is needed), and the unused ``test_data`` local that
    reached into the input's private ``_samples`` attribute is removed.
    """
    parser = argparse.ArgumentParser()
    add_parameters(parser)
    args = parser.parse_args()

    ### MLOPS start
    # Initialize mlops once.
    mlops.init()
    ### MLOPS end

    if args.model_dir is not None:
        try:
            filename = args.model_dir
            file_obj = open(filename, 'rb')
            mlops.set_stat("model_file", 1)
        except Exception as e:
            print("Model not found")
            print("Got exception: {}".format(e))
            mlops.set_stat("model_file", 0)
            mlops.done()
            return 0

    # load the model
    model = pickle.load(file_obj)

    # get the input
    input = MnistStreamInput(args.input_dir, args.total_records, args.random)
    print('args.total_records:', args.total_records)  # debug output

    # perform inferences on the input
    infer_loop(model, input, args.output_file, args.stats_interval,
               args.conf_thresh, args.conf_percent)

    del model
    del input
if __name__ == "__main__":
    # TF serving client API currently only supports python 2.7
    # NOTE(review): this assertion makes the script refuse to run on any
    # Python 3 interpreter -- confirm it is still required.
    assert sys.version_info >= (2, 7) and sys.version_info < (2, 8)
    main()
| StarcoderdataPython |
333883 | <reponame>heckad/sqlalchemy
#!/usr/bin/env python
"""
pytest plugin script.
This script is an extension to pytest which
installs SQLAlchemy's testing plugin into the local environment.
"""
import os
import sys
import pytest
# Surface SQLAlchemy 2.0 deprecation warnings during the test run.
os.environ["SQLALCHEMY_WARN_20"] = "true"
collect_ignore_glob = []
pytest.register_assert_rewrite("sqlalchemy.testing.assertions")
if not sys.flags.no_user_site:
    # this is needed so that test scenarios like "python setup.py test"
    # work correctly, as well as plain "pytest". These commands assume
    # that the package in question is locally present, but since we have
    # ./lib/, we need to punch that in.
    # We check no_user_site to honor the use of this flag.
    sys.path.insert(
        0,
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "lib"),
    )
# use bootstrapping so that test plugins are loaded
# without touching the main library before coverage starts
bootstrap_file = os.path.join(
    os.path.dirname(__file__),
    "..",
    "lib",
    "sqlalchemy",
    "testing",
    "plugin",
    "bootstrap.py",
)
with open(bootstrap_file) as f:
    code = compile(f.read(), "bootstrap.py", "exec")
    to_bootstrap = "pytest"
    # bootstrap.py reads `to_bootstrap` and defines `pytestplugin`.
    exec(code, globals(), locals())
    from pytestplugin import *  # noqa
| StarcoderdataPython |
215776 | <gh_stars>10-100
import pytest
from lms.models import ApplicationInstance
from lms.views.application_instances import (
create_application_instance,
new_application_instance,
)
class TestCreateApplicationInstance:
    """Tests for the create_application_instance admin view."""
    def test_it_creates_an_application_instance(self, pyramid_request):
        create_application_instance(pyramid_request)
        application_instance = pyramid_request.db.query(ApplicationInstance).one()
        assert application_instance.lms_url == "canvas.example.com"
        assert application_instance.requesters_email == "<EMAIL>"
        assert application_instance.developer_key is None
        assert application_instance.developer_secret is None
    def test_it_saves_the_Canvas_developer_key_and_secret_if_given(
        self, pyramid_request
    ):
        pyramid_request.params["developer_key"] = "example_key"
        pyramid_request.params["developer_secret"] = "example_secret"
        create_application_instance(pyramid_request)
        application_instance = pyramid_request.db.query(ApplicationInstance).one()
        assert application_instance.developer_key == "example_key"
        # The secret is stored encrypted, so only its presence is checked.
        assert application_instance.developer_secret
    @pytest.mark.parametrize(
        "developer_key,developer_secret",
        [
            # A developer key is given but no secret. Neither should be saved.
            ("example_key", ""),
            # A developer secret is given but no key. Neither should be saved.
            ("", "example_secret"),
        ],
    )
    def test_if_developer_key_or_secret_is_missing_it_doesnt_save_either(
        self, pyramid_request, developer_key, developer_secret
    ):
        pyramid_request.params["developer_key"] = developer_key
        pyramid_request.params["developer_secret"] = developer_secret
        create_application_instance(pyramid_request)
        application_instance = pyramid_request.db.query(ApplicationInstance).one()
        assert application_instance.developer_key is None
        assert application_instance.developer_secret is None
    @pytest.mark.parametrize(
        "developer_key,canvas_sections_enabled",
        [("test_developer_key", True), ("", False)],
    )
    def test_it_sets_canvas_sections_enabled(
        self, pyramid_request, developer_key, canvas_sections_enabled
    ):
        # Sections are enabled exactly when a developer key was supplied.
        pyramid_request.params["developer_key"] = developer_key
        pyramid_request.params["developer_secret"] = "test_developer_secret"
        create_application_instance(pyramid_request)
        application_instance = pyramid_request.db.query(ApplicationInstance).one()
        assert (
            bool(application_instance.settings.get("canvas", "sections_enabled"))
            == canvas_sections_enabled
        )
    @pytest.mark.parametrize(
        "developer_key,canvas_groups_enabled",
        [("test_developer_key", True), ("", False)],
    )
    def test_it_sets_canvas_groups_enabled(
        self, pyramid_request, developer_key, canvas_groups_enabled
    ):
        # Groups follow the same developer-key gating as sections.
        pyramid_request.params["developer_key"] = developer_key
        pyramid_request.params["developer_secret"] = "test_developer_secret"
        create_application_instance(pyramid_request)
        application_instance = pyramid_request.db.query(ApplicationInstance).one()
        assert (
            bool(application_instance.settings.get("canvas", "groups_enabled"))
            == canvas_groups_enabled
        )
    @pytest.fixture
    def pyramid_request(self, pyramid_request):
        # Simulate a POSTed "create instance" form with empty key/secret.
        pyramid_request.method = "POST"
        pyramid_request.params = {
            "lms_url": "canvas.example.com",
            "email": "<EMAIL>",
            "developer_key": "",
            "developer_secret": "",
        }
        return pyramid_request
class TestNewApplicationInstance:
    """Smoke test for the new_application_instance form view."""
    def test_it(self, pyramid_request):
        # The view returns a falsey (empty) mapping for the template.
        assert not new_application_instance(pyramid_request)
| StarcoderdataPython |
8028342 | import enum
class API_ENV(enum.Enum):
    """Deployment environment selector for the API."""
    DEVELOPMENT = "development"
    PRODUCTION = "production"
class API_CONFIG(enum.Enum):
    """Names of the configuration keys the API reads."""
    SECRET_KEY = "SECRET_KEY"
    DEBUG = "DEBUG"
    TESTING = "TESTING"
| StarcoderdataPython |
4907224 | <filename>rb/cna/cna_graph.py<gh_stars>1-10
from typing import Callable, Dict, List, Tuple, Union
import networkx as nx
import numpy as np
from networkx.algorithms.link_analysis.pagerank_alg import pagerank
from rb.cna.edge_type import EdgeType
from rb.cna.overlap_type import OverlapType
from rb.core.block import Block
from rb.core.cscl.community import Community
from rb.core.cscl.contribution import Contribution
from rb.core.cscl.conversation import Conversation
from rb.core.document import Document
from rb.core.lang import Lang
from rb.core.meta_document import MetaDocument
from rb.core.pos import POS
from rb.core.text_element import TextElement
from rb.core.word import Word
from rb.similarity.vector_model import VectorModel
class CnaGraph:
    """Cohesion Network Analysis graph over one or more documents.

    Nodes are text elements (documents, conversations, blocks,
    sentences); edges carry structural (PART_OF, ADJACENT, EXPLICIT,
    COREF), semantic (per vector model) and lexical-overlap relations.
    A filtered projection keeps only the strong/special edges and feeds
    a PageRank-based importance score per element.
    """
    def __init__(self, docs: Union[TextElement, List[TextElement]], models: List[VectorModel]):
        """Build the full multigraph for *docs* using *models* for similarity."""
        if isinstance(docs, TextElement):
            docs = [docs]
        self.graph = nx.MultiDiGraph()
        self.models = models
        if all(isinstance(doc, Community) or isinstance(doc, MetaDocument) for doc in docs):
            # CSCL case: link conversations to their community/meta document.
            for doc in docs:
                self.graph.add_node(doc)
                for conv in doc.components:
                    nodes = self.add_element(conv)
                    for model in self.models:
                        sim = model.similarity(conv, doc)
                        self.graph.add_edge(conv, doc, type=EdgeType.SEMANTIC, model=model, value=sim)
                    self.graph.add_edge(conv, doc, type=EdgeType.PART_OF)
                    self.add_links(nodes)
        else:
            for doc in docs:
                self.add_element(doc)
            self.add_links(self.graph.nodes)
        self.add_coref_links()
        self.add_explicit_links()
        self.filtered_graph = self.create_filtered_graph()
        self.importance = self.compute_importance()
    def add_links(self, nodes):
        """Add lexical-overlap and semantic links between same-depth nodes."""
        # Group non-word nodes by their depth in the element hierarchy.
        levels = dict()
        for n in nodes:
            if not n.is_word():
                if n.depth in levels:
                    levels[n.depth].append(n)
                else:
                    levels[n.depth] = [n]
        for depth, elements in levels.items():
            self.add_lexical_links(elements, lambda w: w.pos in [POS.ADJ, POS.ADV, POS.NOUN, POS.VERB], OverlapType.CONTENT_OVERLAP)
            self.add_lexical_links(elements, lambda w: w.pos in [POS.NOUN, POS.VERB], OverlapType.TOPIC_OVERLAP)
            self.add_lexical_links(elements, lambda w: w.pos in [POS.NOUN, POS.PRON], OverlapType.ARGUMENT_OVERLAP)
            self.add_semantic_links(elements)
    def add_element(self, element: TextElement) -> List[TextElement]:
        """Recursively add *element* and its children; return all added nodes."""
        self.graph.add_node(element)
        result = [element]
        if not element.is_sentence():
            for child in element.components:
                result += self.add_element(child)
                for model in self.models:
                    sim = model.similarity(child, element)
                    self.graph.add_edge(child, element, type=EdgeType.SEMANTIC, model=model, value=sim)
                self.graph.add_edge(child, element, type=EdgeType.PART_OF)
            # Link consecutive siblings as ADJACENT.
            self.graph.add_edges_from(zip(element.components[:-1], element.components[1:]), type=EdgeType.ADJACENT)
        return result
    def add_semantic_links(self, elements: List[TextElement]):
        """Add symmetric SEMANTIC edges for every element pair and model."""
        for i, a in enumerate(elements[:-1]):
            for b in elements[i+1:]:
                for model in self.models:
                    sim = model.similarity(a, b)
                    self.graph.add_edge(a, b, type=EdgeType.SEMANTIC, model=model, value=sim)
                    self.graph.add_edge(b, a, type=EdgeType.SEMANTIC, model=model, value=sim)
    def add_lexical_links(self, elements: List[TextElement], test: Callable[[Word], bool], link_type: OverlapType):
        """Add Jaccard lemma-overlap edges over words passing *test*."""
        words = {element: {word.lemma for word in element.get_words() if test(word)} for element in elements}
        for i, a in enumerate(elements[:-1]):
            for b in elements[i+1:]:
                words_a = words[a]
                words_b = words[b]
                # Jaccard index; epsilon guards the empty-union case.
                weight = len(words_a & words_b) / (1e-5 + len(words_a | words_b))
                self.graph.add_edge(a, b, type=EdgeType.LEXICAL_OVERLAP, model=link_type, value=weight)
                self.graph.add_edge(b, a, type=EdgeType.LEXICAL_OVERLAP, model=link_type, value=weight)
    def add_coref_links(self):
        """Add COREF edges between blocks linked by coreference mentions."""
        for node in self.graph.nodes():
            if isinstance(node, Block):
                if node.has_coref:
                    for cluster in node.coref_clusters:
                        for mention in cluster.mentions:
                            if mention != cluster.main and mention.container != cluster.main.container:
                                edge = self.get_edge(mention.container, cluster.main.container, edge_type=EdgeType.COREF)
                                if edge is None:
                                    self.graph.add_edge(mention.container, cluster.main.container, type=EdgeType.COREF, details=[(mention.text, cluster.main.text)])
                                else:
                                    # Accumulate mention pairs on the edge.
                                    edge["details"].append((mention.text, cluster.main.text))
    def add_explicit_links(self):
        """Add EXPLICIT reply-to edges for CSCL contributions."""
        explicit_links = []
        for node in self.graph.nodes:
            if isinstance(node, Contribution):
                parent = node.get_parent()
                explicit_links.append((node, parent))
        for node, parent in explicit_links:
            self.graph.add_edge(node, parent, type=EdgeType.EXPLICIT)
    def is_coref_edge(self, a: Block, b: Block) -> bool:
        """True when a COREF edge runs from *a* to *b*."""
        for _, x, _ in self.edges(a, edge_type=EdgeType.COREF):
            if x is b:
                return True
        return False
    def is_explicit_edge(self, a: Block, b: Block) -> bool:
        """True when an EXPLICIT edge runs from *a* to *b*."""
        for _, x, _ in self.edges(a, edge_type=EdgeType.EXPLICIT):
            if x is b:
                return True
        return False
    def create_filtered_graph(self) -> nx.DiGraph:
        """Project to a simple digraph keeping strong or special edges.

        An edge survives when its mean semantic similarity exceeds
        mean + stdev of all same-depth similarities, or when any COREF /
        PART_OF / EXPLICIT edge connects the pair.
        """
        similarities = [
            value
            for a, b, value in self.edges(None, edge_type=EdgeType.SEMANTIC)
            if a.depth == b.depth]
        mean = np.mean(similarities)
        stdev = np.std(similarities)
        filtered_graph = nx.DiGraph()
        for node in self.graph.nodes:
            filtered_graph.add_node(node)
        for a in filtered_graph.nodes():
            for b in filtered_graph.nodes():
                if a != b:
                    values = []
                    special = False
                    edges = self.graph.get_edge_data(a, b)
                    for data in edges.values() if edges else []:
                        if data["type"] is EdgeType.SEMANTIC:
                            values.append(data["value"])
                        elif data["type"] in [EdgeType.COREF, EdgeType.PART_OF, EdgeType.EXPLICIT]:
                            special = True
                    if len(values) == 0:
                        continue
                    value = np.mean(values)
                    if special or value > mean + stdev:
                        filtered_graph.add_edge(a, b, weight=value)
        return filtered_graph
    def compute_importance(self) -> Dict[TextElement, float]:
        """PageRank on the filtered graph, scaled by the node count."""
        return {
            node: value * len(self.filtered_graph)
            for node, value in pagerank(self.filtered_graph, max_iter=10000, tol=1e-4).items()
        }
    def edges(self,
              node: Union[TextElement, Tuple[TextElement, TextElement]],
              edge_type: EdgeType = None,
              vector_model: Union[VectorModel, OverlapType] = None) -> List[Tuple[TextElement, TextElement, float]]:
        """List (a, b, value) edges from *node* (or between a node pair),
        optionally filtered by edge type and model/overlap kind."""
        if isinstance(node, tuple):
            edges = self.graph.get_edge_data(node[0], node[1])
            if not edges:
                return []
            edges = ((node[0], node[1], data) for data in edges.values())
        else:
            edges = self.graph.edges(node, data=True)
        return [(a, b, data["value"] if "value" in data else 0)
                for a, b, data in edges
                if (edge_type is None or data["type"] is edge_type) and
                (vector_model is None or data["model"] is vector_model)
                ]
    def get_edge(self, a: TextElement, b: TextElement, edge_type: EdgeType) -> Dict:
        """Return the data dict of the first a->b edge of *edge_type*,
        or None when no such edge exists."""
        # NOTE(review): this first lookup is unused (and raises KeyError if
        # a->b has no edges at all, unlike the guarded .get_edge_data form).
        edge = self.graph[a][b]
        for data in self.graph[a][b].values():
            if (data["type"] is edge_type):
                return data
        return None
| StarcoderdataPython |
1831046 | <filename>autograd/initializers/initializer.py
from autograd import Tensor
class Initializer(object):
    """Base class for tensor initializers.

    Subclasses implement ``initialize_`` to produce a value for a given
    shape; calling the instance validates the shape first.
    (Fixed: dropped the pointless ``f`` prefixes on format-free string
    literals; the one with ``{shape=}`` keeps it.)
    """

    def __call__(self, shape=None):
        """Validate *shape* (must be a tuple) and delegate to initialize_.

        Raises:
            ValueError: if shape is None or not a tuple.
        """
        if shape is None:
            raise ValueError(
                'can not initialize Tensor when shape is None')
        if not isinstance(shape, tuple):
            raise ValueError(
                f'provided shape is not a tuple, {shape=}')
        return self.initialize_(shape)

    def initialize_(self, shape):
        """Subclass hook: produce the initialized value for *shape*."""
        raise NotImplementedError(
            'user defined Initializer has not implemented initialize_ func')
| StarcoderdataPython |
# Bot API token (placeholder — presumably a Telegram bot token; keep it
# out of version control).  TODO confirm against the bot code.
BOT_TOKEN = '<KEY>'
# find your timezone here -> https://gist.github.com/heyalexej/8bf688fd67d7199be4a1682b3eec7568
TIMEZONE = 'Asia/Hong_Kong'
# 24-hour, leave empty to disable
NOTIFY_TIME = '23:00'
| StarcoderdataPython |
8104711 | <reponame>N5GEH/FiLiP<gh_stars>1-10
"""
Autogenerated Models for the vocabulary described by the ontologies:
http://www.semanticweb.org/building (building circuits)
"""
from enum import Enum
from typing import Dict, Union, List
from filip.semantics.semantics_models import\
SemanticClass,\
SemanticIndividual,\
RelationField,\
DataField,\
SemanticDeviceClass,\
DeviceAttributeField,\
CommandField
from filip.semantics.semantics_manager import\
SemanticsManager,\
InstanceRegistry
# Shared manager through which every generated class registers and
# resolves its instances.
semantic_manager: SemanticsManager = SemanticsManager(
    instance_registry=InstanceRegistry(),
)
# ---------CLASSES--------- #
class Thing(SemanticClass):
    """
    Predefined root_class

    Source:
    None (Predefined)
    """

    def __new__(cls, *args, **kwargs):
        # Inject the module-level semantics manager into every instance.
        kwargs['semantic_manager'] = semantic_manager
        return super().__new__(cls, *args, **kwargs)

    def __init__(self, *args, **kwargs):
        kwargs['semantic_manager'] = semantic_manager
        # Unlike the generated subclasses, Thing declares no fields, so the
        # generator's "is_initialised" bookkeeping was dead code here and
        # has been removed.
        super().__init__(*args, **kwargs)
class Building(Thing):
    """
    A building: has a name, a goal temperature and at least one Floor.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.goalTemperature._rules = [('exactly|1', [['integer']])]
            self.name._rules = [('exactly|1', [['string']])]
            self.hasFloor._rules = [('min|1', [[Floor]])]
            self.hasFloor._instance_identifier = self.get_identifier()
            self.goalTemperature._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
    # Data fields
    goalTemperature: DataField = DataField(
        name='goalTemperature',
        rule='exactly 1 integer',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    # Relation fields
    hasFloor: RelationField = RelationField(
        name='hasFloor',
        rule='min 1 Floor',
        semantic_manager=semantic_manager)
class Circuit(Thing):
    """
    A circuit: has a name, at least one Outlet and at least one Producer.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.name._rules = [('exactly|1', [['string']])]
            self.hasOutlet._rules = [('min|1', [[Outlet]])]
            self.hasProducer._rules = [('min|1', [[Producer]])]
            self.hasOutlet._instance_identifier = self.get_identifier()
            self.hasProducer._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
    # Data fields
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    # Relation fields
    hasOutlet: RelationField = RelationField(
        name='hasOutlet',
        rule='min 1 Outlet',
        inverse_of=['connectedTo'],
        semantic_manager=semantic_manager)
    hasProducer: RelationField = RelationField(
        name='hasProducer',
        rule='min 1 Producer',
        semantic_manager=semantic_manager)
class Floor(Thing):
    """
    A floor: has a name and contains only Room instances.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.name._rules = [('exactly|1', [['string']])]
            self.hasRoom._rules = [('only', [[Room]])]
            self.hasRoom._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
    # Data fields
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    # Relation fields
    hasRoom: RelationField = RelationField(
        name='hasRoom',
        rule='only Room',
        semantic_manager=semantic_manager)
class Outlet(SemanticDeviceClass, Thing):
    """
    An outlet device: connected to at least one Circuit and exactly one
    Room, with a control command and a state attribute.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.connectedTo._rules = [('min|1', [[Circuit]]), ('exactly|1', [[Room]])]
            self.connectedTo._instance_identifier = self.get_identifier()
            self.controlCommand._instance_identifier = self.get_identifier()
            self.state._instance_identifier = self.get_identifier()
    # Data fields
    controlCommand: CommandField = CommandField(
        name='controlCommand',
        semantic_manager=semantic_manager)
    state: DeviceAttributeField = DeviceAttributeField(
        name='state',
        semantic_manager=semantic_manager)
    # Relation fields
    connectedTo: RelationField = RelationField(
        name='connectedTo',
        rule='min 1 Circuit, exactly 1 Room',
        inverse_of=['hasOutlet'],
        semantic_manager=semantic_manager)
class Producer(SemanticDeviceClass, Thing):
    """
    A producer device: has a name, a control command and a state attribute.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.name._rules = [('exactly|1', [['string']])]
            self.controlCommand._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
            self.state._instance_identifier = self.get_identifier()
    # Data fields
    controlCommand: CommandField = CommandField(
        name='controlCommand',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    state: DeviceAttributeField = DeviceAttributeField(
        name='state',
        semantic_manager=semantic_manager)
class AirProducer(Producer):
    """
    Producer subtype (air); the generator re-declares the parent's fields
    identically.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.name._rules = [('exactly|1', [['string']])]
            self.controlCommand._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
            self.state._instance_identifier = self.get_identifier()
    # Data fields
    controlCommand: CommandField = CommandField(
        name='controlCommand',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    state: DeviceAttributeField = DeviceAttributeField(
        name='state',
        semantic_manager=semantic_manager)
class ColdProducer(Producer):
    """
    Producer subtype (cold); the generator re-declares the parent's fields
    identically.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.name._rules = [('exactly|1', [['string']])]
            self.controlCommand._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
            self.state._instance_identifier = self.get_identifier()
    # Data fields
    controlCommand: CommandField = CommandField(
        name='controlCommand',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    state: DeviceAttributeField = DeviceAttributeField(
        name='state',
        semantic_manager=semantic_manager)
class HeatProducer(Producer):
    """
    Producer subtype (heat); the generator re-declares the parent's fields
    identically.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.name._rules = [('exactly|1', [['string']])]
            self.controlCommand._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
            self.state._instance_identifier = self.get_identifier()
    # Data fields
    controlCommand: CommandField = CommandField(
        name='controlCommand',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    state: DeviceAttributeField = DeviceAttributeField(
        name='state',
        semantic_manager=semantic_manager)
class Room(Thing):
    """
    A room: has a name, goal temperature and volume, and may hold outlets,
    sensors and tenants.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.goalTemperature._rules = [('exactly|1', [['integer']])]
            self.name._rules = [('exactly|1', [['string']])]
            self.volume._rules = [('some', [['rational']])]
            self.hasOutlet._rules = [('only', [[Outlet]])]
            self.hasSensor._rules = [('only', [[Sensor]])]
            self.hasTenant._rules = [('only', [[Tenant]])]
            self.hasOutlet._instance_identifier = self.get_identifier()
            self.hasSensor._instance_identifier = self.get_identifier()
            self.hasTenant._instance_identifier = self.get_identifier()
            self.goalTemperature._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
            self.volume._instance_identifier = self.get_identifier()
    # Data fields
    goalTemperature: DataField = DataField(
        name='goalTemperature',
        rule='exactly 1 integer',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
    volume: DataField = DataField(
        name='volume',
        rule='some rational',
        semantic_manager=semantic_manager)
    # Relation fields
    hasOutlet: RelationField = RelationField(
        name='hasOutlet',
        rule='only Outlet',
        inverse_of=['connectedTo'],
        semantic_manager=semantic_manager)
    hasSensor: RelationField = RelationField(
        name='hasSensor',
        rule='only Sensor',
        semantic_manager=semantic_manager)
    hasTenant: RelationField = RelationField(
        name='hasTenant',
        rule='only Tenant',
        semantic_manager=semantic_manager)
class Sensor(SemanticDeviceClass, Thing):
    """
    A sensor device: measures exactly one MeasurementType in exactly one
    Unit, exposing the reading through the 'measurement' attribute.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.measures._rules = [('exactly|1', [['MeasurementType']])]
            self.unit._rules = [('exactly|1', [['Unit']])]
            self.measurement._instance_identifier = self.get_identifier()
            self.measures._instance_identifier = self.get_identifier()
            self.unit._instance_identifier = self.get_identifier()
    # Data fields
    measurement: DeviceAttributeField = DeviceAttributeField(
        name='measurement',
        semantic_manager=semantic_manager)
    measures: DataField = DataField(
        name='measures',
        rule='exactly 1 MeasurementType',
        semantic_manager=semantic_manager)
    unit: DataField = DataField(
        name='unit',
        rule='exactly 1 Unit',
        semantic_manager=semantic_manager)
class Tenant(Thing):
    """
    A tenant: has a name and an individual goal temperature.

    Autogenerated from the ontology:
    http://www.semanticweb.org/building (building circuits)
    """
    def __init__(self, *args, **kwargs):
        # Attach field rules only on first initialisation ('id' not yet set).
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.goalTemperature._rules = [('exactly|1', [['integer']])]
            self.name._rules = [('exactly|1', [['string']])]
            self.goalTemperature._instance_identifier = self.get_identifier()
            self.name._instance_identifier = self.get_identifier()
    # Data fields
    goalTemperature: DataField = DataField(
        name='goalTemperature',
        rule='exactly 1 integer',
        semantic_manager=semantic_manager)
    name: DataField = DataField(
        name='name',
        rule='exactly 1 string',
        semantic_manager=semantic_manager)
# ---------Individuals--------- #
class ExampleIndividual(SemanticIndividual):
    """Autogenerated ontology individual; belongs to no parent classes."""
    _parent_classes: List[type] = []
# ---------Datatypes--------- #
# Validation catalogue for the ontology datatypes: kind ('enum', 'number',
# 'string', 'date'), allowed/forbidden characters, numeric ranges and enum
# members.  Two generated XSD bounds were wrong and are corrected below.
semantic_manager.datatype_catalogue = {
    'MeasurementType': {
        'type': 'enum',
        'enum_values': ['Air_Quality', 'Temperature'],
    },
    'Unit': {
        'type': 'enum',
        'enum_values': ['Celsius', 'Kelvin', 'Relative_Humidity'],
    },
    'rational': {
        'type': 'number',
        'number_decimal_allowed': True,
    },
    'real': {
        'type': 'number',
    },
    'PlainLiteral': {
        'type': 'string',
    },
    'XMLLiteral': {
        'type': 'string',
    },
    'Literal': {
        'type': 'string',
    },
    'anyURI': {
        'type': 'string',
    },
    'base64Binary': {
        'type': 'string',
    },
    'boolean': {
        'type': 'enum',
        'enum_values': ['True', 'False'],
    },
    'byte': {
        'type': 'number',
        'number_range_min': -128,
        'number_range_max': 127,
        'number_has_range': True,
    },
    'dateTime': {
        'type': 'date',
    },
    'dateTimeStamp': {
        'type': 'date',
    },
    'decimal': {
        'type': 'number',
        'number_decimal_allowed': True,
    },
    'double': {
        'type': 'number',
        'number_decimal_allowed': True,
    },
    'float': {
        'type': 'number',
        'number_decimal_allowed': True,
    },
    'hexBinary': {
        'allowed_chars': ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'],
        'type': 'string',
    },
    'int': {
        'type': 'number',
        'number_range_min': -2147483648,
        'number_range_max': 2147483647,
        'number_has_range': True,
    },
    'integer': {
        'type': 'number',
    },
    'language': {
        'type': 'string',
    },
    'long': {
        'type': 'number',
        'number_range_min': -9223372036854775808,
        'number_range_max': 9223372036854775807,
        'number_has_range': True,
    },
    'Name': {
        'type': 'string',
    },
    'NCName': {
        'forbidden_chars': [':'],
        'type': 'string',
    },
    'negativeInteger': {
        'type': 'number',
        'number_range_max': -1,
        'number_has_range': True,
    },
    'NMTOKEN': {
        'type': 'string',
    },
    'nonNegativeInteger': {
        'type': 'number',
        'number_range_min': 0,
        'number_has_range': True,
    },
    'nonPositiveInteger': {
        'type': 'number',
        # Fixed: xsd:nonPositiveInteger includes 0 (was -1, which is
        # the bound for negativeInteger).
        'number_range_max': 0,
        'number_has_range': True,
    },
    'normalizedString': {
        'type': 'string',
    },
    'positiveInteger': {
        'type': 'number',
        # Fixed: xsd:positiveInteger starts at 1 (was 0, which is the
        # bound for nonNegativeInteger).
        'number_range_min': 1,
        'number_has_range': True,
    },
    'short': {
        'type': 'number',
        'number_range_min': -32768,
        'number_range_max': 32767,
        'number_has_range': True,
    },
    'string': {
        'type': 'string',
    },
    'token': {
        'type': 'string',
    },
    'unsignedByte': {
        'type': 'number',
        'number_range_min': 0,
        'number_range_max': 255,
        'number_has_range': True,
    },
    'unsignedInt': {
        'type': 'number',
        'number_range_min': 0,
        'number_range_max': 4294967295,
        'number_has_range': True,
    },
    'unsignedLong': {
        'type': 'number',
        'number_range_min': 0,
        'number_range_max': 18446744073709551615,
        'number_has_range': True,
    },
    'unsignedShort': {
        'type': 'number',
        'number_range_min': 0,
        'number_range_max': 65535,
        'number_has_range': True,
    },
}
class MeasurementType(str, Enum):
    """Enumerated values of the 'MeasurementType' ontology datatype."""
    value_Air_Quality = 'Air_Quality'
    value_Temperature = 'Temperature'
class Unit(str, Enum):
    """Enumerated values of the 'Unit' ontology datatype."""
    value_Celsius = 'Celsius'
    value_Kelvin = 'Kelvin'
    value_Relative_Humidity = 'Relative_Humidity'
# ---------Class Dict--------- #
# Name -> class lookup used by the semantics manager to rebuild instances.
semantic_manager.class_catalogue = {
    'AirProducer': AirProducer,
    'Building': Building,
    'Circuit': Circuit,
    'ColdProducer': ColdProducer,
    'Floor': Floor,
    'HeatProducer': HeatProducer,
    'Outlet': Outlet,
    'Producer': Producer,
    'Room': Room,
    'Sensor': Sensor,
    'Tenant': Tenant,
    'Thing': Thing,
}
# Name -> individual lookup for ontology individuals.
semantic_manager.individual_catalogue = {
    'ExampleIndividual': ExampleIndividual,
}
| StarcoderdataPython |
class RelengException(Exception):
    """Base exception type for releng errors."""
| StarcoderdataPython |
6685907 | <gh_stars>1-10
#! flask/bin/python
from app import app, db
from app.models import *
from trueskill_functions import *
# tests for trueskill_functions
# NOTE(review): manual Python 2 smoke-test script — relies on a live
# Flask app/db context and prints expected behaviour for visual checking
# rather than asserting it.
# Each section states its fixture and the expected TrueSkill population.
print "test0.trueskills is empty, and region is None: should be populated with Global TrueSkill"
# test0 = User(tag="test0")
test0 = check_set_user("test0")
populate_trueskills(test0)
print test0
print test0.tag, test0.trueskills
print test0.trueskills
print '\n'
print "testA.trueskills is empty, and region is populated: should be populated with Global and Region TrueSkill"
testA = check_set_user("testA")
testA.region=Region.query.first()
populate_trueskills(testA)
print testA
print testA.tag, testA.trueskills
print '\n'
print "test1.trueskills contains Global TrueSkill: should remain populated with Global TrueSkill"
test1 = check_set_user("test1")
test1.trueskills.append(TrueSkill(region="Global", mu=22, sigma=9, cons_mu=22-(9*3)))
populate_trueskills(test1)
print test1
print test1.tag, test1.trueskills
print '\n'
print "testB.trueskills contains Global TrueSkill, and region is populated: should add Region TrueSkill and remain populated with Global TrueSkill"
testB = check_set_user("testB")
testB.region=Region.query.first()
testB.trueskills.append(TrueSkill(region="Global", mu=22, sigma=9, cons_mu=22-(9*3)))
populate_trueskills(testB)
print testB
print testB.tag, testB.trueskills
print '\n'
print "test2.trueskills contains Global and Region TrueSkill and region is populated; should remain populated with both TrueSkills"
test2 = check_set_user("test2")
test2.region=Region.query.first()
test2.trueskills.append(TrueSkill(region="Global", mu=22, sigma=9, cons_mu=22-(9*3)))
test2.trueskills.append(TrueSkill(region=test2.region.region, mu=30, sigma=4, cons_mu=30-(4*3)))
populate_trueskills(test2)
print test2
print test2.tag, test2.trueskills
print '\n'
# Pairwise matches: first argument is treated as one player, second the
# other; only ratings shared by both (Global and/or Region) should move.
print "Testing update_rating"
print "test0 vs testA, Global ranking should update"
update_rating(test0, testA)
print '\n'
print "testA vs testB, Both rankings should update"
update_rating(testA, testB)
print '\n'
print "test1 vs testB, Global ranking should update"
update_rating(test1, testB)
print '\n'
print "test2 vs testB, Both rankings should update"
update_rating(test2, testB)
print '\n'
print "test2 vs testA, Both rankings should update"
update_rating(test2, testA)
print '\n'
print "test2 vs test0, Global ranking should update"
update_rating(test2, test0)
print '\n'
# Reset every fixture back to the default rating before leaving.
print "Testing reset_trueskill; all TrueSkill values should be default"
reset_trueskill(test0)
reset_trueskill(testA)
reset_trueskill(test1)
reset_trueskill(testB)
reset_trueskill(test2)
print test0
print testA
print test1
print testB
print test2
5095456 | from selenium import webdriver
from framework.factory.models_enums.page_config import PageConfig
from framework.factory.page_factory import PageFactory
from configuration.configuration import ConfigurationManager
import inspect
class BasePage(PageFactory):
    """Base class to create a page object wired to the configuration file."""
    def __init__(self, driver: webdriver, page_data: PageConfig, can_use_config_url: bool = True) -> None:
        """Initializes the BasePage.

        Args:
            driver: web driver
            page_data: given page data (url, timeout, highlight)
            can_use_config_url: condition parameter to load page url from config file
        """
        # NOTE(review): fragile — assumes frame 1 is a subclass __init__ with
        # a local `self`; the subclass name keys the page-URL lookup below.
        stack = inspect.stack()
        class_name = stack[1][0].f_locals["self"].__class__.__name__
        self.driver = driver  # Required
        self.timeout = page_data.timeout
        self.highlight = page_data.highlight
        if can_use_config_url:
            # Navigate immediately when the config maps this page class to a URL.
            url = ConfigurationManager.getInstance().get_page_url(class_name)
            if url:
                self.driver.get(url)
| StarcoderdataPython |
8090063 | # coding: utf-8
import anyjson
import envoy
import os
import os.path
import setuptools as orig_setuptools
import sys
import string
import operator
import datetime
from itertools import islice, dropwhile, izip_longest
from collections import defaultdict, deque
from orderedset import OrderedSet
from allmychanges.utils import cd, trace
from allmychanges.crawler import _extract_date, _extract_version, RE_BUMP_LINE
from django.utils.encoding import force_str
from twiggy_goodies.threading import log
def do(command):
    """Run *command* through envoy and return the envoy result object.

    Raises AssertionError (carrying the command's stderr) when the
    command exits with a non-zero status.
    """
    # print 'EXEC>', command
    r = envoy.run(force_str(command))
    # Fixed message: the opening quote was never closed and "it's" -> "its".
    assert r.status_code == 0, '"{0}" returned code {1} and here is its stderr: {2}'.format(
        command, r.status_code, r.std_err)
    return r
def process_vcs_message(text):
    """Turn a raw VCS commit message into changelog-ready text.

    Version-bump lines are dropped, leading blank lines are stripped, and
    the remaining lines are joined with <br/> tags.
    """
    kept = (line for line in text.split(u'\n')
            if RE_BUMP_LINE.match(line) is None)
    without_leading_blanks = dropwhile(lambda line: not line, kept)
    return u'<br/>\n'.join(without_leading_blanks)
def find_tagged_versions():
    """Return a map {commit hash -> version number} built from git tags.

    Must be called with the CWD inside the repository; tags whose names
    do not contain a parseable version number are skipped.
    """
    def tag_to_hash(tag):
        # ^{} dereferences annotated tags to the commit they point at.
        r = do('git rev-parse ' + tag + '^{}')
        return r.std_out.strip()
    r = do('git tag')
    tags = r.std_out.split('\n')
    tags = ((tag, _extract_version(tag)) for tag in tags)
    result = dict(
        (tag_to_hash(tag), version)
        for tag, version in tags
        if version is not None)
    return result
def git_history_extractor(path, limit=None):
    """Extract the git history of the repository at *path*.

    Returns a tuple ``(commits, tagged_versions)`` where *commits* maps
    commit hash -> dict with fields `date`, `message`, `hash`, `parents`
    and `checkout` (a callable that checks the commit out and returns the
    working-copy path), plus a 'root' alias for the newest commit.  A
    commit gets a `version` field when a tag with a parseable version
    points at it.  *limit*, when given, truncates the history.
    """
    # Unlikely-to-collide markers used to split `git log` output.
    splitter = '-----======!!!!!!======-----'
    ins = '--!!==!!--'
    with cd(path):
        def gen_checkouter(hash_):
            def checkout():
                with cd(path):
                    result = do('git status --porcelain')
                    # if state is dirty, we just commit these changes
                    # into a orphaned commit
                    if result.std_out:
                        if '??' in result.std_out:
                            do('git clean -f')
                        try:
                            do('git add -u')
                            do('git commit -m "Trash"')
                        except:
                            pass
                    do('git checkout ' + hash_)
                return path
            return checkout
        # IGNORE_TAGS environment flag disables tag-based version hints.
        if os.environ.get('IGNORE_TAGS'):
            tagged_versions = []
        else:
            tagged_versions = find_tagged_versions()
        with log.fields(num_tagged_versions=len(tagged_versions)):
            log.info('Found tagged versions')
        # Each record: hash / author date / body / parent hashes.
        r = do('git log --pretty=format:"%H%n{ins}%n%ai%n{ins}%n%B%n{ins}%n%P%n{splitter}"'.format(ins=ins, splitter=splitter))
        # contains tuples (_hash, date, msg, parents)
        response = r.std_out.decode('utf-8', errors='replace')
        groups = (map(string.strip, group.strip().split(ins))
                  for group in response.split(splitter)[:-1])
        result = (dict(date=_extract_date(date),
                       message=process_vcs_message(msg),
                       checkout=gen_checkouter(_hash),
                       hash=_hash,
                       parents=parents.split())
                  for _hash, date, msg, parents in groups)
        result = list(result)
        if limit:
            result = islice(result, 0, limit)
            result = list(result)
        # `git log` lists newest first, so the first entry is the tip.
        root = result[0]['hash']
        def add_tagged_version(item):
            if item['hash'] in tagged_versions:
                item['version'] = tagged_versions[item['hash']]
            return item
        result = dict((item['hash'], add_tagged_version(item))
                      for item in result)
        result['root'] = result[root]
        def show(hh):
            # Function to view result item by partial hash
            # used with set_trace ;)
            for key, value in result.items():
                if key.startswith(hh):
                    return value
        return result, tagged_versions
def choose_history_extractor(path):
    """Pick the history extractor appropriate for *path*.

    A real repository (filesystem path) uses the git extractor.  A dict is
    the unit-test fixture format: every entry already carries its version,
    which is moved behind a `checkout` callable so fixtures flow through
    the same pipeline as real repositories.
    """
    if not isinstance(path, dict):
        return git_history_extractor

    def test_history_extractor(fixture):
        def as_commit(item):
            commit = item.copy()
            # checkout() plays the role of "check out and return a path":
            # for fixtures it simply hands back the recorded version.
            commit['checkout'] = lambda: item['version']
            del commit['version']
            return commit

        commits = {key: as_commit(item) for key, item in fixture.items()}
        # 'root' must alias the tip commit object, not hold a separate copy.
        commits['root'] = commits[commits['root']['hash']]
        return commits, {}

    return test_history_extractor
def python_version_extractor(path, use_threads=True):
    """Extract a Python package's version by importing its setup.py.

    Runs python_version_extractor_worker either in a subprocess (default;
    isolates the import's side effects from this process) or inline with a
    deque standing in for the queue.  NOTE: despite the name, use_threads
    spawns a *process*, not a thread.
    """
    from multiprocessing import Process, Queue
    from collections import deque
    if use_threads:
        queue = Queue()
        process = Process(target=python_version_extractor_worker,
                          args=(path, queue))
        process.start()
        process.join()
        return queue.get()
    else:
        # Minimal in-process stand-in exposing the same put/get protocol.
        class Queue(deque):
            def put(self, value):
                self.append(value)
            def get(self):
                return self.popleft()
        queue = Queue()
        python_version_extractor_worker(path, queue)
        return queue.get()
def python_version_extractor_worker(path, queue):
    """Worker: put the version found in *path*/setup.py onto *queue*.

    Replaces setuptools/distutils with a recorder so importing setup.py
    captures the setup(**kwargs) metadata instead of running a build.
    The caller reads a single value: the extracted version when setup.py
    exists (possibly None), otherwise the trailing None fallback.
    """
    with cd(path):
        if os.path.exists('setup.py'):
            # Stale bytecode could shadow the checked-out setup.py.
            envoy.run("find . -name '*.pyc' -delete")
            try:
                metadata = {}
                class FakeSetuptools(object):
                    # Record the metadata rather than performing a build.
                    def setup(self, *args, **kwargs):
                        metadata.update(kwargs)
                    # Everything else is proxied to the real setuptools.
                    def __getattr__(self, name):
                        return getattr(orig_setuptools, name)
                sys.modules['distutils.core'] = FakeSetuptools()
                sys.modules['setuptools'] = FakeSetuptools()
                sys.path.insert(0, os.getcwd())
                try:
                    from setup import setup
                except:
                    pass
                version = metadata.get('version')
                queue.put(version)
            finally:
                pass
    # Fallback value; when setup.py was found the caller's single get()
    # returns the version queued above and this extra None is ignored.
    queue.put(None)
def npm_version_extractor(path):
    """Extract the package version from an npm package.json, if present.

    Returns the "version" value, or None when package.json is missing,
    unparsable, or has no version field.
    """
    import json  # stdlib replacement for the abandoned anyjson package

    filename = os.path.join(path, 'package.json')
    if os.path.exists(filename):
        with open(filename) as f:
            try:
                data = json.loads(f.read())
                return data.get('version')
            except Exception:
                # Best effort: a malformed package.json yields no version.
                pass
def choose_version_extractor(path):
    """Pick a version extractor matching the project layout at *path*.

    A dict *path* is the unit-test fixture case: the fixture stores the
    versions directly, so the extractor is the identity function.
    Returns None when no known project layout is recognised.
    """
    if isinstance(path, dict):
        def test_version_extractor(fixture):
            return fixture
        return test_version_extractor

    if os.path.exists(os.path.join(path, 'setup.py')):
        return python_version_extractor
    if os.path.exists(os.path.join(path, 'package.json')):
        return npm_version_extractor
def stop_at_hash_if_needed(hash):
    """Debug hook: drop into pdb when *hash* starts with one of the
    comma-separated prefixes in the STOP_AT_HASH environment variable."""
    configured = os.environ.get('STOP_AT_HASH')
    if not configured:
        return
    for prefix in configured.split(','):
        if hash.startswith(prefix):
            import pdb; pdb.set_trace()
def _add_version_number(commit, extract_version):
    """Return the commit's version, computing and caching it on first use.

    When 'version' is absent, the commit is checked out and
    *extract_version* runs against the resulting working copy; the result
    (possibly None) is stored under commit['version'].
    """
    if 'version' not in commit:
        stop_at_hash_if_needed(commit['hash'])
        working_copy = commit['checkout']()
        commit['version'] = extract_version(working_copy)
    return commit['version']
# TODO we don't need it anymore
def _normalize_version_numbers(commits):
already_seen = defaultdict(int)
previous_number = None
# We need this step to normalize version order
# because sometimes after the merge it is currupted
stop_at_hash = os.environ.get('STOP_AT_HASH')
for commit in commits:
number = commit['version']
if number is not None and previous_number is not None and number != previous_number and number in already_seen:
# fixing commit merged after the version
# was bumped
if stop_at_hash and commit['hash'].startswith(stop_at_hash):
import pdb; pdb.set_trace()
commit['version'] = previous_number
else:
already_seen[number] += 1
previous_number = number
def write_vcs_versions_bin(commits, extract_version):
    """Fill commit['version'] for a *list* of commits by binary search.

    Extracts versions only at range endpoints; when both ends agree the
    whole span inherits that number, otherwise the span is halved and
    recursed.  Ends with _normalize_version_numbers to smooth merges.
    NOTE: `threshold = len(commits) / 2` relies on Python 2 integer
    division.
    """
    # first, we'll skip heading commits without version number
    stop_at_hash = os.environ.get('STOP_AT_HASH')
    idx = 0
    number = _add_version_number(commits[idx], extract_version)
    while number is None:
        idx += 1
        number = _add_version_number(commits[idx], extract_version)
    # and now we need to go through the history recursivly
    # dividing it twice on each step
    def rec(commits, extract_version):
        left_version = commits[0]['version']
        right_version = _add_version_number(commits[-1], extract_version)
        if len(commits) == 2:
            if right_version is None:
                commits[-1]['version'] = left_version
        else:
            if left_version == right_version:
                for commit in commits[1:-1]:
                    if stop_at_hash and commit['hash'].startswith(stop_at_hash):
                        import pdb; pdb.set_trace()
                    commit['version'] = left_version
            else:
                threshold = len(commits) / 2
                # Overlap by one element so endpoint versions are reused.
                rec(commits[:threshold + 1], extract_version)
                rec(commits[threshold:], extract_version)
    rec(commits[idx:], extract_version)
    _normalize_version_numbers(commits)
def _normalize_version_numbers2(commits):
    """Fill version gaps in a {hash -> commit} mapping, in place.

    For each commit lacking a version, walks ancestors breadth-first
    until one with a version (or an already-updated commit) is found,
    then assigns that version (or None) to the whole visited group.
    """
    # fills commits in gaps with 'version' attribute
    updated = set()
    to_update = set()
    to_check = deque()
    def show(hh):
        # Function to view result item by partial hash
        # used with set_trace ;)
        for key, value in commits.items():
            if key.startswith(hh):
                return value
    for commit in commits.values():
        if commit.get('version') is None:
            to_check.clear()
            to_update.clear()
            current = commit
            # Walk until a versioned / already-updated ancestor appears
            # or the frontier is exhausted.
            while current and current.get('version') is None and current['hash'] not in updated:
                to_update.add(current['hash'])
                for hash_ in current['parents']:
                    if hash_ not in to_update:
                        to_check.append(hash_)
                try:
                    # Raises IndexError when the frontier is empty;
                    # .get covers parents outside a limited history.
                    hash_ = to_check.popleft()
                    current = commits.get(hash_)
                except Exception:
                    current = None
            for hash_ in to_update:
                commit = commits.get(hash_)
                commit['version'] = current['version'] if current else None
                updated.add(hash_)
def write_vcs_versions_slowly(commits, extract_version):
    """Brute-force variant: extract a version at every single commit
    (one checkout each), then fill remaining gaps from ancestors."""
    # Fixed: the index produced by enumerate() was never used.
    for commit in commits.values():
        _add_version_number(commit, extract_version)
    _normalize_version_numbers2(commits)
def write_vcs_versions_bin_helper(commits, extract_version):
    """Recursively writes versions to a continuous chain of commits.

    Each commit should be a descendant or ancestor of its nearest
    neighbours.  Trims version-less commits from both ends, then halves
    the range: when both endpoint versions match, the interior inherits
    that number.  NOTE: `len(commits) / 2` relies on Python 2 integer
    division.
    """
    # first, we'll skip commits from head and tail without version numbers
    number = _add_version_number(commits[0], extract_version)
    while number is None:
        commits = commits[1:]
        if not commits:
            break
        number = _add_version_number(commits[0], extract_version)
    if not commits:
        return
    number = _add_version_number(commits[-1], extract_version)
    while number is None:
        commits = commits[:-1]
        if not commits:
            break
        number = _add_version_number(commits[-1], extract_version)
    if not commits:
        return
    # and now we need to go through the history recursivly
    # dividing it twice on each step
    def rec(commits, extract_version):
        left_version = commits[0]['version']
        right_version = _add_version_number(commits[-1], extract_version)
        if len(commits) > 2:
            if left_version and left_version == right_version:
                for commit in commits[1:-1]:
                    commit['version'] = left_version
            else:
                threshold = len(commits) / 2
                # Overlap by one element so endpoint versions are reused.
                rec(commits[:threshold + 1], extract_version)
                rec(commits[threshold:], extract_version)
    rec(commits, extract_version)
def write_vcs_versions_fast(commits, extract_version):
    """Fill versions for a {hash -> commit} graph, branch by branch.

    Starting from 'root', fast-forwards along first parents (up to 200
    commits at a time) collecting a linear span, runs the binary helper
    on it, and queues remaining parents for later.  Gaps are smoothed by
    _normalize_version_numbers2 at the end.
    NOTE: relies on Python 2's eager map() to drive enque(); under
    Python 3 the map(...) calls would never execute.
    """
    queue = deque()
    commit = commits['root']
    covered = set()
    def enque(hash):
        if hash not in covered:
            commit = commits.get(hash)
            if commit is not None:
                queue.append(commit)
    while commit:
        # fast forward
        commits_between = []
        # a hack to reuse version number from a previously calculated commit
        if 'child' in commit:
            commits_between.append(commit['child'])
        limit = 200
        forward = commit
        while forward['hash'] not in covered and limit > 0:
            covered.add(forward['hash'])
            commits_between.append(forward)
            num_parents = len(forward['parents'])
            if num_parents > 1:
                # Defer the non-first parents; remember who merged them so
                # their spans can reuse this commit's version later.
                map(enque, forward['parents'][1:])
                for it_hash in forward['parents'][1:]:
                    it_commit = commits.get(it_hash)
                    if it_commit:
                        it_commit['child'] = forward
            if num_parents > 0:
                forward = commits[forward['parents'][0]]
            limit -= 1
        covered.add(forward['hash'])
        # we add this point nonetheless it could be already covered
        # because if it is covered, then we don't need to calculate
        # version number again
        commits_between.append(forward)
        write_vcs_versions_bin_helper(commits_between, extract_version)
        map(enque, forward['parents'])
        try:
            commit = queue.pop()
        except IndexError:
            commit = None
    _normalize_version_numbers2(commits)
def messages_to_html(messages):
    """Render an iterable of (already HTML-ready) message strings as a <ul>."""
    body = u''.join(u'<li>{0}</li>'.format(message) for message in messages)
    return u'<ul>{0}</ul>'.format(body)
def get_versions_from_vcs(env):
    """Yield 'almost_version' items extracted from the VCS at env.dirname.

    Walks the commit history, fills per-commit version numbers (via the
    project's version extractor when available, otherwise tags only),
    then groups commits into versions and pushes one item per version.
    """
    path = env.dirname
    get_history = choose_history_extractor(path)
    commits, tagged_versions = get_history(path)
    # we only go through the history
    # if version extractor is available for this repository
    extract_version = choose_version_extractor(path)
    if extract_version is not None:
        write_vcs_versions_fast(commits, extract_version)
    else:
        # if we only use information from tags,
        # we need to fill gaps between tags
        _normalize_version_numbers2(commits)
    # now we'll check if some version information was
    # extracted
    has_versions = sum(1 for commit in commits.values()
                       if 'version' in commit)
    if has_versions:
        bumps = mark_version_bumps(commits, tagged_versions)
        grouped = group_versions(commits, bumps)
        for version in grouped:
            # Unreleased versions deliberately carry no date.
            yield env.push(type='almost_version',
                           title=version['version'],
                           version=version['version'],
                           filename='VCS',
                           date=None if version.get('unreleased') else version['date'],
                           unreleased=version.get('unreleased', False),
                           content=messages_to_html(version['messages']))
def iterate_over_commits(tree, start_hash, upto=None):
    """Walk the commit *tree*, yielding hashes starting at *start_hash*.

    Stops before *upto* (exclusive) or at a root commit.  At a merge
    commit both parent branches are traversed down to their common fork
    point, after which the walk resumes from that fork point.

    Notice! This algorithm does not work with tries where more than
    2 branches are merged in 1 point.
    """
    current = start_hash
    while current and current != upto:
        yield current
        parents = tree[current]['parents']
        if len(parents) > 1:
            # Drain each branch down to the shared ancestor, then
            # continue the main walk from that ancestor.
            fork_point = find_fork_point(tree, *parents)
            for parent in parents:
                for ancestor in iterate_over_commits(tree,
                                                     parent,
                                                     upto=fork_point):
                    yield ancestor
            current = fork_point
        elif parents:
            current = parents[0]
        else:
            current = None
def find_fork_point(tree, left, right):
    """Returns a single parent commit common for two branches
    designated by `left` and `right` hashes.

    Both ancestor chains are walked in lockstep; the first hash seen in
    both accumulating sets is the nearest common ancestor.  Returns None
    implicitly when the branches share no ancestor.
    """
    left_ancestors = iterate_over_commits(tree, left)
    right_ancestors = iterate_over_commits(tree, right)
    # izip_longest keeps both walks advancing at the same pace even when
    # one branch is shorter than the other
    zipped_branches = izip_longest(left_ancestors, right_ancestors)
    left_set = set()
    right_set = set()
    for left_hash, right_hash in zipped_branches:
        if left_hash is not None:
            left_set.add(left_hash)
        if right_hash is not None:
            right_set.add(right_hash)
        intersection = left_set.intersection(right_set)
        # if there is an intersection between these two
        # sets, then it should contain only one hash
        # and this hash is the fork point
        if intersection:
            assert len(intersection) == 1
            return intersection.pop()
def mark_version_bumps(tree, tagged_versions=None):
    """Returns hashes where version was incremented.

    If tagged_versions given, it should be a dict
    {hash -> version}. In this case, hashes for this
    dict will be considered as increments to versions
    from this dict.

    NOTE: this is Python 2 code (`01` octal-style literal below, list-returning
    map/filter).
    """
    bumps = []
    queue = deque()
    processed = set()
    tagged_versions = tagged_versions or {}

    def add_bump(hash, version, date):
        # Record (hash, version, date); when the same version was already
        # recorded, keep only the entry with the earliest date.
        # print 'add', hash[:7], version
        found = None
        for idx, bump in enumerate(bumps):
            if bump[1] == version:
                found = idx
                break
        if found is not None:
            if bumps[found][2] > date:
                del bumps[idx]
                bumps.append((hash, version, date))
        else:
            bumps.append((hash, version, date))

    # seed the work queue with every commit reachable from the root
    queue.extend(
        map(
            tree.get,
            iterate_over_commits(
                tree,
                tree['root']['hash'],
            )
        )
    )
    commit = queue.popleft()
    while commit:
        hash = commit['hash']
        if hash not in processed:
            # print hash[:7]
            stop_at_hash_if_needed(hash)
            version = commit['version']
            if version is not None:
                parents = map(tree.get, commit['parents'])
                # if history was limited, then some parents could be unavailable
                parents = filter(None, parents)
                # if all parents have different version, then
                # this version was incremented in this commit
                if hash in tagged_versions:
                    version = tagged_versions[hash]
                    # we use 1970-01-01 to make tagged version have
                    # advantage over versions guessed using other ways
                    dt = datetime.date(1970, 01, 01)
                    add_bump(hash, version, dt)
                elif not any(map(lambda parent: parent['version'] == version,
                                 parents)):
                    add_bump(hash, version, commit['date'])
            processed.add(hash)
        try:
            commit = queue.popleft()
        except IndexError:
            commit = None
    # bumps were collected newest-first; return oldest-first hashes
    return [bump[0] for bump in reversed(bumps)]
def mark_version_bumps_rec(tree):
    """Returns hashes of commits where
    version changes.

    Recursive variant: for each commit, OR together the bump sets of all
    parents and add the commit's own hash when no parent shares its version.
    """
    @trace
    def rec(commit, cache={}):
        # NOTE: the mutable default `cache` is deliberate here — it memoizes
        # results across the whole recursive traversal of one call.
        hash = commit['hash']
        if hash in cache:
            return cache[hash]
        version = commit['version']
        # Python 2: map() returns a list, so `parents` can be iterated twice
        parents = map(tree.get, commit['parents'])
        parent_bumps = reduce(operator.__or__,
                              (rec(parent, cache=cache) for parent in parents),
                              OrderedSet())
        # a bump happens when no parent carries the same version string
        if not any(map(lambda parent: parent['version'] == version,
                       parents)):
            if version:
                cache[hash] = parent_bumps | OrderedSet([hash])
                return cache[hash]
        cache[hash] = parent_bumps
        return cache[hash]
    return rec(tree['root'])
def group_versions(tree, bumps):
    """Group commit messages under the version-bump commits they precede.

    `bumps` is a list of hashes where the version changed; the root hash
    is appended to it when absent, and in that case the trailing group is
    reported as an unreleased 'x.x.x' entry.  Merge commits contribute no
    messages, and each commit is counted only once across all groups.
    """
    root_hash = tree['root']['hash']
    if root_hash in bumps:
        probably_has_unreleased_commits = False
    else:
        bumps.append(root_hash)
        probably_has_unreleased_commits = True

    seen = set()

    def collect_messages(start):
        # breadth-first over ancestors not yet claimed by an earlier group
        collected = []
        pending = deque([start])
        while pending:
            node = pending.popleft()
            if node['hash'] not in seen:
                # we ignore merge commits where parents > 1
                if len(node['parents']) < 2 and node['message']:
                    collected.append(node['message'])
                seen.add(node['hash'])
                pending.extend(filter(None, map(tree.get, node['parents'])))
        return collected

    grouped = []
    for bump_hash in bumps:
        entry = tree[bump_hash].copy()
        entry_messages = collect_messages(entry)
        if not entry_messages:
            continue
        entry['messages'] = entry_messages
        del entry['message']
        grouped.append(entry)

    if probably_has_unreleased_commits and grouped and grouped[-1]['hash'] == root_hash:
        grouped[-1]['version'] = 'x.x.x'
        grouped[-1]['unreleased'] = True
    return grouped
| StarcoderdataPython |
1895125 | # coding: utf-8
import json
import os
from uuid import uuid4
import requests
from .log import log
class Tokenizer(object):
    """Client for the a-3.ru card tokenization endpoint."""

    def __init__(self):
        # when True, requests go to the dev endpoint instead of production
        self.is_test = False

    def get_token(self, systemId, **args):
        """POST card data to the tokenization API and return the decoded JSON.

        `systemId` is mandatory.  The optional card fields (cardNumber,
        cardCVV, cardExp, cardHolder, orderId, userId) are taken from the
        keyword arguments and forwarded only when truthy.
        """
        payload = {
            "systemId": systemId,
        }
        for field in ("cardNumber", "cardCVV", "cardExp",
                      "cardHolder", "orderId", "userId"):
            value = args.get(field)
            if value:
                payload[field] = value
        if self.is_test:
            endpoint = 'https://devpfront.a-3.ru/token/api/v1'
        else:
            endpoint = 'https://pfront.a-3.ru/token/api/v1'
        response = requests.post(endpoint, json=payload)
        return response.json()
| StarcoderdataPython |
11207287 | import unittest
from resources.file_io_wrapper import FileIOWrapper
class TestFileIO(unittest.TestCase):
    """Integration tests for FileIOWrapper's bucket and object operations.

    These tests talk to a real storage backend: each one provisions the
    shared `unit-testing-buckets` bucket and tearDown() removes whatever
    is left behind.
    """

    def setUp(self) -> None:
        # system under test plus the bucket name every test reuses
        self.sut = FileIOWrapper()
        self.test_bucket = "unit-testing-buckets"

    def test_create_list_delete_buckets(self):
        """Bucket lifecycle: create, list, delete, verify the list shrinks."""
        # Arrange (and act)
        self.sut.create_bucket(bucket_name=self.test_bucket)
        # Act
        output = self.sut.list_buckets()
        # Assert
        self.assertGreater(len(output), 0)
        self.assertIn(self.test_bucket, output)
        # Teardown (and assert)
        self.sut.delete_bucket(self.test_bucket)
        reverted_buckets = self.sut.list_buckets()
        self.assertNotIn(self.test_bucket, reverted_buckets)
        self.assertEqual(len(output), len(reverted_buckets) + 1)

    def test_create_get_list_delete_object(self):
        """Round-trip a small binary object: put, get, list (with prefix), delete."""
        # Arrange (and act)
        self.sut.create_bucket(bucket_name=self.test_bucket)
        mock_file_contents = b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01"
        # Act
        self.sut.put_object(
            "rubbish_test_key",
            mock_file_contents,
            self.test_bucket,
        )
        output = self.sut.get_object_body(
            "rubbish_test_key",
            self.test_bucket
        )
        # Assert
        self.assertIn("rubbish_test_key", self.sut.list_objects(self.test_bucket))
        self.assertListEqual(
            ["rubbish_test_key"],
            self.sut.list_objects(
                self.test_bucket,
                prefix_filter="rubbish_test_key"
            )
        )
        self.assertEqual(output.read(), mock_file_contents)
        # Teardown (and Assert)
        self.sut.delete_objects(["rubbish_test_key"], self.test_bucket)
        self.assertNotIn("rubbish_test_key", self.sut.list_objects(self.test_bucket))

    def test_list_object_max_keys(self):
        """max_keys caps the number of keys returned by a single listing."""
        # Arrange (and act)
        self.sut.create_bucket(bucket_name=self.test_bucket)
        mock_file_contents = b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01"
        keys = [f"rubbish_max_keys_{n}" for n in range(5)]
        for key in keys:
            self.sut.put_object(
                key,
                mock_file_contents,
                self.test_bucket,
            )
        # Act and Assert
        self.assertEqual(len(self.sut.list_objects(self.test_bucket)), 5)
        self.assertEqual(len(self.sut.list_objects(self.test_bucket, max_keys=2)), 2)

    def test_list_object_with_truncated_keys(self):
        """get_all=True must keep paginating past the max_keys truncation."""
        # Arrange (and act)
        self.sut.create_bucket(bucket_name=self.test_bucket)
        mock_file_contents = b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01"
        keys = [f"rubbish_max_keys_{n}" for n in range(100)]
        for key in keys:
            self.sut.put_object(
                key,
                mock_file_contents,
                self.test_bucket,
            )
        # Act
        output_get_all = self.sut.list_objects(
            self.test_bucket,
            max_keys=10,  # force truncation
            get_all=True,
        )
        output_not_get_all = self.sut.list_objects(
            self.test_bucket,
            max_keys=10,  # force truncation
            get_all=False,
        )
        # Assert
        self.assertEqual(len(output_get_all), 100)
        self.assertEqual(len(output_not_get_all), 10)

    def tearDown(self) -> None:
        # best-effort cleanup: empty and remove the test bucket if it exists
        existing_buckets = self.sut.list_buckets()
        if self.test_bucket in existing_buckets:
            items = self.sut.list_objects(self.test_bucket, get_all=True)
            if items:
                self.sut.delete_objects(items, self.test_bucket)
            self.sut.delete_bucket(self.test_bucket)
| StarcoderdataPython |
5099704 | from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
# most-recent computation result; /submit overwrites it and returns it as JSON
result = {'Answer': 0}
def add(a, b):
    """Return a + b with both operands coerced to float."""
    total = float(a) + float(b)
    return total
def sub(a, b):
    """Return a - b with both operands coerced to float."""
    difference = float(a) - float(b)
    return difference
def mul(a, b):
    """Return a * b with both operands coerced to float."""
    product = float(a) * float(b)
    return product
def div(a, b):
    """Return a / b with both operands coerced to float.

    Raises ZeroDivisionError when b is 0, like the original.
    """
    quotient = float(a) / float(b)
    return quotient
def pow(a, b):
    """Return a raised to the power b (shadows the builtin pow)."""
    power = a ** b
    return power
def gcd(a, b):
    """Return the greatest common divisor of a and b.

    Implemented iteratively (Euclid's algorithm) instead of recursively, so
    pathological inputs cannot exhaust the recursion limit.  Works for
    floats holding integral values, matching the original behaviour
    (the `%` operator is defined for floats too).
    """
    while a != 0:
        a, b = b % a, a
    return b
def lcm(a, b):
    """Return the least common multiple of a and b (via gcd)."""
    product = a * b
    return product / gcd(a, b)
def OR(a, b):
    """Bitwise OR of a and b after truncating both to int."""
    left, right = int(a), int(b)
    return left | right
def AND(a, b):
    """Bitwise AND of a and b after truncating both to int."""
    left, right = int(a), int(b)
    return left & right
def XOR(a, b):
    """Bitwise XOR of a and b after truncating both to int."""
    left, right = int(a), int(b)
    return left ^ right
@app.route("/", methods=["POST", "GET"])
def home():
    """Serve the calculator page."""
    page = render_template("index.html")
    return page
@app.route("/submit", methods=["POST", "GET"])
def submit():
    """Apply the requested binary operation to the two submitted numbers.

    On POST, reads `num1`, `num2` and `operation` from the form, dispatches
    to the matching helper, stores the answer in the module-level `result`
    dict and returns it as JSON.  An unknown operation yields 0; a GET
    returns the previous answer unchanged.
    """
    # dispatch table replaces the original if/elif chain
    operations = {
        'add': add, 'sub': sub, 'mul': mul, 'div': div, 'pow': pow,
        'gcd': gcd, 'lcm': lcm, 'or': OR, 'and': AND, 'xor': XOR,
    }
    val = 0
    if request.method == "POST":
        num1 = float(request.form['num1'])
        num2 = float(request.form['num2'])
        op = request.form['operation']
        handler = operations.get(op)
        if handler is not None:
            val = handler(num1, num2)
        result['Answer'] = val
    return jsonify(result)
if __name__ == "__main__":
    # development server; debug=True enables the reloader and debugger
    app.run(debug=True)
6414887 | from zipfile import ZipFile
from bs4 import BeautifulSoup
from flask import Flask, render_template, request
from util import allowed_file
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the upload form."""
    page = render_template('index.html')
    return page
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a ZIP upload, locate the '-s-' XML member inside it, and
    render per-subscriber totals (base price + payments) as HTML table rows.

    Returns 400 with a plain-text message for a missing/empty/non-ZIP
    upload or when no matching XML member exists.
    """
    if 'file' not in request.files:
        return 'No file sent to server', 400
    uploaded_file = request.files['file']
    if uploaded_file.filename == '':
        return 'File wasnt specified', 400
    if uploaded_file and not allowed_file(uploaded_file.filename):
        return 'Wrong file type, only ZIP type is accepted', 400

    with ZipFile(uploaded_file) as archive:
        # keep the LAST member whose name contains '-s-' (same as original)
        candidates = [name for name in archive.namelist() if '-s-' in name]
        if not candidates:
            return '-s- XML file not found in uploaded ZIP archive', 400
        with archive.open(candidates[-1]) as xml_member:
            raw_xml = xml_member.read()

    document = BeautifulSoup(raw_xml, 'xml')
    rows = []
    for subscriber in document.find_all('subscriber'):
        base_price = float(subscriber['summaryPrice'])
        payments = subscriber.find('payments')
        payment_total = float(payments['paymentTotalPrice']) if payments else 0
        rows.append([
            subscriber['phoneNumber'],
            base_price + payment_total
        ])

    return ''.join(
        '<tr><td>{}</td><td>{:.2f} Kč</td></tr>'.format(*row) for row in rows)
| StarcoderdataPython |
6520371 | <filename>fireworks_vasp/tasks.py
__author__ = '<NAME>'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '1/31/14'
import importlib
import logging

from custodian import Custodian
from custodian.vasp.jobs import VaspJob
from fireworks import FireTaskBase, FWAction, explicit_serialize
from pymatgen import Structure
from pymatgen.io.vaspio import Vasprun
def load_class(mod, name):
    """Return attribute `name` from module `mod`, importing it if needed.

    Uses importlib.import_module, the documented replacement for the
    low-level __import__/fromlist dance.
    """
    return getattr(importlib.import_module(mod), name)
@explicit_serialize
class WriteVaspInputTask(FireTaskBase):
    """
    Writes VASP Input files.

    Required params:
        structure (dict): An input structure in pymatgen's Structure.to_dict
            format.
        vasp_input_set (str): A string name for the VASP input set. E.g.,
            "MPVaspInputSet" or "MITVaspInputSet".

    Optional params:
        input_set_params (dict): If the input set requires some additional
            parameters, specify them using input_set_params. E.g.,
            {"user_incar_settings": ...}.
    """

    required_params = ["structure", "vasp_input_set"]
    optional_params = ["input_set_params"]

    def run_task(self, fw_spec):
        s = Structure.from_dict(self["structure"])
        # Removed a dead `mod = __import__(..., -1)` call: its result was
        # never used (load_class below does the import) and level=-1 is
        # invalid in Python 3.
        vis = load_class("pymatgen.io.vaspio_set", self["vasp_input_set"])(
            **self.get("input_set_params", {}))
        vis.write_input(s, ".")
@explicit_serialize
class VaspCustodianTask(FireTaskBase):
    """Runs VASP under Custodian supervision.

    Required Params:
        vasp_cmd: Command used to launch VASP, e.g. ["mpirun", "-np", "8",
            "vasp"].
        handlers ([str]): Names of custodian.vasp.handlers error handlers
            to activate (default constructor args are assumed).  The special
            value "all" selects a standard set of handlers for relaxations.

    Optional params:
        vasp_job_params (dict): Extra keyword arguments for custodian's
            VaspJob.
        custodian_params (dict): Extra keyword arguments for Custodian
            itself, e.g. {"scratch_dir": "..."} to use a scratch directory.
    """

    required_params = ["vasp_cmd", "handlers"]
    optional_params = ["vasp_job_params", "custodian_params"]

    def run_task(self, fw_spec):
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.INFO, filename="run.log")
        vasp_job = VaspJob(vasp_cmd=self["vasp_cmd"],
                           **self.get("vasp_job_params", {}))
        if self["handlers"] == "all":
            handler_names = ["VaspErrorHandler", "MeshSymmetryErrorHandler",
                             "UnconvergedErrorHandler",
                             "NonConvergingErrorHandler",
                             "PotimErrorHandler", "PBSWalltimeHandler"]
        else:
            handler_names = self["handlers"]
        handler_objects = [load_class("custodian.vasp.handlers", name)
                           for name in handler_names]
        supervisor = Custodian(handler_objects, [vasp_job],
                               **self.get("custodian_params", {}))
        output = supervisor.run()
        return FWAction(stored_data=output)
@explicit_serialize
class VaspAnalyzeTask(FireTaskBase):
    """Parse vasprun.xml and stash its dict form in Fireworks stored_data."""

    optional_params = ["vasprun_fname"]

    def run_task(self, fw_spec):
        vasprun_path = self.get("vasprun_fname", "vasprun.xml")
        parsed = Vasprun(vasprun_path)
        return FWAction(stored_data={"vasprun": parsed.to_dict})
4970988 | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unittest requires method names starting in 'test'
# pylint:disable=invalid-name
# pylint:disable=g-bad-import-order
# pylint:disable=line-too-long
"""Unit tests for http.py."""
__author__ = '<EMAIL> (<NAME>)'
# this needs to be first, before any tornado imports
import epoll_fix # pylint:disable=unused-import
import datetime
import os
import shutil
import sys
import tempfile
import time
from wvtest import unittest
import xml.etree.cElementTree as ET
import google3
import dm_root
import mox
import tornado.httpclient
import tornado.ioloop
import tornado.testing
import tornado.util
import tornado.web
import api
import cpe_management_server
import cwmpdate
import garbage
import handle
import http
import mainloop
import session
SOAPNS = '{http://schemas.xmlsoap.org/soap/envelope/}'
CWMPNS = '{urn:dslforum-org:cwmp-1-2}'
def GetMonotime():
    """Return the active time hook.

    Older tornado doesn't have monotime(); fall back to time.time there so
    the tests work on both versions.
    """
    if not hasattr(tornado.util, 'monotime_impl'):
        return time.time
    return tornado.util.monotime_impl
def SetMonotime(func):
    """Install `func` as the time source.

    Older tornado doesn't have monotime(); stay compatible by patching
    time.time directly in that case.
    """
    has_hook = hasattr(tornado.util, 'monotime_impl')
    if has_hook:
        tornado.util.monotime_impl = func
    else:
        time.time = func
def StubOutMonotime(moxinstance):
    """Stub out whichever time hook this tornado version uses."""
    if not hasattr(tornado.util, 'monotime_impl'):
        moxinstance.StubOutWithMock(time, 'time')
    else:
        moxinstance.StubOutWithMock(tornado.util, 'monotime_impl')
class WrapHttpClient(object):
    """Wraps a tornado HTTP client so every completed fetch also calls
    `stopfunc` (the tests pass self.stop, stopping the IOLoop per request)."""

    def __init__(self, oldclient, stopfunc, **kwargs):
        self.stopfunc = stopfunc
        self.realclient = oldclient(**kwargs)

    def fetch(self, req, callback):
        print '%s: fetching: %s %s' % (self, req, callback)

        def mycallback(httpresponse):
            # forward the response, then stop the test IOLoop
            print 'WrapHTTP request: finished request for %r' % req.url
            callback(httpresponse)
            self.stopfunc()
        self.realclient.fetch(req, mycallback)

    def close(self):
        self.realclient.close()

    def handle_callback_exception(self, callback):
        self.realclient.handle_callback_exception(callback)
class FakeAcsConfig(object):
    """Minimal stand-in for the ACS configuration object used by the CPE.

    Records how many times ACS access was attempted and which URL was last
    declared successful, without touching any real configuration store.
    """

    def __init__(self, port):
        self.port = port
        self.acs_access_attempt_count = 0
        self.success_url = None

    def GetAcsUrl(self):
        """Point the CPE at the local test HTTP server."""
        return 'http://127.0.0.1:%d/cwmp' % self.port

    def SetAcsUrl(self, val):
        # intentionally a no-op: the fake URL is fixed by the test port
        pass

    def AcsAccessAttempt(self, unused_url):
        self.acs_access_attempt_count = self.acs_access_attempt_count + 1

    def AcsAccessSuccess(self, url):
        self.success_url = url
class LinearHttpHandler(tornado.web.RequestHandler):
    """Async handler that hands each GET/POST to a test-supplied callback,
    so the test body can answer requests one at a time."""

    def initialize(self, callback):
        self.callback = callback

    def _handle(self):
        print 'LinearHttpHandler: got %r request for %r' % (self.request.method,
                                                            self.request.path)
        self.callback(self)

    @tornado.web.asynchronous
    def get(self):
        return self._handle()

    @tornado.web.asynchronous
    def post(self):
        return self._handle()
class _TrivialHandler(tornado.web.RequestHandler):
    """Handler with a no-op GET and an echoing POST, used by TrivialTest."""

    def get(self):
        return 'foo'

    def post(self):
        # postdata arrives as bytes, but response can go out as unicode.
        decoded_body = self.request.body.decode('utf-8')
        self.write('post-foo: %s' % decoded_body)
class TrivialTest(tornado.testing.AsyncHTTPTestCase, unittest.TestCase):
    """Harness sanity checks (GC leak canaries, trivial GET/POST) plus
    unit tests for http.AddQueryParams."""

    def setUp(self):
        super(TrivialTest, self).setUp()
        self.gccheck = garbage.GcChecker()

    def tearDown(self):
        super(TrivialTest, self).tearDown()
        # drop references so the GC checker can spot real leaks
        del self._app.handlers
        del self._app
        del self.http_server
        self.gccheck.Done()

    def trivial_callback(self, *args, **kwargs):
        # should never fire in these tests
        self.trivial_calledback = True

    def get_app(self):
        return tornado.web.Application([('/', _TrivialHandler)])

    def test01(self):
        # setUp/tearDown-only test (leak canary)
        pass

    def test02(self):
        pass

    def test_query_params(self):
        """AddQueryParams appends options=noautoprov only when autoprov is
        off and the option isn't already present; it never removes it."""
        self.assertEqual(http.AddQueryParams('http://whatever/thing', True),
                         'http://whatever/thing')
        self.assertEqual(http.AddQueryParams('http://whatever/thing', False),
                         'http://whatever/thing?options=noautoprov')
        self.assertEqual(http.AddQueryParams('http://whatever', False),
                         'http://whatever?options=noautoprov')
        self.assertEqual(http.AddQueryParams('//whatever', False),
                         '//whatever?options=noautoprov')
        self.assertEqual(http.AddQueryParams('http://whatever:112', False),
                         'http://whatever:112?options=noautoprov')
        self.assertEqual(http.AddQueryParams('http://whatever:112?x^&y', False),
                         'http://whatever:112?x^&y&options=noautoprov')
        self.assertEqual(http.AddQueryParams('http://what:112?options=x', False),
                         'http://what:112?options=x&options=noautoprov')
        # Doesn't add a new options=noautoprov or reorder options
        self.assertEqual(http.AddQueryParams('//w?x&options=noautoprov&y', False),
                         '//w?x&options=noautoprov&y')
        # Doesn't remove noautoprov even if autoprov=True (we only add options,
        # never remove)
        self.assertEqual(http.AddQueryParams('//w?x&options=noautoprov&y', True),
                         '//w?x&options=noautoprov&y')

    def test_trivial_get(self):
        self.trivial_calledback = False
        self.http_client.fetch(self.get_url('/'), self.stop)
        response = self.wait()
        self.assertIsNone(response.error)
        self.assertFalse(self.trivial_calledback)
        self.assertEqual(response.body, '')
        for fd in self.io_loop._handlers.keys():
            self.io_loop.remove_handler(fd)

    def test_trivial_post(self):
        self.trivial_calledback = False
        # postdata body is provided as unicode, and auto-encoded as utf-8
        self.http_client.fetch(self.get_url('/'), self.stop,
                               method='POST', body=u'hello\u00b4')
        response = self.wait()
        self.assertIsNone(response.error)
        self.assertFalse(self.trivial_calledback)
        for fd in self.io_loop._handlers.keys():
            self.io_loop.remove_handler(fd)
        # post response comes back as utf-8 encoded bytes (not auto-decoded)
        self.assertEqual(bytes(response.body), 'post-foo: hello\xc2\xb4')
class TestManagementServer(object):
    """Fake management server exposing only the connection-request creds."""
    ConnectionRequestUsername = 'username'
    ConnectionRequestPassword = 'password'
class PingTest(tornado.testing.AsyncHTTPTestCase, unittest.TestCase):
    """Tests HTTP authentication on the CPE's ping handler."""

    def setUp(self):
        super(PingTest, self).setUp()
        self.gccheck = garbage.GcChecker()

    def tearDown(self):
        super(PingTest, self).tearDown()
        self.gccheck.Done()

    def ping_callback(self):
        # records that PingHandler invoked its success callback
        self.ping_calledback = True

    def get_app(self):
        return tornado.web.Application(
            [('/', http.PingHandler, dict(cpe_ms=TestManagementServer(),
                                          callback=self.ping_callback))])

    def test_ping(self):
        """An unauthenticated ping must get 401 and not fire the callback."""
        self.ping_calledback = False
        self.http_client.fetch(self.get_url('/'), self.stop)
        response = self.wait()
        self.assertEqual(response.error.code, 401)
        self.assertFalse(self.ping_calledback)
class HttpTest(tornado.testing.AsyncHTTPTestCase, unittest.TestCase):
    def setUp(self):
        """Install test doubles: GC checker, IOLoop-stopping HTTP client,
        stubbed autoprovisioning flag, and the tornado test application."""
        self.gccheck = garbage.GcChecker()
        self.old_HTTPCLIENT = session.HTTPCLIENT
        self.old_GETWANPORT = http.GETWANPORT
        self.want_auto_prov = False
        self.old_WantACSAutoprovisioning = (
            cpe_management_server.CpeManagementServer.WantACSAutoprovisioning)
        cpe_management_server.CpeManagementServer.WantACSAutoprovisioning = (
            lambda(_): self.want_auto_prov)

        def _WrapWrapper(**kwargs):
            # each completed fetch stops the IOLoop via self.stop
            return WrapHttpClient(self.old_HTTPCLIENT, self.stop, **kwargs)
        session.HTTPCLIENT = _WrapWrapper
        self.app = tornado.web.Application(
            [
                ('/cwmp', LinearHttpHandler,
                 dict(callback=self.HttpRequestReceived)),
                ('/redir.*', LinearHttpHandler,
                 dict(callback=self.HttpRequestReceived)),
            ])
        super(HttpTest, self).setUp()  # calls get_app(), so self.app must exist
        self.requestlog = []
        self.old_monotime = GetMonotime()
        self.advance_time = 0
        self.removedirs = list()
        self.removefiles = list()
    def tearDown(self):
        """Restore every patched global and delete temp files/dirs; fails
        the test if any CPE request was left unhandled."""
        session.HTTPCLIENT = self.old_HTTPCLIENT
        http.GETWANPORT = self.old_GETWANPORT
        cpe_management_server.CpeManagementServer.WantACSAutoprovisioning = (
            self.old_WantACSAutoprovisioning)
        SetMonotime(self.old_monotime)
        for d in self.removedirs:
            shutil.rmtree(d)
        for f in self.removefiles:
            os.remove(f)
        super(HttpTest, self).tearDown()
        # clean up the namespace to make it easier to see "real" memory leaks
        del self.app.handlers[:]
        del self.app.handlers
        self.app.__dict__.clear()
        del self.app
        del self._app
        del self.http_server
        if self.requestlog:
            raise Exception('requestlog still has %d unhandled requests'
                            % len(self.requestlog))
        self.gccheck.Done()
        self.gccheck = None
    def get_app(self):
        # tornado.testing hook: serve the application built in setUp()
        return self.app
    def HttpRequestReceived(self, handler):
        # queue the request's handler for the test body and stop the IOLoop
        self.requestlog.append(handler)
        self.stop()
    def NextHandler(self):
        """Block until the CPE has issued a request; return its handler."""
        while not self.requestlog:
            self.wait()
        return self.requestlog.pop(0)
    def advanceTime(self):
        # Ensure time marches forward some small amount with each call
        # or the network code in tornado sometimes fails to work.
        self.advance_time += 0.01
        return self.advance_time
    def getCpe(self):
        """Build a fake CPE wired to this test's HTTP server and IOLoop,
        with temp download/config dirs registered for cleanup."""
        dm_root.PLATFORMDIR = '../platform'
        root = dm_root.DeviceModelRoot(self.io_loop, 'fakecpe', ext_dir=None)
        cpe = api.CPE(handle.Handle(root))
        dldir = tempfile.mkdtemp()
        self.removedirs.append(dldir)
        cfdir = tempfile.mkdtemp()
        self.removedirs.append(cfdir)
        cpe.download_manager.SetDirectories(config_dir=cfdir, download_dir=dldir)
        cpe_machine = http.Listen(ip=None, port=0,
                                  ping_path='/ping/http_test',
                                  acs=None, cpe=cpe, cpe_listener=False,
                                  acs_config=FakeAcsConfig(self.get_http_port()),
                                  ioloop=self.io_loop)
        return cpe_machine
    def testA00(self):
        # a trivial test to make sure setUp/tearDown don't leak memory.
        pass
    def testA01(self):
        """A plain GET to /cwmp reaches the LinearHttpHandler."""
        self.http_client.fetch(self.get_url('/cwmp'), self.stop)
        self.wait()
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'GET')
        h.finish()
        self.wait()
    def testMaxEnvelopes(self):
        """The startup Inform must advertise MaxEnvelopes == 1."""
        SetMonotime(self.advanceTime)
        cpe_machine = self.getCpe()
        cpe_machine.Startup()
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        root = ET.fromstring(h.request.body)
        h.finish()
        envelope = root.find(SOAPNS + 'Body/' + CWMPNS + 'Inform/MaxEnvelopes')
        self.assertTrue(envelope is not None)
        self.assertEqual(envelope.text, '1')
        self.assertEqual(len(self.requestlog), 0)
    def testCurrentTime(self):
        """The startup Inform carries a valid CWMP-formatted CurrentTime."""
        SetMonotime(self.advanceTime)
        cpe_machine = self.getCpe()
        cpe_machine.Startup()
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        root = ET.fromstring(h.request.body)
        h.finish()
        ctime = root.find(SOAPNS + 'Body/' + CWMPNS + 'Inform/CurrentTime')
        self.assertTrue(ctime is not None)
        self.assertTrue(cwmpdate.valid(ctime.text))
        self.assertEqual(len(self.requestlog), 0)
    def testLookupDevIP6(self):
        """LookupDevIP6 returns the WAN port's IPv6 address from the
        if_inet6 test data, or 0 when the port is absent."""
        http.PROC_IF_INET6 = 'testdata/http/if_inet6'
        http.GETWANPORT = 'testdata/http/getwanport_eth0'
        cpe_machine = self.getCpe()
        self.assertEqual(cpe_machine.LookupDevIP6(),
                         '11:2233:4455:6677:8899:aabb:ccdd:eeff')
        http.GETWANPORT = 'testdata/http/getwanport_foo0'
        self.assertEqual(cpe_machine.LookupDevIP6(), 0)
    def testRetryCount(self):
        """RetryCount increments after a failed session, and failures must
        not bless the ACS URL."""
        SetMonotime(self.advanceTime)
        cpe_machine = self.getCpe()
        cpe_machine.Startup()
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        root = ET.fromstring(h.request.body)
        retry = root.find(SOAPNS + 'Body/' + CWMPNS + 'Inform/RetryCount')
        self.assertTrue(retry is not None)
        self.assertEqual(retry.text, '0')
        # Fail the first request
        h.send_error(404)
        self.wait(timeout=20)  # wait for client request to finish and setup retry
        self.advance_time += 10
        # not success, don't bless URL
        self.assertEqual(cpe_machine._acs_config.success_url, None)
        h = self.NextHandler()
        root = ET.fromstring(h.request.body)
        h.finish()
        retry = root.find(SOAPNS + 'Body/' + CWMPNS + 'Inform/RetryCount')
        self.assertTrue(retry is not None)
        self.assertEqual(retry.text, '1')
    def testCookies(self):
        """Cookies set by the ACS are echoed back on the next request, and
        a completed session blesses the ACS URL."""
        SetMonotime(self.advanceTime)
        cpe_machine = self.getCpe()
        cpe_machine.Startup()
        h = self.NextHandler()
        self.assertTrue(h.request.method, 'POST')
        msg = ('<soapenv:Envelope '
               'xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" '
               'xmlns:cwmp="urn:dslforum-org:cwmp-1-2" '
               'xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" '
               'xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
               'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><soapenv:Header><cwmp:ID'
               ' soapenv:mustUnderstand="1">cwmpID</cwmp:ID><cwmp:HoldRequests '
               'soapenv:mustUnderstand="1">1</cwmp:HoldRequests></soapenv:Header><soapenv:Body><cwmp:InformResponse><MaxEnvelopes>1</MaxEnvelopes></cwmp:InformResponse></soapenv:Body></soapenv:Envelope>')  # pylint:disable=g-line-too-long
        h.set_cookie('CWMPSID', '0123456789abcdef')
        h.set_cookie('AnotherCookie', '987654321', domain='.example.com',
                     path='/', expires_days=1)
        h.write(msg)
        h.finish()
        self.wait()
        h = self.NextHandler()
        self.assertEqual(h.request.headers['Cookie'],
                         'AnotherCookie=987654321; CWMPSID=0123456789abcdef')
        h.finish()
        self.wait()
        # success, bless the URL
        self.assertEqual(cpe_machine._acs_config.success_url,
                         cpe_machine._acs_config.GetAcsUrl())
    def testRedirect(self):
        """307/301/302 redirects are followed within a session; only the
        ORIGINAL ACS URL is blessed once the session completes."""
        SetMonotime(self.advanceTime)
        cpe_machine = self.getCpe()
        cpe_machine.Startup()
        orig_url = cpe_machine._acs_config.GetAcsUrl()
        h = self.NextHandler()
        urlbase = 'http://127.0.0.1:%d' % self.get_http_port()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/cwmp')
        # want_auto_prov defaults to False
        self.assertEqual(h.request.query, 'options=noautoprov')
        h.redirect(urlbase + '/redir7', status=307)
        self.assertTrue('<soap' in h.request.body)
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/redir7')
        # After an HTTP redirect, we don't add our noautoprov option anymore
        # (it's handled at the HTTP layer; the redirector is supposed to keep it
        # if it wants it).
        self.assertEqual(h.request.query, '')
        h.redirect(urlbase + '/redir1', status=301)
        self.assertTrue('<soap' in h.request.body)
        # Session still not complete: success should not be declared yet
        self.assertIsNone(cpe_machine._acs_config.success_url)
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/redir1')
        self.assertEqual(h.request.query, '')
        h.redirect(urlbase + '/redir2', status=302)
        self.assertTrue('<soap' in h.request.body)
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/redir2')
        self.assertEqual(h.request.query, '')
        self.assertTrue('<soap' in h.request.body)
        h.finish()
        self.wait()
        # Session still not complete: success should not be declared yet
        self.assertIsNone(cpe_machine._acs_config.success_url)
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/redir2')
        self.assertEqual(h.request.query, '')
        h.finish()
        self.wait()
        # success, bless the *original* URL.
        # (HTTP redirect targets are never blessed)
        self.assertEqual(cpe_machine._acs_config.success_url, orig_url)
    def testRedirectSession(self):
        """Test that a redirect persists for the entire session."""
        SetMonotime(self.advanceTime)
        # autoprovisioning on, so no noautoprov query parameter is added
        self.want_auto_prov = True
        cpe_machine = self.getCpe()
        cpe_machine.Startup()
        h = self.NextHandler()
        urlbase = 'http://127.0.0.1:%d' % self.get_http_port()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/cwmp')
        self.assertEqual(h.request.query, '')
        h.redirect(urlbase + '/redirected', status=307)
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/redirected')
        h.finish()
        # the follow-up request in the same session stays on the redirect target
        h = self.NextHandler()
        self.assertEqual(h.request.method, 'POST')
        self.assertEqual(h.request.path, '/redirected')
        h.finish()
    def testNewPingSession(self):
        """Ping-triggered sessions are rate limited: repeat pings within
        ping_rate_limit_seconds only queue one delayed session."""
        cpe_machine = self.getCpe()
        cpe_machine.previous_ping_time = 0
        # Create mocks of ioloop, and stubout the time function.
        m = mox.Mox()
        ioloop_mock = m.CreateMock(tornado.ioloop.IOLoop)
        m.StubOutWithMock(cpe_machine, '_NewSession')
        StubOutMonotime(m)
        # First call to _NewSession should get the time and trigger a new session
        GetMonotime()().AndReturn(1000)
        cpe_machine._NewSession(mox.IsA(str))
        # Second call to _NewSession should queue a session
        GetMonotime()().AndReturn(1001)
        ioloop_mock.add_timeout(mox.IsA(datetime.timedelta),
                                mox.IgnoreArg()).AndReturn(1)
        # Third call should get the time and then not do anything
        # since a session is queued.
        GetMonotime()().AndReturn(1001)
        # And the call to _NewTimeoutSession should call through to
        # NewPingSession, and start a new session
        GetMonotime()().AndReturn(1000 + cpe_machine.ping_rate_limit_seconds)
        ioloop_mock.add_timeout(mox.IsA(datetime.timedelta),
                                mox.IgnoreArg()).AndReturn(2)
        cpe_machine.ioloop = ioloop_mock
        m.ReplayAll()
        # Real test starts here.
        cpe_machine._NewPingSession()
        cpe_machine._NewPingSession()
        cpe_machine._NewPingSession()
        cpe_machine._NewTimeoutPingSession()
        # Verify everything was called correctly.
        m.VerifyAll()
    def testNewPeriodicSession(self):
        """Tests that _NewSession is called if the event queue is empty."""
        cpe_machine = self.getCpe()
        # Create mocks of ioloop, and stubout the time function.
        m = mox.Mox()
        m.StubOutWithMock(cpe_machine, '_NewSession')
        cpe_machine._NewSession('2 PERIODIC')
        m.ReplayAll()
        cpe_machine.NewPeriodicSession()
        m.VerifyAll()
    def testNewPeriodicSessionPending(self):
        """Tests that no new periodic session starts if there is one pending."""
        cpe_machine = self.getCpe()
        # Create mocks of ioloop, and stubout the time function.
        m = mox.Mox()
        m.StubOutWithMock(cpe_machine, 'Run')
        cpe_machine.Run()
        m.ReplayAll()
        self.assertFalse(('2 PERIODIC', None) in cpe_machine.event_queue)
        cpe_machine.NewPeriodicSession()
        self.assertTrue(('2 PERIODIC', None) in cpe_machine.event_queue)
        cpe_machine.NewPeriodicSession()
        # NOTE(review): this final ReplayAll() looks like it was meant to be
        # VerifyAll() -- confirm.
        m.ReplayAll()
    def testNewWakeupSession(self):
        """A wakeup queues exactly one '6 CONNECTION REQUEST' event even
        when requested twice."""
        cpe_machine = self.getCpe()
        # Create mocks of ioloop, and stubout the time function.
        m = mox.Mox()
        m.StubOutWithMock(cpe_machine, 'Run')
        cpe_machine.Run()
        m.ReplayAll()
        self.assertFalse(('6 CONNECTION REQUEST', None) in cpe_machine.event_queue)
        cpe_machine.NewWakeupSession()
        self.assertTrue(('6 CONNECTION REQUEST', None) in cpe_machine.event_queue)
        cpe_machine.NewWakeupSession()
        # NOTE(review): this final ReplayAll() looks like it was meant to be
        # VerifyAll() -- confirm.
        m.ReplayAll()
    def testEventQueue(self):
        """Mutating an over-full event queue triggers sys.exit(1).

        The mock expects exactly four exit attempts -- presumably one for
        each mutation that overflows the queue's capacity of 64; confirm
        against the event_queue implementation in http.py.
        """
        cpe_machine = self.getCpe()
        m = mox.Mox()
        m.StubOutWithMock(sys, 'exit')
        sys.exit(1)
        sys.exit(1)
        sys.exit(1)
        sys.exit(1)
        m.ReplayAll()
        for i in range(64):
            cpe_machine.event_queue.append(i)
        cpe_machine.event_queue.append(100)
        cpe_machine.event_queue.appendleft(200)
        cpe_machine.event_queue.extend([300])
        cpe_machine.event_queue.extendleft([400])
        cpe_machine.event_queue.clear()
        cpe_machine.event_queue.append(10)
        cpe_machine.event_queue.clear()
        m.VerifyAll()
def testEncodeInform(self):
    """Checks that queued events are reflected in the encoded Inform."""
    cpe_machine = self.getCpe()
    cpe_machine.NewPeriodicSession()
    inform = cpe_machine.EncodeInform()
    self.assertTrue(len(inform))
    # Only the periodic event is queued at this point.
    self.assertTrue('2 PERIODIC' in inform)
    self.assertFalse('4 VALUE CHANGE' in inform)
    # Queue a second event and re-encode; both must now appear.
    cpe_machine.event_queue.append(('4 VALUE CHANGE', None))
    inform = cpe_machine.EncodeInform()
    self.assertTrue(len(inform))
    self.assertTrue('2 PERIODIC' in inform)
    self.assertTrue('4 VALUE CHANGE' in inform)
def testAcsDisable(self):
    """End-to-end check of the ACS-disable sentinel file handling.

    Creating the sentinel file suppresses new sessions (but access attempts
    are still counted); an expired sentinel re-enables sessions; deleting
    it re-enables them as well.
    """
    http.CWMP_TMPDIR = tempfile.mkdtemp()
    self.removedirs.append(http.CWMP_TMPDIR)
    cpe_machine = self.getCpe()
    loop = mainloop.MainLoop()
    http.ACS_DISABLE_EXPIRY_SECS = 60 * 10
    # pylint: disable=protected-access
    acs_disabled_filename = cpe_machine._AcsDisabledFilename()
    # Disable the ACS. Make sure we logged an access attempt anyway.
    before = cpe_machine._acs_config.acs_access_attempt_count
    # Bug fix: close the handle immediately -- we only need the file to
    # exist; the original leaked three open file objects.
    open(acs_disabled_filename, 'w').close()
    loop.RunOnce()
    cpe_machine.NewPeriodicSession()
    cpe_machine.PingReceived()
    self.assertEqual(len(cpe_machine.event_queue), 0)
    after = cpe_machine._acs_config.acs_access_attempt_count
    self.assertNotEqual(before, after)
    # Now test that the file age has expired. We have to open the file again
    # to trigger _UpdateAcsDisabled.
    http.ACS_DISABLE_EXPIRY_SECS = 0.1
    open(acs_disabled_filename, 'w').close()
    time.sleep(0.1)
    loop.RunOnce()
    cpe_machine.NewPeriodicSession()
    cpe_machine.PingReceived()
    self.assertEqual(len(cpe_machine.event_queue), 1)
    # Clear the event queue and session, go back a step to the ACS being
    # disabled again, then delete the file and make sure that re-enables it.
    cpe_machine.InformResponseReceived()
    cpe_machine.session = None
    self.assertEqual(len(cpe_machine.event_queue), 0)
    http.ACS_DISABLE_EXPIRY_SECS = 60 * 10
    open(acs_disabled_filename, 'w').close()
    loop.RunOnce()
    cpe_machine.NewPeriodicSession()
    cpe_machine.PingReceived()
    self.assertEqual(len(cpe_machine.event_queue), 0)
    os.unlink(acs_disabled_filename)
    loop.RunOnce()
    cpe_machine.NewPeriodicSession()
    cpe_machine.PingReceived()
    self.assertEqual(len(cpe_machine.event_queue), 1)
if __name__ == '__main__':
    # Run all test cases in this module when executed directly.
    unittest.main()
| StarcoderdataPython |
245395 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the logbook app: per-user activity logs
    # (LogbookUser) and a record of sent mails (LogMail).

    dependencies = [
        # Depends on whatever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='LogbookUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('activity', models.TextField(max_length=45, null=True, blank=True)),
                ('module', models.TextField(max_length=45, null=True, blank=True)),
                ('body_log', models.CharField(max_length=1000, null=True, blank=True)),
                # b'...' byte-string choices indicate this migration was
                # generated under Python 2.
                ('log_level', models.CharField(default=b'INFO', max_length=5, choices=[(b'DEBUG', b'DEBUG'), (b'ERROR', b'ERROR'), (b'INFO', b'INFO')])),
                ('logbook_date', models.DateField(auto_now=True)),
                # NOTE(review): ForeignKey without on_delete is only valid on
                # Django < 2.0 (implicit CASCADE) -- confirm target version.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'logbook_user',
            },
        ),
        migrations.CreateModel(
            name='LogMail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('mail_from', models.TextField(max_length=45)),
                ('mail_subject', models.TextField(max_length=50)),
                ('mail_sent_date', models.DateField(auto_now_add=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'log_mail',
            },
        ),
    ]
| StarcoderdataPython |
12854572 | <reponame>xenadevel/xena-open-automation-python-api<filename>xoa_driver/internals/core/commands/px_commands.py<gh_stars>1-10
#: L23 Port Transceiver Commands
from dataclasses import dataclass
import typing
from ..protocol.command_builders import (
build_get_request,
build_set_request
)
from .. import interfaces
from ..transporter.token import Token
from ..protocol.fields.data_types import *
from ..protocol.fields.field import XmpField
from ..registry import register_command
from .enums import *
@register_command
@dataclass
class PX_RW:
    """
    Provides access to the register interface supported by the port transceiver. It
    is possible to both read and write register values.
    """

    code: typing.ClassVar[int] = 501      # XMP command code for PX_RW
    pushed: typing.ClassVar[bool] = False  # not pushed asynchronously by the tester

    _connection: "interfaces.IConnection"
    _module: int                 # module index on the chassis
    _port: int                   # port index on the module
    _page_xindex: int            # transceiver page index (first sub-index)
    _register_xaddress: int      # register address within the page (second sub-index)

    @dataclass(frozen=True)
    class SetDataAttr:
        value: XmpField[XmpHex4] = XmpField(XmpHex4)  # 4 hex bytes, register value of the port transceiver

    @dataclass(frozen=True)
    class GetDataAttr:
        value: XmpField[XmpHex4] = XmpField(XmpHex4)  # 4 hex bytes, register value of the port transceiver

    def get(self) -> "Token[GetDataAttr]":
        """Get the register value of a transceiver.

        :return: the register value of a transceiver
        :rtype: PX_RW.GetDataAttr
        """
        return Token(self._connection, build_get_request(self, module=self._module, port=self._port, indices=[self._page_xindex, self._register_xaddress]))

    def set(self, value: str) -> "Token":
        """Set the register value of a transceiver.

        :param value: register value of a transceiver
        :type value: str
        """
        return Token(self._connection, build_set_request(self, module=self._module, port=self._port, indices=[self._page_xindex, self._register_xaddress], value=value))
@register_command
@dataclass
class PX_MII:
    """Provides access to the register interface supported by the media-independent interface (MII) transceiver. It
    is possible to both read and write register values."""

    code: typing.ClassVar[int] = 537      # XMP command code for PX_MII
    pushed: typing.ClassVar[bool] = False  # not pushed asynchronously by the tester

    _connection: "interfaces.IConnection"
    _module: int                 # module index on the chassis
    _port: int                   # port index on the module
    _register_xaddress: int      # MII register address (single sub-index; no page)

    @dataclass(frozen=True)
    class SetDataAttr:
        value: XmpField[XmpHex2] = XmpField(XmpHex2)  # 2 hex bytes, register value of the transceiver

    @dataclass(frozen=True)
    class GetDataAttr:
        value: XmpField[XmpHex2] = XmpField(XmpHex2)  # 2 hex bytes, register value of the transceiver

    def get(self) -> "Token[GetDataAttr]":
        """Get the register value of a transceiver.

        :return: the register value of a transceiver
        :rtype: PX_MII.GetDataAttr
        """
        return Token(self._connection, build_get_request(self, module=self._module, port=self._port, indices=[self._register_xaddress]))

    def set(self, value: str) -> "Token":
        """Set the register value of a transceiver.

        :param value: register value of a transceiver
        :type value: str
        """
        return Token(self._connection, build_set_request(self, module=self._module, port=self._port, indices=[self._register_xaddress], value=value))
@register_command
@dataclass
class PX_TEMPERATURE:
    """
    Transceiver temperature in degrees Celsius.
    """

    code: typing.ClassVar[int] = 538     # XMP command code for PX_TEMPERATURE
    pushed: typing.ClassVar[bool] = True  # value can be pushed asynchronously by the tester

    _connection: "interfaces.IConnection"
    _module: int   # module index on the chassis
    _port: int     # port index on the module

    @dataclass(frozen=True)
    class GetDataAttr:
        temperature_msb: XmpField[XmpByte] = XmpField(XmpByte)  # byte, temperature value before the decimal digit.
        temperature_decimal_fraction: XmpField[XmpByte] = XmpField(XmpByte)  # byte, 1/256th of a degree Celsius after the decimal digit.

    def get(self) -> "Token[GetDataAttr]":
        """Get transceiver temperature in degrees Celsius.

        :return: temperature value before the decimal digit, and 1/256th of a degree Celsius after the decimal digit.
        :rtype: PX_TEMPERATURE.GetDataAttr
        """
        return Token(self._connection, build_get_request(self, module=self._module, port=self._port))
| StarcoderdataPython |
202134 | <gh_stars>0
from .utils import *
from .TelloClient import *
| StarcoderdataPython |
6568791 | # -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2017 <NAME> <<EMAIL>>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
from diffoscope.tools import tool_required
from diffoscope.difference import Difference
from .utils.file import File
from .utils.command import Command
import shutil
import os.path
import binascii
# First 8 bytes of an R serialization file: "X\n" (XDR format) followed by
# version words -- presumably the RDS/RDX version-2 header; confirm against
# the R internals documentation.
HEADER = binascii.a2b_hex("580a000000020003")

# R snippet that deparses every object in an .rdb lazy-load database.
# has to be one line
DUMP_RDB = """lazyLoad(commandArgs(TRUE)); for (obj in ls()) { print(obj); for (line in deparse(get(obj))) cat(line,"\\n"); }"""

# unfortunately this above snippet can't detect the build-path differences so
# diffoscope still falls back to a hexdump
def check_rds_extension(f):
    """Return True if the file's name has an R serialization extension (.rds/.rdx)."""
    # str.endswith accepts a tuple of suffixes -- one call instead of an `or` chain.
    return f.name.endswith((".rds", ".rdx"))
def ensure_archive_rdx(f):
    """Return a path to *f*'s .rdb data such that its companion .rdx sits next to it.

    R's lazyLoad needs the .rdb and .rdx pair side by side; inside an archive
    container the extracted members may not be, so copy both under a common
    base name.
    """
    if not f.container or f.path.endswith(".rdb"):
        return f.path

    # if we're in an archive, copy the .rdx file over so R can read it
    bname = os.path.basename(f.name)
    assert bname.endswith(".rdb")
    rdx_name = f.name[:-4] + ".rdx"

    try:
        rdx_path = f.container.get_member(rdx_name).path
    except KeyError:
        # No companion .rdx in the archive; R will fail, diffoscope will
        # report the error and continue
        return f.path

    shutil.copy(f.path, f.path + ".rdb")
    shutil.copy(rdx_path, f.path + ".rdx")
    return f.path + ".rdb"
class RdsReader(Command):
    """Command that pretty-prints an .rds file via Rscript's readRDS."""

    @tool_required('Rscript')
    def cmdline(self):
        # Pass the file path as a command-line argument to a one-liner.
        return [
            'Rscript',
            '-e',
            'args <- commandArgs(TRUE); readRDS(args[1])',
            self.path,
        ]
class RdsFile(File):
    DESCRIPTION = "GNU R Rscript files (.rds)"

    @classmethod
    def recognizes(cls, file):
        # Accept the file if it (or, for archive members, its container)
        # carries an .rds/.rdx extension.  Note the precedence: the bare
        # `or ... and ...` groups as `ext(file) or (file.container and
        # ext(container))`.
        if (
            check_rds_extension(file)
            or file.container
            and check_rds_extension(file.container.source)
        ):
            # Extension alone is not enough; require the RDS magic header.
            return file.file_header.startswith(HEADER)

        return False

    def compare_details(self, other, source=None):
        return [Difference.from_command(RdsReader, self.path, other.path)]
class RdbReader(Command):
    """Command that dumps an .rdb lazy-load database via Rscript."""

    @tool_required('Rscript')
    def cmdline(self):
        # lazyLoad() expects the base name without the ".rdb" suffix,
        # hence self.path[:-4].
        return ['Rscript', '-e', DUMP_RDB, self.path[:-4]]
class RdbFile(File):
    DESCRIPTION = "GNU R database files (.rdb)"
    FILE_EXTENSION_SUFFIX = '.rdb'

    def compare_details(self, other, source=None):
        # Make sure each .rdb has its companion .rdx beside it before
        # handing both to Rscript.
        self_path = ensure_archive_rdx(self)
        other_path = ensure_archive_rdx(other)
        return [Difference.from_command(RdbReader, self_path, other_path)]
| StarcoderdataPython |
318855 | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 14:39:32 2020
@author: zo
"""
def tokenizer_and_model_config_mismatch(config, tokenizer):
    """
    Check for tokenizer and model config mismatch.

    Args:
        config:
            model config.
        tokenizer:
            tokenizer.

    Raises:
        ValueError: A special token id in config is different from tokenizer.
    """
    id_check_list = ['bos_token_id', 'eos_token_id', 'pad_token_id',
                     'mask_token_id', 'unk_token_id']
    for id_type in id_check_list:
        config_id = getattr(config, id_type)
        tokenizer_id = getattr(tokenizer, id_type)
        if config_id != tokenizer_id:
            # Resolves the source TODO ("We should tell how to resolve it"):
            # name the offending id and both values so the user can fix it.
            raise ValueError(
                'Special token id mismatch for {}: config has {} but the '
                'tokenizer has {}. Update the model config (or tokenizer) so '
                'they agree.'.format(id_type, config_id, tokenizer_id))
def block_size_exceed_max_position_embeddings(config, block_size):
    """Raise ValueError if *block_size* would overflow the position embeddings.

    Position ids created automatically by the model start after
    ``pad_token_id``, so a block longer than
    ``max_position_embeddings + pad_token_id + 1`` indexes past the
    position-embedding table and fails with an unhelpful error (this looks
    like a bug in the transformers library).  The workaround is to shrink
    the block size or raise ``max_position_embeddings``.
    """
    limit = config.max_position_embeddings + config.pad_token_id + 1
    if block_size <= limit:
        return
    recommend_block_size = limit
    raise ValueError(f'This block size will cause error due to max_position_embeddings. '
                     f'Use this block_size={recommend_block_size} or '
                     f'increase max_position_embeddings')
| StarcoderdataPython |
3303421 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from ._shared import parse_key_vault_id, KeyVaultResourceId
def parse_key_vault_secret_id(source_id):
    # type: (str) -> KeyVaultResourceId
    """Parses a secret's full ID into a class with parsed contents as attributes.

    :param str source_id: the full original identifier of a secret
    :returns: Returns a parsed secret ID as a :class:`KeyVaultResourceId`
    :rtype: ~azure.keyvault.secrets.KeyVaultResourceId
    :raises: ValueError

    Example:
        .. literalinclude:: ../tests/test_parse_id.py
            :start-after: [START parse_key_vault_secret_id]
            :end-before: [END parse_key_vault_secret_id]
            :language: python
            :caption: Parse a secret's ID
            :dedent: 8
    """
    parsed = parse_key_vault_id(source_id)
    return KeyVaultResourceId(
        name=parsed.name,
        source_id=parsed.source_id,
        vault_url=parsed.vault_url,
        version=parsed.version,
    )
| StarcoderdataPython |
12850782 | <reponame>MarioCodes/ProyectosClaseDAM<filename>python/periodic-web-scrapper/scraper/utilities/WebUtilities.py
'''
Created on Mar 22, 2018
@author: msanchez
'''
import requests
from html.parser import HTMLParser
import urllib
class WebUtilities(object):
    """Utility class for web downloads/requests and simple static HTML pages.

    Attributes:
        content  HTML fragments accumulated for the page body.
        title    Text rendered inside the page's <h1> header
                 (set by create_static_web()).
    """

    def __init__(self):
        self.content = ""

    def download(self, url):
        """Download *url* and return the Response with HTML entities unescaped.

        :param url: Complete url to download
        :return: Complete request
        :rtype: requests.Response
        """
        page = requests.get(url)
        page._content = self.__unescape(page)
        return page

    def web_status(self, url):
        """Ping a web page and report whether it is reachable.

        :return: "online" if status code == 200, else "offline"
        :rtype: str
        """
        # Bug fix: the module only imported the bare `urllib` package, so
        # `urllib.request` was not guaranteed to resolve; import it here.
        import urllib.request
        status_code = urllib.request.urlopen(url).getcode()
        return "online" if status_code == 200 else "offline"  # todo: change to bool

    def __unescape(self, page):
        """Decode HTML entities so e.g. 'á' does not read back as its escape.

        NOTE(review): HTMLParser.unescape() was removed in Python 3.9;
        html.unescape() is the supported replacement -- confirm the target
        Python version before relying on this.
        """
        parser = HTMLParser()
        return parser.unescape(page.text)

    def create_static_web(self, title):
        """Start a new static page with *title* and empty content."""
        self.content = ""
        self.title = title

    def append_paragraph(self, content):
        """Append *content* to the page body wrapped in <p> tags."""
        self.content += "<p>" + content + "</p>"

    def build(self):
        """Assemble and return the whole static page as a string.

        :return: whole static web with appended title and content.
        :rtype: str
        """
        # Bug fix: the original iterated self.content (a str) character by
        # character, re-concatenating it one char at a time; concatenate
        # the body directly instead.
        return "<html><h1>" + self.title + "</h1>" + self.content + "</html>"
11390509 | <filename>etc/script/notify.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import json
import urllib2
import smtplib
from email.mime.text import MIMEText
# Maps nightingale channel names to the send_<name> classmethods on Sender.
notify_channel_funcs = {
    "email": "email",
    "sms": "sms",
    "voice": "voice",
    "dingtalk": "dingtalk",
    "wecom": "wecom",
    "feishu": "feishu"
}

# SMTP settings; the user/password pair below is a placeholder that
# send_email() refuses to run with.
mail_host = "smtp.163.com"
mail_port = 994
mail_user = "ulricqin"
mail_pass = "password"
mail_from = "<EMAIL>"
class Sender(object):
    """Dispatches alert notifications over the configured channels.

    Each send_<channel> classmethod receives the full JSON payload from
    nightingale: payload['event'] carries the alert and recipient list,
    payload['tpls'] the pre-rendered message templates.  (Python 2 code:
    note the `except Exc, err` syntax and urllib2.)
    """

    @classmethod
    def send_email(cls, payload):
        # Refuse to run with the placeholder SMTP credentials defined above.
        if mail_user == "ulricqin" and mail_pass == "password":
            print("invalid smtp configuration")
            return

        users = payload.get('event').get("notify_users_obj")

        # Dict used as a set to de-duplicate addresses.
        emails = {}
        for u in users:
            if u.get("email"):
                emails[u.get("email")] = 1

        if not emails:
            return

        recipients = emails.keys()
        mail_body = payload.get('tpls').get("mailbody.tpl", "mailbody.tpl not found")
        message = MIMEText(mail_body, 'html', 'utf-8')
        message['From'] = mail_from
        message['To'] = ", ".join(recipients)
        message["Subject"] = payload.get('tpls').get("subject.tpl", "subject.tpl not found")

        try:
            smtp = smtplib.SMTP_SSL(mail_host, mail_port)
            smtp.login(mail_user, mail_pass)
            smtp.sendmail(mail_from, recipients, message.as_string())
            smtp.close()
        except smtplib.SMTPException, error:
            print(error)

    @classmethod
    def send_wecom(cls, payload):
        users = payload.get('event').get("notify_users_obj")

        # De-duplicate per-user robot tokens.
        tokens = {}
        for u in users:
            contacts = u.get("contacts")
            if contacts.get("wecom_robot_token", ""):
                tokens[contacts.get("wecom_robot_token", "")] = 1

        opener = urllib2.build_opener(urllib2.HTTPHandler())
        method = "POST"

        # One webhook POST per distinct robot token.
        for t in tokens:
            url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={}".format(t)
            body = {
                "msgtype": "markdown",
                "markdown": {
                    "content": payload.get('tpls').get("wecom.tpl", "wecom.tpl not found")
                }
            }
            request = urllib2.Request(url, data=json.dumps(body))
            request.add_header("Content-Type", 'application/json;charset=utf-8')
            request.get_method = lambda: method
            try:
                connection = opener.open(request)
                print(connection.read())
            except urllib2.HTTPError, error:
                print(error)

    @classmethod
    def send_dingtalk(cls, payload):
        event = payload.get('event')
        users = event.get("notify_users_obj")

        rule_name = event.get("rule_name")
        event_state = "Triggered"
        if event.get("is_recovered"):
            event_state = "Recovered"

        # Collect distinct robot tokens and phone numbers to @-mention.
        tokens = {}
        phones = {}
        for u in users:
            if u.get("phone"):
                phones[u.get("phone")] = 1
            contacts = u.get("contacts")
            if contacts.get("dingtalk_robot_token", ""):
                tokens[contacts.get("dingtalk_robot_token", "")] = 1

        opener = urllib2.build_opener(urllib2.HTTPHandler())
        method = "POST"

        for t in tokens:
            url = "https://oapi.dingtalk.com/robot/send?access_token={}".format(t)
            body = {
                "msgtype": "markdown",
                "markdown": {
                    "title": "{} - {}".format(event_state, rule_name),
                    "text": payload.get('tpls').get("dingtalk.tpl", "dingtalk.tpl not found") + ' '.join(["@"+i for i in phones.keys()])
                },
                "at": {
                    "atMobiles": phones.keys(),
                    "isAtAll": False
                }
            }
            request = urllib2.Request(url, data=json.dumps(body))
            request.add_header("Content-Type", 'application/json;charset=utf-8')
            request.get_method = lambda: method
            try:
                connection = opener.open(request)
                print(connection.read())
            except urllib2.HTTPError, error:
                print(error)

    @classmethod
    def send_feishu(cls, payload):
        users = payload.get('event').get("notify_users_obj")

        tokens = {}
        phones = {}
        for u in users:
            if u.get("phone"):
                phones[u.get("phone")] = 1
            contacts = u.get("contacts")
            if contacts.get("feishu_robot_token", ""):
                tokens[contacts.get("feishu_robot_token", "")] = 1

        opener = urllib2.build_opener(urllib2.HTTPHandler())
        method = "POST"

        for t in tokens:
            url = "https://open.feishu.cn/open-apis/bot/v2/hook/{}".format(t)
            body = {
                "msg_type": "text",
                "content": {
                    "text": payload.get('tpls').get("feishu.tpl", "feishu.tpl not found")
                },
                "at": {
                    "atMobiles": phones.keys(),
                    "isAtAll": False
                }
            }
            request = urllib2.Request(url, data=json.dumps(body))
            request.add_header("Content-Type", 'application/json;charset=utf-8')
            request.get_method = lambda: method
            try:
                connection = opener.open(request)
                print(connection.read())
            except urllib2.HTTPError, error:
                print(error)

    @classmethod
    def send_sms(cls, payload):
        # Stub: SMS delivery is site-specific and intentionally unimplemented.
        users = payload.get('event').get("notify_users_obj")
        phones = {}
        for u in users:
            if u.get("phone"):
                phones[u.get("phone")] = 1
        if phones:
            print("send_sms not implemented, phones: {}".format(phones.keys()))

    @classmethod
    def send_voice(cls, payload):
        # Stub: voice-call delivery is site-specific and intentionally unimplemented.
        users = payload.get('event').get("notify_users_obj")
        phones = {}
        for u in users:
            if u.get("phone"):
                phones[u.get("phone")] = 1
        if phones:
            print("send_voice not implemented, phones: {}".format(phones.keys()))
def main():
    """Read the alert payload from stdin and fan it out to every channel."""
    payload = json.load(sys.stdin)
    # Keep a copy of the last payload on disk for debugging.
    with open(".payload", 'w') as f:
        f.write(json.dumps(payload, indent=4))

    for ch in payload.get('event').get('notify_channels'):
        send_func_name = "send_{}".format(notify_channel_funcs.get(ch.strip()))
        if not hasattr(Sender, send_func_name):
            # Bug fix: the original passed the name as a second print argument
            # instead of formatting it into the message.
            print("function: {} not found".format(send_func_name))
            continue
        send_func = getattr(Sender, send_func_name)
        send_func(payload)
def hello():
    """Smoke-test hook invoked via `notify.py hello`; prints a greeting."""
    greeting = "hello nightingale"
    print(greeting)
if __name__ == "__main__":
if len(sys.argv) == 1:
main()
elif sys.argv[1] == "hello":
hello()
else:
print("I am confused") | StarcoderdataPython |
9659704 | <reponame>DaniAffCH/Amazon-scraper
from selectorlib import Extractor
import requests
import re
class Scraper:
    """Amazon product-page scraper.

    Downloads a product page with browser-like headers and extracts the
    fields described by a selectorlib YAML selector, normalizing the price,
    ranking and review count.
    """

    __private_url = str()  # target URL, set via setUrl() (name-mangled)
    r = None               # last requests.Response, set by sendRequest()
    ext = None             # selectorlib Extractor built from the YAML selector
    # Browser-like request headers so Amazon serves a normal page.
    headers = {
        'dnt': '1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-user': '?1',
        'sec-fetch-dest': 'document',
        'referer': 'https://www.amazon.com/',
        'accept-language': 'it-IT;q=0.9,it;q=0.8',
    }

    def __init__(self, selector):
        """Build the extractor from a selectorlib YAML file path."""
        # PEP 8: identity check with `is None` instead of `== None`.
        if selector is None or selector == "":
            raise Exception("I parametri devono essere inizializzati")
        self.ext = Extractor.from_yaml_file(selector)

    def setUrl(self, u):
        """Set the product-page URL to scrape."""
        self.__private_url = u

    def sendRequest(self, proxy=None):
        """Download the configured URL, optionally through an HTTP proxy.

        Raises on HTTP errors and when Amazon's bot-block page is detected;
        on success stores the response in self.r.
        """
        if self.__private_url is None or self.__private_url == "":
            raise Exception("Url non valido")
        print("INVIO RICHIESTA")
        if proxy is not None:
            proxy_config = {"http": "http://"+proxy, "https": "http://"+proxy}
            tmp = requests.get(self.__private_url, headers=self.headers, proxies=proxy_config)
        else:
            tmp = requests.get(self.__private_url, headers=self.headers)

        # NOTE(review): `> 400` lets a plain 400 through -- confirm whether
        # `>= 400` was intended before changing it.
        if tmp.status_code > 400:
            raise Exception("ERRORE! CODICE %s"%tmp.status_code)
        elif "To discuss automated access to Amazon data please contact" in tmp.text:
            raise Exception("AMAZON TI HA BLOCCATO\nURL CORRENTE %s"%self.__private_url)
        else:
            print("DOWNLOAD PAGINA COMPLETATO")
            self.r = tmp

    def __normalize(self, arr):
        """Normalize raw extracted fields in place and return the dict."""
        # Strip the non-breaking space Amazon puts inside prices.
        arr["price"] = arr["price"].replace(u'\xa0', u'')
        # Extract (rank, category) pairs, e.g. "n. 1,234 in <category>".
        pattern = r"n\. (\d+,*\d*) in (Alimentari e cura della casa|Oli d'oliva)"
        arr["ranking"] = re.findall(pattern, arr["ranking"])
        for n, element in enumerate(arr["ranking"]):
            tmp = list(arr["ranking"][n])
            tmp[0] = int(element[0].replace(',', ''))  # "1,234" -> 1234
            arr["ranking"][n] = tuple(tmp)
        # First integer in the reviews string is the review count.
        pattern = r"\d+"
        arr["reviews"] = int(re.search(pattern, arr["reviews"]).group())
        return arr

    def extraction(self):
        """Extract and normalize fields from the last downloaded page.

        Returns None implicitly if sendRequest() has not stored a response.
        """
        if self.r is not None:
            out = self.ext.extract(self.r.text)
            out["AmazonSpedition"] = "spedito da Amazon" in self.r.text
            return self.__normalize(out)
| StarcoderdataPython |
399767 | <reponame>fakedrake/WikipediaBase
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_classifiers
----------------------------------
Tests for `classifiers` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from wikipediabase import classifiers as cls
class TestClassifiers(unittest.TestCase):
    """Smoke tests for the wikipediabase classifiers."""

    def test_person(self):
        c = cls.PersonClassifier()
        # Known people should classify as wikibase-person; an institution
        # must not.
        self.assertIn('wikibase-person', c('<NAME>'))
        self.assertIn('wikibase-person', c('<NAME>'))
        self.assertNotIn('wikibase-person', c('Harvard University'))

    def test_term(self):
        # Everything gets the wikibase-term class.
        c = cls.TermClassifier()
        self.assertEquals(c('<NAME>'), ['wikibase-term'])
        self.assertEquals(c('Harvard University'), ['wikibase-term'])

    def test_sections(self):
        # Everything gets the wikibase-sections class.
        c = cls.SectionsClassifier()
        self.assertEquals(c('<NAME>'), ['wikibase-sections'])
        self.assertEquals(c('Harvard University'), ['wikibase-sections'])
unittest.main()
| StarcoderdataPython |
9651996 | <filename>core/views.py<gh_stars>0
from django.shortcuts import render, redirect
from django.http.response import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from pytesseract import pytesseract
from PIL import Image
from django.contrib import messages
@csrf_exempt
def upload(request):
    """Render the upload form; on POST, OCR the uploaded image to a searchable PDF.

    Returns the generated PDF as an attachment named after the uploaded
    file, or re-renders the form with an error message on bad input.
    """
    context = {}
    if request.method == 'POST':
        if not request.FILES:
            messages.error(request, 'Please select file')
            return render(request, 'upload.html')
        uploaded_file = request.FILES['file']
        # Get a searchable PDF
        try:
            pdf = pytesseract.image_to_pdf_or_hocr(Image.open(uploaded_file), extension='pdf')
            response = HttpResponse(pdf)
        except Exception:
            # Bug fix: narrowed from a bare `except:` so system-exiting
            # exceptions (KeyboardInterrupt, SystemExit) still propagate.
            messages.error(request, 'Please upload valid image file')
            return render(request, 'upload.html')
        # Set the HTTP header for sending to browser
        response['Content-Disposition'] = "attachment; filename=%s" % (uploaded_file.name.split('.')[0] + '.pdf')
        # Return the response value
        return response
    return render(request, 'upload.html', context)
| StarcoderdataPython |
# Valid transaction amounts (bill combinations plus fee).
bills = [7, 12, 22, 52, 102, 15, 25, 55, 105, 30, 60, 110, 70, 120, 150]

# Read "N M" pairs until the "0 0" sentinel; the withdrawal is possible
# exactly when the balance difference matches one of the amounts above.
while True:
    N, M = map(int, input().split())
    if N == 0 and M == 0:
        break
    # Replaces the original loop-plus-flag with a direct membership test.
    print('possible' if M - N in bills else 'impossible')
| StarcoderdataPython |
5046041 | <reponame>droberin/TelNot
#!/usr/bin/env python3
#
# Author: <NAME> <<EMAIL>
# Version: 1.0
#
from telnot import telnot
if __name__ == '__main__':
    # Instantiate the Telegram notifier application and enter its main loop.
    app = telnot.TelNot()
    app.run()
1834249 | import os
import numpy as np
import torch
from .helper_func import tensor_to_np
class Dataset:
    """Lazily loads split arrays (<split>_x / <split>_y) from a directory.

    Attribute access such as ``ds.train`` returns the ``(train_x, train_y)``
    pair, loading each array from ``<path>/<name>.npy`` or ``<path>/<name>.csv``
    on first use and caching it in ``self.data``.
    """

    def __init__(self, path):
        self.path = path                    # directory holding the data files
        self.data = dict()                  # cache: file stem -> loaded array/tensor
        self.files = set(os.listdir(path))  # file names present in the directory

    def __getattr__(self, item):
        # Invoked only when normal attribute lookup fails: treat the name as
        # a dataset split, e.g. ds.train -> (train_x, train_y).
        xname = item + "_x"
        yname = item + "_y"
        x = self.__get_helper(xname)
        y = self.__get_helper(yname)
        return x, y

    def __get_helper(self, fname, type='np', device='cpu'):
        """Load (and cache) one array by file stem.

        ``type`` (kept for interface compatibility despite shadowing the
        builtin) selects 'np' arrays or 'torch' tensors; ``device`` places
        torch tensors.  Raises KeyError if no matching file exists.
        """
        if fname not in self.data:
            # Bug fix: the original called np.load(fname)/np.loadtxt(fname)
            # with the bare stem -- no directory and no extension -- which
            # can never match the files discovered in self.path.
            if fname + '.npy' in self.files:
                self.data[fname] = np.load(os.path.join(self.path, fname + '.npy'))
            elif fname + '.csv' in self.files:
                self.data[fname] = np.loadtxt(
                    os.path.join(self.path, fname + '.csv'), delimiter=',')
            if type == 'torch':
                self.data[fname] = torch.Tensor(self.data[fname])
                if device != 'cpu':
                    self.data[fname] = self.data[fname].to(device)
        return self.data[fname]

    def save(self, path=None, verbose=False):
        """Write every cached array to ``<path>/<stem>.npy`` (default self.path)."""
        path = self.path if path is None else path
        for fname in self.data:
            file_path = os.path.join(path, fname + '.npy')
            odata = tensor_to_np(self.data[fname])
            np.save(file_path, odata)
            if verbose:
                print('File {} saved with shape {}.'.format(fname, odata.shape))
| StarcoderdataPython |
4885797 | import contextlib
import io
import sys
import time
import unittest
import unittest.mock
import importlib
import mqtt_app
from threading import Thread
class TestApp(unittest.TestCase):
    """Integration-style test of the MQTT reference app against a live broker."""

    @contextlib.contextmanager
    def _capture_output(self):
        """Temporarily redirect stdout/stderr into StringIO buffers."""
        new_out, new_err = io.StringIO(), io.StringIO()
        old_out, old_err = sys.stdout, sys.stderr
        try:
            sys.stdout, sys.stderr = new_out, new_err
            yield sys.stdout, sys.stderr
        finally:
            sys.stdout, sys.stderr = old_out, old_err

    def test_app(self):
        # capture output to test on
        with self._capture_output() as (out, err):
            # Reload so module-level state in mqtt_app is fresh for this run.
            importlib.reload(mqtt_app)
            mqtt_thread = Thread(target=mqtt_app.start_mqtt, args=(), daemon=True)
            mqtt_thread.start()
            publish = Thread(target=mqtt_app.publish_thread, args=(), daemon=True)
            publish.start()

            # Need a delay to allow some time for the threads to start
            time.sleep(4)

            # Pull info out of stdout since this app uses the cs.py log
            # function. This means the logs are converted to prints and
            # go to stdout
            output = err.getvalue().strip()

            # if the test passed, stdout should have captured output
            self.assertIn('MQTT connect reply to test.mosquitto.org, 1883: Connection Accepted.', output)
            self.assertIn('MQTT Client connection results: Connection Accepted.', output)
            self.assertIn('Published msg received. topic: /status/gps/lastpos', output)
            self.assertIn('Published msg received. topic: /status/wan/connection_state', output)
            self.assertIn('Published msg received. topic: /status/system/modem_temperature', output)
            self.assertIn('MQTT published file:', output)
            self.assertNotIn('Exception in publish_file().', output)
            self.assertNotIn('Exception in publish_thread().', output)
            self.assertNotIn('Exception in start_mqtt()!', output)
            self.assertNotIn('Exception during start_app()!', output)
| StarcoderdataPython |
class GameState(object):
    """
    This class represents a contract that must be fulfilled for MCTS to work.
    Such contract describes a singular state within a game (i.e. pawns positioning on chessboard).
    Methods provided below are bare minimum to make UTC in any 2-player complete information deterministic
    zero-sum game. By convention the players are numbered 1 and 2.
    """

    def __init__(self, last_active_player=2, _chips=15):
        # NOTE(review): the `_chips` default looks specific to one concrete
        # game (e.g. Nim) rather than this generic contract -- confirm.
        self.last_active_player = last_active_player
        self._chips = _chips

    def clone(self):
        """
        This method should return a deep clone of this game state.
        """
        pass

    def perform_action(self, action):
        """
        This method should perform given move, update state and switch which player has just moved.

        :param action: action to be performed
        """
        pass

    def get_available_actions(self):
        """
        This method should return list of all possible, legal moves.
        """
        pass

    def get_value(self, player_id):
        """
        Get the game state value, from point of view of given player (1st or 2nd)

        :param player_id: player_id to get value for
        """
        pass

    def __repr__(self):
        """
        This method allows for easier printing out game states, but is not necessary.
        """
        pass
| StarcoderdataPython |
1782832 | <filename>coreml/data/sampler.py
"""Custom sampler for loading data"""
import random
from typing import List, Any, Optional
from collections import defaultdict
import numpy as np
from torch.utils.data.sampler import Sampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import Dataset
from coreml.factory import Factory
class DataSampler(Sampler):
    """Epoch-ordering sampler mirroring the default PyTorch behaviour.

    Serves as the base for samplers adding more functionality: subclasses
    override ``load_fn`` to change the per-epoch index order.

    :param data_source: the dataset object from which to sample
    :type data_source: :class:`~torch.utils.data.Dataset`
    :param shuffle: whether to permute the indices each epoch, defaults to True
    :type shuffle: bool, optional
    :param seed: seed fed to the (global) ``random`` module, defaults to 0
    :type seed: int, optional
    :param kwargs: additional params as dict, ignored here
    :type kwargs: dict
    """
    def __init__(self, data_source: Dataset, shuffle: bool = True,
                 seed: int = 0, **kwargs):
        super(DataSampler, self).__init__(data_source)
        self.data_source = data_source
        self.shuffle = shuffle
        random.seed(seed)
        self.len = len(data_source)

    def load_fn(self):
        """Produce the index order for one epoch."""
        order = np.arange(self.len)
        if not self.shuffle:
            return order
        random.shuffle(order)
        return order

    def __iter__(self):
        return iter(self.load_fn())

    def __len__(self):
        return self.len
class ClassificationDataSampler(DataSampler):
    """Custom sampler to decide the ordering of samples for classification

    :param data_source: the dataset object from which to sample
    :type data_source: :class:`~torch.utils.data.Dataset`
    :param shuffle: decides the functionality for the sampler,
        defaults to True
    :type shuffle: bool, optional
    :param seed: random seed to use for sampling, defaults to 0
    :type seed: int, optional
    :param target_transform: defines the transformation to be applied on the
        raw targets to make them processable; if label_index is provided,
        target_transform.transforms[label_index] is used instead; defaults to
        None
    :type target_transform: Any
    :param mode: mode of sampling; choices are [`default`, `balanced`]; for
        `default`, it matches the default sampling behaviour. For `balanced`,
        it ensures class balance per batch and drops the examples; defaults
        to `default`
    :type mode: str, optional
    """
    def __init__(self, data_source: Dataset, shuffle: bool = True,
                 seed: int = 0, target_transform: Any = None,
                 mode: str = 'default'):
        super(ClassificationDataSampler, self).__init__(
            data_source, shuffle, seed)
        self._check_params(data_source, shuffle, target_transform, mode)
        self.mode = mode

        # NOTE(review): mode='random' passes validation below but takes the
        # same path as 'default' here -- confirm that is intended.
        if mode == 'balanced':
            self.labels = [
                item.label['classification'] for item in data_source.items]
            if target_transform is not None:
                self.labels = np.array([target_transform(
                    label) for label in self.labels])

            # indices[i] is the class index of example i.
            _, indices = np.unique(self.labels, return_inverse=True)

            # tracks the list of indices corresponding to each label
            self.label_indices_map = defaultdict(list)
            for index, class_index in enumerate(indices):
                self.label_indices_map[class_index].append(index)

            # tracks the minimum number of examples across classes
            self.min_count = min(
                [len(indices) for _, indices in self.label_indices_map.items()]
            )

            # Replace the base sampler's ordering with the balanced one.
            self.load_fn = self.load_balanced

            # length = number of classes * min_count
            self.len = self.min_count * len(self.label_indices_map)

    def load_balanced(self):
        """
        Returns a list of indices with class balance per batch.
        It returns K * C indices where C is the number of classes and K
        is the minimum number of examples across classes.
        """
        if self.shuffle:
            for key in self.label_indices_map:
                random.shuffle(self.label_indices_map[key])

        indices = []
        # Emit one index per class, round-robin, so every consecutive run of
        # C indices contains each class exactly once.
        for i in range(self.min_count):
            # need to use `sorted` here to ensure that the ordering of keys is
            # not affected by which key was created first
            indices.extend([subindices[i] for _, subindices in sorted(
                self.label_indices_map.items())])

        return indices

    @staticmethod
    def _check_params(data_source, shuffle, target_transform, mode):
        """Validate constructor arguments; 'balanced' mode needs dict labels."""
        assert mode in ['default', 'balanced', 'random']
        if mode in ['default', 'random']:
            return

        assert isinstance(data_source.items[0].label, dict)
        if target_transform is not None:
            assert hasattr(target_transform, 'classes')
class DistributedSamplerWrapper(DistributedSampler):
    """Shard the output of an arbitrary sampler across distributed replicas.

    The wrapped sampler decides the global ordering; this wrapper hands
    each replica an interleaved, equally-sized slice of that ordering.

    :param sampler: sampler providing the global index order; must expose
        a ``data_source`` attribute (as torch samplers do)
    :param num_replicas: number of processes participating; defaults to
        the distributed world size
    :type num_replicas: int, optional
    :param rank: rank of the current process; defaults to the distributed
        rank
    :type rank: int, optional
    :param shuffle: forwarded to :class:`DistributedSampler`; the actual
        ordering comes from ``sampler``, defaults to True
    :type shuffle: bool, optional
    """

    def __init__(
            self, sampler,
            num_replicas: Optional[int] = None,
            rank: Optional[int] = None,
            shuffle: bool = True):
        # The parent computes num_samples / total_size from the dataset
        # length and resolves num_replicas / rank from the process group.
        super(DistributedSamplerWrapper, self).__init__(
            sampler.data_source, num_replicas, rank, shuffle)
        self.sampler = sampler

    def __iter__(self):
        """Yield this replica's shard of the wrapped sampler's indices."""
        indices = list(self.sampler)
        # Pad by wrapping around so every replica receives exactly
        # num_samples indices; without this, uneven splits give replicas
        # different lengths and distributed training can deadlock.
        if len(indices) < self.total_size:
            shortfall = self.total_size - len(indices)
            indices += (indices * (shortfall // len(indices) + 1))[:shortfall]
        indices = indices[self.rank:self.total_size:self.num_replicas]
        return iter(indices)

    def __len__(self):
        # Per-replica length, consistent with __iter__. (Previously this
        # returned the full, un-sharded len(self.sampler), which
        # disagreed with the number of indices actually yielded.)
        return self.num_samples
# Factory exposing the available sampler implementations by name.
sampler_factory = Factory()
for _key, _builder in (('default', DataSampler),
                       ('random', RandomSampler),
                       ('classification', ClassificationDataSampler)):
    sampler_factory.register_builder(_key, _builder)
| StarcoderdataPython |
6478205 | <reponame>dumpmemory/zulip<gh_stars>1-10
# Generated by Django 3.2.9 on 2021-12-27 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make RemotePushDeviceToken addressable by a user UUID in addition
    to the (now nullable) integer user id."""

    dependencies = [
        ("zilencer", "0023_remotezulipserver_deactivated"),
    ]

    operations = [
        # user_id becomes a nullable, indexed BigIntegerField —
        # presumably so rows registered via user_uuid can leave it unset;
        # confirm against the registration code path.
        migrations.AlterField(
            model_name="remotepushdevicetoken",
            name="user_id",
            field=models.BigIntegerField(db_index=True, null=True),
        ),
        # New optional UUID identifier for the token's user.
        migrations.AddField(
            model_name="remotepushdevicetoken",
            name="user_uuid",
            field=models.UUIDField(null=True),
        ),
        # Enforce uniqueness under both addressing schemes (UUID-keyed
        # and legacy integer-id-keyed rows).
        migrations.AlterUniqueTogether(
            name="remotepushdevicetoken",
            unique_together={
                ("server", "user_uuid", "kind", "token"),
                ("server", "user_id", "kind", "token"),
            },
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.