id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1770968 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright 2020 <NAME>. All Rights Reserved
from __future__ import print_function
from __future__ import division
import click
import zerorpc
import time
import yaml
from loguru import logger as log
from PIL import Image
from traintrack.server import TrackerServer
from traintrack.services import add_trackers_from_config
from traintrack.services.debug import DebugTracker
# Short aliases for the click decorators used on main() below.
arg = click.argument
opt = click.option

# Fallback configuration used when no --config file is supplied.
# 'trackers' entries are consumed by add_trackers_from_config().
default_config = {
    'host': '0.0.0.0',
    'port': 4242,
    'trackers': [
        {'type': 'progress'},
        {'type': 'console'}
    ]
}
@click.command()
@opt('--config', 'configfile', default=None, help='YAML configuration file')
@opt('--port', default=None, help='TCP port number')
@opt('--host', default=None, help='Hostname to run on')
@opt('--debug/--no-debug', default=False, help='Only use the debug tracker')
def main(host, port, configfile, debug):
    """
    Experiment tracking ZeroRPC server.
    """
    server = TrackerServer()
    # Start from the built-in defaults; a config file replaces them wholesale.
    config = default_config
    if configfile:
        with open(configfile, 'r') as f:
            config = yaml.safe_load(f)
    # Command-line --host/--port take precedence over the config file.
    for key, value in (('host', host), ('port', port)):
        if value:
            config[key] = value
    # Register trackers: debug mode short-circuits the configured set.
    if debug:
        server.register_tracker(DebugTracker())
    else:
        add_trackers_from_config(server, config, log)
    # Run the RPC service on the resolved host/port.
    log.info(f'Starting RPC service on {config["host"]}:{config["port"]}')
    server.run(config['host'], config['port'])
if __name__ == '__main__':
    # click supplies the CLI arguments at runtime, hence the suppression.
    main()  # pylint: disable=no-value-for-parameter
| StarcoderdataPython |
66232 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""See README.md for package documentation."""
from setuptools import setup
from io import open
from os import path
from calendar_widget import __version__
# Absolute path of the directory containing this setup.py.
here = path.abspath(path.dirname(__file__))

# Use the README as the long description (rendered on PyPI as markdown).
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

URL = 'https://github.com/kivy-garden/calendar_widget'

setup(
    name='calendar_widget',
    # Single-sourced from calendar_widget.__version__ (imported above).
    version=__version__,
    description='A calendar widget for Kivy.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url=URL,
    author="<NAME> (xxblx),\
<NAME> (amelius), <NAME> (fherbine)",
    author_email="<EMAIL>, <EMAIL>",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='Kivy kivy-garden calendar_widget',
    packages=['calendar_widget'],
    # No install-time requirements are declared.
    install_requires=[],
    extras_require={
        # Development and CI extras.
        'dev': ['pytest>=3.6', 'wheel', 'pytest-cov', 'pycodestyle'],
        'travis': ['coveralls'],
    },
    package_data={},
    data_files=[],
    entry_points={},
    project_urls={
        'Bug Reports': URL + '/issues',
        'Source': URL,
    },
)
| StarcoderdataPython |
130439 | import fisher
import numpy as np
def print_2x2_table(table, row_labels, col_labels, fmt="%d"):
    """
    Build a ReST-style 2x2 contingency table (as used for Fisher's exact
    test) with row, column, and grand totals, and return it as a string.

    :param table: The four cells of a 2x2 table: [r1c1, r1c2, r2c1, r2c2]
    :param row_labels: A length-2 list of row names
    :param col_labels: A length-2 list of column names
    :param fmt: printf-style format applied to every numeric cell
    """
    t11, t12, t21, t22 = table
    grand = sum(table)
    # Marginal totals.
    row_sums = (t11 + t12, t21 + t22)
    col_sums = (t11 + t21, t12 + t22)
    # Format every number once, up front.
    cells = [fmt % v for v in (t11, t12, t21, t22)]
    row_tot = [fmt % v for v in row_sums]
    col_tot = [fmt % v for v in col_sums]
    grand_s = fmt % grand
    # Table body, row by row: header, two data rows, totals row.
    rows = [
        [""] + col_labels + ['total'],
        [row_labels[0], cells[0], cells[1], row_tot[0]],
        [row_labels[1], cells[2], cells[3], row_tot[1]],
        ['total', col_tot[0], col_tot[1], grand_s],
    ]
    # Each column is padded to the width of its widest entry.
    widths = [max(len(entry) for entry in column) for column in zip(*rows)]
    rule = ' '.join('=' * w for w in widths)

    def justified(row):
        # Left-justify every cell to its column width.
        return ' '.join(text.ljust(w) for text, w in zip(row, widths))

    lines = [rule, justified(rows[0]), rule]
    lines.extend(justified(row) for row in rows[1:])
    lines.append(rule + '\n')
    return "\n".join(lines)
def print_row_perc_table(table, row_labels, col_labels):
    """
    Render the 2x2 table with each cell expressed as a fraction of its
    row total, and return it as a string.
    """
    r1c1, r1c2, r2c1, r2c2 = map(float, table)
    row1_total = r1c1 + r1c2
    row2_total = r2c1 + r2c2

    def safe_frac(num, den):
        # An empty row contributes 0 instead of raising ZeroDivisionError.
        return num / den if den else 0

    fractions = [
        safe_frac(r1c1, row1_total),
        safe_frac(r1c2, row1_total),
        safe_frac(r2c1, row2_total),
        safe_frac(r2c2, row2_total),
    ]
    rendered = print_2x2_table(fractions, row_labels, col_labels, fmt="%.2f")
    lines = rendered.splitlines(True)
    # Drop the bottom 'total' row: column sums of row fractions are not
    # meaningful.
    del lines[5]
    return ''.join(lines)
def print_col_perc_table(table, row_labels, col_labels):
    """
    Render the 2x2 table with each cell expressed as a fraction of its
    column total, and return it as a string.
    """
    r1c1, r1c2, r2c1, r2c2 = map(float, table)
    col1_total = r1c1 + r2c1
    col2_total = r1c2 + r2c2

    def safe_frac(num, den):
        # An empty column contributes 0 instead of raising ZeroDivisionError.
        return num / den if den else 0

    fractions = [
        safe_frac(r1c1, col1_total),
        safe_frac(r1c2, col2_total),
        safe_frac(r2c1, col1_total),
        safe_frac(r2c2, col2_total),
    ]
    rendered = print_2x2_table(fractions, row_labels, col_labels, fmt="%.2f")
    lines = rendered.splitlines(False)
    # Chop off the rightmost ('total') column: everything from the last
    # space in the separator line onwards.
    cut = lines[0].rindex(" ")
    return '\n'.join(line[:cut] for line in lines)
def table_maker(subset, ind1, ind2, row_labels, col_labels, title):
    """
    Print counts, row percentages, column percentages, and Fisher's exact
    test result for a 2x2 split of the data.

    `subset` provides a subsetted boolean of items to consider. If no subset,
    you can use all with `np.ones_like(ind1) == 1`.

    `ind1` is used to subset rows, e.g., log2fc > 0. This is used for rows,
    so row_labels might be ['upregulated', 'others'].

    `ind2` is used to subset cols. For example, col_labels would be
    ['bound', 'unbound'].
    """
    # Cell counts in the order expected by print_2x2_table.
    counts = [
        sum(subset & ind1 & ind2),
        sum(subset & ind1 & ~ind2),
        sum(subset & ~ind1 & ind2),
        sum(subset & ~ind1 & ~ind2),
    ]
    print()
    print(title)
    print('-' * len(title))
    # Counts, then row percentages, then column percentages.
    for renderer in (print_2x2_table, print_row_perc_table, print_col_perc_table):
        print(renderer(counts, row_labels=row_labels, col_labels=col_labels))
    print(fisher.pvalue(*counts))
if __name__ == "__main__":
table = [12, 5, 29, 2]
s = print_2x2_table(
table,
row_labels=['Selected', 'Not selected'],
col_labels=['Having the property', 'Not having the property']
)
str_table = """
============ =================== ======================= =====
Having the property Not having the property total
============ =================== ======================= =====
Selected 12 5 17
Not selected 29 2 31
total 41 7 48
============ =================== ======================= =====
"""
# For the test, remove the first newline and all common leading whitespace
from textwrap import dedent
str_table = "".join(str_table.splitlines(True)[1:])
print(s)
assert dedent(str_table) == s
| StarcoderdataPython |
163493 | import pandas as pd
# Load dataset (4000 rows of global air-quality measurements):
# https://storage.googleapis.com/dqlab-dataset/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/LO4/global_air_quality_4000rows.csv')
# Show the first five rows before the conversion.
print('Sebelum diubah dalam format datetime:\n', gaq.head())
# Parse the 'timestamp' column into datetimes and use it as the index.
gaq['timestamp'] = pd.to_datetime(gaq['timestamp'])
gaq = gaq.set_index('timestamp')
# Show the first five rows after the conversion.
print('Sesudah diubah dalam format datetime:\n', gaq.head())
1793095 | <reponame>AvTe/Data-Structure-in-Python-Dr.-R-Nageswara-Rao-Book-program-<filename>Deque..py
# deque Operation
from collections import deque
# Create an empty deque.
d = deque()

choice = 0
while choice < 7:
    print('DEQUE OPERATION')
    print('1 Add element at front')
    print('2 Remove element at front')
    print('3 Add element at rear')
    print('4 Remove element at rear')
    print('5 Remove element in the middle')
    print('6 Search for element')
    print('7 Exit')
    choice = int(input('Your choice: '))
    # Perform a task depending on the user's choice.
    if choice == 1:
        element = input('Enter element: ')
        d.appendleft(element)
    elif choice == 2:
        if len(d) == 0:
            print('Deque is empty')
        else:
            d.popleft()
    elif choice == 3:
        element = input('Enter element: ')
        d.append(element)
    elif choice == 4:
        if len(d) == 0:
            print('Deque is empty')
        else:
            d.pop()
    elif choice == 5:
        element = input('Enter element: ')
        try:
            d.remove(element)
        except ValueError:
            # deque.remove() raises ValueError when the element is absent.
            print('Element not found')
    elif choice == 6:
        element = input('Enter element: ')
        c = d.count(element)
        print('No of times the element found: ', c)
    else:
        # Any other choice (including 7) exits the menu.
        break
    # Display the deque elements using a for loop.
    print('Deque= ', end='')
    for i in d:
        print(i, ' ', end='')
    print()  # move cursor to next line
| StarcoderdataPython |
3362552 | # Generated by Django 2.2.8 on 2020-02-14 07:30
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.8 on 2020-02-14; avoid hand-editing
    # migrations that have already been applied.

    dependencies = [
        ('datasets', '0037_auto_20200211_1929'),
        ('boundaries', '0020_ward_name'),
    ]

    operations = [
        # New model storing a named, coded geographic boundary polygon
        # (WGS84, srid=4326) linked to a datasets.Geography.
        migrations.CreateModel(
            name='GeographyBoundary',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10)),
                ('name', models.CharField(max_length=50)),
                ('area', models.FloatField()),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(null=True, srid=4326)),
                ('geography', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='datasets.Geography')),
            ],
        ),
        # Speed up lookups by boundary code.
        migrations.AddIndex(
            model_name='geographyboundary',
            index=models.Index(fields=['code'], name='boundaries__code_8ea6f4_idx'),
        ),
    ]
| StarcoderdataPython |
1622699 | """Variáveis que caracterizam o banco de dados"""
import os
__all__ = [
'f_names_train', 'f_names_test', 'ch_names', 'e_dict', 'e_classes',
'n_runs', 'base_folder', 'epoch_train_loc', 'epoch_test_loc', 'raw_folder', 'raw_fif_folder',
'csp_folder', 'features_test_folder', 'features_train_folder', 'originals_folder'
]
# Nomes que referenciam os sujeitos e seus respectivos conjuntos de teste/treino
f_names_train = ["A01T", "A02T", "A03T", "A05T", "A06T", "A07T", "A08T", "A09T"]
f_names_test = ["A01E", "A02E", "A03E", "A05E", "A06E", "A07E", "A08E", "A09E"]
# Canais do dataset:
ch_names = ['Fz', 'FC3', 'FC1', 'FCz', 'FC2', 'FC4', 'C5', 'C3', 'C1', 'Cz', 'C2',
'C4', 'C6', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'P1', 'Pz', 'P2', 'POz',
'EOG1', 'EOG2', 'EOG3']
# Faz a definição dos eventos (classes de movimentos) com um dicionário.
# Neste caso o dicionário é feito para as seguintes classes:
# l - mão esquerda
# r - mão direita
# f - pés
# t - lígua
# a - movimento das maos
# b - movimento pés ou lingua
e_dict = {1: 'l', 2: 'r', 3: 'f', 4: 't'}
# Pega os valores de forma não duplicada
e_classes = list()
for val in e_dict.values():
e_classes.append(val) if val not in e_classes else ...
# Numeros de arquivos por pessoa
n_runs = 6
# Localização da pasta onde o dataset original está
originals_folder = 'subject_files/bci_competition_dataset'
# Pasta onde serão salvos os arquivos de processamento
base_folder = "subject_files/dataset_files_fbcsp_9"
# Os arquivos 'sorted' são apenas um conjunto de matrizes com as epocas já separadas
# por classe, sendo um arquivo desses por sujeito
epoch_train_loc = os.path.join(base_folder, "epoch_train")
epoch_test_loc = os.path.join(base_folder, "epoch_test")
csp_folder = os.path.join(base_folder, "csp")
# Os arquivos 'raw' são os sinais originais provisionados pelo dataset, mantendo
# todas as informações iniciais (trabalha em conjunto com os arquivos de eventos)
raw_folder = os.path.join(base_folder, "raw_data")
raw_fif_folder = os.path.join(base_folder, "raw_fif_files")
# Os arquivos 'features' são matrizes que guardam o conjunto de características, tanto
# do conjunto de testes como do conjunto de treino
features_train_folder = os.path.join(base_folder, "features_train")
features_test_folder = os.path.join(base_folder, "features_test")
| StarcoderdataPython |
1746274 | <filename>backend/document/entrypoints/app.py
"""This module provides the FastAPI API definition."""
import os
import pathlib
from collections.abc import Iterable, Sequence
from typing import Any
from document.config import settings
from document.domain import document_generator, exceptions, model, resource_lookup
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse
from pydantic import AnyHttpUrl
# Don't serve swagger docs static assets from third party CDN.
# Source: https://github.com/tiangolo/fastapi/issues/2518#issuecomment-827513744
app = FastAPI()

# Module-level logger obtained from the project's settings.
logger = settings.logger(__name__)

# CORS configuration to allow the frontend to talk to the backend;
# allowed origins come from settings.
origins: list[AnyHttpUrl] = settings.BACKEND_CORS_ORIGINS
logger.debug("CORS origins: %s", origins)
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)
@app.exception_handler(exceptions.InvalidDocumentRequestException)
def invalid_document_request_exception_handler(
    request: Request, exc: exceptions.InvalidDocumentRequestException
) -> JSONResponse:
    """Translate a failed document request into a 404 JSON payload."""
    body = {
        "message": f"{exc.message}",
    }
    return JSONResponse(status_code=404, content=body)
@app.post("/documents")
def document_endpoint(
document_request: model.DocumentRequest,
success_message: str = settings.SUCCESS_MESSAGE,
failure_message: str = settings.FAILURE_MESSAGE,
) -> model.FinishedDocumentDetails:
"""
Get the document request and hand it off to the document_generator
module for processing. Return model.FinishedDocumentDetails instance
containing URL of resulting PDF, or, raise an
InvalidDocumentRequestException if there is an exception which is
subsequently caught in the frontend UI.
"""
# Top level exception handler
try:
document_request_key, finished_document_path = document_generator.main(
document_request
)
assert os.path.exists(finished_document_path)
except Exception:
logger.exception(
"There was a error while attempting to fulfill the document "
"request. Likely reason is the following exception:"
)
# NOTE It might not always be the case that an exception here
# is as a result of an invalid document request, but it often
# is.
raise exceptions.InvalidDocumentRequestException(message=failure_message)
else:
details = model.FinishedDocumentDetails(
finished_document_request_key=document_request_key,
message=success_message,
)
logger.debug("FinishedDocumentDetails: %s", details)
return details
@app.get("/pdfs/{document_request_key}")
def serve_pdf_document(
document_request_key: str, output_dir: str = settings.output_dir()
) -> FileResponse:
"""Serve the requested PDF document."""
path = "{}.pdf".format(os.path.join(output_dir, document_request_key))
return FileResponse(
path=path,
filename=pathlib.Path(path).name,
headers={"Content-Disposition": "attachment"},
)
@app.get("/language_codes_names_and_resource_types")
def lang_codes_names_and_resource_types() -> Iterable[model.CodeNameTypeTriplet]:
"""
Return list of tuples of lang_code, lang_name, resource_types for
all available language codes.
"""
return resource_lookup.lang_codes_names_and_resource_types()
@app.get("/language_codes")
def lang_codes() -> Iterable[str]:
"""Return list of all available language codes."""
return resource_lookup.lang_codes()
@app.get("/language_codes_and_names")
def lang_codes_and_names() -> list[tuple[str, str]]:
"""Return list of all available language code, name tuples."""
return resource_lookup.lang_codes_and_names()
@app.get("/resource_types")
def resource_types() -> Any:
"""Return list of all available resource types."""
return resource_lookup.resource_types()
@app.get("/resource_types_for_lang/{lang_code}")
def resource_types_for_lang(lang_code: str) -> Sequence[Any]:
"""Return list of all available resource types."""
return resource_lookup.resource_types_for_lang(lang_code)
@app.get("/resource_codes_for_lang/{lang_code}")
def resource_codes_for_lang(lang_code: str) -> Sequence[Sequence[Any]]:
"""Return list of all available resource types."""
return resource_lookup.resource_codes_for_lang(lang_code)
@app.get("/resource_codes")
def resource_codes() -> Any:
"""Return list of all available resource codes."""
return resource_lookup.resource_codes()
@app.get("/health/status")
def health_status() -> tuple[dict[str, str], int]:
"""Ping-able server endpoint."""
return {"status": "ok"}, 200
| StarcoderdataPython |
59302 | from django.conf.urls import url
from . import views
# Routes for managing "special pages" attached to a site-settings object;
# each URL is keyed by the parent site_settings_pk.
urlpatterns = [
    # Create a special page under the given site settings.
    url(r'^(?P<site_settings_pk>\d+)/special-page/add/$',
        views.special_page_add, name='special-page-add'),
    # Edit an existing special page (identified by pk).
    url(r'^(?P<site_settings_pk>\d+)/special-page/'
        r'(?P<pk>\d+)/update/$',
        views.special_page_edit, name='special-page-edit'),
    # Delete an existing special page (identified by pk).
    url(r'^(?P<site_settings_pk>\d+)/special-page/'
        r'(?P<pk>\d+)/delete/$',
        views.special_page_delete, name='special-page-delete')
]
| StarcoderdataPython |
1691569 | <reponame>mdavezac/bempp<filename>python/bempp/visualization.py
# Copyright (C) 2011-2012 by the BEM++ Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import bempp.py_extensions as py_ext
try:
from tvtk.api import tvtk
from mayavi import mlab
except ImportError:
print "You need to have Enthought tvtk and mayavi installed for this module to work!"
def getTvtkGrid(grid):
    """Convert a BEM++ grid into a TVTK PolyData mesh object."""
    # Only triangular surface grids are supported.
    if grid.topology() != "triangular":
        raise TypeError("Visualization of this grid topology not implemented!")
    (points, elems, auxData) = grid.leafView().getRawElementData()
    mesh = tvtk.PolyData()
    mesh.points = points.T
    # Drop the trailing row of the raw connectivity data before transposing.
    mesh.polys = elems[:-1, :].T
    return mesh
def plotTvtkActors(tvtkActors):
    """Add one actor, or an iterable of actors, to a fresh Mayavi figure
    and show it."""
    import collections
    figure = mlab.figure()
    # Normalize a single actor to a list so one loop handles both cases.
    if not isinstance(tvtkActors, collections.Iterable):
        tvtkActors = [tvtkActors]
    for actor in tvtkActors:
        figure.scene.add_actor(actor)
    mlab.show()
def gridActor(grid):
    """
    Return a TVTK actor representing a grid.

    *Parameters:*
       - grid (Grid)
            A BEM++ grid object.
    """
    mapper = tvtk.DataSetMapper(input=getTvtkGrid(grid))
    actor = tvtk.Actor(mapper=mapper)
    # Wireframe rendering with full ambient lighting.
    actor.property.representation = 'w'
    actor.property.ambient = 1
    return actor
def gridFunctionActor(gridFun, dataType='vertex_data', transformation='real'):
    """
    Return a TVTK actor representing a grid function.

    *Parameters:*
       - gridFun (gridFunction).
            The grid function to be plotted.
       - dataType ('vertex_data' or 'cell_data')
            Determines whether the plot should be constructed from the function
            values at element vertices or at element centres.
       - transformation ('real', 'imag', 'abs' or a callable object)
            Determines how the function is transformed before plotting.
    """
    if not dataType in ["cell_data", "vertex_data"]:
        raise ValueError("Unknown mode specified. Valid modes are 'vertex_data' and 'cell_data'!")
    # Resolve a named transformation to a callable; a custom callable is
    # used as-is (it receives the raw value array).
    if not hasattr(transformation, '__call__'):
        if transformation == 'real':
            data_transform = lambda x: np.real(x)
        elif transformation == 'imag':
            data_transform = lambda x: np.imag(x)
        elif transformation == 'abs':
            data_transform = lambda x: np.abs(x)
        else:
            raise ValueError("Unknown value for 'transformation'. It needs to be 'real', 'imag', 'abs' or a Python callable!")
    else:
        data_transform = transformation
    mesh = getTvtkGrid(gridFun.grid())
    # Sample the grid function either at vertices (point data) or at
    # element centres (cell data) and attach it to the mesh.
    if dataType == "vertex_data":
        values = gridFun.evaluateAtSpecialPoints("vertex_data").flatten()
        tvtk_data = mesh.point_data
    elif dataType == "cell_data":
        values = gridFun.evaluateAtSpecialPoints("cell_data").flatten()
        tvtk_data = mesh.cell_data
    values = data_transform(values)
    tvtk_data.scalars = values
    mapper = tvtk.DataSetMapper(input=mesh)
    # Colour scale spans the attached scalar data.
    mapper.scalar_range = tvtk_data.scalars.range
    actor = tvtk.Actor(mapper=mapper)
    return actor
def scalarDataOnRegularGridActor(
        points, data, dimensions,
        colorRange=None, transformation='real'):
    """
    Return a TVTK actor representing the plot of a function interpolated on
    a regular grid.

    *Parameters:*
       - points: 3 x n array of sample coordinates.
       - data: flat array of one scalar value per point.
       - dimensions: length-2 grid shape; note the actor is built with
            dimensions (dimensions[1], dimensions[0], 1).
       - colorRange: optional (min, max) for the colour scale; computed
            from the data when None.
       - transformation ('real', 'imag', 'abs' or a callable object)
            Determines how the data is transformed before plotting.
            A callable receives (points, values), unlike the
            single-argument form used by gridFunctionActor.
    """
    if points.shape[0] != 3 or points.ndim != 2:
        raise ValueError("incorrect shape")
    data = data.squeeze()
    if data.ndim != 1:
        raise ValueError("incorrect shape")
    # Resolve a named transformation to a (point, value) callable.
    if not hasattr(transformation, '__call__'):
        if transformation == 'real':
            data_transform = lambda point, val: np.real(val)
        elif transformation == 'imag':
            data_transform = lambda point, val: np.imag(val)
        elif transformation == 'abs':
            data_transform = lambda point, val: np.abs(val)
        else:
            raise ValueError("Unknown value for 'transformation'. It needs to be 'real', 'imag', 'abs' or a Python Callable!")
    else:
        data_transform = transformation
    data = data_transform(points, data)
    dims = dimensions
    # Default colour range spans the transformed data.
    if colorRange is None:
        minVal = np.min(data)
        maxVal = np.max(data)
        colorRange = (minVal, maxVal)
    # NOTE(review): the grid dimensions are passed as (dims[1], dims[0], 1)
    # -- a deliberate row/column swap relative to the input; confirm.
    g = tvtk.StructuredGrid(dimensions=(dims[1], dims[0], 1), points=points.T)
    g.point_data.scalars = data
    # Create actors
    mapper = tvtk.DataSetMapper(input=g)
    mapper.scalar_range = colorRange
    return tvtk.Actor(mapper=mapper)
def legendActor(actor):
    """Return a scalar-bar actor sharing *actor*'s colour lookup table."""
    bar = tvtk.ScalarBarActor()
    bar.lookup_table = actor.mapper.lookup_table
    return bar
def plotGridFunction(*args, **kwargs):
    """
    Plot a grid function together with its colour legend.

    This function takes the same parameters as gridFunctionActor().
    """
    actor = gridFunctionActor(*args, **kwargs)
    plotTvtkActors([actor, legendActor(actor)])
plotgridFunction = plotGridFunction  # old name with a typo
def plotGrid(grid):
    """
    Plot a grid.

    This function takes the same parameters as gridActor().
    """
    plotTvtkActors(gridActor(grid))
def plotThreePlanes(potentialOp, gridFun, limits, dimensions,
                    colorRange=None, transformation='real', evalOps=None):
    """
    Plot the potential generated by applying a potential operator to a grid
    function on the xy, xz and yz planes.

    *Parameters:*
       - potentialOp (PotentialOperator)
            A potential operator.
       - gridFun (GridFunction)
            A grid function.
       - limits (tuple)
            Tuple (min, max) or (xmin, xmax, ymin, ymax, zmin, zmax)
            specifying the extent of each plane on which the potential
            will be plotted.
       - dimensions (tuple)
            Scalar or tuple (xdim, ydim, zdim) specifying the number of samples
            in each direction.
       - colorRange (tuple)
            Tuple (min, max) determining the range of data to be plotted.
            If set to None, the data range is determined automatically.
       - transformation ('real', 'imag', 'abs' or a callable object)
            Determines how the potential is transformed before plotting.
       - evalOps (EvaluationOptions)
            Options controlling the evaluation of the potential.
    """
    # Accept either a scalar or an explicit (xdim, ydim, zdim) triple.
    if np.isscalar(dimensions):
        dims = (dimensions, dimensions, dimensions)
    else:
        if len(dimensions) == 3:
            dims = dimensions
        else:
            raise ValueError("dimensions must be a scalar or a tuple with 3 elements")
    # Accept (min, max) applied to all axes, or six explicit bounds.
    if len(limits) == 2:
        lims = (limits[0], limits[1], limits[0], limits[1], limits[0], limits[1])
    elif len(limits) == 6:
        lims = limits
    else:
        raise ValueError("limits must be a tuple with 2 or 6 elements")
    # The three planes intersect at the centre of the bounding box.
    origin = ((lims[0] + lims[1]) / 2.,
              (lims[2] + lims[3]) / 2.,
              (lims[4] + lims[5]) / 2.)
    # Evaluate the potential on each coordinate plane; the per-plane axis
    # bounds and sample counts are sliced out of lims/dims.
    (points1, vals1) = py_ext.evaluatePotentialOnPlane(
        potentialOp, gridFun, lims[:4], dims[:2], plane="xy", origin=origin,
        evalOps=evalOps)
    (points2, vals2) = py_ext.evaluatePotentialOnPlane(
        potentialOp, gridFun, lims[:2] + lims[4:], dims[:1] + dims[2:], plane="xz",
        origin=origin, evalOps=evalOps)
    (points3, vals3) = py_ext.evaluatePotentialOnPlane(
        potentialOp, gridFun, lims[2:], dims[1:], plane="yz", origin=origin,
        evalOps=evalOps)
    # Resolve a named transformation; a callable receives (points, values).
    if not hasattr(transformation, '__call__'):
        if transformation == 'real':
            data_transform = lambda point, val: np.real(val)
        elif transformation == 'imag':
            data_transform = lambda point, val: np.imag(val)
        elif transformation == 'abs':
            data_transform = lambda point, val: np.abs(val)
        else:
            raise ValueError("Unknown value for 'transformation'. It needs to be 'real', 'imag', 'abs' or a Python Callable!")
    else:
        data_transform = transformation
    vals1 = data_transform(points1, vals1)
    vals2 = data_transform(points2, vals2)
    vals3 = data_transform(points3, vals3)
    # Default colour range spans all three planes so they share one scale.
    if colorRange is None:
        minVal = np.min([vals1, vals2, vals3])
        maxVal = np.max([vals1, vals2, vals3])
        colorRange = (minVal, maxVal)
    g1 = tvtk.StructuredGrid(dimensions=(dims[0], dims[1], 1), points=points1)
    g2 = tvtk.StructuredGrid(dimensions=(dims[0], dims[2], 1), points=points2)
    g3 = tvtk.StructuredGrid(dimensions=(dims[1], dims[2], 1), points=points3)
    # Add data
    g1.point_data.scalars = vals1
    g2.point_data.scalars = vals2
    g3.point_data.scalars = vals3
    # Create one actor per plane, all sharing the same scalar range.
    mapper1 = tvtk.DataSetMapper(input=g1)
    mapper1.scalar_range = colorRange
    actor1 = tvtk.Actor(mapper=mapper1)
    mapper2 = tvtk.DataSetMapper(input=g2)
    mapper2.scalar_range = colorRange
    actor2 = tvtk.Actor(mapper=mapper2)
    mapper3 = tvtk.DataSetMapper(input=g3)
    mapper3.scalar_range = colorRange
    actor3 = tvtk.Actor(mapper=mapper3)
    # Overlay the grid surface itself plus a colour-bar legend.
    gActor = gridActor(gridFun.grid())
    legend = legendActor(actor1)
    plotTvtkActors([actor1, actor2, actor3, gActor, legend])
| StarcoderdataPython |
36560 | <filename>src/Algorithms/VotingClassifier.py
# External Modules
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
# Internal Modules
import Helpers.PrintHelpers.PrintHelper as phelper
import Helpers.DataFrameHelpers.DataframeConverter as dfc
class VotingClass:
    """Wrapper around scikit-learn's VotingClassifier, optionally tuned
    with a grid search."""

    @property
    def df(self):
        """Dataframe: original dataframe used for learning."""
        return self._df

    def __init__(self, df, algorithms):
        """Constructor.

        Args:
            df (Dataframe): dataframe for learning.
            algorithms (list): list of algorithms (passed as the
                VotingClassifier `estimators` argument).
        """
        self._df = df
        self._algorithms = algorithms
        phelper.PrintHelper.print_title(self.__class__.__name__)

    def learn(self, column, params):
        """Fit the voting ensemble and print its held-out accuracy.

        Args:
            column (str): name of the target column in the dataframe.
            params (dict or None): grid-search parameter grid; when None
                the ensemble is fitted directly without a grid search.

        Returns:
            bool: always True on completion.
        """
        # Target vector and feature matrix.
        y = self._df[column]
        self._y = y
        X = self._df.drop([column], axis=1)
        self._X = X
        (X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.3, random_state=0)
        eclf = VotingClassifier(estimators=self._algorithms)
        if params is None:
            # Fit the ensemble directly.
            self._learned_model = eclf
            eclf.fit(X_train, y_train)
        else:
            phelper.PrintHelper.print_title('Params from a file')
            print(params)
            print('...Doing Grid Search...')
            cv = GridSearchCV(eclf, params, cv=10, scoring='neg_mean_squared_error', n_jobs=1, refit=True)
            cv.fit(X_train, y_train)
            self._best_params = cv.best_params_
            phelper.PrintHelper.print_title('Best Params')
            print(cv.best_params_)
            self._learned_model = cv
        # Evaluate on the held-out split once (the previous version
        # predicted the test set twice in the params-less branch).
        print('...Predicting Test Data...')
        predicted_result = self._learned_model.predict(X_test).astype(int)
        phelper.PrintHelper.print_title('Accuracy Score')
        print(accuracy_score(y_test, predicted_result))
        return True

    def predict(self, test_df):
        """Return integer class predictions for *test_df* from the
        fitted model."""
        return self._learned_model.predict(test_df).astype(int)
| StarcoderdataPython |
3223068 | #!/usr/bin/python
"""
(C) Copyright 2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from logging import getLogger
from ClusterShell.NodeSet import NodeSet
class TelemetryUtils():
    # pylint: disable=too-many-nested-blocks
    """Defines an object used to verify telemetry information.

    The class body enumerates the metric names that a DAOS engine is expected
    to expose, grouped by subsystem; the methods query those metrics through
    a DmgCommand and reshape the JSON responses into nested dictionaries.
    """

    # Per-container operation counters exposed by each engine.
    ENGINE_CONTAINER_METRICS = [
        "engine_pool_container_handles",
        "engine_pool_ops_cont_close",
        "engine_pool_ops_cont_destroy",
        "engine_pool_ops_cont_open"]
    # Per-pool operation and transfer counters.
    ENGINE_POOL_METRICS = [
        "engine_pool_entries_dtx_batched_degree",
        "engine_pool_entries_dtx_batched_total",
        "engine_pool_ops_akey_enum",
        "engine_pool_ops_akey_punch",
        "engine_pool_ops_compound",
        "engine_pool_ops_dkey_enum",
        "engine_pool_ops_dkey_punch",
        "engine_pool_ops_dtx_abort",
        "engine_pool_ops_dtx_check",
        "engine_pool_ops_dtx_commit",
        "engine_pool_ops_dtx_refresh",
        "engine_pool_ops_ec_agg",
        "engine_pool_ops_ec_rep",
        "engine_pool_ops_fetch",
        "engine_pool_ops_key_query",
        "engine_pool_ops_migrate",
        "engine_pool_ops_obj_enum",
        "engine_pool_ops_obj_punch",
        "engine_pool_ops_obj_sync",
        "engine_pool_ops_recx_enum",
        "engine_pool_ops_tgt_akey_punch",
        "engine_pool_ops_tgt_dkey_punch",
        "engine_pool_ops_tgt_punch",
        "engine_pool_ops_tgt_update",
        "engine_pool_ops_update",
        "engine_pool_pool_handles",
        "engine_pool_resent",
        "engine_pool_restarted",
        "engine_pool_started_at",
        "engine_pool_xferred_fetch",
        "engine_pool_xferred_update"]
    # Engine life-cycle / event timestamps and counters.
    ENGINE_EVENT_METRICS = [
        "engine_events_dead_ranks",
        "engine_events_last_event_ts",
        "engine_servicing_at",
        "engine_started_at"]
    # The I/O metric groups below each follow the same pattern: a gauge plus
    # its max/mean/min/stddev aggregates.
    ENGINE_IO_DTX_COMMITTABLE_METRICS = [
        "engine_io_dtx_committable",
        "engine_io_dtx_committable_max",
        "engine_io_dtx_committable_mean",
        "engine_io_dtx_committable_min",
        "engine_io_dtx_committable_stddev"]
    ENGINE_IO_DTX_COMMITTED_METRICS = [
        "engine_io_dtx_committed",
        "engine_io_dtx_committed_max",
        "engine_io_dtx_committed_mean",
        "engine_io_dtx_committed_min",
        "engine_io_dtx_committed_stddev"]
    ENGINE_IO_LATENCY_FETCH_METRICS = [
        "engine_io_latency_fetch",
        "engine_io_latency_fetch_max",
        "engine_io_latency_fetch_mean",
        "engine_io_latency_fetch_min",
        "engine_io_latency_fetch_stddev"]
    ENGINE_IO_LATENCY_UPDATE_METRICS = [
        "engine_io_latency_update",
        "engine_io_latency_update_max",
        "engine_io_latency_update_mean",
        "engine_io_latency_update_min",
        "engine_io_latency_update_stddev"]
    ENGINE_IO_OPS_AKEY_ENUM_METRICS = [
        "engine_io_ops_akey_enum_active",
        "engine_io_ops_akey_enum_active_max",
        "engine_io_ops_akey_enum_active_mean",
        "engine_io_ops_akey_enum_active_min",
        "engine_io_ops_akey_enum_active_stddev"]
    ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS = [
        "engine_io_ops_akey_enum_latency",
        "engine_io_ops_akey_enum_latency_max",
        "engine_io_ops_akey_enum_latency_mean",
        "engine_io_ops_akey_enum_latency_min",
        "engine_io_ops_akey_enum_latency_stddev"]
    ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS = [
        "engine_io_ops_akey_punch_active",
        "engine_io_ops_akey_punch_active_max",
        "engine_io_ops_akey_punch_active_mean",
        "engine_io_ops_akey_punch_active_min",
        "engine_io_ops_akey_punch_active_stddev"]
    ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS = [
        "engine_io_ops_akey_punch_latency",
        "engine_io_ops_akey_punch_latency_max",
        "engine_io_ops_akey_punch_latency_mean",
        "engine_io_ops_akey_punch_latency_min",
        "engine_io_ops_akey_punch_latency_stddev"]
    ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS = [
        "engine_io_ops_compound_active",
        "engine_io_ops_compound_active_max",
        "engine_io_ops_compound_active_mean",
        "engine_io_ops_compound_active_min",
        "engine_io_ops_compound_active_stddev"]
    ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS = [
        "engine_io_ops_compound_latency",
        "engine_io_ops_compound_latency_max",
        "engine_io_ops_compound_latency_mean",
        "engine_io_ops_compound_latency_min",
        "engine_io_ops_compound_latency_stddev"]
    ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS = [
        "engine_io_ops_dkey_enum_active",
        "engine_io_ops_dkey_enum_active_max",
        "engine_io_ops_dkey_enum_active_mean",
        "engine_io_ops_dkey_enum_active_min",
        "engine_io_ops_dkey_enum_active_stddev"]
    ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS = [
        "engine_io_ops_dkey_enum_latency",
        "engine_io_ops_dkey_enum_latency_max",
        "engine_io_ops_dkey_enum_latency_mean",
        "engine_io_ops_dkey_enum_latency_min",
        "engine_io_ops_dkey_enum_latency_stddev"]
    ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS = [
        "engine_io_ops_dkey_punch_active",
        "engine_io_ops_dkey_punch_active_max",
        "engine_io_ops_dkey_punch_active_mean",
        "engine_io_ops_dkey_punch_active_min",
        "engine_io_ops_dkey_punch_active_stddev"]
    ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS = [
        "engine_io_ops_dkey_punch_latency",
        "engine_io_ops_dkey_punch_latency_max",
        "engine_io_ops_dkey_punch_latency_mean",
        "engine_io_ops_dkey_punch_latency_min",
        "engine_io_ops_dkey_punch_latency_stddev"]
    ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS = [
        "engine_io_ops_ec_agg_active",
        "engine_io_ops_ec_agg_active_max",
        "engine_io_ops_ec_agg_active_mean",
        "engine_io_ops_ec_agg_active_min",
        "engine_io_ops_ec_agg_active_stddev"]
    ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS = [
        "engine_io_ops_ec_agg_latency",
        "engine_io_ops_ec_agg_latency_max",
        "engine_io_ops_ec_agg_latency_mean",
        "engine_io_ops_ec_agg_latency_min",
        "engine_io_ops_ec_agg_latency_stddev"]
    ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS = [
        "engine_io_ops_ec_rep_active",
        "engine_io_ops_ec_rep_active_max",
        "engine_io_ops_ec_rep_active_mean",
        "engine_io_ops_ec_rep_active_min",
        "engine_io_ops_ec_rep_active_stddev"]
    ENGINE_IO_OPS_EC_REP_LATENCY_METRICS = [
        "engine_io_ops_ec_rep_latency",
        "engine_io_ops_ec_rep_latency_max",
        "engine_io_ops_ec_rep_latency_mean",
        "engine_io_ops_ec_rep_latency_min",
        "engine_io_ops_ec_rep_latency_stddev"]
    ENGINE_IO_OPS_FETCH_ACTIVE_METRICS = [
        "engine_io_ops_fetch_active",
        "engine_io_ops_fetch_active_max",
        "engine_io_ops_fetch_active_mean",
        "engine_io_ops_fetch_active_min",
        "engine_io_ops_fetch_active_stddev"]
    ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS = [
        "engine_io_ops_key_query_active",
        "engine_io_ops_key_query_active_max",
        "engine_io_ops_key_query_active_mean",
        "engine_io_ops_key_query_active_min",
        "engine_io_ops_key_query_active_stddev"]
    ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS = [
        "engine_io_ops_key_query_latency",
        "engine_io_ops_key_query_latency_max",
        "engine_io_ops_key_query_latency_mean",
        "engine_io_ops_key_query_latency_min",
        "engine_io_ops_key_query_latency_stddev"]
    ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS = [
        "engine_io_ops_migrate_active",
        "engine_io_ops_migrate_active_max",
        "engine_io_ops_migrate_active_mean",
        "engine_io_ops_migrate_active_min",
        "engine_io_ops_migrate_active_stddev"]
    ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS = [
        "engine_io_ops_migrate_latency",
        "engine_io_ops_migrate_latency_max",
        "engine_io_ops_migrate_latency_mean",
        "engine_io_ops_migrate_latency_min",
        "engine_io_ops_migrate_latency_stddev"]
    ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS = [
        "engine_io_ops_obj_enum_active",
        "engine_io_ops_obj_enum_active_max",
        "engine_io_ops_obj_enum_active_mean",
        "engine_io_ops_obj_enum_active_min",
        "engine_io_ops_obj_enum_active_stddev"]
    ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS = [
        "engine_io_ops_obj_enum_latency",
        "engine_io_ops_obj_enum_latency_max",
        "engine_io_ops_obj_enum_latency_mean",
        "engine_io_ops_obj_enum_latency_min",
        "engine_io_ops_obj_enum_latency_stddev"]
    ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS = [
        "engine_io_ops_obj_punch_active",
        "engine_io_ops_obj_punch_active_max",
        "engine_io_ops_obj_punch_active_mean",
        "engine_io_ops_obj_punch_active_min",
        "engine_io_ops_obj_punch_active_stddev"]
    ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS = [
        "engine_io_ops_obj_punch_latency",
        "engine_io_ops_obj_punch_latency_max",
        "engine_io_ops_obj_punch_latency_mean",
        "engine_io_ops_obj_punch_latency_min",
        "engine_io_ops_obj_punch_latency_stddev"]
    ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS = [
        "engine_io_ops_obj_sync_active",
        "engine_io_ops_obj_sync_active_max",
        "engine_io_ops_obj_sync_active_mean",
        "engine_io_ops_obj_sync_active_min",
        "engine_io_ops_obj_sync_active_stddev"]
    ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS = [
        "engine_io_ops_obj_sync_latency",
        "engine_io_ops_obj_sync_latency_max",
        "engine_io_ops_obj_sync_latency_mean",
        "engine_io_ops_obj_sync_latency_min",
        "engine_io_ops_obj_sync_latency_stddev"]
    ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS = [
        "engine_io_ops_recx_enum_active",
        "engine_io_ops_recx_enum_active_max",
        "engine_io_ops_recx_enum_active_mean",
        "engine_io_ops_recx_enum_active_min",
        "engine_io_ops_recx_enum_active_stddev"]
    ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS = [
        "engine_io_ops_recx_enum_latency",
        "engine_io_ops_recx_enum_latency_max",
        "engine_io_ops_recx_enum_latency_mean",
        "engine_io_ops_recx_enum_latency_min",
        "engine_io_ops_recx_enum_latency_stddev"]
    ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS = [
        "engine_io_ops_tgt_akey_punch_active",
        "engine_io_ops_tgt_akey_punch_active_max",
        "engine_io_ops_tgt_akey_punch_active_mean",
        "engine_io_ops_tgt_akey_punch_active_min",
        "engine_io_ops_tgt_akey_punch_active_stddev"]
    ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS = [
        "engine_io_ops_tgt_akey_punch_latency",
        "engine_io_ops_tgt_akey_punch_latency_max",
        "engine_io_ops_tgt_akey_punch_latency_mean",
        "engine_io_ops_tgt_akey_punch_latency_min",
        "engine_io_ops_tgt_akey_punch_latency_stddev"]
    ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS = [
        "engine_io_ops_tgt_dkey_punch_active",
        "engine_io_ops_tgt_dkey_punch_active_max",
        "engine_io_ops_tgt_dkey_punch_active_mean",
        "engine_io_ops_tgt_dkey_punch_active_min",
        "engine_io_ops_tgt_dkey_punch_active_stddev"]
    ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS = [
        "engine_io_ops_tgt_dkey_punch_latency",
        "engine_io_ops_tgt_dkey_punch_latency_max",
        "engine_io_ops_tgt_dkey_punch_latency_mean",
        "engine_io_ops_tgt_dkey_punch_latency_min",
        "engine_io_ops_tgt_dkey_punch_latency_stddev"]
    ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS = [
        "engine_io_ops_tgt_punch_active",
        "engine_io_ops_tgt_punch_active_max",
        "engine_io_ops_tgt_punch_active_mean",
        "engine_io_ops_tgt_punch_active_min",
        "engine_io_ops_tgt_punch_active_stddev"]
    ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS = [
        "engine_io_ops_tgt_punch_latency",
        "engine_io_ops_tgt_punch_latency_max",
        "engine_io_ops_tgt_punch_latency_mean",
        "engine_io_ops_tgt_punch_latency_min",
        "engine_io_ops_tgt_punch_latency_stddev"]
    ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS = [
        "engine_io_ops_tgt_update_active",
        "engine_io_ops_tgt_update_active_max",
        "engine_io_ops_tgt_update_active_mean",
        "engine_io_ops_tgt_update_active_min",
        "engine_io_ops_tgt_update_active_stddev"]
    ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS = [
        "engine_io_ops_update_active",
        "engine_io_ops_update_active_max",
        "engine_io_ops_update_active_mean",
        "engine_io_ops_update_active_min",
        "engine_io_ops_update_active_stddev"]
    # Union of every per-I/O metric group above.
    ENGINE_IO_METRICS = ENGINE_IO_DTX_COMMITTABLE_METRICS +\
        ENGINE_IO_DTX_COMMITTED_METRICS +\
        ENGINE_IO_LATENCY_FETCH_METRICS +\
        ENGINE_IO_LATENCY_UPDATE_METRICS +\
        ENGINE_IO_OPS_AKEY_ENUM_METRICS +\
        ENGINE_IO_OPS_AKEY_ENUM_LATENCY_METRICS +\
        ENGINE_IO_OPS_AKEY_PUNCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_AKEY_PUNCH_LATENCY_METRICS +\
        ENGINE_IO_OPS_COMPOUND_ACTIVE_METRICS +\
        ENGINE_IO_OPS_COMPOUND_LATENCY_METRICS +\
        ENGINE_IO_OPS_DKEY_ENUM_ACTIVE_METRICS +\
        ENGINE_IO_OPS_DKEY_ENUM_LATENCY_METRICS +\
        ENGINE_IO_OPS_DKEY_PUNCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_DKEY_PUNCH_LATENCY_METRICS +\
        ENGINE_IO_OPS_EC_AGG_ACTIVE_METRICS +\
        ENGINE_IO_OPS_EC_AGG_LATENCY_METRICS +\
        ENGINE_IO_OPS_EC_REP_ACTIVE_METRICS +\
        ENGINE_IO_OPS_EC_REP_LATENCY_METRICS +\
        ENGINE_IO_OPS_FETCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_KEY_QUERY_ACTIVE_METRICS +\
        ENGINE_IO_OPS_KEY_QUERY_LATENCY_METRICS +\
        ENGINE_IO_OPS_MIGRATE_ACTIVE_METRICS +\
        ENGINE_IO_OPS_MIGRATE_LATENCY_METRICS +\
        ENGINE_IO_OPS_OBJ_ENUM_ACTIVE_METRICS +\
        ENGINE_IO_OPS_OBJ_ENUM_LATENCY_METRICS +\
        ENGINE_IO_OPS_OBJ_PUNCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_OBJ_PUNCH_LATENCY_METRICS +\
        ENGINE_IO_OPS_OBJ_SYNC_ACTIVE_METRICS +\
        ENGINE_IO_OPS_OBJ_SYNC_LATENCY_METRICS +\
        ENGINE_IO_OPS_RECX_ENUM_ACTIVE_METRICS +\
        ENGINE_IO_OPS_RECX_ENUM_LATENCY_METRICS +\
        ENGINE_IO_OPS_TGT_AKEY_PUNCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_TGT_AKEY_PUNCH_LATENCY_METRICS +\
        ENGINE_IO_OPS_TGT_DKEY_PUNCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_TGT_DKEY_PUNCH_LATENCY_METRICS +\
        ENGINE_IO_OPS_TGT_PUNCH_ACTIVE_METRICS +\
        ENGINE_IO_OPS_TGT_PUNCH_LATENCY_METRICS +\
        ENGINE_IO_OPS_TGT_UPDATE_ACTIVE_METRICS +\
        ENGINE_IO_OPS_UPDATE_ACTIVE_METRICS
    # Network fabric counters.
    ENGINE_NET_METRICS = [
        "engine_net_ofi_sockets_failed_addr",
        "engine_net_ofi_sockets_req_timeout",
        "engine_net_ofi_sockets_uri_lookup_timeout",
        "engine_net_uri_lookup_other",
        "engine_net_uri_lookup_self"]
    ENGINE_RANK_METRICS = [
        "engine_rank"]
    # Standard Go runtime metrics exported by the control plane.
    GO_METRICS = [
        "go_gc_duration_seconds",
        "go_goroutines",
        "go_info",
        "go_memstats_alloc_bytes",
        "go_memstats_alloc_bytes_total",
        "go_memstats_buck_hash_sys_bytes",
        "go_memstats_frees_total",
        "go_memstats_gc_cpu_fraction",
        "go_memstats_gc_sys_bytes",
        "go_memstats_heap_alloc_bytes",
        "go_memstats_heap_idle_bytes",
        "go_memstats_heap_inuse_bytes",
        "go_memstats_heap_objects",
        "go_memstats_heap_released_bytes",
        "go_memstats_heap_sys_bytes",
        "go_memstats_last_gc_time_seconds",
        "go_memstats_lookups_total",
        "go_memstats_mallocs_total",
        "go_memstats_mcache_inuse_bytes",
        "go_memstats_mcache_sys_bytes",
        "go_memstats_mspan_inuse_bytes",
        "go_memstats_mspan_sys_bytes",
        "go_memstats_next_gc_bytes",
        "go_memstats_other_sys_bytes",
        "go_memstats_stack_inuse_bytes",
        "go_memstats_stack_sys_bytes",
        "go_memstats_sys_bytes",
        "go_threads"]
    # Standard process-level metrics.
    PROCESS_METRICS = [
        "process_cpu_seconds_total",
        "process_max_fds",
        "process_open_fds",
        "process_resident_memory_bytes",
        "process_start_time_seconds",
        "process_virtual_memory_bytes",
        "process_virtual_memory_max_bytes"]
    # NVMe health/SMART metrics; the "<id>" placeholder is substituted with a
    # sanitized PCI address in get_all_server_metrics_names().
    ENGINE_NVME_METRICS = [
        "engine_nvme_<id>_commands_checksum_mismatch",
        "engine_nvme_<id>_commands_ctrl_busy_time",
        "engine_nvme_<id>_commands_data_units_read",
        "engine_nvme_<id>_commands_data_units_written",
        "engine_nvme_<id>_commands_host_read_cmds",
        "engine_nvme_<id>_commands_host_write_cmds",
        "engine_nvme_<id>_commands_media_errs",
        "engine_nvme_<id>_commands_read_errs",
        "engine_nvme_<id>_commands_unmap_errs",
        "engine_nvme_<id>_commands_write_errs",
        "engine_nvme_<id>_power_cycles",
        "engine_nvme_<id>_power_on_hours",
        "engine_nvme_<id>_read_only_warn",
        "engine_nvme_<id>_reliability_avail_spare",
        "engine_nvme_<id>_reliability_avail_spare_threshold",
        "engine_nvme_<id>_reliability_avail_spare_warn",
        "engine_nvme_<id>_reliability_percentage_used",
        "engine_nvme_<id>_reliability_reliability_warn",
        "engine_nvme_<id>_temp_crit_time",
        "engine_nvme_<id>_temp_current",
        "engine_nvme_<id>_temp_warn",
        "engine_nvme_<id>_temp_warn_time",
        "engine_nvme_<id>_unsafe_shutdowns",
        "engine_nvme_<id>_volatile_mem_warn",
        "engine_nvme_<id>_vendor_program_fail_cnt_norm",
        "engine_nvme_<id>_vendor_program_fail_cnt_raw",
        "engine_nvme_<id>_vendor_erase_fail_cnt_norm",
        "engine_nvme_<id>_vendor_erase_fail_cnt_raw",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_norm",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_min",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_max",
        "engine_nvme_<id>_vendor_wear_leveling_cnt_avg",
        "engine_nvme_<id>_vendor_endtoend_err_cnt_raw",
        "engine_nvme_<id>_vendor_crc_err_cnt_raw",
        "engine_nvme_<id>_vendor_media_wear_raw",
        "engine_nvme_<id>_vendor_host_reads_raw",
        "engine_nvme_<id>_vendor_crc_workload_timer_raw",
        "engine_nvme_<id>_vendor_thermal_throttle_status_raw",
        "engine_nvme_<id>_vendor_thermal_throttle_event_cnt",
        "engine_nvme_<id>_vendor_retry_buffer_overflow_cnt",
        "engine_nvme_<id>_vendor_pll_lock_loss_cnt",
        "engine_nvme_<id>_vendor_nand_bytes_written",
        "engine_nvme_<id>_vendor_host_bytes_written"]

    def __init__(self, dmg, servers):
        """Create a TelemetryUtils object.

        Args:
            dmg (DmgCommand): the DmgCommand object configured to communicate
                with the servers
            servers (list): a list of server host names
        """
        self.log = getLogger(__name__)
        self.dmg = dmg
        # NodeSet lets the host list be iterated and displayed compactly.
        self.hosts = NodeSet.fromlist(servers)

    def get_all_server_metrics_names(self, server, with_pools=False):
        """Get all the telemetry metrics names for this server.

        Args:
            server (DaosServerCommand): the server from which to determine what
                metrics will be available
            with_pools (bool, optional): whether to include the pool- and
                container-specific metric names. Defaults to False.

        Returns:
            list: all of the telemetry metrics names for this server
        """
        all_metrics_names = list(self.ENGINE_EVENT_METRICS)
        all_metrics_names.extend(self.ENGINE_IO_METRICS)
        all_metrics_names.extend(self.ENGINE_NET_METRICS)
        all_metrics_names.extend(self.ENGINE_RANK_METRICS)
        all_metrics_names.extend(self.GO_METRICS)
        all_metrics_names.extend(self.PROCESS_METRICS)
        if with_pools:
            all_metrics_names.extend(self.ENGINE_POOL_METRICS)
            all_metrics_names.extend(self.ENGINE_CONTAINER_METRICS)

        # Add NVMe metrics for any NVMe devices configured for this server
        for nvme_list in server.manager.job.get_engine_values("bdev_list"):
            for nvme in nvme_list if nvme_list is not None else []:
                # Replace the '<id>' placeholder with the actual NVMe ID
                # (PCI address with ':' and '.' mapped to '_').
                nvme_id = nvme.replace(":", "_").replace(".", "_")
                nvme_metrics = [
                    name.replace("<id>", nvme_id)
                    for name in self.ENGINE_NVME_METRICS]
                all_metrics_names.extend(nvme_metrics)

        return all_metrics_names

    def list_metrics(self):
        """List the available metrics for each host.

        Returns:
            dict: a dictionary of host keys linked to a list of metric names
        """
        info = {}
        self.log.info("Listing telemetry metrics from %s", self.hosts)
        for host in self.hosts:
            data = self.dmg.telemetry_metrics_list(host=host)
            info[host] = []
            # Guard against partial/failed dmg responses at each level.
            if "response" in data:
                if "available_metric_sets" in data["response"]:
                    for entry in data["response"]["available_metric_sets"]:
                        if "name" in entry:
                            info[host].append(entry["name"])
        return info

    def get_metrics(self, name):
        """Obtain the specified metric information for each host.

        Args:
            name (str): Comma-separated list of metric names to query.

        Returns:
            dict: a dictionary of host keys linked to metric data for each
                metric name specified
        """
        info = {}
        self.log.info("Querying telemetry metric %s from %s", name, self.hosts)
        for host in self.hosts:
            data = self.dmg.telemetry_metrics_query(host=host, metrics=name)
            info[host] = {}
            if "response" in data:
                if "metric_sets" in data["response"]:
                    for entry in data["response"]["metric_sets"]:
                        info[host][entry["name"]] = {
                            "description": entry["description"],
                            "metrics": entry["metrics"]
                        }
        return info

    def get_container_metrics(self):
        """Get the container telemetry metrics.

        Returns:
            dict: dictionary of dictionaries of container metric names and
                values per server host key
        """
        data = {}
        info = self.get_metrics(",".join(self.ENGINE_CONTAINER_METRICS))
        self.log.info("Container Telemetry Information")
        for host in info:
            # Default every expected metric to 0 so missing entries still
            # appear in the result.
            data[host] = {name: 0 for name in self.ENGINE_CONTAINER_METRICS}
            for name in self.ENGINE_CONTAINER_METRICS:
                if name in info[host]:
                    for metric in info[host][name]["metrics"]:
                        self.log.info(
                            "  %s (%s): %s (%s)",
                            info[host][name]["description"], name,
                            metric["value"], host)
                        data[host][name] = metric["value"]
        return data

    def get_pool_metrics(self, specific_metrics=None):
        """Get the pool telemetry metrics.

        Args:
            specific_metrics(list): list of specific pool metrics

        Returns:
            dict: dictionary of dictionaries of pool metric names and
                values per server host key
        """
        data = {}
        if specific_metrics is None:
            specific_metrics = self.ENGINE_POOL_METRICS
        info = self.get_metrics(",".join(specific_metrics))
        self.log.info("Pool Telemetry Information")
        for name in specific_metrics:
            for index, host in enumerate(info):
                if name in info[host]:
                    # Print the header only once per metric name.
                    if index == 0:
                        self.log.info(
                            "  %s (%s):",
                            name, info[host][name]["description"])
                        self.log.info(
                            "    %-12s %-4s %-6s %s",
                            "Host", "Rank", "Target", "Value")
                    if name not in data:
                        data[name] = {}
                    if host not in data[name]:
                        data[name][host] = {}
                    for metric in info[host][name]["metrics"]:
                        if "labels" in metric:
                            # Index values as data[name][host][rank][target].
                            if ("rank" in metric["labels"]
                                    and "target" in metric["labels"]):
                                rank = metric["labels"]["rank"]
                                target = metric["labels"]["target"]
                                if rank not in data[name][host]:
                                    data[name][host][rank] = {}
                                if target not in data[name][host][rank]:
                                    data[name][host][rank][target] = {}
                                data[name][host][rank][target] = \
                                    metric["value"]
                                self.log.info(
                                    "    %-12s %-4s %-6s %s",
                                    host, rank, target, metric["value"])
        return data

    def get_io_metrics(self, test_metrics=None):
        """Get the io telemetry metrics.

        Args:
            test_metrics (str list, optional): Comma-separated list of metric
                names to query. By default, test_metrics is entire
                ENGINE_IO_METRICS.

        Returns:
            dict: dictionary of dictionaries of container metric names and
                values per server host key
        """
        data = {}
        if test_metrics is None:
            test_metrics = self.ENGINE_IO_METRICS
        info = self.get_metrics(",".join(test_metrics))
        self.log.info("Telemetry Information")
        for name in test_metrics:
            for index, host in enumerate(info):
                if name in info[host]:
                    # Print the header only once per metric name.
                    if index == 0:
                        self.log.info(
                            "  %s (%s):",
                            name, info[host][name]["description"])
                        self.log.info(
                            "    %-12s %-4s %-6s %-6s %s",
                            "Host", "Rank", "Target", "Size", "Value")
                    if name not in data:
                        data[name] = {}
                    if host not in data[name]:
                        data[name][host] = {}
                    for metric in info[host][name]["metrics"]:
                        if "labels" in metric:
                            # Values are indexed by rank/target/size; metrics
                            # without a "size" label use "-" as the size key.
                            if ("rank" in metric["labels"]
                                    and "target" in metric["labels"]
                                    and "size" in metric["labels"]):
                                rank = metric["labels"]["rank"]
                                target = metric["labels"]["target"]
                                size = metric["labels"]["size"]
                                if rank not in data[name][host]:
                                    data[name][host][rank] = {}
                                if target not in data[name][host][rank]:
                                    data[name][host][rank][target] = {}
                                data[name][host][rank][target][size] = \
                                    metric["value"]
                                self.log.info(
                                    "    %-12s %-4s %-6s %-6s %s",
                                    host, rank, target, size, metric["value"])
                            elif ("rank" in metric["labels"]
                                    and "target" in metric["labels"]):
                                rank = metric["labels"]["rank"]
                                target = metric["labels"]["target"]
                                if rank not in data[name][host]:
                                    data[name][host][rank] = {}
                                if target not in data[name][host][rank]:
                                    data[name][host][rank][target] = {}
                                data[name][host][rank][target]["-"] = \
                                    metric["value"]
                                self.log.info(
                                    "    %-12s %-4s %-6s %-6s %s",
                                    host, rank, target, "-", metric["value"])
        return data

    def check_container_metrics(self, open_count=None, active_count=None,
                                close_count=None, destroy_count=None):
        """Verify the container telemetry metrics.

        Args:
            open_count (dict, optional): Number of times cont_open has been
                called per host key. Defaults to None.
            active_count (dict, optional): Number of open container handles per
                host key. Defaults to None.
            close_count (dict, optional): Number of times cont_close has been
                called per host key. Defaults to None.
            destroy_count (dict, optional): Number of times cont_destroy has
                been called per host key. Defaults to None.

        Returns:
            list: list of errors detected
        """
        errors = []
        # Map each metric name to its expected per-host counts; a None entry
        # means the caller does not want that metric verified.
        expected = {
            "engine_pool_ops_cont_open": open_count,
            "engine_pool_container_handles": active_count,
            "engine_pool_ops_cont_close": close_count,
            "engine_pool_ops_cont_destroy": destroy_count,
        }
        data = self.get_container_metrics()
        for host in data:
            for name in expected:
                if name in data[host]:
                    if (expected[name] is not None
                            and host in expected[name]
                            and expected[name][host] != data[host][name]):
                        errors.append(
                            "{} mismatch on {}: expected={}; actual={}".format(
                                name, host, expected[name][host],
                                data[host][name]))
                else:
                    errors.append("No {} data for {}".format(name, host))
        return errors
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import torch
import torch.nn as nn
# Momentum shared by the BatchNorm layers constructed in this file.
# NOTE(review): the original (Chinese) comment questioned why only some BN
# layers pass this momentum explicitly -- the fuse/expand helpers below use
# the nn.BatchNorm2d default instead; confirm whether that is intentional.
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding 1 and no bias.

    Padding of 1 keeps the spatial size unchanged at stride 1; the bias is
    omitted because callers follow the convolution with a BatchNorm layer.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: two 3x3 conv+BN layers with an identity shortcut."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may change resolution (via stride) and channel count;
        # the second conv always preserves both.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection that reshapes the shortcut to match the output.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce, 3x3, then 1x1 expand by 4x."""

    expansion = 4  # channel multiplier applied by the final 1x1 conv

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 conv shrinks to the bottleneck width (bias folded into BN).
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # The 3x3 conv carries the stride, so any downsampling happens here.
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # 1x1 conv expands back to planes * expansion channels.
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(
            planes * self.expansion, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection that reshapes the shortcut to match the output.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class IncepHRModule(nn.Module):
    """One stage of the Inception-HRNet.

    The module takes a list of per-branch feature maps (highest resolution
    first), runs residual blocks on the deepest branch, forks it back out to
    every resolution twice ("middle" and "tail"), fuses the branches across
    resolutions after each fork, and combines the middle and tail results
    into the output list.  With ``multi_scale_output`` the output gains one
    extra, lower-resolution branch compared to the input.

    Args:
        num_blocks (list): residual-block counts for the first and second
            stem ([first, second]; the last entry is used for the second).
        block (type): residual block class (e.g. BasicBlock / Bottleneck).
        cur_stage (int): stage index; also the number of incoming branches.
        original_channel (int): base channel width ``n``; the deepest branch
            uses ``2**(cur_stage-1) * n`` channels (stage2->2n, 3->4n, ...).
        multi_scale_output (bool): when False only the highest-resolution
            branch is produced (used for the final stage).
    """

    def __init__(self, num_blocks, block, cur_stage, original_channel,
                 multi_scale_output=True):
        super().__init__()
        self.multi_scale_output = multi_scale_output
        self.block = block
        self.cur_stage = cur_stage
        self.num_blocks = num_blocks
        # Channel width of the deepest (current) branch.
        self.cur_channel = 2**(cur_stage-1) * original_channel
        # Channel widths of the middle-fork branches: the top branch is half
        # as wide, every other branch matches the current width.
        self.middle_channels = [self.cur_channel//2]
        self.relu = nn.ReLU(inplace=True)
        for i in range(self.cur_stage-1):
            self.middle_channels.append(self.cur_channel)
        # Tail-fork widths: each middle width doubled (from the concat with
        # the head features) plus one extra downsampled branch.
        self.out_channels = [i*2 for i in self.middle_channels]
        self.out_channels.append(2*self.cur_channel)
        # First half of the module: stem -> expand -> fuse.
        self.stem_layers_first = self._make_stem(
            self.block, self.num_blocks[0])
        self.expand_layers_middle = self._expand_layers(
            self.cur_channel, self.cur_stage, Is_middle=True)
        self.fuse_layer_middle = self._make_fuse_layers(self.middle_channels)
        # Second half of the module: stem -> expand (with an extra
        # downsampled branch) -> fuse.
        self.stem_layers_second = self._make_stem(
            self.block, self.num_blocks[-1])
        self.expand_layers_tail = self._expand_layers(
            self.cur_channel, self.cur_stage, Is_middle=False)
        self.fuse_layer_tail = self._make_fuse_layers(
            self.out_channels, self.multi_scale_output)
        # 1x1 convs: double the middle widths so they can be added to the
        # tail results; keep the tail widths unchanged (scale factor 1).
        self.change_channel_middle = self._change_channel_layers(
            self.middle_channels, 2)
        self.change_channel_tail = self._change_channel_layers(
            self.out_channels, 1)

    def forward(self, x):
        """Run the stage.

        Args:
            x (list): per-branch feature maps, highest resolution first; the
                deepest branch (``x[-1]``) feeds the stems.

        Returns:
            list: fused per-branch feature maps (one extra branch when
                ``multi_scale_output`` is True, otherwise a single branch).
        """
        x_head = x  # incoming (already fused) branch features
        x = self.stem_layers_first(x[-1])
        # Middle fork: expand the deepest branch back to every resolution,
        # adding the matching head feature as a skip connection.
        x_expand_middle = []
        for i in range(self.cur_stage):
            if i == self.cur_stage-1:
                y = x
            else:
                y = self.relu(self.expand_layers_middle[i](x))+x_head[i]
            x_expand_middle.append(y)
        # Cross-resolution fusion of the middle fork.
        x_middle_fuse = []
        for i in range(len(self.fuse_layer_middle)):
            # i is the branch being accumulated into, j the contributor;
            # same-branch contributions are added directly (layer is None).
            y = x_expand_middle[0] if i == 0 else self.fuse_layer_middle[i][0](
                x_expand_middle[0])
            for j in range(1, self.cur_stage):
                if i == j:
                    y = y + x_expand_middle[j]
                else:
                    y = y + self.fuse_layer_middle[i][j](x_expand_middle[j])
            x_middle_fuse.append(self.relu(y))
        # Tail fork: second stem on the deepest fused branch, then expand to
        # every resolution plus one extra downsampled branch.
        x = self.stem_layers_second(x_middle_fuse[-1])
        x_expand_tail = []
        for i in range(self.cur_stage+1):
            if i == self.cur_stage-1:
                y = x
            else:
                y = self.expand_layers_tail[i](x)
            x_expand_tail.append(y)  # not yet activated; relu applied below
        # Concatenate each tail branch with the matching head feature.
        x_tail = []
        for i in range(len(x_head)):
            x = torch.cat((x_expand_tail[i], x_head[i]), 1)
            x_tail.append(self.relu(x))
        x_tail.append(self.relu(x_expand_tail[-1]))  # extra (deepest) branch
        # Per-branch 1x1 conv (width-preserving) before the tail fusion.
        for i in range(len(x_tail)):
            x_tail[i] = self.relu(self.change_channel_tail[i](x_tail[i]))
        # Cross-resolution fusion of the tail fork.
        x_tail_fuse = []
        for i in range(len(self.fuse_layer_tail)):
            y = x_tail[0] if i == 0 else self.fuse_layer_tail[i][0](
                x_tail[0])
            for j in range(1, self.cur_stage+1):
                if i == j:
                    y = y + x_tail[j]
                else:
                    y = y + self.fuse_layer_tail[i][j](x_tail[j])
            x_tail_fuse.append(self.relu(y))
        # 1x1 conv without a trailing relu: the result is summed with the
        # (width-doubled) middle features next.
        for i in range(len(x_tail_fuse)):
            x_tail_fuse[i] = self.change_channel_tail[i](x_tail_fuse[i])
        # Combine middle and tail; the final stage keeps only one branch.
        x_head = []
        len_middel_sum_tail = len(x_middle_fuse) \
            if self.multi_scale_output == True else 1
        for i in range(len_middel_sum_tail):
            x = self.change_channel_middle[i](
                x_middle_fuse[i])+x_tail_fuse[i]
            x_head.append(self.relu(x))
        if self.multi_scale_output == True:
            x_head.append(self.relu(x_tail_fuse[-1]))
        return x_head

    def _make_fuse_layers(self, in_channels, multi_scale_output=True):
        """Build the HRNet-style cross-resolution fusion layers.

        ``fuse_layers[i][j]`` converts branch j (channels ``in_channels[j]``)
        to branch i's channel count and resolution; the diagonal is None
        because same-branch features are added directly in forward().
        """
        num_branches = len(in_channels)
        if num_branches == 1:
            return None  # a single branch needs no fusion
        num_inchannels = in_channels
        fuse_layers = []
        for i in range(num_branches if multi_scale_output == True else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower-resolution contributor: 1x1 conv to branch i's
                    # width, then nearest-neighbour upsample by 2**(j-i).
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(
                                num_inchannels[j],
                                num_inchannels[i],
                                1, 1, 0, bias=False
                            ),
                            nn.BatchNorm2d(num_inchannels[i]),
                            nn.Upsample(scale_factor=2**(j-i), mode='nearest')
                        )
                    )
                elif j == i:
                    fuse_layer.append(None)
                else:
                    # Higher-resolution contributor: chain of strided 3x3
                    # convs; only the last conv changes the channel count.
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        3, 2, 1, bias=False
                                    ),
                                    nn.BatchNorm2d(num_outchannels_conv3x3)
                                )
                            )
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        3, 2, 1, bias=False
                                    ),
                                    nn.BatchNorm2d(num_outchannels_conv3x3),
                                    nn.ReLU(inplace=True)
                                )
                            )
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def _change_channel_layers(self, in_channels, scale_factor):
        """Build per-branch 1x1 conv+BN layers scaling widths by a factor.

        No trailing ReLU: callers apply activation (or a sum) themselves.
        """
        out_list = []
        for i in range(len(in_channels)):
            in_channel = in_channels[i]
            out_channel = scale_factor*in_channel
            out_list.append(
                nn.Sequential(
                    nn.Conv2d(
                        in_channel,
                        out_channel,
                        1, 1, 0, bias=False
                    ),
                    nn.BatchNorm2d(out_channel),
                ))
        return nn.ModuleList(out_list)

    def _expand_layers(self, cur_channel, cur_stage, Is_middle=False):
        """Build layers that fork the deepest branch to other resolutions.

        Entries 0..cur_stage-2 upsample (the topmost to half width, the rest
        width-preserving); the entry for the current branch is None (the
        feature is passed through unchanged in forward()); when
        ``Is_middle`` is False, one extra strided-conv downsample branch is
        appended.  No trailing ReLU -- forward() activates the results.
        """
        expand_layers = []
        for j in range(cur_stage-1):
            up_channel = cur_channel//2 if j == 0 else cur_channel
            expand_layers.append(
                nn.Sequential(
                    nn.Conv2d(
                        cur_channel,
                        up_channel,
                        1, 1, 0, bias=False
                    ),
                    nn.BatchNorm2d(up_channel),
                    nn.Upsample(scale_factor=2 **
                                (cur_stage-1-j), mode='nearest')
                )
            )
        expand_layers.append(None)  # current branch: identity in forward()
        if Is_middle == False:
            down_channel = cur_channel*2
            expand_layers.append(nn.Sequential(
                nn.Conv2d(
                    cur_channel,
                    down_channel,
                    3, 2, 1, bias=False
                ),
                nn.BatchNorm2d(down_channel)
            ))
        return nn.ModuleList(expand_layers)

    def _make_stem(self, block, num_block, stride=1):
        """Build a chain of ``num_block`` residual blocks for the deepest
        branch, adding a 1x1 projection shortcut when the first block changes
        stride or channel count.
        """
        bottlten_input = self.cur_channel
        bottlen_output = bottlten_input
        downsample = None
        if stride != 1 or \
           bottlten_input != bottlen_output * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    # BUG FIX: was `self.bottlten_input`, an attribute that
                    # is never set -- taking this branch (any block with
                    # expansion != 1, e.g. Bottleneck) raised AttributeError.
                    bottlten_input,
                    bottlen_output * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(
                    bottlen_output * block.expansion,
                    momentum=BN_MOMENTUM
                ),
            )
            # NOTE(review): with expansion != 1 the first block outputs
            # bottlen_output * expansion channels, but the later blocks and
            # the expand layers still expect `cur_channel` -- confirm the
            # intended handling of expanding blocks (a commented-out
            # change_channel_stem layer in the original hinted at this).

        layers = []
        layers.append(
            block(
                bottlten_input,
                bottlen_output,
                stride,
                downsample,
            )
        )
        for i in range(1, num_block):
            layers.append(
                block(
                    bottlen_output,
                    bottlen_output
                )
            )

        return nn.Sequential(*layers)
# Maps the block-type string from the config (cfg[...]['BLOCK']) to the
# residual block class to instantiate.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class IncepHRNet(nn.Module):
    """Inception-style HRNet for heatmap regression.

    Pipeline: a two-conv stem (input -> 1/4 resolution), one Bottleneck stage
    (``layer1``, 256 output channels), a split into two resolution branches
    (``stage1_make``), three ``IncepHRModule`` stages configured from ``cfg``,
    and a final conv mapping the first output branch to ``NUM_JOINTS``
    heatmap channels.
    """

    def __init__(self, cfg, **kwargs):
        extra = cfg.MODEL.EXTRA  # stage / final-kernel configuration sub-tree
        super(IncepHRNet, self).__init__()
        self.inplanes = 64

        # stem net: two stride-2 3x3 convs bring the input to 1/4 resolution
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(Bottleneck, 64, 4)  # 256 output channels

        # stage 2 config also supplies the settings shared by all stages
        # (base channel count, block type, multi-scale output flag)
        self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2']
        self.original_channel = self.stage2_cfg['ORIGINAL_CHANNEL']
        self.multi_scale_output = self.stage2_cfg['MULTI_SCALE_OUTPUT']
        self.block = blocks_dict[self.stage2_cfg['BLOCK']]
        self.stage1_make_first = self.stage1_make(self.original_channel)
        self.num_blocks = self.stage2_cfg['NUM_BLOCKS']
        self.cur_stage = self.stage2_cfg['CUR_STAGE']
        self.stage2 = IncepHRModule(self.num_blocks, self.block, self.cur_stage,
                                    self.original_channel, self.multi_scale_output)

        self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3']
        self.num_blocks = self.stage3_cfg['NUM_BLOCKS']
        self.cur_stage = self.stage3_cfg['CUR_STAGE']
        self.stage3 = IncepHRModule(self.num_blocks, self.block, self.cur_stage,
                                    self.original_channel, self.multi_scale_output)

        self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4']
        self.num_blocks = self.stage4_cfg['NUM_BLOCKS']
        self.cur_stage = self.stage4_cfg['CUR_STAGE']
        # the last stage only emits the branch consumed by the final layer
        self.stage4 = IncepHRModule(self.num_blocks, self.block, self.cur_stage,
                                    self.original_channel, False)

        # NOTE(review): assumes stage4's first output carries
        # 8 * original_channel channels - confirm against IncepHRModule.
        self.final_layer = nn.Conv2d(
            in_channels=8 * self.original_channel,
            out_channels=cfg.MODEL.NUM_JOINTS,
            kernel_size=extra.FINAL_CONV_KERNEL,
            stride=1,
            padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0
        )

        self.pretrained_layers = cfg['MODEL']['EXTRA']['PRETRAINED_LAYERS']

    def forward(self, x):
        # stem
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)

        # split the stem output into the initial branch list
        x_list = []
        for i in range(len(self.stage1_make_first)):
            x_list.append(self.stage1_make_first[i](x))

        x_list = self.stage2(x_list)
        x_list = self.stage3(x_list)
        x_list = self.stage4(x_list)

        # final conv onto NUM_JOINTS heatmap channels
        x = self.final_layer(x_list[0])
        return x

    def _make_layer(self, block, planes, blocks, stride=1):
        """Standard ResNet-style stage builder (used once, for ``layer1``)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes, planes * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion,
                               momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def stage1_make(self, original_channel):
        """Split the 256-channel stem output into two resolution branches.

        Branch 0 keeps the resolution (1x1 conv to ``original_channel``);
        branch 1 halves it (stride-2 3x3 conv to ``2 * original_channel``).
        """
        stage1_list = []
        stage1_list.append(nn.Sequential(
            nn.Conv2d(
                256, original_channel,
                1, 1, 0, bias=False),
            nn.BatchNorm2d(original_channel),
            nn.ReLU(inplace=True)
        ))
        stage1_list.append(nn.Sequential(
            nn.Conv2d(
                256, original_channel * 2,
                3, 2, 1, bias=False),
            nn.BatchNorm2d(original_channel * 2),
            nn.ReLU(inplace=True)))
        return nn.ModuleList(stage1_list)

    def init_weights(self, pretrained=''):
        """Initialise conv/BN weights and optionally load a checkpoint.

        Only checkpoint entries whose top-level name appears in
        ``self.pretrained_layers`` are copied - unless the first entry is
        ``'*'``, which copies everything.  Raises ValueError when a
        non-empty ``pretrained`` path does not exist.
        """
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
                for name, _ in m.named_parameters():
                    if name in ['bias']:
                        nn.init.constant_(m.bias, 0)  # only present when bias=True
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
                for name, _ in m.named_parameters():
                    if name in ['bias']:
                        nn.init.constant_(m.bias, 0)

        if os.path.isfile(pretrained):
            pretrained_state_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))

            need_init_state_dict = {}
            for name, m in pretrained_state_dict.items():
                # BUG FIX: was ``is '*'`` - identity comparison against a
                # string literal is unreliable (and a SyntaxWarning on 3.8+).
                if name.split('.')[0] in self.pretrained_layers \
                        or self.pretrained_layers[0] == '*':
                    need_init_state_dict[name] = m
            # strict=False: only layers with matching names/shapes are loaded
            self.load_state_dict(need_init_state_dict, strict=False)
        elif pretrained:  # non-empty path that does not exist on disk
            logger.error('=> please download pre-trained models first!')
            raise ValueError('{} is not exist!'.format(pretrained))
def get_pose_net(cfg, is_train, **kwargs):
    """Factory: build an IncepHRNet and optionally load pretrained weights."""
    network = IncepHRNet(cfg, **kwargs)
    wants_pretrained = is_train and cfg.MODEL.INIT_WEIGHTS
    if wants_pretrained:
        network.init_weights(cfg.MODEL.PRETRAINED)
    return network
| StarcoderdataPython |
45506 | # TODO: figure out how to put this in the app/ folder and still use serverless
# This line: `handler: main.handler`
# How do we specify a path here, as per uvicorn?
import os
from enum import Enum
from typing import Optional
from pydantic import BaseModel
from fastapi import FastAPI, Query
# for lambda; see https://adem.sh/blog/tutorial-fastapi-aws-lambda-serverless
from mangum import Mangum
from ontology_term_usage.term_usage import OntologyClient, ResultSet, TermUsage, TERM, ServiceMetadataCollection
# necessary for serverless/lambda
stage = os.environ.get('STAGE', None)
openapi_prefix = f"/{stage}" if stage else "/"
client = OntologyClient()
description = """
Wraps multiple endpoints to query for all usages of a term, including
* Terms used in logical definitions in external ontologies
* Terms used in annotation of entities like genes and proteins
* Terms used in specialized annotation such as GO-CAMs
"""
app = FastAPI(title='Ontology Usage API',
description=description,
contact = {
"name": "<NAME>",
"url": "https://github.com/cmungall/ontology-term-usage",
"email": "cjmungall AT lbl DOT gov",
},
openapi_prefix=openapi_prefix)
# NOTE(review): ``tags_metadata`` is defined but never passed to
# ``FastAPI(openapi_tags=...)`` above - it appears unused; confirm.
tags_metadata = [
    {
        "name": "usages",
        "description": "Operations on term usages",
        "externalDocs": {
            "description": "External docs",
            "url": "https://github.com/cmungall/ontology-term-usage",
        },
    },
    {
        "name": "metadata",
        "description": "Operations to discover more information about system configuration.",
        "externalDocs": {
            "description": "External docs",
            "url": "https://github.com/cmungall/ontology-term-usage",
        },
    },
]
@app.get("/")
async def root():
return {"message": "Hello World"}
@app.get("/usage/{term}", response_model=ResultSet, summary='Find usages of a term', tags=["usages"])
async def usage(term: TERM, limit: int = None) -> ResultSet:
"""
Find all usages of an ontology term across multiple services.
To obtain metadata on all services called, use the services endpoint
Example terms: GO:0006915 (apoptotic process), RO:0000057 (has participant)
\f
:param term: URI or CURIE of a term.
:param limit: maximum number of usages
:return: usages broken down by service
"""
rs = client.term_usage(term, limit=limit)
return rs
@app.get("/metadata", response_model=ServiceMetadataCollection, tags=["metadata"])
async def metadata() -> ServiceMetadataCollection:
return client.get_services()
# Lambda entry point: Mangum adapts the ASGI app to API Gateway events.
handler = Mangum(app)
33819 | <filename>sound_play/scripts/test/test_sound_client.py
#!/usr/bin/env python
import unittest
import rospy
import rostest
from sound_play.libsoundplay import SoundClient
class TestCase(unittest.TestCase):
    """Smoke test for the sound_play client library."""

    def test_soundclient_constructor(self):
        # constructing a SoundClient must succeed and yield a real object
        created_client = SoundClient()
        self.assertIsNotNone(created_client)
if __name__ == '__main__':
    # run this test case under the ROS test harness
    rostest.rosrun('sound_play', 'test_sound_client', TestCase)

__author__ = '<NAME>'  # NOTE(review): looks like an anonymised placeholder
| StarcoderdataPython |
1605370 | import os
import zipfile
import logging
import boto3
import wget
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# S3 landing bucket and raw-zone path layout for the MovieLens files
S3_BUCKET_NAME = "movie-data-platform.mpd"
TEMP_PATH = "/tmp/"  # the only writable filesystem inside AWS Lambda
MOVIE_LENS_PATH = "movie_lens/"
RAW_ZONE_PATH = "raw_zone/"
# upstream GroupLens download location and archive name
MOVIE_LENS_URL_PREFIX = "https://files.grouplens.org/datasets/movielens/"
MOVIE_LENS_FILE_NAME = "ml-25m.zip"
MOVIE_LENS_DIR_NAME = "ml-25m/"
def lambda_handler(event, context):
    """Entry point: download MovieLens, land it in S3, trigger Glue."""
    logger.info("===== Start MovieLens data injection =====")
    source_url = MOVIE_LENS_URL_PREFIX + MOVIE_LENS_FILE_NAME
    archive_path = get_file_from_movie_lens(source_url, MOVIE_LENS_FILE_NAME)
    unzip_file_and_save_into_s3(archive_path)
    start_glue_workflow()
    logger.info("===== End MovieLens data injection =====")

    response = {
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json"
        }
    }
    return response
def get_file_from_movie_lens(remote_url, file_name):
    """Download ``remote_url`` into /tmp and return the local archive path."""
    logger.info("Start get_file_from_movie_lens")
    local_archive = TEMP_PATH + file_name
    logger.info('zip_file_path: {}'.format(local_archive))
    archive_stem = file_name.replace(".zip", "")
    logger.info('unzip_file: {}'.format(archive_stem))
    wget.download(remote_url, local_archive)
    logger.info('File info: {}'.format(os.stat(local_archive)))
    return local_archive
def unzip_file_and_save_into_s3(zip_file_path):
    """Stream every archive member into the raw zone of the S3 bucket."""
    logger.info("Start unzip_file_and_save_into_s3")
    s3_resource = boto3.resource("s3")
    archive = zipfile.ZipFile(zip_file_path)
    logger.info('file name list: {}'.format(", ".join(str(file_name) for file_name in archive.namelist())))
    bucket = s3_resource.Bucket(S3_BUCKET_NAME)  # loop-invariant handle
    for member in archive.namelist():
        # strip the top-level "ml-25m/" directory from the object key
        s3_raw_path = RAW_ZONE_PATH + MOVIE_LENS_PATH + member.replace(MOVIE_LENS_DIR_NAME, "")
        logger.info('s3_raw_path: {}'.format(s3_raw_path))
        bucket.put_object(Key=s3_raw_path, Body=archive.open(member))
def start_glue_workflow():
    """Trigger the downstream Glue data-cleansing workflow."""
    logger.info("Start start_glue_workflow")
    glue_client = boto3.client("glue")
    glue_client.start_workflow_run(Name="movie_lens_data_cleansing_glue_workflow")
1636259 | <reponame>PeoplesMomentum/mxv
"""
Django settings for mxv project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from django.contrib import messages
from datetime import date
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from dotenv import load_dotenv
def bool_env(name):
    """Read environment variable *name* as a boolean.

    Case-insensitive: any spelling of 'true' yields True; anything else
    (including an unset variable, which defaults to 'False') yields False.
    """
    # A comparison already produces a bool; the former
    # ``True if ... else False`` wrapper was redundant.
    return os.environ.get(name, 'False').upper() == 'TRUE'
load_dotenv()

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('MXV_SECRET_KEY', '&0+9q8c$q46+bslj=g#!i9@u#j@3#p=#k12je47wj%fj24q%=*')

# SECURITY WARNING: don't run with debug turned on in production!
# Defaults to True (development); set MXV_DEBUG to anything else in production.
# (A comparison already yields a bool - the ``True if ... else False``
# wrapper was redundant; behaviour is identical.)
DEBUG = os.environ.get('MXV_DEBUG', 'True') == 'True'

# Application definition

INSTALLED_APPS = [
    # apps
    'mxv.apps.MxvConfig',
    'members.apps.MembersConfig',
    'review.apps.ReviewConfig',
    'voting_intentions.apps.VotingIntentionsConfig',
    'consultations.apps.ConsultationsConfig',
    'tasks.apps.TasksConfig',
    'questions.apps.QuestionsConfig',
    'livereload',

    # admin, Django and third party
    'nested_admin',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',

    # Disable Django's own staticfiles handling in favour of WhiteNoise, for
    # greater consistency between gunicorn and `./manage.py runserver`. See:
    # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',

    'solo.apps.SoloAppConfig',
    'tinymce',
    'widget_tweaks',
    'django_rq',
    'polymorphic',
    'django_countries',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise must sit directly after SecurityMiddleware to serve statics
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'livereload.middleware.LiveReloadScript',
]

ROOT_URLCONF = 'mxv.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR + '/mxv/templates/'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'mxv.context_processors.default',
            ],
            'debug': DEBUG,
        },
    },
]

WSGI_APPLICATION = 'mxv.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# 'default' is overridden further down by dj_database_url when DATABASE_URL is
# set; 'live' is a second connection - presumably the production database for
# sync/management tasks; confirm where it is used.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('MXV_DATABASE_HOST', ''),
        'PORT': os.environ.get('MXV_DATABASE_PORT', ''),
        'NAME': os.environ.get('MXV_DATABASE_NAME', 'mxv'),
        'USER': os.environ.get('MXV_DATABASE_USER', 'mxv'),
        'PASSWORD': os.environ.get('MXV_DATABASE_PASSWORD', '<PASSWORD>')
    },
    'live': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('MXV_LIVE_DATABASE_HOST', ''),
        'PORT': os.environ.get('MXV_LIVE_DATABASE_PORT', ''),
        'NAME': os.environ.get('MXV_LIVE_DATABASE_NAME', 'mxv'),
        'USER': os.environ.get('MXV_LIVE_DATABASE_USER', 'mxv'),
        'PASSWORD': os.environ.get('MXV_LIVE_DATABASE_PASSWORD', '<PASSWORD>')
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = False
USE_TZ = True

# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500))

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = not DEBUG

# Allow all host headers
# NOTE(review): '*' disables Django's Host-header validation - acceptable only
# if the platform router in front already restricts hosts; confirm.
ALLOWED_HOSTS = ['*']

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
    os.path.join(PROJECT_ROOT, 'static'),
]

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# extend the base user model
AUTH_USER_MODEL = 'members.Member'

# get the secret used by the join page to create inactive members
# NOTE(review): 'mxv' is a weak fallback secret - ensure the env var is always
# set in production.
CREATE_INACTIVE_MEMBER_SECRET = os.environ.get('MXV_CREATE_INACTIVE_MEMBER_SECRET', 'mxv')

# get the secret used by the GDPR tool to delete members
GDPR_TOOL_SECRET = os.environ.get('MXV_GDPR_TOOL_SECRET', 'mxv')

# set up HTML editor
TINYMCE_DEFAULT_CONFIG = {
    'theme': "advanced",
    'width' : 600,
    'height' : 300,
    'convert_urls' : False,
    'relative_urls' : False,
}

# Postmark
EMAIL_BACKEND = 'postmarker.django.EmailBackend'
POSTMARK = {
    'TOKEN': os.environ.get('POSTMARK_SERVER_TOKEN', "mxv"),
    'TEST_MODE': False,
    'VERBOSITY': 0,
}
DEFAULT_FROM_EMAIL = "Team Momentum <<EMAIL>>"

# join page
JOIN_URL = "https://join.peoplesmomentum.com"

# site name
SITE_NAME_SHORT = "My Momentum"
SITE_NAME_LONG = "My Momentum"

# send members to the index page after they login
LOGIN_URL = '/members/login'
LOGIN_REDIRECT_URL = '/'
# date/time formats
DATE_FORMAT = 'd/m/Y'
DATETIME_FORMAT = 'd/m/Y H:i:s'

# close the session when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = True

# CONSISTENCY: the boolean env flags below now all use bool_env() (it was
# already used for MXV_QUESTIONS_VISIBLE_TO_NON_STAFF).  bool_env is
# case-insensitive and defaults to False, so existing 'True' values keep
# working unchanged.

# whether to allow requests to the error URL (for testing error handling)
ALLOW_ERROR_URL = bool_env('MXV_ALLOW_ERROR_URL')

# change error into danger for bootstrap
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# whether to show track voting-specific changes to just staff or anyone
TRACK3_VOTING_VISIBLE_TO_NON_STAFF = bool_env('MXV_TRACK3_VOTING_VISIBLE_TO_NON_STAFF')

# when the site was launched to the members
LAUNCH_DATE = date(2018, 2, 2)

# token for accessing NationBuilder
NATIONBUILDER_API_TOKEN = os.environ.get('MXV_NATIONBUILDER_API_TOKEN', '')

# default redirect page URL
DEFAULT_REDIRECT_PAGE_URL = 'https://peoplesmomentum.com'

# whether to show consultations to just staff or anyone
CONSULTATIONS_VISIBLE_TO_NON_STAFF = bool_env('MXV_CONSULTATIONS_VISIBLE_TO_NON_STAFF')

# whether to show membership cards on the index or not
MEMBERSHIP_CARD_VISIBLE_TO_NON_STAFF = bool_env('MXV_MEMBERSHIP_CARD_VISIBLE_TO_NON_STAFF')

# task queueing
RQ_SHOW_ADMIN_LINK = True
RQ_QUEUES = {
    'default': {
        'URL': os.getenv('REDISTOGO_URL', 'redis://localhost:6379/0'),
    }
}

# configure sentry
sentry_sdk.init(
    dsn=os.getenv('SENTRY_DSN', ''),
    integrations=[DjangoIntegration()]
)

# whether to show profile pages to just staff or anyone
PROFILES_VISIBLE_TO_NON_STAFF = bool_env('MXV_PROFILES_VISIBLE_TO_NON_STAFF')

# get the secret used by the NationBuilder web hooks
WEB_HOOK_SECRET = os.environ.get('MXV_WEB_HOOK_SECRET', 'mxv')

# NCG voting
QUESTIONS_VISIBLE_TO_NON_STAFF = bool_env('MXV_QUESTIONS_VISIBLE_TO_NON_STAFF')
NCG_VOTING_VISIBLE_TO_NON_STAFF = bool_env('MXV_NCG_VOTING_VISIBLE_TO_NON_STAFF')
NCG_VOTING_URL = os.environ.get('MXV_NCG_VOTING_URL', '')
NCG_VOTING_IV = os.environ.get('MXV_NCG_VOTING_IV', '')
NCG_VOTING_KEY = os.environ.get('MXV_NCG_VOTING_KEY', '')

# django-countries settings
# NOTE(review): the ISO 3166-1 code for the United Kingdom is 'GB' - confirm
# django-countries accepts 'UK' here, otherwise this entry has no effect.
COUNTRIES_FIRST = ['UK']
25517 | <gh_stars>1-10
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
import zapador.constantes as cons
from zapador.metodos import pre_run
from zapador.clases import *
from kivy.factory import Factory
# Directory holding the Kivy language (.kv) layout files.
ubicacion = cons.DIR_SCRIPT + '/zapador/kv/'

# Load all kv layout definitions at import time, before any widget classes
# below are instantiated.
with open('{}main.kv'.format(ubicacion), encoding='UTF-8') as f:
    Builder.load_string(f.read())
with open('{}clases.kv'.format(ubicacion), encoding='UTF-8') as f:
    Builder.load_string(f.read())
with open('{}contenido.kv'.format(ubicacion), encoding='UTF-8') as f:
    Builder.load_string(f.read())
class Pantalla_Nueva(Screen):
    # Placeholder Screen subclass; its content is presumably defined in the
    # kv files loaded above - confirm.
    pass
class Pantalla_Importar(Screen):
    # NOTE(review): this popup is created once at class-definition time and is
    # therefore shared by all instances of the screen - confirm intentional.
    popup = Factory.CargarMision()
    # widget captured on enter; starts unset
    importar = None

    def on_pre_enter(self):
        # NOTE(review): relies on the exact widget tree built by the kv files
        # (first child of first child) - fragile if the layout changes.
        self.importar = self.children[0].children[0]
        self.popup.papi = self.importar
        self.popup.open()
class Pantalla_Opciones(Screen):
    # Placeholder Screen subclass; its content is presumably defined in the
    # kv files loaded above - confirm.
    pass
# Global screen manager with fade transitions; screens are registered by name
# so cambiar_pantalla()/kv bindings can switch between them.
sm = ScreenManager(transition=FadeTransition())
sm.add_widget(Pantalla_Nueva(name='pantalla_nueva'))
sm.add_widget(Pantalla_Importar(name='pantalla_importar'))
sm.add_widget(Pantalla_Opciones(name='pantalla_opciones'))
def cambiar_pantalla(pantalla):
    """Switch the global ScreenManager to the screen named ``pantalla``."""
    sm.current = pantalla
class ZapadorApp(App):
    """Entry point for the Zapador Kivy application."""

    def on_start(self):
        # one-time initialisation once the app window is up
        pre_run()

    def on_stop(self):
        # signal any in-flight downloads to stop before exiting
        Descargando.stop.set()

    def build(self):
        self.title = 'Zapador v'+cons.VERSION
        self.icon = 'zapador/assets/img/zapador.ico'
        return sm
| StarcoderdataPython |
1752146 | <filename>python/tinyusdz/Usd/__init__.py
from . import Stage
| StarcoderdataPython |
7116 | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
import dateutil.parser
import pytz
from flask import flash, request, session
from flask_pluginengine import render_plugin_template, url_for_plugin
from indico.core import signals
from indico.core.config import config
from indico.core.plugins import IndicoPlugin
from indico.core.settings.converters import ModelListConverter
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.requests.views import WPRequestsEventManagement
from indico.modules.rb.models.rooms import Room
from indico.modules.users import User
from indico.util.string import natural_sort_key
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, IndicoQuerySelectMultipleField, PrincipalListField
from indico.web.menu import TopMenuItem
from indico_room_assistance import _
from indico_room_assistance.blueprint import blueprint
from indico_room_assistance.definition import RoomAssistanceRequest
from indico_room_assistance.util import (can_request_assistance_for_event, event_has_room_with_support_attached,
is_room_assistance_support)
def _order_func(object_list):
    """Sort choice pairs by the natural-sort key of the object's full name."""
    def by_full_name(entry):
        return natural_sort_key(entry[1].full_name)
    return sorted(object_list, key=by_full_name)
class RoomAssistanceForm(IndicoForm):
    """Admin settings form for the room-assistance plugin."""

    # groups the three settings under one heading on the settings page
    _fieldsets = [
        ('Startup assistance emails', ['room_assistance_recipients', 'rooms_with_assistance',
                                       'room_assistance_support']),
    ]

    # email addresses notified about new assistance requests
    room_assistance_recipients = EmailListField(_('Recipients'),
                                                description=_('Notifications about room assistance requests are sent '
                                                              'to these email addresses (one per line)'))
    # rooms eligible for startup assistance, sorted via _order_func
    rooms_with_assistance = IndicoQuerySelectMultipleField('Rooms',
                                                           query_factory=lambda: Room.query,
                                                           description=_('Rooms for which users can request startup '
                                                                         'assistance'),
                                                           get_label='full_name', collection_class=set,
                                                           render_kw={'size': 20}, modify_object_list=_order_func)
    # users/groups allowed to view the request list
    room_assistance_support = PrincipalListField(_('Room assistance support'), allow_groups=True,
                                                 description=_('List of users who can view the list of events with '
                                                               'room startup assistance.'))
class RoomAssistancePlugin(IndicoPlugin):
    """Room assistance request

    This plugin lets users request assistance for meeting rooms.
    """

    configurable = True
    settings_form = RoomAssistanceForm
    settings_converters = {
        'rooms_with_assistance': ModelListConverter(Room)
    }
    acl_settings = {'room_assistance_support'}
    default_settings = {
        'room_assistance_recipients': [],
        'rooms_with_assistance': [],
    }

    def init(self):
        # Register CSS, template hooks and signal handlers with Indico.
        super().init()
        self.inject_bundle('main.css', WPRequestsEventManagement, subclasses=False,
                           condition=lambda: request.view_args.get('type') == RoomAssistanceRequest.name)
        self.template_hook('event-actions', self._room_assistance_action)
        self.connect(signals.menu.items, self._extend_services_menu, sender='top-menu')
        self.connect(signals.plugin.get_event_request_definitions, self._get_room_assistance_request)
        self.connect(signals.event.updated, self._on_event_update)

    def get_blueprints(self):
        return blueprint

    def _room_assistance_action(self, event, **kwargs):
        # Renders the "request assistance" action in the event toolbar.
        return render_plugin_template('room_assistance_action.html', event=event,
                                      can_request_assistance=can_request_assistance_for_event(event))

    def _extend_services_menu(self, reservation, **kwargs):
        # Only support staff get the top-menu entry.
        if not session.user or not is_room_assistance_support(session.user):
            return

        return TopMenuItem('services-cern-room-assistance', _('Room assistance'),
                           url_for_plugin('room_assistance.request_list'), section='services')

    def _get_room_assistance_request(self, sender, **kwargs):
        return RoomAssistanceRequest

    def _on_event_update(self, event, **kwargs):
        # Keep an accepted assistance request consistent when the event's
        # location or dates change; reject it when it can no longer be served.
        changes = kwargs['changes']
        if not changes.keys() & {'location_data', 'start_dt', 'end_dt'}:
            return

        # NOTE(review): this local 'request' shadows the flask `request`
        # imported at the top of the module - confirm that is intentional.
        request = Request.find_latest_for_event(event, RoomAssistanceRequest.name)
        if not request or request.state != RequestState.accepted:
            return

        if 'location_data' in changes and not event_has_room_with_support_attached(event):
            # New room is not supported -> auto-reject and clear occurrences.
            request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_supported_room.txt')},
                                      User.get_system_user())
            request.data = dict(request.data, occurrences=[])
            flash(_("The new event location is not in the list of the rooms supported by the room assistance team. "
                    "Room assistance request has been rejected and support will not be provided."), 'warning')

        if changes.keys() & {'start_dt', 'end_dt'}:
            tz = pytz.timezone(config.DEFAULT_TIMEZONE)
            occurrences = {dateutil.parser.parse(occ).astimezone(tz) for occ in request.data['occurrences']}
            req_dates = {occ.date() for occ in occurrences}
            event_dates = set(event.iter_days())
            old_dates = req_dates - event_dates
            has_overlapping_dates = req_dates & event_dates

            if not has_overlapping_dates:
                # None of the requested days survive the reschedule -> reject.
                request.definition.reject(request,
                                          {'comment': render_plugin_template('auto_reject_no_overlapping_dates.txt')},
                                          User.get_system_user())
                request.data = dict(request.data, occurrences=[])
                flash(_("The new event dates don't overlap with the existing room assistance request for this event. "
                        "Room assistance request has been rejected and support will not be provided."), 'warning')
            elif old_dates and has_overlapping_dates:
                # Drop only the occurrences that fell outside the new dates.
                new_data = dict(request.data)
                new_data['occurrences'] = [occ.astimezone(pytz.utc).isoformat() for occ in occurrences
                                           if occ.date() in req_dates & event_dates]
                request.data = new_data
                flash(_("Room assistance had been requested for days that are not between the updated start/end "
                        "dates. Support will not be provided on these days anymore."), 'warning')
| StarcoderdataPython |
1601851 | <gh_stars>0
# Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper object for Iotic resources
"""
from __future__ import unicode_literals
from IoticAgent.Core.Validation import Validation
class Resource(object):
    """Base wrapper for an Iotic resource identified by a GUID."""

    def __init__(self, client, guid):
        self.__client = client
        self.__guid = Validation.guid_check_convert(guid)

    @property
    def _client(self):
        """Internal accessor: the owning IOT.Client instance."""
        return self.__client

    @property
    def guid(self):
        """Hex (undashed) globally-unique identifier of this resource."""
        return self.__guid
| StarcoderdataPython |
1792095 | <filename>ping2mqtt/logging.py
import sys
from loguru import logger
from .settings import general_settings
__all__ = ("logger",)
LoggerFormat = "<green>{time:YY-MM-DD HH:mm:ss.SSS}</green> | " \
"<level>{level: <8}</level> | " \
"<level>{message}</level>"
logger.remove()
logger.add(
sys.stderr,
level=general_settings.log_level.upper(),
format=LoggerFormat,
enqueue=True
)
| StarcoderdataPython |
1651963 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
from __future__ import (division as _py3_division,
print_function as _py3_print,
absolute_import as _py3_abs_import)
from .metamodel import TransitiveRelationDescriptor, backref
from .metamodel import get_age, get_birth_date
def test_transitive():
    """Transitive containment: membership follows the located_in chain."""
    class Place:
        # descriptor under test; `target` is wired up after the class exists
        located_in = TransitiveRelationDescriptor('located_in')

        def __init__(self, **kwargs):
            for attr, val in kwargs.items():
                setattr(self, attr, val)

        def __repr__(self):
            return '<Place %r>' % self.name

    Place.located_in.target = Place
    # chain: vedado -> plaza -> havana -> cuba
    cuba = Place(name='Cuba', located_in=None)
    havana = Place(name='Havana', located_in=cuba)
    plaza = Place(name='Plaza', located_in=havana)
    vedado = Place(name='Vedado', located_in=plaza)
    # every ancestor in the chain is "in" vedado.located_in
    assert all(x in vedado.located_in for x in (cuba, havana, plaza))
    # the relation is not reflexive and the root has no ancestors
    assert vedado not in vedado.located_in
    assert not cuba.located_in
    # After removing Havana from Cuba, Vedado is no longer in Cuba as well.
    del havana.located_in
    assert cuba not in vedado.located_in
def test_backref():
    """backref: setting mother/father populates the reverse 'children' set."""
    class Person:
        # both relations share the same reverse attribute name
        mother = backref('mother', 'children')
        father = backref('father', 'children')

        def __init__(self, **kwargs):
            for attr, val in kwargs.items():
                setattr(self, attr, val)

        def __repr__(self):
            return '<Place %r>' % self.name

    Person.mother.target = Person
    Person.father.target = Person
    mami = Person(name='<NAME>')
    papi = Person(name='<NAME> Portal')
    manu = Person(name='<NAME>', mother=mami, father=papi)
    taire = Person(name='<NAME>', mother=mami, father=papi)
    yade = Person(name='<NAME>')
    manolito = Person(name='<NAME>', mother=yade, father=manu)
    # assigning a parent must register the child on both parents
    assert manolito in yade.children and manolito in manu.children
    assert taire in mami.children and manu in mami.children
def test_ages():
    """Round-trip property: get_age(get_birth_date(x)) == x.

    The original version sampled 100 random ages from range(4, 80), which was
    non-deterministic; checking every age in the range is deterministic and
    strictly stronger.
    """
    assert all(get_age(get_birth_date(age)) == age for age in range(4, 80))
| StarcoderdataPython |
3244654 | """Dataset class template
This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset.py
You need to implement the following functions:
-- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-- <__init__>: Initialize this dataset class.
-- <__getitem__>: Return a data point and its metadata information.
-- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image
import random
import os,h5py
import numpy as np
from util.data_util import read_json
class XGazeDataset(BaseDataset):
"""A template dataset class for you to implement custom datasets."""
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add ETH-XGaze specific command-line options.

        Registers ``--new_dataset_option`` (apparently left over from the
        dataset template), ``--index_file`` (a precomputed mapping from the
        full-data index to (subject key, per-subject index)) and
        ``--cam_index`` (restrict loading to specific camera indices), and
        overrides the defaults for ``max_dataset_size`` and
        ``new_dataset_option``.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
        parser.add_argument('--index_file', type=str, default=None, help='mapping from full-data index to key and person-specific index')
        parser.add_argument('--cam_index', type=int, nargs='+', default=None, help='loading specific camera index for ethxgaze')
        parser.set_defaults(max_dataset_size=None, new_dataset_option=2.0)  # specify dataset-specific default values
        return parser
def __init__(self, opt, split):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
"""
# save the option and dataset root
BaseDataset.__init__(self, opt)
self.split = split
self.opt = opt
self.hdfs = {}
self.root = opt.dataroot
self.key_to_use = read_json(self.root)[self.split]
self.selected_keys = [k for k in self.key_to_use]
# get the image paths of your dataset;
for num_i in range(0, len(self.selected_keys)):
file_path = os.path.join(self.root, self.split, self.selected_keys[num_i])
# print(self.root, self.split)
self.hdfs[num_i] = h5py.File(file_path, 'r', swmr=True)
# print('read file: ', os.path.join(self.path, self.selected_keys[num_i]))
assert self.hdfs[num_i].swmr_mode
# Construct mapping from full-data index to key and person-specific index
index_file = opt.index_file
cam_list = opt.cam_index
if index_file is None:
self.idx_to_kv = []
for num_i in range(0, len(self.selected_keys)):
n = self.hdfs[num_i]["face_patch"].shape[0]
if cam_list is None:
for i in range(0,n):
self.idx_to_kv += [(num_i, i)]
else:
for cam in cam_list:
for i in range(cam, n, 18):
self.idx_to_kv += [(num_i, i)]
else:
print('load the file: ', index_file)
self.idx_to_kv = np.loadtxt(index_file, dtype=np.int)
max_dataset_size = opt.max_dataset_size
if max_dataset_size is not None:
random.seed(max_dataset_size)
self.idx_to_kv = random.sample(self.idx_to_kv, max_dataset_size)
# print(self.idx_to_kv[:20])
random.seed(time.time())
for num_i in range(0, len(self.hdfs)):
if self.hdfs[num_i]:
self.hdfs[num_i].close()
self.hdfs[num_i] = None
# define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
self.transform = get_transform(opt)
def __len__(self):
return len(self.idx_to_kv)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
Step 1: get a random image path: e.g., path = self.image_paths[index]
Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
Step 3: convert your data to a PyTorch tensor. You can use helpder functions such as self.transform. e.g., data = self.transform(image)
Step 4: return a data point as a dictionary.
"""
key, idx = self.idx_to_kv[index]
self.hdf = h5py.File(os.path.join(self.root, self.split, self.selected_keys[key]), 'r', swmr=True)
face = self.hdf['face_patch'][idx,:]
face = face[:, :, [2, 1, 0]]
face = self.transform(face)
gaze_label = self.hdf['face_gaze'][idx, :]
return {'face': face, 'gaze': gaze_label}
| StarcoderdataPython |
27541 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 06:34:04 2015
@author: tanay
"""
from lasagne.layers import InputLayer, DropoutLayer, DenseLayer
from lasagne.updates import nesterov_momentum
from lasagne.objectives import binary_crossentropy
from nolearn.lasagne import NeuralNet
import theano
from theano import tensor as T
from theano.tensor.nnet import sigmoid
from sklearn import metrics
from sklearn.utils import shuffle
import numpy as np
# Shared variable so AdjustVariable can decay the learning rate between epochs.
learning_rate = theano.shared(np.float32(0.1))
# NOTE(review): Xtrh.shape is a tuple, and input_shape=(None, input_size)
# below nests it inside another tuple -- presumably Xtrh.shape[1] (feature
# count) was intended; confirm before relying on this.
input_size=Xtrh.shape
class AdjustVariable(object):
    """Per-epoch callback that decays a shared variable toward ``target``.

    The gap between the variable and ``target`` is halved every
    ``half_life`` epochs (exponential decay).
    """

    def __init__(self, variable, target, half_life=20):
        self.variable = variable
        self.target = target
        self.half_life = half_life

    def __call__(self, nn, train_history):
        # Shrink the remaining gap by the per-epoch decay factor.
        decay = 2 ** (1.0 / self.half_life)
        gap = (self.variable.get_value() - self.target) / decay
        self.variable.set_value(np.float32(self.target + gap))
# Two-hidden-layer fully connected network trained with Nesterov momentum;
# the learning rate decays toward 0 via the AdjustVariable callback above.
net = NeuralNet(
    layers=[
        ('input', InputLayer),
        ('hidden1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('hidden2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ],
    # layer parameters:
    input_shape=(None, input_size),
    hidden1_num_units=400,
    dropout1_p=0.4,
    hidden2_num_units=200,
    dropout2_p=0.4,
    output_nonlinearity=sigmoid,
    output_num_units=4,
    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=learning_rate,
    update_momentum=0.899,
    # Decay the learning rate
    on_epoch_finished=[
        AdjustVariable(learning_rate, target=0, half_life=4),
    ],
    # This is silly, but we don't want a stratified K-Fold here
    # To compensate we need to pass in the y_tensor_type and the loss.
    regression=True,
    y_tensor_type = T.imatrix,
    objective_loss_function = binary_crossentropy,
    max_epochs=75,
    eval_size=0.1,
    verbose=1,
)
# NOTE(review): Xtrh and y are not defined in this file; the script expects
# them to exist in the calling session (e.g. an interactive notebook).
X, y = shuffle(Xtrh, y, random_state=123)
net.fit(X, y)
# Evaluate ROC AUC on the same held-out split nolearn used internally.
_, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
probas = net.predict_proba(X_valid)[:,0]
print("ROC score", metrics.roc_auc_score(y_valid, probas))
| StarcoderdataPython |
3253020 | <gh_stars>0
def makeKDTree(points, dim = 0):
    """Recursively build a k-d tree from a list of equal-length point tuples.

    Leaves are 1-tuples holding a single point; internal nodes are
    (split_value, left_subtree, right_subtree), with the split point itself
    kept in the right subtree.  Note: sorts ``points`` in place.
    """
    if not points:
        return None
    if len(points) == 1:
        return tuple(points)
    points.sort(key=lambda pt: pt[dim])
    mid = len(points) // 2
    next_dim = (dim + 1) % len(points[mid])
    left_tree = makeKDTree(points[:mid], next_dim)
    right_tree = makeKDTree(points[mid:], next_dim)
    return (points[mid][dim], left_tree, right_tree)
def searchKDTree(kdTree, point, dim = 0):
    """Return True iff ``point`` is stored in a tree built by makeKDTree.

    Descends iteratively: leaves are 1-tuples, internal nodes are
    (split_value, left, right); points equal to the split value go right.
    """
    node = kdTree
    axis = dim
    while len(node) != 1:
        split_value, lower, upper = node
        node = lower if point[axis] < split_value else upper
        axis = (axis + 1) % len(point)
    return node[0] == point
# Demo: build a 2-D k-d tree from ten points, print its structure, then
# probe membership (the first three queries are stored points, the last
# query (2,6) is not in the tree).
kdTree1 = makeKDTree([(1,9), (2,3), (3,7), (4,1), (5,4), (6,8), (7,2), (7,9), (8,8), (9,6)])
print(kdTree1)
print(searchKDTree(kdTree1, (1,9)))
print(searchKDTree(kdTree1, (9,6)))
print(searchKDTree(kdTree1, (5,4)))
print(searchKDTree(kdTree1, (2,6)))
| StarcoderdataPython |
1733190 | <filename>armstrong/apps/embeds/admin_forms.py<gh_stars>1-10
from functools import wraps
from django.core.cache import cache
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django.contrib.formtools.preview import FormPreview
try:
from django.db.transaction import atomic
except ImportError: # DROP_WITH_DJANGO15 # pragma: no cover
from django.db.transaction import commit_on_success as atomic
try:
from django.utils.encoding import force_text
except ImportError: # DROP_WITH_DJANGO13 # pragma: no cover
from django.utils.encoding import force_unicode as force_text
try:
from django.utils.text import slugify
except ImportError: # DROP_WITH_DJANGO14 # pragma: no cover
from django.template.defaultfilters import slugify
from .models import Embed
from .backends import InvalidResponseError
IS_POPUP_VAR = "_popup"


def generate_cache_key(backend, url):
    """Build a memcached-safe cache key for one (backend, url) response.

    BUGFIX: the original called the Python-2-only ``unicode`` builtin, which
    raises NameError on Python 3.  ``force_text`` (imported above with a
    Python 2/3 fallback) performs the same coercion on both versions, matching
    the rest of this module.
    """
    cache_key = "armstrong.apps.embeds-response-for-%s-%s" % \
        (backend.pk, slugify(force_text(url)))
    return cache_key[:250]  # memcached caps key length at 250 chars
# TODO relocate to a shared location
# TODO if Django updates this class to use class-based Views
# (as they did with FormWizard in Django 1.4) this will need to change, though
# some things (such as admin_view() wrapping) will be dramatically easier
# TODO relocate to a shared location
class AdminFormPreview(FormPreview):
    """
    Adapt FormPreview into the Admin replacing the normal add/change views
    with a two-step process for editing a Model and previewing before save.
    """
    def __init__(self, form, admin):
        super(AdminFormPreview, self).__init__(form)
        self.admin = admin
        self.model = self.admin.model
        # Per-request state; filled in by __call__ for change views.
        self.object_id = None
        self.object = None
        self.action = "add"
        # Route both Admin edit views through this preview workflow's templates.
        self.admin.add_form_template = self.form_template
        self.admin.change_form_template = self.preview_template
    def __call__(self, request, *args, **kwargs):
        """Wrap the FormPreview in Admin decorators"""
        # Change View if we've been passed an object_id
        if len(args) >= 1:
            try:
                from django.contrib.admin.utils import unquote
            except ImportError: # DROP_WITH_DJANGO16 # pragma: no cover
                from django.contrib.admin.util import unquote
            self.object_id = args[0]
            self.action = "change"
            self.object = self.admin.get_object(request, unquote(self.object_id))
        # Chain: FormPreview dispatch -> permission check -> admin_view wrapper.
        method = super(AdminFormPreview, self).__call__
        method = self.perm_check(method)
        method = self.admin.admin_site.admin_view(method)
        return method(request, *args, **kwargs)
    def perm_check(self, func):
        """Provide permissions checking normally handled in add_view() and change_view()"""
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            # 404 if a change view was requested for a nonexistent object.
            if self.object_id and not self.object:
                from django.utils.html import escape
                from django.http import Http404
                raise Http404(
                    _('%(name)s object with primary key %(key)r does not exist.') %
                    {'name': force_text(self.model._meta.verbose_name),
                     'key': escape(self.object_id)})
            from django.core.exceptions import PermissionDenied
            if self.action == "add" and not self.admin.has_add_permission(request):
                raise PermissionDenied
            elif not self.admin.has_change_permission(request, self.object):
                raise PermissionDenied
            return func(request, *args, **kwargs)
        return wrapper
    def get_context(self, request, form):
        """Provide templates vars expected by the Admin change_form.html"""
        opts = self.model._meta
        context = super(AdminFormPreview, self).get_context(request, form)
        context.update(dict(
            title=_('%s %s') % (self.action.title(), force_text(opts.verbose_name)),
            object_id=self.object_id,
            original=self.object,
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            current_app=self.admin.admin_site.name,
            show_delete=(self.action == "change"),
            app_label=opts.app_label,
            opts=opts,
            #
            # Other context vars present in the Admin add_view/change_view--
            # Not entirely sure how or if its appropriate to use these
            # so acknowledge them and make it clear we aren't using them
            #
            # adminform -- our stuff is not an AdminForm
            # media -- collection of ModelAdmin, AdminForm and inline formset media
            inline_admin_formsets=[], # we know this should be empty
            # errors -- AdminErrorList, combines all form and formset errors
        ))
        return context
    def get_render_change_form_params(self, request):
        # Keyword args forwarded to ModelAdmin.render_change_form().
        return dict(
            obj=self.object,
            add=(self.action == 'add'),
            change=(self.action == 'change'))
    def preview_get(self, request):
        """
        Displays the form.
        Overriden to provide the model instance instead of initial data
        and call the Admin's render_change_form().
        """
        f = self.form(auto_id=self.get_auto_id(), instance=self.object)
        context = self.get_context(request, f)
        render_params = self.get_render_change_form_params(request)
        return self.admin.render_change_form(request, context, **render_params)
    def preview_post(self, request):
        """
        Validates the POST data. If valid, displays the preview page, else
        redisplays form. Overriden to
        - provide the model instance
        - use Admin's render_change_form()
        - update the title context var
        - provide a "step2" context var letting us share a single tempate
        """
        f = self.form(request.POST, auto_id=self.get_auto_id(), instance=self.object)
        context = self.get_context(request, f)
        if f.is_valid():
            self.process_preview(request, f, context)
            context.update(dict(
                is_step2=True,
                title=_('Preview %s') % force_text(context['opts'].verbose_name),
                hash_field=self.unused_name('hash'),
                hash_value=self.security_hash(request, f)))
        render_params = self.get_render_change_form_params(request)
        return self.admin.render_change_form(request, context, **render_params)
    def post_post(self, request):
        """
        Validates the POST data. If valid, calls done(). Else, redisplays form.
        Overriden to
        - supply the form model instance
        - call preview_post() instead of calling its own render
        - add transaction support
        """
        f = self.form(request.POST, auto_id=self.get_auto_id(), instance=self.object)
        if f.is_valid():
            if not self._check_security_hash(request.POST.get(self.unused_name('hash'), ''),
                                             request, f):
                return self.failed_hash(request) # Security hash failed
            # Wrap the save in a transaction so a failed done() rolls back.
            with atomic():
                return self.done(request, f.cleaned_data)
        else:
            return self.preview_post(request)
class EmbedFormPreview(AdminFormPreview):
    """
    A replacement for the normal Admin edit views that provides a two-step
    process for editing an Embed object. Since so much of the Embed data
    is gathered from an API, we want to present that to the user so they
    have an idea what they are creating.
    """
    form_template = "embeds/admin/embed_change_form.html"
    preview_template = "embeds/admin/embed_change_form.html"
    def process_preview(self, request, form, context):
        """
        Generate the response (and cache it) or provide error messaging.
        Update the form with the auto-assigned Backend if necessary.
        """
        try:
            response = form.instance.get_response()
            if not response.is_valid():
                # throw an error so we can share the except block logic
                raise InvalidResponseError(response._data)
        except InvalidResponseError as e:
            msg = mark_safe(_(
                "Invalid response from the Backend API.<br />"
                "Check the URL for typos and/or try a different Backend."))
            # add_error() exists on Django >= 1.7; fall back to poking
            # form._errors directly on older versions.
            try:
                form.add_error(None, ValidationError(msg, code="invalid"))
            except AttributeError: # DROP_WITH_DJANGO16 # pragma: no cover
                from django.forms.forms import NON_FIELD_ERRORS
                if NON_FIELD_ERRORS not in form._errors:
                    form._errors[NON_FIELD_ERRORS] = form.error_class()
                form._errors[NON_FIELD_ERRORS].append(msg)
            try: # handle dict data masquerading as an Exception string
                from ast import literal_eval
                error_dict = literal_eval(str(e))
            except (ValueError, SyntaxError): # just use the string
                error_dict = dict(data=e)
            error_dict['exception'] = type(e).__name__
            context['response_error'] = error_dict
        else:
            context['duplicate_response'] = (form.instance.response == response)
            form.instance.response = response
            # cache the response to prevent another API call on save
            # set() overwrites if anything already exists from another attempt
            cache_key = generate_cache_key(
                form.instance.backend, form.instance.url)
            cache.set(cache_key, form.instance.response_cache, 300)
        #HACK if the backend was auto-assigned the form field must also be set
        if not form.data['backend']:
            data = form.data.copy() # mutable QueryDict
            backends = list(form.fields['backend']._queryset)
            data['backend'] = backends.index(form.instance.backend) + 1 # select box options are 1-indexed
            form.data = data
    def done(self, request, cleaned_data):
        """Save Embed using cached response to avoid another API call"""
        # get or create the object and use form data
        embed = self.object if self.object else Embed()
        embed.url = cleaned_data['url']
        embed.backend = cleaned_data['backend']
        # load and use cached response then delete/clean up
        cache_key = generate_cache_key(embed.backend, embed.url)
        embed.response = embed.backend.wrap_response_data(cache.get(cache_key), fresh=True)
        cache.delete(cache_key)
        # save and continue with the Admin Site workflow
        # NOTE(review): response_add() is used even when editing an existing
        # Embed (self.object set) -- confirm change-view messaging is intended.
        embed.save()
        return self.admin.response_add(request, embed)
    def get_context(self, request, form):
        # Extend the Admin context with step-1 labelling for the template.
        context = super(EmbedFormPreview, self).get_context(request, form)
        context['errornote_css_class'] = 'errornote'
        context['form1_submit_text'] = "Request new data & Preview" if self.action == "change" else "Preview"
        return context
| StarcoderdataPython |
4836717 | # Copyright (C) 2018 and later: Unicode, Inc. and others.
# License & terms of use: http://www.unicode.org/copyright.html
# Python 2/3 Compatibility (ICU-20299)
# TODO(ICU-20301): Remove this.
from __future__ import print_function
from abc import abstractmethod
from collections import defaultdict
import re
import sys
from . import *
from . import utils
from .locale_dependencies import data as DEPENDENCY_DATA
from .request_types import *
# Note: for this to be a proper abstract class, it should extend abc.ABC.
# There is no nice way to do this that works in both Python 2 and 3.
# TODO(ICU-20301): Make this inherit from abc.ABC.
class Filter(object):
    """Base class for ICU data-file filters (abstract; see TODO(ICU-20301))."""

    @staticmethod
    def create_from_json(json_data):
        """Instantiate the concrete Filter subclass named by ``filterType``.

        Defaults to "file-stem" when no filterType is given; prints an error
        and returns None for unknown types.
        """
        filter_type = json_data.get("filterType", "file-stem")
        if filter_type == "exclude":
            return ExclusionFilter()
        constructors = {
            "file-stem": FileStemFilter,
            "language": LanguageFilter,
            "regex": RegexFilter,
            "union": UnionFilter,
            "locale": LocaleFilter,
        }
        if filter_type not in constructors:
            print("Error: Unknown filterType option: %s" % filter_type, file=sys.stderr)
            return None
        return constructors[filter_type](json_data)

    def filter(self, request):
        """Return [request] if its files survive this filter, else []."""
        if not request.apply_file_filter(self):
            return []
        # Sanity check: every file left on the request must match.
        for file in request.all_input_files():
            assert self.match(file)
        return [request]

    @classmethod
    def _file_to_file_stem(cls, file):
        """Strip the directory prefix and extension from file.filename."""
        slash = file.filename.rfind("/")
        dot = file.filename.rfind(".")
        return file.filename[slash + 1:dot]

    @abstractmethod
    def match(self, file):
        pass
class InclusionFilter(Filter):
    """Filter that unconditionally accepts every file."""
    def match(self, file):
        return True
class ExclusionFilter(Filter):
    """Filter that unconditionally rejects every file."""
    def match(self, file):
        return False
class WhitelistBlacklistFilter(Filter):
    """Shared base for filters configured with either a "whitelist" or a
    "blacklist" key; exactly one of the two must be present in json_data."""
    def __init__(self, json_data):
        if "whitelist" in json_data:
            self.is_whitelist = True
            self.whitelist = json_data["whitelist"]
        else:
            assert "blacklist" in json_data, "Need either whitelist or blacklist: %s" % str(json_data)
            self.is_whitelist = False
            self.blacklist = json_data["blacklist"]
    def match(self, file):
        # Delegate the actual decision on the bare file stem to the subclass.
        file_stem = self._file_to_file_stem(file)
        return self._should_include(file_stem)
    @abstractmethod
    def _should_include(self, file_stem):
        pass
class FileStemFilter(WhitelistBlacklistFilter):
    """Match files by their exact file stem against the configured list."""

    def _should_include(self, file_stem):
        # Whitelist: keep only listed stems; blacklist: drop listed stems.
        return (file_stem in self.whitelist) if self.is_whitelist \
            else (file_stem not in self.blacklist)
class LanguageFilter(WhitelistBlacklistFilter):
    """Match files by the language code prefix of the file stem."""

    def _should_include(self, file_stem):
        lang, _, _rest = file_stem.partition("_")
        # root.txt is always kept regardless of the configured lists.
        if lang == "root":
            return True
        if self.is_whitelist:
            return lang in self.whitelist
        return lang not in self.blacklist
class RegexFilter(WhitelistBlacklistFilter):
    """Match file stems against a list of regular expressions."""

    def __init__(self, *args):
        # TODO(ICU-20301): Change this to: super().__init__(*args)
        super(RegexFilter, self).__init__(*args)
        # Pre-compile whichever list this instance actually uses.
        if self.is_whitelist:
            self.whitelist = [re.compile(expr) for expr in self.whitelist]
        else:
            self.blacklist = [re.compile(expr) for expr in self.blacklist]

    def _should_include(self, file_stem):
        if self.is_whitelist:
            return any(pattern.match(file_stem) for pattern in self.whitelist)
        return not any(pattern.match(file_stem) for pattern in self.blacklist)
class UnionFilter(Filter):
    """Logical OR over several sub-filters built from JSON descriptions."""

    def __init__(self, json_data):
        # Build each sub-filter from its own JSON description.
        self.sub_filters = [
            Filter.create_from_json(sub_json)
            for sub_json in json_data["unionOf"]
        ]

    def match(self, file):
        """Match iff any of the sub-filters match."""
        return any(sub.match(file) for sub in self.sub_filters)
# <lang>_<Titlecase 4-letter script>, e.g. "sr_Latn"
LANGUAGE_SCRIPT_REGEX = re.compile(r"^([a-z]{2,3})_[A-Z][a-z]{3}$")
# bare 2-3 letter language code, e.g. "en", "fil"
LANGUAGE_ONLY_REGEX = re.compile(r"^[a-z]{2,3}$")
class LocaleFilter(Filter):
    """Filter over the locale tree: keeps requested locales, their ancestors,
    and optionally their children and same-language script variants."""
    def __init__(self, json_data):
        self.locales_requested = set()
        self.locales_required = set()
        self.include_children = json_data.get("includeChildren", True)
        self.include_scripts = json_data.get("includeScripts", False)
        # Compute the requested and required locales.
        for locale in json_data["whitelist"]:
            self._add_locale_and_parents(locale)
    def _add_locale_and_parents(self, locale):
        # Store the locale as *requested*
        self.locales_requested.add(locale)
        # Store the locale and its dependencies as *required*
        while locale is not None:
            self.locales_required.add(locale)
            locale = self._get_parent_locale(locale)
    def match(self, file):
        locale = self._file_to_file_stem(file)
        # A locale is *required* if it is *requested* or an ancestor of a
        # *requested* locale.
        if locale in self.locales_required:
            return True
        # Resolve include_scripts and include_children.
        return self._match_recursive(locale)
    def _match_recursive(self, locale):
        # Base case: return True if we reached a *requested* locale,
        # or False if we ascend out of the locale tree.
        if locale is None:
            return False
        if locale in self.locales_requested:
            return True
        # Check for alternative scripts.
        # This causes sr_Latn to check sr instead of going directly to root.
        if self.include_scripts:
            match = LANGUAGE_SCRIPT_REGEX.match(locale)
            if match and self._match_recursive(match.group(1)):
                return True
        # Check if we are a descendant of a *requested* locale.
        if self.include_children:
            parent = self._get_parent_locale(locale)
            if self._match_recursive(parent):
                return True
        # No matches.
        return False
    @classmethod
    def _get_parent_locale(cls, locale):
        # Explicit parent/alias tables first, then structural fallbacks:
        # a bare language parents to "root"; otherwise strip the last "_part".
        if locale in DEPENDENCY_DATA["parents"]:
            return DEPENDENCY_DATA["parents"][locale]
        if locale in DEPENDENCY_DATA["aliases"]:
            return DEPENDENCY_DATA["aliases"][locale]
        if LANGUAGE_ONLY_REGEX.match(locale):
            return "root"
        i = locale.rfind("_")
        if i < 0:
            return None
        return locale[:i]
def apply_filters(requests, config):
    """Runs the filters and returns a new list of requests."""
    # File-level filters first (whole files in or out), then filters that
    # select individual resources inside the remaining bundle files.
    requests = _apply_file_filters(requests, config)
    requests = _apply_resource_filters(requests, config)
    return requests
def _apply_file_filters(old_requests, config):
    """Filters out entire files."""
    per_category = _preprocess_file_filters(old_requests, config)
    new_requests = []
    for request in old_requests:
        # Categories without a configured filter pass through untouched.
        if request.category in per_category:
            new_requests.extend(per_category[request.category].filter(request))
        else:
            new_requests.append(request)
    return new_requests
def _preprocess_file_filters(requests, config):
all_categories = set(
request.category
for request in requests
)
all_categories.remove(None)
all_categories = list(sorted(all_categories))
json_data = config.filters_json_data
filters = {}
for category in all_categories:
if "featureFilters" in json_data and category in json_data["featureFilters"]:
filters[category] = Filter.create_from_json(
json_data["featureFilters"][category]
)
elif "localeFilter" in json_data and category[-5:] == "_tree":
filters[category] = Filter.create_from_json(
json_data["localeFilter"]
)
if "featureFilters" in json_data:
for category in json_data["featureFilters"]:
if category not in all_categories:
print("Warning: category %s is not known" % category, file=sys.stderr)
return filters
class ResourceFilterInfo(object):
    """Bookkeeping for one category's resource filters: which genrb input
    files it covers and which filter rules apply to each input file."""
    def __init__(self, category):
        self.category = category
        self.filter_tmp_dir = "filters/%s" % category
        self.input_files = None
        self.filter_files = None
        self.rules_by_file = None
    def apply_to_requests(self, all_requests):
        # Call this method only once per list of requests.
        assert self.input_files is None
        for request in all_requests:
            if request.category != self.category:
                continue
            if not isinstance(request, AbstractExecutionRequest):
                continue
            if request.tool != IcuTool("genrb"):
                continue
            if not request.input_files:
                continue
            self._set_files(request.input_files)
            request.dep_targets += [self.filter_files[:]]
            # Point genrb at the directory of generated filter files.
            arg_str = "--filterDir {TMP_DIR}/%s" % self.filter_tmp_dir
            request.args = "%s %s" % (arg_str, request.args)
        # Make sure we found the target request
        if self.input_files is None:
            print("WARNING: Category not found: %s" % self.category, file=sys.stderr)
            self.input_files = []
            self.filter_files = []
            self.rules_by_file = []
    def _set_files(self, files):
        # Note: The input files to genrb for a certain category should always
        # be the same. For example, there are often two genrb calls: one for
        # --writePoolBundle, and the other for --usePoolBundle. They are both
        # expected to have the same list of input files.
        if self.input_files is not None:
            assert self.input_files == files
            return
        self.input_files = list(files)
        # One filter file (named after the input's basename) per input file.
        self.filter_files = [
            TmpFile("%s/%s" % (self.filter_tmp_dir, basename))
            for basename in (
                file.filename[file.filename.rfind("/")+1:]
                for file in files
            )
        ]
        self.rules_by_file = [[] for _ in range(len(files))]
    def add_rules(self, file_filter, rules):
        # Attach the rules to every input file accepted by file_filter.
        for file, rule_list in zip(self.input_files, self.rules_by_file):
            if file_filter.match(file):
                rule_list += rules
    def make_requests(self):
        # Map from rule list to filter files with that rule list
        unique_rules = defaultdict(list)
        for filter_file, rules in zip(self.filter_files, self.rules_by_file):
            unique_rules[tuple(rules)].append(filter_file)
        # Print each distinct rule set once, then copy it to its siblings.
        new_requests = []
        i = 0
        for rules, filter_files in unique_rules.items():
            base_filter_file = filter_files[0]
            new_requests += [
                PrintFileRequest(
                    name = "%s_print_%d" % (self.category, i),
                    output_file = base_filter_file,
                    content = self._generate_resource_filter_txt(rules)
                )
            ]
            i += 1
            for filter_file in filter_files[1:]:
                new_requests += [
                    CopyRequest(
                        name = "%s_copy_%d" % (self.category, i),
                        input_file = base_filter_file,
                        output_file = filter_file
                    )
                ]
                i += 1
        return new_requests
    @classmethod
    def _generate_resource_filter_txt(cls, rules):
        # One rule per line, preceded by a generated-file banner.
        result = "# Caution: This file is automatically generated\n\n"
        result += "\n".join(rules)
        return result
return result
def _apply_resource_filters(all_requests, config):
    """Creates filters for looking within resource bundle files."""
    json_data = config.filters_json_data
    if "resourceFilters" not in json_data:
        return all_requests
    # Group configured rules per category, instantiating one
    # ResourceFilterInfo per category on first sight.
    collected = {}
    for entry in json_data["resourceFilters"]:
        if "files" in entry:
            file_filter = Filter.create_from_json(entry["files"])
        else:
            file_filter = InclusionFilter()
        for category in entry["categories"]:
            # not defaultdict because we need to pass arguments to the constructor
            if category not in collected:
                filter_info = ResourceFilterInfo(category)
                filter_info.apply_to_requests(all_requests)
                collected[category] = filter_info
            else:
                filter_info = collected[category]
            filter_info.add_rules(file_filter, entry["rules"])
    # Add the filter generation requests to the beginning so that by default
    # they are made before genrb gets run (order is required by windirect)
    new_requests = []
    for filter_info in collected.values():
        new_requests += filter_info.make_requests()
    new_requests += all_requests
    return new_requests
| StarcoderdataPython |
3368522 | from gitmanager import *
def test_is_same_commit():
    # HEAD must resolve equal to itself and differ from its parent (HEAD~).
    assert is_same_commit("HEAD", "HEAD")
    assert not is_same_commit("HEAD", "HEAD~")
| StarcoderdataPython |
19703 | import os
import yaml
import copy
import logging
from pathlib import Path
import torch
from torch.nn import *
from torch.optim import *
import torch.distributed as dist
from torch.optim.lr_scheduler import *
from torch.nn.parallel import DistributedDataParallel
from utils.metrics import *
from models import _get_model
torch.backends.cudnn.benchmark = True
class Argments(object):
    """YAML-backed experiment configuration with '/'-separated key access.

    Loads a YAML config file, applies command-line overrides, initialises
    torch.distributed, and instantiates the model/optimizer/scheduler/loss
    objects described in every top-level 'module*' section.
    """

    @staticmethod
    def _file_load(yaml_file):
        """Parse the YAML file and return its contents as a dict."""
        with open(fr'{yaml_file}') as f:
            y = yaml.safe_load(f)
        return y

    @staticmethod
    def _module_load(d, part, **kargs):
        """Instantiate d[part]['name'] with the remaining keys as kwargs.

        NOTE(security): eval() on a config-supplied name executes arbitrary
        code; only use with trusted configuration files.
        """
        module_obj = eval(d[part]['name'])
        module_args = copy.deepcopy(d[part])
        module_args.update(kargs)
        del module_args['name']
        part = module_obj(**module_args)
        return part

    def _modules_load(self):
        """Build every 'module*' section: model, optimizer, scheduler, losses."""
        for k, v in self._y.items():
            if 'module' in k:
                setattr(self, k, dict())
                module = self.__dict__[k]
                module['model'] = _get_model(**v['model'], model_type=self['setup/model_type']).cuda()
                if self['setup/phase'] != 'infer':
                    # Training/validation: full optimization stack.
                    module['optim'] = self._module_load(v, part='optim',
                                                        params=module['model'].parameters())
                    module['model'] = DistributedDataParallel(module['model'],
                                                              [self['setup/rank']])
                    module['lr_scheduler'] = self._module_load(v, part='lr_scheduler',
                                                               optimizer=module['optim'])
                    loss = [eval(l)(**v['loss_args'][l]) for l in v['loss']]
                    module['loss_with_weight'] = list(zip(loss, v['loss_weight']))
                    module['val_metric'] = eval(v['val_metric'])(**v['metric_args'])
                    module['test_metric'] = eval(v['test_metric'])(**v['metric_args'])
                else:
                    # Inference: model only, still wrapped for DDP.
                    module['model'] = DistributedDataParallel(module['model'],
                                                              [self['setup/rank']])

    def __init__(self, yaml_file, cmd_args):
        """Load the YAML config, apply cmd_args overrides, and init modules."""
        self.file_name = yaml_file
        self._y = self._file_load(yaml_file)
        if cmd_args.gpus != "-1":
            self['setup/gpus'] = cmd_args.gpus
        os.environ["CUDA_VISIBLE_DEVICES"] = self["setup/gpus"]
        self['setup/index'] = cmd_args.index
        self['setup/phase'] = cmd_args.phase
        self['setup/local_rank'] = cmd_args.local_rank
        # World size = number of GPU ids in the comma-separated gpus string.
        world_size = len(self["setup/gpus"].replace(',', "").replace("'", ""))
        # Output directory encodes model type/name, dataset, index, postfix.
        model_path = f"outs/{self['setup/model_type']}/{self['module/model/name']}"
        model_path += f"/{self['path/dataset']}"
        if self['setup/index'] != -1:
            model_path += f"_{self['setup/index']}"
        if self['path/postfix'] != 'none':
            model_path += f"_{self['path/postfix']}"
        self['path/model_path'] = model_path
        Path(model_path).mkdir(parents=True, exist_ok=True)
        torch.cuda.set_device(cmd_args.local_rank)
        # File-based rendezvous inside the run's output directory.
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=f'file://{Path(model_path).resolve()}/sharedfile',
                                             world_size=world_size,
                                             rank=self['setup/local_rank'])
        self['setup/rank'] = dist.get_rank()
        self['setup/dist_size'] = dist.get_world_size()
        self._modules_load()

    def reset(self):
        """Dispose the current modules and rebuild them from the config."""
        for k, v in list(self.__dict__.items()):
            if 'module' in k:
                del self.__dict__[k]
        torch.cuda.empty_cache()
        self._modules_load()

    def _get(self, *keys):
        """Walk nested dicts along ``keys`` and return the leaf value."""
        v = self._y
        for k in keys:
            v = v[k]
        return v

    def _update(self, *keys, value):
        """Set a nested value, creating intermediate dicts as needed."""
        k = self._y
        for i in range(len(keys) - 1):
            k.setdefault(keys[i], {})
            k = k[keys[i]]
        k[keys[-1]] = value

    def __str__(self):
        return f'{self.file_name}\n{self._y}'

    def __contains__(self, item):
        """Return True if ``item`` appears as a key at any nesting depth."""
        def search_recursively(d, t):
            for k, v in d.items():
                if k == t:
                    return True
                # BUGFIX: the recursive result used to be discarded, so keys
                # nested below the top level were never reported as present.
                elif isinstance(v, dict) and search_recursively(v, t):
                    return True
            return False
        return search_recursively(self._y, item)

    def __getitem__(self, key):
        """cfg['a/b/c'] -> nested lookup."""
        return self._get(*key.split('/'))

    def __setitem__(self, key, value):
        """cfg['a/b/c'] = v -> nested assignment."""
        self._update(*key.split('/'), value=value)
# Manual smoke test: exercise the nested-key helpers with a sample config.
if __name__ == '__main__':
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    file_handler = logging.FileHandler('log.log')
    file_handler.setLevel(logging.INFO)
    log.addHandler(stream_handler)
    log.addHandler(file_handler)
    # NOTE(review): Argments.__init__ takes (yaml_file, cmd_args); this call
    # passes only the file and would raise TypeError -- confirm intent.
    Args = Argments('test.yaml')
    Args._update('path', 'abcd', 'efgh', value='zzzz')
    Args['path/cccc/dddd'] = 'ffff'
    log.debug(Args)
    log.debug(Args['path/cccc/dddd'])
    # print(Args)
    # print('path' in Args)
    # print(Args['path/abcd/efgh'])
    # print(Args['path/cccc/dddd'])
    # print(Args.module['lr_scheduler'])
| StarcoderdataPython |
4825041 | """ philoseismos: engineering seismologist's toolbox.
This file defines a TextualFileHeader object.
author: <NAME>
e-mail: <EMAIL> """
class TextualFileHeader:
    """ This object represents a textual file header of a SEG-Y file.

    The TFH is the first 3200 bytes of a SEG-Y file: 40 lines of 80
    characters, stored in EBCDIC (code page 500).
    """

    def __init__(self):
        """ Create a new TFH object with blank (all-spaces) contents. """
        self._contents = ' ' * 3200

    @classmethod
    def load(cls, file: str):
        """ Load a TFH from a SEG-Y file.

        Args:
            file (str) : Path to a SEG-Y file to load from.
        """
        tfh = cls()
        with open(file, 'br') as sgy:
            tfh._contents = sgy.read(3200).decode('cp500')
        return tfh

    def __repr__(self):
        return self._contents

    def __str__(self):
        # BUGFIX: __str__ previously returned None (bare `pass`), so
        # str(tfh) raised TypeError. Render the header as human-readable
        # 80-character lines instead.
        return '\n'.join(self._contents[i:i + 80]
                         for i in range(0, len(self._contents), 80))
| StarcoderdataPython |
27057 | <filename>iCount/tests/test_externals.py
# pylint: disable=missing-docstring, protected-access
import warnings
import unittest
import iCount.externals.cutadapt as cutadapt
import iCount.externals.star as star
from iCount.tests.utils import make_fasta_file, make_fastq_file, get_temp_dir, \
get_temp_file_name, make_file_from_list
class TestCutadapt(unittest.TestCase):
    """Integration tests that shell out to the installed cutadapt binary."""
    def setUp(self):
        self.adapter = 'AAAATTTTCCCCGGGG'
        self.reads = make_fastq_file(adapter=self.adapter, num_sequences=100,
                                     out_file=get_temp_file_name(extension='fastq'))
        self.tmp = get_temp_file_name(extension='fastq')
        warnings.simplefilter("ignore", ResourceWarning)
    def test_get_version_ok(self):
        version = cutadapt.get_version()
        # e.g. "1.18" -- a digit, a dot, then more digits.
        self.assertRegex(version, r'\d\.\d+')
    def test_run(self):
        # Exit code 0 means cutadapt trimmed the reads successfully.
        return_code = cutadapt.run(self.reads, self.tmp, self.adapter, qual_base=64,
                                   qual_trim=30, minimum_length=70)
        self.assertEqual(return_code, 0)
class TestStar(unittest.TestCase):
    """Integration tests that shell out to the installed STAR aligner."""
    def setUp(self):
        self.dir = get_temp_dir()
        self.index_dir = get_temp_dir()
        # Small synthetic genome, reads, and a minimal GTF annotation.
        self.genome = make_fasta_file(num_sequences=2, seq_len=1000)
        self.reads = make_fastq_file(genome=self.genome)
        self.annotation = make_file_from_list([
            ['1', '.', 'gene', '10', '20', '.', '+', '.',
             'gene_id "A";'],
            ['1', '.', 'transcript', '10', '20', '.', '+', '.',
             'gene_id "A"; transcript_id "AA";'],
            ['1', '.', 'exon', '10', '20', '.', '+', '.',
             'gene_id "A"; transcript_id "AA"; exon_number "1";'],
        ])
        warnings.simplefilter("ignore", ResourceWarning)
    def test_get_version_ok(self):
        version = star.get_version()
        # Version example: STAR_2.5.0a
        regex = r'STAR_\d\.[\d\w]+'
        self.assertRegex(version, regex)
    def test_build_index_bad_outdir(self):
        message = r'Output directory does not exist. Make sure it does.'
        with self.assertRaisesRegex(FileNotFoundError, message):
            star.build_index(self.genome, '/unexisting/outdir')
    def test_build_index(self):
        # No annotation
        return_code1 = star.build_index(self.genome, self.index_dir, overhang=100, overhang_min=8,
                                        threads=1)
        # With annotation
        return_code2 = star.build_index(self.genome, self.index_dir, annotation=self.annotation,
                                        overhang=100, overhang_min=8, threads=1)
        self.assertEqual(return_code1, 0)
        self.assertEqual(return_code2, 0)
    def test_map_reads_bad_genomedir(self):
        message = r'Directory with genome index does not exist. Make sure it does.'
        with self.assertRaisesRegex(FileNotFoundError, message):
            star.map_reads(self.reads, '/unexisting/genomedir', self.dir)
    def test_map_reads_bad_outdir(self):
        message = r'Output directory does not exist. Make sure it does.'
        with self.assertRaisesRegex(FileNotFoundError, message):
            star.map_reads(self.reads, self.dir, '/unexisting/outdir')
    def test_map_reads(self):
        # First: make index:
        # Give logfile_path to some /tmp location to not pollute working directory
        star.build_index(self.genome, self.index_dir)
        # No annotation
        return_code1 = star.map_reads(self.reads, self.index_dir, self.dir)
        # With annotation:
        return_code2 = star.map_reads(
            self.reads, self.index_dir, self.dir, annotation=self.annotation,
            multimax=10, mismatches=2, threads=1)
        self.assertEqual(return_code1, 0)
        self.assertEqual(return_code2, 0)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4805903 | import os
import logging
import re
from cog.utils import str2bool
# Helper: build an absolute path relative to this settings module.
rel = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
# Configuration directory; overridable via the ESGF_CONF_DIR environment variable.
ESGF_CONF_DIR = os.getenv('ESGF_CONF_DIR', rel(''))
'''
SITE SPECIFIC CONFIGURATION
These parameters are read from file 'cog_settings.cfg'
located in directory COG_CONFIG_DIR (or by default '/usr/local/cog/cog_config').
Each parameter has a default value.
'''
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
from cog.constants import SECTION_ESGF, SECTION_PID
ESGF_CONFIG = True
SITE_NAME = "Local CoG"
SITE_DOMAIN = "localhost:8000"
TIME_ZONE = 'America/Denver'
COG_MAILING_LIST = '<EMAIL>'
# NOTE(review): the SECRET_KEY literal below appears truncated/anonymized
# (unterminated string) -- restore the closing quote from the real config.
SECRET_KEY = '<KEY>
# for SQLLite back-end
DATABASE_PATH = ESGF_CONF_DIR + "django.data"
# for postgres back-end
DATABASE_NAME = "cogdb"
DATABASE_USER = "dbsuper" #siteManager.get('DATABASE_USER')
DATABASE_PASSWORD = "<PASSWORD>" #siteManager.get('DATABASE_PASSWORD')
DATABASE_HOST = "pcmdi8vm.llnl.gov" #siteManager.get('DATABASE_HOST', default='localhost')
DATABASE_PORT = "5432" #siteManager.get('DATABASE_PORT', default=5432)
MY_PROJECTS_REFRESH_SECONDS = 3600
PWD_EXPIRATION_DAYS = 0 # 0: no expiration
IDP_REDIRECT = None #siteManager.get('IDP_REDIRECT', default=None)
VISUS_SITE = None
HOME_PROJECT = 'TestProject'
MEDIA_ROOT = ESGF_CONF_DIR + 'site_media'
DEFAULT_SEARCH_URL = "http://pcmdi8vm.llnl.gov/esg-search/search/"
DJANGO_DATABASE = "postgres"
DEBUG = True
ALLOWED_HOSTS = ['localhost'] #siteManager.get('ALLOWED_HOSTS', default=SITE_DOMAIN).split(",")
IDP_WHITELIST = ESGF_CONF_DIR + "esgf_idp.xml" #siteManager.get('IDP_WHITELIST', default=None)
KNOWN_PROVIDERS = ESGF_CONF_DIR + "esgf_known_providers.xml" #siteManager.get('KNOWN_PROVIDERS', default=None)
PEER_NODES = ESGF_CONF_DIR + "esgf_cogs.xml"#siteManager.get('PEER_NODES', default=None)
USE_CAPTCHA = False
# DEVELOPMENT/PRODUCTION server switch
PRODUCTION_SERVER = True
WPS_ENDPOINT = None
# Fields that will be added to the query string
WPS_FIELDS = []
WPS_DATACART = False
# FIXME
# ESGF specific settings
ESGF_HOSTNAME = "localhost"#siteManager.get('ESGF_HOSTNAME', section=SECTION_ESGF, default='')
ESGF_DBURL = "postgresql://dbsuper:EsgfLLNL@pc<EMAIL>.<EMAIL>.<EMAIL>/esgcet" #siteManager.get('ESGF_DBURL', section=SECTION_ESGF)
ESGF_VERSION = "2" #siteManager.get('ESGF_VERSION', section=SECTION_ESGF)
# FIXME
# PID specific settings
# PID_CONFIG = siteManager.isPidEnabled()
# if PID_CONFIG:
#     PID_PREFIX = siteManager.get('PID_PREFIX', section=SECTION_PID, default='21.14101')
#     PID_MESSAGING_SERVICE_EXCHANGE = siteManager.get('PID_EXCHANGE', section=SECTION_PID, default='esgffed-exchange')
#     PID_CREDENTIALS = siteManager.get('PID_CREDENTIALS', section=SECTION_PID, default=None).split('\n')
#====================== standard django settings.py ======================
# IMPORTANT: this setting must be set to True if using COG behind a proxy server,
# otherwise redirects won't work properly
USE_X_FORWARDED_HOST = True
ADMINS = (
    # ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
# Both back-ends are declared; DJANGO_DATABASE selects the active one below.
DATABASES = {
    # SQLite database
    'sqllite3': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2','postgresql','mysql','sqlite3' or 'oracle'.
        'NAME': DATABASE_PATH,
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    },
    # Postgres
    'postgres': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': DATABASE_NAME,
        'USER': DATABASE_USER, # Not used with sqlite3.
        'PASSWORD': DATABASE_PASSWORD, # Not used with sqlite3.
        'HOST': DATABASE_HOST, # Defaults to 'localhost'
        'PORT': DATABASE_PORT, # Set to empty string for default. Not used with sqlite3.
    }
}
DATABASES['default'] = DATABASES[DJANGO_DATABASE]
logging.info('>>> Using Django database=%s' % DJANGO_DATABASE)
if DJANGO_DATABASE == 'sqllite3':
    logging.info("Database path=%s" % DATABASE_PATH)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
#TIME_ZONE = 'America/Denver'
# use the system time zone, wherever the application is installed
#TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# current site identifier - always first site in database
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
#MEDIA_ROOT = rel('site_media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
#MEDIA_URL = 'http://localhost:8000/site_media/'
MEDIA_URL = '/site_media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a trailing slash.
# Examples: "http://foo.com/media/", "/media/".
STATIC_URL = '/static/'
STATIC_ROOT = rel('static/')
# absolute path to root directory containing projects data
DATA_ROOT = os.path.join(MEDIA_ROOT, "data/")
# custom template, media and configuration directories
MYTEMPLATES = ESGF_CONF_DIR + "mytemplates"
MYMEDIA = ESGF_CONF_DIR + "mymedia"
# project-specific configuration directories
# must be writable by web server
PROJECT_CONFIG_DIR = os.path.join(MEDIA_ROOT, 'config')
# Make this unique, and don't share it with anybody.
#SECRET_KEY = '<KEY>'
# new TEMPLATES settings
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            MYTEMPLATES,
            rel('templates/'),
            rel('static/'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                'cog.context_processors.cog_settings',
            ],
            'debug': DEBUG,
        },
    },
]
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'cog.middleware.init_middleware.InitMiddleware',
    'cog.middleware.login_middleware.LoginMiddleware',
    'cog.middleware.session_middleware.SessionMiddleware',
    #'cog.middleware.password_middleware.PasswordMiddleware'
    #'django.contrib.sites.middleware.CurrentSiteMiddleware' # django 1.7
    #'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
#ROOT_URLCONF = 'COG.urls'
ROOT_URLCONF = 'urls'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django_openid_auth',
    'grappelli',
    'filebrowser',
    'django.contrib.admin.apps.SimpleAdminConfig',
    'django_comments',
    'django.contrib.staticfiles',
    'captcha',
    'layouts',
    'cog.apps.CogConfig',
    'cog.templatetags',
)
MIGRATION_MODULES = { 'django_openid_auth': 'cog.db_migrations.django_openid_auth' }
AUTHENTICATION_BACKENDS = (
    'django_openid_auth.auth.OpenIDBackend',
    'django.contrib.auth.backends.ModelBackend',
)
# Default is X_FRAME_OPTIONS='SAMEORIGIN'
# Using X_FRAME_OPTIONS = DENY breaks the CKEditor file uploader.
#X_FRAME_OPTIONS = 'DENY'
# login page URL (default: '/accounts/login')
LOGIN_URL = '/login/'
# OpenID login page
#LOGIN_URL = '/openid/login/'
# page to redirect after successful authentication, if 'next' parameter is not provided
#LOGIN_REDIRECT_URL='/cog/' # COG projects index
LOGIN_REDIRECT_URL = '/' # welcome page
# Custom user profile
AUTH_PROFILE_MODULE = "cog.UserProfile"
# HTTPS support: can only send cookies via SSL connections
#if PRODUCTION_SERVER:
#SESSION_COOKIE_SECURE = True
#CSRF_COOKIE_SECURE = True
#CSRF_COOKIE_HTTPONLY = True
#SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# CSS styles
#COLOR_DARK_TEAL = "#358C92"
#COLOR_LIGHT_TEAL = "#B9E0E3"
#COLOR_DARK_YELLOW = "#FAC2A4";
#COLOR_LIGHT_YELLOW = "#FCE79F";
#COLOR_DARK_GRAY = "#666666";
# FIXME: necessary for openid-auth since django 1.6.5 otherwise session is not serialized correctly
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# default size limit of files uploaded by users
MAX_UPLOAD_SIZE = 52428800
#=== django filebrowser settings =========================
# Filebrowser directory relative to MEDIA_ROOT (IMPORTANT: must have trailing slash)
FILEBROWSER_DIRECTORY = "projects/"
# versions generated when browsing images
FILEBROWSER_VERSIONS = {
    'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
    'thumbnail': {'verbose_name': 'Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
    #'small': {'verbose_name': 'Small (2 col)', 'width': 140, 'height': '', 'opts': ''},
    #'medium': {'verbose_name': 'Medium (4col )', 'width': 300, 'height': '', 'opts': ''},
    #'big': {'verbose_name': 'Big (6 col)', 'width': 460, 'height': '', 'opts': ''},
    #'large': {'verbose_name': 'Large (8 col)', 'width': 680, 'height': '', 'opts': ''},
}
# versions selectable through admin interface
FILEBROWSER_ADMIN_VERSIONS = ['thumbnail']
# absolute path to directory containing project specific media
PROJECTS_ROOT = os.path.join(MEDIA_ROOT, FILEBROWSER_DIRECTORY)
#=== django_openid_auth settings =========================
# create user account after first openid authentication
OPENID_CREATE_USERS = True
# do / do NOT keep updating the user profile from the IdP
OPENID_UPDATE_DETAILS_FROM_SREG = True
# list of allowed hosts to redirect to after successful openid login
# this is because django-openid-auth does not allow redirection to full URLs by default,
# unless the host is specifically enabled
ALLOWED_EXTERNAL_OPENID_REDIRECT_DOMAINS = [re.sub(':\d+','', SITE_DOMAIN) ]
#===== django-simpla-captcha =========
#CAPTCHA_LETTER_ROTATION = None
CAPTCHA_BACKGROUND_COLOR = '#FAC24A' # matches CoG dark yellow
#CAPTCHA_FOREGROUND_COLOR = "#666666" # matches CoG dark gray
CAPTCHA_IMAGE_SIZE = (100, 40)
#CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.math_challenge'
#==== Quality Control Flags references ==================================
QCFLAGS_URLS = { 'obs4mips_indicators': 'https://www.earthsystemcog.org/projects/obs4mips/data-indicators' }
| StarcoderdataPython |
4806901 | <filename>libs/datasets/data_source.py
import pathlib
from typing import List
from typing import Optional
from typing import Union
import structlog
from covidactnow.datapublic import common_df
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import PdFields
from libs.datasets import taglib
from libs.datasets.sources import can_scraper_helpers as ccd_helpers
from libs.datasets import dataset_utils
from libs.datasets import timeseries
from libs.datasets.dataset_utils import TIMESERIES_INDEX_FIELDS
from libs.datasets.timeseries import MultiRegionDataset
from functools import lru_cache
import pandas as pd
_log = structlog.get_logger()
class DataSource(object):
"""Represents a single dataset source; loads data and produces a MultiRegionDataset."""
# Name of source
# TODO(tom): Make an enum of these.
SOURCE_NAME = None
# Fields expected to be in the DataFrame loaded by common_df.read_csv
EXPECTED_FIELDS: Optional[List[CommonFields]] = None
# Path of the CSV to be loaded by the default `make_dataset` implementation.
COMMON_DF_CSV_PATH: Optional[Union[pathlib.Path, str]] = None
# Fields that are ignored when warning about missing and extra fields. By default some fields
# that contain redundant information about the location are ignored because cleaning them up
# isn't worth the effort.
IGNORED_FIELDS = (CommonFields.COUNTY, CommonFields.COUNTRY, CommonFields.STATE)
@classmethod
def _check_data(cls, data: pd.DataFrame):
expected_fields = pd.Index({*cls.EXPECTED_FIELDS, *TIMESERIES_INDEX_FIELDS})
# Keep only the expected fields.
found_expected_fields = data.columns.intersection(expected_fields)
data = data[found_expected_fields]
extra_fields = data.columns.difference(expected_fields).difference(cls.IGNORED_FIELDS)
missing_fields = expected_fields.difference(data.columns).difference(cls.IGNORED_FIELDS)
if not extra_fields.empty:
_log.info(
"DataSource produced extra unexpected fields, which were dropped.",
cls=cls.SOURCE_NAME,
extra_fields=extra_fields,
)
if not missing_fields.empty:
_log.info(
"DataSource failed to produce all expected fields",
cls=cls.SOURCE_NAME,
missing_fields=missing_fields,
)
return data
@classmethod
@lru_cache(None)
def make_dataset(cls) -> timeseries.MultiRegionDataset:
"""Default implementation of make_dataset that loads timeseries data from a CSV."""
assert cls.COMMON_DF_CSV_PATH, f"No path in {cls}"
data_root = dataset_utils.LOCAL_PUBLIC_DATA_PATH
input_path = data_root / cls.COMMON_DF_CSV_PATH
data = common_df.read_csv(input_path, set_index=False)
data = cls._check_data(data)
return MultiRegionDataset.from_fips_timeseries_df(data).add_provenance_all(cls.SOURCE_NAME)
# TODO(tom): Clean up the mess that is subclasses of DataSource and
# instances of DataSourceAndRegionMasks
class CanScraperBase(DataSource):
# Must be set in subclasses.
VARIABLES: List[ccd_helpers.ScraperVariable]
@classmethod
def transform_data(cls, data: pd.DataFrame) -> pd.DataFrame:
"""Subclasses may override this to transform the data DataFrame."""
return data
@staticmethod
@lru_cache(None)
def _get_covid_county_dataset() -> ccd_helpers.CanScraperLoader:
return ccd_helpers.CanScraperLoader.load()
@classmethod
@lru_cache(None)
def make_dataset(cls) -> timeseries.MultiRegionDataset:
"""Default implementation of make_dataset that loads data from the parquet file."""
assert cls.VARIABLES
ccd_dataset = CanScraperBase._get_covid_county_dataset()
data, source_urls_df = ccd_dataset.query_multiple_variables(
cls.VARIABLES, log_provider_coverage_warnings=True
)
data = cls.transform_data(data)
data = cls._check_data(data)
ds = MultiRegionDataset.from_fips_timeseries_df(data).add_provenance_all(cls.SOURCE_NAME)
if not source_urls_df.empty:
# For each FIPS-VARIABLE pair keep the source_url row with the last DATE.
source_urls_df = (
source_urls_df.sort_values(CommonFields.DATE)
.groupby([CommonFields.FIPS, PdFields.VARIABLE], sort=False)
.last()
.reset_index()
.drop(columns=[CommonFields.DATE])
)
source_urls_df[taglib.TagField.TYPE] = taglib.TagType.SOURCE_URL
ds = ds.append_fips_tag_df(source_urls_df)
return ds
| StarcoderdataPython |
3330393 | <filename>easy_lmfit/easy_lmfit.py<gh_stars>1-10
import numpy as np
import scipy
import inspect
import lmfit
from lmfit import *
from lmfit.models import *
from matplotlib import pyplot as plt
import warnings
import re
import asteval
def _print_model_summary(name, model):
    """Print a model's independent variables and parameters.

    Also prints an empty ``p0`` dict literal that can be copy-pasted
    into a call to ``lm_curve_fit``.
    """
    print('Model is: %s' % name)
    param_dict = {}
    for indep_var in model.independent_vars:
        print('Independent Variable: %s' % indep_var)
    for i, key in enumerate(model.param_names):
        print('Parameter # %d: %s' % (i + 1, key))
        param_dict[key] = None
    print('p0 Param dictionary')
    print('p0=%s' % str(param_dict))


def get_model_params(fit_func):
    """Describe the fit parameters of a model and print an empty p0 dict.

    Accepts a built-in lmfit model class, an lmfit Model/CompositeModel
    instance, or a plain Python function (which is wrapped in ``Model``).

    The three original branches printed the same summary with duplicated,
    slightly inconsistent code ('p0 Param dictionary' vs 'p0 param
    dictionary'); the output is now produced by one shared helper.

    Raises:
        Exception: if *fit_func* is none of the supported kinds.
    """
    if inspect.isclass(fit_func):
        if getattr(fit_func, '__module__', None) == lmfit.models.__name__:
            # Built-in lmfit model class: instantiate it to inspect parameters.
            _print_model_summary(getattr(fit_func, '__name__'), fit_func())
        # Classes from other modules are silently ignored (historical behavior).
    elif type(fit_func) == lmfit.model.CompositeModel or type(fit_func) == lmfit.model.Model:
        if getattr(fit_func, '__module__', None) == lmfit.model.__name__:
            # Model instance: extract the readable name from its repr.
            _print_model_summary(str(fit_func).split(': ')[1].rsplit('>')[0], fit_func)
    elif inspect.isfunction(fit_func):
        # Plain function: wrap it so lmfit can discover its parameters.
        _print_model_summary(fit_func.__name__, Model(fit_func))
    else:
        raise Exception('ERROR: fit function is %s, needs to be lm_fit model class or function.'
                        % str(type(fit_func)).split(' ')[1].rsplit('>')[0])
def get_lm_models():
    """Print the names of all model classes bundled with ``lmfit.models``."""
    divider = '----------------------------------'
    print('Available lmfit model functions:')
    print(divider)
    for name, _cls in inspect.getmembers(lmfit.models, inspect.isclass):
        print(name)
    print(divider)
    print('To create an empty parameter dict or learn about the variables of the different models use lmf.get_model_params(fcn)')
    print('For more information on the above functional forms go to: https://lmfit.github.io/lmfit-py/builtin_models.html')
def asteval_convert(fcn, handle=None):
    """Return the name and argument names of a plain Python function.

    Args:
        fcn: A plain Python function (``inspect.isfunction`` must be True).
        handle: Optional name to report instead of ``fcn.__name__``.

    Returns:
        tuple: ``(name, arg_names)`` where ``arg_names`` is the list of
        positional argument names from the function's signature.

    Raises:
        Exception: if *fcn* is not a function.

    Note:
        The original implementation also created a throwaway
        ``asteval.Interpreter()`` and registered *fcn* in its symbol table;
        that interpreter was local and never returned or stored, so the
        registration had no observable effect and has been removed.
    """
    if not inspect.isfunction(fcn):
        raise Exception('ERROR: input function is type %s'
                        % str(type(fcn)).split(' ')[1].rsplit('>')[0])
    fcn_name = handle if handle is not None else fcn.__name__
    fcn_vars = inspect.getfullargspec(fcn)[0]
    return fcn_name, fcn_vars
def _collect_fit_results(fit_model, params, x_data, y_data, verbose, plot_fit,
                         full_output, as_dict):
    """Run the lmfit fit and package values/uncertainties as dict or list."""
    result = fit_model.fit(y_data, params, x=x_data)
    if as_dict:
        fit_params = {}
        fit_err = {}
        for key in result.params.keys():
            fit_params[key] = result.params[key].value
            fit_err[key] = result.params[key].stderr
    else:
        fit_params = []
        fit_err = []
        for key in result.params.keys():
            fit_params.append(result.params[key].value)
            fit_err.append(result.params[key].stderr)
    if verbose:
        print(result.fit_report())
    if plot_fit:
        fig = plt.figure(figsize=(10, 8))
        result.plot(fig=fig)
    if full_output:
        return fit_params, fit_err, result
    return fit_params, fit_err


def lm_curve_fit(fit_func, x_data, y_data, p0, p_over=None, param_domain=None, p_exprs=None, fit_domain=None, verbose=False, plot_fit=False, full_output=False):
    '''
    :param fit_func: Can be a user defined function, composite model, or a built in lm_fit fit function. For list of built-in models
                     use get_lm_models().
    :param x_data: x-axis data (array like (n,))
    :param y_data: y_axis data (array like (n,))
    :param p0: Initial fit parameter values, list or dict object of form p0=[a1,a2,a3..an] or p0={a1:value, a2:value...an:value}
               if p0 is of list type than p_over, param_domain, and p_exprs must also be entered as list the length of number of
               fit model input parameters. If dict is used p0 can be any length as long as parameter keys match those of input function.
               To determine parameter keys and return an empty dict object for use in fit use get_model_params(fcn).
    :param p_over: Parameter override, allows the ability to fix parameter at a set value, list or dict of form of p0. If list
                   must be length of p0, if dict only needs the desired parameter for function input parameter
    :param param_domain: Bounded domain in which fit parameter is allowed, list or dict of form of p0.
    :param fit_domain: domain over which data is actually fitted, given as a tuple (min,max)
    :param p_exprs: Parameter constraint equations, given as string, can have passable user defined functions and variables.
                    For dict object of form: p_exprs={'a1':'fcn1', 'a2':'fcn2'...}, with user defined variables input into dict as 'var1':[val, min, max]
                    and user defined functions given as 'fcn_handle':user_fcn. For list object, each constrain must be given as a dict
                    {'fcn':'fcn1','user_var':[val,min,max],'fcn_handle':user_fcn} where 'user_var' and 'fcn_handle' are optional.
    :param verbose: Formatted print statement that gives complete fit parameter information and fit statistics from lm_fit
    :param plot_fit: Plot of data with fit along with residuals plot from lm_fit
    :param full_output: Returns fit_params, fit_params_error, and returns lm_fit full fit result object
    :return: fit_params, fit_error as list or dict depending on input type if full_output=False

    Fixes relative to the original implementation:
      * list-branch parameter-override counter was reset (``vary_len=0``)
        instead of incremented, so the "not enough parameters" guard could
        never fire; it now mirrors the dict branch (``vary_len += 1``).
      * removed a leftover debug ``print`` in the model-detection branch and
        an unreachable ``return`` after a ``raise``.
      * the fit_domain type error message referenced ``p_over`` instead of
        ``fit_domain`` (and misspelled "must").
      * two error paths could themselves crash: a ``%d`` format applied to a
        type-name string, and a reference to the undefined ``val[key]`` when
        ``p_exprs`` was not a list; both now report the intended message.
      * a model class/instance from an unexpected module previously left
        ``fit_model`` unbound (later NameError); it now raises explicitly.
    '''
    # --- build the lmfit model and collect its parameter names -----------
    if inspect.isclass(fit_func):
        if getattr(fit_func, '__module__', None) == lmfit.models.__name__:
            fit_model = fit_func()
            fcn_vars = fit_model.param_names
        else:
            raise Exception('ERROR: model class must come from lmfit.models, got module %s'
                            % getattr(fit_func, '__module__', None))
    elif type(fit_func) == lmfit.model.CompositeModel or type(fit_func) == lmfit.model.Model:
        if getattr(fit_func, '__module__', None) == lmfit.model.__name__:
            fit_model = fit_func
            fcn_vars = fit_model.param_names
        else:
            raise Exception('ERROR: model instance must come from lmfit.model, got module %s'
                            % getattr(fit_func, '__module__', None))
    elif inspect.isfunction(fit_func):
        fit_model = Model(fit_func)
        fcn_vars = fit_model.param_names
    else:
        raise Exception('ERROR: fit function is %s, needs to be lm_fit model class or function.'
                        % str(type(fit_func)).split(' ')[1].rsplit('>')[0])
    # --- restrict the data to the requested fit domain --------------------
    if fit_domain is not None:
        if type(fit_domain) == list:
            if len(fit_domain) == 2:
                ind = np.searchsorted(x_data, fit_domain)
                x_data = x_data[ind[0]:ind[1]]
                y_data = y_data[ind[0]:ind[1]]
            else:
                raise Exception('ERROR: fit_domain must be list of len=2, len=%d' % len(fit_domain))
        else:
            raise Exception('ERROR: fit_domain must be list type, %s type provided'
                            % str(type(fit_domain)).split(' ')[1].rsplit('>')[0])
    # ====================== dict-style inputs =============================
    if type(p0) is dict:
        # Initial guesses: missing or None entries default to zero.
        p_guess = {}
        for key in fcn_vars:
            if key in p0 and p0[key] is not None:
                p_guess[key] = p0[key]
            else:
                # if initial guess not provided set to zero
                p_guess[key] = 0
        # Warn about keys that do not match any model parameter.
        unused_keys = [key for key in p0.keys() if key not in p_guess]
        if len(unused_keys) != 0:
            warnings.warn('WARNING: Unused Keys: %s. Valid Parameters: %s'
                          % (', '.join(map(str, unused_keys)), ', '.join(map(str, fcn_vars))))
        # Parameter overrides: fixed values are not varied during the fit.
        vary_param = {key: True for key in p_guess.keys()}
        if p_over is not None:
            if type(p_over) is dict:
                vary_len = 0
                for key in p_guess.keys():
                    if key in p_over and p_over[key] is not None:
                        p_guess[key] = p_over[key]
                        vary_param[key] = False
                        vary_len += 1
                        if vary_len == len(p_guess):
                            raise Exception('ERROR: Not enough parameters to fit')
            else:
                raise Exception('ERROR: p_over is %s, must be dict object'
                                % str(type(p_over)).split(' ')[1].rsplit('>')[0])
        # Parameter domains: default is unbounded.
        param_domain_vals = {key: [-np.inf, np.inf] for key in p_guess.keys()}
        if param_domain is not None:
            if type(param_domain) == dict:
                for key in param_domain.keys():
                    if param_domain[key] is not None:
                        param_domain_vals[key] = param_domain[key]
            else:
                raise Exception('ERROR: param_domain is %s, must be dict object'
                                % str(type(param_domain)).split(' ')[1].rsplit('>')[0])
        # Create lmfit Parameters.
        params = Parameters()
        for key in p_guess.keys():
            params.add(key, value=p_guess[key], vary=vary_param[key],
                       min=param_domain_vals[key][0], max=param_domain_vals[key][1])
        # Constraining expressions plus user-supplied dummy variables/functions.
        const_eqn = {key: None for key in p_guess.keys()}
        if p_exprs is not None:
            if type(p_exprs) == dict:
                unused_dummy_keys = []
                for key in p_exprs.keys():
                    if key in p_guess:
                        const_eqn[key] = p_exprs[key]
                    else:
                        # Not a model parameter: treat it as a dummy variable
                        # or user function referenced by one of the expressions.
                        if p_exprs[key] is not None:
                            count = 0
                            for key_expr in p_guess.keys():
                                if key_expr in p_exprs:
                                    count += sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(key),
                                                                        p_exprs[key_expr]))
                            if count == 0:
                                unused_dummy_keys.append(key)
                            elif inspect.isfunction(p_exprs[key]):
                                # Register user function with lmfit's asteval.
                                params._asteval.symtable[key] = p_exprs[key]
                            elif type(p_exprs[key]) == list:
                                if len(p_exprs[key]) == 3:
                                    params.add(key, value=p_exprs[key][0],
                                               min=p_exprs[key][1], max=p_exprs[key][2])
                                else:
                                    raise Exception('%s needs list len=3, [val, min, max], provided list of len=%d'
                                                    % (p_exprs[key], len(p_exprs[key])))
                            else:
                                raise Exception('Variable input must be function or list, input is %s for %s'
                                                % (str(type(p_exprs[key])).split(' ')[1].rsplit('>')[0], key))
                if len(unused_dummy_keys) != 0:
                    warnings.warn('WARNING: Unused dummy variables: %s'
                                  % (', '.join(map(str, unused_dummy_keys))))
            else:
                raise Exception('ERROR: p_exprs is %s, must be dict object'
                                % str(type(p_exprs)).split(' ')[1].rsplit('>')[0])
        # Attach constraining expressions to the appropriate parameters.
        for key in const_eqn.keys():
            params[key].set(expr=const_eqn[key])
        return _collect_fit_results(fit_model, params, x_data, y_data,
                                    verbose, plot_fit, full_output, as_dict=True)
    # ====================== list-style inputs =============================
    elif type(p0) is list:
        if len(p0) != len(fcn_vars):
            raise Exception('ERROR: Initial guess incorrect length, %d entered, %d required'
                            % (len(p0), len(fcn_vars)))
        vary_param = [True] * len(p0)
        if p_over is not None:
            if type(p_over) == list:
                if len(p_over) != len(fcn_vars):
                    raise Exception('Parameter override must be len=%d, instead len=%d'
                                    % (len(fcn_vars), len(p_over)))
                vary_len = 0
                for i, val in enumerate(p_over):
                    if val is not None:
                        p0[i] = val
                        vary_param[i] = False
                        # FIX: counter was reset to 0 here, disabling the guard.
                        vary_len += 1
                        if vary_len == len(p0):
                            raise Exception('ERROR: Not enough parameters to fit')
            else:
                raise Exception('ERROR: p_over is %s, must be list object'
                                % str(type(p_over)).split(' ')[1].rsplit('>')[0])
        # Missing initial guesses default to zero.
        for i, val in enumerate(p0):
            if val is None:
                p0[i] = 0.0
        # Parameter domains: default is unbounded (fresh list per parameter).
        param_domain_vals = [[-np.inf, np.inf] for _ in p0]
        if param_domain is not None:
            if len(param_domain) != len(fcn_vars):
                raise Exception('Parameter domain must be len=%d, instead len=%d'
                                % (len(fcn_vars), len(param_domain)))
            if type(param_domain) == list:
                for i, domain in enumerate(param_domain):
                    if domain is not None:
                        param_domain_vals[i] = domain
            else:
                raise Exception('ERROR: param_domain is %s, must be list object'
                                % str(type(param_domain)).split(' ')[1].rsplit('>')[0])
        params = Parameters()
        for i, (guess, vary, domain) in enumerate(zip(p0, vary_param, param_domain_vals)):
            params.add(fcn_vars[i], value=guess, vary=vary, min=domain[0], max=domain[1])
        # Constraining expressions, one dict per parameter slot.
        const_eqn = [None] * len(p0)
        if p_exprs is not None:
            if type(p_exprs) == list:
                if len(p_exprs) != len(fcn_vars):
                    raise Exception('Parameter domain must be len=%d, instead len=%d'
                                    % (len(fcn_vars), len(p_exprs)))
                for i, val in enumerate(p_exprs):
                    if val is not None:
                        if type(val) != dict:
                            # FIX: original used %d with a string argument here.
                            raise Exception("Parameter expression must be type=dict of form "
                                            "{'fcn':expr,'opt_var':[value, min, max]}, instead %s"
                                            % str(type(val)).split(' ')[1].rsplit('>')[0])
                        if 'fcn' not in val:
                            raise Exception('Missing \'fcn\' key in input dict object')
                        for key in val.keys():
                            if key == 'fcn':
                                const_eqn[i] = val[key]
                            elif val[key] is not None:
                                count = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(key), val['fcn']))
                                if count == 0:
                                    warnings.warn('WARNING: Unused dummy variable: %s' % key)
                                elif inspect.isfunction(val[key]):
                                    params._asteval.symtable[key] = val[key]
                                elif type(val[key]) == list:
                                    if len(val[key]) == 3:
                                        params.add(key, value=val[key][0], min=val[key][1], max=val[key][2])
                                    else:
                                        raise Exception('%s needs list len=3, [val, min, max], provided list of len=%d'
                                                        % (val[key], len(val[key])))
                                else:
                                    raise Exception('Variable input must be function or list, type of input is %s'
                                                    % str(type(val[key])).split(' ')[1].rsplit('>')[0])
            else:
                # FIX: original referenced undefined val[key] here (NameError).
                raise Exception("Parameter expression must be type=list, made of type=dict of form "
                                "{'fcn':expr,'opt_var':[value, min, max]}, instead %s"
                                % str(type(p_exprs)).split(' ')[1].rsplit('>')[0])
        for exprs, var in zip(const_eqn, fcn_vars):
            params[var].set(expr=exprs)
        return _collect_fit_results(fit_model, params, x_data, y_data,
                                    verbose, plot_fit, full_output, as_dict=False)
    else:
        raise Exception('ERROR: Unsupported data type input %s, must be dict or list'
                        % str(type(p0)).split(' ')[1].rsplit('>')[0])
| StarcoderdataPython |
167660 | <gh_stars>0
"""Models used by django-watson."""
from __future__ import unicode_literals
import uuid
from django.db import models
from django.db.models.fields.related import RelatedField
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_str
from django.utils.functional import cached_property
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
# Concrete integer field classes used to decide whether a model's primary key
# can be mirrored into SearchEntry.object_id_int (see has_int_pk below).
INTEGER_FIELDS = (models.IntegerField, models.AutoField,)
BIG_INTEGER_FIELDS = (models.BigIntegerField,)
try:
    # BigAutoField may be absent on old Django versions; extend when present.
    BIG_INTEGER_FIELDS += (models.BigAutoField,)
except AttributeError: # Django < 2.0.
    pass
def get_pk_output_field(model):
    """Build a field instance matching *model*'s primary key type.

    The returned field is intended for use as the ``output_field`` of a
    database CAST expression.
    """
    pk_field = model._meta.pk
    # A relational pk takes its type from the referenced model.
    if isinstance(pk_field, RelatedField):
        return get_pk_output_field(pk_field.remote_field.model)
    kwargs = {}
    if isinstance(pk_field, models.CharField):
        # Some versions of Django produce invalid SQL for the CAST function
        # (in some databases) if CharField does not have max_length passed.
        # Therefore, it is necessary to copy over the max_length of the
        # original field to avoid errors.
        # See: https://code.djangoproject.com/ticket/28371
        kwargs['max_length'] = pk_field.max_length
        output_cls = type(pk_field)
    elif isinstance(pk_field, models.AutoField):
        # Some versions of Django appear to produce invalid SQL in MySQL when
        # attempting to CAST with AutoField types, so cast to the
        # corresponding plain integer type instead.
        output_cls = (models.BigIntegerField
                      if isinstance(pk_field, models.BigAutoField)
                      else models.IntegerField)
    else:
        output_cls = type(pk_field)
    return output_cls(**kwargs)
def has_int_pk(model):
    """Tests whether the given model has a (non-big) integer primary key."""
    pk = model._meta.pk
    # Direct case: an integer-typed pk that is not a 64-bit variant.
    if isinstance(pk, INTEGER_FIELDS) and not isinstance(pk, BIG_INTEGER_FIELDS):
        return True
    # Relational case: follow the foreign key to the referenced model's pk.
    return isinstance(pk, models.ForeignKey) and has_int_pk(pk.remote_field.model)
def get_str_pk(obj, connection):
    """Return the object's primary key as a string.

    UUID primary keys are rendered as dashless hex on non-PostgreSQL
    backends; every other key is coerced with force_str.
    """
    primary_key = obj.pk
    if isinstance(primary_key, uuid.UUID) and connection.vendor != "postgresql":
        return primary_key.hex
    return force_str(primary_key)
# Instance-attribute name under which SearchEntry caches its decoded meta.
META_CACHE_KEY = "_meta_cache"
class SearchEntry(models.Model):
    """An entry in the search index.

    One row per (engine, indexed object) pair; the indexed object is reached
    through a generic foreign key (content_type + object_id).
    """
    # Slug of the search engine this entry belongs to; several engines can
    # share the same table.
    engine_slug = models.CharField(
        max_length=200,
        db_index=True,
        default="default",
    )
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
    )
    # Primary key of the indexed object, stored as text so any pk type fits.
    object_id = models.TextField()
    # Integer copy of object_id for models with int pks; indexed for fast joins.
    object_id_int = models.IntegerField(
        blank=True,
        null=True,
        db_index=True,
    )
    object = GenericForeignKey()
    title = models.CharField(
        max_length=1000,
    )
    description = models.TextField(
        blank=True,
    )
    # Full searchable text of the indexed object.
    content = models.TextField(
        blank=True,
    )
    url = models.CharField(
        max_length=1000,
        blank=True,
    )
    # Engine-specific serialized metadata; decoded lazily via `meta`.
    meta_encoded = models.TextField()
    def _deserialize_meta(self):
        # Look up the engine and the model adapter that indexed this object,
        # then let the adapter decode meta_encoded.
        from watson.search import SearchEngine
        engine = SearchEngine._created_engines[self.engine_slug]
        model = ContentType.objects.get_for_id(self.content_type_id).model_class()
        adapter = engine.get_adapter(model)
        return adapter.deserialize_meta(self.meta_encoded)
    @cached_property
    def meta(self):
        """Returns the meta information stored with the search entry."""
        # Attempt to use the cached value.
        # NOTE(review): this manual META_CACHE_KEY check looks redundant with
        # @cached_property, which already caches on first access — confirm
        # whether external code still sets the attribute directly.
        if hasattr(self, META_CACHE_KEY):
            return getattr(self, META_CACHE_KEY)
        # Decode the meta.
        meta_value = self._deserialize_meta()
        setattr(self, META_CACHE_KEY, meta_value)
        return meta_value
    def get_absolute_url(self):
        """Returns the URL of the referenced object."""
        return self.url
    def __str__(self):
        """Returns a string representation."""
        return self.title
    class Meta:
        verbose_name_plural = "search entries"
        app_label = 'watson'
| StarcoderdataPython |
160893 | """Views for this awesome app."""
from django.shortcuts import render
from django.urls import reverse_lazy
from django.http import Http404
from django.contrib.auth.models import User
from django.views.generic import TemplateView, CreateView
from imager_images.models import Photo, Album
from imager_profile.models import ImagerProfile
from imager_images.forms import AlbumForm
class UserView(TemplateView):
    """View profile of other users."""
    # NOTE(review): a plain TemplateView does not use a `model` attribute;
    # confirm whether a DetailView was intended.
    model = ImagerProfile
class ProfileView(TemplateView):
    """Profile view class based view."""
    # NOTE(review): `model` has no effect on a TemplateView; confirm intent.
    model = ImagerProfile
class EditProfileView(CreateView):
    """Edit profile information.

    NOTE(review): as a CreateView this creates a *new* ImagerProfile rather
    than editing an existing one — an UpdateView may have been intended;
    confirm against the URL configuration.
    """
    model = ImagerProfile
    # NOTE(review): evaluated at class-definition time, this binds the model
    # field descriptor rather than any particular user's id — looks
    # unintentional; confirm it is actually used anywhere.
    user_id = ImagerProfile.user_id
    fields = [
        'website',
        'location',
        'bio',
        'camera',
        'services',
        'photo_styles',
        'fee',
        'phone'
    ]
    success_url = reverse_lazy('user_profile')
    def form_valid(self, form):
        """Stamp the submitting user on the profile and save; 404 for anonymous users.

        NOTE(review): sibling views set ``form.instance.user``; this one sets
        ``author`` — verify which attribute ImagerProfile actually defines.
        """
        if self.request.user.is_authenticated:
            form.instance.author = self.request.user
            self.object = form.save()
            return super(EditProfileView, self).form_valid(form)
        else:
            raise Http404()
class AddImage(CreateView):
    """Create a new ``Photo`` owned by the logged-in user."""
    model = Photo
    fields = [
        'image',
        'title',
        'description',
        'published'
    ]
    success_url = reverse_lazy('library')
    def form_valid(self, form):
        """Attach the current user to the photo and save; 404 for anonymous users."""
        if not self.request.user.is_authenticated:
            raise Http404()
        form.instance.user = self.request.user
        self.object = form.save()
        return super(AddImage, self).form_valid(form)
class AddAlbum(CreateView):
    """Create a new ``Album`` owned by the logged-in user."""
    model = Album
    form_class = AlbumForm
    success_url = reverse_lazy('library')
    def form_valid(self, form):
        """Attach the current user to the album and save; 404 for anonymous users."""
        if not self.request.user.is_authenticated:
            raise Http404()
        form.instance.user = self.request.user
        self.object = form.save()
        return super(AddAlbum, self).form_valid(form)
| StarcoderdataPython |
1773585 | import sys
from PyQt5.QtWidgets import QApplication, \
QWidget, QPushButton, QAction, QLineEdit, \
QMessageBox, QMainWindow, QHBoxLayout, QDialog, \
QVBoxLayout, QGridLayout, QGroupBox, QFormLayout, QRadioButton, QMenu, QMenuBar, \
QTreeWidget, QTreeWidgetItem, QTextEdit, QFrame, QTabWidget, QLabel, QCheckBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
'''
参考资料
https://www.jb51.net/article/181684.htm
'''
class MyMainWind(QMainWindow):
    """Main window: a QTreeWidget on the left and a QTabWidget on the right,
    separated by a vertical line, with a menu bar and a status bar."""
    def __init__(self):
        super(MyMainWind, self).__init__()
        self.title = "2 windows within one frame"
        self.left = 300 #the coordinate of window opened on screen
        self.top = 300
        self.width = 600
        self.height = 480
        self.initUI()
    def initUI(self):
        """Build all widgets and lay them out, then show the window."""
        # set basic
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        # status bar
        self.status = self.statusBar()
        self.status.setSizeGripEnabled(True) # show the resize grip in the bottom-right corner
        self.status.showMessage("StatusBar Ready ...")
        # menu bar
        self.menu = QMenu()
        self.menu.addAction('&Open')
        self.menu.addAction('&Exit')
        # NOTE(review): `self.menuBar` shadows QMainWindow.menuBar(); consider
        # a different attribute name.
        self.menuBar = QMenuBar()
        fileAct = self.menuBar.addAction('&File')
        fileAct.setMenu(self.menu)
        self.menuBar.addAction('&About')
        self.setMenuBar(self.menuBar)
        # 2 widget
        self.tree = QTreeWidget()
        self.tree.setColumnCount(1) # set the number of columns
        self.tree.setHeaderLabels(['QProfiler items']) # set the header title of the tree widget
        self.tree.setIndentation(20) # indentation of items
        # set up the root node and its children
        Perfmon = QTreeWidgetItem()
        Perfmon.setText(0, 'Perfmon')
        perfmon_00 = QTreeWidgetItem()
        perfmon_00.setText(0, 'perfmon_00')
        Perfmon.addChild(perfmon_00)
        perfmon_01 = QTreeWidgetItem()
        perfmon_01.setText(0, 'perfmon_01')
        Perfmon.addChild(perfmon_01)
        perfmon_02 = QTreeWidgetItem()
        perfmon_02.setText(0, 'perfmon_02')
        Perfmon.addChild(perfmon_02)
        perfmon_03 = QTreeWidgetItem()
        perfmon_03.setText(0, 'perfmon_03')
        Perfmon.addChild(perfmon_03)
        self.tree.addTopLevelItem(Perfmon)
        # separator line
        self.line = QFrame()
        self.line.setGeometry(QRect(250, 340, 3, 61))
        self.line.setFrameShape(QFrame.VLine)
        self.line.setFrameShadow(QFrame.Sunken)
        self.tab = self.createTabWidget()
        hLayout = QHBoxLayout()
        hLayout.addWidget(self.tree)
        hLayout.addWidget(self.line)
        hLayout.addWidget(self.tab)
        #hLayout.setStretch(0, 1)
        # set layout
        widget = QWidget()
        widget.setLayout(hLayout)
        self.setCentralWidget(
            widget
        ) # on a QMainWindow a layout can only be set on the central widget, hence the wrapper QWidget
        self.show()
    def createTabWidget(self):
        """Create the right-hand tab widget with three form tabs and a text tab."""
        tab = QTabWidget()
        tab.tabCloseRequested[int].connect(self.closeTab) # pyqtSignal carrying the tab index
        tab1 = QWidget()
        tab2 = QWidget()
        tab3 = QWidget()
        tab4 = QTextEdit()
        tab.addTab(tab1, 'tab 1')
        tab.addTab(tab2, 'tab 2')
        tab.addTab(tab3, 'tab 3')
        tab.addTab(tab4, 'tab Edit')
        tab.setTabsClosable(True)
        tab.setMovable(True)
        tab.setDocumentMode(True)
        self.tab1_ui(tab, tab1)
        self.tab2_ui(tab, tab2)
        self.tab3_ui(tab, tab3)
        return tab
    def tab1_ui(self, tab, wg):
        """Populate tab 1 (contact details) with a form layout."""
        # form layout
        layout = QFormLayout()
        # add single-line text inputs for name and addresses
        layout.addRow('姓名', QLineEdit())
        layout.addRow('地址', QLineEdit())
        layout.addRow('地址', QLineEdit())
        layout.addRow('地址', QLineEdit())
        layout.addWidget(QPushButton('button'))
        layout.addRow(QLabel('xxx'), QPushButton('yyy'))
        # set the tab title and layout
        wg.setLayout(layout)
        tab.setTabText(0, '联系方式')
    def tab2_ui(self, tab, wg):
        """Populate tab 2 (personal details): form layout with a nested row."""
        # outer form layout, inner horizontal layout
        layout=QFormLayout()
        sex=QHBoxLayout()
        # add radio buttons to the horizontal layout
        sex.addWidget(QRadioButton('男'))
        sex.addWidget(QRadioButton('女'))
        # add widgets to the form layout
        layout.addRow(QLabel('性别'),sex)
        layout.addRow('生日',QLineEdit())
        # set the tab title and layout
        wg.setLayout(layout)
        tab.setTabText(1,'个人详细信息')
    def tab3_ui(self, tab, wg):
        """Populate tab 3 (education): a horizontal row of checkboxes."""
        # horizontal layout
        layout=QHBoxLayout()
        # add widgets to the layout
        layout.addWidget(QLabel('科目'))
        layout.addWidget(QCheckBox('物理'))
        layout.addWidget(QCheckBox('高数'))
        # set the tab title and layout
        wg.setLayout(layout)
        tab.setTabText(2,'教育程度')
    def closeTab(self, idx):
        """Slot for tabCloseRequested: remove the tab at index ``idx``."""
        print('remove tab :', idx)
        self.tab.removeTab(idx)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, construct the main
    # window (which shows itself in initUI), then run the event loop.
    app = QApplication(sys.argv)
    myMain = MyMainWind()
    sys.exit(app.exec_())
3256233 | <gh_stars>0
from django.apps import AppConfig
class AwardsAppConfig(AppConfig):
    """Django application configuration for the awards_app package."""
    name = 'awards_app'
| StarcoderdataPython |
# Package version string.
VERSION = '0.5.3'
| StarcoderdataPython |
3351375 | """https://leetcode.com/problems/missing-number/
Constraints:
- n == nums.length
- 1 ≤ n ≤ 104
- 0 ≤ nums[i] ≤ n
- All the numbers of nums are unique.
Examples:
>>> Solution().missingNumber([])
0
See Also:
- pytudes/_2021/educative/grokking_the_coding_interview/bitwise_xor/_0__find_missing_number__easy.py
- pytudes/_2021/educative/grokking_the_coding_interview/cyclic_sort/_1__find_the_missing_number__easy.py
"""
class Solution:
    """LeetCode entry point; delegates to the cyclic-sort implementation below."""
    def missingNumber(self, nums: list[int]) -> int:
        """Return the single number in [0, len(nums)] that is missing from ``nums``."""
        return missing_number_cyclic_sort(nums)
def missing_number_cyclic_sort(nums: list[int]) -> int:
    """Return the one number from ``range(len(nums) + 1)`` absent from ``nums``.

    Uses cyclic sort: every value ``v < len(nums)`` is swapped into index
    ``v`` in place, after which the first index holding a different value is
    the missing number.  O(n) time, O(1) extra space; ``nums`` is mutated.

    Examples:
        >>> missing_number_cyclic_sort([])
        0
        >>> missing_number_cyclic_sort([0])
        1
        >>> missing_number_cyclic_sort([3, 0, 1])
        2
        >>> missing_number_cyclic_sort([9, 6, 4, 2, 3, 5, 7, 0, 1])
        8
    """
    size = len(nums)
    ## CYCLIC SORT
    pos = 0
    while pos < size:
        value = nums[pos]
        if value != pos and value < size:
            # Send `value` to its home index; re-examine whatever lands here.
            nums[pos], nums[value] = nums[value], nums[pos]
        else:
            pos += 1
    ## FIND MISSING NUMBER
    for pos, value in enumerate(nums):
        if value != pos:
            return pos
    return size  # missing number is n itself
| StarcoderdataPython |
1717067 | # -*- coding: utf-8 -*-
from alive_progress import alive_bar
import pandas as pd
from logging import getLogger
from tws_equities.data_files import get_japan_indices
from tws_equities.helpers import read_json_file
from tws_equities.helpers import save_data_as_json
from tws_equities.helpers import make_dirs
from tws_equities.helpers import delete_directory
from tws_equities.helpers import get_files_by_type
from tws_equities.helpers import read_csv
from tws_equities.helpers import isfile
from tws_equities.helpers import isdir
from tws_equities.helpers import join
from tws_equities.helpers import sep
from tws_equities.helpers import glob
from tws_equities.helpers import write_to_console
from tws_equities.helpers import HISTORICAL_DATA_STORAGE as _HISTORICAL_DATA_STORAGE
# Console markers used in the metrics summary output.
_RED_CROSS = u'\u274C'
_GREEN_TICK = u'\u2705'
# Default configuration for alive_progress bars; `title` is overwritten by
# callers that pass `bar_title`.
_BAR_CONFIG = {
    'title': '=> Status∶',
    'calibrate': 5,
    'force_tty': True,
    'spinner': 'dots_reverse',
    'bar': 'smooth'
}
# Module-level logger.
logger = getLogger(__name__)
# TODO: both dataframe generators could be refactored into a generic fucntion.
# fixme: account for empty dataframes
def generate_success_dataframe(target_directory, bar_title=None, verbose=False):
    """
    Creates a pandas data frame from JSON files present at the given success location.
    Assumes that all these JSON files have valid bar data.
    :param target_directory: location to read JSON files from
    :param bar_title: message to show in front of progress bar
    :param verbose: set to true to see info messages on console
    :return: DataFrame with one row per bar, sorted by (ecode, time_stamp)
    """
    if bar_title is not None:
        _BAR_CONFIG['title'] = bar_title
    def _get_ticker_id(file_name):
        # file names look like '<ticker>.json'; extract the numeric ticker id
        return int(file_name.split(sep)[-1].split('.')[0])
    # create a place holder dataframe
    expected_columns = ['time_stamp', 'ecode', 'session', 'high', 'low', 'close',
                        'volume', 'average', 'count']
    # create temporary directory to store smaller CSV files
    temp_directory = '.temp'
    make_dirs(temp_directory)
    # extract all json files from target directory
    success_files = get_files_by_type(target_directory)
    success_tickers = list(map(_get_ticker_id, success_files))
    total = len(success_tickers)
    data = pd.DataFrame(columns=expected_columns)
    if bool(total):
        write_to_console(f'=> Generating dataframe for success tickers...', verbose=verbose)
        json_generator = map(read_json_file, success_files)
        counter = 0 # to count temp files
        with alive_bar(total=total, **_BAR_CONFIG) as bar:
            for i in range(total):
                ticker = success_tickers[i]
                ticker_data = next(json_generator) # load data into a dictionary
                bar_data, meta_data = ticker_data['bar_data'], ticker_data['meta_data']
                temp_data = pd.DataFrame(bar_data)
                temp_data['ecode'] = ticker
                data = data.append(temp_data)
                # flush accumulated rows to a temp CSV every 100 tickers (and at the end)
                _time_to_cache = ((i > 0) and (i % 100 == 0)) or (i+1 == total)
                if _time_to_cache:
                    if data.shape[0] > 0:
                        temp_file = join(temp_directory, f'success_{counter}.csv')
                        data.to_csv(temp_file)
                        data = pd.DataFrame(columns=expected_columns)
                        counter += 1
                bar()
    # merge all CSV files into a single dataframe
    # delete all temp files
    temp_files = get_files_by_type(temp_directory, file_type='csv')
    if bool(temp_files):
        data = pd.concat(map(read_csv, temp_files))
        data.sort_values(by=['ecode', 'time_stamp'], inplace=True, ignore_index=True)
        data = data[expected_columns]
    delete_directory(temp_directory)
    return data
def generate_failure_dataframe(target_directory, bar_title=None, verbose=False):
    """
    Creates a pandas data frame from JSON files present at the given failure location.
    Assumes that all these JSON files have valid error stacks under
    ``meta_data['_error_stack']``.
    :param target_directory: location to read JSON files from
    :param bar_title: message to show in front of progress bar
    :param verbose: set to true to see info messages on console
    :return: DataFrame with columns (ecode, code, message), sorted by ecode
    """
    if bar_title is not None:
        _BAR_CONFIG['title'] = bar_title
    def _get_ticker_id(file_name):
        # file names look like '<ticker>.json'; extract the numeric ticker id
        return int(file_name.split(sep)[-1].split('.')[0])
    # create a place holder dataframe
    expected_columns = ['ecode', 'code', 'message']
    data = pd.DataFrame(columns=expected_columns)
    # create temporary directory to store smaller CSV files
    temp_directory = '.temp'
    make_dirs(temp_directory)
    # extract all json files from target directory
    file_pattern = join(target_directory, '*.json')
    failure_files = glob(file_pattern)
    total = len(failure_files)
    if bool(total):
        # typo fix: was 'Generting'
        write_to_console(f'=> Generating dataframe for failure tickers...', verbose=verbose)
        json_generator = map(read_json_file, failure_files)
        counter = 0  # to count temp CSV files
        with alive_bar(total=total, **_BAR_CONFIG) as bar:
            for i in range(total):
                ticker_data = next(json_generator)
                meta = ticker_data['meta_data']
                error_stack = meta['_error_stack']
                ecode = meta.get('ecode', _get_ticker_id(failure_files[i]))
                temp_data = pd.DataFrame(error_stack, columns=expected_columns)
                temp_data['ecode'] = ecode
                # if error stack is empty, then create a dummy row
                if temp_data.shape[0] == 0:  # fixme: find a way to control this in the TWS Client
                    dummy_row = {'ecode': ecode, 'code': 'unknown', 'message': 'not available'}
                    temp_data = temp_data.append(dummy_row, ignore_index=True)
                data = data.append(temp_data)
                # flush accumulated rows to a temp CSV every 100 tickers (and at the end)
                _time_to_cache = (i+1 == total) or ((i > 0) and (i % 100 == 0))
                if _time_to_cache:
                    if data.shape[0] > 0:
                        temp_file = join(temp_directory, f'failure_{counter}.csv')
                        data.to_csv(temp_file)
                        data = pd.DataFrame(columns=expected_columns)
                        counter += 1
                bar()
    # merge all CSV files into a single dataframe, then delete all temp files.
    # The emptiness guard mirrors generate_success_dataframe: pd.concat raises
    # ValueError on an empty iterable, which previously crashed runs with no
    # failure files.
    temp_files = get_files_by_type(temp_directory, file_type='csv')
    if bool(temp_files):
        data = pd.concat(map(read_csv, temp_files))
        data.sort_values(by=['ecode'], ignore_index=True, inplace=True)
        data = data[expected_columns]
    delete_directory(temp_directory)
    return data
def create_csv_dump(target_date, end_time='15:01:00', verbose=False):
    """
    Creates a CSV file from JSON files for a given date.
    Raise an error if directory for the gven is not present.
    Created CSV files will be saved at the same location by the name:
        'success.csv' & 'failure.csv'
    :param target_date: date in YYYYMMDD form (used to locate the data directory)
    :param end_time: extraction end time, 'HH:MM:SS' (colons become underscores in the path)
    :param verbose: set to true to see info messages on console
    """
    logger.info('Generating final CSV dump')
    # re-format YYYYMMDD as YYYY/MM/DD for the console banner
    _date = f'{target_date[:4]}/{target_date[4:6]}/{target_date[6:]}'
    write_to_console(f'{"-"*30} CSV Conversion: {_date} {"-"*31}', verbose=True)
    target_directory = join(_HISTORICAL_DATA_STORAGE, target_date, end_time.replace(':', '_'))
    if not isdir(target_directory):
        raise NotADirectoryError(f'Could not find a data storage directory for date: {target_date}')
    success_directory = join(target_directory, '.success')
    failure_directory = join(target_directory, '.failure')
    if isdir(success_directory):
        path = join(target_directory, 'success.csv')
        success = generate_success_dataframe(success_directory, bar_title='Success', verbose=verbose)
        success.to_csv(path, index=False)
        logger.debug(f'Success file saved at: {path}')
    if isdir(failure_directory):
        failure = generate_failure_dataframe(failure_directory, bar_title='Failure', verbose=verbose)
        path = join(target_directory, 'failure.csv')
        failure.to_csv(path, index=False)
        logger.debug(f'Failure file saved at: {path}')
def _get_marker(ratio, threshold=0.95):
    """Return the green-tick marker when *ratio* meets *threshold*, else the red cross."""
    if ratio >= threshold:
        return _GREEN_TICK
    return _RED_CROSS
# noinspection PyUnusedLocal
# TODO: refactor
def generate_extraction_metrics(target_date, end_time='15:01:00', input_tickers=None, verbose=False):
    """
    Generates metrics about success & failure tickers.
    Metrics are saved into a new file called 'metrics.json'.
    :param target_date: date for which metrics are needed
    :param end_time: end time for metrics are to be generated
    :param input_tickers: tickers for which metrics are to be generated
    :param verbose: set to True to echo per-index ratios to the console
    """
    logger.info('Generating final extraction metrics')
    _date = f'{target_date[:4]}/{target_date[4:6]}/{target_date[6:]}'
    write_to_console(f'{"-"*30} Metrics Generation: {_date} {"-"*31}', verbose=True)
    # every metric defaults to 0.0; locals with matching names overwrite these
    # via the vars() harvest at the bottom of the function
    expected_metrics = [
        'total_tickers', 'total_extracted', 'total_extraction_ratio',
        'extraction_successful', 'extraction_failure',
        'success_ratio', 'failure_ratio',
        'n_225_input_ratio', 'n_225_success_ratio', 'n_225_failure_ratio',
        'topix_input_ratio', 'topix_success_ratio', 'topix_failure_ratio',
        'jasdaq_20_input_ratio', 'jasdaq_20_success_ratio', 'jasdaq_20_failure_ratio',
        'missing_tickers_ratio', 'missing_tickers'
    ]
    metrics = dict(zip(expected_metrics, [0.0]*len(expected_metrics)))
    target_directory = join(_HISTORICAL_DATA_STORAGE, target_date, end_time.replace(':', '_'))
    if not isdir(target_directory):
        raise NotADirectoryError(f'Data storage directory for {target_date} not found at'
                                 f'{_HISTORICAL_DATA_STORAGE}')
    success_file = join(target_directory, 'success.csv')
    failure_file = join(target_directory, 'failure.csv')
    if not isfile(success_file):
        raise FileNotFoundError(f'Can not find success file: {success_file}')
    if not isfile(failure_file):
        raise FileNotFoundError(f'Can not find failure file: {failure_file}')
    input_tickers_file = join(target_directory, 'input_tickers.json')
    if input_tickers is None:
        if not isfile(input_tickers_file):
            raise FileNotFoundError(f'Can not find input tickers file: {input_tickers_file}')
        input_tickers = read_json_file(input_tickers_file)
    # build integer ticker lists for each Japanese index ('NNNN.T' -> NNNN)
    japan_indices = get_japan_indices()
    _n_225_tickers = japan_indices[japan_indices.n_225.str.contains('T')].n_225.unique().tolist()
    n_225_tickers = list(map(lambda x: int(x.split('.')[0]), _n_225_tickers))
    _topix_tickers = japan_indices[japan_indices.topix.str.contains('T')].topix.unique().tolist()
    topix_tickers = list(map(lambda x: int(x.split('.')[0]), _topix_tickers))
    _jasdaq_20_tickers = japan_indices[japan_indices.jasdaq_20.str.contains('T')].jasdaq_20.unique().tolist()
    jasdaq_20_tickers = list(map(lambda x: int(x.split('.')[0]), _jasdaq_20_tickers))
    success = read_csv(success_file)
    failure = read_csv(failure_file)
    success_tickers = success.ecode.unique().tolist()
    failure_tickers = failure.ecode.unique().tolist()
    total_tickers = len(input_tickers)
    if total_tickers == 0:
        raise ValueError(f'Can not find any input tickers in file {input_tickers_file}')
    # over-all extraction ratios
    extraction_successful = len(success_tickers)
    extraction_failure = len(failure_tickers)
    total_extracted = extraction_successful + extraction_failure
    total_extraction_ratio = round(total_extracted / total_tickers, 3)
    success_ratio = round(extraction_successful / total_tickers, 3)
    failure_ratio = round(extraction_failure / total_tickers, 3)
    logger.debug(f'Updated over-all extraction ratio: {success_ratio}')
    write_to_console(f'Over-all Extraction: {_get_marker(success_ratio)}', pointer='->',
                     indent=2, verbose=True)
    write_to_console(f'Over-all Success Ratio: {success_ratio}',
                     pointer='-', indent=4, verbose=verbose)
    # N225 ratios (only when the input overlaps the index; otherwise defaults stay 0.0)
    n_225_input = list(set(input_tickers).intersection(n_225_tickers))
    if bool(n_225_input):
        n_225_input_ratio = round(len(n_225_input) / len(n_225_tickers), 3)
        n_225_success = list(set(success_tickers).intersection(n_225_input))
        n_225_failure = list(set(failure_tickers).intersection(n_225_input))
        n_225_success_ratio = round(len(n_225_success) / len(n_225_input), 3)
        n_225_failure_ratio = round(len(n_225_failure) / len(n_225_input), 3)
        logger.debug(f'Updated N225 extraction ratio: {n_225_success_ratio}')
        write_to_console(f'N225 Extraction: {_get_marker(n_225_success_ratio)}', pointer='->',
                         indent=2, verbose=True)
        write_to_console(f'Over-all Success Ratio: {n_225_success_ratio}',
                         pointer='-', indent=4, verbose=verbose)
    else:
        logger.debug('Could not find any N 225 tickers in the given input')
    # Topix ratios
    topix_input = list(set(input_tickers).intersection(topix_tickers))
    if bool(topix_input):
        topix_input_ratio = round(len(topix_input) / len(topix_tickers), 3)
        topix_success = list(set(success_tickers).intersection(topix_input))
        topix_failure = list(set(failure_tickers).intersection(topix_input))
        topix_success_ratio = round(len(topix_success) / len(topix_input), 3)
        topix_failure_ratio = round(len(topix_failure) / len(topix_input), 3)
        logger.debug(f'Updated Topix extraction ratio: {topix_success_ratio}')
        write_to_console(f'Topix Extraction: {_get_marker(topix_success_ratio)}', pointer='->',
                         indent=2, verbose=True)
        write_to_console(f'Topix Success Ratio: {topix_success_ratio}',
                         pointer='-', indent=4, verbose=verbose)
    else:
        logger.debug('Could not find any Topix tickers in the given input')
    # JASDAQ 20 ratios
    jasdaq_20_input = list(set(input_tickers).intersection(jasdaq_20_tickers))
    if bool(jasdaq_20_input):
        jasdaq_20_input_ratio = round(len(jasdaq_20_input) / len(jasdaq_20_tickers), 3)
        jasdaq_20_success = list(set(success_tickers).intersection(jasdaq_20_input))
        jasdaq_20_failure = list(set(failure_tickers).intersection(jasdaq_20_input))
        jasdaq_20_success_ratio = round(len(jasdaq_20_success) / len(jasdaq_20_input), 3)
        jasdaq_20_failure_ratio = round(len(jasdaq_20_failure) / len(jasdaq_20_input), 3)
        logger.debug(f'Updated JASDAQ 20 extraction ratio: {jasdaq_20_success_ratio}')
        write_to_console(f'JASDAQ 20 Extraction: {_get_marker(jasdaq_20_success_ratio)}', pointer='->',
                         indent=2, verbose=True)
        write_to_console(f'JASDAQ 20 Success Ratio: {jasdaq_20_success_ratio}',
                         pointer='-', indent=4, verbose=verbose)
    else:
        logger.debug('Could not find any JASDAQ 20 tickers in the given input')
    # tickers that appear in the input but in neither output file
    missing_tickers = list(set(input_tickers).difference(success_tickers + failure_tickers))
    missing_tickers_ratio = round(len(missing_tickers) / total_tickers, 3)
    logger.debug(f'Updated missing tickers ratio: {missing_tickers_ratio}')
    # harvest every local whose name matches an expected metric.
    # NOTE(review): brittle — renaming any local above silently zeroes its metric.
    all_vars = vars()
    for key in all_vars:
        if key in expected_metrics:
            metrics[key] = all_vars[key]
    metrics_file = join(target_directory, 'metrics.json')
    save_data_as_json(metrics, metrics_file)
    logger.debug(f'Metrics saved at: {metrics_file}')
if __name__ == '__main__':
    # Ad-hoc manual run for a single trading date.
    create_csv_dump('20210121')
    # generate_extraction_metrics('20210120')
| StarcoderdataPython |
3340264 | # Copyright © 2019 <NAME>.
# Import the base board class.
from battleships.objects.Board import Board
# Import the engine.
import engine
class ShipBoard(Board):
    """
    Board object that renders all the ships from the player.
    """
    def __init__(self, parent):
        """
        Class constructor.
        Creates a new board on the screen.
        :param parent: The parent of the board.
        """
        # Call the parent constructor.
        super().__init__(parent)
        # Create the board background.
        ShipBoard.create_background(self, (0, 64, 128), (255, 255, 255))
        # List of all the placed ships.
        self.placed_ships = []
    def remove_boat(self, boat):
        """Remove *boat* from the placed list; no-op if it was never placed."""
        if boat in self.placed_ships:
            self.placed_ships.remove(boat)
    def place_boat(self, boat):
        """Record *boat* as placed on the board."""
        self.placed_ships.append(boat)
    def all_boats_placed(self):
        """Return True once the full fleet of 5 ships has been placed."""
        return len(self.placed_ships) == 5
    def position_is_valid(self, cell, length, direction):
        """
        Checks if the position is valid for the specified ship.
        :param cell: The cell that is requested.
        :param length: The length of the ship to place.
        :param direction: The direction of the ship (even = horizontal, odd = vertical).
        :return: True if the position is valid.

        NOTE(review): returns None (falsy) rather than False when any bound
        check fails.  Also, the `<= 10` bounds look off by one for a board
        addressed as ``x + y * 10`` (10 cells wide) — confirm board size.
        """
        # Check if the position is within the bounds.
        if cell.x >= 0 and cell.y >= 0:
            if cell.x <= 10 and cell.y <= 10:
                # If the ship is in the bounds.
                if direction % 2 == 0:
                    if cell.x <= 10 - length:
                        # Check for collisions
                        return self.collision_check(cell, length, direction) is None
                else:
                    if cell.y <= 10 - length:
                        # Check for collisions
                        return self.collision_check(cell, length, direction) is None
    @staticmethod
    def __get_covered_cells(cell, length, direction):
        """Return the flat cell indices (x + y*10) a ship of *length* covers
        starting at *cell* in *direction* (even = horizontal, odd = vertical)."""
        covered_cells = []
        for i in range(length):
            if direction % 2 == 0:
                covered_cells.append(i + (cell.x + cell.y * 10))
            else:
                covered_cells.append((i * 10) + (cell.x + cell.y * 10))
        return covered_cells
    def collision_check(self, cell, length, direction):
        """Return the first placed ship overlapping the given span, or None."""
        # Compute all the cells covered by the ship/shot.
        covered_cells = ShipBoard.__get_covered_cells(cell, length, direction)
        # Loop through all the ships.
        for ship in self.placed_ships:
            ship_cells = ShipBoard.__get_covered_cells(ship.get_cell(), ship.length, ship.rotation)
            intersect = list(set(covered_cells) & set(ship_cells))
            if len(intersect) > 0:
                return ship
        return None
| StarcoderdataPython |
1659039 | <reponame>gogomillan/holbertonschool-higher_level_programming
#!/usr/bin/python3
"""
Module for Rectable class.
"""
BaseGeometry = __import__("7-base_geometry").BaseGeometry
class Rectangle(BaseGeometry):
    """
    Class Rectangle

    A rectangle with validated integer dimensions.
    """
    def __init__(self, width, height):
        """
        Returns an instance of a Rectangle
        Args:
            width (int): The width of the Rectangle
            height (int): The height of the Rectangle

        Validation is delegated to BaseGeometry.integer_validator
        (presumably raises on non-int / non-positive values — see
        7-base_geometry to confirm the exact exceptions).
        """
        self.integer_validator("width", width)
        self.integer_validator("height", height)
        # name-mangled private attributes; no public accessors are defined here
        self.__width = width
        self.__height = height
| StarcoderdataPython |
1740589 | <gh_stars>0
import numpy as np
import h5py
#
def normalization(data):
    """Min-max scale *data* into the [0, 1] range.

    Returns an all-zero float array when every value is equal (zero range),
    instead of the NaN/inf result the naive division would produce.

    :param data: array-like of numbers
    :return: float ndarray of the same shape
    """
    lo = np.min(data)
    _range = np.max(data) - lo
    if _range == 0:
        # degenerate input: all values identical — avoid division by zero
        return np.zeros_like(data, dtype=float)
    return (data - lo) / _range
def onehot(data_y, num_classes=6):
    """One-hot encode a sequence of integer class labels (sleep stages).

    Generalizes the previous hard-coded 6-class encoding: the class count is
    now a parameter (default 6, so existing callers are unaffected).

    :param data_y: 1-D sequence of integer labels in ``range(num_classes)``
    :param num_classes: number of classes (default: 6 sleep stages)
    :return: float ndarray of shape ``(1, len(data_y), num_classes)`` — the
        leading singleton axis mirrors the original implementation, which
        wrapped the label list before encoding.
    """
    labels = np.asarray(data_y, dtype=int)
    encoded = np.zeros((len(labels), num_classes))
    # set a single 1.0 per row via integer-array indexing
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded[np.newaxis, ...]
np.set_printoptions(suppress=True)
def _pad_time_axis(x, target_len=3072, offset=36):
    """Zero-pad *x* of shape (epochs, samples, channels) to *target_len*
    samples along axis 1, placing the original signal *offset* samples in."""
    padded = np.zeros((x.shape[0], target_len, x.shape[2]))
    padded[:, offset:offset + x.shape[1], :] = x
    return padded


def loaddata(dataset):
    """Load one fold of pre-split sleep data from an ``.npz`` file.

    Each of the train/valid/test X arrays (epochs, 3000 samples, channels)
    is zero-padded to 3072 samples along the time axis (36 leading and 36
    trailing zeros), then all six arrays' shapes are printed.

    Changes vs. the original: unused ``path`` local and large blocks of
    commented-out dead code removed; the triplicated padding logic is now a
    single private helper.  Behavior (padding, prints, return order) is
    unchanged.

    :param dataset: path to an .npz with keys X_train/y_train, X_valid/y_valid,
        X_test/y_test
    :return: list [trainX, trainY, X_valid, y_valid, testX, testY]
    """
    npz = np.load(dataset)
    trainX = _pad_time_axis(npz['X_train'])
    trainY = npz['y_train']
    X_valid = _pad_time_axis(npz['X_valid'])
    y_valid = npz['y_valid']
    testX = _pad_time_axis(npz['X_test'])
    testY = npz['y_test']
    print(trainX.shape)
    print(trainY.shape)
    print(X_valid.shape)
    print(y_valid.shape)
    print(testX.shape)
    print(testY.shape)
    return [trainX, trainY, X_valid, y_valid, testX, testY]
#
# trainX,trainY=loaddata('sleep_edf_data.mat')
#
# #_ = [print(len(x)) for x in loaddata('sleep_edf_data.mat')]
# # print(type(testX))
# # print(testX.shape)
# # print(trainX.shape)
# print(trainX.shape)
# print(trainY)
| StarcoderdataPython |
63429 |
"""Flexible code for histogramming per-snp and per-replica statistics for selected SNPs in selected replicas in
selected scenarios and/or demographies."""
from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios
from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, \
MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff
from Classes.DotData import DotData
from Operations.Ilya_Operations.PipeRun.python.PipeRun import GetDependsOn
from Operations.Shari_Operations.localize.PopConsts import AllFreqs, AllPops, AllAges, CAUSAL_POS
from Operations.IDotData import IDotData
import operator, os, logging, contextlib, functools, collections, types, ast
from itertools import izip
import itertools, string
from UserDict import DictMixin
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pp
import numpy as np
import math
import traceback as tb
__all__ = ( 'gatherCausalFreqs', 'DefineRulesTo_gatherCausalFreqs', 'histogramSnpStatistic', 'histogramReplicaStatistic',
'AddUpHistograms', 'GraphHistograms', 'GraphCumulPlots', 'DefineRulesTo_histogramSnpStatistic',
'DefineRulesTo_histogramReplicaStatistic', 'findReplicasMatchingConds', 'findSnpsMatchingConds',
'identifyReplicasMeetingConds', 'splitSnpStatsFile',
'DefineRulesTo_identifyReplicasMeetingCommonConds' )
def gatherCausalFreqs( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
    """For all replicas within one scenario, gather some useful summary info for each replica:
    e.g. that replica's modern-day frequency of the causal allele, the genetic map position of the
    causal SNP, number of SNPs in the replica, the range of the genetic map, etc.

    When ``getio`` is truthy, only the pipeline I/O declaration
    (depends_on / creates) is returned — no work is done.
    """
    #hm3big/simsOutHm3big/10ky/sel100_1/
    simScenDir = Ddata + '/' + simsOut + thinSfx + '/' + scen.scenDir()
    statScenDir = Ddata + '/replicastats' + thinSfx + '/' + scen.scenDir()
    # one .pos file per replica; neutral scenarios have no causal SNP, hence no deps
    posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
                     for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []
    replicaInfoFileName = statScenDir + '/replicaStats.tsv'
    if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
                           mediumRuleNameSfx = scen )
    causalAlleleFreqs = [ ]
    replicaNums = [ ]
    # position of the causal SNP within the simulated region
    selpos = 500000
    # NOTE(review): okReplicas is never used below.
    okReplicas = 0
    for replicaNum in range( nreplicas ):
        # neutral scenarios have no causal allele; record NaN
        if scen.is_neutral(): causalFreq = np.nan
        else:
            posFile = DotData( SVPath = posFileNames[ replicaNum ], SVSkipFirstLines = 1, SVHeader = False,
                               names = ['SNP','CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2' ] )
            # exactly one row should sit at the causal position
            causalLine = posFile[ posFile.CHROM_POS == selpos ]
            assert len( causalLine ) == 1
            causalFreq = causalLine[0].FREQ1
        causalAlleleFreqs.append( causalFreq )
        replicaNums.append( replicaNum )
    # NOTE(review): uses scen.isNeutral() here but scen.is_neutral() above —
    # confirm both spellings exist on the Scenario class.
    DotData( names = [ 'replicaNum', 'causalAlleleFreq', 'targetCausalFreq' ],
             Columns = [ replicaNums, causalAlleleFreqs,
                         (( 0 if scen.isNeutral() else scen.mutFreq),)*nreplicas ] ).saveToSV( replicaInfoFileName )
def gatherReplicaGDstats( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
    """For all replicas within each scenario, gather some genetic map-related info for each replica:
    e.g. the genetic map position of the
    causal SNP, the range of the genetic map, etc.

    NOTE(review): this function appears unfinished — it declares that it
    creates ``replicaStats.tsv`` but never writes it, and the locals set up
    at the bottom are never used.  Confirm before relying on it.
    """
    #hm3big/simsOutHm3big/10ky/sel100_1/
    simScenDir = os.path.join( Ddata, simsOut + thinSfx, scen.scenDir() )
    statScenDir = os.path.join( Ddata, 'replicastats' + thinSfx, scen.scenDir() )
    posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
                     for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []
    replicaInfoFileName = statScenDir + '/replicaStats.tsv'
    if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
                           mediumRuleNameSfx = scen )
    causalAlleleFreqs = [ ]
    replicaNums = [ ]
    selpos = 500000
def DefineRulesTo_gatherCausalFreqs( pr, Ddata, simsOut = 'simsOut',
                                     mutAges = AllAges, mutPops = AllPops, mutFreqs = AllFreqs,
                                     thinSfx = '', thinExt = '', nreplicas = 100 ):
    """Define rules to gather per-replica statistics.

    Adds one pipeline rule per scenario; each rule invokes
    gatherReplicaStats (an alias of gatherCausalFreqs, defined below).
    """
    for scen in GetScenarios( mutAges, mutPops, mutFreqs ):
        pr.addInvokeRule( invokeFn = gatherReplicaStats,
                          invokeArgs = Dict( 'scen Ddata simsOut thinSfx thinExt nreplicas' ) )
# for compatibility with old code: older pipelines import these names,
# so keep them as aliases of the renamed functions above.
gatherReplicaStats = gatherCausalFreqs
DefineRulesTo_gatherReplicaStats = DefineRulesTo_gatherCausalFreqs
def histogramSnpStatistic( Ddata, thinSfx, scenDir, replicaTables, replicaCond, snpTables, snpCond, snpStat,
                           outFile, nreplicas, binSize, binShift = 0.0, sfx = None, scenSfx = None, getio = None ):
    """Compute histogram of $snpStat for snps matching $snpCond in replicas matching $replicaCond in scenario $scenDir.

    Params:

       replicaTables, snpTables - names of per-replica / per-snp statistics tables; for each table T
          there must be a file Ddata/replicastats<thinSfx>/<scenDir>/T.tsv
          (resp. Ddata/snpStats<thinSfx>/<scenDir>/T<scenSfx>.tsv).
       replicaCond, snpCond - Python expressions evaluated with each table name bound to the
          current row; only rows for which they hold are histogrammed.
       snpStat - Python expression giving the per-snp value to histogram.
       binSize, binShift - histogram bin parameters.

    Output: the histogram is saved to outFile; per the getio contract an accompanying
       'stats' file is also declared -- presumably written by Histogrammer.save();
       TODO confirm.
    """
    replicaTables = MakeSeq( replicaTables )
    snpTables = MakeSeq( snpTables )
    # Pre-compile the user-supplied condition/statistic expressions once.
    replicaCondExpr = compile_expr( replicaCond )
    snpCondExpr = compile_expr( snpCond )
    snpStatExpr = compile_expr( snpStat )
    outFile = AddFileSfx( outFile, sfx )
    outFileStats = AddFileSfx( outFile, 'stats' )
    if IsSeq( scenSfx ): scenSfx = dict( scenSfx )
    replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenDir,
                                        replicaTable + ( '.tsv' if '.' not in replicaTable else '' ) )
                          for replicaTable in replicaTables ]
    # scenSfx may be a single suffix shared by all tables, or a map from table name to suffix.
    snpTableFiles = [ os.path.join( Ddata, 'snpStats' + thinSfx, scenDir,
                                    AddFileSfx( snpTable + ( '.tsv' if '.' not in snpTable else '' ),
                                                scenSfx if isinstance( scenSfx, types.StringTypes )
                                                else scenSfx[ os.path.splitext( snpTable )[0] ] ) )
                      for snpTable in snpTables ]
    #dbg('replicaTableFiles snpTableFiles')
    #dbg('"*****" replicaTableFiles+snpTableFiles')
    # .data tables are directories, so reference them with a trailing slash.
    replicaTableFiles = [ f + '/' if f.endswith('.data') else f for f in replicaTableFiles ]
    snpTableFiles = [ f + '/' if f.endswith('.data') else f for f in snpTableFiles ]
    snpTables = [ os.path.splitext(snpTable)[0] for snpTable in snpTables ]
    if getio: return dict( depends_on = replicaTableFiles + snpTableFiles,
                           creates = ( outFile, AddFileSfx( outFile, 'stats' ) ),
                           attrs = Dict( 'scenDir snpCond replicaCond snpStat' ),
                           mediumRuleNameSfx = ( scenDir, scenSfx ) )
    # Evaluate replicaCond once per replica: row i of each replica table describes replica i.
    replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]
    replicasToUse = [ eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) )
                      for replicaTableRows in izip( *replicaTableVals ) ]
    #dbg( 'sum(replicasToUse)' )
    snpTableVals = [ IDotData( SVPath = f ) for f in snpTableFiles ]
    histogramBuilder = Histogrammer( binSize = binSize, binShift = binShift )
    lastReplica = np.nan
    # Walk all snp tables in lockstep; corresponding rows must describe the same SNP.
    for snpTableRows in izip( *snpTableVals ):
        r0 = snpTableRows[ 0 ]
        assert all([ r.Chrom == r0.Chrom for r in snpTableRows ]) or all([ np.isnan( r.Chrom ) for r in snpTableRows ])
        assert all([ r.Pos == r0.Pos for r in snpTableRows ])
        # Chrom holds the replica number; NaN means 'no replica' and maps to -1.
        replica = int( r0.Chrom ) if not np.isnan( r0.Chrom ) else -1
        useThisReplica = not replicaTables or replicasToUse[ replica ]
        if replica != lastReplica: dbg( 'replica useThisReplica histogramBuilder.getNumVals()' )
        if useThisReplica:
            snpDict = dict( zip( snpTables, snpTableRows ) )
            if eval( snpCondExpr, globals(), snpDict ):
                val = eval( snpStatExpr, globals(), snpDict )
                histogramBuilder.addVal( val )
        lastReplica = replica
    # Fixed: the file name is now a lazy %-style argument.  The old call
    # ( logging.info('saving histogram to ', outFile) ) passed outFile as a stray
    # format argument to a string with no placeholder, so it never got logged.
    logging.info( 'saving histogram to %s', outFile )
    histogramBuilder.save( outFile )
def histogramReplicaStatistic( Ddata, thinSfx, replicaCond, replicaStat,
                               outFile, nreplicas, binSize, scenCond = 'True',
                               replicaTables = None,
                               scen2sfxs = {}, allScens = GetScenarios(),
                               sfx = None, replicaCondSfx = '',
                               nameSfx = '', getio = None ):
    """Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
    Saves the histogram as well as overall stats about the values of this statistic, e.g. the average.

    Params:

       replicaTables - names of the per-replica statistics tables referenced by the
          expressions; if None, findReplicasMatchingConds infers them from the
          identifiers used in replicaCond and replicaStat.
       replicaCond - Python expression selecting which replicas to include.
       replicaStat - Python expression giving the per-replica value to histogram.
       binSize - histogram bin width.
    """
    outFile = AddFileSfx( outFile, sfx, replicaCondSfx )
    outFileStats = AddFileSfx( outFile, 'stats' )
    # Common arguments threaded through to findReplicasMatchingConds, both for the
    # getio dependency query and for the actual scan below.
    args = Dict( 'Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens' )
    if getio: return dict( depends_on =
                               findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = ( outFile, outFileStats ),
                           mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
                           name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
    histogramBuilder = Histogrammer( binSize = binSize )
    # .val is the column of evaluated replicaStat values over all matching replicas.
    histogramBuilder.addVals( findReplicasMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
    histogramBuilder.save( outFile )
def histogramSnpStatistic2( Ddata, thinSfx, snpTables, snpCond, snpCondSfx, replicaTables, replicaCond, replicaStat,
                            outFile, nreplicas, binSize, scenCond = 'True',
                            scen2sfxs = {}, allScens = GetScenarios(),
                            sfx = None, replicaCondSfx = '',
                            nameSfx = '', getio = None ):
    """Compute histogram of $replicaStat evaluated for each SNP matching $snpCond, in replicas
    matching $replicaCond, over scenarios matching $scenCond.
    Saves the histogram as well as overall stats about the values of this statistic.

    Params:

       snpTables, replicaTables - names of the per-snp / per-replica statistics tables.
       snpCond, replicaCond - Python expressions selecting which SNPs / replicas to include.
       replicaStat - Python expression giving the per-snp value to histogram.
       binSize - histogram bin width.
    """
    outFile = AddFileSfx( outFile, sfx, replicaCondSfx, snpCondSfx )
    outFileStats = AddFileSfx( outFile, 'stats' )
    # Arguments threaded through to findSnpsMatchingConds, both for the getio
    # dependency query and for the actual scan below.
    args = Dict( 'Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens' )
    # Fixed: was 'finSnpsMatchingConds', a typo causing a NameError.
    if getio: return dict( depends_on =
                               findSnpsMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = ( outFile, outFileStats ),
                           mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
                           # NOTE(review): rule name says 'histogramReplicaStatistic' -- looks like a
                           # copy-paste from that function; kept for compatibility with existing pipelines.
                           name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
    histogramBuilder = Histogrammer( binSize = binSize )
    # Fixed: was 'snpStat', which is not defined in this scope (NameError); the
    # per-snp statistic expression is passed in via the 'replicaStat' parameter.
    histogramBuilder.addVals( findSnpsMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
    histogramBuilder.save( outFile )
def AddUpHistograms( histFiles, outFile, getio = None ):
    """Sum the histograms stored in the given files, writing the combined
    histogram to a new file."""
    outFileStats = AddFileSfx( outFile, 'stats' )
    if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileStats ),
                           attrs = dict( piperun_short = True ) )
    # Load each histogram in turn and accumulate via Histogrammer.__add__ .
    summed = None
    for histFile in histFiles:
        hist = Histogrammer.load( histFile )
        summed = hist if summed is None else summed + hist
    summed.save( outFile )
def GraphHistograms( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
                     labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                     relWidth = 0.4,
                     xbound = None, ybound = None, coarsenBy = None, sfx = '',
                     ticksCoarsen = 1, log = False, normed = False,
                     cumulative = False,
                     cumulativeUpTo = None,
                     figSize = (24, 12 ),
                     subplots_adjust = {},
                     getio = None ):
    """Plot one or more histograms sharing the same bins, as grouped bar charts:
    a cumulative version in the top subplot and a plain version in the bottom one.

    Params:

       histFiles - files containing saved Histogrammer objects; all must share the
          same binSize and binShift.
       normed - if true, for each histogram on the y-axis we plot not the number of
          items in a given bin, but their fraction out of the total number of items in that histogram.
          This lets us compare different histograms.
       cumulativeUpTo - if given, truncate the x-range at the last bin needed for every
          histogram's cumulative fraction to reach this value.

    NOTE(review): the 'cumulative' parameter is shadowed by the subplot loop
    variable below, so its passed-in value is ignored -- confirm whether that
    is intentional.
    """
    #dbg( '"at_first" labels' )
    # ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
    # relationship, then we can still do the graph).
    histFiles = MakeSeq( histFiles )
    if not outFile:
        assert len( histFiles ) == 1
        outFile = ReplaceFileExt( histFiles[0], '.png' )
    outFile = AddFileSfx( outFile, sfx )
    # Default each histogram's legend label to its file's base name.
    if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
    if getio: return dict( depends_on = histFiles, creates = outFile,
                           mediumRuleNameSfx = sfx,
                           attrs = dict( piperun_short = True ) )
    pp.figure(1, figsize = figSize )
    #pp.clf()
    pp.subplots_adjust( **MergeDicts( dict( hspace = 0.3, bottom = 0.15 ), subplots_adjust ) )
    # Two stacked subplots: cumulative on top, plain counts below.
    for which, cumulative in enumerate( ( True, False ) ):
        pp.subplot( 2, 1, which + 1 )
        pp.xlabel( xlabel )
        pp.ylabel( ylabel )
        pp.hold( True )
        binSize = None
        binShift = None
        theLabels = []
        theHandles = []
        hists = map( Histogrammer.load, histFiles )
        if coarsenBy: hists = [ hist.coarsenBy( coarsenBy ) for hist in hists ]
        # Union of bin ids across all histograms determines the common x-range.
        allBinIds = reduce( operator.concat, [ hist.bin2count.keys() for hist in hists ] )
        if not allBinIds: allBinIds = ( 0, )
        minBinId = min( allBinIds )
        maxBinId = max( allBinIds ) + 1
        if cumulativeUpTo is not None:
            maxBinId = min( maxBinId, max( [ hist.getCumulativeBinFor( cumulativeUpTo ) for hist in hists ] ) ) + 1
        for color, label, ( histFileNum, hist ) in zip( colors, labels, enumerate( hists ) ):
            # check that all histograms we're loading have the same bins
            if binSize is None: binSize = hist.binSize
            else: assert abs( hist.binSize - binSize ) < 1e-12
            if binShift is None: binShift = hist.binShift
            else: assert abs( hist.binShift - binShift ) < 1e-12
            # Offset each histogram's bars within the bin so they sit side by side.
            width = binSize * relWidth / len( histFiles )
            left = np.array( hist.getAllBinLefts( minBinId = minBinId, maxBinId = maxBinId ) ) + histFileNum * width
            if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( left ) if i % ticksCoarsen == 0 ] )
            height = hist.getAllBinCounts( normed = normed, cumulative = cumulative,
                                           minBinId = minBinId, maxBinId = maxBinId )
            rects = pp.bar( height = height,
                            width = width * 0.95, **Dict( 'left color log' ) )
            if rects:
                # Annotate the legend label with value/NaN/Inf counts.
                labelHere = label + ' (%d values)' % hist.getNumVals()
                if hist.getNumNaNs(): labelHere += ' (%d nans)' % hist.getNumNaNs()
                if hist.getNumInfs(): labelHere += ' (%d infs)' % hist.getNumInfs()
                rects[ 0 ].set_label( labelHere )
                theLabels.append( labelHere )
                theHandles.append( rects[0] )
        pp.title( title )
        if theLabels and theHandles:
            pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
        if xbound: pp.gca().set_xbound( *xbound )
        if ybound: pp.gca().set_ybound( *ybound )
    pp.savefig( outFile )
def GraphCumulPlots( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
                     labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                     relWidth = 0.4,
                     xbound = None, ybound = None, coarsenBy = None, sfx = '',
                     ticksCoarsen = 1, log = False, normed = True,
                     getio = None ):
    """Plot one or more cumulative plots.

    Each input file holds a saved Histogrammer; the cumulative (by default
    normalized) bin counts are drawn as line plots on shared axes, and the
    plotted points are also written to a .points.tsv side file.

    NOTE(review): the points file is rewritten inside the per-histogram loop,
    so with several histFiles it ends up holding only the last histogram's
    points -- confirm whether that is intended.
    """
    # ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
    # relationship, then we can still do the graph).
    histFiles = MakeSeq( histFiles )
    if not outFile:
        assert len( histFiles ) == 1
        outFile = ReplaceFileExt( histFiles[0], '.png' )
    if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
    outFileTable = outFile + '.points.tsv'
    if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileTable ),
                           mediumRuleNameSfx = sfx,
                           attrs = dict( piperun_short = True ) )
    pp.figure(1, figsize = (18,6) )
    #pp.clf()
    pp.subplots_adjust( bottom = 0.37 )
    pp.xlabel( xlabel + '\n\n\n\n' )
    pp.ylabel( ylabel )
    pp.hold( True )
    binSize = None
    theLabels = []
    theHandles = []
    for color, label, ( histFileNum, histFile ) in zip( colors, labels, enumerate( histFiles ) ):
        hist = Histogrammer.load( histFile )
        if coarsenBy: hist = hist.coarsenBy( coarsenBy )
        # All histograms must share the same bin size.
        if not binSize: binSize = hist.binSize
        else:
            if not abs( hist.binSize - binSize ) < 1e-12:
                dbg( 'hist.binSize binSize hist.binSize-binSize' )
            assert abs( hist.binSize - binSize ) < 1e-12
        binLefts = hist.getBinLefts()
        if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( binLefts ) if i % ticksCoarsen == 0 ] )
        binCounts = hist.getBinCounts( normed = normed, cumulative = True )
        rects = pp.plot( binLefts, binCounts, label = label, color = color )
        DotData( names = ( 'binLefts', 'binCounts' ), Columns = ( binLefts, binCounts ) ).saveToSV( outFileTable )
        if rects:
            theLabels.append( label )
            theHandles.append( rects )
    pp.title( title )
    if theLabels and theHandles:
        pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
    if xbound: pp.gca().set_xbound( *xbound )
    if ybound: pp.gca().set_ybound( *ybound )
    pp.savefig( outFile )
def DefineRulesTo_histogramSnpStatistic( pr, Ddata,
                                         outFile, snpTables, snpStat, binSize,
                                         binShift = 0.0,
                                         scen2sfxs = lambda scen: '',
                                         scenCond = 'True',
                                         allScens = GetScenarios(),
                                         nreplicas = 100, thinSfx = '', replicaTables = (),
                                         replicaConds = 'True', replicaCondsSfxs = '',
                                         snpConds = 'True', snpCondsSfxs = '', title = '', titlePrefix = '',
                                         xlabel = '', ylabel = '',
                                         xbound = None, ybound = None, log = False, coarsenBy = None, sfx = '',
                                         ticksCoarsen = 1, cumulative = False, normed = False,
                                         colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                                         subplots_adjust = {},
                                         name = None ):
    """A generic way to plot the distribution of some per-snp statistics for some subset of SNPs.

    Params:

       snpTables - names of the per-snp statistics tables; for each table T and scenario there
          must be a file under Ddata/snpStats<thinSfx>/<scenDir>/ .
       snpStat - Python expression giving the per-snp value to histogram.
       replicaConds / replicaCondsSfxs, snpConds / snpCondsSfxs - parallel sequences of filter
          expressions and the file-name suffixes identifying them; one summed histogram is
          produced per (replicaCond, snpCond) combination, and all are graphed together.

    Notes:

       - for histogramming should not need to load it all into memory.  can do a pre-pass to just get
       the range of values, define the bins, then do a second pass to count what goes in what bin.
       could also add bins as we go.  so, really just need to know bin size, and then can do all this
       with one pass.  can also, later, make this automatically parallelized.
    """
    if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
    scenCondExpr = compile_expr( scenCond )
    replicaConds = MakeSeq( replicaConds )
    replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
    snpConds = MakeSeq( snpConds )
    snpCondsSfxs = MakeSeq( snpCondsSfxs )
    totaledHistFiles = []
    totaledLabels = []
    outFile = AddFileSfx( outFile, sfx )
    baseOutFile = outFile
    # For each (replica filter, snp filter) pair: define per-scenario histogram rules,
    # then a rule summing them into one histogram.
    for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
        for snpCond, snpCondSfx in zip( snpConds, snpCondsSfxs ):
            histFiles = []
            for scen in allScens:
                if not eval( scenCondExpr, globals(), ScenAttrs( scen ) ): continue
                scenDir = scen.scenDir()
                for scenSfx in MakeSeq( scen2sfxs( scen ) if callable( scen2sfxs ) else scen2sfxs[ scen ] ):
                    histOutFile = os.path.join( Ddata, 'hist', scenDir,
                                                AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ),
                                                            snpStat,
                                                            replicaCondSfx, snpCondSfx, scenSfx, sfx ) )
                    rule = pr.addInvokeRule( invokeFn = histogramSnpStatistic,
                                             invokeArgs =
                                                 dict( outFile = histOutFile,
                                                       **Dict( 'Ddata thinSfx replicaTables replicaCond snpTables snpCond '
                                                               'snpStat nreplicas binSize binShift scenDir scenSfx sfx' ) ),
                                             name = name,
                                             comment = 'Compute distribution of ' + snpStat
                                             + ' for SNPs matching ' + snpCond + ' in replicas matching ' + replicaCond )
                    histFiles.append( histOutFile )
            totaledHistFile = os.path.join( Ddata, 'hist',
                                            AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ),
                                                        snpCondSfx, replicaCondSfx, sfx ) )
            totaledHistFiles.append( totaledHistFile )
            # Build a human-readable legend label for this filter combination.
            totaledLabel = ''
            if replicaCondSfx:
                totaledLabel += replicaCondSfx + ' replicas' + ( (' (' + replicaCond + ') ') \
                                                                    if replicaCond != 'True' else '' )
            if snpCondSfx: totaledLabel += snpCondSfx + ' SNPs' + ( (' (' + snpCond + ') ') \
                                                                       if snpCond != 'True' else '' )
            totaledLabels.append( totaledLabel )
            pr.addInvokeRule( invokeFn = AddUpHistograms, invokeArgs = dict( histFiles = histFiles,
                                                                             outFile = totaledHistFile ),
                              mediumRuleNameSfx = ( sfx, snpStat, replicaCondSfx, snpCondSfx ), name = 'AddUpSnpHists',
                              fileDescrs = { 0:
                                                 ( 'Distribution of <b>' + snpStat + '</b> among '
                                                   + ( 'all SNPs' if snpCond == 'True'
                                                       else ' snps matching <em>' + snpCond + '</em>' )
                                                   + ' in '
                                                   + ( 'all replicas' if replicaCond == 'True' else
                                                       'replicas matching <em>' + replicaCond + '</em>' )
                                                   + ' in '
                                                   + ( 'all scenarios' if scenCond == 'True' else
                                                       'scenarios matching <em>' + scenCond + '</em>' ),
                                                   ( ( 'count', 'Number of SNPs with ' + snpStat + ' in given bin' ),) ) } )
    # Build a default title/axis labels describing the applied filters.
    if not title:
        title = 'Histogram of ' + snpStat + '\n'
        if scenCond != 'True': title += ' scenCond: ' + scenCond
        if any( replicaCond != 'True' for replicaCond in replicaConds ):
            title += ' replicaConds: ' + ', '.join(replicaCondsSfxs)
        if any( snpCond != 'True' for snpCond in snpConds ): title += ' snpConds: ' + ', '.join(snpCondsSfxs)
    title = titlePrefix + title
    if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of snps'
    if not xlabel: xlabel = snpStat
    # Finally, graph all the summed histograms on one figure.
    pr.addInvokeRule( invokeFn = GraphHistograms,
                      mediumRuleNameSfx = (snpStat,) + tuple(replicaCondsSfxs) + tuple(snpCondsSfxs),
                      name = 'GraphSnpHists',
                      invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels,
                                         **Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile '
                                                 'cumulative normed ticksCoarsen colors' ) ),
                      attrs = Dict( 'snpStat replicaConds snpConds scenCond subplots_adjust' ) )
def DefineRulesTo_histogramReplicaStatistic( pr, Ddata,
                                             outFile, replicaStat, binSize,
                                             scenCond = 'True',
                                             replicaTables = None,
                                             sfx = '',
                                             scen2sfxs = lambda scen: '',
                                             allScens = tuple( GetScenarios() ),
                                             nreplicas = 100, thinSfx = '',
                                             replicaConds = 'True', replicaCondsSfxs = '',
                                             title = '', titlePrefix = '',
                                             xlabel = '', ylabel = '',
                                             xbound = None, ybound = None, log = False, coarsenBy = None,
                                             ticksCoarsen = 1, cumulative = False, normed = False,
                                             cumulativeUpTo = 0.99,
                                             subplots_adjust = {},
                                             name = None, nameSfx = '' ):
    """Define rules to plot the distribution of a specified per-replica statistic for some subsets of replicas
    in some subset of scenarios.

    Params:

       pr - the PipeRun object to which the rules should be added
       Ddata - the root folder of the genetic data in simulations format
       outFile - the filename to which the histogram plot will be written
       replicaTables - names of tables containing per-replica values.  For each such table T,
          there must be a file of the form os.path.join( Ddata, replicastats, scenario.scenDir(), T + '.tsv' )
          giving some values for each replica in the scenario.
       replicaStat - a Python expression in which the names in replicaTables may appear as variables, and refer
          to a named tuple representing the replica's row in the corresponding replicaTable.

    Returns: the list of histogram files produced (one per replica condition).

    Notes:

       - for histogramming should not need to load it all into memory.  can do a pre-pass to just get
       the range of values, define the bins, then do a second pass to count what goes in what bin.
       could also add bins as we go.  so, really just need to know bin size, and then can do all this
       with one pass.  can also, later, make this automatically parallelized.
    """
    if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
    scenCondExpr = compile_expr( scenCond )
    # Restrict to scenarios matching scenCond, and normalize scen2sfxs to a dict.
    ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ]
    if callable( scen2sfxs ):
        scen2sfxs = dict( ( scen, scen2sfxs( scen ) ) for scen in ourScens )
    replicaConds = MakeSeq( replicaConds )
    replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
    totaledHistFiles = []
    totaledLabels = []
    # One histogram rule per replica condition.
    for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
        totaledHistFile = os.path.join( Ddata, 'hist',
                                        ReplaceFileExt( os.path.basename( outFile ), '.tsv' ) )
        totaledLabels.append( replicaCondSfx + ': ' + replicaCond )
        r = pr.addInvokeRule( invokeFn = histogramReplicaStatistic,
                              invokeArgs = Dict( 'Ddata thinSfx replicaTables replicaCond replicaStat nreplicas '
                                                 'binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx',
                                                 outFile = totaledHistFile ),
                              mediumRuleNameSfx = ( replicaStat, replicaCondSfx, sfx ),
                              fileDescrs = { 0:
                                                 ( 'Distribution of <b>' + replicaStat + '</b> among '
                                                   + ( 'all replicas' if replicaCond == 'True' else
                                                       'replicas matching <em>' + replicaCond + '</em>' )
                                                   + ' in '
                                                   + ( 'all scenarios' if scenCond == 'True' else
                                                       'scenarios matching <em>' + scenCond + '</em>' ),
                                                   ( ( 'count', 'Number of replicas with ' + replicaStat +
                                                       ' in given bin' ),
                                                     )) } )
        # The rule's first output is the (suffixed) histogram file actually created.
        totaledHistFiles.append( r.creates[0] )
    # Build a default title/axis labels describing the applied filters.
    if not title:
        if scenCond != 'True': title += ' scenCond: ' + scenCond
        if len( replicaConds ) == 1 and replicaConds[0] != 'True': title += ' replicaCond: ' + replicaConds[0]
    title = titlePrefix + title
    if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of replicas'
    if not xlabel: xlabel = replicaStat
    pr.addInvokeRule( invokeFn = GraphHistograms,
                      invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels,
                                         **Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile '
                                                 'sfx ticksCoarsen cumulative normed cumulativeUpTo' ) ),
                      name = 'GraphReplicaHists' + Sfx( nameSfx ),
                      mediumRuleNameSfx = ( replicaStat, sfx ) + tuple( replicaConds ),
                      attrs = Dict( 'replicaStat sfx subplots_adjust' ) )
    return totaledHistFiles
def identifyReplicasMeetingConds( Ddata, scenario, replicaTables, replicaConds, condsFileFN, nreplicas,
                                  thinSfx = '', getio = None ):
    """Given a list of named replica conditions, determine for each replica which conditions it meets, and
    write out the result in an easy-to-access format.

    Input params:

       replicaConds - sequence of pairs of the form ( condName, cond ) -- for example,
         ( ( 'hi', 'replicaStats.causalAlleleFreq >= .5' ), ( 'lo', 'replicaStats.causalAlleleFreq < .5' ) )

    Output: writes to condsFileFN a table with one row per replica, giving a
       comma-separated list of the names of the conditions the replica meets,
       plus one 0/1 column per condition.
    """
    replicaTables = MakeSeq( replicaTables )
    replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
                                        replicaTable + ( '.tsv' if not os.path.splitext( replicaTable )[1] else '' ) )
                          for replicaTable in replicaTables ]
    if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
                                                                      condsFileFN )
    if getio: return dict( depends_on = replicaTableFiles, creates = condsFileFN, mediumRuleNameSfx = scenario.scenDir(),
                           attrs = dict( piperun_short = True,
                                         condNames = ', '.join( map( operator.itemgetter( 0 ), replicaConds ) ) ) )
    replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]
    # Every replica table must have exactly one row per replica.
    assert all([ len( replicaTableVal ) == nreplicas for replicaTableVal in replicaTableVals ])
    matchingReplicas = []
    # For each condition, evaluate it for every replica (row i of each table = replica i),
    # giving a parallel list of 0/1 flags.
    for replicaCond in map( operator.itemgetter( 1 ), replicaConds ):
        replicaCondExpr = compile_expr( replicaCond )
        replicasToUse = [ int( eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) ) )
                          for replicaTableRows in izip( *replicaTableVals ) ]
        matchingReplicas.append( replicasToUse )
    Records = []
    condNames = tuple( map( operator.itemgetter( 0 ), replicaConds ) )
    # Transpose: one record per replica, listing which conditions it satisfied.
    for replicaNum, condResults in enumerate( izip( *matchingReplicas ) ):
        Records.append( ( replicaNum, ','.join( replicaCondName for condNum, replicaCondName
                                                in enumerate( condNames )
                                                if condResults[ condNum ] ) )
                        + condResults )
    IDotData( names = ( 'replicaNum', 'matchingConds' ) + condNames, Records = Records ).save( condsFileFN )
def DefineRulesTo_identifyReplicasMeetingCommonConds( pr, Ddata, thinSfx = '', allScens = GetSelectionScenarios(),
                                                      nreplicas = 100 ):
    """Define rules to identify replicas meeting common conditions such as all/lo/hi freq"""
    # The standard named conditions: every replica, and high/low causal allele frequency.
    commonConds = ( ( 'all', 'True' ),
                    ( 'hi', 'replicaStats.causalAlleleFreq >= .5' ),
                    ( 'lo', 'replicaStats.causalAlleleFreq < .5' ) )
    # Dict() captures the listed locals by name, so 'scenario' must keep its name.
    for scenario in allScens:
        pr.addInvokeRule( invokeFn = identifyReplicasMeetingConds,
                          invokeArgs = Dict( 'Ddata scenario nreplicas thinSfx',
                                             replicaTables = ( 'replicaStats', ),
                                             replicaConds = commonConds,
                                             condsFileFN = 'commonReplicaConds.tsv' ) )
def splitSnpStatsFile( Ddata, scenario, inFileFN, condsFileFN, condNames, thinSfx = '',
                       replicaColName = 'Chrom', sfx = '', getio = None ):
    """Split a file containing per-snp data for all replicas, into separate files containing the same data for each
    kind of replica.

    The per-replica condition flags are read from condsFileFN (as written by
    identifyReplicasMeetingConds); one output file is produced per condition name,
    named by suffixing inFileFN with that condition name.
    """
    if not os.path.dirname( inFileFN ): inFileFN = os.path.join( Ddata, scenario.scenDir(), inFileFN )
    if not os.path.dirname( condsFileFN ):
        condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(), condsFileFN )
    outFileFNs = [ AddFileSfx( inFileFN, sfx, condName ) for condName in condNames ]
    if getio: return dict( depends_on = ( inFileFN, condsFileFN ),
                           creates = outFileFNs, mediumRuleNameSfx = scenario.scenDir() )
    condsFile = IDotData( condsFileFN )
    inFile = IDotData( inFileFN )
    # NOTE(review): contextlib.nested is Python-2-only; this predates 'with a, b:' syntax.
    with contextlib.nested( *map( functools.partial( IDotData.openForWrite, headings = inFile.headings ),
                                  outFileFNs ) ) as outFiles:
        # Walk the input grouped by replica, in lockstep with the per-replica condition rows.
        for (replica, replicaRows), condValues in izip( inFile.groupby( replicaColName, multiPass = False ), condsFile ):
            assert condValues.replicaNum == replica
            # if this replica matches more than one condition, save the replica rows so we can iterate over them more
            # than once
            if sum( condValues[ condNames ] ) > 1: replicaRows = tuple( replicaRows )
            for condName, outFile in zip( condNames, outFiles ):
                if condValues[ condName ]: outFile.writeRecords( replicaRows )
def joinSnpStatsFiles( Ddata, scenario, outFileFN, condNames, condsFileFN, thinSfx = '',
                       replicaColName = 'Chrom', sfx = '', getio = None ):
    """Join several per-snp stats files, each containing data for some of the replicas,
    into a single file containing data for all the replicas.

    Inverse of splitSnpStatsFile: the per-replica condition flags in condsFileFN say
    which input file holds each replica's rows; replicas are emitted in condsFileFN order.
    """
    if not os.path.dirname( outFileFN ): outFileFN = os.path.join( Ddata, scenario.scenDir(), outFileFN )
    if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
                                                                      condsFileFN )
    inFileFNs = [ AddFileSfx( outFileFN, sfx, condName ) for condName in condNames ]
    if getio: return dict( depends_on = [ condsFileFN ] + inFileFNs, creates = outFileFN,
                           mediumRuleNameSfx = scenario.scenDir() )
    inFiles = map( IDotData, inFileFNs )
    dbg( 'inFiles' )
    condsFile = IDotData( condsFileFN )
    # One lazy group-by-replica iterator per input file.
    groupIters = [ inFile.groupby( replicaColName ) for inFile in inFiles ]
    def getBlocks():
        # For each replica (row of condsFile), pull its rows from the first input
        # file whose condition the replica satisfies.
        for r in condsFile:
            for condName, groupIter in zip( condNames, groupIters ):
                if r[ condName ]:
                    replicaNum, replicaRows = next( groupIter )
                    assert replicaNum == r.replicaNum
                    yield replicaRows
                    break
    IDotData.vstackFromIterable( getBlocks() ).save( outFileFN )
def ScenAttrs( scen ):
    """Make a dictionary describing the attributes of a scenario, suitable for use
    as the local namespace when evaluating scenario-condition expressions."""
    attrs = { 'scen': scen, 'is_neutral': scen.is_neutral(), 'isNeutral': scen.isNeutral() }
    # Selection scenarios additionally expose the selected mutation's parameters.
    if not scen.is_neutral():
        attrs[ 'mutAge' ] = scen.mutAge
        attrs[ 'mutPop' ] = scen.mutPop
        attrs[ 'mutFreq' ] = scen.mutFreq
    return attrs
def scatterPlotReplicaStatistic( Ddata, nreplicas, replicaStatX,
                                 replicaStatY,
                                 outFile,
                                 thinSfx = '',
                                 scenCond = 'True',
                                 replicaTables = (), replicaCond = 'True',
                                 replicaColorings = (),
                                 replicaDefaultColor = 'b',
                                 replicaShow = None,
                                 allScens = tuple( GetScenarios() ), nameSfx = '',
                                 scen2sfxs = {},
                                 title = '', subtitle = '',
                                 highlightScen = None, highlightReplica = None,
                                 xbound = None, ybound = None,
                                 getio = None ):
    """Draw a scatter plot where for each replica we have a pair of values.

    Params:

       replicaStatX, replicaStatY - Python expressions giving each replica's x and y values.
       replicaColorings - sequence of ( name, condExpr, color ) triples; a replica gets the
          color of the first condition it satisfies, else replicaDefaultColor.
       replicaShow - optional expression(s) whose value is appended to each point's URL label.
    """
    args = Dict( 'Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens' )
    if getio: return dict( depends_on = findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = outFile,
                           name = 'scatterPlotReplicaStatistic' + Sfx( nameSfx ),
                           mediumRuleNameSfx = ( replicaStatX, replicaStatY ), attrs = dict( piperun_short = True ) )
    x = []
    y = []
    urls = []
    nskipped = 0
    colors = []
    # If replicaShow is a sequence of expressions, build a single expression joining
    # their values with '_' (floats formatted to two decimals).
    if IsSeq( replicaShow ): replicaShow = '"_".join(map(str,["%.2f" % v if isinstance(v,float) else v for v in (' + ','.join( map( str, replicaShow ) ) + ')]))'
    for r in findReplicasMatchingConds( showHeadings = ( 'valX', 'valY', 'valShow' ) + tmap( operator.itemgetter( 0 ),
                                                                                            replicaColorings ),
                                        showVals = ( replicaStatX, replicaStatY,
                                                     replicaShow if replicaShow is not None else '0' ) +
                                            tmap( operator.itemgetter( 1 ), replicaColorings ),
                                        **args ):
        x.append( r.valX )
        y.append( r.valY )
        # Encode scenario/replica/values in the point's URL for interactive viewers.
        urls.append( '%s_%d_x=%s_y=%s' % ( r.scenario, r.replicaNum,
                                           '%.2f' % r.valX if isinstance( r.valX, float ) else r.valX,
                                           '%.2f' % r.valY if isinstance( r.valY, float ) else r.valY ) +
                     ( ( '' if str( r.valShow).startswith('_') else '_' ) + str( r.valShow ) if replicaShow else '' ) )
        if replicaColorings:
            # First matching coloring condition wins.
            colorHere = None
            for name, cond, color in replicaColorings:
                if r[ name ]:
                    colorHere = color
                    break
            colors.append( colorHere if colorHere is not None else replicaDefaultColor )
    pp.scatter( **Dict( 'x y urls', c = colors if colors else 'b' ) )
    pp.axis( 'equal' )
    if xbound: pp.gca().set_xbound( *xbound )
    if ybound: pp.gca().set_ybound( *ybound )
    if not xbound and not ybound:
        # Draw the y=x diagonal over the data range as a reference line.
        start = min( min(x), min(y) )
        rng = max( max( x ) - min( x ), max(y) - min(y) )
        pp.plot( [ start, start+rng ], [ start, start+rng ], 'g--' )
    pp.xlabel( replicaStatX )
    pp.ylabel( replicaStatY )
    if title: pp.title( title )
    pp.savefig( outFile )
def findTableFiles( Ddata, thinSfx, whichStats, tables, scenCond, allScens, scen2sfxs ):
    """Return table files used in conditions.

    For each scenario matching scenCond and each table name, compute the path of the
    file holding that table's values for the scenario, under
    Ddata/<whichStats><thinSfx>/<scenDir>/ .

    Returns: ( tableNames, tables, ourScens, scen2table2file, depends_on ) where
       tableNames are the table names sans extension, ourScens the matching scenarios,
       scen2table2file maps scenario -> table -> file, and depends_on is the flat
       list of all table files.
    """
    tables = MakeSeq( tables )
    scen2sfxs = dict( scen2sfxs )
    scenCondExpr = compile_expr( scenCond )
    ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ]
    depends_on = []
    scen2table2file = {}
    for scen in ourScens:
        thisScenDict = {}
        for table in tables:
            # identify the scenario-specific suffix for this table
            scenSfx = DictGet( scen2sfxs, scen, '' )
            if scenSfx:
                if IsSeq( scenSfx ): scenSfx = dict( scenSfx )
                # A non-string suffix maps table names (sans extension) to their suffixes.
                if not isinstance( scenSfx, types.StringTypes ): scenSfx = DictGet( dict( scenSfx ),
                                                                                    os.path.splitext( table )[0], '' )
            # .data tables are directories, so reference them with a trailing slash.
            tableFile = os.path.join( Ddata, whichStats+ thinSfx, scen.scenDir(),
                                      AddFileSfx( table + ( '.tsv' if '.' not in table else '' ),
                                                  scenSfx )
                                      + ( '/' if table.endswith( '.data' ) else '' ) )
            depends_on.append( tableFile )
            thisScenDict[ table ] = tableFile
        scen2table2file[ scen ] = thisScenDict
    tableNames = map( operator.itemgetter( 0 ), map( os.path.splitext, tables ) )
    return tableNames, tables, ourScens, scen2table2file, depends_on
def FindChromCol( iDotData ):
    """Find the column representing the replica or chromosome, based on our conventions."""
    # Preference order: 'replicaNum', then 'Chrom', falling back to 'chrom'.
    for candidate in ( 'replicaNum', 'Chrom' ):
        if candidate in iDotData.headings:
            return candidate
    return 'chrom'
def FindPosCol( iDotData ):
    """Find the column representing the SNP position, based on our conventions."""
    # The capitalized form takes precedence when present.
    if 'Pos' in iDotData.headings:
        return 'Pos'
    return 'pos'
class NameCollector(ast.NodeVisitor):
    """AST visitor that records every bare identifier (ast.Name node) encountered
    while walking an expression tree; used to discover which tables an
    expression references."""

    def __init__(self):
        # Identifiers seen so far, in visit order (may contain duplicates).
        self.names = []

    def visit_Name(self, node):
        self.names.append( node.id )

    @staticmethod
    def getNamesIn( expr ):
        """Return the distinct identifiers used in the expression string `expr`,
        as a tuple (order unspecified)."""
        collector = NameCollector()
        collector.visit( ast.parse( expr ) )
        return tuple( set( collector.names ) )
def FindTables( *exprs ):
    """Find the names of tables referenced in the specified expression strings.

    Each expression is parsed and every bare identifier it uses is collected;
    the literals 'True' and 'False' are excluded since they are not table names.

    Returns a tuple of distinct table names (order unspecified).  Unlike the
    previous reduce()-based implementation, calling with no expressions now
    returns () instead of raising TypeError.
    """
    names = set()
    for expr in exprs:
        names.update( NameCollector.getNamesIn( expr ) )
    return tuple( names - set( ( 'True', 'False' ) ) )
def findReplicasMatchingConds( Ddata,
                               replicaTables = None, replicaCond = 'True',
                               outFile = None,
                               scenCond = 'True',
                               showHeadings = (),
                               showVals = (),
                               allScens = GetScenarios(),
                               scen2sfxs = {},
                               thinSfx = '',
                               getio = None ):
    """Make an IDotData containing specified per-replica values for replicas meeting specified conditions.

    Params:

       replicaTables - per-replica statistics tables referenced by the expressions; if None,
          they are inferred from the identifiers used in replicaCond and showVals.
       replicaCond - Python expression selecting replicas; each table name is bound to the
          replica's row in the corresponding table.
       showVals / showHeadings - expressions to evaluate per matching replica, and the
          column headings under which to report them.

    Returns: an IDotData with columns ( scenario, replicaNum, *showHeadings ); also
       saved to outFile if given.
    """
    dbg( '"findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs' )
    if replicaTables is None: replicaTables = FindTables( replicaCond, *MakeSeq( showVals ) )
    replicaTables = tuple( set( MakeSeq( replicaTables ) ) )
    replicaTableNames, replicaTables, ourScens, scen2table2file, depends_on = \
        findTableFiles( whichStats = 'replicastats', tables = replicaTables,
                        **Dict( 'Ddata thinSfx scenCond allScens scen2sfxs' ) )
    if getio: return dict( depends_on = depends_on,
                           creates = outFile,
                           attrs = dict( piperun_short = True ) )
    replicaCondExpr = compile_expr( replicaCond )
    showVals = MakeSeq( showVals )
    showValsExpr = map( compile_expr, showVals )
    if not showHeadings:
        showHeadings = map( MakeAlphaNum, showVals )
    # Deduplicate headings by appending a numeric suffix to repeats.
    showHeadings2 = []
    for h in showHeadings:
        h_new = h
        i = 1
        while h_new in showHeadings2:
            h_new = h + Sfx( i )
            i += 1
        showHeadings2.append( h_new )
    showHeadings = showHeadings2
    def makeResult():
        # Generator yielding the headings row, then one row per matching replica.
        yield ( 'scenario', 'replicaNum' ) + tuple( MakeSeq( showHeadings ) )
        numReplicasSkippedTot, numReplicasAllowedTot = 0, 0
        for scen in ourScens:
            # NOTE(review): logs the literal string rather than the value of scen --
            # looks like it was meant to be dbg( 'scen' ); confirm.
            logging.info( '"findReplicasMatchingConds" scen' )
            numReplicasSkipped, numReplicasAllowed = 0, 0
            thisScenDict = scen2table2file[ scen ]
            replicaTableVals = [ IDotData( thisScenDict[ replicaTable ] ) for replicaTable in replicaTables ]
            # Inner-join the replica tables on their replica/chromosome column, walking
            # them in lockstep one replica at a time.
            for replicaTableRows in \
                    IDotData.TableIterInnerJoinAuxAsTuples( tableIters = map( iter, replicaTableVals ),
                                                            cols = map( FindChromCol, replicaTableVals ),
                                                            blanks = ( None, ) * len( replicaTableVals ),
                                                            headingLens = map( IDotData.rootClass.numCols,
                                                                               replicaTableVals ) ):
                vdict = dict( zip( replicaTableNames, replicaTableRows ) )
                dbg( 'scen vdict' )
                evalHere = lambda expr: eval( expr, globals(), vdict )
                if evalHere( replicaCondExpr ):
                    numReplicasAllowed += 1
                    yield [ scen.scenName(), replicaTableRows[0].replicaNum ] + map( evalHere, showValsExpr )
                else:
                    numReplicasSkipped += 1
            dbg( '"in_scenario" scen numReplicasSkipped numReplicasAllowed' )
            numReplicasSkippedTot += numReplicasSkipped
            numReplicasAllowedTot += numReplicasAllowed
        dbg( 'numReplicasSkippedTot numReplicasAllowedTot' )
    r = IDotData.fromFn( makeResult )
    if outFile: r.save( outFile )
    return r
def findSnpsMatchingConds( Ddata,
                           snpTables = (), snpCond = 'True', replicaTables = (), replicaCond = 'True',
                           outFile = None,
                           scenCond = 'True',
                           showHeadings = (),
                           showVals = (),
                           allScens = GetScenarios(),
                           scen2sfxs = {},
                           thinSfx = '',
                           getio = None ):
    """Make an IDotData containing specified per-SNP values for SNPs meeting specified conditions in
    replicas meeting specified conditions.

    Params:

       Ddata - root of the data directory
       snpTables - names of per-SNP stat tables; they are inner-joined on (replica, pos) and the
          joined rows are what snpCond and showVals are evaluated against
       snpCond - Python expression evaluated once per SNP; only SNPs for which it is true are output
       replicaTables, replicaCond - per-replica tables and condition used to pre-filter replicas
          (via findReplicasMatchingConds) before any SNPs are examined
       showVals - Python expressions whose per-SNP values become the output columns
       showHeadings - headings for showVals columns; derived from the expressions if empty
       outFile - if given, the resulting IDotData is also saved to this file
       getio - if true, only dependency/creation info for the pipeline is returned

    Returns an IDotData with columns ( scenario, replicaNum, Pos, *showHeadings ).

    NOTE: the Dict()/MergeDicts() helpers pick up the caller's *local variable names*
    (e.g. 'replicaTables replicaCond scenCond'), so those locals must not be renamed.
    """

    snpTables = tuple( set( MakeSeq( snpTables ) ) )

    dbg( '"findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals '
         'scen2sfxs' )

    replicaArgs = Dict( 'Ddata thinSfx scenCond allScens scen2sfxs' )

    snpTableNames, snpTables, ourScens, scen2table2file, depends_on = \
        findTableFiles( whichStats = 'snpStats', tables = snpTables, **replicaArgs )

    if getio: return dict( depends_on = depends_on + findTableFiles( whichStats = 'replicastats', tables = replicaTables,
                                                                     **replicaArgs )[-1],
                           creates = outFile )

    snpCondExpr = compile_expr( snpCond )
    showVals = MakeSeq( showVals )
    showValsExpr = map( compile_expr, showVals )
    if not showHeadings: showHeadings = map( MakeAlphaNum, showVals )

    def makeResult():
        """Generator yielding the header row followed by one row per matching SNP."""

        yield ( 'scenario', 'replicaNum', 'Pos' ) + tuple( MakeSeq( showHeadings ) )

        # Totals across ALL scenarios.  (Bug fix: these were previously re-zeroed
        # inside the per-scenario loop, so the final count only reflected the
        # last scenario processed.)
        numSnpsSkippedTot, numSnpsAllowedTot = 0, 0

        for scen in ourScens:
            dbg( '"findSnpsMatchingConds" scen ')
            numSnpsAllowed, numSnpsSkipped = 0, 0

            # First restrict attention to replicas of this scenario satisfying replicaCond.
            replicasHere = findReplicasMatchingConds( **MergeDicts( replicaArgs,
                                                                    Dict( 'replicaTables replicaCond scenCond',
                                                                          allScens = ( scen, ) ) ) )
            replicasHereSet = frozenset( replicasHere.replicaNum )
            dbg( 'scen len(replicasHereSet) replicasHereSet' )

            thisScenDict = scen2table2file[ scen ]
            dbg( '#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]' )
            snpTableVals = [ IDotData( thisScenDict[ snpTable ] ) for snpTable in snpTables ]

            # Rows arrive grouped by replica, so cache the membership test result
            # between consecutive rows of the same replica.
            lastReplica = None
            lastReplicaResult = None

            replicaCol = FindChromCol( snpTableVals[ 0 ] )
            posCol = FindPosCol( snpTableVals[ 0 ] )

            for snpTableRows in \
                    IDotData.TableIterInnerJoinAuxAsTuples( tableIters = map( iter, snpTableVals ),
                                                            cols = zip( map( FindChromCol, snpTableVals ),
                                                                        map( FindPosCol, snpTableVals ) ),
                                                            blanks = ( None, ) * len( snpTableVals ),
                                                            headingLens = map( IDotData.rootClass.numCols,
                                                                               snpTableVals ) ):
                thisReplica = snpTableRows[0][ replicaCol ]
                if thisReplica != lastReplica:
                    thisReplicaResult = ( thisReplica in replicasHereSet )
                    if not thisReplicaResult: dbg( '"SKIPPING_REPLICA" thisReplica' )
                    lastReplicaResult = thisReplicaResult
                    lastReplica = thisReplica

                if thisReplicaResult:
                    localDict = dict( zip( snpTableNames, snpTableRows ) )
                    evalHere = lambda expr: eval( expr, globals(), localDict )
                    if evalHere( snpCondExpr ):
                        v = [ scen.scenName(), thisReplica, snpTableRows[0][ posCol ] ] \
                            + map( evalHere, showValsExpr )
                        numSnpsAllowed += 1
                        yield v
                    else: numSnpsSkipped += 1

            numSnpsSkippedTot += numSnpsSkipped
            numSnpsAllowedTot += numSnpsAllowed
            dbg( 'scen numSnpsSkippedTot numSnpsAllowedTot' )

        dbg( '"finalCount" numSnpsSkippedTot numSnpsAllowedTot' )

    r = IDotData.fromFn( makeResult )
    if outFile: r.save( outFile )
    return r
def gatherCausalStat( Ddata, scenario, snpStatFN, replicaCol = 'Chrom', posCol = 'Pos', getio = None ):
    """Gather a specified per-SNP statistic just for the causal SNPs, and write them out as a replicastat.

    The output file name is derived from the input file name by mapping its first
    'snpStats' path component to 'replicastats'.
    """
    replicaStatFN = snpStatFN.replace( 'snpStats', 'replicastats', 1 )
    if getio:
        return dict( depends_on = snpStatFN, creates = replicaStatFN,
                     attrs = dict( scenario = scenario.scenDir() ) )
    snpStats = IDotData( snpStatFN )
    with IDotData.openForWrite( replicaStatFN, snpStats.headings ) as out:
        # Copy through only the rows at the causal SNP position.
        for row in snpStats:
            if row[ posCol ] != CAUSAL_POS: continue
            out.writeRecord( row )
def DefineRulesTo_gatherCausalStat( pr, Ddata, scen2snpStatFN, posCol = 'Pos' ):
    """Define rules to gather a specified per-SNP statistic for the causal SNPs into a replica stat."""
    # NOTE(review): Dict() picks up the caller's locals by *name*
    # ('Ddata scenario snpStatFN posCol'), so these local names must not be renamed.
    for scenario, snpStatFN in scen2snpStatFN.items():
        pr.addInvokeRule( invokeFn = gatherCausalStat, invokeArgs = Dict( 'Ddata scenario snpStatFN posCol' ) )
| StarcoderdataPython |
151138 | import os
import sys
# Make the application package importable when this script is run directly.
# Bug fix: os.path.dirname(__name__) used the *module name*, not a filesystem
# path, so it always evaluated to '' — use the script's own file path instead.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from app import app
from settings import DEFAULT_WEB_SERVER
# Start the development web server on the configured host/port.
# NOTE(review): debug=True enables the interactive debugger — confirm this
# entry point is never used in production.
app.run(host=DEFAULT_WEB_SERVER['host'], port=DEFAULT_WEB_SERVER['port'], debug=True)
| StarcoderdataPython |
1760925 | """test for CharArrowKeysInput"""
import os
import unittest
from mock import patch, Mock
# Try the normal (absolute) import first; fall back to a relative import with
# selected dependencies mocked out so the module under test can load in isolation.
try:
    from ui import CharArrowKeysInput
except ImportError:
    print("Absolute imports failed, trying relative imports")
    os.sys.path.append(os.path.dirname(os.path.abspath('.')))
    # Store original __import__
    orig_import = __import__
    def import_mock(name, *args):
        # Replace 'helpers' with a Mock and redirect 'ui.utils' to the local
        # 'utils' module; everything else imports normally.
        if name in ['helpers']:
            return Mock()
        elif name == 'ui.utils':
            import utils
            return utils
        return orig_import(name, *args)
    # NOTE(review): patching '__builtin__.__import__' is Python-2-only;
    # Python 3 would need 'builtins.__import__'.
    with patch('__builtin__.__import__', side_effect=import_mock):
        from char_input import CharArrowKeysInput
def get_mock_input():
    """Return a fresh mock standing in for an input device."""
    fake_input = Mock()
    return fake_input
def get_mock_output(rows=8, cols=21):
    """Return a mock output device reporting a character-display geometry."""
    fake_output = Mock()
    fake_output.rows = rows
    fake_output.cols = cols
    fake_output.type = ["char"]
    return fake_output
# One-letter shorthands for the key names used throughout these tests.
shorthands = {
    "u": "KEY_UP",
    "d": "KEY_DOWN",
    "l": "KEY_LEFT",
    "r": "KEY_RIGHT",
    "e": "KEY_ENTER"}

def execute_shorthand(ci, shorthand):
    """Press the key named by a one-letter shorthand on the UI element's keymap.

    Helper to make CharArrowKeysInput behaviour easier to drive from tests.
    """
    key_name = shorthands[shorthand]
    ci.keymap[key_name]()

ci_name = "Test CharArrowKeysInput"
# Drives CharArrowKeysInput by patching its idle_loop with a scripted key
# scenario, then checks the value returned by activate().
class TestCharArrowKeysInput(unittest.TestCase):
    """test CharArrowKeysInput class"""
    def test_constructor(self):
        """tests constructor"""
        ci = CharArrowKeysInput(get_mock_input(), get_mock_output(), name=ci_name)
        self.assertIsNotNone(ci)
    def test_initial_value_support(self):
        """tests support for the obsolete attribute"""
        value = "ololo"
        ci = CharArrowKeysInput(get_mock_input(), get_mock_output(), initial_value = value, name=ci_name)
        assert ci.value == list(value)
    def test_value_leakage(self):
        """tests whether the action key settings of one CharArrowKeysInput leaks into another"""
        i = get_mock_input()
        o = get_mock_output()
        c1 = CharArrowKeysInput(i, o, value="1", name=ci_name + "1")
        c2 = CharArrowKeysInput(i, o, value="2", name=ci_name + "2")
        c3 = CharArrowKeysInput(i, o, name=ci_name + "3")
        assert (c1.value != c2.value)
        assert (c2.value != c3.value)
        assert (c1.value != c3.value)
    def test_f1_left_returns_none(self):
        # Pressing LEFT at the start of the value should abort and return None.
        ci = CharArrowKeysInput(get_mock_input(), get_mock_output(), name=ci_name)
        ci.refresh = lambda *args, **kwargs: None #not needed
        # Checking at the start of the list
        def scenario():
            ci.keymap["KEY_LEFT"]()
            assert not ci.in_foreground
        with patch.object(ci, 'idle_loop', side_effect=scenario) as p:
            return_value = ci.activate()
        assert return_value is None
        # Checking after entering some keys
        letters_entered = 5
        test_keys = "ur"*letters_entered
        def scenario():
            for key in test_keys:
                execute_shorthand(ci, key)
            for i in range(letters_entered):
                execute_shorthand(ci, 'l')
                assert ci.in_foreground #Not yet at the beginning of the value
            execute_shorthand(ci, 'l')
            assert not ci.in_foreground #At the beginning of the value
        with patch.object(ci, 'idle_loop', side_effect=scenario) as p:
            return_value = ci.activate()
        assert return_value is None
    def test_entering_value(self):
        # "hello" is spelled by moving UP within the character list by the
        # offsets below, pressing RIGHT between letters, then ENTER.
        ci = CharArrowKeysInput(get_mock_input(), get_mock_output(), name=ci_name)
        ci.refresh = lambda *args, **kwargs: None
        expected_output = "hello"
        test_key_offsets = (8, 5, 12, 12, 15)
        test_keys = "r".join(["u"*offset for offset in test_key_offsets])
        test_keys += "e" #Press ENTER
        def scenario():
            for key in test_keys:
                execute_shorthand(ci, key)
            # NOTE(review): other tests assert on ci.in_foreground here —
            # confirm is_active/in_foreground are interchangeable at this point.
            assert not ci.is_active # Should exit on last "e"
        with patch.object(ci, 'idle_loop', side_effect=scenario) as p:
            return_value = ci.activate()
        assert return_value == expected_output
    def test_entering_value_with_backspaces(self):
        ci = CharArrowKeysInput(get_mock_input(), get_mock_output(), name=ci_name)
        ci.refresh = lambda *args, **kwargs: None
        expected_output = "hello"
        test_key_offsets = (8, 5, 12, 12, 15)
        test_keys = "r".join(["u"*offset for offset in test_key_offsets])
        test_keys += "d"*(test_key_offsets[-1]+1) #Going back to the backspace character
        test_keys += "lr" #should erase the latest character and go to the position it took
        test_keys += "u"*test_key_offsets[-1] #adding the latest character again
        test_keys += "e" #Press ENTER
        def scenario():
            for key in test_keys:
                execute_shorthand(ci, key)
            assert not ci.in_foreground # Should exit on last "e"
        with patch.object(ci, 'idle_loop', side_effect=scenario) as p:
            return_value = ci.activate()
        assert return_value == expected_output
    def test_shows_data_on_screen(self):
        """Tests whether the CharArrowKeysInput outputs data on screen when it's ran"""
        i = get_mock_input()
        o = get_mock_output()
        ci = CharArrowKeysInput(i, o, message="Test:", name=ci_name)
        expected_output = "hello"
        test_key_offsets = (8, 5, 12, 12, 15)
        test_keys = "r".join(["u"*offset for offset in test_key_offsets])
        test_keys += "e" #Press ENTER
        def scenario():
            assert o.display_data.called
            assert o.display_data.call_args[0] == ('Test:', '')
            for key in test_keys:
                execute_shorthand(ci, key)
            assert not ci.in_foreground # Should exit on last "e"
        with patch.object(ci, 'idle_loop', side_effect=scenario) as p:
            ci.activate()
        #The scenario should only be called once
        assert ci.idle_loop.called
        assert ci.idle_loop.call_count == 1
        assert o.display_data.called
        assert o.display_data.call_count == len(test_keys) #Magically, it's the same
        #There's one refresh right after the activate() that isn't because of a keypress,
        #And ENTER keypress at the end doesn't trigger a refresh, so it evens out
        assert o.display_data.call_args[0] == ('Test:', 'hello')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
32365 | from decimal import Decimal
from unittest.mock import patch
from django.test import TestCase
from django.urls import reverse
from django.contrib import auth
from main.models import Product, User, Address
from main.forms import UserCreationForm
class TestPage(TestCase):
    """Integration tests for the public pages, signup flow, and address views."""

    def test_home_page_works(self):
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')
        self.assertContains(response, 'BookStore')

    def test_about_us_page_works(self):
        response = self.client.get(reverse('about_us'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'about_us.html')
        self.assertContains(response, 'BookStore')

    def test_products_page_returns_active(self):
        """The 'all' products page lists only active products."""
        Product.objects.create(
            name='The cathedral and the bazaar',
            slug='cathedral-bazaar',
            price=Decimal('10.00'),
        )
        Product.objects.create(
            name='A Tale of Two Cities',
            slug='tale-two-cities',
            price=Decimal('2.00'),
            active=False,
        )
        product_list = Product.objects.active().order_by(
            'name'
        )
        response = self.client.get(
            reverse('products', kwargs={'tag': "all"})
        )
        self.assertEqual(
            list(response.context['object_list']),
            list(product_list),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'BookStore')

    def test_products_page_filters_by_tag_and_active(self):
        """A tag page lists only active products carrying that tag."""
        cb = Product.objects.create(
            name='The cathedral and the bazaar',
            slug='cathedral-bazaar',
            price=Decimal('10.00'),
        )
        cb.tags.create(name='Open Source', slug='open-source')
        Product.objects.create(
            name='A Tale of Two Cities',
            slug='tale-two-cities',
            price=Decimal('2.00'),
            active=False,
        )
        response = self.client.get(
            reverse('products', kwargs={'tag': 'open-source'})
        )
        product_list = (
            Product.objects.active()
            .filter(tags__slug='open-source')
            .order_by('name')
        )
        self.assertEqual(
            list(response.context['object_list']),
            list(product_list),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'BookStore')

    def test_user_signup_page_loads_correctly(self):
        response = self.client.get(reverse('signup'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'signup.html')
        self.assertContains(response, "BookStore")
        self.assertIsInstance(response.context['form'], UserCreationForm)

    def test_user_signup_page_submission_works(self):
        """A valid signup creates the user, logs them in, and sends the welcome mail."""
        post_data = {
            'email': '<EMAIL>',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
        }
        with patch.object(UserCreationForm, 'send_mail') as mock_send:
            response = self.client.post(reverse('signup'), post_data)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(auth.get_user(self.client).is_authenticated)
        self.assertTrue(User.objects.filter(email='<EMAIL>').exists())
        mock_send.assert_called_once()

    def test_address_list_page_returns_owned_by_user(self):
        """The address list shows only the logged-in user's own addresses."""
        user1 = User.objects.create_user("user1", "<PASSWORD>")
        user2 = User.objects.create_user("user2", "<PASSWORD>")
        Address.objects.create(
            user=user1,
            name="<NAME>",
            address1="1 mende",
            address2="24 church street",
            city="kano",
            country="Nigeria",
        )
        Address.objects.create(
            user=user2,
            name="<NAME>",
            address1="4 mendez",
            address2="24 boulevard street",
            city="Abuja",
            country="Nigeria",
        )
        self.client.force_login(user2)
        response = self.client.get(reverse("address_list"))
        self.assertEqual(response.status_code, 200)
        address_list = Address.objects.filter(user=user2)
        self.assertEqual(
            list(response.context["object_list"]),
            list(address_list),
        )

    def test_address_create_stores_user(self):
        """Creating an address via the form attaches it to the logged-in user."""
        user1 = User.objects.create_user("user1", "12345pw")
        post_data = {
            "name": "<NAME>",
            "address1": "20 broadstreet",
            "address2": "",
            "zip_code": "IKJ20",
            "city": "Ibadan",
            "country": "brazil",
        }
        self.client.force_login(user1)
        self.client.post(
            reverse("address_create"), post_data,
        )
        # Bug fix: assertEqual was called with a single positional argument,
        # which raises TypeError at runtime; the intent is a truthiness check.
        self.assertTrue(Address.objects.filter(user=user1).exists())
| StarcoderdataPython |
3250609 | import os
import sys
import shutil
import unittest
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
from tensorflow.contrib import layers
import hiddenlayer as hl
# Hide GPUs. Not needed for this test.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Create output directory in project root
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
OUTPUT_DIR = os.path.join(ROOT_DIR, "test_output")
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)
# Directory holding fixture data used by the CIFAR-10 test below.
DATA_DIR = os.path.join(ROOT_DIR, "test_data")
def lrelu(x):
    """Leaky ReLU activation: identity for positive inputs, slope 0.01 otherwise."""
    leak = 0.01 * x
    return tf.maximum(leak, x)
class TrafficSignsModel():
    """Model taken from my traffic signs recognition repo.
    https://github.com/waleedka/traffic-signs-tensorflow
    """
    # 5x5 convolution with batch norm and leaky-ReLU activation.
    # NOTE(review): the 'name' parameter is accepted but never used below.
    def conv(self, input, num_outputs, name=None):
        return layers.convolution2d(
            input, num_outputs=num_outputs, kernel_size=(5, 5), stride=(1, 1),
            padding="SAME", activation_fn=lrelu,
            normalizer_fn=layers.batch_norm
        )
    # 2x2 max pooling with stride 2 (halves spatial dimensions).
    def pool(self, input):
        return layers.max_pool2d(input, kernel_size=(2, 2),
                                 stride=(2, 2), padding="SAME")
    def __init__(self):
        # Build the whole graph (placeholders, conv stack, loss, optimizer)
        # inside a dedicated tf.Graph, then start a session and initialize.
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Global step counter
            self.global_step = tf.Variable(0, trainable=False, name='global_step')
            # Placeholders
            self.images = tf.placeholder(tf.float32, [None, 32, 32, 3], name="images")
            self.labels = tf.placeholder(tf.int32, [None], name="labels")
            # Layers
            self.conv1 = self.conv(self.images, 8)
            self.pool1 = self.pool(self.conv1)
            self.conv2 = self.conv(self.pool1, 12)
            self.pool2 = self.pool(self.conv2)
            self.conv3 = self.conv(self.pool2, 16)
            self.pool3 = self.pool(self.conv3)
            self.flat = layers.flatten(self.pool3)
            # TODO self.h1 = layers.fully_connected(self.flat, 200, lrelu)
            self.logits = layers.fully_connected(self.flat, 62, lrelu)
            # Convert one-hot vector to label index (int).
            self.predicted_labels = tf.argmax(self.logits, 1)
            # Loss
            self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.logits, labels=self.labels, name="test_name"))
            # Training Ops
            self.train = tf.train.AdamOptimizer(learning_rate=0.001)\
                .minimize(self.loss, global_step=self.global_step)
            self.init = tf.global_variables_initializer()
            # Create session
            self.session = tf.Session()
            # Run initialization op
            self.session.run(self.init)
class TestTensorFlow(unittest.TestCase):
    # Renders hiddenlayer graphs for two hand-built TF models to PDF.
    def test_graph(self):
        m = TrafficSignsModel()
        dot = hl.build_graph(m.graph).build_dot()
        dot.format = 'pdf'
        dot.render("tf_traffic_signs", directory=OUTPUT_DIR, cleanup=True)
        # Import CIFAR from the demos folder
        # NOTE(review): the relative path "../demos" depends on the current
        # working directory when tests run — confirm it resolves correctly.
        sys.path.append("../demos")
        import tf_cifar10
        with tf.Session():
            with tf.Graph().as_default() as g:
                tf_cifar10.CIFAR10(data_dir=DATA_DIR).model(inputs=tf.placeholder(tf.float32, shape=(8, 32, 32, 3)))
                dot = hl.build_graph(g).build_dot()
                dot.format = 'pdf'
                dot.render("tf_cifar10", directory=OUTPUT_DIR, cleanup=True)
class TestSlimModels(unittest.TestCase):
    # Builds several stock tf.contrib.slim models and renders their
    # hiddenlayer graphs to PDF; each model gets a fresh Graph/Session.
    def test_graph(self):
        # VGG-16
        with tf.Session():
            with tf.Graph().as_default() as g:
                nets.vgg.vgg_16(tf.placeholder(tf.float32, shape=(1, 224, 224, 3)))
                dot = hl.build_graph(g).build_dot()
                dot.format = 'pdf'
                dot.render("tf_vgg16", directory=OUTPUT_DIR, cleanup=True)
        # ResNet-50 v1
        with tf.Session():
            with tf.Graph().as_default() as g:
                nets.resnet_v1.resnet_v1_50(
                    tf.placeholder(tf.float32, shape=(1, 224, 224, 3)))
                dot = hl.build_graph(g).build_dot()
                dot.format = 'pdf'
                dot.render("tf_resnet50", directory=OUTPUT_DIR, cleanup=True)
        # Inception v1
        with tf.Session():
            with tf.Graph().as_default() as g:
                nets.inception.inception_v1(
                    tf.placeholder(tf.float32, shape=(1, 224, 224, 3)))
                dot = hl.build_graph(g).build_dot()
                dot.format = 'pdf'
                dot.render("tf_inception", directory=OUTPUT_DIR, cleanup=True)
        # AlexNet v2
        with tf.Session():
            with tf.Graph().as_default() as g:
                nets.alexnet.alexnet_v2(
                    tf.placeholder(tf.float32, shape=(1, 224, 224, 3)))
                dot = hl.build_graph(g).build_dot()
                dot.format = 'pdf'
                dot.render("tf_alexnet", directory=OUTPUT_DIR, cleanup=True)
        # OverFeat (expects 231x231 inputs)
        with tf.Session():
            with tf.Graph().as_default() as g:
                nets.overfeat.overfeat(
                    tf.placeholder(tf.float32, shape=(1, 231, 231, 3)))
                dot = hl.build_graph(g).build_dot()
                dot.format = 'pdf'
                dot.render("tf_overfeat", directory=OUTPUT_DIR, cleanup=True)
if __name__ == "__main__":
    # Bug fix: shutil.rmtree(OUTPUT_DIR) previously ran unconditionally at
    # module import time — i.e. *before* any test executed — deleting the
    # output directory the tests write into.  Run the tests first, clean up
    # afterwards, and preserve a failing exit status.
    outcome = unittest.main(exit=False)
    shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
    sys.exit(0 if outcome.result.wasSuccessful() else 1)
| StarcoderdataPython |
1789133 | from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Objects.FText import FText
class TextProperty:
    """Wraps an FText value parsed from a binary stream, remembering its offset."""
    position: int  # stream offset at which the value started
    Value = None   # the parsed FText payload

    def __init__(self, reader: BinaryStream) -> None:
        # Record where in the underlying stream this property begins,
        # then parse the FText in place.
        self.position = reader.base_stream.tell()
        self.Value = FText(reader)

    def GetValue(self):
        """Return the plain value held by the wrapped FText."""
        return self.Value.GetValue()
| StarcoderdataPython |
1759236 | import requests
import json
import os
# Fail fast with setup instructions if the Slack bot token is not configured.
if 'RITO_SLACK_TOKEN' not in os.environ:
    print("To use Rito's slack functions, first create a Slack app on your workspace following these instructions: https://api.slack.com/messaging/sending#getting_started")
    print("Your app needs the permissions channel:read, chat:write, and chat:write.public")
    print("After creating the app and installing it to your workspace, copy its auth token into an environment variable called RITO_SLACK_TOKEN")
    exit(1)
# Bearer token used for all Slack Web API calls in this module.
auth_token = os.environ['RITO_SLACK_TOKEN']
def send_message(channel, text):
    """Post a plain-text message to a Slack channel via chat.postMessage.

    Raises Exception with Slack's error string if the API reports failure.
    """
    body = json.dumps({
        "channel": channel,
        "text": text,
    })
    request_headers = {
        "Content-Type": "application/json; charset=utf-8",
        "Authorization": "Bearer {}".format(auth_token)
    }
    resp = requests.post("https://slack.com/api/chat.postMessage", data=body, headers=request_headers)
    resp = json.loads(resp.text)
    if not resp["ok"]:
        raise Exception(resp["error"])
1625602 | import requests
from settings import PORT
from dbmodules.base import CoreDBServer
from dbmodules.user import UserTable
from dbmodules.order import OrderTable
import logging
def pe_api(url, data=None, headers=None):
    """POST to `url` and return the parsed JSON response body.

    Returns None on a network (connection) failure; if the response cannot be
    parsed as JSON, returns (request headers, request body, raw response text)
    to aid debugging — same as the original behavior.

    Bug fix: the original wrapped both the request and the JSON parsing in one
    try block whose generic `except` referenced `ret`; if requests.post itself
    raised a non-ConnectionError, `ret` was unbound and the handler crashed
    with UnboundLocalError.
    """
    try:
        ret = requests.post(url, headers=headers, data=data)
    except requests.exceptions.ConnectionError:
        print('网络错误')  # "network error"
        return None
    try:
        return ret.json()
    except Exception:
        # Response was not valid JSON; surface the raw exchange for debugging.
        return (ret.request.headers, ret.request.body, ret.text)
def login():
    """Log in through the jmeter endpoint using the first user row in the DB."""
    user_row = CoreDBServer(UserTable).select().first()
    credentials = {
        'username': user_row['username'],
        'password': user_row['password']
    }
    return pe_api('http://localhost:%s/jmeter/login' % PORT, data=credentials)
def exp_order(order_no):
    """Expire an order by backdating its create_time 15 minutes (the order validity window)."""
    sql = 'update %s set create_time = date_sub(create_time, interval 15 minute) where order_no = %r' % (
        OrderTable.name, order_no)
    CoreDBServer().sql_execute(sql)
| StarcoderdataPython |
156700 | #!/usr/bin/env python
# coding: utf-8
# ### Modules ###
# In[1]:
import pandas as pd
import requests
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
# ### Setting Chrome Path ###
# In[2]:
def scrape():
    """Scrape Mars news, featured image, fact tables, and hemisphere images.

    Returns a dict with keys: 'headline', 'teaser', 'featured_image',
    'mars_earth_com_table', 'mars_prof_table', 'mars_images_urls'.

    Cleanups vs. the original notebook export: removed a stray bare `print`
    statement, a dead `info_mars = []` assignment, and commented-out code.
    """
    # Launch a Chrome browser driven by splinter.
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)

    # The websites to scrape.
    news_url = 'https://redplanetscience.com/'
    image_url = 'https://spaceimages-mars.com/'
    galaxy_fact_url = 'https://galaxyfacts-mars.com/'
    mars_hemi_pics_url = 'https://marshemispheres.com/'

    # --- Latest news headline and teaser ---
    browser.visit(news_url)
    html = browser.html
    soup = bs(html, "html.parser")
    headline = soup.find("div", class_="content_title").get_text(strip=True)
    teaser = soup.find("div", class_="article_teaser_body").get_text(strip=True)

    # --- Featured image ---
    browser.visit(image_url)
    html = browser.html
    soup = bs(html, "html.parser")
    # The header image src is site-relative; prefix it with the site URL.
    featured_image_url = image_url + [img.get("src") for img in soup.find_all("img", class_="headerimage fade-in")][0]

    # --- Mars fact tables ---
    galaxy_facts_table = pd.read_html(galaxy_fact_url)

    # Mars vs. Earth comparison table.
    mars_earth_com_df = galaxy_facts_table[0]
    mars_earth_com_df.columns = ['Description', 'Mars', 'Earth']
    mars_earth_com_table = mars_earth_com_df.to_html(classes="table table-striped", index=False)

    # Mars profile table.
    mars_prof_df = galaxy_facts_table[1]
    mars_prof_df.columns = ['Description', 'Values']
    mars_prof_table = mars_prof_df.to_html(classes="table table-striped", index=False)

    # --- Hemisphere images ---
    browser.visit(mars_hemi_pics_url)
    mars_images_urls = []
    for i in range(4):
        html = browser.html
        soup = bs(html, "html.parser")
        title = soup.find_all("h3")[i].get_text()
        # Follow the link to the full-resolution image page.
        browser.find_by_tag('h3')[i].click()
        html = browser.html
        soup = bs(html, "html.parser")
        img_url = soup.find("img", class_="wide-image")["src"]
        mars_images_urls.append({
            "title": title,
            "img_url": mars_hemi_pics_url + img_url})
        browser.back()

    # Close the browser once everything has been collected.
    browser.quit()

    # Assemble the scraped results.
    info_mars = {
        'headline': headline,
        'teaser': teaser,
        'featured_image': featured_image_url,
        'mars_earth_com_table': mars_earth_com_table,
        'mars_prof_table': mars_prof_table,
        'mars_images_urls': mars_images_urls
    }
    return info_mars
| StarcoderdataPython |
59122 | <reponame>oliveirahelena/codeflix-catalog-api<filename>src/infrastructure/db/repo/video_repository.py
import uuid
from typing import Set, Union
from sqlalchemy.orm import Session
from src.domain.entities import CastMember, Category, Genre, Video
from src.infrastructure.db.models import orm
from .repository import SQLAlquemyRepository
class VideoRepository(SQLAlquemyRepository):
    """Repository of Video entities with association-based lookup queries."""
    def __init__(self, session: Session):
        super().__init__(session)
        self.model = Video
    # NOTE(review): the classmethods below read cls.session, while __init__
    # stores the session on the *instance*; confirm the base class also
    # exposes a class-level session, otherwise these must be instance methods.
    # NOTE(review): the annotations say Set[Video] but each method returns the
    # (unmaterialized) query object — verify intended return type.
    @classmethod
    def get_by_category(cls, category_id: uuid.UUID) -> Union[Set[Video], None]:
        # Videos joined to Category, filtered on the category's id.
        return (
            cls.session.query(Video)
            .join(Category)
            .filter(
                orm.categories.c.id == category_id,
            )
        )
    @classmethod
    def get_by_genre(cls, genre_id: uuid.UUID) -> Union[Set[Video], None]:
        # Videos joined to Genre, filtered on the genre's id.
        return (
            cls.session.query(Video)
            .join(Genre)
            .filter(
                orm.genres.c.id == genre_id,
            )
        )
    @classmethod
    def get_by_cast_member(cls, cast_member_id: uuid.UUID) -> Union[Set[Video], None]:
        # Videos joined to CastMember, filtered on the cast member's id.
        return (
            cls.session.query(Video)
            .join(CastMember)
            .filter(
                orm.cast_members.c.id == cast_member_id,
            )
        )
178881 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from . import verilog
# Intrinsic functions exported by this module.
__intrinsics__ = ('set_header', 'get_header',
                  'set_global_offset', 'set_global_addrs',
                  'set_global_addr_map', 'write_global_addr_map', 'load_global_addr_map',
                  'start', 'wait', 'sw_rst')
def set_header(fsm, saxi, index, header, wordsize=4):
    """Write one header word at the given header-register index over the slave AXI port."""
    byte_addr = (verilog.header_reg + index) * wordsize
    saxi.write(fsm, byte_addr, header)
def get_header(fsm, saxi, index, wordsize=4):
    """Read back the header word at the given header-register index."""
    byte_addr = (verilog.header_reg + index) * wordsize
    return saxi.read(fsm, byte_addr)
def set_global_offset(fsm, saxi, addr, **opt):
    """Program the global-offset control register with addr (opt: wordsize)."""
    wordsize = opt.get('wordsize', 4)
    saxi.write(fsm, verilog.control_reg_global_offset * wordsize, addr)
def set_global_addrs(fsm, saxi, *addrs, **opt):
    """Write each address into consecutive global-address control registers.

    opt: wordsize (default 4), offset (default 0, in registers).
    """
    wordsize = opt.get('wordsize', 4)
    offset = opt.get('offset', 0)
    base = (offset + verilog.control_reg_global_addr) * wordsize
    for i, addr in enumerate(addrs):
        saxi.write(fsm, base + i * wordsize, addr)
def set_global_addr_map(fsm, saxi, memory, map_addr, *addrs, **opt):
    """Write the address map into memory at map_addr, then have the hardware load it."""
    write_global_addr_map(fsm, memory, map_addr, *addrs, **opt)
    load_global_addr_map(fsm, saxi, map_addr, **opt)
def write_global_addr_map(fsm, memory, map_addr, *addrs, **opt):
    """Store each address as one word of the address map at map_addr in memory.

    opt: wordsize (default 4), offset (default 0, in words).
    """
    wordsize = opt.get('wordsize', 4)
    offset = opt.get('offset', 0)
    for index, addr in enumerate(addrs):
        memory.write_word(fsm, index + offset, map_addr, addr, wordsize * 8)
def load_global_addr_map(fsm, saxi, map_addr, **opt):
    """Point the hardware at an address map in memory and wait until it is loaded.

    Each fsm/saxi call below appends states to the generated FSM, so the
    statement order defines the hardware control sequence — do not reorder.
    """
    wordsize = opt['wordsize'] if 'wordsize' in opt else 4
    # Tell the hardware where the map lives.
    awaddr = verilog.control_reg_addr_global_addr_map * wordsize
    saxi.write(fsm, awaddr, map_addr)
    # Trigger the load.
    awaddr = verilog.control_reg_load_global_addr_map * wordsize
    saxi.write(fsm, awaddr, 1)
    # Spin until the load-request register reads back 0...
    araddr = verilog.control_reg_load_global_addr_map * wordsize
    b = fsm.current
    v = saxi.read(fsm, araddr)
    fsm.If(v != 0).goto(b)
    fsm.If(v == 0).goto_next()
    # ...and then until the busy flag for the map load clears.
    araddr = verilog.control_reg_busy_global_addr_map * wordsize
    b = fsm.current
    v = saxi.read(fsm, araddr)
    fsm.If(v != 0).goto(b)
    fsm.If(v == 0).goto_next()
def start(fsm, saxi, wordsize=4):
    """Append FSM states that assert the start register and wait for its acceptance."""
    awaddr = verilog.control_reg_start * wordsize
    saxi.write(fsm, awaddr, 1)
    # Spin until the start request register reads back 0 (request accepted).
    araddr = verilog.control_reg_start * wordsize
    b = fsm.current
    v = saxi.read(fsm, araddr)
    fsm.If(v != 0).goto(b)
    fsm.If(v == 0).goto_next()
def wait(fsm, saxi, wordsize=4):
    """Append FSM states that spin until the hardware busy flag clears."""
    araddr = verilog.control_reg_busy * wordsize
    b = fsm.current
    v = saxi.read(fsm, araddr)
    fsm.If(v != 0).goto(b)
    fsm.If(v == 0).goto_next()
def sw_rst(fsm, saxi, wordsize=4):
    """Append FSM states that trigger a software reset and wait until not busy."""
    awaddr = verilog.control_reg_reset * wordsize
    saxi.write(fsm, awaddr, 1)
    # Spin until the busy flag clears after the reset.
    araddr = verilog.control_reg_busy * wordsize
    b = fsm.current
    v = saxi.read(fsm, araddr)
    fsm.If(v != 0).goto(b)
    fsm.If(v == 0).goto_next()
| StarcoderdataPython |
3390006 | from django_filters import rest_framework as filters
from core.models import Ad
class AdFilter(filters.FilterSet):
    """Query-parameter filtering for Ad listings (supports filtering by category)."""
    class Meta:
        model = Ad
        fields = ['category']
| StarcoderdataPython |
1706538 | <filename>setup.py
"""Packaging settings."""
from dhandaulat import __version__
from setuptools import Command, find_packages, setup
# Package metadata; the console entry point exposes the CLI as `dhandaulat`,
# dispatching to dhandaulat.paisa:main.
setup(
    name='dhandaulat',
    version=__version__,
    description='A Currency converter command line program in Python.',
    url='https://github.com/Shivang-Bhandari/DhanDaulat',
    author='<NAME>',
    author_email='<EMAIL>',
    license='UNLICENSE',
    classifiers=[
        'Intended Audience :: Developers',
        'Topic :: Utilities',
        'License :: Public Domain',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.6'
    ],
    keywords='cli',
    packages=find_packages(exclude=['docs', 'tests*']),
    install_requires=['docopt'],
    entry_points={
        'console_scripts': [
            'dhandaulat=dhandaulat.paisa:main',
        ],
    },
)
| StarcoderdataPython |
1756534 | """empty message
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2020-06-16 19:37:25.877004
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): these values appear redacted/anonymized ('<KEY>'/'<PASSWORD>');
# a working migration needs the real revision hashes restored.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Make todoitem.list_id NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('todoitem', 'list_id',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert todoitem.list_id to nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('todoitem', 'list_id',
                    existing_type=sa.INTEGER(),
                    nullable=True)
    # ### end Alembic commands ###
| StarcoderdataPython |
1752898 | <reponame>JNKielmann/cotect<gh_stars>10-100
from cotect_endpoints.schema import User, CaseReport, CasePlace, CaseSymptom, CaseContact
from datetime import date
from cotect_endpoints.db_handler import GraphHandler
# add neo4j logging
from logging import getLogger, StreamHandler, DEBUG
# ```python
# from firebase_admin import auth
# user = auth.get_user("", app=default_app)
# user.phone_number
# auth.delete_user(uid, app=None)
# id_token = ""
# ```
# ```python
# # https://fastapi.tiangolo.com/tutorial/sql-databases/#note
# def get_db():
# try:
# db = SessionLocal()
# yield db
# finally:
# db.close()
#
# def get_db(db_state=Depends(reset_db_state)):
# try:
# database.db.connect()
# yield
# finally:
# if not database.db.is_closed():
# database.db.close()
# ```
# Route neo4j driver logging to stderr at DEBUG level.
handler = StreamHandler()
handler.setLevel(DEBUG)
getLogger("neo4j").addHandler(handler)
# Build a sample user and a fully-populated case report for exercising the
# graph handler (this file reads like a converted notebook / manual test).
user = User(user_id="1", verified=True)
report = CaseReport()
report.age = 20
report.gender = "male"
report.residence = CasePlace(
    place_id="berlin",
    latitude=1.5,
    longitude=19
)
report.covid_test = "tested-negative"
report.symptoms = [
    CaseSymptom(symptom_name="Caugh", report_date=date.today(), severity="mild"),
    CaseSymptom(symptom_name="Headache", report_date=date.today()),
    CaseSymptom(symptom_name="Fever", report_date=date.today(), severity="37.5"),
]
report.places = [
    CasePlace(
        place_id="my-place-1",
        visit_date=date.today(),
        latitude=1.1,
        longitude=1.2,
        place_name="test",
        place_types=["testtype", "type-2"],
    ),
    CasePlace(
        place_id="my-place-3",
        latitude=1.5,
        longitude=19
    ),
    CasePlace(
        place_id="my-place-4",
        latitude=1.9,
        longitude=19
    )
]
report.contacts = [
    CaseContact(phone_number="+4917691377102", contact_date = date.today()),
    CaseContact(phone_number="+49176947102", contact_date = date.today()),
    CaseContact(phone_number="+491769 1377102", contact_date = date.today()),
    CaseContact(phone_number="+49176934432", contact_date = date.today()),
]
# Connect to a local dockerized Neo4j (empty username/password).
graph_handler = GraphHandler("bolt://docker.for.mac.localhost:7687", "", "")
graph_handler.init_graph()
graph_handler.add_report(user, report)
# NOTE(review): bare attribute access — missing call parentheses? (cf. the
# get_driver().close() call two lines below); as written this is a no-op.
graph_handler.get_driver
graph_handler.get_session().begin_transaction().close()
graph_handler.get_driver().close()
# +
from neo4j.exceptions import ServiceUnavailable
# Opening a transaction on a closed driver should fail; close the handler then.
try:
    graph_handler.get_session().begin_transaction().close()
except ServiceUnavailable:
    graph_handler.close()
# -
# NOTE(review): add_report is called again after the driver was closed —
# confirm this is intentional (e.g. testing reconnection behavior).
graph_handler.add_report(user, report)
| StarcoderdataPython |
3307678 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021 <NAME>
# api.wwdt.me is released under the terms of the Apache License 2.0
"""Locations Models"""
from typing import List, Optional
from pydantic import BaseModel, conint, Field
# region Location Models
class Location(BaseModel):
    """Location Information"""
    # id is constrained to 0 <= id < 2**31 (fits a signed 32-bit integer).
    id: conint(ge=0, lt=2**31) = Field(title="Location ID")
    city: Optional[str] = Field(default=None,
                                title="City")
    state: Optional[str] = Field(default=None,
                                title="State")
    venue: Optional[str] = Field(default=None,
                                title="Venue Name")
    slug: Optional[str] = Field(default=None,
                                title="Location Slug String")
class Locations(BaseModel):
    """List of Locations"""
    # Wrapper object so the API response is {"locations": [...]}.
    locations: List[Location] = Field(title="List of Locations")
class LocationRecordingCounts(BaseModel):
    """Count of Recordings for a Location"""
    # Regular shows vs. all shows (the latter presumably includes best-ofs/repeats).
    regular_shows: Optional[int] = Field(default=None,
                                         title="Count of Regular Show Recordings")
    all_shows: Optional[int] = Field(default=None,
                                     title="Count of All Show Recordings")
class LocationRecordingShow(BaseModel):
    """Location Recording Information"""
    # Minimal show descriptor attached to a location's recording list.
    show_id: conint(ge=0, lt=2**31) = Field(title="Show ID")
    date: str = Field(title="Show Date")
    best_of: bool = Field(title="Best Of Show")
    repeat_show: bool = Field(title="Repeat Show")
class LocationRecordings(BaseModel):
    """Location Information and Recordings"""

    count: Optional[LocationRecordingCounts] = Field(default=None, title="Count of Show Recordings")
    shows: Optional[List[LocationRecordingShow]] = Field(default=None, title="List of Show Recordings")
class LocationDetails(Location):
    """Location Information with Recordings"""

    # Extends Location with an optional recordings payload.
    recordings: Optional[LocationRecordings] = Field(default=None, title="List of Show Recordings")
class LocationsDetails(BaseModel):
    """List of Location Details"""
    # Wrapper model so the API response is {"locations": [...]}.
    locations: List[LocationDetails] = Field(title="List of Location Details")
# endregion
| StarcoderdataPython |
4835134 | <filename>FrameApp/falconApp/src/models/user.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: rainsty
@file: user.py
@time: 2019-12-30 16:14:29
@description:
"""
from . import DBBase, Column, Integer, String
class User(DBBase):
    """ORM model for the ``user`` table (id, username, password)."""

    __tablename__ = 'user'

    id = Column('id', Integer, primary_key=True)
    # NOTE(review): password is stored as plain text (String(16)) -- confirm
    # whether hashing is handled elsewhere.
    username = Column('username', String(16), nullable=True)
    password = Column('password', String(16), nullable=True)

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def to_dict(self):
        """Return a plain dict mapping column name -> value for this row."""
        row = {}
        for column in self.__table__.columns:
            row[column.name] = getattr(self, column.name, None)
        return row
| StarcoderdataPython |
166371 | <gh_stars>0
import pandas as pd
import re
# BUG FIX: the original first line began with ``//`` (a C-style comment),
# which is a SyntaxError in Python; it is preserved below as a real comment.
# checked = input[(input['_raw'].str.contains(name, case=False))]

names = ['name1', 'name2']
for name in names:
    infile = name + '.csv'
    outfile = 'success-' + name + '.csv'
    # Renamed from ``input`` to avoid shadowing the builtin.
    frame = pd.read_csv(infile, sep=',')
    checked = frame
    if not checked.empty:
        # Windows security log: EventCode 4624 = successful logon,
        # 4625 = failed logon.
        success = checked[(checked['_raw'].str.contains('EventCode=4624'))]
        success.index = range(0, len(success))
        failure = checked[(checked['_raw'].str.contains('EventCode=4625'))]
        failure.index = range(0, len(failure))

        # Field extractors over the raw event text ('\\t'/'\\r' are regex
        # tab / carriage-return escapes).
        get_address = lambda x: re.findall('(Source Network Address:\\t)(.*?)(\\r)', x)[0][1]
        success['ip'] = success._raw.apply(get_address)
        failure['ip'] = failure._raw.apply(get_address)
        get_user = lambda x: re.findall('(Security ID:\\t)(.*?)(\\r)', x)[0][1]
        success['user'] = success._raw.apply(get_user)
        # Second "Security ID" occurrence (target account).
        get_user2 = lambda x: re.findall('(Security ID:\\t)(.*?)(\\r)', x)[1][1]
        success['user2'] = success._raw.apply(get_user2)
        get_logontype = lambda x: re.findall('(Logon Type:\\t)(.*?)(\\r)', x)[0][1]
        success['logontype'] = success._raw.apply(get_logontype)
        failure['user'] = failure._raw.apply(get_user)

        success_csv = pd.DataFrame([success._time, success.ip, success.user, success.user2, success.logontype]).transpose()
        # NOTE(review): unlike success_csv this frame is not transposed and
        # omits 'user'; "failure.csv" is also overwritten on every loop
        # iteration -- confirm both are intentional.
        failure_csv = pd.DataFrame([failure._time, failure.ip])
        success_csv.to_csv(outfile)
        failure_csv.to_csv("failure.csv")
4837062 | <reponame>bintulab/storm-analysis<filename>storm_analysis/diagnostics/slurm/analyze_data.py
#!/usr/bin/env python
"""
Analyze SLURM test data using Multiplane. We don't actually use
SLURM, instead we just analyze each movie sequentially.
Hazen 09/18
"""
import glob
import os
import time
import storm_analysis.multi_plane.multi_plane as mp
import storm_analysis.diagnostics.slurm.settings as settings
def analyzeData():
    """Run Multiplane analysis sequentially on every job*.xml in the work dir."""
    job_files = sorted(glob.glob(os.path.join(settings.wdir, "job*.xml")))
    for job_file in job_files:
        print()
        print("Analyzing job:", job_file)
        print()

        # Output file name "p_<index>.hdf5" derived from the job file suffix.
        index = os.path.splitext(job_file)[0].split("_")[-1]
        h5_name = os.path.join(settings.wdir, "p_" + index + ".hdf5")

        # Remove stale results, if any.
        if os.path.exists(h5_name):
            os.remove(h5_name)

        # Run analysis and report elapsed wall-clock time.
        t_start = time.time()
        mp.analyze("slurm_test/test", h5_name, job_file)
        elapsed = time.time() - t_start
        print("Analysis completed in {0:.2f} seconds".format(elapsed))
# Script entry point.
if __name__ == "__main__":
    analyzeData()
| StarcoderdataPython |
3323769 | <gh_stars>1-10
""" MobileNet V3
A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
Hacked together by / Copyright 2020 <NAME>
"""
import torch.nn as nn
from src.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from src.models.layers import create_conv2d, get_act_fn, hard_sigmoid
from .base import BackboneBase
from .efficientnet.efficientnet_blocks import round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from .efficientnet.efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights
from .utils.helpers import build_model_with_cfg
from .utils.registry import register_model
__all__ = ['MobileNetV3']
def _cfg(url='', **kwargs):
    """Build a default pretrained-config dict; ``kwargs`` override the defaults."""
    cfg = {
        'url': url, 'input_size': (3, 224, 224), 'pool_size': (1, 1),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv_stem', 'classifier': 'classifier',
    }
    cfg.update(kwargs)
    return cfg
# Per-variant pretrained-weight configurations, keyed by model name.
# Entries with url='' have no released pretrained weights; tf_* entries use
# Inception-style normalization to match the ported TF checkpoints.
default_cfgs = {
    'mobilenetv3_large_075': _cfg(url=''),
    'mobilenetv3_large_100': _cfg(
        interpolation='bicubic',
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'),
    'mobilenetv3_large_100_miil': _cfg(
        interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1),
        url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_1k_miil_78_0.pth'),
    'mobilenetv3_large_100_miil_in21k': _cfg(
        interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1),
        url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_in21k_miil.pth', num_classes=11221),
    'mobilenetv3_small_075': _cfg(url=''),
    'mobilenetv3_small_100': _cfg(url=''),
    'mobilenetv3_rw': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
        interpolation='bicubic'),
    'tf_mobilenetv3_large_075': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_mobilenetv3_large_100': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_mobilenetv3_large_minimal_100': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_mobilenetv3_small_075': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_mobilenetv3_small_100': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_mobilenetv3_small_minimal_100': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
}

# When True, EfficientNetBuilder prints verbose block-construction logging.
_DEBUG = False
class MobileNetV3(BackboneBase):
    """ MobileNet-V3

    Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific
    'efficient head', where global pooling is done before the head convolution without a final batch-norm
    layer before the classifier.

    Paper: https://arxiv.org/abs/1905.02244

    Args:
        block_args: decoded architecture definition (see ``decode_arch_def``).
        channel_multiplier: width multiplier applied to stem and block channels.
        set_neck: forwarded to ``BackboneBase.create_neck``.
    """

    def __init__(self, block_args, in_chans=3, stem_size=16, num_features=1280, head_bias=True,
                 channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_path_rate=0.,
                 se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, set_neck=False):
        super(MobileNetV3, self).__init__()

        self.num_features = num_features

        # Stem
        stem_size = round_channels(stem_size, channel_multiplier)
        self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        # NOTE(review): norm_kwargs is unpacked directly -- passing the default
        # norm_kwargs=None raises TypeError; callers here always supply a dict.
        self.bn1 = norm_layer(stem_size, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Middle stages (IR/ER/DS Blocks)
        builder = EfficientNetBuilder(
            channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs,
            norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG)
        self.blocks = nn.Sequential(*builder(stem_size, block_args))
        self.feature_info = builder.features
        self.create_hooks()
        self.create_neck(set_neck)

        # 'Efficient head': 1x1 conv after pooling (applied in forward_neck),
        # with no batch norm before the classifier.
        self.conv_head = create_conv2d(builder.in_chs, self.num_features, 1, padding=pad_type, bias=head_bias)
        self.act2 = act_layer(inplace=True)

        self.init_weights()

    def init_weights(self):
        # Standard EfficientNet-style weight initialization.
        efficientnet_init_weights(self)

    def as_sequential(self):
        # Flatten the model into an nn.Sequential (stem -> blocks -> head).
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([self.global_pool, self.conv_head, self.act2])
        layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers)

    def forward_features(self, x):
        # Stem + middle stages; head is applied in forward_neck.
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.blocks(x)
        return x

    def forward_neck(self, x):
        x = super().forward_neck(x)
        x = self.conv_head(x)
        x = self.act2(x)
        return x
def _create_mnv3(variant, pretrained=False, **kwargs):
    """Instantiate a MobileNetV3 for ``variant`` (optionally loading pretrained weights)."""
    return build_model_with_cfg(
        MobileNetV3, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_strict=False,
        **kwargs)
def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a MobileNet-V3 model ('RW' variant).

    Differs from the paper config in its SE block settings
    (hard_sigmoid gate, divisor=1) and the bias-free head.

    Ref impl: ?
    Paper: https://arxiv.org/abs/1905.02244

    Args:
      channel_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_e1_c16_nre_noskip'],  # relu
        # stage 1, 112x112 in
        ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'],  # relu
        # stage 2, 56x56 in
        ['ir_r3_k5_s2_e3_c40_se0.25_nre'],  # relu
        # stage 3, 28x28 in
        ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],  # hard-swish
        # stage 4, 14x14in
        ['ir_r2_k3_s1_e6_c112_se0.25'],  # hard-swish
        # stage 5, 14x14in
        ['ir_r3_k5_s2_e6_c160_se0.25'],  # hard-swish
        # stage 6, 7x7 in
        ['cn_r1_k1_s1_c960'],  # hard-swish
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        head_bias=False,
        channel_multiplier=channel_multiplier,
        norm_kwargs=resolve_bn_args(kwargs),
        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
        se_kwargs=dict(gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True, divisor=1),
        **kwargs,
    )
    model = _create_mnv3(variant, pretrained, **model_kwargs)
    return model
def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a MobileNet-V3 model.

    The 'small'/'minimal' substrings of ``variant`` select the architecture:
    small vs large body (head width 1024 vs 1280) and, for 'minimal', a
    ReLU-only, SE-free configuration.

    Ref impl: ?
    Paper: https://arxiv.org/abs/1905.02244

    Args:
      channel_multiplier: multiplier to number of channels per layer.
    """
    if 'small' in variant:
        num_features = 1024
        if 'minimal' in variant:
            act_layer = resolve_act_layer(kwargs, 'relu')
            arch_def = [
                # stage 0, 112x112 in
                ['ds_r1_k3_s2_e1_c16'],
                # stage 1, 56x56 in
                ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'],
                # stage 2, 28x28 in
                ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'],
                # stage 3, 14x14 in
                ['ir_r2_k3_s1_e3_c48'],
                # stage 4, 14x14in
                ['ir_r3_k3_s2_e6_c96'],
                # stage 6, 7x7 in
                ['cn_r1_k1_s1_c576'],
            ]
        else:
            act_layer = resolve_act_layer(kwargs, 'hard_swish')
            arch_def = [
                # stage 0, 112x112 in
                ['ds_r1_k3_s2_e1_c16_se0.25_nre'],  # relu
                # stage 1, 56x56 in
                ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'],  # relu
                # stage 2, 28x28 in
                ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'],  # hard-swish
                # stage 3, 14x14 in
                ['ir_r2_k5_s1_e3_c48_se0.25'],  # hard-swish
                # stage 4, 14x14in
                ['ir_r3_k5_s2_e6_c96_se0.25'],  # hard-swish
                # stage 6, 7x7 in
                ['cn_r1_k1_s1_c576'],  # hard-swish
            ]
    else:
        num_features = 1280
        if 'minimal' in variant:
            act_layer = resolve_act_layer(kwargs, 'relu')
            arch_def = [
                # stage 0, 112x112 in
                ['ds_r1_k3_s1_e1_c16'],
                # stage 1, 112x112 in
                ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'],
                # stage 2, 56x56 in
                ['ir_r3_k3_s2_e3_c40'],
                # stage 3, 28x28 in
                ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
                # stage 4, 14x14in
                ['ir_r2_k3_s1_e6_c112'],
                # stage 5, 14x14in
                ['ir_r3_k3_s2_e6_c160'],
                # stage 6, 7x7 in
                ['cn_r1_k1_s1_c960'],
            ]
        else:
            act_layer = resolve_act_layer(kwargs, 'hard_swish')
            arch_def = [
                # stage 0, 112x112 in
                ['ds_r1_k3_s1_e1_c16_nre'],  # relu
                # stage 1, 112x112 in
                ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'],  # relu
                # stage 2, 56x56 in
                ['ir_r3_k5_s2_e3_c40_se0.25_nre'],  # relu
                # stage 3, 28x28 in
                ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],  # hard-swish
                # stage 4, 14x14in
                ['ir_r2_k3_s1_e6_c112_se0.25'],  # hard-swish
                # stage 5, 14x14in
                ['ir_r3_k5_s2_e6_c160_se0.25'],  # hard-swish
                # stage 6, 7x7 in
                ['cn_r1_k1_s1_c960'],  # hard-swish
            ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        num_features=num_features,
        stem_size=16,
        channel_multiplier=channel_multiplier,
        norm_kwargs=resolve_bn_args(kwargs),
        act_layer=act_layer,
        se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8),
        **kwargs,
    )
    model = _create_mnv3(variant, pretrained, **model_kwargs)
    return model
@register_model
def mobilenetv3_large_075(pretrained=False, **kwargs):
    """MobileNet V3 large @ 0.75 channel multiplier."""
    return _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
@register_model
def mobilenetv3_large_100(pretrained=False, **kwargs):
    """MobileNet V3 large @ 1.0 channel multiplier."""
    return _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
@register_model
def mobilenetv3_large_100_miil(pretrained=False, **kwargs):
    """MobileNet V3 large @ 1.0.

    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
    """
    return _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs)
@register_model
def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs):
    """MobileNet V3 large @ 1.0, ImageNet-21k pretraining.

    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
    """
    return _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs)
@register_model
def mobilenetv3_small_075(pretrained=False, **kwargs):
    """MobileNet V3 small @ 0.75 channel multiplier."""
    return _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
@register_model
def mobilenetv3_small_100(pretrained=False, **kwargs):
    """MobileNet V3 small @ 1.0 channel multiplier."""
    return _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
@register_model
def mobilenetv3_rw(pretrained=False, **kwargs):
    """MobileNet V3 'RW' variant @ 1.0 channel multiplier."""
    if pretrained:
        # Pretrained model was trained with a non-default BN epsilon.
        kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    return _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
@register_model
def tf_mobilenetv3_large_075(pretrained=False, **kwargs):
    """MobileNet V3 large @ 0.75, ported TF weights ('same' padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
@register_model
def tf_mobilenetv3_large_100(pretrained=False, **kwargs):
    """MobileNet V3 large @ 1.0, ported TF weights ('same' padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
@register_model
def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 large-minimal @ 1.0, ported TF weights ('same' padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
@register_model
def tf_mobilenetv3_small_075(pretrained=False, **kwargs):
    """MobileNet V3 small @ 0.75, ported TF weights ('same' padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
@register_model
def tf_mobilenetv3_small_100(pretrained=False, **kwargs):
    """MobileNet V3 small @ 1.0, ported TF weights ('same' padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
@register_model
def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
    """MobileNet V3 small-minimal @ 1.0, ported TF weights ('same' padding, TF BN eps)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
| StarcoderdataPython |
3333893 | # The MIT License (MIT)
# Copyright (c) 2021-present foxwhite25
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import asyncio
import datetime
from typing import AsyncIterator, TypeVar, Awaitable, Any, Optional, Callable, Union, List, TYPE_CHECKING
from .error import NoMoreItems
from .object import Object
from .utils import maybe_coroutine
if TYPE_CHECKING:
from .types.guild import (
Guild as GuildPayload,
)
from .types.message import (
Message as MessagePayload,
)
from .member import Member
from .message import Message
from .guild import Guild
# Public API of this module.
__all__ = (
    'GuildIterator',
    'MemberIterator',
)

# Generic item / output type variables for the iterator combinators.
T = TypeVar('T')
OT = TypeVar('OT')
# A callback taking T and returning OT either synchronously or as an awaitable.
_Func = Callable[[T], Union[OT, Awaitable[OT]]]
class _AsyncIterator(AsyncIterator[T]):
    """Base async iterator with find/get/chunk/map/filter/flatten combinators.

    Subclasses implement ``next()`` and raise ``NoMoreItems`` when exhausted.
    """
    __slots__ = ()

    async def next(self) -> T:
        # Subclass responsibility: return the next item or raise NoMoreItems.
        raise NotImplementedError

    def get(self, **attrs: Any) -> Awaitable[Optional[T]]:
        """Return the first element matching all keyword attributes.

        Double-underscore keys traverse nested attributes
        (e.g. ``author__name='x'`` checks ``elem.author.name``).
        """
        def predicate(elem: T):
            for attr, val in attrs.items():
                nested = attr.split('__')
                obj = elem
                for attribute in nested:
                    obj = getattr(obj, attribute)

                if obj != val:
                    return False
            return True

        return self.find(predicate)

    async def find(self, predicate: _Func[T, bool]) -> Optional[T]:
        """Return the first element satisfying ``predicate`` or None if exhausted."""
        while True:
            try:
                elem = await self.next()
            except NoMoreItems:
                return None

            ret = await maybe_coroutine(predicate, elem)
            if ret:
                return elem

    def chunk(self, max_size: int):
        """Yield items grouped into lists of at most ``max_size``."""
        if max_size <= 0:
            raise ValueError('async iterator chunk sizes must be greater than 0.')
        return _ChunkedAsyncIterator(self, max_size)

    def map(self, func: _Func[T, OT]):
        """Apply ``func`` (sync or async) to every item."""
        return _MappedAsyncIterator(self, func)

    def filter(self, predicate: _Func[T, bool]):
        """Keep only items for which ``predicate`` (sync or async) is truthy."""
        return _FilteredAsyncIterator(self, predicate)

    async def flatten(self) -> List[T]:
        """Exhaust the iterator into a list."""
        return [element async for element in self]

    async def __anext__(self) -> T:
        try:
            return await self.next()
        except NoMoreItems:
            # Translate the internal sentinel into the async-iteration protocol.
            raise StopAsyncIteration()
class _ChunkedAsyncIterator(_AsyncIterator[List[T]]):
    """Wraps an iterator, yielding its items in lists of at most ``max_size``."""

    def __init__(self, iterator, max_size):
        self.iterator = iterator
        self.max_size = max_size

    async def next(self) -> List[T]:
        chunk: List[T] = []
        for _ in range(self.max_size):
            try:
                chunk.append(await self.iterator.next())
            except NoMoreItems:
                # A partial chunk is still returned; only an empty one
                # propagates exhaustion.
                if not chunk:
                    raise
                break
        return chunk
class _MappedAsyncIterator(_AsyncIterator[T]):
    """Wraps an iterator, applying ``func`` (sync or async) to each item."""

    def __init__(self, iterator, func):
        self.iterator = iterator
        self.func = func

    async def next(self) -> T:
        # NoMoreItems from the underlying iterator propagates to the caller.
        raw = await self.iterator.next()
        return await maybe_coroutine(self.func, raw)
def _identity(x):
return x
class _FilteredAsyncIterator(_AsyncIterator[T]):
    """Wraps an iterator, yielding only items for which ``predicate`` is truthy."""

    def __init__(self, iterator, predicate):
        self.iterator = iterator
        # A missing predicate keeps everything.
        self.predicate = predicate if predicate is not None else _identity

    async def next(self) -> T:
        # NoMoreItems from the underlying iterator propagates to the caller.
        while True:
            item = await self.iterator.next()
            if await maybe_coroutine(self.predicate, item):
                return item
class GuildIterator(_AsyncIterator['Guild']):
    """Paginates over the guilds the bot is a member of.

    If both ``before`` and ``after`` are given, the ``before`` strategy is
    used and results are filtered client-side to ids greater than ``after``.
    """

    def __init__(self, bot, limit, before=None, after=None):
        self.bot = bot
        self.limit = limit
        self.before = before
        self.after = after

        self._filter = None

        self.state = self.bot._connection
        self.get_guilds = self.bot.http.get_guilds
        self.get_guild_channels = self.bot.http.get_guild_channels
        self.guilds = asyncio.Queue()

        if self.before and self.after:
            self._retrieve_guilds = self._retrieve_guilds_before_strategy  # type: ignore
            self._filter = lambda m: int(m['id']) > self.after.id
        elif self.after:
            self._retrieve_guilds = self._retrieve_guilds_after_strategy  # type: ignore
        else:
            self._retrieve_guilds = self._retrieve_guilds_before_strategy  # type: ignore

    async def next(self) -> Guild:
        """Return the next guild, raising ``NoMoreItems`` when exhausted."""
        if self.guilds.empty():
            await self.fill_guilds()

        try:
            return self.guilds.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    def _get_retrieve(self):
        # Page size: at most 100 guilds per request.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0

    def create_guild(self, data):
        # Imported lazily to avoid a circular import at module load time.
        from .guild import Guild

        return Guild(data=data, state=self.state)

    async def fill_guilds(self):
        if self._get_retrieve():
            data = await self._retrieve_guilds(self.retrieve)
            # A short page means the listing is exhausted.
            if self.limit is None or len(data) < 100:
                self.limit = 0

            if self._filter:
                data = filter(self._filter, data)

            for element in data:
                await self.guilds.put(self.create_guild(element))

    async def _retrieve_guilds(self, retrieve) -> List[Guild]:
        """Retrieve guilds and update next parameters."""
        raise NotImplementedError

    async def _retrieve_guilds_before_strategy(self, retrieve):
        """Retrieve guilds using before parameter."""
        before = self.before.id if self.before else None
        data: List[GuildPayload] = await self.get_guilds(retrieve, before=before)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # BUG FIX: advance the cursor so the next page does not refetch the
            # same guilds (mirrors MemberIterator/HistoryIterator and upstream
            # discord.py).
            self.before = Object(id=int(data[0]['id']))
        return data

    async def _retrieve_guilds_after_strategy(self, retrieve):
        """Retrieve guilds using after parameter."""
        after = self.after.id if self.after else None
        data: List[GuildPayload] = await self.get_guilds(retrieve, after=after)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # BUG FIX: advance the cursor past the newest guild returned.
            self.after = Object(id=int(data[-1]['id']))
        return data
class MemberIterator(_AsyncIterator['Member']):
    """Paginates over a guild's members, ``limit`` capping the page size."""

    def __init__(self, guild, limit=1000, after=None):

        # Accept a raw snowflake for ``after``.
        if isinstance(after, int):
            after = Object(id=after)

        self.guild = guild
        self.limit = limit
        self.after = after or 0

        self.state = self.guild._state
        self.get_members = self.state.http.get_members
        self.members = asyncio.Queue()

    async def next(self) -> Member:
        # Refill the queue when empty; NoMoreItems signals exhaustion.
        if self.members.empty():
            await self.fill_members()

        try:
            return self.members.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    def _get_retrieve(self):
        # Page size: at most 400 members per request.
        # NOTE(review): ``limit`` is never decremented and nothing sets it to
        # 0 on a short page, so iteration only stops when the API returns no
        # data -- with limit > 400 more than ``limit`` members can be yielded.
        l = self.limit
        if l is None or l > 400:
            r = 400
        else:
            r = l
        self.retrieve = r
        return r > 0

    async def fill_members(self):
        if self._get_retrieve():
            after = self.after.id if self.after else None
            data = await self.get_members(self.guild.id, self.retrieve, after)
            if not data:
                # no data, terminate
                return

            # Advance the cursor past the last member of this page.
            self.after = Object(id=int(data[-1]['user']['id']))

            for element in reversed(data):
                await self.members.put(self.create_member(element))

    def create_member(self, data):
        # Imported lazily to avoid a circular import at module load time.
        from .member import Member

        return Member(data=data, guild=self.guild, state=self.state)
class HistoryIterator(_AsyncIterator['Message']):
    """Iterator for receiving a channel's message history.

    There are two behaviours of the messages endpoint that we care about:

    If ``before`` is specified, the endpoint returns the ``limit`` newest
    messages before ``before``, sorted newest first. To fill more than 100
    messages, update the ``before`` parameter to the oldest message received.
    Messages will be returned in order.

    If ``after`` is specified, it returns the ``limit`` oldest messages after
    ``after``, sorted newest first. To fill more than 100 messages, update the
    ``after`` parameter to the most recent message received. Unless reversed,
    the messages will be out of order (99-0, 199-100, and so on).

    Note that if both ``before`` and ``after`` are specified, ``before`` is
    ignored.

    Parameters
    -----------
    messageable: :class:`abc.Messageable`
        Messageable class to retrieve message history from.
    limit: :class:`int`
        Maximum number of messages to retrieve.
    before: Optional[:class:`datetime.datetime`]
        Message before which all messages must be.
    after: Optional[:class:`datetime.datetime`]
        Message after which all messages must be.
    around: Optional[:class:`datetime.datetime`]
        Message around which all messages must be. Limit max 101. Note that
        if the limit is an even number this will return at most limit + 1
        messages.
    oldest_first: Optional[:class:`bool`]
        If set to ``True``, return messages in oldest->newest order. Defaults
        to ``True`` if ``after`` is specified, otherwise ``False``.
    """

    def __init__(self, messageable, limit, before=None, after=None, around=None, oldest_first=None):

        # Default ordering: oldest-first only when paginating forward.
        if oldest_first is None:
            self.reverse = after is not None
        else:
            self.reverse = oldest_first

        self.messageable = messageable
        self.limit = limit
        self.before = before
        self.after = after or 0
        self.around = around

        self._filter = None  # message dict -> bool

        self.state = self.messageable._state
        self.logs_from = self.state.http.logs_from
        self.messages = asyncio.Queue()

        if self.around:
            if self.limit is None:
                raise ValueError('历史不支持limit=None')
            if self.limit > 101:
                raise ValueError("指定 around 参数时的历史最大限制 101")
            elif self.limit == 101:
                self.limit = 100  # Thanks qq

            self._retrieve_messages = self._retrieve_messages_around_strategy  # type: ignore
            # Client-side filters trim the around-page to the requested window.
            if self.before and self.after:
                self._filter = lambda m: \
                    self.timestamp(self.after) < self.timestamp(m['timestamp']) < self.timestamp(
                        self.before)  # type: ignore
            elif self.before:
                self._filter = lambda m: self.timestamp(m['timestamp']) < self.timestamp(self.before)  # type: ignore
            elif self.after:
                self._filter = lambda m: self.timestamp(self.after) < self.timestamp(m['timestamp'])  # type: ignore
        else:
            if self.reverse:
                self._retrieve_messages = self._retrieve_messages_after_strategy  # type: ignore
                if self.before:
                    self._filter = lambda m: self.timestamp(m['timestamp']) < self.timestamp(
                        self.before)  # type: ignore
            else:
                self._retrieve_messages = self._retrieve_messages_before_strategy  # type: ignore
                if self.after and self.after != 0:
                    self._filter = lambda m: self.timestamp(m['timestamp']) > self.timestamp(self.after)  # type: ignore

    def timestamp(self, dt: Union[datetime.datetime, str]):
        # Normalize a datetime or ISO-8601 "...Z" string to a unix timestamp.
        if isinstance(dt, str):
            dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%SZ")
        return int(datetime.datetime.timestamp(dt))

    async def next(self) -> Message:
        # Refill the queue when empty; NoMoreItems signals exhaustion.
        if self.messages.empty():
            await self.fill_messages()

        try:
            return self.messages.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    def _get_retrieve(self):
        # Page size: at most 100 messages per request.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0

    async def fill_messages(self):
        if not hasattr(self, 'channel'):
            # do the required set up
            channel, private = await self.messageable._get_channel()
            self.channel = channel

        if self._get_retrieve():
            data = await self._retrieve_messages(self.retrieve)
            if len(data) < 100:
                self.limit = 0  # terminate the infinite loop

            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)

            channel = self.channel
            for element in data:
                await self.messages.put(self.state.create_message(channel=channel, data=element))

    async def _retrieve_messages(self, retrieve) -> List[Message]:
        """Retrieve messages and update the next-request parameters."""
        raise NotImplementedError

    async def _retrieve_messages_before_strategy(self, retrieve):
        """Retrieve messages using the ``before`` parameter."""
        before = self.timestamp(self.before) if self.before else None
        data: List[MessagePayload] = await self.logs_from(self.channel.id, retrieve, before=before)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Advance the cursor to the oldest message of this page.
            self.before = datetime.datetime.strptime(data[-1]['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
        return data

    async def _retrieve_messages_after_strategy(self, retrieve):
        """Retrieve messages using the ``after`` parameter."""
        after = self.timestamp(self.after) if self.after else None
        data: List[MessagePayload] = await self.logs_from(self.channel.id, retrieve, after=after)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Advance the cursor to the newest message of this page.
            self.after = datetime.datetime.strptime(data[0]['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
        return data

    async def _retrieve_messages_around_strategy(self, retrieve):
        """Retrieve messages using the ``around`` parameter."""
        if self.around:
            around = self.around.id if self.around else None
            data: List[MessagePayload] = await self.logs_from(self.channel.id, retrieve, around=around)
            # around cannot be paginated; a single request exhausts it.
            self.around = None
            return data
        return []
| StarcoderdataPython |
96851 | <reponame>anna-hope/python-firestore
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
def _make_document_reference(*args, **kwargs):
    """Construct a ``DocumentReference``, importing it lazily inside the helper."""
    from google.cloud.firestore_v1.document import DocumentReference

    return DocumentReference(*args, **kwargs)
def test_constructor():
    """A nested reference stores the client and joins the path segments with '/'."""
    collection_id1 = "users"
    document_id1 = "alovelace"
    collection_id2 = "platform"
    document_id2 = "*nix"
    client = mock.MagicMock()
    client.__hash__.return_value = 1234

    document = _make_document_reference(
        collection_id1, document_id1, collection_id2, document_id2, client=client
    )
    assert document._client is client
    expected_path = "/".join(
        (collection_id1, document_id1, collection_id2, document_id2)
    )
    assert document.path == expected_path
# NOTE(review): "repsonse" is a typo for "response"; the name is kept because
# every call site in this module uses it.
def _make_commit_repsonse(write_results=None):
    # Fake CommitResponse: defaults to a single sentinel write result.
    from google.cloud.firestore_v1.types import firestore

    response = mock.create_autospec(firestore.CommitResponse)
    response.write_results = write_results or [mock.sentinel.write_result]
    response.commit_time = mock.sentinel.commit_time
    return response
def _write_pb_for_create(document_path, document_data):
    """Build the ``Write`` proto expected for ``create()``: an update with an
    ``exists=False`` precondition."""
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import write
    from google.cloud.firestore_v1 import _helpers

    return write.Write(
        update=document.Document(
            name=document_path, fields=_helpers.encode_dict(document_data)
        ),
        current_document=common.Precondition(exists=False),
    )
def _create_helper(retry=None, timeout=None):
    """Exercise ``DocumentReference.create`` against a mocked GAPIC commit,
    optionally passing retry/timeout through."""
    from google.cloud.firestore_v1 import _helpers

    # Create a minimal fake GAPIC with a dummy response.
    firestore_api = mock.Mock()
    firestore_api.commit.mock_add_spec(spec=["commit"])
    firestore_api.commit.return_value = _make_commit_repsonse()

    # Attach the fake GAPIC to a real client.
    client = _make_client("dignity")
    client._firestore_api_internal = firestore_api

    # Actually make a document and call create().
    document = _make_document_reference("foo", "twelve", client=client)
    document_data = {"hello": "goodbye", "count": 99}
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    write_result = document.create(document_data, **kwargs)

    # Verify the response and the mocks.
    assert write_result is mock.sentinel.write_result
    write_pb = _write_pb_for_create(document._document_path, document_data)
    firestore_api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [write_pb],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_create():
    # Default path: no retry/timeout overrides.
    _create_helper()
def test_documentreference_create_w_retry_timeout():
    # Retry/timeout kwargs must be forwarded to the commit RPC.
    from google.api_core.retry import Retry

    retry = Retry(predicate=object())
    timeout = 123.0
    _create_helper(retry=retry, timeout=timeout)
def test_documentreference_create_empty():
    """Creating a document with empty data still yields a gettable snapshot."""
    # Create a minimal fake GAPIC with a dummy response.
    from google.cloud.firestore_v1.document import DocumentReference
    from google.cloud.firestore_v1.document import DocumentSnapshot

    firestore_api = mock.Mock(spec=["commit"])
    document_reference = mock.create_autospec(DocumentReference)
    snapshot = mock.create_autospec(DocumentSnapshot)
    snapshot.exists = True
    document_reference.get.return_value = snapshot
    firestore_api.commit.return_value = _make_commit_repsonse(
        write_results=[document_reference]
    )

    # Attach the fake GAPIC to a real client.
    client = _make_client("dignity")
    client._firestore_api_internal = firestore_api
    client.get_all = mock.MagicMock()
    client.get_all.exists.return_value = True

    # Actually make a document and call create().
    document = _make_document_reference("foo", "twelve", client=client)
    document_data = {}
    write_result = document.create(document_data)
    assert write_result.get().exists
def _write_pb_for_set(document_path, document_data, merge):
    """Build the Write proto expected from a set() call.

    When ``merge`` is true, an update mask listing the (sorted) field paths
    present in ``document_data`` is attached.
    """
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import write
    from google.cloud.firestore_v1 import _helpers
    write_pbs = write.Write(
        update=document.Document(
            name=document_path, fields=_helpers.encode_dict(document_data)
        )
    )
    if merge:
        # The original sorted the FieldPath objects, converted them, then
        # sorted the resulting strings again; a single sort of the string
        # representations yields the same final mask.
        field_paths = sorted(
            field_path.to_api_repr()
            for field_path, value in _helpers.extract_fields(
                document_data, _helpers.FieldPath()
            )
        )
        mask = common.DocumentMask(field_paths=field_paths)
        write_pbs._pb.update_mask.CopyFrom(mask._pb)
    return write_pbs
def _set_helper(merge=False, retry=None, timeout=None, **option_kwargs):
    """Exercise DocumentReference.set() against a mocked commit RPC and
    verify both the returned write result and the exact request payload."""
    from google.cloud.firestore_v1 import _helpers
    # Create a minimal fake GAPIC with a dummy response.
    firestore_api = mock.Mock(spec=["commit"])
    firestore_api.commit.return_value = _make_commit_repsonse()
    # Attach the fake GAPIC to a real client.
    client = _make_client("db-dee-bee")
    client._firestore_api_internal = firestore_api
    # Actually make a document and call set().
    document = _make_document_reference("User", "Interface", client=client)
    document_data = {"And": 500, "Now": b"\xba\xaa\xaa \xba\xaa\xaa"}
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    write_result = document.set(document_data, merge, **kwargs)
    # Verify the response and the mocks.
    assert write_result is mock.sentinel.write_result
    write_pb = _write_pb_for_set(document._document_path, document_data, merge)
    firestore_api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [write_pb],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_set():
    """set() without merge."""
    _set_helper()
def test_documentreference_set_w_retry_timeout():
    """set() forwards retry/timeout to the commit RPC."""
    from google.api_core.retry import Retry
    retry = Retry(predicate=object())
    timeout = 123.0
    _set_helper(retry=retry, timeout=timeout)
def test_documentreference_set_merge():
    """set() with merge=True attaches an update mask."""
    _set_helper(merge=True)
def _write_pb_for_update(document_path, update_values, field_paths):
    """Build the Write proto expected from an update() call: the document
    payload, the update mask, and an exists=True precondition."""
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import write
    from google.cloud.firestore_v1 import _helpers
    return write.Write(
        update=document.Document(
            name=document_path, fields=_helpers.encode_dict(update_values)
        ),
        update_mask=common.DocumentMask(field_paths=field_paths),
        current_document=common.Precondition(exists=True),
    )
def _update_helper(retry=None, timeout=None, **option_kwargs):
    """Exercise DocumentReference.update() (optionally with a write option
    built from ``option_kwargs``) against a mocked commit RPC."""
    from collections import OrderedDict
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.transforms import DELETE_FIELD
    # Create a minimal fake GAPIC with a dummy response.
    firestore_api = mock.Mock(spec=["commit"])
    firestore_api.commit.return_value = _make_commit_repsonse()
    # Attach the fake GAPIC to a real client.
    client = _make_client("potato-chip")
    client._firestore_api_internal = firestore_api
    # Actually make a document and call update().
    document = _make_document_reference("baked", "Alaska", client=client)
    # "Cheat" and use OrderedDict-s so that iteritems() is deterministic.
    field_updates = OrderedDict(
        (("hello", 1), ("then.do", False), ("goodbye", DELETE_FIELD))
    )
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    if option_kwargs:
        option = client.write_option(**option_kwargs)
        write_result = document.update(field_updates, option=option, **kwargs)
    else:
        option = None
        write_result = document.update(field_updates, **kwargs)
    # Verify the response and the mocks.
    assert write_result is mock.sentinel.write_result
    # "then.do" expands to a nested map; "goodbye" (DELETE_FIELD) appears
    # only in the mask, not in the payload.
    update_values = {
        "hello": field_updates["hello"],
        "then": {"do": field_updates["then.do"]},
    }
    field_paths = list(field_updates.keys())
    write_pb = _write_pb_for_update(
        document._document_path, update_values, sorted(field_paths)
    )
    if option is not None:
        option.modify_write(write_pb)
    firestore_api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [write_pb],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_update_with_exists():
    """Supplying the 'exists' write option to update() raises ValueError."""
    with pytest.raises(ValueError):
        _update_helper(exists=True)
def test_documentreference_update():
    """Plain update() with field updates and deletes."""
    _update_helper()
def test_documentreference_update_w_retry_timeout():
    """update() forwards retry/timeout to the commit RPC."""
    from google.api_core.retry import Retry
    retry = Retry(predicate=object())
    timeout = 123.0
    _update_helper(retry=retry, timeout=timeout)
def test_documentreference_update_with_precondition():
    """update() with a last_update_time precondition."""
    from google.protobuf import timestamp_pb2
    timestamp = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244)
    _update_helper(last_update_time=timestamp)
def test_documentreference_empty_update():
    """update() with an empty field dict raises ValueError before any RPC."""
    # Create a minimal fake GAPIC with a dummy response.
    firestore_api = mock.Mock(spec=["commit"])
    firestore_api.commit.return_value = _make_commit_repsonse()
    # Attach the fake GAPIC to a real client.
    client = _make_client("potato-chip")
    client._firestore_api_internal = firestore_api
    # Actually make a document and call update().
    document = _make_document_reference("baked", "Alaska", client=client)
    field_updates = {}
    with pytest.raises(ValueError):
        document.update(field_updates)
def _delete_helper(retry=None, timeout=None, **option_kwargs):
    """Exercise DocumentReference.delete() (optionally with a write option)
    against a mocked commit RPC and verify the request payload."""
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import write
    # Create a minimal fake GAPIC with a dummy response.
    firestore_api = mock.Mock(spec=["commit"])
    firestore_api.commit.return_value = _make_commit_repsonse()
    # Attach the fake GAPIC to a real client.
    client = _make_client("donut-base")
    client._firestore_api_internal = firestore_api
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    # Actually make a document and call delete().
    document = _make_document_reference("where", "we-are", client=client)
    if option_kwargs:
        option = client.write_option(**option_kwargs)
        delete_time = document.delete(option=option, **kwargs)
    else:
        option = None
        delete_time = document.delete(**kwargs)
    # Verify the response and the mocks.
    assert delete_time is mock.sentinel.commit_time
    write_pb = write.Write(delete=document._document_path)
    if option is not None:
        option.modify_write(write_pb)
    firestore_api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [write_pb],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_delete():
    """delete() with no precondition."""
    _delete_helper()
def test_documentreference_delete_with_option():
    """delete() with a last_update_time precondition."""
    from google.protobuf import timestamp_pb2
    timestamp_pb = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244)
    _delete_helper(last_update_time=timestamp_pb)
def test_documentreference_delete_w_retry_timeout():
    """delete() forwards retry/timeout to the commit RPC."""
    from google.api_core.retry import Retry
    retry = Retry(predicate=object())
    timeout = 123.0
    _delete_helper(retry=retry, timeout=timeout)
def _get_helper(
    field_paths=None,
    use_transaction=False,
    not_found=False,
    # This should be an impossible case, but we test against it for
    # completeness
    return_empty=False,
    retry=None,
    timeout=None,
):
    """Exercise DocumentReference.get() against a mocked batch_get_documents
    RPC, covering found / not-found / empty-stream responses, optional field
    masks, transactions, and retry/timeout forwarding."""
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import firestore
    from google.cloud.firestore_v1.transaction import Transaction
    # Create a minimal fake GAPIC with a dummy response.
    create_time = 123
    update_time = 234
    read_time = 345
    firestore_api = mock.Mock(spec=["batch_get_documents"])
    response = mock.create_autospec(firestore.BatchGetDocumentsResponse)
    response.read_time = read_time
    response.found = mock.create_autospec(document.Document)
    response.found.fields = {}
    response.found.create_time = create_time
    response.found.update_time = update_time
    client = _make_client("donut-base")
    client._firestore_api_internal = firestore_api
    document_reference = _make_document_reference("where", "we-are", client=client)
    # A real response populates exactly one of found/missing.
    response.found.name = None if not_found else document_reference._document_path
    response.missing = document_reference._document_path if not_found else None
    def WhichOneof(val):
        return "missing" if not_found else "found"
    # Fake out the underlying protobuf oneof accessor used by the client.
    response._pb = response
    response._pb.WhichOneof = WhichOneof
    firestore_api.batch_get_documents.return_value = iter(
        [response] if not return_empty else []
    )
    if use_transaction:
        transaction = Transaction(client)
        transaction_id = transaction._id = b"asking-me-2"
    else:
        transaction = None
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    snapshot = document_reference.get(
        field_paths=field_paths, transaction=transaction, **kwargs
    )
    assert snapshot.reference is document_reference
    if not_found or return_empty:
        assert snapshot._data is None
        assert not snapshot.exists
        assert snapshot.read_time is not None
        assert snapshot.create_time is None
        assert snapshot.update_time is None
    else:
        assert snapshot.to_dict() == {}
        assert snapshot.exists
        assert snapshot.read_time is read_time
        assert snapshot.create_time is create_time
        assert snapshot.update_time is update_time
    # Verify the request made to the API
    if field_paths is not None:
        mask = common.DocumentMask(field_paths=sorted(field_paths))
    else:
        mask = None
    if use_transaction:
        expected_transaction_id = transaction_id
    else:
        expected_transaction_id = None
    firestore_api.batch_get_documents.assert_called_once_with(
        request={
            "database": client._database_string,
            "documents": [document_reference._document_path],
            "mask": mask,
            "transaction": expected_transaction_id,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_get_not_found():
    """get() on a missing document yields a non-existent snapshot."""
    _get_helper(not_found=True)
def test_documentreference_get_default():
    """Plain get() on an existing document."""
    _get_helper()
def test_documentreference_get_return_empty():
    """An empty RPC stream (should be impossible) behaves like not-found."""
    _get_helper(return_empty=True)
def test_documentreference_get_w_retry_timeout():
    """get() forwards retry/timeout to the RPC."""
    from google.api_core.retry import Retry
    retry = Retry(predicate=object())
    timeout = 123.0
    _get_helper(retry=retry, timeout=timeout)
def test_documentreference_get_w_string_field_path():
    """A bare string (instead of a list) for field_paths raises ValueError."""
    with pytest.raises(ValueError):
        _get_helper(field_paths="foo")
def test_documentreference_get_with_field_path():
    """get() with a single-entry field mask."""
    _get_helper(field_paths=["foo"])
def test_documentreference_get_with_multiple_field_paths():
    """get() with several field paths, including a dotted path."""
    _get_helper(field_paths=["foo", "bar.baz"])
def test_documentreference_get_with_transaction():
    """get() inside a transaction passes the transaction id to the RPC."""
    _get_helper(use_transaction=True)
def _collections_helper(page_size=None, retry=None, timeout=None):
    """Exercise DocumentReference.collections() against a mocked
    list_collection_ids RPC and verify the yielded CollectionReferences."""
    from google.cloud.firestore_v1.collection import CollectionReference
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.services.firestore.client import FirestoreClient
    collection_ids = ["coll-1", "coll-2"]
    # Minimal stand-in for the GAPIC pager: just iterates the ids.
    class Pager(object):
        def __iter__(self):
            yield from collection_ids
    api_client = mock.create_autospec(FirestoreClient)
    api_client.list_collection_ids.return_value = Pager()
    client = _make_client()
    client._firestore_api_internal = api_client
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    # Actually make a document and call collections().
    document = _make_document_reference("where", "we-are", client=client)
    if page_size is not None:
        collections = list(document.collections(page_size=page_size, **kwargs))
    else:
        collections = list(document.collections(**kwargs))
    # Verify the response and the mocks.
    assert len(collections) == len(collection_ids)
    for collection, collection_id in zip(collections, collection_ids):
        assert isinstance(collection, CollectionReference)
        assert collection.parent == document
        assert collection.id == collection_id
    api_client.list_collection_ids.assert_called_once_with(
        request={"parent": document._document_path, "page_size": page_size},
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_collections_wo_page_size():
    """collections() with the default (None) page size."""
    _collections_helper()
def test_documentreference_collections_w_page_size():
    """collections() passes an explicit page size through to the RPC."""
    _collections_helper(page_size=10)
def test_documentreference_collections_w_retry_timeout():
    """collections() forwards retry/timeout to the RPC."""
    from google.api_core.retry import Retry
    retry = Retry(predicate=object())
    timeout = 123.0
    _collections_helper(retry=retry, timeout=timeout)
@mock.patch("google.cloud.firestore_v1.document.Watch", autospec=True)
def test_documentreference_on_snapshot(watch):
    """on_snapshot() delegates to Watch.for_document exactly once."""
    client = mock.Mock(_database_string="sprinklez", spec=["_database_string"])
    document = _make_document_reference("yellow", "mellow", client=client)
    document.on_snapshot(None)
    watch.for_document.assert_called_once()
def _make_credentials():
    """Return a mock satisfying the google.auth Credentials interface."""
    import google.auth.credentials
    return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project="project-project"):
    """Build a real Firestore Client wired to mock credentials."""
    from google.cloud.firestore_v1.client import Client
    credentials = _make_credentials()
    return Client(project=project, credentials=credentials)
| StarcoderdataPython |
1726656 | <filename>Aulas/aula016.py
# Iterate over a fixed menu tuple and announce each item.
# (Commented-out debug/alternative-loop code from the original was removed;
# the printed strings are user-facing and kept verbatim.)
lanche = ('Hamburguer', 'Suco','Pizza','Pudim')
for comida in lanche:
    print(f'Eu vou comer {comida}')
print('Comi pra caramba!')
| StarcoderdataPython |
127614 | <filename>examples/synchronous/getting_started.py
# Getting-started walkthrough for the synchronous HIDDB client:
# create a collection, index a 300-dim vector field, insert one document,
# run a nearest-neighbour search, then clean up. Requires real API
# credentials and a pre-created database id (placeholders below).
from hiddb.synchronous import HIDDB
hiddb = HIDDB("<key>", "<secret>")
# Create a database via dashboard and insert the database_id here
database_id = "<database_id>"
# Create a collection named "wordvectors"
hiddb.create_collection(database_id=database_id, collection_name="wordvectors")
# Create an index in that collection
hiddb.create_index(
    database_id=database_id,
    collection_name='wordvectors',
    field_name="word-vector",
    dimension=300
)
# Insert a document which is indexed
hiddb.insert_document(
    database_id=database_id,
    collection_name='wordvectors',
    documents=[{
        "id": "test-document",
        "word-vector": [42.0]*300
    }]
)
# Search for nearest documents
hiddb.search_nearest_documents(
    database_id=database_id,
    collection_name='wordvectors',
    index_name="word-vector",
    vectors=[[43.0]*300]
)
# Delete collection and corresponding indices
hiddb.delete_collection(database_id=database_id, collection_name="wordvectors")
3298675 | <reponame>anishsingh42/CodeChef
t = int(input())
while t:
N = list(map(int, input().split()))
N.remove(len(N)-1)
print(max(N))
t = t-1 | StarcoderdataPython |
135831 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Game(models.Model): #Overall Game Object
    """A single CTF game: name, scheduling window, and registration policy.

    NOTE(review): boolean-ish flags are stored as IntegerField and
    ForeignKeys lack on_delete, which indicates a pre-2.0 Django codebase.
    """
    name = models.CharField(max_length=200) #name of the game
    start_time = models.DateTimeField() #time to start
    end_time = models.DateTimeField() #time game ends
    active = models.IntegerField(default=1) #is the game active
    require_regcodes = models.IntegerField(default=0) #does the game require reg codes
    def __unicode__(self):
        return self.name
class Category(models.Model): #categories for challenges
    """Grouping bucket for challenges within one game."""
    game = models.ForeignKey(Game) #in which game
    name = models.CharField(max_length=200) #name of the category
    def __unicode__(self):
        return self.name
class Challenge(models.Model): #CTF challenges
    """One scoreable CTF challenge with its answer key and point value."""
    game = models.ForeignKey(Game) #in which game
    category = models.ForeignKey(Category) #Category Associated
    name = models.CharField(max_length=200) #Name of the challenge
    description = models.CharField(max_length=2000) #description , pointers etc
    points = models.IntegerField(default=100) #point value for the challenge
    active = models.IntegerField(default=0) #is the challenge active
    key = models.CharField(max_length=200) #scoring key for the challenge
    def __unicode__(self):
        return self.name
class Hint(models.Model): #hints to be displayed for a given challenge
    """Optional hint text attached to a challenge; shown only when active."""
    game = models.ForeignKey(Game) #in which game
    challenge = models.ForeignKey(Challenge) #challenge
    text = models.CharField(max_length=2000) #hint text
    active = models.IntegerField(default=0) #is the hint active
    def __unicode__(self):
        return self.text
class RegCodes(models.Model): # valid once reg codes
    """Single-use registration codes handed out to competitors."""
    code = models.CharField(max_length=200, null=True, blank=True) #codes
    used = models.IntegerField(default=0) #is it used?
    def __unicode__(self):
        return self.code
class Competitor(models.Model): #hold competiors (may extend the auth_user, dunno)
    """Game-scoped player profile, one-to-one with an auth User, plus
    running score/penalty counters and registration metadata."""
    game = models.ForeignKey(Game) #in which game
    user = models.OneToOneField(User)
    display_name = models.CharField(max_length=200) #name to display
    affiliation = models.CharField(max_length=200, null=True, blank=True) #affiliation text to display
    url = models.CharField(max_length=200, null=True, blank=True) #url
    bad_keys = models.IntegerField(default=0) #how many bad keys have they submitted
    points = models.IntegerField(default=0) #current point total
    active = models.IntegerField(default=1) #is the competitor active (ie allowed to play, score, count in standings)
    ipaddr = models.CharField(max_length=200, null=True, blank=True) #ip the competitor reged from
    regcode = models.ForeignKey(RegCodes, null=True) #code the competitor used to reg
    def __unicode__(self):
        return self.display_name
class Solved(models.Model): #challenges solved
    """Join record: which competitor solved which challenge, when, and for
    how many points (points are denormalized at solve time)."""
    game = models.ForeignKey(Game) #in which game
    competitor = models.ForeignKey(Competitor) #by whom
    challenge = models.ForeignKey(Challenge) #which challenge
    points = models.IntegerField(default=0) #how many points they got
    time = models.DateTimeField() #when they did it
| StarcoderdataPython |
3223969 | #!/sw/bin/python3.3
#! E:\Python33\python
#Read FASTA files from current directory, generate output txt file with values for features.
#30.6.2013 . Edited order of features generated, and bigrams from absolute to relative freq. + added length
#import Bio
#added features not yet put in featuregen classic
import re
import numpy as np
import sys
from itertools import permutations
from itertools import product
from Bio import SeqIO
from Bio.SeqUtils import IsoelectricPoint
from Bio.SeqUtils import ProtParam as pp
import random
import os
from math import log
from collections import defaultdict
import numba
from numba import autojit
#parameters that change depending on + or - set - output file's name + number of samples. (Neg - 120 samples each)
# NOTE(review): raw_input (and the print statements later in this file) mean
# this script is Python 2, despite the python3.3 shebang at the top.
outPut_filename = "Features_POS+"
num_samples = 900
negative_set = False
# Non-standard / ambiguous amino-acid codes to reject in input sequences.
unwanted_residues = ['U','X','Z','B']
#Total Np+, 90% redundnacy, NOT receptors (10.10.2012) = 627. 84% = 530
k = raw_input ('Positive (+) or Negative (-) training set? \n "-" for negative, any other sign for "+" \n')
if k == '-':
    outPut_filename = "Features_NEG-"
    num_samples = 190
    negative_set = True
#Used to filter out U,B,Z,X non-standard AAs from a given sequence/string. Returns True if illegals present.
def contain_illegals(str, set):
    """Return True (after printing a warning) if any character from
    `set` occurs in `str`; otherwise return False.

    NOTE(review): the parameter names shadow the builtins; kept unchanged
    so keyword callers are unaffected.
    """
    for bad in set:
        if bad not in str:
            continue
        print("ILLEGAL AA!")
        return True
    return False
# Data is Imported from a FASTA sequence file:
# list - each entry is one complete sequence string
def parse_fasta(filename):
    """Parse `filename` (FASTA) into a list of sequence strings.

    Each '>' header starts a new record; subsequent sequence lines are
    concatenated onto it. Lines containing any of the module-level
    `unwanted_residues` (checked via contain_illegals) are skipped.
    """
    sequences = []
    new_record = True
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        for line in f:
            if line.startswith('>'):
                new_record = True
            elif not contain_illegals(line, unwanted_residues):
                chunk = line.rstrip('\n')
                if new_record:
                    sequences.append(chunk)
                    new_record = False
                else:
                    # Continuation line of a multi-line FASTA record.
                    # (The original compared the list length against a
                    # header COUNT -- `len(sequences) - 1 == i` -- so
                    # multi-line records were split into separate entries.)
                    sequences[-1] += chunk
    return sequences
#Writes out results to ./<outName>.txt (same file each time); param is the key/values dict to print.
def outWrite(param, outName):
    """Write each value-list in `param` as one tab-separated line.

    param: dict mapping keys (sequences) to lists of feature values.
    outName: output file basename; ".txt" is appended.
    """
    # 'with' closes the handle even if a write fails; dead commented-out
    # debug code from the original was removed.
    with open('./' + outName + '.txt', "w") as out:
        for k, v in param.items():
            out.write('\t'.join(map(str, v)))
            out.write('\n')
    return
def getStr(param):
    """Return the same tab-separated representation outWrite() writes,
    as a single string (one line per dict entry)."""
    # str.join is linear; the original's repeated `+=` was quadratic.
    return ''.join('\t'.join(map(str, v)) + '\n' for v in param.values())
def KROne(seq):
    """Replace every non-digit character of `seq` with '0'.

    Callers pass a string whose K/R residues were already mapped to '1',
    so the result is a '1'/'0' mask of basic residues.
    Fix: the original's @jit decorator raised NameError at import -- only
    `autojit` is imported from numba, never `jit` -- so it is dropped.
    """
    return re.sub(r"\D", '0', seq)
# GKR = 1 (Glycine)
def GKROne(seq):
    """Map glycine to '1' (K/R are already '1' in the caller's input),
    then zero every remaining non-digit character.

    Fix: dropped the original @jit decorator -- `jit` was never imported
    (only numba.autojit), so defining the function raised NameError.
    """
    return re.sub(r"\D", '0', seq.replace('G', '1'))
#Hydrophobicity - "Charged AA" (DERHK) = '1'
def chargeOne(seq):
    """Map D/E/H to '1' (K/R are already '1' upstream), then zero every
    remaining non-digit character.

    Fix: dropped the original @jit decorator -- `jit` was never imported
    (only numba.autojit), so defining the function raised NameError.
    """
    masked = seq.replace('D', '1').replace('E', '1').replace('H', '1')
    return re.sub(r"\D", '0', masked)
def MerCount(s, combo=None):
    """Count every length-5 window of `s` against the 2**5 binary 5-mers.

    s: a '0'/'1' mask string (see KROne / GKROne / chargeOne).
    combo: optional list of 5-mer keys; defaults to all 32 binary patterns
        (the original silently read a module-level global of the same
        content, which made the function untestable in isolation).
    Returns the counts in key order as a dict values view.
    """
    if combo is None:
        combo = [''.join(bits) for bits in product('01', repeat=5)]
    counts = dict.fromkeys(combo, 0)
    # range() replaces the Python-2-only xrange(); behavior is identical.
    for i in range(len(s) - 4):
        counts[s[i:i + 5]] += 1
    return counts.values()
# a and b are relative volume of valine and Leu/Ile side chains to side chain of alanine.
def aliphaticness(seq):
    """Aliphatic index of `seq`: 100 * (X_Ala + a*X_Val + b*(X_Ile + X_Leu)),
    where X_* are mole fractions of each residue.

    Fix: dropped the original @jit decorator -- `jit` was never imported
    (only numba.autojit), so defining the function raised NameError.
    """
    a = 2.9
    b = 3.9
    length = float(len(seq))
    alanine_per = seq.count('A') / length
    valine_per = seq.count('V') / length
    isoleucine_per = seq.count('I') / length
    leucine_per = seq.count('L') / length
    # Aliphatic index = X(Ala) + a * X(Val) + b * ( X(Ile) + X(Leu) )
    return 100 * (alanine_per + a * valine_per + b * (isoleucine_per + leucine_per))
def Autocorrellation(seq, loc):
    """Autocorrelation statistic of the basic-residue (R/K) mask of `seq`.

    Pairs/singles of R and K collapse to '1', everything else to '0'; the
    mask is self-correlated and the `loc`-th smallest correlation value,
    normalized by the mask length, is returned (0 if the mask is all zero).

    Fixes: dropped the @jit decorator (`jit` was never imported -- only
    numba.autojit -- so defining this raised NameError) and wrapped
    map() in list() so the function also works on Python 3, where map
    returns a lazy iterator that numpy cannot consume.
    NOTE(review): collapsing 'RR'/'RK'/... to a single '1' shortens the
    mask relative to the sequence, and a mask of length 1 makes the
    sorted(...)[-2] lookup raise IndexError -- confirm intended.
    """
    seq = seq.replace('RR', '1').replace('KK', '1').replace('RK', '1').replace('KR', '1')
    seq = seq.replace('R', '1').replace('K', '1')
    seq = re.sub(r"\D", '0', seq)
    seq = list(map(int, seq))
    selfCor = np.correlate(seq, seq, 'full')
    # Avoid divide-by-zero / meaningless stats on an all-zero mask.
    if sum(seq) == 0:
        return 0
    # loc=-2 at the call sites selects the second-highest (non-trivial) peak.
    return sorted(selfCor)[loc] / float(len(seq))
# Counts percentage of occurences of biGrams (from bigramDict) for a given seq
def bigramsFreq(seq, bigramDict ) :
    """Overwrite every key of `bigramDict` with count(key)/(len(seq)/2) and
    return the values view.

    NOTE(review): under Python 2 (this script uses raw_input/xrange) both
    `len(seq)/2` and `count/length` are INTEGER divisions, so nearly every
    frequency floors to 0 -- the only call site is commented out, which may
    be why this went unnoticed. Also mutates the shared dict in place.
    """
    length=((len(seq))/2)
    for Aa in bigramDict.keys() :
        bigramDict[Aa] = ((seq.count(str(Aa)))/length)
    #print bigramDict
    return bigramDict.values()
def seq_Entropy(seq):
    """Per-residue positional entropy of `seq`.

    For each of the 20 standard amino acids, the gaps between consecutive
    occurrences (normalized by sequence length) are folded into a Shannon
    term -sum(g * log2(g)); residues occurring fewer than twice score 0.
    Returns the 20 values as a dict values view (fixed key order).
    """
    length = float(len(seq))
    entropies = dict.fromkeys('ACDEFGHIKLMNPQRSTVWY', 0)
    for residue in entropies:
        positions = [m.start() for m in re.finditer(residue, seq)]
        total = 0
        for prev_pos, next_pos in zip(positions, positions[1:]):
            gap = (next_pos - prev_pos) / length
            total += gap * log(gap, 2)
        entropies[residue] = -total
    return entropies.values()
#Code for Finding AA motifs counts
def PrefixCount(Aa, seq):
    """Relative frequency of motifs where a residue from class `Aa`
    precedes (with at most one spacer) a basic K/R pair in `seq`."""
    pattern = '[%s].{0,1}[KR][KR]' % (Aa,)
    return len(re.findall(pattern, seq)) / float(len(seq))
def SuffixCount(Aa, seq):
    """Relative frequency of motifs where a basic K/R pair precedes
    (with at most one spacer) a residue from class `Aa` in `seq`."""
    pattern = '[KR][KR].{0,1}[%s]' % (Aa,)
    return len(re.findall(pattern, seq)) / float(len(seq))
def NSites(seq, length):
    """Relative frequency of N-glycosylation sequons (N-X-S/T-X, X != P)
    in `seq`, normalized by the caller-supplied float `length`."""
    site_count = len(re.findall(r'N[^P][ST][^P]', seq))
    return site_count / length
# Aspartic acid, asparagine hydroxylation sites
def hydroxSites(seq, length):
    """Relative count of the EGF-like hydroxylation motif in `seq`.

    Fix: the original pattern contained '{2, 4}' (space inside the braces),
    which Python's re module treats as LITERAL brace characters rather than
    a quantifier -- the motif could never match a protein sequence.
    '{2,4}' restores the intended 2-to-4 residue gap.
    """
    sites = len(re.findall('CC.{13}C.{2}[GN].{12}C.C.{2,4}C', seq))
    return sites / length
#counts # of suspected cleavage sites according to known motif model
# Xxx-Xxx-Lys-Lys# , Xxx-Xxx-Lys-Arg# , Xxx-Xxx-Arg-Arg# , Arg-Xxx-Xxx-Lys# , Arg-Xxx-Xxx-Arg#
# lysine: K. arginine: R. "Not Proline" = [^P]
def cleavageCounts(seq):
    """Count suspected cleavage-site motifs in `seq` (sum of two families).

    Fix: dropped the original @jit decorator -- `jit` was never imported
    (only numba.autojit), so defining the function raised NameError.
    """
    # Arg-Xxx-(not P)-(Arg|Lys)
    count1 = len(re.findall('R.[^P][RK]', seq))
    # Xxx-(not P)-(Arg|Lys)-(Arg|Lys)
    count2 = len(re.findall('.[^P][RK][RK]', seq))
    return count1 + count2
# Bigramsdict
def gen_BigramDict():
    """Return a dict keyed by every ordered pair of DISTINCT standard
    amino acids, all values initialized to 0.

    NOTE(review): permutations() excludes doubled pairs ('AA', 'CC', ...),
    giving 380 keys, while a comment in the main script mentions 400
    bigrams -- confirm whether itertools.product was intended.
    """
    keys = [first + second for first, second in permutations('ACDEFGHIKLMNPQRSTVWY', 2)]
    return dict.fromkeys(keys, 0)
# bigramDict = dict containing all the valid 2 letter bigrams as keys
#==============================================================================
# #main CODE: sample sequences from ./*.fasta, compute one feature vector per
# sequence, and write them out with outWrite(). (Python 2 script.)
#==============================================================================
combo = [''.join(x) for x in product('01', repeat=5)] #combo = list of all possible [0/1] ,length 5 combinations
sampled_proteins = {}
sampled_seq = []
sequences = {} #will contain sequences as values (rather than JUST as "keys" as is case with 'sampled_proteins')
aa_groups = ('FYW', 'P', 'C', 'RHK', 'DE' , 'CSTMNQ','RK',
             'ST','LASGVTIPMC','EKRDNQH')
bigramDict=gen_BigramDict()
#Read FASTA files from current directory
#for f in os.listdir(sys.argv[1]) :
files = [f for f in os.listdir(os.curdir) if (os.path.isfile(f) and f.endswith(".fasta"))]
for f in files:
#for f in os.listdir(os.curdir) :
    if (negative_set): #If negative sets marker set to true due to user input
        # NOTE(review): `and` binds tighter than `or`, so any file starting
        # with 'Neg' passes this test regardless of the .fasta/underscore
        # checks -- confirm whether parentheses were intended.
        if f.endswith(".fasta") and not f.startswith("_") and f.startswith('NEG') or f.startswith('Neg') :
            Fasta_seq = parse_fasta(f)
            #num_samples = How many samples to sample at random from each file (in the given directory).
            sampled_seq += random.sample(Fasta_seq, num_samples)
            sampled_proteins = dict.fromkeys(sampled_seq,0)
    #NP+ Positive case
    elif f.endswith(".fasta") and not f.startswith("_"):
        Fasta_seq = parse_fasta(f)
        # sampled_seq += random.sample(Fasta_seq, len(Fasta_seq))
        #Make samples seqs for NP+ Contain all the NP+:
        sampled_seq +=Fasta_seq
        sampled_proteins = dict.fromkeys(sampled_seq,0)
sequences = dict(zip(sampled_seq, sampled_seq))
#http://biopython.org/wiki/ProtParam
# Build one flat feature list per sequence: global ProtParam stats, masks,
# 5-mer counts, entropy, and motif frequencies (order defines the columns).
for seq in sampled_proteins :
    length = float(len(seq))
    Z = pp.ProteinAnalysis(sequences[seq].replace('X', '').replace('Z', ''))
    sampled_proteins[seq] = []
    window_mer = sequences[seq].replace('R', '1').replace('K', '1')
    sampled_proteins[seq].append(length)
    sampled_proteins[seq].append(Z.isoelectric_point())
    sampled_proteins[seq].append(Z.molecular_weight())
    sampled_proteins[seq].append(Z.gravy())
    sampled_proteins[seq].append(Z.aromaticity())
    sampled_proteins[seq].append(Z.instability_index())
    # (Z.flexibility())
    #protparam AA% returns a dict. of K(Aa):V(%) pairs
    sampled_proteins[seq].append(Autocorrellation(sequences[seq],-2))
    #sampled_proteins[seq].append(Autocorrellation(sequences[seq],-3))
    sampled_proteins[seq].append(aliphaticness(sequences[seq]))
    # N Glycosylation sites
    sampled_proteins[seq].append(NSites(sequences[seq],length))
    # Aspartic acid, asparagine hydroxylation sites
    sampled_proteins[seq].append(hydroxSites(seq,length))
    #Counts of suspected cleavage sites
    sampled_proteins[seq].append(cleavageCounts(sequences[seq]) / length)
    sampled_proteins[seq].extend(Z.get_amino_acids_percent().values())
    sampled_proteins[seq].extend(MerCount(KROne(window_mer)))
    sampled_proteins[seq].extend(MerCount(GKROne(window_mer)))
    sampled_proteins[seq].extend(MerCount(chargeOne(window_mer)))
    sampled_proteins[seq].extend(seq_Entropy(sequences[seq]))
    #AA Bigrams (400) frequencies:
    #sampled_proteins[seq].extend(bigramsFreq(sequences[seq], bigramDict))
    for Aa in aa_groups :
        sampled_proteins[seq].append(PrefixCount(Aa, sequences[seq]))
        sampled_proteins[seq].append(SuffixCount(Aa, sequences[seq]))
#Finally Write out results (with seperate lines per key/sequence) to a new file:
outWrite(sampled_proteins, outPut_filename)
#print 'length'
#print len(sampled_proteins)
print 'Done'
| StarcoderdataPython |
3244957 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from steerable.SCFpyr_NumPy import SCFpyr_NumPy
import steerable.utils as utils
import cv2
import torch
import torchvision
# Demo: build a complex steerable pyramid over a grayscale test image,
# reconstruct it, and report the reconstruction MSE.
# img_size=256
img=cv2.imread("./assets/lena.jpg",0)
# img=cv2.resize(img,(img_size,img_size))
# cv2.imshow('img',img)
# build csp
height=12
nbands=4
scale_factor=2**(1/2)
# scale_factor=2
pyr=SCFpyr_NumPy(height=height,nbands=nbands,scale_factor=scale_factor)
# coeff=pyr.build_c(img)
# img_con=pyr.reconstruct_c(coeff)
coeff=pyr.build(img)
img_con=pyr.reconstruct(coeff)
SCFpyr_NumPy.pyr_info(coeff)
# Fix: np.float (a plain alias of the builtin) was removed in NumPy 1.24;
# using float directly is identical on every NumPy version.
print('MSE: ',np.mean(np.power(img.astype(float)-img_con,2)))
# coeff_grid = utils.make_grid_coeff(coeff, normalize=True)
# cv2.imshow('coeff', coeff_grid)
# cv2.imshow('img_recon',img_con.astype(np.uint8))
# cv2.waitKey(0)
72540 | <filename>src/engineV2.py
escapeProtect = ""  # NOTE(review): never read anywhere in this module -- candidate for removal
def parseTag(line, skip=0):
    """Recursively parse one Daze tag expression like "(div: 'text')" or
    "(div: (p: 'x'))" into a [name, value] pair, where value is either a
    string or a nested parseTag result.

    skip: number of already-consumed ':'-segments to skip (used when a
    nested tag spanned several segments of the parent's split).

    NOTE(review): several operations here look damaged or dead --
    `split = line.replace("\\:", "")` is immediately overwritten by the
    split() on the next line; the `.replace("", ":")` / `.replace("", ")")`
    calls insert the replacement between EVERY character (the empty first
    argument was probably once an escape-placeholder character); and one
    `.replace("", "(")` result is discarded. The escape handling therefore
    needs review against the original intent before any restructuring.
    """
    skipNext = skip
    split = line.replace("\:", "")
    split = line.split(":")
    for i in range(len(split)-1):
        split[i] = split[i].replace("", ":")
        split[i] = split[i].replace("\(", "")
    split[0] = split[0].replace("(", "")
    for i in range(len(split)-1):
        # NOTE(review): return value discarded -- this line has no effect.
        split[i].replace("", "(")
    if len(split) != 2:
        # More than one value segment: walk them, recursing into "(..."
        # sub-tags and consuming the segments they spanned via skipNext.
        for i in range(1,len(split)):
            if skipNext!=0:
                skipNext = skipNext-1
                pass
            elif skipNext==0:
                split[i] = split[i].replace("\)", "")
                split[i] = split[i].replace(")", "")
                split[i] = split[i].replace("", ")")
                if split[i][0] == " ":
                    if split[i][1] == "'" or split[i][1] == '"':
                        # Quoted string value: strip " '...'" wrapper.
                        split[i] = split[i][2:-1]
                    elif split[i][1] == "(":
                        # Nested tag: re-join the segments it spans, balance
                        # its parentheses, and recurse.
                        print(split[i])
                        this = split[i][1:]
                        next = split[i+1]
                        fl = True
                        c=1
                        p=next
                        while fl==True:
                            if p[0] == "(" or p[1] == "(":
                                next=f'{next}:{split[i+c]}'
                                p = split[i+c]
                                skipNext=skipNext+1
                                c=c+1
                            else:
                                skipNext=skipNext+1
                                fl = False
                        while next[-1] == ")":
                            if next[-1] == ")":
                                next = next[0:-1]
                        addBack = 0
                        for j in range(len(next)):
                            if(next[j] == "("):
                                addBack = addBack + 1
                        while addBack>0:
                            next=next+")"
                            addBack = addBack-1
                        print(i)
                        split[i] = parseTag(f"{this}:{next}", skip=skipNext)
                    # NOTE(review): `or '"'` is always truthy, so this elif
                    # matches ANY remaining value; `split[1]` (not split[i])
                    # on the next line also looks like a copy-paste slip.
                    elif split[i][0] == "'" or '"':
                        split[i] = split[1][2:-1]
                elif split[i][0] == "(":
                    # Duplicate of the nested-tag branch above for values
                    # without a leading space.
                    print(split[i])
                    this = split[i][1:]
                    next = split[i+1]
                    fl = True
                    c=1
                    p=next
                    while fl==True:
                        if p[0] == "(" or p[1] == "(":
                            next=f'{next}:{split[i+c]}'
                            p = split[i+c]
                            skipNext=skipNext+1
                            c=c+1
                        else:
                            skipNext=skipNext+1
                            fl = False
                    while next[-1] == ")":
                        if next[-1] == ")":
                            next = next[0:-1]
                    addBack = 0
                    for j in range(len(next)):
                        if(next[j] == "("):
                            addBack = addBack + 1
                    while addBack>0:
                        next=next+")"
                        addBack = addBack-1
                    print(i)
                    split[i] = parseTag(f"{this}:{next}", skip=skipNext)
    else:
        # Exactly "name: value": unwrap the single value segment.
        split[1] = split[1].replace("\)", "")
        split[1] = split[1].replace(")", "")
        split[1] = split[1].replace("", ")")
        if split[1][0] == " ":
            # NOTE(review): `or '"'` is always truthy here as well.
            if split[1][1] == "'" or '"':
                split[1] = split[1][2:-1]
            elif split[1][1] == "(":
                print("ERR")
        elif split[1][0] == "'" or '"':
            split[1] = split[1][2:-1]
        elif split[1][0] == "(":
            pass
    #print([split[0], split[1]])
    return [split[0],split[1]]
def parseSite(input):
    """Parse the lines of a Daze '$site' source into
    (comments, variables, metavariables, tags).

    Line prefixes: '##' comment, '+!' variable, '+%' metavariable,
    '(' tag (via parseTag), '$' directive (ignored), '+' attribute
    appended to the most recent tag. Errors are reported by RETURNING a
    string starting with '!!', not by raising -- callers must check the
    return type.

    NOTE(review): the `elif int(variable[1]):` branches raise ValueError on
    any non-numeric value instead of falling through to the '!'/'%' cases,
    and the literal "0" is falsy so it lands in the error branch -- confirm
    the intended type-dispatch order.
    """
    lines = []
    comments=[]
    variables={}
    metavariables={}
    tags=[]
    for line in input:
        lines.append(line)
    for line in lines:
        if line[:2] == "##":
            comments.append(line[2:])
        elif line[:2] == "+!":
            # Variable: "+!name = value"; value is typed as str/bol/int or
            # resolved from an earlier (meta)variable.
            variable = line[2:].replace(" = ", "=").split("=")
            if variable[1][0] == "'" or variable[1][0] == '"':
                variable[1] =['str', variable[1][1:-1]]
            elif variable[1] == "True" or variable[1] == "False":
                variable[1]=['bol', variable[1]]
            elif int(variable[1]):
                variable[1]=['int', variable[1]]
            elif variable[1][0] == "!":
                variable[1]=variables[variable[1]]
            elif variable[1][0] == "%":
                variable[1]=metavariables[variable[1]]
            else:
                #TODO: GET CURRENT LINE NUMBER FOR ERROR
                return("!!Error on line (tba): Invalid Variable Type")
            variables[variable[0]]=variable[1]
        elif line[:2] == "+%":
            # Metavariable: identical typing rules, separate namespace.
            variable = line[2:].replace(" = ", "=").split("=")
            if variable[1][0] == "'" or variable[1][0] == '"':
                variable[1] =['str', variable[1][1:-1]]
            elif variable[1] == "True" or variable[1] == "False":
                variable[1]=['bol', variable[1]]
            elif int(variable[1]):
                variable[1]=['int', variable[1]]
            elif variable[1][0] == "!":
                variable[1]=variables[variable[1]]
            elif variable[1][0] == "%":
                variable[1]=metavariables[variable[1]]
            else:
                #TODO: GET CURRENT LINE NUMBER FOR ERROR
                return("!!Error on line (tba): Invalid MetaVariable Type")
            metavariables[variable[0]]=variable[1]
        elif line[0] == "(":
            tags.append(parseTag(line))
            #print(parseTag(line))
        elif line[0] == "$":
            pass
        elif line[0] == "+":
            # Attribute "+name=value" attached to the most recent tag
            # (IndexError if no tag has been parsed yet).
            working = line
            working = working[1:]
            working = working.replace(" = ", "=").split("=")
            if working[1][0] == "!":
                working[1]=variables[working[1][1:]][1]
            elif working[1][0] == "'" or working[1][0] == '"':
                working[1]=working[1][1:-1]
            tag = tags[-1]
            #print(tag)
            if len(tag) == 2:
                tag.append([working])
            elif len(tag) == 3:
                tag[2].append(working)
            else:
                return("!!Error: Internal Error. Submit an issue with code 'r01:ev2:086'")
            #print(tag)
        else:
            return(f"!!Error on line (tba): Unknown line start. {line}")
    return comments, variables, metavariables, tags
def toHtml(input, part="head"):
    """Compile Daze '$site' source text into its parsed components.

    input: the full Daze source as one string; statements are separated by
        ';' / '; ' (an escaped '\\;' survives as a literal ';').
    part: currently unused; kept for interface compatibility.
    Returns (metavariables, variables, tags, comments) on success, or an
    error string starting with '!!' (errors are reported by return value,
    not by raising).
    """
    # Split statements onto lines; the first replace turns '\;' into
    # '\<newline>', which is then restored to a literal ';'.
    input = input.replace("; ", "\n").replace(";", "\n")
    input = input.replace("\\\n", ";")
    input = input.splitlines()
    tags = []
    comments = []
    variables = {}
    metavariables = {}
    print(input, "\n")  # debug trace retained from the original
    if input[0] == "$site":
        comments, variables, metavariables, tags = parseSite(input)
    elif input[0] == "$script":
        return "!!Error: Attempt to compile pure script into HTML"
    else:
        return f"!!Error on line 1: Unknown Daze type '{input[0]}'"
    # Dead locals `textInput` and `output` from the original were removed.
    return metavariables, variables, tags, comments
# Testing Area
# NOTE(review): runs at import time; guard with `if __name__ == "__main__":`
# if this module is ever imported rather than executed directly.
print(toHtml("$site; +!lol = 'rofl'; (div: (div: (div: (p: 'hey')))); +lol='lol'"))
3372898 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 24 17:23:10 2016
fun with compression
@author: mike
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import metrics
from scipy import ndimage
def meandist2points(image, kpoints):
    """Mean Euclidean distance from each sample to its nearest centre.

    Bug fix: the original used np.max(np.shape(image)) as the sample count,
    which is wrong whenever there are fewer rows than columns, and iterated
    per pixel in Python; this computes the same quantity vectorised.

    Args:
        image: (n, d) array-like of samples (e.g. flattened RGB pixels, d == 3).
        kpoints: (k, d) array-like of cluster centres.

    Returns:
        float: average over samples of the distance to the closest centre.
    """
    samples = np.asarray(image, dtype=float)
    centres = np.asarray(kpoints, dtype=float)
    # Squared distances via ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b; this keeps
    # the same n x k memory footprint as the original sklearn call. The
    # maximum() guards against tiny negative values from rounding.
    sq = (np.sum(samples ** 2, axis=1)[:, None]
          + np.sum(centres ** 2, axis=1)[None, :]
          - 2.0 * samples.dot(centres.T))
    nearest_sq = np.min(np.maximum(sq, 0.0), axis=1)
    return float(np.mean(np.sqrt(nearest_sq)))
# --- Script: k-means colour quantisation of a photo (Python 2 prints) ---
# Loads an image, then for each k in [21, 31) runs Lloyd's algorithm on the
# flattened RGB pixels and saves the recoloured image.
img = ndimage.imread('mike_lounging.jpg')
img_height = np.shape(img)[0]
img_width = np.shape(img)[1]
plt.imshow(img)
# Flatten to one RGB row per pixel.
new_img = np.reshape(img, (img_width*img_height, 3))
num_ks = 31
for k in range(21, num_ks):
    # Random initial centres in RGB space.
    kvec = np.random.uniform(0, 255, (k, 3))
    err = meandist2points(new_img, kvec)
    # Alternate assignment/update steps until the mean nearest-centre
    # distance stops improving by more than 0.01.
    while True:
        # Assignment step (squaring does not change the argmin).
        dists =metrics.pairwise.euclidean_distances(new_img, kvec)**2
        nearest = np.argmin(dists, axis =1)
        #bestdists = np.min(dists,axis =1)
        #avg_dists = np.mean(bestdists)
        #print avg_dists
        #print meandist2points(new_img, kvec)
        #print kvec
        # Update step: move each centre to the mean of its assigned pixels.
        for means in range(k):
            new_point = np.mean(new_img[nearest== means, :],axis= 0)
            if new_point[0] != new_point[0]: # nan because no pixels are with this point
                # Re-seed empty clusters at a random colour.
                new_point = np.random.uniform(0, 255, (1, 3))
            #new_point = np.nan_to_num(np.mean(new_img[nearest== means, :],axis= 0))
            #if new_point[0] < 1 and new_point[1] < 1 and new_point[2] < 1:
            # new_point = np.random.uniform(0, 255, (1, 3))
            kvec[means, :] = new_point
        err2 = meandist2points(new_img, kvec)
        print err2
        print ''
        if np.abs(err2 - err) < .01:
            break
        err = err2
    # Final hard assignment against the rounded (integer-valued) centres.
    kfinal = np.round(kvec)
    dists =metrics.pairwise.euclidean_distances(new_img, kfinal)
    nearest = np.argmin(dists, axis =1)
    #print kvec
    compressed_img = np.zeros((img_width*img_height, 3))
    #compressed_img[:] = new_img
    for means in range(k):
        compressed_img[nearest == means, :] = kfinal[means]
    final_img = np.reshape(compressed_img, (img_height,img_width,3))
    # NOTE(review): the image is negated before display/save -- presumably a
    # workaround for imsave's value scaling; confirm this is intentional.
    plt.imshow(-final_img)
    stringname = 'me%dcolor.png' % k
    plt.imsave(fname=stringname, arr= -final_img, format='png')
    #residual = np.abs(compressed_img - new_img)
    #residual = np.sum(residual, axis = 1)
    #plt.hist(residual, 20)
    #plt.imshow(final_img)
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(kvec[:,0], kvec[:, 1], kvec[:, 2] )
#plt.show()
| StarcoderdataPython |
1709971 | # -*- coding: utf-8 -*-
import sys
import os
from os.path import join, dirname, abspath, exists
sys_path = dirname(dirname(abspath(__file__)))
parent_sys_path = dirname(sys_path)
if sys_path not in sys.path:
sys.path.insert(0, sys_path)
if parent_sys_path not in sys.path:
sys.path.insert(0, parent_sys_path)
import utils.config_loader as config
from utils.config_loader import logger
from data.dataset_parser import dataset_parser
import utils.tools as tools
import summ.rank_sent as rank_sent
import summ.select_sent as select_sent
from lexrank import STOPWORDS, LexRank
import itertools
from tqdm import tqdm
# This pipeline only operates at sentence granularity.
assert config.grain == 'sent'
# Output model identifier, e.g. 'lexrank-2007'.
MODEL_NAME = 'lexrank-{}'.format(config.test_year)
# Cosine-similarity cap used during selection; 1.0 effectively disables
# redundancy filtering (see select_e2e).
COS_THRESHOLD = 1.0
def _lexrank(cid):
    """Score every sentence of cluster *cid* with LexRank.

    Sentence ids are '<doc_idx><SEP><sent_idx>' strings; returns the ranked
    records produced by rank_sent.
    """
    _, docs = dataset_parser.cid2sents(cid)  # docs: one sentence list per document
    flat_sents = list(itertools.chain.from_iterable(docs))
    ranker = LexRank(docs, stopwords=STOPWORDS['en'])
    scores = ranker.rank_sentences(flat_sents, threshold=None, fast_power_method=True)
    # Map each (doc, sentence) position back to its score in the flat list.
    sid2score = {}
    flat_pos = 0
    for doc_idx, doc in enumerate(docs):
        for sent_idx in range(len(doc)):
            sid = config.SEP.join((str(doc_idx), str(sent_idx)))
            sid2score[sid] = scores[flat_pos]
            flat_pos += 1
    ranked = rank_sent.sort_sid2score(sid2score)
    return rank_sent.get_rank_records(ranked, sents=docs, flat_sents=False)
def rank_e2e():
    """Rank sentences for every test cluster, dumping one ranking file per cluster."""
    out_dp = tools.get_rank_dp(model_name=MODEL_NAME)
    # Refuse to overwrite an existing ranking directory.
    if exists(out_dp):
        raise ValueError('rank_dp exists: {}'.format(out_dp))
    os.mkdir(out_dp)
    for cid in tqdm(tools.get_test_cc_ids()):
        records = _lexrank(cid)
        rank_sent.dump_rank_records(records, out_fp=join(out_dp, cid), with_rank_idx=False)
    logger.info('Successfully dumped rankings to: {}'.format(out_dp))
def select_e2e():
    """Select summary sentences from the dumped rankings.

    No redundancy removal is applied here (COS_THRESHOLD == 1.0).
    """
    select_sent.select_end2end(model_name=MODEL_NAME, cos_threshold=COS_THRESHOLD)
# Pipeline entry: rank all test clusters, then run sentence selection.
if __name__ == '__main__':
    rank_e2e()
    select_e2e()
| StarcoderdataPython |
1678339 | import unittest
from src.CoordinateMatcher import CoordinateMatcher
class MyTestCase(unittest.TestCase):
    """Smoke tests for CoordinateMatcher using the bundled equivalence table."""
    def test_basics(self):
        # Matching a simple two-coordinate input should not raise.
        matcher = CoordinateMatcher('data/equivalence_table.csv')
        input = {1: 105.43, 2: 48.97}
        matcher.match_json_input(input)
    def test_jsonfy(self):
        # Results can be re-keyed into a JSON-style dict of 1-based string indices.
        matcher = CoordinateMatcher('data/equivalence_table.csv')
        input = {1: 105.43, 2: 48.97}
        matcher.match_json_input(input)
        results = matcher.results
        results_dct = {str(i+1): results[i] for i in range(0, len(results))}
        print(results_dct)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
53148 | import os
import sys
import random
"""
Notes:
- Excellent attempt
"""
def main():
    """Draw 20 random values from range(5) and print each value's frequency.

    Returns:
        os.X_OK, which the __main__ guard feeds to sys.exit().
    """
    x = random.choices(range(5), k=20)
    # Tally in a single pass instead of five separate O(n) list.count() scans.
    counts = [0] * 5
    for v in x:
        counts[v] += 1
    x0, x1, x2, x3, x4 = counts
    print(f'x = {x}')
    print(f'no. of zeroes = {x0}, no. of ones = {x1}, no. of twos = {x2}, no. of threes = {x3}, no. of fours = {x4}')
    # NOTE(review): os.X_OK == 1, so `sys.exit(main())` reports a failure
    # status; kept for backward compatibility -- confirm whether 0 was intended.
    return os.X_OK
# Script entry: exit status is main()'s return value (os.X_OK == 1).
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
179537 | """
Collection of Data Science helper functions
"""
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
def confusion_plot(y_true, y_pred, cmap='viridis'):
    """Plot a labelled confusion matrix as a Seaborn heatmap.

    Args:
        y_true: Ground-truth labels.
        y_pred: Predicted labels, same length as y_true.
        cmap: Matplotlib colormap name for the heatmap.

    Returns:
        The matplotlib Axes returned by seaborn.heatmap.
    """
    # Bug fix: the original referenced the undefined global `y_val`
    # (NameError at call time). Use the labels present in both inputs, which
    # matches the sorted-union ordering confusion_matrix uses by default.
    labels = unique_labels(y_true, y_pred)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    table = pd.DataFrame(confusion_matrix(y_true, y_pred),
                         columns=columns,
                         index=index)
    return sns.heatmap(table, annot=True, fmt='d', cmap=cmap)
# Small example frames: a single column of ten 1.0s and of fifty 0.0s.
ONES = pd.DataFrame(np.ones(10))
ZEROS = pd.DataFrame(np.zeros(50))
| StarcoderdataPython |
1753354 | <reponame>dlist-top/client-py
from datetime import datetime
class Timestamp(datetime):
    """A datetime subclass built from millisecond Unix timestamps."""
    @classmethod
    def parse(cls, unix):
        """Build an instance from *unix*, a Unix timestamp in milliseconds (UTC).

        Bug fix: the original called datetime.utcfromtimestamp directly, so
        Timestamp.parse returned a plain datetime and ignored *cls*. Calling
        the classmethod on *cls* returns a Timestamp (still a datetime
        subclass, so existing callers are unaffected).
        """
        return cls.utcfromtimestamp(unix / 1000)
| StarcoderdataPython |
172443 | # coding=utf-8
from setproctitle import setproctitle
from utils import SensorConsumerBase
import datetime
import os
import sys
import json
class Door(SensorConsumerBase):
    """Consumes door sensor events from the 'door-pubsub' redis channel.

    Forwards raw readings to InfluxDB, publishes light-control and switch
    events on state changes, and maintains an "outer door open" notification
    for as long as the outer door stays open.
    """
    def __init__(self, redis_host, redis_port):
        SensorConsumerBase.__init__(self, redis_host=redis_host, redis_port=redis_port)
        self.notification = None
        # Clear any notification left over from a previous run.
        self.delete_notification("door")
        self.door_open_elapsed_since = None  # when the outer door was first seen open
        self.outer_door_state = None  # last seen outer-door state (None = unknown)
        self.inner_door_state = None  # last seen inner-door state (None = unknown)
    def run(self):
        # Blocks, dispatching every message to pubsub_callback.
        self.subscribe("door-pubsub", self.pubsub_callback)
    def pubsub_callback(self, data):
        """Handle one pub/sub message containing door sensor readings."""
        # Command messages carry an "action" key; only measurements matter here.
        if "action" in data:
            return
        influx_data = {
            "measurement": "door",
            "time": datetime.datetime.utcnow().isoformat() + "Z",
            "tags": {
                "location": "door",
            },
            "fields": data["data"],
        }
        self.insert_into_influx([influx_data])
        # Publish light-control triggers only on actual state transitions
        # (the initial None state publishes nothing).
        if self.outer_door_state is not None:
            if self.outer_door_state != data["data"]["door_outer_open"]:
                self.redis_instance.publish("lightcontrol-triggers-pubsub", json.dumps({"key": "outer-door", "trigger": "switch", "open": data["data"]["door_outer_open"]}))
        if self.inner_door_state is not None:
            if self.inner_door_state != data["data"]["door_inner_open"]:
                self.redis_instance.publish("lightcontrol-triggers-pubsub", json.dumps({"key": "inner-door", "trigger": "switch", "open": data["data"]["door_inner_open"]}))
        self.outer_door_state = data["data"]["door_outer_open"]
        self.inner_door_state = data["data"]["door_inner_open"]
        # The current state is always re-published on the switch channel.
        self.redis_instance.publish("switch-pubsub", json.dumps({"source": "door", "name": "outer-door", "value": self.outer_door_state}))
        self.redis_instance.publish("switch-pubsub", json.dumps({"source": "door", "name": "inner-door", "value": self.inner_door_state}))
        if data["data"]["door_outer_open"]:
            if not self.door_open_elapsed_since:
                self.door_open_elapsed_since = datetime.datetime.now()
            # Message is Finnish for "The outer door is open (<elapsed>)".
            notification = {
                "notification": "door",
                "message": "Ulko-ovi on auki ({elapsed_since})",
                "user_dismissable": False,
                "elapsed_since": self.door_open_elapsed_since,
            }
            # Avoid re-sending an identical notification on every message.
            if self.notification != notification:
                self.notification = notification
                self.update_notification_from_dict(**notification)
        else:
            if self.door_open_elapsed_since:
                self.delete_notification("door")
                self.door_open_elapsed_since = None
                self.notification = None
def main():
    """Entry point: set the process title and run the door consumer forever."""
    setproctitle("door: run")
    consumer = Door(os.environ["REDIS_HOST"], os.environ["REDIS_PORT"])
    consumer.run()
    return 0


if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
75488 | <filename>Lista02/Q06.py
#!/usr/bin/env python3
#-*- encoding: UTF-8 -*-
def main():
    """Read hourly wage and hours worked, then print a salary breakdown."""
    salario = float(input("Salário-hora: R$ "))
    tempo = int(input("Quantidade de horas trabalhadas: "))
    bruto = salario * tempo
    # Deductions: income tax 11%, social security 8%, union fee 5%.
    ir = bruto * (11/100)
    inss = bruto * (8/100)
    sindicato = bruto * (5/100)
    print("+ Salário bruto: R$", bruto)
    print("- IR (11%): R$", ir)
    print("- INSS(8%): R$", inss)
    print("- Sindicato(5%): R$", sindicato)
    print("Salário Líquido : R$ ", bruto - (ir + inss + sindicato))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3258532 | <reponame>Wealize/apminsight-site24x7-py
class DbMetric:
    """Aggregated latency and error statistics for one database operation.

    Identified by (operation, object, component), e.g. ('SELECT', 'users',
    'mysql'). Successive trackers are folded in via accumulate().
    """
    def __init__(self):
        self.opn = ''          # database operation, e.g. 'SELECT'
        self.obj = ''          # target object/table
        self.component = ''    # database component reported by the tracker
        self.errorct = 0       # number of failed calls
        self.count = 0         # number of successful calls
        self.time = 0          # total response time of successful calls
        self.minrt = None      # fastest successful call (None until the first)
        self.maxrt = None      # slowest successful call (None until the first)

    def accumulate(self, dbtracker):
        """Fold one tracker into the aggregate.

        Trackers lacking both 'opn' and 'obj' info are ignored; errored
        trackers only bump the error counter.
        """
        info = dbtracker.extract_operartion_info()
        if 'opn' not in info or 'obj' not in info:
            return
        self.opn = info['opn']
        self.obj = info['obj']
        self.component = dbtracker.get_component()
        if dbtracker.is_error():
            self.errorct += 1
            return
        rt = dbtracker.get_rt()
        self.time += rt
        self.count += 1
        if self.minrt is None or rt < self.minrt:
            self.minrt = rt
        # Bug fix: the maximum was compared against self.minrt, which let
        # maxrt shrink; compare against the current maximum instead.
        if self.maxrt is None or rt > self.maxrt:
            self.maxrt = rt

    def get_formatted_dbmetric(self):
        """Return [total rt, min rt, max rt, success count, error count]."""
        return [self.time, self.minrt, self.maxrt, self.count, self.errorct]

    def get_opn(self):
        return self.opn

    def get_obj(self):
        return self.obj
1770754 | <filename>Forest-Type-Cover-Prediction/code.py<gh_stars>0
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset= pd.read_csv(path)
print(dataset.head())
dataset.drop(columns=['Id'],axis=1,inplace=True)
print(dataset.describe())
# look at the first five columns
# Check if there's any column which is not useful and remove it like the column id
# check the statistical description
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols= dataset.columns
print(cols)
#number of attributes (exclude target)
size= len(dataset)
print(size)
#x-axis has target attribute to distinguish between classes
x=dataset['Cover_Type'].copy()
#y-axis shows values of an attribute
y=dataset.drop(['Cover_Type'],axis=1)
#Plot violin for all attributes
plt.violinplot(x)
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
subset_train=dataset.iloc[:,0:10]
data_corr= subset_train.corr(method='pearson')
sns.heatmap(data_corr)
correlation= data_corr.unstack().sort_values(kind='quicksort')
corr_var_list=correlation[(abs(correlation)>upper_threshold) & (correlation != 1)]
print(corr_var_list)
# Code ends here
# --------------
#Import libraries
from sklearn import cross_validation
from sklearn.preprocessing import StandardScaler
import numpy as np
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
X= dataset.drop(['Cover_Type'],axis=1)
Y= dataset['Cover_Type'].copy()
X_train,X_test,Y_train,Y_test= cross_validation.train_test_split(X,Y,test_size=0.2,random_state=0)
#Standardized
#Apply transform only for continuous data
scaler= StandardScaler()
X_train_temp=X_train.iloc[:,0:10]
X_test_temp=X_test.iloc[:,0:10]
X_train_temp= scaler.fit_transform(X_train_temp)
X_test_temp= scaler.fit_transform(X_test_temp)
#Concatenate scaled continuous data and categorical
X_train1=np.concatenate((X_train_temp,X_train.iloc[:,10:]),axis=1)
X_test1=np.concatenate((X_test_temp,X_test.iloc[:,10:]),axis=1)
X_train_col= X_train.columns
features=[]
for i in X_train_col:
features.append(i)
scaled_features_train_df =pd.DataFrame(X_train1,columns=features,index=X_train.index)
scaled_features_test_df = pd.DataFrame(X_test1,columns=features,index=X_test.index)
print(scaled_features_train_df)
"""X_temp = StandardScaler().fit_transform(X_train[:,0:size])
X_val_temp = StandardScaler().fit_transform(X_val[:,0:size])
#Concatenate non-categorical data and categorical
X_con = numpy.concatenate((X_temp,X_train[:,size:]),axis=1)
X_val_con = numpy.concatenate((X_val_temp,X_val[:,size:]),axis=1)
#Add this version of X to the list
X_all.append(['StdSca','All', X_con,X_val_con,1.0,cols,rem,ranks,i_cols,i_rem])"""
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
skb=SelectPercentile(score_func=f_classif, percentile=20)
predictors= skb.fit(X_train1,Y_train)
scores= predictors.scores_
Features= X_train.columns
dataframe= pd.DataFrame({'Features':Features, 'scores':scores})
dat=dataframe.sort_values(by=['scores'],ascending=False)
print(dat)
top_k_predictors= list(dat['Features'].head(11))
print(top_k_predictors)
# Write your solution here:
# --------------
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
model=LogisticRegression()
clf=OneVsRestClassifier(estimator=model)
clf1=OneVsRestClassifier(estimator=model)
model_fit_all_features= clf1.fit(X_train,Y_train)
predictions_all_features=model_fit_all_features.predict(X_test)
score_all_features= accuracy_score(Y_test,predictions_all_features)
print(score_all_features)
model_fit_top_features = clf.fit(scaled_features_train_df[top_k_predictors],Y_train)
predictions_top_features=model_fit_top_features.predict(scaled_features_test_df[top_k_predictors])
score_top_features= accuracy_score(Y_test,predictions_top_features)
print(score_top_features)
| StarcoderdataPython |
3271324 | <reponame>prodProject/WorkkerAndConsumerServer<gh_stars>0
# start
# get count
# done
from enum import Enum
from CommonCode.convertJSONTOPb import ConvertJSONToPb
from CommonCode.convertPbToJSON import ConvertPbToJSON
from CommonCode.queryExecutor import QueryExecuter
from Counter.workerCounter import WorkerCounter
from Enums.databaseTables import Tables
from Searcher.workerSearcher import WorkerSearcher
from protobuff import worker_pb2
from protobuff.workersearch_pb2 import WorkerSearchRequestPb, WorkerSearchResponsePb
class States(Enum):
START = 0,
GET_COUNT = 1,
DONE = 2,
class CountWorkerEntity:
m_queryExecutor = QueryExecuter()
workerserchreqPb = WorkerSearchRequestPb()
m_converterJsonToPb = ConvertJSONToPb()
workerserchresPb = WorkerSearchResponsePb()
m_countHandler = WorkerCounter()
builder = None
def start(self, workerSearchPb):
self.builder = workerSearchPb
self.controlFlow(currentState=States.GET_COUNT)
def done(self):
return self.workerserchresPb
def getCount(self):
workerPb = self.m_countHandler.handle(workerpb=self.builder)
if (workerPb != None):
self.workerserchresPb.summary.totalHits=workerPb
# self.m_queryExecutor.count(table, subquery)
self.controlFlow(currentState=States.DONE)
def controlFlow(self, currentState):
if (currentState == States.GET_COUNT):
self.getCount()
elif (currentState == States.DONE):
self.done()
| StarcoderdataPython |
115819 | from django.contrib import admin
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
    """Django admin changelist configuration for Contact submissions."""
    # Columns shown in the changelist view.
    list_display = ('id', 'email_address', 'subject', 'created_on')
    # Columns that link through to the detail page.
    list_display_links = ('id', 'email_address')
    # Fields matched by the admin search box.
    search_fields = ('name_first', 'name_last', 'email_address')
    # Pagination size for the changelist.
    list_per_page = 25

admin.site.register(Contact, ContactAdmin)
| StarcoderdataPython |
1716095 | # -*- coding: utf-8 -*-
"""Pizza test."""
import unittest
from os import unlink
from main import read_input
from pizza_cell import Ingredient
# Fixture written to disk by setUp: header is "rows cols min_ingredient
# max_cells" (3x5 pizza, >=1 of each ingredient, <=6 cells per slice),
# followed by the grid of T(omato)/M(ushroom) cells.
input_file = 'test.in'
input_text = b'''3 5 1 6
TTTTT
TMMMT
TTTTT
'''
class PizzaTestCase(unittest.TestCase):
    """Tests for read_input / Pizza construction against the 3x5 fixture."""
    pizza = None
    def setUp(self):
        """Write the fixture file and parse it into self.pizza.

        :return:
        """
        super(PizzaTestCase, self).setUp()
        try:
            unlink(input_file)
        except FileNotFoundError:
            pass
        with open(input_file, 'wb') as file_descriptor:
            file_descriptor.write(input_text)
        self.pizza = read_input(input_file)
    def tearDown(self):
        """Remove the fixture file (ignoring a missing file).

        :return:
        """
        try:
            unlink(input_file)
        except FileNotFoundError:
            pass
        super(PizzaTestCase, self).tearDown()
    def test_pizza_cons(self):
        # Header values and ingredient tallies parsed from the fixture.
        self.assertIsNotNone(self.pizza)
        self.assertEqual(3, self.pizza.rows)
        self.assertEqual(5, self.pizza.columns)
        self.assertEqual(1, self.pizza.minimum_ingredient_number)
        self.assertEqual(6, self.pizza.maximum_cells_number)
        self.assertEqual(3, self.pizza.number_of_mushrooms)
        self.assertEqual(12, self.pizza.number_of_tomatoes)
        self.assertEqual(15, self.pizza.number_of_ingredients)
    def test_pizza_cell(self):
        # Top-left cell is a tomato at (0, 0) with id 1.
        pizza_cell = self.pizza.cells[0][0]
        self.assertFalse(pizza_cell.mushroom)
        self.assertTrue(pizza_cell.tomato)
        self.assertEqual(Ingredient.TOMATO, pizza_cell.ingredient)
        cell = pizza_cell.cell
        self.assertEqual(0, cell.x)
        self.assertEqual(0, cell.y)
        self.assertEqual(1, cell.id)
        # Its right neighbour is also a tomato at (1, 0) with id 11.
        pizza_cell = self.pizza.cells[0][1]
        self.assertFalse(pizza_cell.mushroom)
        self.assertTrue(pizza_cell.tomato)
        self.assertEqual(Ingredient.TOMATO, pizza_cell.ingredient)
        cell = pizza_cell.cell
        self.assertEqual(1, cell.x)
        self.assertEqual(0, cell.y)
        self.assertEqual(11, cell.id)
    @unittest.skip('debug only')
    def test_pizza_print(self):
        self.pizza.print()
    def test_pizza_neighbour(self):
        # map() wires up top/left/bottom/right neighbour links.
        self.pizza.map()
        # top-left
        pizza_cell = self.pizza.cells[0][0]
        self.assertIsNone(pizza_cell.top)
        self.assertIsNone(pizza_cell.left)
        self.assertTrue(pizza_cell.bottom.is_equal)
        self.assertTrue(pizza_cell.right.is_equal)
        self.assertEqual(0, pizza_cell.bottom.x)
        self.assertEqual(1, pizza_cell.bottom.y)
        self.assertEqual(1, pizza_cell.right.x)
        self.assertEqual(0, pizza_cell.right.y)
        # bottom-left
        pizza_cell = self.pizza.cells[2][0]
        self.assertIsNone(pizza_cell.left)
        self.assertIsNone(pizza_cell.bottom)
        self.assertEqual(0, pizza_cell.top.x)
        self.assertEqual(1, pizza_cell.top.y)
        self.assertEqual(1, pizza_cell.right.x)
        self.assertEqual(2, pizza_cell.right.y)
        # first-mushroom
        # NOTE(review): this lookup has no assertions -- the mushroom-cell
        # checks appear to be unfinished.
        pizza_cell = self.pizza.cells[1][1]

if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1693952 | #!/usr/bin/python
from boto3.session import Session
import sys
import os
import uuid
import time
def get_env(name):
    """Return the value of environment variable *name*, or None if unset."""
    return os.environ.get(name)
# AWS session settings, seeded from the environment; CLI flags below override.
aws_args = {
    'aws_access_key_id': get_env('AWS_ACCESS_KEY'),
    'aws_secret_access_key': get_env('AWS_SECRET_KEY'),
    'region_name': get_env('AWS_REGION'),
    'aws_session_token': get_env('AWS_SESSION_TOKEN'),
    'profile_name': get_env('AWS_PROFILE_NAME')
}
# Maps CLI flags to boto3 Session keyword names.
AWS_ARG_MAP = {
    '--ak': 'aws_access_key_id',
    '--as': 'aws_secret_access_key',
    '--ar': 'region_name',
    '--at': 'aws_session_token',
    '--ap': 'profile_name'
}
dynamodb_args = {}
db_prefix = 'whimbrel_'     # DynamoDB table name prefix
activity_exec_id = None     # supplied via --aei (not validated)
transition = None           # supplied via --transition
source = 'Python CLI'
# Hand-rolled argv parsing; value-taking flags consume the next token.
i = 1
while i < len(sys.argv):
    # AWS specific setup
    if sys.argv[i] in AWS_ARG_MAP:
        arg = sys.argv[i]
        i += 1
        aws_args[AWS_ARG_MAP[arg]] = sys.argv[i]
    # DynamoDB specific setup
    elif sys.argv[i] == '--endpoint':
        i += 1
        dynamodb_args['endpoint_url'] = sys.argv[i]
    elif sys.argv[i] == '--ssl':
        dynamodb_args['use_ssl'] = True
    # Whimbrel specific setup
    elif sys.argv[i] == '--prefix':
        i += 1
        db_prefix = sys.argv[i]
    elif sys.argv[i] == '--aei':
        i += 1
        activity_exec_id = sys.argv[i]
    elif sys.argv[i] == '--transition':
        i += 1
        transition = sys.argv[i]
    elif sys.argv[i] == '--source':
        i += 1
        source = sys.argv[i]
    i += 1
session = Session(**aws_args)
db = session.client('dynamodb', **dynamodb_args)
# Event id: the execution id plus a fresh UUID1 suffix.
activity_event_id = activity_exec_id + '::' + str(uuid.uuid1())
when_epoch = int(time.time())
when_gm = time.gmtime(when_epoch)
# Timestamp split into DynamoDB number attributes: [Y, M, D, h, m, s] (UTC).
when_list = [
    {"N": str(when_gm.tm_year)},
    {"N": str(when_gm.tm_mon)},
    {"N": str(when_gm.tm_mday)},
    {"N": str(when_gm.tm_hour)},
    {"N": str(when_gm.tm_min)},
    {"N": str(when_gm.tm_sec)}
]
# Persist the activity transition event to DynamoDB.
event_item = {
    "activity_event_id": {"S": activity_event_id},
    "activity_exec_id": {"S": activity_exec_id},
    "transition": {"S": transition},
    "when": {"L": when_list},
    "when_epoch": {"N": str(when_epoch)},
    "source": {"S": source}
}
db.put_item(TableName=db_prefix + 'activity_event', Item=event_item)
3285538 | <reponame>craymichael/ArgValidation<gh_stars>0
# ======================================================================================================================
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# ======================================================================================================================
import six
from functools import wraps # Python 2.5+
# Special case constants
class ARGS:
    """Sentinel constant marking the "all remaining *args" validation case."""
    pass
class KWARGS:
    """Sentinel constant marking the "all remaining **kwargs" validation case."""
    pass
def req(*args):
    """Decorator for any function to validate its arguments seamlessly.

    Args:
        *args: Tuples of indices and/or names in position 0 and `Arg` instances
            in position 1. For docs on specifications for `Arg`, view the `Arg`
            class.

    Examples:
        Below, Arg() is used for all `Arg` instances.

        Some basic usage:
            @req(
                (3, Arg()),                 # Validate a single index
                ((1, 2), Arg()),            # Validate multiple indices
                (ARGS, Arg()),              # Validate the rest of func *args
                ((4, 'slug'), Arg()),       # Validate a mixture of kwargs and args
                ('test', Arg()),            # Validate a single name
                (('spec', 'index'), Arg()), # Validate multiple names
                (KWARGS, Arg())             # Validate the rest of func **kwargs
            )

        These specifications don't work:
            @req(
                (2, Arg()),        # Validate index 2
                ((1, 2, 3), Arg()) # ValueError: index 2 validated already
            )

        NOTE: The order of args/kwargs/special cases doesn't matter; the order
        of validation is:
            1. Specified indices and kwargs in the order they appear to `req`
            2. ARGS special case
            3. KWARGS special case

    Raises:
        ValueError: ARGS or KWARGS specified more than once, or an index or
            name specified more than once.
        Others: See the `validate` function doc under the class `Arg` for
            exceptions raised due to invalid user specification.
    """
    def validation_decorator(func):
        """Decorator for a function `func` which validates provided arguments
        according to the specifications of `req`."""
        @wraps(func)
        def func_wrapper(*func_args, **func_kwargs):
            """Wrapper for a function. Uses all provided args and kwargs."""
            visited_args = set()    # positional indices already validated
            visited_kwargs = set()  # keyword names already validated
            # Used to resolve negative indices to their actual positions.
            num_func_args = len(func_args)
            # Bug fix: the ARGS/KWARGS placeholders are rebound from the
            # nested helper, so they live in a mutable dict (the original
            # declared them `global`, which raised NameError / left the
            # special-case handling permanently disabled). A dict is used
            # rather than `nonlocal` to keep Python 2 compatibility (six).
            special_case = {'args': None, 'kwargs': None}

            def _validate_arg(_index_or_name, _arg_instance):
                """Validate one arg or kwarg with `_arg_instance` (Arg).
                `_index_or_name` is an int index, a str name, or a sentinel."""
                # Check ARGS/KWARGS special cases; they are recorded here and
                # handled after all explicit specifications.
                if _index_or_name is ARGS:
                    if special_case['args'] is not None:
                        raise ValueError('`ARGS` specified multiple times in input.')
                    special_case['args'] = _arg_instance
                    return
                elif _index_or_name is KWARGS:
                    if special_case['kwargs'] is not None:
                        raise ValueError('`KWARGS` specified multiple times in input.')
                    special_case['kwargs'] = _arg_instance
                    return
                if isinstance(_index_or_name, six.string_types):  # Name (kwarg)
                    if _index_or_name in visited_kwargs:
                        raise ValueError(
                            'Specified kwarg was already handled by another `Arg` instance: {}'.format(_index_or_name))
                    visited_kwargs.add(_index_or_name)
                    # Bug fix: a name is passed as `name=` (the original
                    # passed it as `index=`) -- confirm against Arg.validate.
                    _arg_instance.validate(func_kwargs[_index_or_name], name=_index_or_name)
                else:  # Assume `_index_or_name` is an int index (arg)
                    # Store index as its positive/actual value.
                    if _index_or_name < 0:
                        _index_or_name += num_func_args
                    # Bug fix: duplicates were checked against visited_kwargs,
                    # so repeated indices were never caught.
                    if _index_or_name in visited_args:
                        raise ValueError(
                            'Specified arg was already handled by another `Arg` instance: {}'.format(_index_or_name))
                    visited_args.add(_index_or_name)
                    # Bug fix: an index is passed as `index=` (the original
                    # passed it as `name=`) -- confirm against Arg.validate.
                    _arg_instance.validate(func_args[_index_or_name], index=_index_or_name)

            # Validate every explicit specification in order.
            for indices_or_names, arg in args:
                if isinstance(indices_or_names, tuple):  # Tuple of indices or names
                    for index_or_name in indices_or_names:
                        _validate_arg(index_or_name, arg)
                else:  # Single index or name
                    _validate_arg(indices_or_names, arg)

            # ARGS special case: validate all not-yet-visited positional args.
            if special_case['args'] is not None:
                remaining_args = set(range(num_func_args)) - visited_args
                for index_to_validate in remaining_args:
                    _validate_arg(index_to_validate, special_case['args'])
            # KWARGS special case: validate all not-yet-visited keyword args.
            if special_case['kwargs'] is not None:
                remaining_kwargs = set(func_kwargs) - visited_kwargs
                for name_to_validate in remaining_kwargs:
                    _validate_arg(name_to_validate, special_case['kwargs'])

            # All validations passed: call through.
            return func(*func_args, **func_kwargs)
        # Return wrapper
        return func_wrapper
    # Return decorator
    return validation_decorator
| StarcoderdataPython |
1641715 | import numpy as np
from flipper25d.config import *
from flipper25d.util import *
from flipper25d.cspace.cspace import *
from flipper25d.vis.vis import rot_vector
import pdb
from scipy.misc import imsave
import sys
import os
'''
We use the left-side S2 point as the pivot; the right-side S2 point is then
uniquely defined along the negative-y direction of the robot coordinate system.

Module overview:
    get_neighbours            -- enumerate neighbouring poses
    right_S2                  -- compute the constrained (right-side) point
    cost_fun                  -- cost of a candidate pose
    get_path_store / retrieve_path -- DFS path search

Data shapes:
    t is (p_, yaw, pitch, roll, alpha)
    a neighbour is (t, t_r)
    q is (neighbour, cost, store_id)
'''
global touched
def right_S2(p_, yaw, pitch, roll):
    """Locate the right-side S2 point from the left-side point and pose.

    The body-frame offset (0, WIDTH, 0) is rotated by yaw, then pitch, then
    roll, and subtracted from the left point (row/col in pixels, z in metres).
    """
    offset = np.array([0., WIDTH, 0.]).reshape((3, 1))
    rot_yaw = rot_vector([0., 0., 1.], -yaw)
    rot_pitch = rot_vector([np.sin(yaw), np.cos(yaw), 0.], -pitch)
    roll_axis = [np.cos(yaw) * np.cos(pitch), -np.sin(yaw) * np.cos(pitch), np.sin(pitch)]
    rot_roll = rot_vector(roll_axis, -roll)
    rotated = rot_roll.dot(rot_pitch.dot(rot_yaw.dot(offset)))
    return [p_[0] - int(rotated[0, 0] / PIXEL_RESO),
            p_[1] - int(rotated[1, 0] / PIXEL_RESO),
            rotated[2, 0] + p_[2]]
def have_same_pitch(pitchs1, pitchs2):
    """Return a pitch common to both specifications, or None.

    Each argument is either [pitch] (a single feasible pitch) or
    [pitch_ub, pitch_lb] (an upper/lower bound pair).

    Bug fix: when both inputs are ranges, the original returned
    min(lb1, lb2), which can lie outside the intersection; the smallest
    common pitch is max(lb1, lb2).
    """
    if not pitchs1 or not pitchs2:
        return None
    if len(pitchs1) == 1 and len(pitchs2) == 1:
        # Two fixed pitches match only when equal.
        return pitchs1[0] if pitchs1[0] == pitchs2[0] else None
    if len(pitchs1) == 1:
        # Fixed pitch vs range: the fixed value must fall inside the range.
        return pitchs1[0] if pitchs2[1] <= pitchs1[0] <= pitchs2[0] else None
    if len(pitchs2) == 1:
        return pitchs2[0] if pitchs1[1] <= pitchs2[0] <= pitchs1[0] else None
    # Two ranges: disjoint when one lower bound exceeds the other upper bound.
    if pitchs1[1] > pitchs2[0] or pitchs1[0] < pitchs2[1]:
        return None
    # Smallest pitch inside the intersection [max(lb), min(ub)].
    return max(pitchs1[1], pitchs2[1])
def cost_fun(t_ori, neiblr, p_tgt_):
    """Terrain-fit cost of a neighbour pose pair.

    Sums the squared height error between the terrain map and the robot's
    midline, sampled at the midpoint and at ten points along the chassis
    line LEN_S1S2 ahead. `t_ori` and `p_tgt_` are currently unused but kept
    for interface compatibility.
    """
    left, right = neiblr
    yaw, pitch = left[1], left[2]
    midpoint = S2_to_middle(left, right)[0]
    cost = (exp_map[midpoint[0], midpoint[1]] - midpoint[2]) ** 2
    for step in range(10):
        sample = get_point(midpoint, yaw=yaw, pitch=pitch, line_len=LEN_S1S2 / 10 * (step + 1))
        cost += (exp_map[sample[0], sample[1]] - sample[2]) ** 2
    return cost
def check_point_on_ground(p_):
    """True when the point's height is within HEIGHT_EPSILON of the terrain map."""
    return exp_map[p_[0], p_[1]] + HEIGHT_EPSILON > p_[2]
#def get_neighbours(p_,yaw,pitch,roll,alpha):
def get_neighbours(t_l,t_r,p_tgt_):
    '''
    Enumerate candidate neighbour poses reachable from the current pose.

    Naming rule:
        p_      current point
        ano_p_  neighbour point
        *_r     right-side variant
        p_tgt_  target point (used when computing costs)
    Note: following find_valid_neighbour_points.jpeg to find neighbours.

    Returns a list of [left_pose, right_pose] pairs where the populated side
    is a pose list [p_, yaw, pitch, roll, alpha] and the other side is None.
    '''
    global touched
    p_l_,yaw,pitch,roll,_ = t_l
    p_r_ = t_r[0]
    # Front (S1) ends of both tracks, LEN_S1S2 ahead along the heading.
    p_l_S1 = get_point(p_l_, yaw=yaw, pitch=pitch, line_len=LEN_S1S2)
    p_r_S1 = get_point(p_r_, yaw=yaw, pitch=pitch, line_len=LEN_S1S2)
    # Geometric centre of the four track endpoints.
    center = [(p_l_[0]+p_r_[0]+p_l_S1[0]+p_r_S1[0])/4,\
            (p_l_[1]+p_r_[1]+p_l_S1[1]+p_r_S1[1])/4,\
            (p_l_[2]+p_r_[2]+p_l_S1[2]+p_r_S1[2])/4]
    norm_robo_x = (np.cos(yaw)*np.cos(pitch),-np.sin(yaw)*np.cos(pitch),np.sin(pitch))
    norm_z = np.array((-np.cos(yaw)*np.sin(pitch),np.sin(yaw)*np.sin(pitch),np.cos(pitch)))
    # Robot "up" vector after applying roll about the robot x axis.
    R_roll = rot_vector(norm_robo_x, roll)
    rolled_norm_z = np.dot(R_roll,norm_z.reshape((3,1)))[:,0]
    # Offset of the left S2 point relative to the centre, converted to metres
    # (note the sign flip on row/col).
    v_o_p_l_ = [-(p_l_[0] - center[0])*PIXEL_RESO, -(p_l_[1] - center[1])*PIXEL_RESO,p_l_[2] - center[2]]
    #check if four points on the ground
    four_point_on_ground_tag = False
    if check_point_on_ground(p_l_) and check_point_on_ground(p_r_) and check_point_on_ground(p_l_S1) and check_point_on_ground(p_r_S1):
        four_point_on_ground_tag = True
    #p_r_ = right_S2(p_, yaw, pitch, roll)
    neighbours = []
    #costs = []
    neib_id = 0
    for y in YAWS:
        turn_left = None
        if y != yaw:
            turn_left = True
            continue#now only allows for go straight
        # NOTE(review): because of the `continue` above, turning candidates
        # are skipped entirely, so `turn_left` is always None below and the
        # turning branches are dead code -- confirm this is intentional.
        if turn_left is None:
            # Straight motion: step GO_STRAIGHT_ITS pixels along the heading.
            dr,dc = int(-np.cos(yaw)*GO_STRAIGHT_ITS), int(np.sin(yaw)*GO_STRAIGHT_ITS)
        else:
            if not four_point_on_ground_tag:
                continue
            else:
                # Rotate the centre-to-left-S2 vector about the rolled up-axis.
                R_y = rot_vector(rolled_norm_z, yaw - y)
                '''
                if turn_left:
                    dr,dc = int(-np.cos(yaw)*WIDTH/PIXEL_RESO), int(np.sin(yaw)*WIDTH/PIXEL_RESO)
                else:
                    dr,dc = int(-np.cos(yaw)*WIDTH/PIXEL_RESO), int(np.sin(yaw)*WIDTH/PIXEL_RESO)
                '''
                roted_v = np.dot(R_y, np.array(v_o_p_l_).reshape((3,1)))[:,0]
                dr, dc, dh = -(roted_v[0]/PIXEL_RESO), -(roted_v[1]/PIXEL_RESO), roted_v[2]
        for h_it in range(4):
            #use both left and right S2 as pivot
            for on_left_Pivot in [True,False]:
                if turn_left is not None:
                    if h_it > 0 or not four_point_on_ground_tag:
                        continue
                    ano_p_ = [int(center[0]+dr),int(center[1]+dc),center[2]+dh]
                else:
                    # Lift the pivot above the terrain in H_ITS increments.
                    d_h = h_it * H_ITS
                    if on_left_Pivot:
                        ano_p_ = [p_l_[0]+dr,p_l_[1]+dc,exp_map[p_l_[0]+dr,p_l_[1]+dc]+d_h]
                        p_ = p_l_
                    else:
                        ano_p_ = [p_r_[0]+dr,p_r_[1]+dc,exp_map[p_r_[0]+dr,p_r_[1]+dc]+d_h]
                        p_ = p_r_
                pitchs = get_pitch(ano_p_, yaw)
                #t = [p_, y, pitch, 0.,0.]
                for pt in pitchs:
                    # check pitch against configured limits
                    if pt < PITCH_LB or pt > PITCH_UB:
                        continue
                    ano_t = [ano_p_, y, pt, 0., 0.]
                    #cost = cost_fun(t,ano_t,p_tgt_, is_left_side=y<=yaw)#+cost_fun(t_r,ano_t_r,p_tgt_)
                    if turn_left is not None:
                        neighbours.append([ano_t,None])
                    else:
                        if on_left_Pivot:
                            neighbours.append([ano_t, None])
                        else:
                            neighbours.append([None, ano_t])
                    #costs.append(cost)
    return neighbours#, costs
def middle_to_S2(t_ori):
    """Split a middle configuration into its left and right S2 tuples.

    Rotates the body-frame offsets (0, +/-WIDTH/2, 0) through yaw, pitch and
    roll into the world frame, converts the planar components to grid indices
    via PIXEL_RESO, and offsets the middle point accordingly.

    :param t_ori: (point, yaw, pitch, roll, alpha)
    :return: (left_S2_tuple, right_S2_tuple), each [point, yaw, pitch, roll, 0.]
    """
    p_, yaw, pitch,roll,_ = t_ori
    # body-frame offsets of the left/right S2 contact points
    v_oLS2 = [0.,WIDTH/2,0.]
    v_oRS2 = [0.,-WIDTH/2,0.]
    # rotations for yaw, pitch and roll (note the negated angles)
    R_o_y = rot_vector([0.,0.,1.],-yaw)
    R_o_p = rot_vector([np.sin(yaw),np.cos(yaw),0.],-pitch)
    R_o_r = rot_vector([np.cos(yaw)*np.cos(pitch),-np.sin(yaw)*np.cos(pitch),np.sin(pitch)], -roll)
    v_oLS2_ = np.dot(R_o_r,np.dot(R_o_p,np.dot(R_o_y,np.array(v_oLS2).reshape((3,1)))))
    v_oRS2_ = np.dot(R_o_r,np.dot(R_o_p,np.dot(R_o_y,np.array(v_oRS2).reshape((3,1)))))
    # convert to grid indices (x/y) and absolute height (z)
    p_oRS2_ = [p_[0]-int(v_oRS2_[0,0]/PIXEL_RESO),p_[1]-int(v_oRS2_[1,0]/PIXEL_RESO), v_oRS2_[2,0]+p_[2]]
    p_oLS2_ = [p_[0]-int(v_oLS2_[0,0]/PIXEL_RESO),p_[1]-int(v_oLS2_[1,0]/PIXEL_RESO), v_oLS2_[2,0]+p_[2]]
    return [p_oLS2_, yaw,pitch,roll,0.], [p_oRS2_, yaw,pitch,roll,0.]
def S2_to_middle(neibl, neibr):
    """Fuse the left/right S2 configuration tuples into one middle tuple.

    The x/y grid indices are averaged and truncated to int, the height is
    averaged as a float, and yaw/pitch/roll/alpha are taken from the left
    tuple.
    """
    (row_l, col_l, h_l), yaw, pitch, roll, alpha = neibl
    row_r, col_r, h_r = neibr[0]
    mid_point = [int((row_l + row_r) / 2), int((col_l + col_r) / 2), (h_l + h_r) / 2]
    return [mid_point, yaw, pitch, roll, alpha]
def reach_target(middle, p_tgt_):
    """Return True when the mid-point is within 10 grid cells of the target
    along coordinate 0 (squared gap <= 100).

    NOTE(review): only coordinate 0 is compared -- presumably the robot
    advances along a fixed axis; confirm the other coordinate can be ignored.
    """
    gap = middle[0] - p_tgt_[0]
    return gap * gap <= 100
def get_path_store(t_ori, p_tgt_):
    '''
    Greedy best-first walk from ORIGIN to TARGET on the arena map.
    Note: here when we compute the path, only x,y,z,yaw,pitch will be considered.

    :param t_ori: start configuration (point, yaw, pitch, roll, alpha)
    :param p_tgt_: target grid point
    :return: list of (left_S2, right_S2) configuration pairs visited

    Detail
        1. given xyz(middle),yaw,pitch,roll
        2. find a next step xyz,yaw,pitch, then check if roll valid
    Note: the yaw is fixed, p_tgt_ is only used to check reach target
    '''
    global touched  # NOTE(review): unused in this function
    t_ori_l, t_ori_r = middle_to_S2(t_ori)
    #p_ori_r = right_S2(t_ori[0],t_ori[1],t_ori[2],t_ori[3])
    '''
    if t_ori_r is None:#it should be origin
        Q = [[(t_ori,(p_ori_r,t_ori[1],t_ori[2],t_ori[3],t_ori[4])),0]]#in Q, it will remember its store_id
    else:
    '''
    Q = [(t_ori_l,t_ori_r)]
    store = [Q[0]]#in store, it will remember its ancester's store_id
    reach_target_tag = False  # NOTE(review): never updated or read
    before_last = 0  # NOTE(review): never updated or read
    while len(Q)>0:
        q = Q.pop()
        t_ = q
        print(t_[0],'\t\t\t',t_[1])
        t_l, t_r= t_
        t_middle = S2_to_middle(t_l,t_r)
        neighbours= get_neighbours(t_l,t_r,p_tgt_)
        #ids = np.argsort(costs)#cost from small to large
        if len(neighbours) == 0:
            print('no neighbour')
            continue
        nbso = []
        costs = []
        for i in range(len(neighbours)):
            neibl,neibr = neighbours[i]
            if neibl is None:
                # right side given: derive roll, then reconstruct the left point
                roll, _ = get_roll(neibr[0],neibr[1],neibr[2],False)
                if roll is None:
                    continue
                ano_p_ = get_point(neibr[0], yaw=neibr[1]-np.pi/2, pitch=roll, line_len=WIDTH)
                neibl = [ano_p_, neibr[1],neibr[2], roll, 0.]
                neibr[3] = roll
            else:
                # left side given: derive roll, then reconstruct the right point
                roll, _ = get_roll(neibl[0],neibl[1],neibl[2],True)
                if roll is None:
                    continue
                ano_p_ = get_point(neibl[0], yaw=neibl[1]+np.pi/2, pitch=-roll, line_len=WIDTH)
                neibr = [ano_p_, neibl[1],neibl[2], roll, 0.]
                neibl[3] = roll
            #cost = cost_fun(t,ano_t,p_tgt_, is_left_side=y<=yaw)#+cost_fun(t_r,ano_t_r,p_tgt_)
            cost = cost_fun(t_middle, (neibl,neibr),p_tgt_)
            #print(cost, neibl,neibr)
            nbso.append([neibl,neibr])
            costs.append(cost)
        # sort by cost descending, so nbs[-1] holds the cheapest candidate
        ids = np.argsort(costs)
        nbs = [nbso[i] for i in ids[::-1]]
        # grid mid-point (row, col) of the cheapest candidate
        middle = int((nbs[-1][0][0][0]+nbs[-1][1][0][0])/2),int((nbs[-1][0][0][1]+nbs[-1][1][0][1])/2)
        #check if the smallest cost is target
        Q = [nbs[-1]]
        store.append(nbs[-1])
        if reach_target(middle, p_tgt_):
            # NOTE(review): nbs[-1] was already appended above, so the final
            # configuration ends up in store twice -- confirm intended.
            store.append(nbs[-1])
            return store
    return store
def retrieve_path(store):
    """Return the recorded store as the final path (identity pass-through)."""
    return store
if __name__ == '__main__':
    # Plan a configuration path from the arena ORIGIN to TARGET and dump it
    # (raw and expanded) as MATLAB .mat files under exp/conf_path/.
    t_ori = (ORIGIN_, 0., 0., 0., DEFAULT_ALPHA)
    entire_path = []
    p_tgt_ = TARGET_#[1512,800]
    print('###### From ', t_ori, ' to ', p_tgt_, '#################')
    store = get_path_store(t_ori, p_tgt_)
    entire_path = retrieve_path(store)
    import scipy.io
    scipy.io.savemat('exp/conf_path/'+ARENA_NAME+'config_path.mat',{'path':np.array(entire_path)})
    # expand each configuration into its full parameter set before saving
    expanded_entire_path = [expand_param(p) for p in entire_path]
    scipy.io.savemat('exp/conf_path/'+ARENA_NAME+'expanded_config_path.mat',{'path':np.array(expanded_entire_path)})
| StarcoderdataPython |
1638880 | <filename>app/model/draw_statistics.py
# -*- coding: utf-8 -*-
from sqlalchemy import Column, ForeignKey
from sqlalchemy import String, Integer
from sqlalchemy.orm import relationship
from app.model import Base
class DrawStatistics(Base):
    """ORM model recording a user interaction event against a draw.

    Exactly one of the two draw foreign keys is expected to be set,
    linking the event either to a HoloTwitterDraw or to a
    HoloTwitterCustomDraw (both columns are nullable).
    """
    __tablename__ = 'draw_statistics'
    # FK to holo_twitter_draw.index; nullable because the event may instead
    # reference a custom draw
    holo_twitter_draw_id = Column(Integer, ForeignKey('holo_twitter_draw.index'), nullable=True)
    holo_twitter_draw = relationship("HoloTwitterDraw", backref="draw_statistics")
    # FK to holo_twitter_custom_draw.index; nullable for the same reason
    holo_twitter_custom_draw_id = Column(Integer, ForeignKey('holo_twitter_custom_draw.index'), nullable=True)
    holo_twitter_custom_draw = relationship("HoloTwitterCustomDraw", backref="draw_statistics")
    # event name, e.g. one of get_auto_event_names()
    event = Column(String(10), nullable=False)
    # identifier of the user who triggered the event
    user_uuid = Column(String(40), nullable=False)
    def __repr__(self):
        return "<DrawStatistics(index='%s', holo_twitter_draw_id='%s', holo_twitter_custom_draw_id='%s',event='%s',user_uuid='%s')>" % (
            self.index,
            self.holo_twitter_draw_id,
            self.holo_twitter_custom_draw_id,
            self.event,
            self.user_uuid,
        )
    @classmethod
    def get_id(cls):
        # primary-key column; `index` is presumably declared on Base -- TODO confirm
        return DrawStatistics.index
    @classmethod
    def get_auto_event_names(cls):
        # events that are recorded automatically (without explicit user action)
        return ['click','download','disable']
    # NOTE(review): the FK fields are declared `str` here although the columns
    # are Integer -- possibly intentional for JSON serialization; confirm.
    FIELDS = {"index": int, "holo_twitter_draw_id": str, "holo_twitter_custom_draw_id": str, "event": str,
              "user_uuid": str}
    FIELDS.update(Base.FIELDS)
| StarcoderdataPython |
3343897 | <filename>ois_api_client/v3_0/dto/QueryInvoiceCheckResponse.py
from dataclasses import dataclass
from .BasicOnlineInvoiceResponse import BasicOnlineInvoiceResponse
@dataclass
class QueryInvoiceCheckResponse(BasicOnlineInvoiceResponse):
    """Response type of the POST /queryInvoiceCheck REST operation.

    :param invoice_check_result: Indicates whether the queried invoice number exists in the system as a valid invoice, if the tax number of the querying entity is present on the invoice either as supplier or customer
    """
    # True when the invoice number is known and valid for the querying entity
    invoice_check_result: bool
| StarcoderdataPython |
3302533 | import quickfix as fix
class Message(fix.Message):
    # Base class for all session-level messages: stamps the FIXT.1.1
    # BeginString into the standard header.
    def __init__(self):
        fix.Message.__init__(self)
        self.getHeader().setField( fix.BeginString("FIXT.1.1") )
class Heartbeat(Message):
    # FIX Heartbeat (MsgType "0"): keep-alive administrative message.
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("0") )
class TestRequest(Message):
    # FIX TestRequest (MsgType "1"): forces the counterparty to heartbeat.
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("1") )
class ResendRequest(Message):
    # FIX ResendRequest (MsgType "2"): asks for retransmission of messages.
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("2") )
class Reject(Message):
    # FIX session-level Reject (MsgType "3").
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("3") )
class SequenceReset(Message):
    # FIX SequenceReset (MsgType "4"): resets/gap-fills sequence numbers.
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("4") )
class Logout(Message):
    # FIX Logout (MsgType "5"): terminates the session.
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("5") )
class Logon(Message):
    # FIX Logon (MsgType "A"): initiates the session.
    def __init__(self):
        Message.__init__(self)
        self.getHeader().setField( fix.MsgType("A") )
class NoMsgTypes(fix.Group):
    """Repeating group NoMsgTypes (tag 384) of the Logon message.

    Field order: RefMsgType(372, delimiter), MsgDirection(385),
    RefApplVerID(1130), RefCustApplVerID(1131), zero-terminated.
    """
    def __init__(self):
        field_order = fix.IntArray(5)
        for slot, tag in enumerate((372, 385, 1130, 1131, 0)):
            field_order[slot] = tag
        fix.Group.__init__(self, 384, 372, field_order)
| StarcoderdataPython |
117546 | <reponame>clemenshage/grslra
import numpy as np
import grslra
from grslra.visualization import plot_lpnorm
from grslra.scaling import Scaling
from matplotlib import pyplot as plt
from grslra.grst import grst
from sys import argv
# Benchmark script (Python 2): static robust subspace tracking via grst on
# synthetic low-rank + sparse data, sweeping the norm parameter p.
PROFILE = 0
if PROFILE:
    import cProfile
# problem dimensions: m x n data matrix of rank k
m = 500
n = 5000
k = 5
# noise variance, observation rate and outlier rate
omegasquared = 1e-5
rate_Omega = 0.1
rho = 0.1
X_0, L_0, S_0, U_0, Y_0 = grslra.testdata.testdata_rst_static(m, n, k, rho, omegasquared)
if rate_Omega < 1.0:
    # subsample entries: keep a random set Omega of card_Omega indices
    # NOTE(review): np.int is removed in NumPy >= 1.24 -- use int if upgrading
    card_Omega = np.int(np.round(rate_Omega * m * n))
    Omega = np.unravel_index(np.random.choice(m * n, card_Omega, replace=False), (m, n))
    X = X_0[Omega]
else:
    Omega = None
    X = X_0
scaling = Scaling(percentile=38, val_at_percentile=0.17)
# determine scaling factor
scaling.scale_reference(X, Omega=Omega, dimensions=(m, n))
print "scaling: ", scaling.factor
# sweep p from the convex case (1.0) towards strongly non-convex (0.1)
for p in [1.0, 0.7, 0.4, 0.1]:
    # smoothing parameter, floored to avoid degenerate values at p -> 1
    mu_opt = (1-p) * (3 * np.sqrt(omegasquared) / scaling.factor) ** 2
    print "mu_opt: ", mu_opt
    mu = np.maximum(mu_opt, 0.1 * (3 * np.sqrt(omegasquared) / scaling.factor) ** 2)
    params_grpca = {"PRINT": None}
    params_cg_y = {"delta": 1e-8}
    params_cg_U = {}
    params_cg_Y = {}
    params_gd_U = {"t_init": 1, "rho": 0.9, "I_max": 1}
    params_grst = {"init_factor": 0, "VERBOSE": 2, "PRINT": None}
    if PROFILE:
        profile = cProfile.Profile()
        profile.enable()
    L = grst(X, k, p, mu, Omega=Omega, dimensions=(m, n), params=params_grst, params_grpca=params_grpca, params_cg_U=params_cg_U, params_cg_Y=params_cg_Y, params_gd_U=params_gd_U, params_cg_y=params_cg_y, L_0=L_0, scaling=scaling, U_0=U_0)
    # relative reconstruction error of the low-rank component
    err = np.linalg.norm(L - L_0, ord='fro') / np.linalg.norm(L_0, ord='fro')
    print "\n||L - L_0||_F / ||L_0||_F = ", '{:.8f}'.format(err)
    if PROFILE:
        profile.disable()
        profile.dump_stats("profile.bin")
3203263 | <filename>migrations/versions/710c83b4682f_.py
"""empty message
Revision ID: 710c83b4682f
Revises:
Create Date: 2017-11-22 07:26:30.882578
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: shoppinglistitems, shoppinglists,
    user_token and users tables.

    NOTE(review): shoppinglistitems.owner_id / shoppinglist_id carry no
    ForeignKey constraints to users / shoppinglists -- confirm intended.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('shoppinglistitems',
    sa.Column('item_id', sa.Integer(), nullable=False),
    sa.Column('owner_id', sa.Integer(), nullable=False),
    sa.Column('shoppinglist_id', sa.Integer(), nullable=False),
    sa.Column('item_title', sa.String(length=255), nullable=False),
    sa.Column('item_description', sa.String(length=500), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('item_id'),
    sa.UniqueConstraint('item_title')
    )
    op.create_table('shoppinglists',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('owner_id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=False),
    sa.Column('description', sa.String(length=500), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('title')
    )
    op.create_table('user_token',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('token', sa.String(length=500), nullable=False),
    sa.Column('created_on', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('token')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('password', sa.String(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade(), in reverse creation order."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('users', 'user_token', 'shoppinglists', 'shoppinglistitems'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| StarcoderdataPython |
4819809 | <filename>demo2/advance.py
#!/usr/bin/env python
#coding:utf-8
'''
Created on 2017年5月5日
@author: 唐斌
'''
from collections import Iterable
import os
def demo1():
    """Walk through core Python idioms -- slicing, iteration, list
    comprehensions and generators -- printing each result as it goes.

    Returns None; output goes to stdout only.
    """
    # --- slicing ---
    a = [1, 'as', 2, 'd', 4, 'fg']
    print(a[2:4])      # sub-list [2, 'd']
    print(a[-1::-1])   # reversed copy
    print(a)
    # --- iteration ---
    # check whether the object is iterable
    flag = isinstance(a, Iterable)
    print(flag)
    for i in a:
        print(i)
    # iterate with the index via enumerate
    for i, v in enumerate(a):
        print(i, v)
    for x, y in [[1, 2], [3, 4], (4, 6)]:
        print(x, y)
    # --- list comprehensions ---
    li = list(range(0, 11))
    print(li)
    aa = [x * y for x in range(0, 10) for y in range(10, 20)]
    print(aa)
    # os.listdir already returns a list; the wrapping comprehension was redundant
    entries = os.listdir('.')
    print(entries)
    # --- generators ---
    g = (x * x for x in range(10))
    print(g)
    # BUG FIX: g.next() is Python-2-only; next(g) works on Python 2.6+ and 3.
    print(next(g))
    # Yang Hui (Pascal) triangle as an infinite generator
    def triggle():
        a = [1]
        while True:
            yield a
            # each inner entry is the sum of the pair above it
            a = [v + a[i + 1] for i, v in enumerate(a) if i < len(a) - 1]
            a.insert(0, 1)
            a.append(1)
    n = 0
    for i in triggle():
        n += 1
        print(i)
        if n > 10:
            break
if __name__ == '__main__':
    # NOTE(review): the call is wrapped in a string literal, so demo1() is
    # effectively disabled; remove the quotes to actually run the demo.
    '''demo1()'''
56406 | import unittest
from forestgame.colour import colour_to_hex
class ToHexTest(unittest.TestCase):
    """Unit tests for colour_to_hex: primary colours, black/white and
    low component values (leading-zero padding)."""

    def test_convert_solid_black(self):
        hex_code = colour_to_hex((0, 0, 0))
        self.assertEqual(hex_code, "#000000")

    def test_convert_solid_white(self):
        # BUG FIX: method name was garbled to "test_convert_solid_whex_codeite"
        # by a bad search-and-replace ("h" -> "hex_code" inside "white").
        hex_code = colour_to_hex((255, 255, 255))
        self.assertEqual(hex_code, "#FFFFFF")

    def test_convert_solid_red(self):
        hex_code = colour_to_hex((255, 0, 0))
        self.assertEqual(hex_code, "#FF0000")

    def test_convert_solid_green(self):
        hex_code = colour_to_hex((0, 255, 0))
        self.assertEqual(hex_code, "#00FF00")

    def test_convert_solid_blue(self):
        hex_code = colour_to_hex((0, 0, 255))
        self.assertEqual(hex_code, "#0000FF")

    def test_convert_low_values(self):
        # single-digit hex components must be zero-padded
        hex_code = colour_to_hex((15, 15, 15))
        self.assertEqual(hex_code, "#0F0F0F")
| StarcoderdataPython |
1744721 | <filename>flink-ai-flow/ai_flow/test/endpoint/test_client.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import time
import unittest
from typing import List
from unittest import TestCase
import cloudpickle
from ai_flow.project.project_config import ProjectConfig
from notification_service.base_notification import EventWatcher
from ai_flow.common.properties import Properties
from ai_flow.common.status import Status
from ai_flow.meta.dataset_meta import DatasetMeta, DataType, Schema
from ai_flow.meta.job_meta import State
from ai_flow.meta.metric_meta import MetricType, MetricMeta, MetricSummary
from ai_flow.model_center.entity.model_version_stage import ModelVersionStage
from ai_flow.protobuf.message_pb2 import RESOURCE_ALREADY_EXISTS
from ai_flow.client.ai_flow_client import AIFlowClient
from ai_flow.endpoint.server.exception import AIFlowException
from ai_flow.endpoint.server.server import AIFlowServer
from ai_flow.store.db.base_model import base
from ai_flow.test.store.test_sqlalchemy_store import _get_store
_SQLITE_DB_FILE = 'aiflow.db'
_SQLITE_DB_URI = '%s%s' % ('sqlite:///', _SQLITE_DB_FILE)
_PORT = '50051'
client = None
client1 = None
client2 = None
class AIFlowClientTestCases(object):
"""test dataset"""
def test_save_dataset_get_dataset_by_id_and_name(self):
dataset = client.register_dataset(name='dataset', data_format='csv', description='it is mq data',
uri='mysql://',
properties=Properties({'a': 'b'}), name_list=['a'],
type_list=[DataType.INT32])
dataset_id = client.get_dataset_by_id(2)
self.assertIsNone(dataset_id)
dataset_name = client.get_dataset_by_name('dataset')
self.assertEqual('dataset', dataset.name)
self.assertEqual('dataset', dataset_name.name)
def test_save_dataset_with_catalog_by_id_and_name(self):
client.register_dataset_with_catalog(name='dataset',
catalog_name='my_hive', catalog_connection_uri='/path/to/conf',
catalog_type='hive', catalog_database='my_db', catalog_table='my_table')
dataset_id = client.get_dataset_by_id(2)
self.assertIsNone(dataset_id)
dataset_name = client.get_dataset_by_name('dataset')
self.assertEqual('my_hive', dataset_name.catalog_name)
self.assertEqual('hive', dataset_name.catalog_type)
self.assertEqual('my_db', dataset_name.catalog_database)
self.assertEqual('my_table', dataset_name.catalog_table)
self.assertEqual('/path/to/conf', dataset_name.catalog_connection_uri)
def test_double_register_dataset(self):
dataset_1 = client.register_dataset(name='dataset', data_format='csv', description='it is mq data',
uri='mysql://', properties=Properties({'a': 'b'}), name_list=['a'],
type_list=[DataType.INT32])
dataset_2 = client.register_dataset(name='dataset', data_format='csv', description='it is mq data',
uri='mysql://', properties=Properties({'a': 'b'}), name_list=['a'],
type_list=[DataType.INT32])
self.assertEqual(dataset_1.uuid, dataset_2.uuid)
self.assertEqual(dataset_1.schema.to_json_dict(), dataset_2.schema.to_json_dict())
self.assertRaises(AIFlowException, client.register_dataset, name='dataset',
data_format='csv',
description='it is not mq data', uri='mysql://',
properties=Properties({'a': 'b'}), name_list=['a'], type_list=[DataType.INT32])
def test_list_datasets(self):
client.register_dataset(name='dataset_1', data_format='csv', description='it is mq data',
uri='mysql://', properties=Properties({'a': 'b'}), name_list=['a'],
type_list=[DataType.INT32])
client.register_dataset(name='dataset_2', data_format='npz', description='it is',
uri='mysql://', properties=Properties({'a': 'b'}), name_list=['a'],
type_list=[DataType.INT32])
response_list = client.list_datasets(5, 0)
self.assertEqual(len(response_list), 2)
self.assertEqual('dataset_1', response_list[0].name)
self.assertEqual('dataset_2', response_list[1].name)
def test_save_datasets_list_datasets(self):
dataset_1 = DatasetMeta(name='dataset1',
data_format='csv',
create_time=None, update_time=1000,
properties=Properties({'a': 'b'}))
schema = Schema(name_list=['a', 'b'],
type_list=[DataType.STRING, DataType.INT32])
dataset_2 = DatasetMeta(name='dataset2',
data_format='csv',
create_time=None, update_time=1000,
properties=Properties({'a': 'b'}), schema=schema)
response = client.register_datasets([dataset_1, dataset_2])
self.assertEqual(len(response), 2)
self.assertEqual(1, response[0].uuid)
self.assertEqual(2, response[1].uuid)
response_list = client.list_datasets(2, 0)
self.assertEqual(2, len(response_list))
self.assertEqual('dataset1', response_list[0].name)
self.assertEqual('dataset2', response_list[1].name)
    def test_delete_dataset(self):
        """Deleting a registered dataset by name removes it from both
        name lookup and listing."""
        dataset = client.register_dataset(name='dataset',
                                          data_format='csv',
                                          description='it is mq data',
                                          uri='mysql://',
                                          properties=Properties({'a': 'b'}), name_list=['a'],
                                          type_list=[DataType.INT32])
        self.assertEqual(Status.OK, client.delete_dataset_by_name(dataset.name))
        self.assertIsNone(client.get_dataset_by_name(dataset.name))
        self.assertIsNone(client.list_datasets(1, 0))
def test_update_dataset(self):
client.register_dataset(name='dataset', data_format='csv', description='it is mq data',
uri='mysql://',
properties=Properties({'a': 'b'}), name_list=['a'], type_list=[DataType.INT32])
now = int(time.time() * 1000)
update_dataset = client.update_dataset(dataset_name='dataset', data_format='npz',
properties=Properties({'kafka': 'localhost:9092'}),
name_list=['b'], type_list=[DataType.STRING])
dataset = client.get_dataset_by_name('dataset')
self.assertTrue(dataset.update_time >= now)
self.assertEqual(dataset.schema.name_list, update_dataset.schema.name_list)
self.assertEqual(dataset.schema.type_list, update_dataset.schema.type_list)
update_dataset_1 = client.update_dataset(dataset_name='dataset', catalog_type='hive', catalog_name='my_hive',
catalog_database='my_db', catalog_table='my_table')
self.assertEqual(update_dataset_1.catalog_type, 'hive')
self.assertEqual(update_dataset_1.catalog_name, 'my_hive')
self.assertEqual(update_dataset_1.catalog_database, 'my_db')
self.assertEqual(update_dataset_1.catalog_table, 'my_table')
"""test project"""
def test_save_project_get_project_by_id_and_name(self):
response = client.register_project(name='project', uri='www.code.com',)
project_id = client.get_project_by_id(response.uuid)
project_name = client.get_project_by_name('project')
self.assertEqual(project_id.name, 'project')
self.assertEqual(project_name.name, 'project')
print(project_id)
def test_double_register_project(self):
client.register_project(name='project', uri='www.code.com')
client.register_project(name='project', uri='www.code.com')
self.assertRaises(AIFlowException, client.register_project,
name='project', uri='www.code2.com')
def test_list_project(self):
response = client.register_project(name='project', uri='www.code.com')
client.register_project(name='project1', uri='www.code.com')
project_list = client.list_project(2, response.uuid - 1)
self.assertEqual(2, len(project_list))
self.assertEqual('project', project_list[0].name)
self.assertEqual('project1', project_list[1].name)
def test_delete_project_by_id(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_version_relation(version='1', model_id=model.uuid,
project_snapshot_id=None)
self.assertEqual(client.get_project_by_id(project.uuid).name, 'project')
self.assertEqual(client.get_model_relation_by_id(model.uuid).name, 'model')
self.assertEqual(client.get_model_version_relation_by_version('1', 1).version, '1')
self.assertEqual(Status.OK, client.delete_project_by_id(project.uuid))
self.assertIsNone(client.get_project_by_id(project.uuid))
self.assertIsNone(client.get_model_relation_by_id(model.uuid))
self.assertIsNone(client.get_model_version_relation_by_version('1', model.uuid))
self.assertIsNone(client.list_project(1, 0))
self.assertIsNone(client.list_model_relation(1, 0))
self.assertIsNone(client.list_model_version_relation(1, 1, 0))
def test_delete_project_by_name(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_version_relation(version='1', model_id=model.uuid,
project_snapshot_id=None)
self.assertEqual(client.get_project_by_id(project.uuid).name, 'project')
self.assertEqual(client.get_model_relation_by_id(model.uuid).name, 'model')
self.assertEqual(client.get_model_version_relation_by_version('1', 1).version, '1')
self.assertEqual(Status.OK, client.delete_project_by_id(project.uuid))
self.assertIsNone(client.get_project_by_name('project'))
self.assertIsNone(client.get_model_relation_by_id(model.uuid))
self.assertIsNone(client.get_model_version_relation_by_version('1', model.uuid))
self.assertIsNone(client.list_project(1, 0))
self.assertIsNone(client.list_model_relation(1, 0))
self.assertIsNone(client.list_model_version_relation(1, 1, 0))
    def test_update_project(self):
        """Updating a project's uri is reflected when the project is
        fetched again by name."""
        client.register_project(name='project', uri='www.code.com')
        update_project = client.update_project(project_name='project', uri='<EMAIL>')
        project = client.get_project_by_name('project')
        self.assertEqual(update_project.uri, project.uri)
"""test workflow"""
def test_save_workflow_get_workflow_by_id_and_name(self):
project_response = client.register_project(name='project', uri='www.code.com')
self.assertEqual(project_response.uuid, 1)
response = client.register_workflow(name='workflow',
project_id=project_response.uuid,
properties=Properties({'a': 'b'}))
self.assertEqual(response.uuid, 1)
self.assertEqual(response.properties, Properties({'a': 'b'}))
response_by_id = client.get_workflow_by_id(response.uuid)
response_by_name = client.get_workflow_by_name(project_response.name, response.name)
self.assertEqual('workflow', response_by_id.name)
self.assertEqual('workflow', response_by_name.name)
self.assertEqual(Properties({'a': 'b'}), response_by_id.properties)
self.assertEqual(Properties({'a': 'b'}), response_by_name.properties)
def test_double_register_workflow(self):
project_response = client.register_project(name='project', uri='www.code.com')
project_response2 = client.register_project(name='project2', uri='www.code.com')
client.register_workflow(name='workflow', project_id=project_response.uuid)
client.register_workflow(name='workflow', project_id=project_response2.uuid)
self.assertRaises(AIFlowException, client.register_workflow, name='workflow',
project_id=project_response.uuid)
def test_list_workflows(self):
project_response = client.register_project(name='project', uri='www.code.com')
client.register_workflow(name='workflow1', project_id=project_response.uuid)
client.register_workflow(name='workflow2', project_id=project_response.uuid)
response_list = client.list_workflows(project_response.name, 2, 0)
self.assertEqual('workflow1', response_list[0].name)
self.assertEqual('workflow2', response_list[1].name)
def test_delete_workflow(self):
project_response = client.register_project(name='project', uri='www.code.com')
response = client.register_workflow(name='workflow',
project_id=project_response.uuid,
properties=Properties({'a': 'b'}))
self.assertEqual(Status.OK, client.delete_workflow_by_name(project_name=project_response.name,
workflow_name='workflow'))
self.assertIsNone(client.get_workflow_by_id(response.uuid))
response = client.register_workflow(name='workflow', project_id=project_response.uuid)
self.assertEqual(Status.OK, client.delete_workflow_by_id(response.uuid))
self.assertIsNone(client.get_workflow_by_id(response.uuid))
def test_update_workflow(self):
project_response = client.register_project(name='project', uri='www.code.com')
response = client.register_workflow(name='workflow',
project_id=project_response.uuid,
properties=Properties({'a': 'b'}))
updated_workflow = client.update_workflow(project_name=project_response.name,
workflow_name='workflow',
context_extractor=cloudpickle.loads(response.context_extractor_in_bytes),
properties=Properties({'a': 'c'}))
self.assertEqual(updated_workflow.properties, Properties({'a': 'c'}))
"""test model"""
def test_model_api(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model(model_name='test_register_model1',
model_desc='test register model1', project_id=project.uuid)
self.assertIsNone(client.get_model_by_name('no'))
self.assertIsNone(client.get_model_by_id(2))
self.assertEqual(client.get_model_by_id(model.uuid).name, 'test_register_model1')
self.assertEqual(client.get_model_by_name('test_register_model1').name, 'test_register_model1')
self.assertEqual(client.get_model_by_name('test_register_model1').model_desc, 'test register model1')
client.register_model(model_name='test_register_model2',
model_desc='test register model2', project_id=1)
self.assertEqual(len(client.list_model_relation(10, 0)), 2)
client.delete_model_by_id(model.uuid)
client.delete_model_by_name('test_register_model2')
self.assertIsNone(client.list_model_relation(10, 0))
self.assertEqual(len(client.list_registered_models()), 0)
    def test_get_deployed_model_version(self):
        """A model version only becomes the deployed version after its stage
        is set to DEPLOYED, and a second transition to DEPLOYED while one is
        already deployed raises AIFlowException."""
        project = client.register_project(name='project', uri='www.code.com')
        model = client.register_model(model_name='test_register_model1',
                                      model_desc='test register model1', project_id=project.uuid)
        model_version = client.register_model_version(model=model.uuid, model_path='/path/to/your/model/version')
        # no version deployed yet
        deployed_model_version = client.get_deployed_model_version(model_name=model.name)
        self.assertIsNone(deployed_model_version)
        client.update_model_version(model_name=model.name, model_version=model_version.version,
                                    current_stage=ModelVersionStage.DEPLOYED)
        deployed_model_version = client.get_deployed_model_version(model_name=model.name)
        self.assertEqual(deployed_model_version.version, model_version.version)
        # only one version may be in the DEPLOYED stage at a time
        self.assertRaises(AIFlowException,
                          client.update_model_version, model_name=model.name, model_version=model_version.version,
                          current_stage=ModelVersionStage.DEPLOYED)
def test_save_model_get_id_and_name(self):
project = client.register_project(name='project', uri='www.code.com')
response = client.register_model_relation(name='model', project_id=project.uuid)
model_id = client.get_model_relation_by_id(response.uuid)
model_name = client.get_model_relation_by_name('model')
self.assertEqual(model_id.name, model_name.name)
self.assertEqual(1, len(client.list_model_relation(2, response.uuid - 1)))
print(model_id)
def test_list_model(self):
project = client.register_project(name='project', uri='www.code.com')
client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_relation(name='model1', project_id=project.uuid)
self.assertEqual(2, len(client.list_model_relation(2, 0)))
self.assertEqual('model', client.list_model_relation(2, 0)[0].name)
self.assertEqual('model1', client.list_model_relation(2, 0)[1].name)
def test_delete_model_by_id(self):
project = client.register_project(name='project', uri='www.code.com')
model_relation = client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_version_relation(version='1', model_id=model_relation.uuid,
project_snapshot_id=None)
self.assertEqual(client.get_model_version_relation_by_version('1', model_relation.uuid).version, '1')
self.assertEqual(client.get_model_relation_by_name('model').name, 'model')
self.assertEqual(Status.OK, client.delete_model_relation_by_id(model_relation.uuid))
self.assertIsNone(client.get_model_version_relation_by_version('1', model_relation.uuid))
self.assertIsNone(client.get_model_relation_by_name('model'))
def test_delete_model_by_name(self):
project = client.register_project(name='project', uri='www.code.com')
model_relation = client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_version_relation(version='1', model_id=model_relation.uuid,
project_snapshot_id=None)
self.assertEqual(client.get_model_version_relation_by_version('1', model_relation.uuid).version, '1')
self.assertEqual(client.get_model_relation_by_name('model').name, 'model')
self.assertEqual(Status.OK, client.delete_model_relation_by_name('model'))
self.assertIsNone(client.get_model_version_relation_by_version('1', model_relation.uuid))
self.assertIsNone(client.get_model_relation_by_name('model'))
"""test model version"""
def test_model_version_api(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model(model_name='test_register_model',
model_desc='test register model', project_id=project.uuid)
self.assertIsNone(client.get_model_version_by_version('1', model.uuid))
self.assertEqual(client.get_model_by_id(model.uuid).name, 'test_register_model')
self.assertEqual(client.get_model_by_name('test_register_model').name, 'test_register_model')
response = client.register_model_version(model=model.uuid,
project_snapshot_id=None,
model_path='fs://source1.pkl',
version_desc='test model version 1',
current_stage=ModelVersionStage.GENERATED)
self.assertEqual(response.version, '1')
model_version_meta = client.get_model_version_by_version(response.version, model.uuid)
self.assertEqual(model_version_meta.version, '1')
self.assertEqual(model_version_meta.model_path, 'fs://source1.pkl')
self.assertIsNone(model_version_meta.model_type)
self.assertEqual(model_version_meta.version_desc, 'test model version 1')
response = client.update_model_version(model_name=model.name, model_version='1',
current_stage=ModelVersionStage.DEPLOYED)
self.assertEqual(response.current_stage, ModelVersionStage.DEPLOYED)
response = client.get_deployed_model_version(model.name)
self.assertEqual(response.version, '1')
response = client.register_model_version(model=model.uuid,
project_snapshot_id=None,
model_path='fs://source2.pkl',
model_type='{"flavor.version":2}',
version_desc='test model version 2')
self.assertEqual(response.version, '2')
self.assertEqual(len(client.list_model_version_relation(1, 10, 0)), 2)
client.delete_model_version_by_version(version='2', model_id=1)
self.assertEqual(len(client.list_model_version_relation(1, 10, 0)), 1)
# register model version with deleted model version name
response = client.register_model_version(model=model.uuid,
project_snapshot_id=None,
model_path='fs://source1.pkl',
version_desc='test model version 1')
self.assertEqual(response.version, '2')
model_version_meta = client.get_model_version_by_version(response.version, model.uuid)
self.assertEqual(model_version_meta.version, '2')
self.assertEqual(model_version_meta.model_path, 'fs://source1.pkl')
self.assertIsNone(model_version_meta.model_type)
self.assertEqual(model_version_meta.version_desc, 'test model version 1')
    def test_get_latest_model_version(self):
        """Latest-GENERATED / latest-VALIDATED lookups must track version
        registration and stage promotion in order."""
        project = client.register_project(name='project', uri='www.code.com')
        model = client.register_model(model_name='test_register_model',
                                      model_desc='test register model', project_id=project.uuid)
        response_1 = client.register_model_version(model=model.uuid,
                                                   project_snapshot_id=None,
                                                   model_path='fs://source1.pkl',
                                                   version_desc='test model version 1',
                                                   current_stage=ModelVersionStage.GENERATED)
        new_generated_model_version_1 = client.get_latest_generated_model_version(model.name)
        new_validated_model_version_1 = client.get_latest_validated_model_version(model.name)
        # Nothing has been promoted to VALIDATED yet, so that lookup is empty.
        self.assertIsNone(new_validated_model_version_1)
        self.assertEqual(response_1.version, new_generated_model_version_1.version)
        client.update_model_version(model_name=model.name, model_version=response_1.version,
                                    current_stage=ModelVersionStage.VALIDATED)
        new_validated_model_version_2 = client.get_latest_validated_model_version(model.name)
        self.assertEqual(new_validated_model_version_2.version, response_1.version)
        # Register a second GENERATED version; after it is validated it must
        # become the latest in both stages.
        response_2 = client.register_model_version(model=model.uuid,
                                                   project_snapshot_id=None,
                                                   model_path='fs://source1.pkl',
                                                   version_desc='test model version 1',
                                                   current_stage=ModelVersionStage.GENERATED)
        new_generated_model_version_2 = client.get_latest_generated_model_version(model.name)
        client.update_model_version(model_name=model.name, model_version=response_2.version,
                                    current_stage=ModelVersionStage.VALIDATED)
        new_validated_model_version_2 = client.get_latest_validated_model_version(model.name)
        self.assertEqual(new_validated_model_version_2.version, response_2.version)
        self.assertEqual(response_2.version, new_generated_model_version_2.version)
def test_save_model_version_get_by_version(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model_relation(name='model', project_id=project.uuid)
response = client.register_model_version_relation(version='1', model_id=model.uuid,
project_snapshot_id=None)
self.assertEqual(response.version, '1')
self.assertEqual(client.get_model_version_relation_by_version(response.version, model.uuid).version, '1')
self.assertEqual(len(client.list_model_version_relation(model.uuid, 2, 0)), 1)
print(client.get_model_version_relation_by_version(response.version, model.uuid))
def test_list_model_version(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_version_relation(version='1', model_id=model.uuid,
project_snapshot_id=None)
client.register_model_version_relation(version='2', model_id=model.uuid,
project_snapshot_id=None)
self.assertEqual(len(client.list_model_version_relation(1, 2, 0)), 2)
self.assertEqual(client.list_model_version_relation(1, 2, 0)[0].version, '1')
self.assertEqual(client.list_model_version_relation(1, 2, 0)[1].version, '2')
def test_delete_model_version_by_version(self):
project = client.register_project(name='project', uri='www.code.com')
model = client.register_model_relation(name='model', project_id=project.uuid)
client.register_model_version_relation(version='1', model_id=model.uuid,
project_snapshot_id=None)
self.assertEqual(client.get_model_version_relation_by_version('1', model.uuid).version, '1')
client.delete_model_version_relation_by_version('1', model.uuid)
self.assertIsNone(client.get_model_version_relation_by_version('1', model.uuid))
"""test artifact"""
def test_save_artifact_get_artifact_by_id_and_name(self):
artifact = client.register_artifact(name='artifact', artifact_type='json', uri='./artifact.json')
artifact_id = client.get_artifact_by_id(artifact.uuid)
artifact_name = client.get_artifact_by_name(artifact.name)
self.assertEqual(artifact.artifact_type, artifact_id.artifact_type)
self.assertEqual('artifact', artifact_name.name)
def test_double_save_artifact(self):
artifact_1 = client.register_artifact(name='artifact', artifact_type='json', uri='./artifact.json')
artifact_2 = client.register_artifact(name='artifact', artifact_type='json', uri='./artifact.json')
self.assertEqual(artifact_1.to_json_dict(), artifact_2.to_json_dict())
self.assertRaises(AIFlowException, client.register_artifact, name='artifact', artifact_type='json',
uri='./artifact.json', description='whatever')
def test_save_artifact_list_artifact(self):
client.register_artifact(name='artifact', artifact_type='json', uri='./artifact.json')
client.register_artifact(name='artifact_1', artifact_type='json', uri='./artifact.json')
self.assertEqual(2, len(client.list_artifact(2, 0)))
def test_delete_artifact_by_id_and_name(self):
client.register_artifact(name='artifact', artifact_type='json', uri='./artifact.json')
client.register_artifact(name='artifact_1', artifact_type='json', uri='./artifact.json')
self.assertIsNotNone(client.get_artifact_by_id(1))
self.assertIsNotNone(client.get_artifact_by_name('artifact_1'))
self.assertEqual(Status.OK, client.delete_artifact_by_id(1))
self.assertEqual(Status.OK, client.delete_artifact_by_name('artifact_1'))
self.assertEqual(Status.ERROR, client.delete_artifact_by_name('no artifact'))
self.assertIsNone(client.get_artifact_by_id(1))
self.assertIsNone(client.get_artifact_by_name('artifact_1'))
def test_update_artifact(self):
client.register_artifact(name='artifact', artifact_type='json', uri='./artifact.json')
artifact = client.update_artifact(artifact_name='artifact', artifact_type='csv', uri='../..')
artifact_id = client.get_artifact_by_id(artifact.uuid)
self.assertEqual(artifact_id.artifact_type, 'csv')
self.assertIsNotNone(artifact_id.update_time)
self.assertEqual(artifact_id.uri, '../..')
def test_create_registered_model(self):
model_name = 'test_create_registered_model'
model_desc = 'test create registered model'
response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
self.assertIsNotNone(response)
self.assertEqual(response.model_name, model_name)
self.assertEqual(response.model_desc, model_desc)
with self.assertRaises(AIFlowException) as exception_context:
client.create_registered_model(model_name=model_name)
assert exception_context.exception.error_code == str(RESOURCE_ALREADY_EXISTS)
    def test_double_register_model(self):
        """Registering the same model twice with identical metadata is
        idempotent; a conflicting description raises AIFlowException.
        Exercised for both create_registered_model and register_model."""
        model_name = 'test_create_registered_model'
        model_desc = 'test create registered model'
        client.create_registered_model(model_name=model_name, model_desc=model_desc)
        client.create_registered_model(model_name=model_name, model_desc=model_desc)
        # Same name, different description: must be rejected.
        self.assertRaises(AIFlowException, client.create_registered_model, model_name=model_name,
                          model_desc='')
        project = client.register_project(name='project')
        client.register_model(model_name=model_name, project_id=project.uuid,
                              model_desc=model_desc)
        client.register_model(model_name=model_name, project_id=project.uuid,
                              model_desc=model_desc)
        self.assertRaises(AIFlowException, client.register_model, model_name=model_name,
                          project_id=project.uuid,
                          model_desc='')
    def test_update_registered_model(self):
        """update_registered_model can rename a model and replace its
        description in one call."""
        model_name1 = 'test_update_registered_model1'
        model_desc1 = 'test update registered model1'
        response = client.create_registered_model(model_name=model_name1,
                                                  model_desc=model_desc1)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name1)
        model_name2 = 'test_update_registered_model2'
        model_desc2 = 'test update registered model2'
        # Rename model_name1 -> model_name2 and change the description.
        response = client.update_registered_model(model_name=model_name1, new_name=model_name2,
                                                  model_desc=model_desc2)
        self.assertEqual(response.model_name, model_name2)
        self.assertEqual(response.model_desc, model_desc2)
def test_delete_registered_model(self):
model_name = 'test_delete_registered_model'
model_desc = 'test delete registered model'
response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
self.assertIsNotNone(response)
self.assertEqual(response.model_name, model_name)
client.delete_registered_model(model_name=model_name)
response = client.get_registered_model_detail(model_name=model_name)
self.assertIsNone(response)
    def test_list_registered_model(self):
        """list_registered_models returns all models in creation order."""
        model_name1 = 'test_list_registered_model1'
        model_desc1 = 'test list registered model1'
        response = client.create_registered_model(model_name=model_name1,
                                                  model_desc=model_desc1)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name1)
        model_name2 = 'test_list_registered_model2'
        model_desc2 = 'test list registered model2'
        response = client.create_registered_model(model_name=model_name2,
                                                  model_desc=model_desc2)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name2)
        # Both models appear, ordered by creation.
        response = client.list_registered_models()
        self.assertEqual(len(response), 2)
        self.assertEqual(response[0].model_name, model_name1)
        self.assertEqual(response[1].model_name, model_name2)
    def test_get_registered_model_detail(self):
        """The model detail's latest_model_version always reflects the most
        recently created version (versions auto-increment as '1', '2', ...)."""
        model_name = 'test_get_registered_model_detail'
        model_desc = 'test get registered model detail'
        response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        response = client.get_registered_model_detail(model_name=model_name)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_desc, model_desc)
        # First version: detail must report it as the latest.
        model_path1 = 'fs://source1.pkl'
        model_type1 = '{"flavor.version":1}'
        version_desc1 = 'test get registered model detail1'
        response = client.create_model_version(model_name=model_name, model_path=model_path1,
                                               model_type=model_type1,
                                               version_desc=version_desc1)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path1)
        self.assertEqual(response.model_type, model_type1)
        self.assertEqual(response.version_desc, version_desc1)
        response = client.get_registered_model_detail(model_name=model_name)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_desc, model_desc)
        model_version = response.latest_model_version
        self.assertEqual(model_version.model_version, '1')
        self.assertEqual(model_version.model_path, model_path1)
        self.assertEqual(model_version.model_type, model_type1)
        self.assertEqual(model_version.version_desc, version_desc1)
        # Second version: latest_model_version must move to it.
        model_path2 = 'fs://source2.pkl'
        model_type2 = '{"flavor.version":2}'
        version_desc2 = 'test get registered model detail2'
        response = client.create_model_version(model_name=model_name, model_path=model_path2,
                                               model_type=model_type2,
                                               version_desc=version_desc2)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '2')
        self.assertEqual(response.model_path, model_path2)
        self.assertEqual(response.model_type, model_type2)
        self.assertEqual(response.version_desc, version_desc2)
        response = client.get_registered_model_detail(model_name=model_name)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_desc, model_desc)
        model_version = response.latest_model_version
        self.assertEqual(model_version.model_version, '2')
        self.assertEqual(model_version.model_path, model_path2)
        self.assertEqual(model_version.model_type, model_type2)
        self.assertEqual(model_version.version_desc, version_desc2)
    def test_create_model_version(self):
        """Consecutive create_model_version calls auto-increment the version
        string ('1', '2') and echo back the supplied metadata."""
        model_name = 'test_create_model_version'
        model_desc = 'test create model version'
        response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        model_path1 = 'fs://source1.pkl'
        model_type1 = '{"flavor.version":1}'
        version_desc1 = 'test create model version1'
        response = client.create_model_version(model_name=model_name, model_path=model_path1,
                                               model_type=model_type1,
                                               version_desc=version_desc1)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path1)
        self.assertEqual(response.model_type, model_type1)
        self.assertEqual(response.version_desc, version_desc1)
        model_path2 = 'fs://source2.pkl'
        model_type2 = '{"flavor.version":2}'
        version_desc2 = 'test create model version2'
        response = client.create_model_version(model_name=model_name, model_path=model_path2,
                                               model_type=model_type2,
                                               version_desc=version_desc2)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '2')
        self.assertEqual(response.model_path, model_path2)
        self.assertEqual(response.model_type, model_type2)
        self.assertEqual(response.version_desc, version_desc2)
    def test_update_model_version(self):
        """update_model_version can rewrite path/type/description and move the
        version through stages (GENERATED -> VALIDATED -> DEPLOYED)."""
        model_name = 'test_update_model_version'
        model_desc = 'test update model version'
        response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        model_path1 = 'fs://source1.pkl'
        model_type1 = '{"flavor.version":1}'
        version_desc1 = 'test update model version1'
        version_stage1 = ModelVersionStage.GENERATED
        response = client.create_model_version(model_name=model_name, model_path=model_path1,
                                               model_type=model_type1,
                                               version_desc=version_desc1, current_stage=version_stage1)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path1)
        self.assertEqual(response.model_type, model_type1)
        self.assertEqual(response.version_desc, version_desc1)
        self.assertEqual(response.current_stage, version_stage1)
        # Replace all mutable fields of version '1' in a single update.
        model_path2 = 'fs://source2.pkl'
        model_type2 = '{"flavor.version":2}'
        version_desc2 = 'test update model version2'
        version_stage2 = ModelVersionStage.VALIDATED
        response = client.update_model_version(model_name=model_name, model_version='1',
                                               model_path=model_path2, model_type=model_type2,
                                               version_desc=version_desc2, current_stage=version_stage2)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path2)
        self.assertEqual(response.model_type, model_type2)
        self.assertEqual(response.version_desc, version_desc2)
        self.assertEqual(response.current_stage, version_stage2)
        # A stage-only update leaves the other fields alone.
        response = client.update_model_version(model_name=model_name, model_version='1',
                                               current_stage=ModelVersionStage.DEPLOYED)
        self.assertEqual(response.current_stage, ModelVersionStage.DEPLOYED)
    def test_delete_model_version(self):
        """A deleted model version is no longer retrievable by detail lookup."""
        model_name = 'test_delete_model_version'
        model_desc = 'test delete model version'
        response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        model_path = 'fs://source.pkl'
        model_type = '{"flavor.version":1}'
        version_desc = 'test delete model version'
        response = client.create_model_version(model_name=model_name, model_path=model_path,
                                               model_type=model_type, version_desc=version_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path)
        self.assertEqual(response.model_type, model_type)
        self.assertEqual(response.version_desc, version_desc)
        client.delete_model_version(model_name, '1')
        response = client.get_model_version_detail(model_name, '1')
        self.assertIsNone(response)
    def test_get_model_version_detail(self):
        """get_model_version_detail returns the same metadata that was
        supplied at creation time."""
        model_name = 'test_get_model_version_detail'
        model_desc = 'test get model version detail'
        response = client.create_registered_model(model_name=model_name, model_desc=model_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        model_path = 'fs://source.pkl'
        model_type = '{"flavor.version":1}'
        version_desc = 'test get model version detail'
        response = client.create_model_version(model_name=model_name, model_path=model_path,
                                               model_type=model_type, version_desc=version_desc)
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path)
        self.assertEqual(response.model_type, model_type)
        self.assertEqual(response.version_desc, version_desc)
        # Read the version back and verify it round-trips unchanged.
        response = client.get_model_version_detail(model_name, '1')
        self.assertIsNotNone(response)
        self.assertEqual(response.model_name, model_name)
        self.assertEqual(response.model_version, '1')
        self.assertEqual(response.model_path, model_path)
        self.assertEqual(response.model_type, model_type)
        self.assertEqual(response.version_desc, version_desc)
    def test_update_and_list_notification(self):
        """publish_event assigns monotonically increasing versions per key, and
        list_events(version=v) returns only events newer than v."""
        key = 'test_publish_event_key'
        value1 = 'test_publish_event_value1'
        response = client.publish_event(key=key, value=value1)
        self.assertIsNotNone(response)
        self.assertEqual(response.key, key)
        self.assertEqual(response.value, value1)
        self.assertTrue(response.version > 0)
        notifications = client.list_events(key=key)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(notifications[0].key, key)
        self.assertEqual(notifications[0].value, value1)
        self.assertEqual(notifications[0].version, response.version)
        # version=0 means "everything after version 0", i.e. all events.
        notifications = client.list_events(key=key, version=0)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(notifications[0].key, key)
        self.assertEqual(notifications[0].value, value1)
        value2 = 'test_publish_event_value2'
        old_response = response
        response = client.publish_event(key=key, value=value2)
        self.assertIsNotNone(response)
        # Each publish bumps the version by exactly one.
        self.assertEqual(response.version, old_response.version + 1)
        notifications = client.list_events(key=key)
        self.assertEqual(len(notifications), 2)
        self.assertEqual(notifications[1].key, key)
        self.assertEqual(notifications[1].value, value2)
        self.assertEqual(notifications[1].version, old_response.version + 1)
        # Filtering from the old version yields only the newer event.
        notifications = client.list_events(key=key, version=old_response.version)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(notifications[0].key, key)
        self.assertEqual(notifications[0].value, value2)
        old_response = response
        response = client.publish_event(key=key, value=value2)
        self.assertIsNotNone(response)
        self.assertEqual(response.version, old_response.version + 1)
        notifications = client.list_events(key=key)
        self.assertEqual(len(notifications), 3)
        self.assertEqual(notifications[2].key, key)
        self.assertEqual(notifications[2].value, value2)
        self.assertEqual(notifications[2].version, old_response.version + 1)
        notifications = client.list_events(key=key, version=old_response.version)
        self.assertEqual(len(notifications), 1)
        self.assertEqual(notifications[0].key, key)
        self.assertEqual(notifications[0].value, value2)
    def test_listen_notification(self):
        """Multiple clients can register watchers on the same key; watchers
        receive both events published while listening and (for key2) events
        published before the listener started."""
        class TestWatcher(EventWatcher):
            # Minimal watcher: asserts that delivered batches are non-empty.
            def __init__(self, event_type, test_case: TestCase):
                super(TestWatcher, self).__init__()
                self.event_type = event_type
                self.test_case = test_case

            def process(self, notifications):
                self.test_case.assertNotEqual(len(notifications), 0)
                for notification in notifications:
                    print(notification)
        event_type1 = 'test_listen_notification1'
        key1 = 'test_listen_notification_key1'
        # Same client may register twice; three clients listen on key1.
        client.start_listen_event(key=key1,
                                  watcher=TestWatcher(event_type1, self))
        client.start_listen_event(key=key1,
                                  watcher=TestWatcher(event_type1, self))
        client1.start_listen_event(key=key1,
                                   watcher=TestWatcher(event_type1, self))
        client2.start_listen_event(key=key1,
                                   watcher=TestWatcher(event_type1, self))
        value1 = 'test_listen_notification_value1'
        client.publish_event(key=key1, value=value1)
        value2 = 'test_listen_notification_value2'
        client.publish_event(key=key1, value=value2)
        # Give the background listener threads time to pick up the events.
        time.sleep(10)
        value3 = 'test_listen_notification_value3'
        client.publish_event(key=key1, value=value3)
        time.sleep(1)
        client.stop_listen_event(key1)
        client1.stop_listen_event(key1)
        client2.stop_listen_event(key1)
        # key2: publish first, then attach listeners (catch-up delivery path).
        key2 = 'test_listen_notification_key2'
        client.publish_event(key=key2, value=value1)
        client.publish_event(key=key2, value=value2)
        event_type2 = 'test_listen_notification2'
        client.start_listen_event(key=key2,
                                  watcher=TestWatcher(event_type2, self))
        client1.start_listen_event(key=key2,
                                   watcher=TestWatcher(event_type2, self))
        client2.start_listen_event(key=key2,
                                   watcher=TestWatcher(event_type2, self))
        time.sleep(10)
        client.publish_event(key=key2, value=value3)
        time.sleep(1)
        client.stop_listen_event(key2)
        client1.stop_listen_event(key2)
        client2.stop_listen_event(key2)
# def test_submit_workflow(self):
#
# def create_job(index) -> BaseJob:
# job: BaseJob = LocalCMDJob(exec_cmd='echo "hello {}" && sleep 1'.format(str(index)),
# job_context=JobContext(),
# job_config=BaseJobConfig(engine="cmd_line", platform="local"))
# job.instance_id = str(index)
# return job
#
# def create_workflow() -> Workflow:
# ex_workflow = Workflow()
# for i in range(3):
# job = create_job(i)
# ex_workflow.add_job(job)
# deps = [JobControlEdge(target_node_id='0', source_node_id='2',
# signal_config=SignalConfig(signal_key=generate_job_status_key('0'),
# signal_value=State.FINISHED.value)),
# JobControlEdge(target_node_id='1', source_node_id='2',
# signal_config=SignalConfig(signal_key=generate_job_status_key('1'),
# signal_value=State.FINISHED.value))]
# ex_workflow.add_edges("2", deps)
# workflow_meta = client.register_workflow_execution(name=generate_time_str(),
# project_id=None,
# execution_state=State.INIT,
# workflow_json=dumps(ex_workflow))
# ex_workflow.workflow_id = workflow_meta.uuid
# return ex_workflow
#
# workflow = create_workflow()
# res = client.submit_workflow(json_utils.dumps(workflow))
# self.assertEqual(0, res[0])
# workflow_id = res[1]
# res = client.stop_workflow(workflow_id=workflow_id)
# self.assertEqual(0, res[0])
# while client.is_alive_workflow(workflow_id)[1]:
# time.sleep(1)
# self.assertEqual(1, res[0])
# execution_meta = client.get_workflow_execution_by_id(workflow_id)
# self.assertEqual(State.FINISHED, execution_meta.execution_state)
def test_dataset_metric_meta(self):
start_time = round(time.time())
end_time = start_time + 1
metric_meta = client.register_metric_meta(metric_name='test_dataset_metric_meta_1',
metric_type=MetricType.DATASET,
project_name='test_dataset_metric_meta_project_1',
dataset_name='test_dataset_metric_meta_dataset_1',
job_name='test_dataset_metric_meta_job',
start_time=start_time, end_time=end_time, uri='/tmp/metric',
tags='test_dataset_metric_meta', properties=Properties({'a': 'a'}))[2]
metric_meta = client.get_metric_meta(metric_meta.metric_name)[2]
self.assertEqual('test_dataset_metric_meta_1', metric_meta.metric_name)
self.assertEqual(MetricType.DATASET, MetricType.value_of(metric_meta.metric_type))
self.assertEqual('test_dataset_metric_meta_project_1', metric_meta.project_name)
self.assertEqual('test_dataset_metric_meta_dataset_1', metric_meta.dataset_name)
self.assertEqual(start_time, metric_meta.start_time)
self.assertEqual(end_time, metric_meta.end_time)
self.assertEqual('/tmp/metric', metric_meta.uri)
self.assertEqual('test_dataset_metric_meta', metric_meta.tags)
self.assertEqual(metric_meta.properties['a'], metric_meta.properties['a'])
metric_meta = client.update_metric_meta(metric_name=metric_meta.metric_name,
dataset_name='test_dataset_metric_meta_dataset_2')[2]
metric_meta = client.get_metric_meta(metric_meta.metric_name)[2]
self.assertEqual('test_dataset_metric_meta_dataset_2', metric_meta.dataset_name)
metric_meta = client.register_metric_meta(metric_name='test_dataset_metric_meta_2',
metric_type=metric_meta.metric_type,
project_name='test_dataset_metric_meta_project_2',
dataset_name=metric_meta.dataset_name,
job_name=metric_meta.job_name,
start_time=metric_meta.start_time, end_time=metric_meta.end_time,
uri=metric_meta.uri,
tags=metric_meta.tags, properties=metric_meta.properties)[2]
metric_metas = client.list_dataset_metric_metas(dataset_name=metric_meta.dataset_name)[2]
self.assertEqual(2, len(metric_metas))
metric_meta = client.list_dataset_metric_metas(dataset_name=metric_meta.dataset_name,
project_name=metric_meta.project_name)[2]
self.assertEqual('test_dataset_metric_meta_2', metric_meta.metric_name)
self.assertEqual('test_dataset_metric_meta_project_2', metric_meta.project_name)
self.assertTrue(client.delete_metric_meta(metric_name=metric_meta.metric_name))
metric_metas = client.list_dataset_metric_metas(dataset_name=metric_meta.dataset_name)[2]
self.assertTrue(isinstance(metric_metas, MetricMeta))
self.assertEqual('test_dataset_metric_meta_1', metric_metas.metric_name)
def test_model_metric_meta(self):
metric_meta = client.register_metric_meta(metric_name='test_model_metric_meta_1',
metric_type=MetricType.MODEL,
project_name='test_model_metric_meta_project_1',
model_name='test_model_metric_meta_model_1',
job_name='test_model_metric_meta_job',
uri='/tmp/metric',
tags='test_model_metric_meta', properties=Properties({'a': 'a'}))[2]
metric_meta = client.get_metric_meta(metric_meta.metric_name)[2]
self.assertEqual('test_model_metric_meta_1', metric_meta.metric_name)
self.assertEqual(MetricType.MODEL, MetricType.value_of(metric_meta.metric_type))
self.assertEqual('test_model_metric_meta_project_1', metric_meta.project_name)
self.assertEqual('test_model_metric_meta_model_1', metric_meta.model_name)
self.assertEqual('/tmp/metric', metric_meta.uri)
self.assertEqual('test_model_metric_meta', metric_meta.tags)
self.assertEqual(metric_meta.properties['a'], metric_meta.properties['a'])
metric_meta = client.update_metric_meta(metric_name=metric_meta.metric_name,
model_name='test_model_metric_meta_model_2')[2]
metric_meta = client.get_metric_meta(metric_meta.metric_name)[2]
self.assertEqual('test_model_metric_meta_model_2', metric_meta.model_name)
metric_meta = client.register_metric_meta(metric_name='test_model_metric_meta_2',
metric_type=metric_meta.metric_type,
project_name='test_model_metric_meta_project_2',
model_name=metric_meta.model_name,
job_name=metric_meta.job_name,
uri=metric_meta.uri,
tags=metric_meta.tags, properties=metric_meta.properties)[2]
metric_metas = client.list_model_metric_metas(model_name=metric_meta.model_name)[2]
self.assertEqual(2, len(metric_metas))
metric_meta = client.list_model_metric_metas(model_name=metric_meta.model_name,
project_name=metric_meta.project_name)[2]
self.assertEqual('test_model_metric_meta_2', metric_meta.metric_name)
self.assertEqual('test_model_metric_meta_project_2', metric_meta.project_name)
self.assertTrue(client.delete_metric_meta(metric_name=metric_meta.metric_name))
metric_metas = client.list_model_metric_metas(model_name=metric_meta.model_name)[2]
self.assertTrue(isinstance(metric_metas, MetricMeta))
self.assertEqual('test_model_metric_meta_1', metric_metas.metric_name)
def test_metric_summary(self):
metric_timestamp = round(time.time())
metric_summary = client.register_metric_summary(metric_name='test_metric_summary_1', metric_key='auc',
metric_value='0.6', metric_timestamp=metric_timestamp)[2]
metric_summary = client.get_metric_summary(metric_summary.uuid)[2]
self.assertEqual(1, metric_summary.uuid)
self.assertEqual('test_metric_summary_1', metric_summary.metric_name)
self.assertEqual('auc', metric_summary.metric_key)
self.assertEqual('0.6', metric_summary.metric_value)
self.assertEqual(metric_timestamp, metric_summary.metric_timestamp)
metric_summary = client.update_metric_summary(uuid=metric_summary.uuid, metric_value='0.8')[2]
metric_summary = client.get_metric_summary(metric_summary.uuid)[2]
self.assertEqual('0.8', metric_summary.metric_value)
metric_summary = client.register_metric_summary(metric_name=metric_summary.metric_name,
metric_key=metric_summary.metric_key,
metric_value='0.7', metric_timestamp=metric_timestamp + 1,
model_version='test_metric_summary_model_version_1')[2]
metric_summary = client.register_metric_summary(metric_name=metric_summary.metric_name,
metric_key='roc',
metric_value='0.9', metric_timestamp=metric_timestamp + 1,
model_version='test_metric_summary_model_version_2')[2]
metric_summaries = client.list_metric_summaries(metric_name=metric_summary.metric_name)[2]
self.assertEqual(3, len(metric_summaries))
self.assertEqual('auc', metric_summaries[0].metric_key)
self.assertEqual('0.8', metric_summaries[0].metric_value)
self.assertEqual('auc', metric_summaries[1].metric_key)
self.assertEqual('0.7', metric_summaries[1].metric_value)
self.assertEqual('roc', metric_summaries[2].metric_key)
self.assertEqual('0.9', metric_summaries[2].metric_value)
metric_summaries = client.list_metric_summaries(metric_key='auc')[2]
self.assertEqual(2, len(metric_summaries))
self.assertEqual('0.8', metric_summaries[0].metric_value)
self.assertEqual('0.7', metric_summaries[1].metric_value)
metric_summary = client.list_metric_summaries(model_version='test_metric_summary_model_version_1')[2]
self.assertEqual('test_metric_summary_1', metric_summary.metric_name)
self.assertEqual('auc', metric_summary.metric_key)
self.assertEqual('0.7', metric_summary.metric_value)
metric_summary = client.list_metric_summaries(model_version='test_metric_summary_model_version_1')[2]
self.assertEqual('test_metric_summary_1', metric_summary.metric_name)
self.assertEqual('auc', metric_summary.metric_key)
self.assertEqual('0.7', metric_summary.metric_value)
metric_summaries = client.list_metric_summaries(metric_name=metric_summary.metric_name,
start_time=metric_timestamp + 1,
end_time=metric_summary.metric_timestamp)[2]
self.assertEqual(2, len(metric_summaries))
self.assertEqual('auc', metric_summaries[0].metric_key)
self.assertEqual('0.7', metric_summaries[0].metric_value)
self.assertEqual('roc', metric_summaries[1].metric_key)
self.assertEqual('0.9', metric_summaries[1].metric_value)
metric_summary = client.list_metric_summaries(metric_name=metric_summary.metric_name, metric_key='auc',
model_version='test_metric_summary_model_version_1')[2]
self.assertEqual('test_metric_summary_1', metric_summary.metric_name)
self.assertEqual('auc', metric_summary.metric_key)
self.assertEqual('0.7', metric_summary.metric_value)
@staticmethod
def register_workflow_job():
project = client.register_project(name='project')
return project
@staticmethod
def register_model_and_version(project):
model_name = 'test_create_registered_model'
model_desc = 'test create registered model'
model = client.register_model(model_name=model_name, project_id=project.uuid,
model_desc=model_desc)
version = client.register_model_version(model=model.uuid,
model_path="/tmp",
project_snapshot_id=None)
return model, version
class TestAIFlowClientSqlite(AIFlowClientTestCases, unittest.TestCase):
    """Runs the shared client test cases against a plain (non-HA) AIFlow
    server backed by a throwaway SQLite database."""

    @classmethod
    def setUpClass(cls) -> None:
        global client, client1, client2
        print("TestAIFlowClientSqlite setUpClass")
        # Start from a clean database file for the whole test class.
        if os.path.exists(_SQLITE_DB_FILE):
            os.remove(_SQLITE_DB_FILE)
        cls.server = AIFlowServer(store_uri=_SQLITE_DB_URI, port=_PORT, start_scheduler_service=False)
        cls.server.run()
        # Three independent clients against the one server; the extra two are
        # used by the notification-listening tests.
        client = AIFlowClient(server_uri='localhost:' + _PORT)
        client1 = AIFlowClient(server_uri='localhost:' + _PORT)
        client2 = AIFlowClient(server_uri='localhost:' + _PORT)

    @classmethod
    def tearDownClass(cls) -> None:
        # Stop listeners before shutting the server down, then remove the DB.
        client.stop_listen_event()
        client1.stop_listen_event()
        client2.stop_listen_event()
        cls.server.stop()
        os.remove(_SQLITE_DB_FILE)

    def setUp(self) -> None:
        _get_store(_SQLITE_DB_URI)

    def tearDown(self) -> None:
        # Drop all tables so each test starts with empty metadata.
        store = _get_store(_SQLITE_DB_URI)
        base.metadata.drop_all(store.db_engine)
class TestAIFlowClientSqliteWithSingleHighAvailableServer(
    AIFlowClientTestCases, unittest.TestCase):
    """
    Used to ensure the high available server has the same functionality with normal server.
    """

    @classmethod
    def setUpClass(cls) -> None:
        global client, client1, client2
        print("TestAIFlowClientSqlite setUpClass")
        # Start from a clean database file for the whole test class.
        if os.path.exists(_SQLITE_DB_FILE):
            os.remove(_SQLITE_DB_FILE)
        # Single-node HA setup: the server registers itself as its own peer.
        cls.server = AIFlowServer(store_uri=_SQLITE_DB_URI, port=_PORT, enabled_ha=True, start_scheduler_service=False,
                                  ha_server_uri='localhost:' + _PORT)
        cls.server.run()
        config = ProjectConfig()
        config.set_server_ip('localhost')
        # NOTE(review): port '50051' is hard-coded here while the server
        # listens on _PORT — confirm the two values actually match.
        config.set_server_port('50051')
        config.set_project_name('test_project')
        config.set_enable_ha(True)
        client = AIFlowClient(server_uri='localhost:' + _PORT, project_config=config)
        client1 = AIFlowClient(server_uri='localhost:' + _PORT, project_config=config)
        client2 = AIFlowClient(server_uri='localhost:' + _PORT, project_config=config)

    @classmethod
    def tearDownClass(cls) -> None:
        # Stop listeners and HA heartbeats before shutting the server down.
        client.stop_listen_event()
        client.disable_high_availability()
        client1.stop_listen_event()
        client1.disable_high_availability()
        client2.stop_listen_event()
        client2.disable_high_availability()
        cls.server.stop()
        os.remove(_SQLITE_DB_FILE)

    def setUp(self) -> None:
        _get_store(_SQLITE_DB_URI)

    def tearDown(self) -> None:
        # Drop all tables so each test starts with empty metadata.
        store = _get_store(_SQLITE_DB_URI)
        base.metadata.drop_all(store.db_engine)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1709670 | <gh_stars>1-10
from .filetype import check_file_type
| StarcoderdataPython |
4825004 | <filename>src/pipeline_reader/objects.py
class Pipeline:
    """An ordered collection of stages plus pipeline-level options."""

    def __init__(self):
        # Stages run in insertion order; options apply to the whole pipeline.
        self.stages = []
        self.options = []

    def __str__(self):
        return 'Pipeline(stages="{}", options="{}")'.format(self.stages, self.options)

    def __repr__(self):
        return str(self)
class Stage:
    """A single named pipeline stage holding its source code."""

    def __init__(self, name=''):
        self.name = name
        self.code = ''
        # -1 marks the indentation as not yet detected.
        self.indent = -1

    def __str__(self):
        return 'Stage(name="{}", code="{}")'.format(self.name, self.code)

    def __repr__(self):
        return str(self)
class Options:
def __init__(self):
self.code = '' | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.