hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70f9780dd1066df809bf256c632987a9ff361e1c | 1,893 | py | Python | lecarb/estimator/estimator.py | anshumandutt/AreCELearnedYet | e2286c3621dea8e4961057b6197c1e14e75aea5a | [
"MIT"
] | 34 | 2020-12-14T01:21:29.000Z | 2022-03-29T04:52:46.000Z | lecarb/estimator/estimator.py | anshumandutt/AreCELearnedYet | e2286c3621dea8e4961057b6197c1e14e75aea5a | [
"MIT"
] | 5 | 2020-12-28T16:06:22.000Z | 2022-01-19T18:28:53.000Z | lecarb/estimator/estimator.py | anshumandutt/AreCELearnedYet | e2286c3621dea8e4961057b6197c1e14e75aea5a | [
"MIT"
] | 12 | 2021-02-08T17:50:13.000Z | 2022-03-28T11:09:06.000Z | import time
import logging
import numpy as np
from typing import Tuple, Any
from ..workload.workload import Query, query_2_triple
from ..dataset.dataset import Table
L = logging.getLogger(__name__)


class Estimator(object):
    """Base class for a cardinality estimator.

    Concrete estimators hold a reference to the table they estimate over and
    keep their construction parameters in ``self.params`` so that ``repr``
    produces a stable, parameter-qualified identifier.
    """

    def __init__(self, table: Table, **kwargs: Any) -> None:
        self.table = table
        # Keep a plain-dict copy of the keyword parameters for repr().
        self.params = dict(kwargs)

    def __repr__(self) -> str:
        # e.g. "oracle-a=1;b=2" -- lowercase class name plus parameters.
        param_str = ';'.join(f"{key}={val}" for key, val in self.params.items())
        return f"{self.__class__.__name__.lower()}-{param_str}"

    def query(self, query: Query) -> Tuple[float, float]:
        """Estimate the cardinality of ``query``; return (est_card, dur_ms)."""
        raise NotImplementedError
def in_between(data: Any, val: Tuple[Any, Any]) -> bool:
    """Elementwise test that ``data`` lies in the closed interval ``val``.

    ``val`` must be a (low, high) pair; works on scalars and numpy arrays.
    """
    assert len(val) == 2
    low, high = val
    return np.greater_equal(data, low) & np.less_equal(data, high)


# Maps workload operator symbols to their numpy predicate implementations.
OPS = {
    '>': np.greater,
    '<': np.less,
    '>=': np.greater_equal,
    '<=': np.less_equal,
    '=': np.equal,
    '[]': in_between,
}
class Oracle(Estimator):
    """Exact "estimator": answers each query by scanning the whole table."""

    def __init__(self, table):
        super(Oracle, self).__init__(table=table)

    def query(self, query):
        cols, ops, vals = query_2_triple(query, with_none=False, split_range=False)
        start_stmp = time.time()
        # Start with all rows selected, then AND in each predicate.
        selected = np.ones(self.table.row_num, dtype=bool)
        for col, op, val in zip(cols, ops, vals):
            selected &= OPS[op](self.table.data[col], val)
        card = selected.sum()
        dur_ms = (time.time() - start_stmp) * 1e3
        return card, dur_ms

    # An alternative implementation using pandasql's sqldf proved too slow:
    #   sql = query_2_sql(query, self.table)
    #   df = sqldf(sql, locals())
    #   card = df.iloc[0, 0]
| 31.032787 | 94 | 0.605917 |
7e1c37ec37782385c3f5577272f4ed283a511c87 | 8,756 | py | Python | pypy/module/cpyext/memoryobject.py | SeraphRoy/PyPy-Functional | e825dce7f7c484fa666566974a93ed5d59fb73be | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/cpyext/memoryobject.py | SeraphRoy/PyPy-Functional | e825dce7f7c484fa666566974a93ed5d59fb73be | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/cpyext/memoryobject.py | SeraphRoy/PyPy-Functional | e825dce7f7c484fa666566974a93ed5d59fb73be | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | from rpython.rlib.objectmodel import keepalive_until_here
from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import (
cpython_api, Py_buffer, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS,
build_type_checkers, Py_ssize_tP, PyObjectFields, cpython_struct,
bootstrap_function, Py_bufferP, slot_function, generic_cpy_call)
from pypy.module.cpyext.pyobject import (
PyObject, make_ref, as_pyobj, decref, from_ref, make_typedescr,
get_typedescr, track_reference)
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import widen
from pypy.objspace.std.memoryobject import W_MemoryView
from pypy.module.cpyext.object import _dealloc
from pypy.module.cpyext.import_ import PyImport_Import
PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView")

# C-level layout of PyMemoryViewObject: the standard PyObject header plus an
# embedded Py_buffer ("view") holding the exported buffer description.
PyMemoryViewObjectStruct = lltype.ForwardReference()
PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct)
PyMemoryViewObjectFields = PyObjectFields + \
    (("view", Py_buffer),)
cpython_struct(
    "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct,
    level=2)
@bootstrap_function
def init_memoryobject(space):
    "Type description of PyMemoryViewObject"
    # Wire the interp-level W_MemoryView type to its C-level counterpart:
    # how to create the C struct (attach), free it (dealloc), and rebuild an
    # interp-level object from an existing C struct (realize).
    make_typedescr(W_MemoryView.typedef,
                   basestruct=PyMemoryViewObject.TO,
                   attach=memory_attach,
                   dealloc=memory_dealloc,
                   realize=memory_realize,
                   )
def memory_attach(space, py_obj, w_obj, w_userdata=None):
    """
    Fills a newly allocated PyMemoryViewObject with the given W_MemoryView object.
    """
    assert isinstance(w_obj, W_MemoryView)
    py_obj = rffi.cast(PyMemoryViewObject, py_obj)
    view = py_obj.c_view
    ndim = w_obj.buf.getndim()
    if ndim >= Py_MAX_NDIMS:
        # Too many dimensions for the fixed-size arrays embedded in
        # Py_buffer; leave the view unfilled.
        # XXX warn?
        return
    fill_Py_buffer(space, w_obj.buf, view)
    try:
        # Fast path: the underlying buffer exposes a raw address we can
        # point the C view at directly; w_userdata becomes the owner ref.
        view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address())
        view.c_obj = make_ref(space, w_userdata)
        rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly)
    except ValueError:
        # get_raw_address() failed: materialize the contents as a bytes
        # object and expose that copy as a read-only buffer instead.
        w_s = w_obj.descr_tobytes(space)
        view.c_obj = make_ref(space, w_s)
        view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.bytes_w(w_s),
                               track_allocation=False))
        rffi.setintfield(view, 'c_readonly', 1)
def memory_realize(space, obj):
    """
    Creates the memory object in the interpreter
    """
    from pypy.module.cpyext.slotdefs import CPyBuffer, fq
    py_mem = rffi.cast(PyMemoryViewObject, obj)
    view = py_mem.c_view
    # c_ndim is a C integer field; widen() turns it into a Python-level int.
    ndim = widen(view.c_ndim)
    shape = None
    if view.c_shape:
        shape = [view.c_shape[i] for i in range(ndim)]
    strides = None
    if view.c_strides:
        strides = [view.c_strides[i] for i in range(ndim)]
    # Default to unsigned bytes when the view carries no explicit format.
    format = 'B'
    if view.c_format:
        format = rffi.charp2str(view.c_format)
    buf = CPyBuffer(space, view.c_buf, view.c_len, from_ref(space, view.c_obj),
                    format=format, shape=shape, strides=strides,
                    ndim=ndim, itemsize=view.c_itemsize,
                    readonly=widen(view.c_readonly))
    # Ensure view.c_buf is released upon object finalization
    fq.register_finalizer(buf)
    # Allow subclassing W_MemoryView: instantiate the actual (possibly
    # derived) app-level type recorded in the C object's ob_type.
    w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
    w_obj = space.allocate_instance(W_MemoryView, w_type)
    w_obj.__init__(buf)
    track_reference(space, obj, w_obj)
    return w_obj
@slot_function([PyObject], lltype.Void)
def memory_dealloc(space, py_obj):
    # tp_dealloc for memoryview objects: drop the view's owner reference
    # (if any), then free the PyObject itself.
    mem_obj = rffi.cast(PyMemoryViewObject, py_obj)
    if mem_obj.c_view.c_obj:
        decref(space, mem_obj.c_view.c_obj)
    # Clear the pointer so a stray second pass cannot double-decref.
    mem_obj.c_view.c_obj = rffi.cast(PyObject, 0)
    _dealloc(space, py_obj)
def fill_Py_buffer(space, buf, view):
    """Populate the Py_buffer *view* from the interp-level buffer *buf*."""
    # c_buf, c_obj have been filled in
    ndim = buf.getndim()
    view.c_len = buf.getlength()
    view.c_itemsize = buf.getitemsize()
    rffi.setintfield(view, 'c_ndim', ndim)
    # The format string is stored in the fixed-size c__format array embedded
    # in the Py_buffer itself; c_format points into it.
    view.c_format = rffi.cast(rffi.CCHARP, view.c__format)
    fmt = buf.getformat()
    n = Py_MAX_FMT - 1 # NULL terminated buffer
    if len(fmt) > n:
        # Format does not fit into the embedded buffer: raise a Python-level
        # warning; only the first n characters are copied below.
        w_message = space.newbytes("PyPy specific Py_MAX_FMT is %d which is too "
                                   "small for buffer format, %d needed" % (
                                   Py_MAX_FMT, len(fmt)))
        w_stacklevel = space.newint(1)
        w_module = PyImport_Import(space, space.newbytes("warnings"))
        w_warn = space.getattr(w_module, space.newbytes("warn"))
        space.call_function(w_warn, w_message, space.w_None, w_stacklevel)
    else:
        n = len(fmt)
    for i in range(n):
        view.c_format[i] = fmt[i]
    view.c_format[n] = '\x00'
    if ndim > 0:
        # Shape/strides likewise point into fixed-size arrays embedded in
        # the Py_buffer struct (c__shape / c__strides).
        view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape)
        view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides)
        shape = buf.getshape()
        strides = buf.getstrides()
        for i in range(ndim):
            view.c_shape[i] = shape[i]
            view.c_strides[i] = strides[i]
    else:
        # 0-dimensional buffer: no shape/strides arrays at all.
        view.c_shape = lltype.nullptr(Py_ssize_tP.TO)
        view.c_strides = lltype.nullptr(Py_ssize_tP.TO)
    view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
    view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
    return 0
def _IsFortranContiguous(view):
    """Return 1 if *view* describes Fortran-contiguous (column-major) memory.

    Mirrors CPython's contiguity check: walking dimensions left-to-right,
    each stride must equal the item size times the product of all
    faster-varying dimension extents.
    """
    ndim = widen(view.c_ndim)
    if ndim == 0:
        # A 0-d buffer is trivially contiguous.
        return 1
    if not view.c_strides:
        # No strides array: data can only be assumed contiguous in 1-d.
        return ndim == 1
    sd = view.c_itemsize
    if ndim == 1:
        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    # BUG FIX: iterate over the widened ndim (a Python-level int), matching
    # _IsCContiguous below; the original looped over the raw c_ndim field.
    for i in range(ndim):
        dim = view.c_shape[i]
        if dim == 0:
            # Empty buffers are contiguous by definition.
            return 1
        if view.c_strides[i] != sd:
            return 0
        sd *= dim
    return 1
def _IsCContiguous(view):
    """Return 1 if *view* describes C-contiguous (row-major) memory."""
    ndim = widen(view.c_ndim)
    if ndim == 0:
        # A 0-d buffer is trivially contiguous.
        return 1
    if not view.c_strides:
        # No strides array: data can only be assumed contiguous in 1-d.
        return ndim == 1
    expected = view.c_itemsize
    if ndim == 1:
        return view.c_shape[0] == 1 or expected == view.c_strides[0]
    # Walk dimensions right-to-left; each stride must equal the item size
    # times the product of all faster-varying dimension extents.
    for axis in range(ndim - 1, -1, -1):
        extent = view.c_shape[axis]
        if extent == 0:
            # Empty buffers are contiguous by definition.
            return 1
        if view.c_strides[axis] != expected:
            return 0
        expected *= extent
    return 1
@cpython_api([Py_bufferP, lltype.Char], rffi.INT_real, error=CANNOT_FAIL)
def PyBuffer_IsContiguous(space, view, fort):
    """Return 1 if the memory defined by the view is C-style (fort is
    'C') or Fortran-style (fort is 'F') contiguous or either one
    (fort is 'A'). Return 0 otherwise."""
    # Traverse the strides, checking for consistent stride increases from
    # right-to-left (C) or left-to-right (Fortran). Copied from CPython.
    if view.c_suboffsets:
        # Suboffsets imply an indirect (PIL-style) buffer: never contiguous.
        return 0
    if fort == 'C':
        return _IsCContiguous(view)
    if fort == 'F':
        return _IsFortranContiguous(view)
    if fort == 'A':
        return _IsCContiguous(view) or _IsFortranContiguous(view)
    # Unknown order flag.
    return 0
@cpython_api([PyObject], PyObject, result_is_ll=True)
def PyMemoryView_FromObject(space, w_obj):
    # Build the app-level memoryview via the builtin, then hand back a
    # C-level reference whose view records w_obj as the exporting object.
    w_view = space.call_method(space.builtin, "memoryview", w_obj)
    return make_ref(space, w_view, w_obj)
@cpython_api([Py_bufferP], PyObject, result_is_ll=True)
def PyMemoryView_FromBuffer(space, view):
    """Create a memoryview object wrapping the given buffer-info structure view.
    The memoryview object then owns the buffer, which means you shouldn't
    try to release it yourself: it will be released on deallocation of the
    memoryview object."""
    # XXX this should allocate a PyMemoryViewObject and
    # copy view into obj.c_view, without creating a new view.c_obj
    typedescr = get_typedescr(W_MemoryView.typedef)
    py_obj = typedescr.allocate(space, space.w_memoryview)
    py_mem = rffi.cast(PyMemoryViewObject, py_obj)
    mview = py_mem.c_view
    # Shallow-copy the scalar fields of the caller's Py_buffer.
    mview.c_buf = view.c_buf
    mview.c_obj = view.c_obj
    mview.c_len = view.c_len
    mview.c_itemsize = view.c_itemsize
    mview.c_readonly = view.c_readonly
    mview.c_ndim = view.c_ndim
    mview.c_format = view.c_format
    # If the source strides point into its own embedded c__strides array,
    # deep-copy the values into our embedded array; otherwise the strides
    # live in externally owned memory and the pointer can be shared.
    if view.c_strides == rffi.cast(Py_ssize_tP, view.c__strides):
        py_mem.c_view.c_strides = rffi.cast(Py_ssize_tP, py_mem.c_view.c__strides)
        for i in range(view.c_ndim):
            py_mem.c_view.c_strides[i] = view.c_strides[i]
    else:
        # some externally allocated memory chunk
        py_mem.c_view.c_strides = view.c_strides
    # Same deep-copy-vs-share decision for the shape array.
    if view.c_shape == rffi.cast(Py_ssize_tP, view.c__shape):
        py_mem.c_view.c_shape = rffi.cast(Py_ssize_tP, py_mem.c_view.c__shape)
        for i in range(view.c_ndim):
            py_mem.c_view.c_shape[i] = view.c_shape[i]
    else:
        # some externally allocated memory chunk
        py_mem.c_view.c_shape = view.c_shape
    # XXX ignore suboffsets?
    return py_obj
| 38.069565 | 82 | 0.668342 |
bc1bc53909301bdbb2f581959b4822dcd2eea90e | 10,170 | py | Python | library/search_indexes.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | library/search_indexes.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | library/search_indexes.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | from haystack import indexes
from library.models import Item, CaseBrief, SynthesisItem, Synthesis, CollectionTag
# Boost factor applied to name/title fields so matches there rank higher.
name_boost = 1.25
class LibraryCommonIndexPropertiesMixin(object):
    # Field shared by every library index. NOTE(review): no
    # prepare_belongs_to is visible in this file -- presumably subclasses
    # or callers populate it; confirm before relying on it.
    belongs_to = indexes.CharField()
# class ItemIndex(indexes.SearchIndex): # this would disable indexing for this index class.
class ItemIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
    '''
    Item Index - I originally hoped this would be a NON-indexed Index class which is then subclassed into
    apps' index classes and made indexable there so facilitate restricting to app-level access. Not sure what the
    future holds for that concept though.
    For guidance:
    http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
    "indexes.Indexable" - tells django-haystack to index that material. Without
    it the index is ignored.
    Note on assets:
    - I think we can leave the secureasset index largely in place - BUT in the index_queryset
    method we should restrict the selection to secureassets that are part of Library ie: they have
    an entry in ItemAsset relation.
    Tags:
    http://django-haystack.readthedocs.io/en/v2.4.1/searchindex_api.html#prepare-self-object
    '''

    def get_model(self):
        # Model this index covers.
        return Item

    '''
    Item index document :
    '''
    # Primary full-text document, rendered from the template below.
    text = indexes.CharField(document=True, use_template=True)
    document_template_name = 'search/indexes/library/item_text.txt'

    '''
    Item Fields
    '''
    name = indexes.CharField(null=False, model_attr='name', boost=name_boost)
    tags = indexes.FacetMultiValueField()
    collections = indexes.FacetMultiValueField()
    cataloger = indexes.CharField(model_attr='cataloger', null=True)
    reviewer = indexes.CharField(model_attr='reviewer', null=True)
    prefixed_id = indexes.CharField(model_attr='prefixed_id')
    file_names = indexes.FacetMultiValueField()

    '''
    DublinCore Fields
    '''
    # All sourced from the related dublin_core record via model_attr lookups.
    item_type = indexes.FacetCharField(null=True, model_attr='dublin_core__type')
    contributor = indexes.FacetCharField(null=True, model_attr='dublin_core__contributor')
    coverage = indexes.FacetCharField(null=True, model_attr='dublin_core__coverage')
    creator = indexes.FacetCharField(null=True, model_attr='dublin_core__creator')
    date = indexes.FacetDateField(null=True, model_attr='dublin_core__date')
    description = indexes.CharField(null=True, model_attr='dublin_core__description')
    format = indexes.CharField(null=True, model_attr='dublin_core__format')
    identifier = indexes.CharField(null=True, model_attr='dublin_core__identifier')
    language = indexes.FacetCharField(null=True, model_attr='dublin_core__language')
    publisher = indexes.CharField(null=True, model_attr='dublin_core__publisher')
    relation = indexes.CharField(null=True, model_attr='dublin_core__relation')
    rights = indexes.CharField(null=True, model_attr='dublin_core__rights')
    source = indexes.CharField(null=True, model_attr='dublin_core__source')
    subject = indexes.CharField(null=True, model_attr='dublin_core__subject')

    '''
    Holdings Fields
    '''
    item_type_comments = indexes.CharField(null=True, model_attr='holdings__item_type_comments')
    source_type = indexes.FacetCharField(null=True, model_attr='holdings__source_type')
    media_mode = indexes.FacetCharField(null=True, model_attr='holdings__media_mode')
    item_internal_location = indexes.CharField(null=True, model_attr='holdings__item_internal_location')
    digital_file_name_path = indexes.CharField(null=True, model_attr='holdings__digital_file_name_path')
    digital_file_name = indexes.CharField(null=True, model_attr='holdings__digital_file_name')
    digital_file_ocrd = indexes.FacetBooleanField(null=True, model_attr='holdings__digital_file_ocrd')
    digital_file_type_comments = indexes.CharField(null=True, model_attr='holdings__digital_file_type_comments')

    '''
    Review Fields
    '''
    summary = indexes.CharField(null=True, model_attr='review__summary')
    # Multi-value tag fields filled by the prepare_* methods below.
    people_mentioned = indexes.FacetMultiValueField()
    plants = indexes.FacetMultiValueField()
    animals = indexes.FacetMultiValueField()
    mup_category = indexes.FacetCharField(null=True, model_attr='review__mup_category__name')
    use_occupancy_category = indexes.FacetCharField(null=True, model_attr='review__use_occupancy_category__name')
    full_text = indexes.FacetCharField(null=True, model_attr='review__full_text')

    '''
    ResearcherNotes Fields
    '''
    spreadsheet_id = indexes.FacetIntegerField(null=True, model_attr='researcher_notes__spreadsheet_id')
    researcher_notes = indexes.CharField(null=True, model_attr='researcher_notes__researcher_notes')
    actions_needed = indexes.CharField(null=True, model_attr='researcher_notes__actions_needed')
    search_location = indexes.CharField(null=True, model_attr='researcher_notes__search_location')
    search_terms = indexes.FacetCharField(null=True, model_attr='researcher_notes__search_terms')
    search_results = indexes.CharField(null=True, model_attr='researcher_notes__search_results')
    search_identifier = indexes.FacetCharField(null=True, model_attr='researcher_notes__search_identifier')
    cross_reference = indexes.FacetCharField(null=True, model_attr='researcher_notes__cross_reference')
    search_summary = indexes.CharField(null=True, model_attr='researcher_notes__search_summary')

    def index_queryset(self, using=None):
        '''
        Get the default QuerySet to index when doing a full update.
        Subclasses can override this method to avoid indexing certain objects.
        :param using:
        :return:
        '''
        # the super does this:
        # self.get_model().objects.all()
        qs = super(ItemIndex, self).index_queryset(using=using)
        return qs

    def prepare_tags(self, obj):
        # Flatten the taggable manager into a list of tag names for faceting.
        return [tag.name for tag in obj.tags.all()]

    def prepare_collections(self, obj):
        return [tag.name for tag in obj.collections.all()]

    def prepare_file_names(self, obj):
        return [file.name for file in obj.files.all()]

    def prepare_people_mentioned(self, obj):
        # obj.review may be absent; return None (field is effectively null).
        if obj.review:
            return [tag.name for tag in obj.review.people_mentioned.all()]
        else:
            return None

    def prepare_plants(self, obj):
        if obj.review:
            return [tag.name for tag in obj.review.plants.all()]
        else:
            return None

    def prepare_animals(self, obj):
        if obj.review:
            return [tag.name for tag in obj.review.animals.all()]
        else:
            return None
class CaseBriefIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
    '''
    Case Brief Index:
    For guidance:
    http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
    Tags:
    http://django-haystack.readthedocs.io/en/v2.4.1/searchindex_api.html#prepare-self-object
    '''

    def get_model(self):
        # Model this index covers.
        return CaseBrief

    # Main document index:
    text = indexes.CharField(document=True, use_template=True)
    document_template_name = 'search/indexes/library/casebrief_text.txt'

    name = indexes.CharField(null=False, model_attr='story_title', boost=name_boost)  # use story title for boosted name field
    story_title = indexes.CharField(null=False, model_attr='story_title')
    cataloger = indexes.CharField(model_attr='cataloger', null=True)
    reviewer = indexes.CharField(model_attr='reviewer', null=True)
    prefixed_id = indexes.CharField(model_attr='prefixed_id')
    # sources =
    # NOTE(review): the fields below carry no model_attr; haystack resolves
    # them from same-named model attributes -- confirm against the model.
    source_notes = indexes.CharField()
    issues = indexes.CharField()
    facts = indexes.CharField()
    decision = indexes.CharField()
    reasons = indexes.FacetCharField()
    notes = indexes.CharField()
    tags = indexes.FacetMultiValueField()
    keywords = indexes.FacetMultiValueField()

    def prepare_tags(self, obj):
        # Flatten tags into a list of names for faceting.
        return [tag.name for tag in obj.tags.all()]

    def prepare_keywords(self, obj):
        return [keyword.name for keyword in obj.keywords.all()]
class SynthesisIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
    '''
    Synthesis Index:
    For guidance:
    http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
    '''

    def get_model(self):
        # Model this index covers.
        return Synthesis

    # Main document index:
    text = indexes.CharField(document=True, use_template=True)
    document_template_name = 'search/indexes/library/synthesis_text.txt'

    prefixed_id = indexes.CharField(model_attr='prefixed_id')
    # NOTE(review): no model_attr and no prepare_name here -- presumably the
    # Synthesis model exposes a same-named attribute; confirm.
    name = indexes.CharField(boost=name_boost)
class SynthesisItemIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
    '''
    Synthesis Item Index:
    For guidance:
    http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
    '''

    def get_model(self):
        # Model this index covers.
        return SynthesisItem

    # Main document index:
    text = indexes.CharField(document=True, use_template=True)
    document_template_name = 'search/indexes/library/synthesisitem_text.txt'

    subject = indexes.CharField()
    overview = indexes.CharField()
    # items = indexes.CharField()
    # casebriefs = indexes.CharField()
    synthesis_category = indexes.CharField(model_attr='category__name', null=True)
class CollectionTagIndex(LibraryCommonIndexPropertiesMixin, indexes.SearchIndex, indexes.Indexable):
    '''
    Collection Tag Index:
    For guidance:
    http://django-haystack.readthedocs.io/en/latest/best_practices.html#good-search-needs-good-content
    '''

    def get_model(self):
        # Model this index covers.
        return CollectionTag

    # Main document index:
    text = indexes.CharField(document=True, use_template=True)
    document_template_name = 'search/indexes/library/collectiontag_text.txt'

    # NOTE(review): no model_attr here -- haystack resolves from same-named
    # model attributes; confirm against the CollectionTag model.
    name = indexes.CharField(boost=name_boost)
    description = indexes.CharField()
| 39.571984 | 126 | 0.726549 |
3d55fba3865da043632ac5893d58b4b95007fb77 | 1,789 | py | Python | asyncio_pool/mx_asyncgen.py | alvistack/gistart-asyncio-pool | 1da1080594a51e59a4c0d9d58879513e6583fdd2 | [
"MIT"
] | 79 | 2018-07-22T18:41:29.000Z | 2022-03-31T17:48:30.000Z | asyncio_pool/mx_asyncgen.py | alvistack/gistart-asyncio-pool | 1da1080594a51e59a4c0d9d58879513e6583fdd2 | [
"MIT"
] | 5 | 2019-12-08T01:21:51.000Z | 2020-08-05T10:34:48.000Z | asyncio_pool/mx_asyncgen.py | alvistack/gistart-asyncio-pool | 1da1080594a51e59a4c0d9d58879513e6583fdd2 | [
"MIT"
] | 10 | 2019-02-05T20:32:55.000Z | 2021-11-11T12:29:50.000Z | '''Mixin for BaseAioPool with async generator features, python3.6+'''
import asyncio as aio
from .results import getres
async def iterwait(futures, *, flat=True, get_result=getres.flat,
                   timeout=None, yield_when=aio.ALL_COMPLETED, loop=None):
    '''Wraps `asyncio.wait` into asynchronous generator, accessible with
    `async for` syntax. May be useful in conjunction with `spawn_n`.

    `timeout` and `yield_when` parameters are passed to `asyncio.wait`, see
    documentation for this great instrument.

    Returns results for provided futures, as soon as results are ready. If
    `flat` is True -- generates one result at a time (per `async for`). If
    `flat` is False -- generates a list of ready results.
    '''
    _futures = futures[:]  # copy so the caller's list is never mutated
    # BUG FIX: asyncio.wait() lost its ``loop`` parameter in Python 3.10;
    # forward it only when the caller explicitly supplied one, so the
    # default call keeps working on modern interpreters.
    wait_kwargs = dict(timeout=timeout, return_when=yield_when)
    if loop is not None:
        wait_kwargs['loop'] = loop
    while _futures:
        done, _futures = await aio.wait(_futures, **wait_kwargs)
        if flat:
            for fut in done:
                yield get_result(fut)
        else:
            yield [get_result(fut) for fut in done]
class MxAsyncGenPool(object):
    # Asynchronous generator wrapper for asyncio.wait.

    async def itermap(self, fn, iterable, cb=None, ctx=None, *, flat=True,
                      get_result=getres.flat, timeout=None,
                      yield_when=aio.ALL_COMPLETED):
        '''Spawns coroutines created with `fn` for each item in `iterable`, then
        waits for results with `iterwait`. See docs for `map_n` and `iterwait`.
        '''
        spawned = self.map_n(fn, iterable, cb, ctx)
        waiter = iterwait(spawned, flat=flat, timeout=timeout,
                          get_result=get_result, yield_when=yield_when)
        async for chunk in waiter:
            yield chunk  # TODO is it possible to return a generator?
ef40072b93837edd3f0cc3036b0617daf18c9469 | 124 | py | Python | tests/test_inductivestep.py | dmgolembiowski/edbpool | 665eefd31a12e6c469e2eef9e588e169e475ec9e | [
"Apache-2.0"
] | 1 | 2020-05-08T05:57:50.000Z | 2020-05-08T05:57:50.000Z | tests/test_inductivestep.py | dmgolembiowski/edbpool-server | 665eefd31a12e6c469e2eef9e588e169e475ec9e | [
"Apache-2.0"
] | 1 | 2020-05-04T19:54:00.000Z | 2020-05-05T06:57:04.000Z | tests/test_inductivestep.py | dmgolembiowski/edbpool | 665eefd31a12e6c469e2eef9e588e169e475ec9e | [
"Apache-2.0"
] | 1 | 2020-05-08T05:58:11.000Z | 2020-05-08T05:58:11.000Z | from unittest import TestCase
class InductiveStepTestCase(TestCase):
    """Sanity-checks the inductive step of the toy proof: 0 + 1 == 1."""

    def test(self):
        self.assertEqual(1, 0 + 1)
| 17.714286 | 38 | 0.717742 |
cb6984d9f3004230bb8313d19f279620d603f172 | 6,624 | py | Python | justap_server_sdk_python/models/v1_channel.py | justapnet/justap-server-sdk-python | 2d3110c6447833334fa2f7e93ffa63e06913df17 | [
"Apache-2.0"
] | null | null | null | justap_server_sdk_python/models/v1_channel.py | justapnet/justap-server-sdk-python | 2d3110c6447833334fa2f7e93ffa63e06913df17 | [
"Apache-2.0"
] | null | null | null | justap_server_sdk_python/models/v1_channel.py | justapnet/justap-server-sdk-python | 2d3110c6447833334fa2f7e93ffa63e06913df17 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Justap API
欢迎阅读 Justap Api 文档 Justap 是为移动端应用和PC端应用打造的下一代聚合支付SAAS服务平台,通过一个 SDK 即可快速的支持各种形式的应用,并且一次接口完成多个不同支付渠道的接入。平台除了支持服务商子商户模式,同时还对商家自有商户(即自己前往微信、支付宝等机构开户)提供了完整的支持。 感谢您的支持,我们将不断探索,为您提供更优质的服务!如需技术支持可前往商户中心提交工单,支持工程师会尽快与您取得联系! # 文档说明 采用 REST 风格设计。所有接口请求地址都是可预期的以及面向资源的。使用规范的 HTTP 响应代码来表示请求结果的正确或错误信息。使用 HTTP 内置的特性,如 HTTP Authentication 和 HTTP 请求方法让接口易于理解。 ## HTTP 状态码 HTTP 状态码可以用于表明服务的状态。服务器返回的 HTTP 状态码遵循 [RFC 7231](http://tools.ietf.org/html/rfc7231#section-6) 和 [IANA Status Code Registry](http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) 标准。 ## 认证 在调用 API 时,必须提供 API Key 作为每个请求的身份验证。你可以在管理平台内管理你的 API Key。API Key 是商户在系统中的身份标识,请安全存储,确保其不要被泄露。如需获取或更新 API Key ,也可以在商户中心内进行操作。 Api Key 在使用自定义的 HTTP Header 进行传递。 ``` X-Justap-Api-Key ``` API Key 分为 live 和 test 两种模式。分别对应真实交易环境和模拟测试交易环境并且可以实时切换。 测试模式下的 API Key 会模拟交易等请求,但是不会产生任何真实交易行为和费用,便于调试和接入。 **⚠️ 注意**:在使用 live 模式前,需要先前往 `商户中心 -> 应用设置 -> 开发参数` 开启 live 模式。 <SecurityDefinitions /> ## 请求类型 所有的 API 请求只支持 HTTPS 方式调用。 ## 路由参数 路由参数是指出现在 URL 路径中的可变变量。在本文档中,使用 `{}` 包裹的部分。 例如: `{charge_id}`,在实际使用是,需要将 `{charge_id}` 替换为实际值 `charge_8a8sdf888888` ## MIME Type MIME 类型用于指示服务器返回的数据格式。服务器目前默认采用 `application/json`。 例如: ``` application/json ``` ## 错误 服务器使用 HTTP 状态码 (status code) 来表明一个 API 请求的成功或失败状态。返回 HTTP 2XX 表明 API 请求成功。返回 HTTP 4XX 表明在请求 API 时提供了错误信息,例如参数缺失、参数错误、支付渠道错误等。返回 HTTP 5XX 表明 API 请求时,服务器发生了错误。 在返回错误的状态码时,回同时返回一些错误信息提示出错原因。 具体的错误码我们正在整理当中。 ## 分页 所有的 Justap 资源都可以被 list API 方法支持,例如分页 charges 和 refunds。这些 list API 方法拥有相同的数据结构。Justap 是基于 cursor 的分页机制,使用参数 starting_after 来决定列表从何处开始,使用参数 ending_before 来决定列表从何处结束。 ## 参数说明 请求参数中包含的以下字段释义请参考: - REQUIRED: 必填参数 - OPTIONAL: 可选参数,可以在请求当前接口时按需传入 - CONDITIONAL: 在某些条件下必传 - RESPONSE-ONLY: 标示该参数仅在接口返回参数中出现,调用 API 时无需传入 # 如何保证幂等性 如果发生请求超时或服务器内部错误,客户端可能会尝试重发请求。您可以在请求中设置 ClientToken 参数避免多次重试带来重复操作的问题。 ## 什么是幂等性 在数学计算或者计算机科学中,幂等性(idempotence)是指相同操作或资源在一次或多次请求中具有同样效果的作用。幂等性是在分布式系统设计中具有十分重要的地位。 ## 保证幂等性 
通常情况下,客户端只需要在500(InternalErrorInternalError)或503(ServiceUnavailable)错误,或者无法获取响应结果时重试。充实时您可以从客户端生成一个参数值不超过64个的ASCII字符,并将值赋予 ClientToken,保证重试请求的幂等性。 ## ClientToken 详解 ClientToken参数的详细信息如下所示。 - ClientToken 是一个由客户端生成的唯一的、大小写敏感、不超过64个ASCII字符的字符串。例如,`ClientToken=123e4567-e89b-12d3-a456-426655440000`。 - 如果您提供了一个已经使用过的 ClientToken,但其他请求参数**有变化**,则服务器会返回 IdempotentParameterMismatch 的错误代码。 - 如果您提供了一个已经使用过的 ClientToken,且其他请求参数**不变**,则服务器会尝试返回 ClientToken 对应的记录。 ## API列表 以下为部分包含了 ClientToken 参数的API,供您参考。具体哪些API支持 ClientToken 参数请以各 API 文档为准,此处不一一列举。 - [申请退款接口](https://www.justap.cn/docs#operation/TradeService_Refunds) # 签名 为保证安全,JUSTAP 所有接口均需要对请求进行签名。服务器收到请求后进行签名的验证。如果签名验证不通过,将会拒绝处理请求,并返回 401 Unauthorized。 签名算法: ``` base64Encode(hamc-sha256(md5(请求 body + 请求时间戳 + 一次性随机字符串) + 一次性随机字符串)) ``` ## 准备 首先需要在 Justap 创建一个应用,商户需要生成一对 RSA 密钥对,并将公钥配置到 `商户中心 -> 开发配置`。 RSA 可以使用支付宝提供的 [密钥生成工具](https://opendocs.alipay.com/common/02kipl) 来生成。 商户在使用时,可以按照下述步骤生成请求的签名。 ## 算法描述: - 在请求发送前,取完整的**请求 body** - 生成一个随机的32位字符串,得到 **一次性随机字符串** - 获取当前时间的时间戳,得到 **请求时间戳** - 在请求字符串后面拼接上 **请求时间戳** 和 **一次性随机字符串**,得到 **待 Hash 字符串** - 对 **待 Hash 字符串** 计算 md5,得到 **待签名字符串** - **待签名字符串** 后面拼接上 一次性随机字符串,得到完整的 **待签名字符串** - 使用商户 RSA 私钥,对 **待签名字符串** 计算签名,并对 结果 进行 base64 编码,即可得到 **签名** ## 设置HTTP头 Justap 要求请求通过 自定义头部 来传递签名。具体定义如下: ``` X-Justap-Signature: 签名 X-Justap-Request-Time: 请求时间戳 X-Justap-Nonce: 一次性随机字符串 X-Justap-Body-Hash: 待签名字符串 ``` 具体的签名算法实现,可参考我们提供的各语言 SDK。 # WebHooks # noqa: E501
OpenAPI spec version: 1.0
Contact: support@justap.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from justap_server_sdk_python.configuration import Configuration
class V1Channel(object):
    """Enumeration of payment channels supported by the Justap API.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Allowed enum values.
    CHANNEL_INVALID_UNSPECIFIED = "CHANNEL_INVALID_UNSPECIFIED"
    BALANCE = "BALANCE"
    ALIPAYQR = "AlipayQR"
    ALIPAYSCAN = "AlipayScan"
    ALIPAYAPP = "AlipayApp"
    ALIPAYWAP = "AlipayWap"
    ALIPAYPAGE = "AlipayPage"
    ALIPAYFACE = "AlipayFace"
    ALIPAYLITE = "AlipayLite"
    WECHATPAYAPP = "WechatpayApp"
    WECHATPAYJSAPI = "WechatpayJSAPI"
    WECHATPAYH5 = "WechatpayH5"
    WECHATPAYNATIVE = "WechatpayNative"
    WECHATPAYLITE = "WechatpayLite"
    WECHATPAYFACE = "WechatpayFace"
    WECHATPAYSCAN = "WechatpayScan"

    # swagger_types maps attribute name -> type; attribute_map maps attribute
    # name -> JSON key. Both are empty: this model is a pure enum container.
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self, _configuration=None):  # noqa: E501
        """V1Channel - a model defined in Swagger

        :param _configuration: optional client Configuration; a default
            instance is created when omitted.
        """
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # IDIOM FIX: use built-in dict.items() instead of six.iteritems --
        # behavior is identical on Python 3 and it drops an unnecessary
        # third-party dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize models nested inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize models nested as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(V1Channel, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Channel):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Channel):
            return True

        return self.to_dict() != other.to_dict()
| 57.103448 | 3,404 | 0.683273 |
dbd24b1e3d7696b016c49aa64680a5efdfcfcef4 | 5,099 | bzl | Python | dependency_support/repo.bzl | cbalint13/xls | 7033a1482dc6f536d15d46d3954187c9845bba2a | [
"Apache-2.0"
] | null | null | null | dependency_support/repo.bzl | cbalint13/xls | 7033a1482dc6f536d15d46d3954187c9845bba2a | [
"Apache-2.0"
] | null | null | null | dependency_support/repo.bzl | cbalint13/xls | 7033a1482dc6f536d15d46d3954187c9845bba2a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for defining XLS Bazel external system dependencies."""
# Repository names exempt from the multiple-URLs requirement -- presumably
# mirrors are not required for these; confirm against call sites (the
# function using this set is outside this file chunk).
_SINGLE_URL_WHITELIST = depset([
    "arm_compiler",
])
def _is_windows(ctx):
    """Returns True when the repository rule is running on a Windows host."""
    return "windows" in ctx.os.name.lower()
def _wrap_bash_cmd(ctx, cmd):
    # On Windows, run the command through the bash named by BAZEL_SH so that
    # POSIX tools (e.g. rm) are available; on other hosts the command is
    # returned unchanged.
    if _is_windows(ctx):
        bazel_sh = _get_env_var(ctx, "BAZEL_SH")
        if not bazel_sh:
            fail("BAZEL_SH environment variable is not set")
        cmd = [bazel_sh, "-l", "-c", " ".join(["\"%s\"" % s for s in cmd])]
    return cmd
def _get_env_var(ctx, name):
    """Return the value of environment variable *name*, or None when it is unset."""
    return ctx.os.environ.get(name)
# Checks if we should use the system lib instead of the bundled one
def _use_system_lib(ctx, name):
    """Return True when *name* appears in the comma-separated $XLS_SYSTEM_LIBS list."""
    syslibenv = _get_env_var(ctx, "XLS_SYSTEM_LIBS")
    if not syslibenv:
        return False
    requested = [entry.strip() for entry in syslibenv.strip().split(",")]
    return name in requested
# Executes specified command with arguments and calls 'fail' if it exited with
# non-zero code
def _execute_and_check_ret_code(repo_ctx, cmd_and_args):
    """Run *cmd_and_args* (60s timeout) and fail() loudly on a non-zero exit code."""
    result = repo_ctx.execute(cmd_and_args, timeout = 60)
    if result.return_code == 0:
        return
    cmd_str = " ".join([str(x) for x in cmd_and_args])
    fail("Non-zero return code(%s) when executing '%s':\nStdout: %s\nStderr: %s" % (
        result.return_code,
        cmd_str,
        result.stdout,
        result.stderr,
    ))
def _repos_are_siblings():
    # Probe an arbitrary external label: when external repositories are laid
    # out as siblings of the main workspace, workspace_root starts with "../".
    return Label("@foo//bar").workspace_root.startswith("../")
# Apply patches to the repository root directory.
def _apply_patch(ctx, patches):
    """Apply each patch label in *patches* at the repository root (strip = 0)."""
    for patch_label in patches:
        ctx.patch(Label(str(patch_label)), strip = 0)
def _apply_delete(ctx, paths):
    """rm -rf each repo-relative path in *paths*; refuse absolute paths and '..' escapes."""
    for candidate in paths:
        if candidate.startswith("/"):
            fail("refusing to rm -rf path starting with '/': " + candidate)
        if ".." in candidate:
            fail("refusing to rm -rf path containing '..': " + candidate)
    resolved = [ctx.path(candidate) for candidate in paths]
    _execute_and_check_ret_code(ctx, _wrap_bash_cmd(ctx, ["rm", "-rf"] + resolved))
def _xls_http_archive(ctx):
    """Implementation of ``xls_http_archive``.

    Either downloads and extracts the archive (then applies deletes and
    patches), or — when this repository is named in $XLS_SYSTEM_LIBS —
    prepares the workspace to use the system-provided library instead
    (system BUILD file plus symlinked files).
    """
    use_syslib = _use_system_lib(ctx, ctx.attr.name)

    # Work around the bazel bug that redownloads the whole library.
    # Remove this after https://github.com/bazelbuild/bazel/issues/10515 is fixed.
    if ctx.attr.additional_build_files:
        for internal_src in ctx.attr.additional_build_files:
            # Resolving the label registers the file as an input of this rule.
            _ = ctx.path(Label(internal_src))
    # End of workaround.

    if not use_syslib:
        ctx.download_and_extract(
            ctx.attr.urls,
            "",
            ctx.attr.sha256,
            ctx.attr.type,
            ctx.attr.strip_prefix,
        )
        # Deletes run before patches so patches apply to the pruned tree.
        if ctx.attr.delete:
            _apply_delete(ctx, ctx.attr.delete)
        if ctx.attr.patches != None:
            _apply_patch(ctx, ctx.attr.patches)

    if use_syslib and ctx.attr.system_build_file != None:
        # Use BUILD.bazel to avoid conflict with third party projects with
        # BUILD or build (directory) underneath.
        ctx.template("BUILD.bazel", ctx.attr.system_build_file, {
            "%prefix%": ".." if _repos_are_siblings() else "external",
        }, False)
    elif ctx.attr.build_file != None:
        # Use BUILD.bazel to avoid conflict with third party projects with
        # BUILD or build (directory) underneath.
        ctx.template("BUILD.bazel", ctx.attr.build_file, {
            "%prefix%": ".." if _repos_are_siblings() else "external",
        }, False)

    if use_syslib:
        for internal_src, external_dest in ctx.attr.system_link_files.items():
            ctx.symlink(Label(internal_src), ctx.path(external_dest))
    if ctx.attr.additional_build_files:
        for internal_src, external_dest in ctx.attr.additional_build_files.items():
            ctx.symlink(Label(internal_src), ctx.path(external_dest))
# Repository rule backing XLS third-party downloads: fetches and extracts an
# archive (or links in a system-provided copy when this repo is listed in
# $XLS_SYSTEM_LIBS), optionally deleting paths, applying patches, and
# overlaying BUILD files.
xls_http_archive = repository_rule(
    attrs = {
        "sha256": attr.string(mandatory = True),
        "urls": attr.string_list(
            mandatory = True,
            allow_empty = False,
        ),
        "strip_prefix": attr.string(),
        "type": attr.string(),
        # Repo-relative paths to rm -rf after extraction.
        "delete": attr.string_list(),
        # Patch labels applied at the repository root (strip = 0).
        "patches": attr.string_list(),
        "build_file": attr.label(),
        "build_file_content": attr.string(),
        # BUILD file used instead of ``build_file`` when the system library
        # is selected via $XLS_SYSTEM_LIBS.
        "system_build_file": attr.label(),
        "system_link_files": attr.string_dict(),
        "additional_build_files": attr.string_dict(),
    },
    # Re-evaluate the repository when this environment variable changes.
    environ = [
        "XLS_SYSTEM_LIBS",
    ],
    implementation = _xls_http_archive,
)
| 34.924658 | 85 | 0.635811 |
dd025c7d6df2e07e28458d483bdf902a0623b5c7 | 2,352 | py | Python | resamble.py | Asseel-Naji/stutter_AI | 9992eae8b39e7f73f35c51069e2a55f1080f42d4 | [
"MIT"
] | null | null | null | resamble.py | Asseel-Naji/stutter_AI | 9992eae8b39e7f73f35c51069e2a55f1080f42d4 | [
"MIT"
] | null | null | null | resamble.py | Asseel-Naji/stutter_AI | 9992eae8b39e7f73f35c51069e2a55f1080f42d4 | [
"MIT"
] | null | null | null | #I officially GIVE UP ON THIS FUNCTION.
# In order to remove silence I have to split into mono, then resample and then clean
# and EVEN AFTER I managed to do all that, the script I use to remove silence doesn't like
# libosa resambling, however when I manually resamble with audacity it works just fine.
# THIS MAKES NO SENSE WHATSOEVER.
# I THOUGHT THIS WILL BE AN EASY TASK AND NOW I WASTED 3 HOURS!
# import librosa
# import resampy
# import wave
# import scipy
# x, sr_orig = librosa.load("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/right.wav",sr=None)
# y_low = resampy.resample(x, sr_orig, 16000)
# scipy.io.wavfile.write("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/ugh.wav", 16000, y_low)
# # librosa.output.write_wav("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/librosad.wav",y,sr,True)
# # import os
# # import wave
# # def downsampleWav(src, dst, inrate=44100, outrate=16000, inchannels=2, outchannels=1):
# # if not os.path.exists(src):
# # print('Source not found!')
# # return False
# # if not os.path.exists(os.path.dirname(dst)):
# # os.makedirs(os.path.dirname(dst))
# # try:
# # s_read = wave.open(src, 'r')
# # s_write = wave.open(dst, 'w')
# # except:
# # print('Failed to open files!')
# # return False
# # n_frames = s_read.getnframes()
# # data = s_read.readframes(n_frames)
# # try:
# # converted = audioop.ratecv(data, 2, inchannels, inrate, outrate, None)
# # if outchannels == 1:
# # converted = audioop.tomono(converted[0], 2, 1, 0)
# # except:
# # print('Failed to downsample wav')
# # return False
# # try:
# # s_write.setparams((outchannels, 2, outrate, 0, 'NONE', 'Uncompressed'))
# # s_write.writeframes(converted)
# # except:
# # print('Failed to write wav')
# # return False
# # try:
# # s_read.close()
# # s_write.close()
# # except:
# # print('Failed to close wav files')
# # return False
# # return True
# # downsampleWav("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/test.wav","/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/downed.wav") | 36.75 | 190 | 0.650935 |
917b7d755599e783c340a33573e71777747e148a | 58 | py | Python | test/unit/test_topologies/test_abc/__init__.py | MichalKononenko/FoundationsOfMechanics | d1ca2cc961a98be4761b3938f3a7f58d82daed62 | [
"MIT"
] | null | null | null | test/unit/test_topologies/test_abc/__init__.py | MichalKononenko/FoundationsOfMechanics | d1ca2cc961a98be4761b3938f3a7f58d82daed62 | [
"MIT"
] | null | null | null | test/unit/test_topologies/test_abc/__init__.py | MichalKononenko/FoundationsOfMechanics | d1ca2cc961a98be4761b3938f3a7f58d82daed62 | [
"MIT"
] | null | null | null | """
Contains unit tests for :mod:`fom.topologies.abc`
"""
| 14.5 | 49 | 0.672414 |
1d231744a682d25ba943d36e8380290c3d7f5a39 | 2,898 | py | Python | tests/test_train_classifier.py | LesterFreamon/disaster_response_pipeline | 388cdb2fe74da8dc8c4cea14298ef3fd36348dc6 | [
"MIT"
] | null | null | null | tests/test_train_classifier.py | LesterFreamon/disaster_response_pipeline | 388cdb2fe74da8dc8c4cea14298ef3fd36348dc6 | [
"MIT"
] | null | null | null | tests/test_train_classifier.py | LesterFreamon/disaster_response_pipeline | 388cdb2fe74da8dc8c4cea14298ef3fd36348dc6 | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
from ..src.train_classifier import (
_split_to_feature_and_targets,
evaluate_model,
SpecialCharExtractor,
tokenize
)
from .helpers import sort_and_assert_frame_equal
class TestTrainClassifier(unittest.TestCase):
    """Unit tests for the ML-pipeline helpers imported from ``train_classifier``."""

    def test_split_to_feature_and_targets(self):
        # The 'id' column becomes the index; the listed feature columns go to X
        # and the remaining column(s) to Y.
        # NOTE(review): 'who_knows' is requested but absent from expected_X —
        # presumably the helper filters the requested columns; confirm.
        df = pd.DataFrame(
            [
                [1, 'hello', 'greeting', 'something', 'nice'],
                [2, 'bye', 'greeting', 'something', 'not nice'],
                [3, 'you!', 'command', 'something', 'not nice']
            ]
            ,
            columns=['id', 'message', 'genre', 'who_knows', 'target']
        )
        output_X, output_Y = _split_to_feature_and_targets(df, ['message', 'genre', 'who_knows'])
        expected_X = pd.DataFrame([
            ['hello', 'greeting'],
            ['bye', 'greeting'],
            ['you!', 'command']
        ],
            columns=['message', 'genre'],
            index=pd.Index([1, 2, 3], name='id')
        )
        expected_Y = pd.DataFrame([
            ['nice'],
            ['not nice'],
            ['not nice']
        ],
            columns=['target'],
            index=pd.Index([1, 2, 3], name='id')
        )
        sort_and_assert_frame_equal(expected_X, output_X)
        sort_and_assert_frame_equal(expected_Y, output_Y)

    def test_evaluate_model(self):
        # One metrics line per target column; only column 'b' has a mismatch
        # (last row), which shows up in its accuracy/recall/F1.
        Y_test = pd.DataFrame(
            [
                [0, 1, 1],
                [0, 1, 1],
                [0, 1, 0],
                [0, 1, 0]
            ],
            columns=['a', 'b', 'c']
        )
        Y_pred = pd.DataFrame(
            [
                [0, 1, 1],
                [0, 1, 1],
                [0, 1, 0],
                [0, 0, 0]
            ],
            columns=['a', 'b', 'c']
        ).values
        output = evaluate_model(Y_test, Y_pred)
        expected = [
            'a: Accuracy: 1.000 Precision: 1.0 Recall: 1.000 F1_score: 1.000',
            'b: Accuracy: 0.750 Precision: 1.0 Recall: 0.750 F1_score: 0.857',
            'c: Accuracy: 1.000 Precision: 1.0 Recall: 1.000 F1_score: 1.000'
        ]
        self.assertListEqual(expected, output)

    def test_SpecialCharExtractor(self):
        # Features per input string: '?' present, '!' present, comma count,
        # and total text length.
        special_char_extractor = SpecialCharExtractor()
        output = special_char_extractor.transform(['what are you filming this for?!'])
        expected = pd.DataFrame(  # question_mark, exclamation_mark, number_of_commas, text_len
            [
                [1, 1, 0, 31],
            ],
            columns=[0, 1, 2, 3]
        )
        sort_and_assert_frame_equal(expected, output)

    def test_tokenize(self):
        # Stopwords are dropped, tokens are stemmed/lemmatized, and URLs are
        # replaced by the 'urlplaceholder' token.
        output = tokenize(
            'We need many more people here right now. Stop filming http bit.ly 7ENICX'
        )
        expected = ['need', 'many', 'people', 'right', 'stop', 'film', 'urlplaceholder']
        self.assertListEqual(expected, output)
| 29.876289 | 97 | 0.507246 |
fc36e7b0fb70defe5246149030c1149bd3ef034b | 5,053 | py | Python | tests/functional/test_hands.py | gkuznetsov/veles.znicz | 73147839f80e1eec3a627be8cc2a11211d74a801 | [
"Apache-2.0"
] | 28 | 2015-07-20T12:02:02.000Z | 2020-10-08T02:50:28.000Z | tests/functional/test_hands.py | gkuznetsov/veles.znicz | 73147839f80e1eec3a627be8cc2a11211d74a801 | [
"Apache-2.0"
] | 39 | 2015-08-11T06:56:51.000Z | 2016-05-11T15:02:05.000Z | tests/functional/test_hands.py | gkuznetsov/veles.znicz | 73147839f80e1eec3a627be8cc2a11211d74a801 | [
"Apache-2.0"
] | 27 | 2015-07-20T09:45:34.000Z | 2021-04-15T01:03:54.000Z | #!/usr/bin/env python3
# -*-coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on April 2, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import os
from veles.config import root
from veles.snapshotter import SnapshotterToFile
from veles.tests import timeout, multi_device
from veles.znicz.tests.functional import StandardTest
import veles.znicz.tests.research.Hands.hands as hands
class TestHands(StandardTest):
    """Functional test of the Hands sample workflow: train for two epochs,
    snapshot, then resume from the snapshot and train to epoch nine."""

    @classmethod
    def setUpClass(cls):
        # Configure the shared ``root.hands`` registry used by the workflow:
        # dataset locations, downloader source, loader/decision/snapshotter
        # settings, and the two-layer (tanh -> softmax) topology.
        train_dir = [
            os.path.join(root.common.dirs.datasets, "hands/Training")]
        validation_dir = [
            os.path.join(root.common.dirs.datasets, "hands/Testing")]
        root.hands.update({
            "decision": {"fail_iterations": 100, "max_epochs": 2},
            "downloader": {
                "url":
                "https://s3-eu-west-1.amazonaws.com/veles.forge/Hands/"
                "hands.tar",
                "directory": root.common.dirs.datasets,
                "files": ["hands"]},
            "loss_function": "softmax",
            "loader_name": "hands_loader",
            "snapshotter": {"prefix": "hands", "interval": 2,
                            "time_interval": 0},
            "loader": {"minibatch_size": 40, "train_paths": train_dir,
                       "force_numpy": False, "color_space": "GRAY",
                       "background_color": (0,),
                       "normalization_type": "linear",
                       "validation_paths": validation_dir},
            "layers": [{"type": "all2all_tanh",
                        "->": {"output_sample_shape": 30},
                        "<-": {"learning_rate": 0.008, "weights_decay": 0.0}},
                       {"type": "softmax",
                        "<-": {"learning_rate": 0.008,
                               "weights_decay": 0.0}}]})

    @timeout(500)
    @multi_device()
    def test_hands(self):
        self.info("Will test hands workflow")
        workflow = hands.HandsWorkflow(
            self.parent,
            layers=root.hands.layers,
            decision_config=root.hands.decision,
            snapshotter_config=root.hands.snapshotter,
            loader_config=root.hands.loader,
            downloader_config=root.hands.downloader,
            loss_function=root.hands.loss_function,
            loader_name=root.hands.loader_name)
        # Evaluator must share the loader's labels buffer both before and
        # after initialization.
        self.assertEqual(workflow.evaluator.labels,
                         workflow.loader.minibatch_labels)
        workflow.initialize(device=self.device)
        self.assertEqual(workflow.evaluator.labels,
                         workflow.loader.minibatch_labels)
        workflow.run()
        self.assertIsNone(workflow.thread_pool.failure)
        file_name = workflow.snapshotter.destination
        # Expected error count / epoch number after the initial 2-epoch run.
        err = workflow.decision.epoch_n_err[1]
        self.assertEqual(err, 570)
        self.assertEqual(2, workflow.loader.epoch_number)
        self.info("Will load workflow from %s", file_name)
        # Resume from the snapshot and continue training up to 9 epochs.
        workflow_from_snapshot = SnapshotterToFile.import_(file_name)
        workflow_from_snapshot.workflow = self.parent
        self.assertTrue(workflow_from_snapshot.decision.epoch_ended)
        workflow_from_snapshot.decision.max_epochs = 9
        workflow_from_snapshot.decision.complete <<= False
        self.assertEqual(workflow_from_snapshot.evaluator.labels,
                         workflow_from_snapshot.loader.minibatch_labels)
        workflow_from_snapshot.initialize(device=self.device, snapshot=True)
        self.assertEqual(workflow_from_snapshot.evaluator.labels,
                         workflow_from_snapshot.loader.minibatch_labels)
        workflow_from_snapshot.run()
        self.assertIsNone(workflow_from_snapshot.thread_pool.failure)
        # Error count should have improved after the additional epochs.
        err = workflow_from_snapshot.decision.epoch_n_err[1]
        self.assertEqual(err, 506)
        self.assertEqual(9, workflow_from_snapshot.loader.epoch_number)
        self.info("All Ok")
if __name__ == "__main__":
StandardTest.main()
| 40.103175 | 79 | 0.605779 |
ebac8fbe63ea1d58d1903fca95598c17e219bab4 | 2,146 | py | Python | temperature_processing_lt4.py | mknowlton15/genesis | 80b1baa621d7dbfea84a3b128b565dd92918d55f | [
"MIT"
] | null | null | null | temperature_processing_lt4.py | mknowlton15/genesis | 80b1baa621d7dbfea84a3b128b565dd92918d55f | [
"MIT"
] | null | null | null | temperature_processing_lt4.py | mknowlton15/genesis | 80b1baa621d7dbfea84a3b128b565dd92918d55f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Steven Guinn
#02/24/2016
#University of Maryland Center for Environmental Science
#Appalachian Laboratory http://www.al.umces.edu/
#301 689 7146
#301 Braddock Rd Frostburg MD 215325
import sys,os
# Import arcpy module
import arcpy
def cleanMakeDir(out_dir,mode=0777):
    # Ensure *out_dir* exists and is empty: wipe its contents when it already
    # exists, otherwise create it with the given mode (Python 2 octal literal).
    # NOTE(review): chmod is applied only to a freshly created directory, not
    # to a pre-existing one — confirm that is intended.
    if os.path.exists(out_dir):
        print "Output Directory exist!"
        removeall(out_dir) #this erases the contents of the directory
        print "Cleaned Output Directory!"
    else:
        os.mkdir(out_dir)
        os.chmod(out_dir,mode)
        print "Created Output Directory!"
def rmgeneric(path, __func__):
try:
__func__(path)
print 'Removed ', path
except OSError, (errno, strerror):
print "Error" % {'path' : path, 'error': strerror }
def removeall(path):
    # Recursively delete the *contents* of directory *path*; the directory
    # itself is left in place. Files are removed directly; subdirectories are
    # emptied first (recursive call) and then removed with os.rmdir.
    if not os.path.isdir(path):
        return
    files=os.listdir(path)
    for x in files:
        fullpath=os.path.join(path, x)
        if os.path.isfile(fullpath):
            f=os.remove
            rmgeneric(fullpath, f)
        elif os.path.isdir(fullpath):
            removeall(fullpath)
            f=os.rmdir
            rmgeneric(fullpath, f)
def listFiles(dir):
    """Yield the full path of every file beneath *dir*, walking recursively."""
    for root, subFolders, files in os.walk(dir):
        for filename in files:
            yield os.path.join(root, filename)
# Main script (Python 2): for every Landsat *.gz archive under the working
# directory, extract it with 7-Zip, pull out the band-6 (thermal) GeoTIFFs,
# and composite them into one TIFF per scene in the output directory.
# Usage: temperature_processing_lt4.py <working_dir> <output_dir>
wd=sys.argv[1]
outdir=sys.argv[2]
os.chdir(wd)
# get the list of compressed archives
gz_list=list()
tree=listFiles(wd)
cleanMakeDir(outdir)
for t in tree:
    if os.path.splitext(t)[1]=='.gz':
        gz_list.append(os.path.basename(t))
gz_list.sort()
# NOTE: the stray space in "os.path. join" is harmless (attribute access
# ignores whitespace) but looks like a typo.
_temp_dir = os.path. join(wd,'_temp')
for gz in gz_list:
    # Re-create the scratch dir for each archive (two-stage .gz -> .tar).
    cleanMakeDir(_temp_dir)
    _command='7z.exe x '+gz+' -o'+_temp_dir
    print _command
    os.system(_command)
    tar_name = os.path.join(_temp_dir,os.path.splitext(gz)[0])
    # Extract only band-6 files from the inner tar.
    _command='7z.exe x '+tar_name+' -o'+_temp_dir+' *band6*'
    print _command
    os.system(_command)
    tif_list=list()
    tiffs=listFiles(_temp_dir)
    for t in tiffs:
        if os.path.splitext(t)[1]=='.tif':
            tif_list.append(t)
    # Scene name is the archive basename up to the first '-'.
    # NOTE(review): assumes exactly four band-6 TIFFs per scene — confirm.
    scene_name_tif=os.path.join(outdir,(os.path.basename(gz).split('-')[0]+'.tif'))
    arcpy.CompositeBands_management(tif_list[0]+';'+tif_list[1]+';'+tif_list[2]+';'+tif_list[3], scene_name_tif)
| 27.87013 | 110 | 0.678938 |
fec3b38203c93a020f2f12f565eb5add9c2c0c1f | 296 | py | Python | benchmark/python_fastapi/main.py | SABER-labs/Drogon-torch-serve | eb61472abe32f769daa870278685f0342a00b292 | [
"MIT"
] | 15 | 2022-01-15T03:07:12.000Z | 2022-02-18T15:30:06.000Z | benchmark/python_fastapi/main.py | SABER-labs/Drogon-torch-serve | eb61472abe32f769daa870278685f0342a00b292 | [
"MIT"
] | 1 | 2022-01-27T11:13:49.000Z | 2022-01-28T05:42:24.000Z | benchmark/python_fastapi/main.py | SABER-labs/Drogon-torch-serve | eb61472abe32f769daa870278685f0342a00b292 | [
"MIT"
] | 2 | 2022-01-17T02:55:34.000Z | 2022-01-19T05:46:30.000Z | import utils
from fastapi import FastAPI, File, UploadFile
from fastapi.staticfiles import StaticFiles
# FastAPI application serving the image-classification benchmark endpoints.
app = FastAPI()

@app.get("/")
def home():
    # Liveness probe: always responds with the literal body 'OK'.
    return 'OK'

@app.post("/classify")
async def classify(image: UploadFile = File(...)):
    # Delegate inference to the project helper; is_api=True presumably selects
    # the API-style response shape — confirm in utils.get_result.
    return utils.get_result(image_file=image, is_api=True)
72a26975b80b67478f0d9a29d9a4b42db8d81674 | 2,008 | py | Python | commands/avatar.py | brunohpaiva/fire-bot | de9d67043eef1023d5bd40f3aaa249111fcd253f | [
"MIT"
] | 1 | 2021-09-24T22:48:33.000Z | 2021-09-24T22:48:33.000Z | commands/avatar.py | brunohpaiva/fire-bot | de9d67043eef1023d5bd40f3aaa249111fcd253f | [
"MIT"
] | 1 | 2022-01-13T04:05:04.000Z | 2022-01-13T04:05:04.000Z | commands/avatar.py | brunohpaiva/fire-bot | de9d67043eef1023d5bd40f3aaa249111fcd253f | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fire.converters import Member, UserWithFallback
from discord.ext import commands
import traceback
import discord
import typing
class Avatar(commands.Cog):
    """Cog providing the ``avatar`` command for displaying a user's avatar."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['av'])
    async def avatar(self, ctx, *, user: typing.Union[Member, UserWithFallback] = None):
        """Send an embed with *user*'s avatar (defaults to the command author)."""
        if not user:
            user = ctx.author
        # Robustness fix: resolve the guild member only when a guild exists.
        # Previously ``member`` was bound solely inside ``if ctx.guild:``, so
        # the command could not safely build the embed outside a guild (DMs).
        member = ctx.guild.get_member(user.id) if ctx.guild else None
        await ctx.send(embed=discord.Embed(
            # Use the member's role colour when resolvable, else the author's.
            color=member.color if member else ctx.author.color
        ).set_image(
            url=str(user.avatar_url_as(static_format='png', size=2048))
        ))
def setup(bot):
    """Register the Avatar cog on *bot*, logging success or failure."""
    try:
        cog = Avatar(bot)
        bot.add_cog(cog)
        bot.logger.info(f'$GREENLoaded $CYAN"avatar" $GREENcommand!')
    except Exception as e:
        bot.logger.error(
            f'$REDError while adding command $CYAN"avatar"', exc_info=e)
| 40.16 | 133 | 0.725598 |
52dd7932165b262aa084081afd98829e13ec6c91 | 1,245 | py | Python | tests/flows/test_mixedsource.py | mfkiwl/siliconcompiler | 49a16d9a07c526821afe1ce2f2d77394e439ca05 | [
"Apache-2.0"
] | 424 | 2021-12-04T15:45:12.000Z | 2022-03-31T20:27:55.000Z | tests/flows/test_mixedsource.py | mfkiwl/siliconcompiler | 49a16d9a07c526821afe1ce2f2d77394e439ca05 | [
"Apache-2.0"
] | 105 | 2021-12-03T21:25:29.000Z | 2022-03-31T22:36:59.000Z | tests/flows/test_mixedsource.py | mfkiwl/siliconcompiler | 49a16d9a07c526821afe1ce2f2d77394e439ca05 | [
"Apache-2.0"
] | 38 | 2021-12-04T21:26:20.000Z | 2022-03-21T02:39:29.000Z | import os
import pytest
import siliconcompiler
##################################
@pytest.mark.skip(reason="Mixed-source functionality is still a work-in-progress.")
@pytest.mark.eda
@pytest.mark.quick
def test_mixedsrc_local_py(scroot):
    '''Basic Python API test: build the mixed-source example using only Python code.
    '''
    # Create instance of Chip class
    chip = siliconcompiler.Chip()
    ex_dir = os.path.join(scroot, 'examples', 'mixed-source')
    # Inserting value into configuration: one VHDL and one Verilog source,
    # with 'eq2' (the Verilog top) as the design.
    chip.add('source', os.path.join(ex_dir, 'eq1.vhd'))
    chip.add('source', os.path.join(ex_dir, 'eq2.v'))
    chip.set('design', 'eq2')
    chip.target("freepdk45")
    # Linear flow: Verilog import -> VHDL import -> synthesis.
    flow = [
        ('import', 'morty'),
        ('importvhdl', 'ghdl'),
        ('syn', 'yosys')
    ]
    for i, (step, tool) in enumerate(flow):
        # Chain each step's input to the previous step.
        if i > 0:
            chip.add('flowgraph', step, 'input', flow[i-1][0])
        chip.set('flowgraph', step, 'tool', tool)
    # Run the chip's build process synchronously.
    chip.run()
    # Verify that the Verilog netlist is generated
    assert os.path.isfile('build/eq2/job0/syn/0/outputs/eq2.v')

if __name__ == "__main__":
    from tests.fixtures import scroot
    test_mixedsrc_local_py(scroot())
| 27.666667 | 84 | 0.627309 |
ecbf2633b378cf15838b4a6a069c0cce69d0b356 | 76,207 | py | Python | airflow/models/dag.py | ternarydata/airflow | 1e3cdddcd87be3c0f11b43efea11cdbddaff4470 | [
"Apache-2.0"
] | null | null | null | airflow/models/dag.py | ternarydata/airflow | 1e3cdddcd87be3c0f11b43efea11cdbddaff4470 | [
"Apache-2.0"
] | null | null | null | airflow/models/dag.py | ternarydata/airflow | 1e3cdddcd87be3c0f11b43efea11cdbddaff4470 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import functools
import logging
import os
import pickle
import re
import sys
import traceback
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Callable, Collection, Dict, FrozenSet, Iterable, List, Optional, Set, Type, Union
import jinja2
import pendulum
from croniter import croniter
from dateutil.relativedelta import relativedelta
from sqlalchemy import Boolean, Column, ForeignKey, Index, Integer, String, Text, func, or_
from sqlalchemy.orm import backref, joinedload, relationship
from sqlalchemy.orm.session import Session
from airflow import settings, utils
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag
from airflow.exceptions import (
AirflowDagCycleException, AirflowException, DagNotFound, DuplicateTaskIdFound, TaskNotFound,
)
from airflow.models.base import ID_LEN, Base
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagbag import DagBag
from airflow.models.dagpickle import DagPickle
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance, clear_task_instances
from airflow.settings import MIN_SERIALIZED_DAG_UPDATE_INTERVAL, STORE_SERIALIZED_DAGS
from airflow.utils import timezone
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.file import correct_maybe_zipped
from airflow.utils.helpers import validate_key
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import Interval, UtcDateTime
from airflow.utils.state import State
log = logging.getLogger(__name__)

# Accepted types for a DAG's ``schedule_interval``: a cron string / preset
# name, a fixed timedelta, or a calendar-aware relativedelta.
ScheduleInterval = Union[str, timedelta, relativedelta]

# Valid values for the webserver's per-DAG default view and graph orientation.
DEFAULT_VIEW_PRESETS = ['tree', 'graph', 'duration', 'gantt', 'landing_times']
ORIENTATION_PRESETS = ['LR', 'TB', 'RL', 'BT']
def get_last_dagrun(dag_id, session, include_externally_triggered=False):
    """
    Return the most recent DagRun for ``dag_id``, or None when the dag has
    never run. Any run type qualifies (e.g. scheduled or backfilled);
    externally triggered runs are excluded unless
    ``include_externally_triggered`` is True. Overridden DagRuns are ignored.
    """
    query = session.query(DagRun).filter(DagRun.dag_id == dag_id)
    if not include_externally_triggered:
        # Deliberate ``== False``: SQLAlchemy overloads the comparison to
        # build the SQL predicate.
        query = query.filter(DagRun.external_trigger == False)  # noqa pylint: disable=singleton-comparison
    return query.order_by(DagRun.execution_date.desc()).first()
@functools.total_ordering
class DAG(BaseDag, LoggingMixin):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
dependencies. A dag also has a schedule, a start date and an end date
(optional). For each schedule, (say daily or hourly), the DAG needs to run
each individual tasks as their dependencies are met. Certain tasks have
the property of depending on their own past, meaning that they can't run
until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
:param dag_id: The id of the DAG; must consist exclusively of alphanumeric
characters, dashes, dots and underscores (all ASCII)
:type dag_id: str
:param description: The description for the DAG to e.g. be shown on the webserver
:type description: str
:param schedule_interval: Defines how often that DAG runs, this
timedelta object gets added to your latest task instance's
execution_date to figure out the next schedule
:type schedule_interval: datetime.timedelta or
dateutil.relativedelta.relativedelta or str that acts as a cron
expression
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
:param end_date: A date beyond which your DAG won't run, leave to None
for open ended scheduling
:type end_date: datetime.datetime
:param template_searchpath: This list of folders (non relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
:type template_searchpath: str or list[str]
:param template_undefined: Template undefined type.
:type template_undefined: jinja2.Undefined
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:type user_defined_macros: dict
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:type user_defined_filters: dict
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:type default_args: dict
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:type params: dict
:param concurrency: the number of task instances allowed to run
concurrently
:type concurrency: int
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:type max_active_runs: int
:param dagrun_timeout: specify how long a DagRun should be up before
timing out / failing, so that new DagRuns can be created. The timeout
is only enforced for scheduled DagRuns, and only once the
# of active DagRuns == max_active_runs.
:type dagrun_timeout: datetime.timedelta
:param sla_miss_callback: specify a function to call when reporting SLA
timeouts.
:type sla_miss_callback: types.FunctionType
:param default_view: Specify DAG default view (tree, graph, duration,
gantt, landing_times), default tree
:type default_view: str
:param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT), default LR
:type orientation: str
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
:type catchup: bool
:param on_failure_callback: A function to be called when a DagRun of this dag fails.
A context dictionary is passed as a single parameter to this function.
:type on_failure_callback: callable
:param on_success_callback: Much like the ``on_failure_callback`` except
that it is executed when the dag succeeds.
:type on_success_callback: callable
:param access_control: Specify optional DAG-level permissions, e.g.,
"{'role1': {'can_dag_read'}, 'role2': {'can_dag_read', 'can_dag_edit'}}"
:type access_control: dict
:param is_paused_upon_creation: Specifies if the dag is paused when created for the first time.
If the dag exists already, this flag will be ignored. If this optional parameter
is not specified, the global config setting will be used.
:type is_paused_upon_creation: bool or None
:param jinja_environment_kwargs: additional configuration options to be passed to Jinja
``Environment`` for template rendering
**Example**: to avoid Jinja from removing a trailing newline from template strings ::
DAG(dag_id='my-dag',
jinja_environment_kwargs={
'keep_trailing_newline': True,
# some other jinja2 Environment options here
}
)
**See**: `Jinja Environment documentation
<https://jinja.palletsprojects.com/en/master/api/#jinja2.Environment>`_
:type jinja_environment_kwargs: dict
:param tags: List of tags to help filtering DAGS in the UI.
:type tags: List[str]
"""
_comps = {
'dag_id',
'task_ids',
'parent_dag',
'start_date',
'schedule_interval',
'full_filepath',
'template_searchpath',
'last_loaded',
}
__serialized_fields: Optional[FrozenSet[str]] = None
    def __init__(
            self,
            dag_id: str,
            description: str = '',
            schedule_interval: Optional[ScheduleInterval] = timedelta(days=1),
            start_date: Optional[datetime] = None,
            end_date: Optional[datetime] = None,
            full_filepath: Optional[str] = None,
            template_searchpath: Optional[Union[str, Iterable[str]]] = None,
            template_undefined: Type[jinja2.Undefined] = jinja2.Undefined,
            user_defined_macros: Optional[Dict] = None,
            user_defined_filters: Optional[Dict] = None,
            default_args: Optional[Dict] = None,
            concurrency: int = conf.getint('core', 'dag_concurrency'),
            max_active_runs: int = conf.getint('core', 'max_active_runs_per_dag'),
            dagrun_timeout: Optional[timedelta] = None,
            sla_miss_callback: Optional[Callable] = None,
            default_view: str = conf.get('webserver', 'dag_default_view').lower(),
            orientation: str = conf.get('webserver', 'dag_orientation'),
            catchup: bool = conf.getboolean('scheduler', 'catchup_by_default'),
            on_success_callback: Optional[Callable] = None,
            on_failure_callback: Optional[Callable] = None,
            doc_md: Optional[str] = None,
            params: Optional[Dict] = None,
            access_control: Optional[Dict] = None,
            is_paused_upon_creation: Optional[bool] = None,
            jinja_environment_kwargs: Optional[Dict] = None,
            tags: Optional[List[str]] = None
    ):
        """Initialize a DAG; see the class docstring for parameter semantics."""
        self.user_defined_macros = user_defined_macros
        self.user_defined_filters = user_defined_filters
        # deepcopy so later mutations of self.default_args do not leak back
        # into the caller's dict.
        self.default_args = copy.deepcopy(default_args or {})
        self.params = params or {}
        # merging potentially conflicting default_args['params'] into params
        if 'params' in self.default_args:
            self.params.update(self.default_args['params'])
            del self.default_args['params']
        validate_key(dag_id)
        # Properties from BaseDag
        self._dag_id = dag_id
        self._full_filepath = full_filepath if full_filepath else ''
        self._concurrency = concurrency
        self._pickle_id = None
        self._description = description
        # set file location to caller source path
        self.fileloc = sys._getframe().f_back.f_code.co_filename
        self.task_dict: Dict[str, BaseOperator] = dict()
        # set timezone from start_date; default_args['start_date'] may be a
        # string and is parsed in place before its tzinfo is consulted
        if start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif 'start_date' in self.default_args and self.default_args['start_date']:
            if isinstance(self.default_args['start_date'], str):
                self.default_args['start_date'] = (
                    timezone.parse(self.default_args['start_date'])
                )
            self.timezone = self.default_args['start_date'].tzinfo
        # fall back to the globally configured timezone when none was derived
        if not hasattr(self, 'timezone') or not self.timezone:
            self.timezone = settings.TIMEZONE
        # Apply the timezone we settled on to end_date if it wasn't supplied
        if 'end_date' in self.default_args and self.default_args['end_date']:
            if isinstance(self.default_args['end_date'], str):
                self.default_args['end_date'] = (
                    timezone.parse(self.default_args['end_date'], timezone=self.timezone)
                )
        self.start_date = timezone.convert_to_utc(start_date)
        self.end_date = timezone.convert_to_utc(end_date)
        # also convert tasks
        if 'start_date' in self.default_args:
            self.default_args['start_date'] = (
                timezone.convert_to_utc(self.default_args['start_date'])
            )
        if 'end_date' in self.default_args:
            self.default_args['end_date'] = (
                timezone.convert_to_utc(self.default_args['end_date'])
            )
        self.schedule_interval = schedule_interval
        # normalize the interval: cron preset names map to cron expressions,
        # '@once' maps to None (no recurring schedule)
        if isinstance(schedule_interval, str) and schedule_interval in cron_presets:
            self._schedule_interval = cron_presets.get(schedule_interval)  # type: Optional[ScheduleInterval]
        elif schedule_interval == '@once':
            self._schedule_interval = None
        else:
            self._schedule_interval = schedule_interval
        if isinstance(template_searchpath, str):
            template_searchpath = [template_searchpath]
        self.template_searchpath = template_searchpath
        self.template_undefined = template_undefined
        self.parent_dag = None  # Gets set when DAGs are loaded
        self.last_loaded = timezone.utcnow()
        # sanitized id for backends where '.' is not allowed
        self.safe_dag_id = dag_id.replace('.', '__dot__')
        self.max_active_runs = max_active_runs
        self.dagrun_timeout = dagrun_timeout
        self.sla_miss_callback = sla_miss_callback
        # validate UI settings against the supported presets, fail fast
        if default_view in DEFAULT_VIEW_PRESETS:
            self._default_view = default_view
        else:
            raise AirflowException(f'Invalid values of dag.default_view: only support '
                                   f'{DEFAULT_VIEW_PRESETS}, but get {default_view}')
        if orientation in ORIENTATION_PRESETS:
            self.orientation = orientation
        else:
            raise AirflowException(f'Invalid values of dag.orientation: only support '
                                   f'{ORIENTATION_PRESETS}, but get {orientation}')
        self.catchup = catchup
        self.is_subdag = False  # DagBag.bag_dag() will set this to True if appropriate
        self.partial = False
        self.on_success_callback = on_success_callback
        self.on_failure_callback = on_failure_callback
        self.doc_md = doc_md
        self._access_control = access_control
        self.is_paused_upon_creation = is_paused_upon_creation
        self.jinja_environment_kwargs = jinja_environment_kwargs
        self.tags = tags
def __repr__(self):
return "<DAG: {self.dag_id}>".format(self=self)
def __eq__(self, other):
if (type(self) == type(other) and
self.dag_id == other.dag_id):
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
return all(getattr(self, c, None) == getattr(other, c, None) for c in self._comps)
return False
    def __ne__(self, other):
        # Defined explicitly so inequality stays consistent with __eq__.
        return not self == other
    def __lt__(self, other):
        # Order DAGs alphabetically by dag_id (used when sorting DAG lists).
        return self.dag_id < other.dag_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
# task_ids returns a list and lists can't be hashed
if c == 'task_ids':
val = tuple(self.task_dict.keys())
else:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
    # Context Manager -----------------------------------------------
    def __enter__(self):
        # Register this DAG as the "current" DAG so operators created inside
        # a ``with dag:`` block can attach themselves to it automatically.
        DagContext.push_context_managed_dag(self)
        return self
    def __exit__(self, _type, _value, _tb):
        # Unregister on exit; exceptions are not suppressed (returns None).
        DagContext.pop_context_managed_dag()
    # /Context Manager ----------------------------------------------
def date_range(self, start_date, num=None, end_date=timezone.utcnow()):
if num:
end_date = None
return utils_date_range(
start_date=start_date, end_date=end_date,
num=num, delta=self._schedule_interval)
def is_fixed_time_schedule(self):
"""
Figures out if the DAG schedule has a fixed time (e.g. 3 AM).
:return: True if the schedule has a fixed time, False if not.
"""
now = datetime.now()
cron = croniter(self._schedule_interval, now)
start = cron.get_next(datetime)
cron_next = cron.get_next(datetime)
if cron_next.minute == start.minute and cron_next.hour == start.hour:
return True
return False
    def following_schedule(self, dttm):
        """
        Calculates the following schedule for this dag in UTC.
        :param dttm: utc datetime
        :return: utc datetime (implicitly None for '@once' schedules, where
            _schedule_interval is None)
        """
        if isinstance(self._schedule_interval, str):
            # we don't want to rely on the transitions created by
            # croniter as they are not always correct
            dttm = pendulum.instance(dttm)
            naive = timezone.make_naive(dttm, self.timezone)
            cron = croniter(self._schedule_interval, naive)
            # We assume that DST transitions happen on the minute/hour
            if not self.is_fixed_time_schedule():
                # relative offset (eg. every 5 minutes)
                delta = cron.get_next(datetime) - naive
                following = dttm.in_timezone(self.timezone).add_timedelta(delta)
            else:
                # absolute (e.g. 3 AM)
                naive = cron.get_next(datetime)
                tz = pendulum.timezone(self.timezone.name)
                following = timezone.make_aware(naive, tz)
            return timezone.convert_to_utc(following)
        elif self._schedule_interval is not None:
            # timedelta/relativedelta schedule: plain arithmetic suffices
            return dttm + self._schedule_interval
    def previous_schedule(self, dttm):
        """
        Calculates the previous schedule for this dag in UTC
        :param dttm: utc datetime
        :return: utc datetime (implicitly None for '@once' schedules)
        """
        if isinstance(self._schedule_interval, str):
            # we don't want to rely on the transitions created by
            # croniter as they are not always correct
            dttm = pendulum.instance(dttm)
            naive = timezone.make_naive(dttm, self.timezone)
            cron = croniter(self._schedule_interval, naive)
            # We assume that DST transitions happen on the minute/hour
            if not self.is_fixed_time_schedule():
                # relative offset (eg. every 5 minutes)
                delta = naive - cron.get_prev(datetime)
                previous = dttm.in_timezone(self.timezone).subtract_timedelta(delta)
            else:
                # absolute (e.g. 3 AM)
                naive = cron.get_prev(datetime)
                tz = pendulum.timezone(self.timezone.name)
                previous = timezone.make_aware(naive, tz)
            return timezone.convert_to_utc(previous)
        elif self._schedule_interval is not None:
            return dttm - self._schedule_interval
def get_run_dates(self, start_date, end_date=None):
"""
Returns a list of dates between the interval received as parameter using this
dag's schedule interval. Returned dates can be used for execution dates.
:param start_date: the start date of the interval
:type start_date: datetime
:param end_date: the end date of the interval, defaults to timezone.utcnow()
:type end_date: datetime
:return: a list of dates within the interval following the dag's schedule
:rtype: list
"""
run_dates = []
using_start_date = start_date
using_end_date = end_date
# dates for dag runs
using_start_date = using_start_date or min([t.start_date for t in self.tasks])
using_end_date = using_end_date or timezone.utcnow()
# next run date for a subdag isn't relevant (schedule_interval for subdags
# is ignored) so we use the dag run's start date in the case of a subdag
next_run_date = (self.normalize_schedule(using_start_date)
if not self.is_subdag else using_start_date)
while next_run_date and next_run_date <= using_end_date:
run_dates.append(next_run_date)
next_run_date = self.following_schedule(next_run_date)
return run_dates
def normalize_schedule(self, dttm):
"""
Returns dttm + interval unless dttm is first interval then it returns dttm
"""
following = self.following_schedule(dttm)
# in case of @once
if not following:
return dttm
if self.previous_schedule(following) != dttm:
return following
return dttm
    @provide_session
    def get_last_dagrun(self, session=None, include_externally_triggered=False):
        # Delegates to the module-level get_last_dagrun helper for the query.
        return get_last_dagrun(self.dag_id, session=session,
                               include_externally_triggered=include_externally_triggered)
    @property
    def dag_id(self):
        # Unique identifier of this DAG.
        return self._dag_id
    @dag_id.setter
    def dag_id(self, value):
        self._dag_id = value
    @property
    def full_filepath(self):
        # Path of the file the DAG was loaded from ('' if unknown).
        return self._full_filepath
    @full_filepath.setter
    def full_filepath(self, value):
        self._full_filepath = value
    @property
    def concurrency(self):
        # Maximum number of concurrently running task instances for this DAG.
        return self._concurrency
    @concurrency.setter
    def concurrency(self, value):
        self._concurrency = value
    @property
    def access_control(self):
        # Optional DAG-level permission mapping set at construction time.
        return self._access_control
    @access_control.setter
    def access_control(self, value):
        self._access_control = value
    @property
    def description(self):
        return self._description
    @property
    def default_view(self):
        # UI view preset, validated against DEFAULT_VIEW_PRESETS in __init__.
        return self._default_view
    @property
    def pickle_id(self):
        return self._pickle_id
    @pickle_id.setter
    def pickle_id(self, value):
        self._pickle_id = value
    @property
    def tasks(self):
        # Tasks as a list; the authoritative container is self.task_dict.
        return list(self.task_dict.values())
    @tasks.setter
    def tasks(self, val):
        # Direct assignment is forbidden so task_dict stays consistent.
        raise AttributeError(
            'DAG.tasks can not be modified. Use dag.add_task() instead.')
    @property
    def task_ids(self):
        return list(self.task_dict.keys())
    @property
    def filepath(self) -> str:
        """
        File location of where the dag object is instantiated
        """
        # Strip the DAGS_FOLDER and airflow-package prefixes to produce a
        # shorter, relative-looking path.
        fn = self.full_filepath.replace(settings.DAGS_FOLDER + '/', '')
        fn = fn.replace(os.path.dirname(__file__) + '/', '')
        return fn
    @property
    def folder(self) -> str:
        """Folder location of where the DAG object is instantiated."""
        return os.path.dirname(self.full_filepath)
    @property
    def owner(self) -> str:
        """
        Return list of all owners found in DAG tasks.
        :return: Comma separated list of owners in DAG tasks
        :rtype: str
        """
        # Set comprehension de-duplicates owners; the resulting order is
        # unspecified.
        return ", ".join({t.owner for t in self.tasks})
    @property
    def allow_future_exec_dates(self) -> bool:
        # Future execution dates are only permitted for unscheduled DAGs and
        # only when explicitly enabled via scheduler.allow_trigger_in_future.
        return conf.getboolean(
            'scheduler',
            'allow_trigger_in_future',
            fallback=False) and self.schedule_interval is None
    @provide_session
    def _get_concurrency_reached(self, session=None) -> bool:
        # Count RUNNING task instances of this DAG and compare against the
        # configured concurrency limit.
        TI = TaskInstance
        qry = session.query(func.count(TI.task_id)).filter(
            TI.dag_id == self.dag_id,
            TI.state == State.RUNNING,
        )
        return qry.scalar() >= self.concurrency
    @property
    def concurrency_reached(self) -> bool:
        """
        Returns a boolean indicating whether the concurrency limit for this DAG
        has been reached
        """
        return self._get_concurrency_reached()
    @provide_session
    def _get_is_paused(self, session=None):
        # Read the is_paused flag from the DagModel table.
        qry = session.query(DagModel).filter(
            DagModel.dag_id == self.dag_id)
        return qry.value(DagModel.is_paused)
    @property
    def is_paused(self) -> bool:
        """
        Returns a boolean indicating whether this DAG is paused
        """
        return self._get_is_paused()
@provide_session
def handle_callback(self, dagrun, success=True, reason=None, session=None):
"""
Triggers the appropriate callback depending on the value of success, namely the
on_failure_callback or on_success_callback. This method gets the context of a
single TaskInstance part of this DagRun and passes that to the callable along
with a 'reason', primarily to differentiate DagRun failures.
.. note: The logs end up in
``$AIRFLOW_HOME/logs/scheduler/latest/PROJECT/DAG_FILE.py.log``
:param dagrun: DagRun object
:param success: Flag to specify if failure or success callback should be called
:param reason: Completion reason
:param session: Database session
"""
callback = self.on_success_callback if success else self.on_failure_callback
if callback:
self.log.info('Executing dag callback function: {}'.format(callback))
tis = dagrun.get_task_instances()
ti = tis[-1] # get first TaskInstance of DagRun
ti.task = self.get_task(ti.task_id)
context = ti.get_template_context(session=session)
context.update({'reason': reason})
callback(context)
def get_active_runs(self):
"""
Returns a list of dag run execution dates currently running
:return: List of execution dates
"""
runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)
active_dates = []
for run in runs:
active_dates.append(run.execution_date)
return active_dates
@provide_session
def get_num_active_runs(self, external_trigger=None, session=None):
"""
Returns the number of active "running" dag runs
:param external_trigger: True for externally triggered active dag runs
:type external_trigger: bool
:param session:
:return: number greater than 0 for active dag runs
"""
# .count() is inefficient
query = (session
.query(func.count())
.filter(DagRun.dag_id == self.dag_id)
.filter(DagRun.state == State.RUNNING))
if external_trigger is not None:
query = query.filter(DagRun.external_trigger == external_trigger)
return query.scalar()
@provide_session
def get_dagrun(self, execution_date, session=None):
"""
Returns the dag run for a given execution date if it exists, otherwise
none.
:param execution_date: The execution date of the DagRun to find.
:param session:
:return: The DagRun if found, otherwise None.
"""
dagrun = (
session.query(DagRun)
.filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == execution_date)
.first())
return dagrun
@provide_session
def get_dagruns_between(self, start_date, end_date, session=None):
"""
Returns the list of dag runs between start_date (inclusive) and end_date (inclusive).
:param start_date: The starting execution date of the DagRun to find.
:param end_date: The ending execution date of the DagRun to find.
:param session:
:return: The list of DagRuns found.
"""
dagruns = (
session.query(DagRun)
.filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date >= start_date,
DagRun.execution_date <= end_date)
.all())
return dagruns
    @provide_session
    def _get_latest_execution_date(self, session=None):
        # MAX(execution_date) over this DAG's runs; None when no runs exist.
        return session.query(func.max(DagRun.execution_date)).filter(
            DagRun.dag_id == self.dag_id
        ).scalar()
    @property
    def latest_execution_date(self):
        """
        Returns the latest date for which at least one dag run exists
        """
        return self._get_latest_execution_date()
@property
def subdags(self):
"""
Returns a list of the subdag objects associated to this DAG
"""
# Check SubDag for class but don't check class directly
from airflow.operators.subdag_operator import SubDagOperator
subdag_lst = []
for task in self.tasks:
if (isinstance(task, SubDagOperator) or
# TODO remove in Airflow 2.0
type(task).__name__ == 'SubDagOperator' or
task.task_type == 'SubDagOperator'):
subdag_lst.append(task.subdag)
subdag_lst += task.subdag.subdags
return subdag_lst
def resolve_template_files(self):
for t in self.tasks:
t.resolve_template_files()
def get_template_env(self) -> jinja2.Environment:
"""Build a Jinja2 environment."""
# Collect directories to search for template files
searchpath = [self.folder]
if self.template_searchpath:
searchpath += self.template_searchpath
# Default values (for backward compatibility)
jinja_env_options = {
'loader': jinja2.FileSystemLoader(searchpath),
'undefined': self.template_undefined,
'extensions': ["jinja2.ext.do"],
'cache_size': 0
}
if self.jinja_environment_kwargs:
jinja_env_options.update(self.jinja_environment_kwargs)
env = jinja2.Environment(**jinja_env_options) # type: ignore
# Add any user defined items. Safe to edit globals as long as no templates are rendered yet.
# http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment.globals
if self.user_defined_macros:
env.globals.update(self.user_defined_macros)
if self.user_defined_filters:
env.filters.update(self.user_defined_filters)
return env
def set_dependency(self, upstream_task_id, downstream_task_id):
"""
Simple utility method to set dependency between two tasks that
already have been added to the DAG using add_task()
"""
self.get_task(upstream_task_id).set_downstream(
self.get_task(downstream_task_id))
    @provide_session
    def get_task_instances(
            self, start_date=None, end_date=None, state=None, session=None):
        """
        Return task instances of this DAG ordered by execution_date.

        :param start_date: earliest execution_date; defaults to 30 days ago
            (midnight, made timezone-aware)
        :param end_date: latest execution_date; defaults to "now" unless
            future execution dates are allowed for this DAG
        :param state: a single state string, or an iterable of states where
            ``None`` entries match task instances with a NULL state
        :param session: ORM session (injected by provide_session)
        """
        if not start_date:
            start_date = (timezone.utcnow() - timedelta(30)).date()
            start_date = timezone.make_aware(
                datetime.combine(start_date, datetime.min.time()))
        tis = session.query(TaskInstance).filter(
            TaskInstance.dag_id == self.dag_id,
            TaskInstance.execution_date >= start_date,
            TaskInstance.task_id.in_([t.task_id for t in self.tasks]),
        )
        # This allows allow_trigger_in_future config to take affect, rather than mandating exec_date <= UTC
        if end_date or not self.allow_future_exec_dates:
            end_date = end_date or timezone.utcnow()
            tis = tis.filter(TaskInstance.execution_date <= end_date)
        if state:
            if isinstance(state, str):
                tis = tis.filter(TaskInstance.state == state)
            else:
                # this is required to deal with NULL values
                if None in state:
                    if all(x is None for x in state):
                        tis = tis.filter(TaskInstance.state.is_(None))
                    else:
                        not_none_state = [s for s in state if s]
                        tis = tis.filter(
                            or_(TaskInstance.state.in_(not_none_state),
                                TaskInstance.state.is_(None))
                        )
                else:
                    tis = tis.filter(TaskInstance.state.in_(state))
        tis = tis.order_by(TaskInstance.execution_date).all()
        return tis
    @property
    def roots(self) -> List[BaseOperator]:
        """Return nodes with no parents. These are first to execute and are called roots or root nodes."""
        # A root has an empty upstream_list.
        return [task for task in self.tasks if not task.upstream_list]
    @property
    def leaves(self) -> List[BaseOperator]:
        """Return nodes with no children. These are last to execute and are called leaves or leaf nodes."""
        # A leaf has an empty downstream_list.
        return [task for task in self.tasks if not task.downstream_list]
    def topological_sort(self, include_subdag_tasks: bool = False):
        """
        Sorts tasks in topographical order, such that a task comes after any of its
        upstream dependencies.
        Heavily inspired by:
        http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/
        :param include_subdag_tasks: whether to include tasks in subdags, default to False
        :return: list of tasks in topological order
        :raises AirflowException: if the task graph contains a cycle
        """
        from airflow.operators.subdag_operator import SubDagOperator  # Avoid circular import
        # convert into an OrderedDict to speedup lookup while keeping order the same
        graph_unsorted = OrderedDict((task.task_id, task) for task in self.tasks)
        graph_sorted = []  # type: List[BaseOperator]
        # special case: empty DAG sorts to an empty tuple
        if len(self.tasks) == 0:
            return tuple(graph_sorted)
        # Run until the unsorted graph is empty.
        while graph_unsorted:
            # Go through each of the node/edges pairs in the unsorted
            # graph. If a set of edges doesn't contain any nodes that
            # haven't been resolved, that is, that are still in the
            # unsorted graph, remove the pair from the unsorted graph,
            # and append it to the sorted graph. Note here that by using
            # using the items() method for iterating, a copy of the
            # unsorted graph is used, allowing us to modify the unsorted
            # graph as we move through it. We also keep a flag for
            # checking that that graph is acyclic, which is true if any
            # nodes are resolved during each pass through the graph. If
            # not, we need to bail out as the graph therefore can't be
            # sorted.
            acyclic = False
            for node in list(graph_unsorted.values()):
                for edge in node.upstream_list:
                    if edge.task_id in graph_unsorted:
                        break
                # no edges in upstream tasks
                else:
                    acyclic = True
                    del graph_unsorted[node.task_id]
                    graph_sorted.append(node)
                    # optionally expand subdag operators in-place with their
                    # own topologically sorted tasks
                    if include_subdag_tasks and isinstance(node, SubDagOperator):
                        graph_sorted.extend(node.subdag.topological_sort(include_subdag_tasks=True))
            if not acyclic:
                raise AirflowException("A cyclic dependency occurred in dag: {}"
                                       .format(self.dag_id))
        return tuple(graph_sorted)
@provide_session
def set_dag_runs_state(
self,
state: str = State.RUNNING,
session: Session = None,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
) -> None:
query = session.query(DagRun).filter_by(dag_id=self.dag_id)
if start_date:
query = query.filter(DagRun.execution_date >= start_date)
if end_date:
query = query.filter(DagRun.execution_date <= end_date)
drs = query.all()
dirty_ids = []
for dr in drs:
dr.state = state
dirty_ids.append(dr.dag_id)
    @provide_session
    def clear(
            self, start_date=None, end_date=None,
            only_failed=False,
            only_running=False,
            confirm_prompt=False,
            include_subdags=True,
            include_parentdag=True,
            reset_dag_runs=True,
            dry_run=False,
            session=None,
            get_tis=False,
            recursion_depth=0,
            max_recursion_depth=None,
            dag_bag=None,
    ):
        """
        Clears a set of task instances associated with the current dag for
        a specified date range.
        :param start_date: The minimum execution_date to clear
        :type start_date: datetime.datetime or None
        :param end_date: The maximum execution_date to clear
        :type end_date: datetime.datetime or None
        :param only_failed: Only clear failed tasks
        :type only_failed: bool
        :param only_running: Only clear running tasks.
        :type only_running: bool
        :param confirm_prompt: Ask for confirmation
        :type confirm_prompt: bool
        :param include_subdags: Clear tasks in subdags and clear external tasks
            indicated by ExternalTaskMarker
        :type include_subdags: bool
        :param include_parentdag: Clear tasks in the parent dag of the subdag.
        :type include_parentdag: bool
        :param reset_dag_runs: Set state of dag to RUNNING
        :type reset_dag_runs: bool
        :param dry_run: Find the tasks to clear but don't clear them.
        :type dry_run: bool
        :param session: The sqlalchemy session to use
        :type session: sqlalchemy.orm.session.Session
        :param get_tis: Return the sqlachemy query for finding the TaskInstance without clearing the tasks
        :type get_tis: bool
        :param recursion_depth: The recursion depth of nested calls to DAG.clear().
        :type recursion_depth: int
        :param max_recursion_depth: The maximum recursion depth allowed. This is determined by the
            first encountered ExternalTaskMarker. Default is None indicating no ExternalTaskMarker
            has been encountered.
        :type max_recursion_depth: int
        :param dag_bag: The DagBag used to find the dags
        :type dag_bag: airflow.models.dagbag.DagBag
        """
        TI = TaskInstance
        tis = session.query(TI)
        if include_subdags:
            # Crafting the right filter for dag_id and task_ids combo
            conditions = []
            for dag in self.subdags + [self]:
                conditions.append(
                    TI.dag_id.like(dag.dag_id) &
                    TI.task_id.in_(dag.task_ids)
                )
            tis = tis.filter(or_(*conditions))
        else:
            tis = session.query(TI).filter(TI.dag_id == self.dag_id)
            tis = tis.filter(TI.task_id.in_(self.task_ids))
        if include_parentdag and self.is_subdag:
            # also clear the downstream tasks of this subdag's operator in
            # the parent dag (with include_parentdag=False to avoid cycles)
            p_dag = self.parent_dag.sub_dag(
                task_regex=r"^{}$".format(self.dag_id.split('.')[1]),
                include_upstream=False,
                include_downstream=True)
            tis = tis.union(p_dag.clear(
                start_date=start_date, end_date=end_date,
                only_failed=only_failed,
                only_running=only_running,
                confirm_prompt=confirm_prompt,
                include_subdags=include_subdags,
                include_parentdag=False,
                reset_dag_runs=reset_dag_runs,
                get_tis=True,
                session=session,
                recursion_depth=recursion_depth,
                max_recursion_depth=max_recursion_depth,
                dag_bag=dag_bag
            ))
        if start_date:
            tis = tis.filter(TI.execution_date >= start_date)
        if end_date:
            tis = tis.filter(TI.execution_date <= end_date)
        if only_failed:
            tis = tis.filter(or_(
                TI.state == State.FAILED,
                TI.state == State.UPSTREAM_FAILED))
        if only_running:
            tis = tis.filter(TI.state == State.RUNNING)
        if include_subdags:
            from airflow.sensors.external_task_sensor import ExternalTaskMarker
            # Recursively find external tasks indicated by ExternalTaskMarker
            instances = tis.all()
            for ti in instances:
                if ti.operator == ExternalTaskMarker.__name__:
                    ti.task = self.get_task(ti.task_id)
                    if recursion_depth == 0:
                        # Maximum recursion depth allowed is the recursion_depth of the first
                        # ExternalTaskMarker in the tasks to be cleared.
                        max_recursion_depth = ti.task.recursion_depth
                    if recursion_depth + 1 > max_recursion_depth:
                        # Prevent cycles or accidents.
                        raise AirflowException("Maximum recursion depth {} reached for {} {}. "
                                               "Attempted to clear too many tasks "
                                               "or there may be a cyclic dependency."
                                               .format(max_recursion_depth,
                                                       ExternalTaskMarker.__name__, ti.task_id))
                    ti.render_templates()
                    external_tis = session.query(TI).filter(TI.dag_id == ti.task.external_dag_id,
                                                            TI.task_id == ti.task.external_task_id,
                                                            TI.execution_date ==
                                                            pendulum.parse(ti.task.execution_date))
                    for tii in external_tis:
                        if not dag_bag:
                            dag_bag = DagBag()
                        external_dag = dag_bag.get_dag(tii.dag_id)
                        if not external_dag:
                            raise AirflowException("Could not find dag {}".format(tii.dag_id))
                        downstream = external_dag.sub_dag(
                            task_regex=r"^{}$".format(tii.task_id),
                            include_upstream=False,
                            include_downstream=True
                        )
                        tis = tis.union(downstream.clear(start_date=tii.execution_date,
                                                         end_date=tii.execution_date,
                                                         only_failed=only_failed,
                                                         only_running=only_running,
                                                         confirm_prompt=confirm_prompt,
                                                         include_subdags=include_subdags,
                                                         include_parentdag=False,
                                                         reset_dag_runs=reset_dag_runs,
                                                         get_tis=True,
                                                         session=session,
                                                         recursion_depth=recursion_depth + 1,
                                                         max_recursion_depth=max_recursion_depth,
                                                         dag_bag=dag_bag))
        if get_tis:
            # caller (a recursive clear) only wants the query, not the action
            return tis
        if dry_run:
            tis = tis.all()
            session.expunge_all()
            return tis
        count = tis.count()
        do_it = True
        if count == 0:
            return 0
        if confirm_prompt:
            ti_list = "\n".join([str(t) for t in tis])
            question = (
                "You are about to delete these {count} tasks:\n"
                "{ti_list}\n\n"
                "Are you sure? (yes/no): ").format(count=count, ti_list=ti_list)
            do_it = utils.helpers.ask_yesno(question)
        if do_it:
            clear_task_instances(tis.all(),
                                 session,
                                 dag=self,
                                 )
            if reset_dag_runs:
                self.set_dag_runs_state(session=session,
                                        start_date=start_date,
                                        end_date=end_date,
                                        )
        else:
            count = 0
            print("Bail. Nothing was cleared.")
        session.commit()
        return count
    @classmethod
    def clear_dags(
            cls, dags,
            start_date=None,
            end_date=None,
            only_failed=False,
            only_running=False,
            confirm_prompt=False,
            include_subdags=True,
            include_parentdag=False,
            reset_dag_runs=True,
            dry_run=False,
    ):
        """
        Clear task instances for a collection of DAGs at once: first collect
        the matching task instances via per-DAG dry runs, optionally confirm,
        then perform the actual clear per DAG. Returns the number of task
        instances cleared (or the instances themselves when ``dry_run``).
        Parameters mirror :meth:`DAG.clear`.
        """
        # First pass: dry-run each DAG to gather what would be cleared.
        all_tis = []
        for dag in dags:
            tis = dag.clear(
                start_date=start_date,
                end_date=end_date,
                only_failed=only_failed,
                only_running=only_running,
                confirm_prompt=False,
                include_subdags=include_subdags,
                include_parentdag=include_parentdag,
                reset_dag_runs=reset_dag_runs,
                dry_run=True)
            all_tis.extend(tis)
        if dry_run:
            return all_tis
        count = len(all_tis)
        do_it = True
        if count == 0:
            print("Nothing to clear.")
            return 0
        if confirm_prompt:
            # Single confirmation prompt covering all DAGs.
            ti_list = "\n".join([str(t) for t in all_tis])
            question = (
                "You are about to delete these {} tasks:\n"
                "{}\n\n"
                "Are you sure? (yes/no): ").format(count, ti_list)
            do_it = utils.helpers.ask_yesno(question)
        if do_it:
            # Second pass: actually clear, without further prompting.
            for dag in dags:
                dag.clear(start_date=start_date,
                          end_date=end_date,
                          only_failed=only_failed,
                          only_running=only_running,
                          confirm_prompt=False,
                          include_subdags=include_subdags,
                          reset_dag_runs=reset_dag_runs,
                          dry_run=False,
                          )
        else:
            count = 0
            print("Bail. Nothing was cleared.")
        return count
    def __deepcopy__(self, memo):
        # Switcheroo to go around deepcopying objects coming through the
        # backdoor: user-supplied macros/filters/params are shared by
        # reference instead of being deep-copied.
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
                setattr(result, k, copy.deepcopy(v, memo))
        result.user_defined_macros = self.user_defined_macros
        result.user_defined_filters = self.user_defined_filters
        result.params = self.params
        return result
    def sub_dag(self, task_regex, include_downstream=False,
                include_upstream=True):
        """
        Returns a subset of the current dag as a deep copy of the current dag
        based on a regex that should match one or many tasks, and includes
        upstream and downstream neighbours based on the flag passed.
        """
        # deep-copying self.task_dict takes a long time, and we don't want all
        # the tasks anyway, so we copy the tasks manually later
        task_dict = self.task_dict
        self.task_dict = {}
        dag = copy.deepcopy(self)
        self.task_dict = task_dict
        # tasks whose id matches the regex
        regex_match = [
            t for t in self.tasks if re.findall(task_regex, t.task_id)]
        also_include = []
        for t in regex_match:
            if include_downstream:
                also_include += t.get_flat_relatives(upstream=False)
            if include_upstream:
                also_include += t.get_flat_relatives(upstream=True)
        # Compiling the unique list of tasks that made the cut
        # Make sure to not recursively deepcopy the dag while copying the task
        dag.task_dict = {t.task_id: copy.deepcopy(t, {id(t.dag): dag})
                         for t in regex_match + also_include}
        for t in dag.tasks:
            # Removing upstream/downstream references to tasks that did not
            # made the cut
            t._upstream_task_ids = t.upstream_task_ids.intersection(dag.task_dict.keys())
            t._downstream_task_ids = t.downstream_task_ids.intersection(
                dag.task_dict.keys())
        if len(dag.tasks) < len(self.tasks):
            # flag that this DAG is only a partial view of the original
            dag.partial = True
        return dag
def has_task(self, task_id):
return task_id in (t.task_id for t in self.tasks)
def get_task(self, task_id, include_subdags=False):
if task_id in self.task_dict:
return self.task_dict[task_id]
if include_subdags:
for dag in self.subdags:
if task_id in dag.task_dict:
return dag.task_dict[task_id]
raise TaskNotFound("Task {task_id} not found".format(task_id=task_id))
def pickle_info(self):
d = dict()
d['is_picklable'] = True
try:
dttm = timezone.utcnow()
pickled = pickle.dumps(self)
d['pickle_len'] = len(pickled)
d['pickling_duration'] = "{}".format(timezone.utcnow() - dttm)
except Exception as e:
self.log.debug(e)
d['is_picklable'] = False
d['stacktrace'] = traceback.format_exc()
return d
    @provide_session
    def pickle(self, session=None):
        """
        Pickle this DAG into the DagPickle table, reusing the existing pickle
        when it is still identical, and remember the resulting pickle_id.
        """
        dag = session.query(
            DagModel).filter(DagModel.dag_id == self.dag_id).first()
        dp = None
        if dag and dag.pickle_id:
            dp = session.query(DagPickle).filter(
                DagPickle.id == dag.pickle_id).first()
        # Only write a new row when there is no pickle yet or it is stale.
        if not dp or dp.pickle != self:
            dp = DagPickle(dag=self)
            session.add(dp)
            self.last_pickled = timezone.utcnow()
            session.commit()
            self.pickle_id = dp.id
        return dp
def tree_view(self) -> None:
"""Print an ASCII tree representation of the DAG."""
def get_downstream(task, level=0):
print((" " * level * 4) + str(task))
level += 1
for t in task.downstream_list:
get_downstream(t, level)
for t in self.roots:
get_downstream(t)
def add_task(self, task):
"""
Add a task to the DAG
:param task: the task you want to add
:type task: task
"""
if not self.start_date and not task.start_date:
raise AirflowException("Task is missing the start_date parameter")
# if the task has no start date, assign it the same as the DAG
elif not task.start_date:
task.start_date = self.start_date
# otherwise, the task will start on the later of its own start date and
# the DAG's start date
elif self.start_date:
task.start_date = max(task.start_date, self.start_date)
# if the task has no end date, assign it the same as the dag
if not task.end_date:
task.end_date = self.end_date
# otherwise, the task will end on the earlier of its own end date and
# the DAG's end date
elif task.end_date and self.end_date:
task.end_date = min(task.end_date, self.end_date)
if task.task_id in self.task_dict and self.task_dict[task.task_id] != task:
raise DuplicateTaskIdFound(
"Task id '{}' has already been added to the DAG".format(task.task_id))
else:
self.task_dict[task.task_id] = task
task.dag = self
self.task_count = len(self.task_dict)
def add_tasks(self, tasks):
"""
Add a list of tasks to the DAG
:param tasks: a lit of tasks you want to add
:type tasks: list of tasks
"""
for task in tasks:
self.add_task(task)
    def run(
        self,
        start_date=None,
        end_date=None,
        mark_success=False,
        local=False,
        executor=None,
        # NOTE(review): this default is evaluated once at import time, not per
        # call, and the ``conf`` parameter below shadows the module-level
        # ``conf`` object inside this function body.
        donot_pickle=conf.getboolean('core', 'donot_pickle'),
        ignore_task_deps=False,
        ignore_first_depends_on_past=False,
        pool=None,
        delay_on_limit_secs=1.0,
        verbose=False,
        conf=None,
        rerun_failed_tasks=False,
        run_backwards=False,
    ):
        """
        Runs the DAG.

        :param start_date: the start date of the range to run
        :type start_date: datetime.datetime
        :param end_date: the end date of the range to run
        :type end_date: datetime.datetime
        :param mark_success: True to mark jobs as succeeded without running them
        :type mark_success: bool
        :param local: True to run the tasks using the LocalExecutor
        :type local: bool
        :param executor: The executor instance to run the tasks
        :type executor: airflow.executor.BaseExecutor
        :param donot_pickle: True to avoid pickling DAG object and send to workers
        :type donot_pickle: bool
        :param ignore_task_deps: True to skip upstream tasks
        :type ignore_task_deps: bool
        :param ignore_first_depends_on_past: True to ignore depends_on_past
            dependencies for the first set of tasks only
        :type ignore_first_depends_on_past: bool
        :param pool: Resource pool to use
        :type pool: str
        :param delay_on_limit_secs: Time in seconds to wait before next attempt to run
            dag run when max_active_runs limit has been reached
        :type delay_on_limit_secs: float
        :param verbose: Make logging output more verbose
        :type verbose: bool
        :param conf: user defined dictionary passed from CLI
        :type conf: dict
        :param rerun_failed_tasks:
        :type: bool
        :param run_backwards:
        :type: bool
        """
        # Imported lazily to avoid a circular import at module load time.
        from airflow.jobs.backfill_job import BackfillJob
        # Executor selection: an explicit ``executor`` argument wins, then
        # LocalExecutor when ``local`` is set, otherwise the configured default.
        if not executor and local:
            from airflow.executors.local_executor import LocalExecutor
            executor = LocalExecutor()
        elif not executor:
            from airflow.executors.executor_loader import ExecutorLoader
            executor = ExecutorLoader.get_default_executor()
        # Delegate the actual execution to a BackfillJob over the date range.
        job = BackfillJob(
            self,
            start_date=start_date,
            end_date=end_date,
            mark_success=mark_success,
            executor=executor,
            donot_pickle=donot_pickle,
            ignore_task_deps=ignore_task_deps,
            ignore_first_depends_on_past=ignore_first_depends_on_past,
            pool=pool,
            delay_on_limit_secs=delay_on_limit_secs,
            verbose=verbose,
            conf=conf,
            rerun_failed_tasks=rerun_failed_tasks,
            run_backwards=run_backwards,
        )
        job.run()
    def cli(self):
        """
        Exposes a CLI specific to this DAG
        """
        # Build the DAG-scoped argument parser and dispatch to the chosen
        # sub-command, passing this DAG instance along.
        from airflow.bin import cli
        parser = cli.CLIFactory.get_parser(dag_parser=True)
        args = parser.parse_args()
        args.func(args, self)
    @provide_session
    def create_dagrun(self,
                      run_id,
                      state,
                      execution_date=None,
                      start_date=None,
                      external_trigger=False,
                      conf=None,
                      session=None):
        """
        Creates a dag run from this dag including the tasks associated with this dag.
        Returns the dag run.

        :param run_id: defines the run id for this dag run
        :type run_id: str
        :param execution_date: the execution date of this dag run
        :type execution_date: datetime.datetime
        :param state: the state of the dag run
        :type state: airflow.utils.state.State
        :param start_date: the date this dag run should be evaluated
        :type start_date: datetime
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param conf: user supplied run configuration dictionary
        :type conf: dict
        :param session: database session (injected by ``@provide_session``)
        :type session: sqlalchemy.orm.session.Session
        """
        run = DagRun(
            dag_id=self.dag_id,
            run_id=run_id,
            execution_date=execution_date,
            start_date=start_date,
            external_trigger=external_trigger,
            conf=conf,
            state=state
        )
        session.add(run)

        session.commit()

        run.dag = self

        # create the associated task instances
        # state is None at the moment of creation
        run.verify_integrity(session=session)

        return run
@classmethod
@provide_session
def bulk_sync_to_db(cls, dags: Collection["DAG"], sync_time=None, session=None):
"""
Save attributes about list of DAG to the DB. Note that this method
can be called for both DAGs and SubDAGs. A SubDag is actually a
SubDagOperator.
:param dags: the DAG objects to save to the DB
:type dags: List[airflow.models.dag.DAG]
:param sync_time: The time that the DAG should be marked as sync'ed
:type sync_time: datetime
:return: None
"""
if not dags:
return
from airflow.models.serialized_dag import SerializedDagModel
if sync_time is None:
sync_time = timezone.utcnow()
log.info("Sync %s DAGs", len(dags))
dag_by_ids = {dag.dag_id: dag for dag in dags}
dag_ids = set(dag_by_ids.keys())
orm_dags = (
session
.query(DagModel)
.options(joinedload(DagModel.tags, innerjoin=False))
.filter(DagModel.dag_id.in_(dag_ids))
.with_for_update(of=DagModel)
.all()
)
existing_dag_ids = {orm_dag.dag_id for orm_dag in orm_dags}
missing_dag_ids = dag_ids.difference(existing_dag_ids)
for missing_dag_id in missing_dag_ids:
orm_dag = DagModel(dag_id=missing_dag_id)
dag = dag_by_ids[missing_dag_id]
if dag.is_paused_upon_creation is not None:
orm_dag.is_paused = dag.is_paused_upon_creation
orm_dag.tags = []
log.info("Creating ORM DAG for %s", dag.dag_id)
session.add(orm_dag)
orm_dags.append(orm_dag)
for orm_dag in sorted(orm_dags, key=lambda d: d.dag_id):
dag = dag_by_ids[orm_dag.dag_id]
if dag.is_subdag:
orm_dag.is_subdag = True
orm_dag.fileloc = dag.parent_dag.fileloc # type: ignore
orm_dag.root_dag_id = dag.parent_dag.dag_id # type: ignore
orm_dag.owners = dag.parent_dag.owner # type: ignore
else:
orm_dag.is_subdag = False
orm_dag.fileloc = dag.fileloc
orm_dag.owners = dag.owner
orm_dag.is_active = True
orm_dag.last_scheduler_run = sync_time
orm_dag.default_view = dag.default_view
orm_dag.description = dag.description
orm_dag.schedule_interval = dag.schedule_interval
for orm_tag in list(orm_dag.tags):
if orm_tag.name not in orm_dag.tags:
session.delete(orm_tag)
orm_dag.tags.remove(orm_tag)
if dag.tags:
orm_tag_names = [t.name for t in orm_dag.tags]
for dag_tag in list(dag.tags):
if dag_tag not in orm_tag_names:
dag_tag_orm = DagTag(name=dag_tag, dag_id=dag.dag_id)
orm_dag.tags.append(dag_tag_orm)
session.add(dag_tag_orm)
session.commit()
for dag in dags:
cls.bulk_sync_to_db(dag.subdags, sync_time=sync_time, session=session)
if STORE_SERIALIZED_DAGS and not dag.is_subdag:
SerializedDagModel.write_dag(
dag,
min_update_interval=MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session
)
    @provide_session
    def sync_to_db(self, sync_time=None, session=None):
        """
        Save attributes about this DAG to the DB. Note that this method
        can be called for both DAGs and SubDAGs. A SubDag is actually a
        SubDagOperator.

        :param sync_time: The time that the DAG should be marked as sync'ed
        :type sync_time: datetime
        :param session: database session (injected by ``@provide_session``)
        :return: None
        """
        # Thin wrapper over the bulk method with a single-element list.
        self.bulk_sync_to_db([self], sync_time, session)
    @staticmethod
    @provide_session
    def deactivate_unknown_dags(active_dag_ids, session=None):
        """
        Given a list of known DAGs, deactivate any other DAGs that are
        marked as active in the ORM

        :param active_dag_ids: list of DAG IDs that are active
        :type active_dag_ids: list[unicode]
        :param session: database session (injected by ``@provide_session``)
        :return: None
        """
        # Guard: with an empty id list the NOT-IN filter would deactivate
        # every DAG, so do nothing instead.
        if len(active_dag_ids) == 0:
            return
        for dag in session.query(
                DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
            dag.is_active = False
            session.merge(dag)
        session.commit()
    @staticmethod
    @provide_session
    def deactivate_stale_dags(expiration_date, session=None):
        """
        Deactivate any DAGs that were last touched by the scheduler before
        the expiration date. These DAGs were likely deleted.

        :param expiration_date: set inactive DAGs that were touched before this
            time
        :type expiration_date: datetime
        :param session: database session (injected by ``@provide_session``)
        :return: None
        """
        # Only currently-active DAGs are considered; already-inactive rows are
        # left untouched.
        for dag in session.query(
                DagModel).filter(DagModel.last_scheduler_run < expiration_date,
                                 DagModel.is_active).all():
            log.info(
                "Deactivating DAG ID %s since it was last touched by the scheduler at %s",
                dag.dag_id, dag.last_scheduler_run.isoformat()
            )
            dag.is_active = False
            session.merge(dag)
            session.commit()
    @staticmethod
    @provide_session
    def get_num_task_instances(dag_id, task_ids=None, states=None, session=None):
        """
        Returns the number of task instances in the given DAG.

        :param session: ORM session (injected by ``@provide_session``)
        :param dag_id: ID of the DAG to get the task concurrency of
        :type dag_id: unicode
        :param task_ids: A list of valid task IDs for the given DAG
        :type task_ids: list[unicode]
        :param states: A list of states to filter by if supplied
        :type states: list[state]
        :return: The number of running tasks
        :rtype: int
        """
        qry = session.query(func.count(TaskInstance.task_id)).filter(
            TaskInstance.dag_id == dag_id,
        )
        if task_ids:
            qry = qry.filter(
                TaskInstance.task_id.in_(task_ids),
            )

        if states:
            # SQL ``IN`` never matches NULL, so a ``None`` entry in ``states``
            # needs an explicit IS NULL clause.
            if None in states:
                if all(x is None for x in states):
                    qry = qry.filter(TaskInstance.state.is_(None))
                else:
                    not_none_states = [state for state in states if state]
                    qry = qry.filter(or_(
                        TaskInstance.state.in_(not_none_states),
                        TaskInstance.state.is_(None)))
            else:
                qry = qry.filter(TaskInstance.state.in_(states))
        return qry.scalar()
def test_cycle(self):
"""
Check to see if there are any cycles in the DAG. Returns False if no cycle found,
otherwise raises exception.
"""
from airflow.models.dagbag import DagBag # Avoid circular imports
# default of int is 0 which corresponds to CYCLE_NEW
visit_map = defaultdict(int)
for task_id in self.task_dict.keys():
# print('starting %s' % task_id)
if visit_map[task_id] == DagBag.CYCLE_NEW:
self._test_cycle_helper(visit_map, task_id)
return False
def _test_cycle_helper(self, visit_map, task_id):
"""
Checks if a cycle exists from the input task using DFS traversal
"""
from airflow.models.dagbag import DagBag # Avoid circular imports
# print('Inspecting %s' % task_id)
if visit_map[task_id] == DagBag.CYCLE_DONE:
return False
visit_map[task_id] = DagBag.CYCLE_IN_PROGRESS
task = self.task_dict[task_id]
for descendant_id in task.get_direct_relative_ids():
if visit_map[descendant_id] == DagBag.CYCLE_IN_PROGRESS:
msg = "Cycle detected in DAG. Faulty task: {0} to {1}".format(
task_id, descendant_id)
raise AirflowDagCycleException(msg)
else:
self._test_cycle_helper(visit_map, descendant_id)
visit_map[task_id] = DagBag.CYCLE_DONE
    @classmethod
    def get_serialized_fields(cls):
        """Stringified DAGs and operators contain exactly these fields."""
        # Lazily computed once: instantiate a throwaway DAG and take all of its
        # instance attribute names minus those that must not be serialized.
        # NOTE: ``cls.__serialized_fields`` is name-mangled to the defining
        # class, so all subclasses share this single cache.
        if not cls.__serialized_fields:
            cls.__serialized_fields = frozenset(vars(DAG(dag_id='test')).keys()) - {
                'parent_dag', '_old_context_manager_dags', 'safe_dag_id', 'last_loaded',
                '_full_filepath', 'user_defined_filters', 'user_defined_macros',
                '_schedule_interval', 'partial', '_old_context_manager_dags',
                '_pickle_id', '_log', 'is_subdag', 'task_dict', 'template_searchpath',
                'sla_miss_callback', 'on_success_callback', 'on_failure_callback',
                'template_undefined', 'jinja_environment_kwargs'
            }
        return cls.__serialized_fields
class DagTag(Base):
    """
    A tag name per dag, to allow quick filtering in the DAG view.
    """
    __tablename__ = "dag_tag"
    # Composite primary key: the same tag name may be attached to many DAGs,
    # and a DAG may carry many tags.
    name = Column(String(100), primary_key=True)
    dag_id = Column(String(ID_LEN), ForeignKey('dag.dag_id'), primary_key=True)
class DagModel(Base):
    """ORM model holding per-DAG state used by the scheduler and webserver."""

    __tablename__ = "dag"
    """
    These items are stored in the database for state related information
    """
    dag_id = Column(String(ID_LEN), primary_key=True)
    root_dag_id = Column(String(ID_LEN))
    # A DAG can be paused from the UI / DB
    # Set this default value of is_paused based on a configuration value!
    is_paused_at_creation = conf\
        .getboolean('core',
                    'dags_are_paused_at_creation')
    is_paused = Column(Boolean, default=is_paused_at_creation)
    # Whether the DAG is a subdag
    is_subdag = Column(Boolean, default=False)
    # Whether that DAG was seen on the last DagBag load
    is_active = Column(Boolean, default=False)
    # Last time the scheduler started
    last_scheduler_run = Column(UtcDateTime)
    # Last time this DAG was pickled
    last_pickled = Column(UtcDateTime)
    # Time when the DAG last received a refresh signal
    # (e.g. the DAG's "refresh" button was clicked in the web UI)
    last_expired = Column(UtcDateTime)
    # Whether (one of) the scheduler is scheduling this DAG at the moment
    scheduler_lock = Column(Boolean)
    # Foreign key to the latest pickle_id
    pickle_id = Column(Integer)
    # The location of the file containing the DAG object
    # Note: Do not depend on fileloc pointing to a file; in the case of a
    # packaged DAG, it will point to the subpath of the DAG within the
    # associated zip.
    fileloc = Column(String(2000))
    # String representing the owners
    owners = Column(String(2000))
    # Description of the dag
    description = Column(Text)
    # Default view of the inside the webserver
    default_view = Column(String(25))
    # Schedule interval
    schedule_interval = Column(Interval)
    # Tags for view filter
    tags = relationship('DagTag', cascade='all,delete-orphan', backref=backref('dag'))

    __table_args__ = (
        Index('idx_root_dag_id', root_dag_id, unique=False),
    )

    def __repr__(self):
        """Return a short debug representation, e.g. ``<DAG: my_dag>``."""
        return "<DAG: {self.dag_id}>".format(self=self)

    @property
    def timezone(self):
        # The ORM row does not persist a timezone; fall back to the global one.
        return settings.TIMEZONE

    @staticmethod
    @provide_session
    def get_dagmodel(dag_id, session=None):
        """Return the DagModel row for *dag_id*, or ``None`` if absent."""
        return session.query(DagModel).filter(DagModel.dag_id == dag_id).first()

    @classmethod
    @provide_session
    def get_current(cls, dag_id, session=None):
        """Return the current row of *cls* for *dag_id*, or ``None`` if absent."""
        return session.query(cls).filter(cls.dag_id == dag_id).first()

    @provide_session
    def get_last_dagrun(self, session=None, include_externally_triggered=False):
        """Return the most recent DagRun of this DAG (see ``get_last_dagrun``)."""
        return get_last_dagrun(self.dag_id, session=session,
                               include_externally_triggered=include_externally_triggered)

    @staticmethod
    @provide_session
    def get_paused_dag_ids(dag_ids: List[str], session: Session = None) -> Set[str]:
        """
        Given a list of dag_ids, get a set of Paused Dag Ids

        :param dag_ids: List of Dag ids
        :param session: ORM Session
        :return: Paused Dag_ids
        """
        paused_dag_ids = (
            session.query(DagModel.dag_id)
            .filter(DagModel.is_paused.is_(True))
            .filter(DagModel.dag_id.in_(dag_ids))
            .all()
        )

        # Each query row is a one-element tuple; unpack to plain dag_ids.
        paused_dag_ids = set(paused_dag_id for paused_dag_id, in paused_dag_ids)
        return paused_dag_ids

    @property
    def safe_dag_id(self):
        # Dots are not valid in some identifier contexts (e.g. metric names).
        return self.dag_id.replace('.', '__dot__')

    def get_dag(self, store_serialized_dags=False):
        """Creates a dagbag to load and return a DAG.

        Calling it from UI should set store_serialized_dags = STORE_SERIALIZED_DAGS.
        There may be a delay for scheduler to write serialized DAG into database,
        loads from file in this case.
        FIXME: remove it when webserver does not access to DAG folder in future.
        """
        dag = DagBag(
            dag_folder=self.fileloc, store_serialized_dags=store_serialized_dags).get_dag(self.dag_id)
        # Fallback: if the serialized DAG is not in the DB yet, retry from the
        # DAG file (the recursive call uses store_serialized_dags=False).
        if store_serialized_dags and dag is None:
            dag = self.get_dag()
        return dag

    @provide_session
    def create_dagrun(self,
                      run_id,
                      state,
                      execution_date,
                      start_date=None,
                      external_trigger=False,
                      conf=None,
                      session=None):
        """
        Creates a dag run from this dag including the tasks associated with this dag.
        Returns the dag run.

        :param run_id: defines the run id for this dag run
        :type run_id: str
        :param execution_date: the execution date of this dag run
        :type execution_date: datetime.datetime
        :param state: the state of the dag run
        :type state: airflow.utils.state.State
        :param start_date: the date this dag run should be evaluated
        :type start_date: datetime.datetime
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param session: database session
        :type session: sqlalchemy.orm.session.Session
        """
        # Delegate to the full DAG object so task instances get created too.
        return self.get_dag().create_dagrun(run_id=run_id,
                                            state=state,
                                            execution_date=execution_date,
                                            start_date=start_date,
                                            external_trigger=external_trigger,
                                            conf=conf,
                                            session=session)

    @provide_session
    def set_is_paused(self,
                      is_paused: bool,
                      including_subdags: bool = True,
                      store_serialized_dags: bool = False,
                      session=None) -> None:
        """
        Pause/Un-pause a DAG.

        :param is_paused: Is the DAG paused
        :param including_subdags: whether to include the DAG's subdags
        :param store_serialized_dags: whether to serialize DAGs & store it in DB
        :param session: session
        """
        dag_ids = [self.dag_id]  # type: List[str]
        if including_subdags:
            # Loading the full DAG is required to discover its subdag ids.
            dag = self.get_dag(store_serialized_dags)
            if dag is None:
                raise DagNotFound("Dag id {} not found".format(self.dag_id))
            subdags = dag.subdags
            dag_ids.extend([subdag.dag_id for subdag in subdags])
        dag_models = session.query(DagModel).filter(DagModel.dag_id.in_(dag_ids)).all()
        try:
            for dag_model in dag_models:
                dag_model.is_paused = is_paused
            session.commit()
        except Exception:
            session.rollback()
            raise

    @classmethod
    @provide_session
    def deactivate_deleted_dags(cls, alive_dag_filelocs: List[str], session=None):
        """
        Set ``is_active=False`` on the DAGs for which the DAG files have been removed.
        Additionally change ``is_active=False`` to ``True`` if the DAG file exists.

        :param alive_dag_filelocs: file paths of alive DAGs
        :param session: ORM Session
        """
        log.debug("Deactivating DAGs (for which DAG files are deleted) from %s table ",
                  cls.__tablename__)

        dag_models = session.query(cls).all()
        try:
            for dag_model in dag_models:
                if dag_model.fileloc is not None:
                    # Normalise packaged-DAG paths before membership testing.
                    if correct_maybe_zipped(dag_model.fileloc) not in alive_dag_filelocs:
                        dag_model.is_active = False
                    else:
                        # If is_active is set as False and the DAG File still exists
                        # Change is_active=True
                        if not dag_model.is_active:
                            dag_model.is_active = True
                else:
                    continue
            session.commit()
        except Exception:
            session.rollback()
            raise
class DagContext:
    """
    DAG context is used to keep the current DAG when DAG is used as ContextManager.

    You can use DAG as context:

    .. code-block:: python

        with DAG(
            dag_id='example_dag',
            default_args=default_args,
            schedule_interval='0 0 * * *',
            dagrun_timeout=timedelta(minutes=60)
        ) as dag:

    If you do this the context stores the DAG and whenever new task is created, it will use
    such stored DAG as the parent DAG.
    """

    # Currently active context-manager DAG, with a stack of the DAGs it shadows.
    _context_managed_dag: Optional[DAG] = None
    _previous_context_managed_dags: List[DAG] = []

    @classmethod
    def push_context_managed_dag(cls, dag: DAG):
        """Make *dag* the active DAG, saving any previously active one."""
        if cls._context_managed_dag:
            cls._previous_context_managed_dags.append(cls._context_managed_dag)
        cls._context_managed_dag = dag

    @classmethod
    def pop_context_managed_dag(cls) -> Optional[DAG]:
        """Deactivate and return the current DAG, restoring the previous one."""
        popped = cls._context_managed_dag
        stack = cls._previous_context_managed_dags
        cls._context_managed_dag = stack.pop() if stack else None
        return popped

    @classmethod
    def get_current_dag(cls) -> Optional[DAG]:
        """Return the DAG currently active as a context manager, if any."""
        return cls._context_managed_dag
| 39.180977 | 109 | 0.607438 |
49101d72fc546fa093f4a440de795801c4a41279 | 6,168 | py | Python | school/school_spider.py | xuludev/Work | ac7418cb907df23c9a4613928c07b4d89b63aff6 | [
"MIT"
] | null | null | null | school/school_spider.py | xuludev/Work | ac7418cb907df23c9a4613928c07b4d89b63aff6 | [
"MIT"
] | null | null | null | school/school_spider.py | xuludev/Work | ac7418cb907df23c9a4613928c07b4d89b63aff6 | [
"MIT"
] | null | null | null | """
a web spider for http://xuexiao.pinwaiyi.com/hy/
"""
import logging.handlers
import urllib.parse
import pandas as pd
import pymysql.cursors
import requests
from bs4 import BeautifulSoup
# Module-wide rotating file logger (1 MiB per file, 5 backups).
LOG_FILE = 'baikespider.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 * 1024, backupCount=5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger('baikespider')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

# Bookkeeping for crawled URLs.
# NOTE(review): these lists are never appended to anywhere in this file —
# presumably left over from an earlier revision; confirm before relying on them.
success_url_list = []
fail_url_list = []
class School():
    """Value object holding the scraped attributes of one school."""

    def __init__(self, name='', href='', headmaster='', location='', region='', tel='', stage='', zipcode='',
                 pupil_num='', middler_num='', senior_num=''):
        self.name = name
        self.href = href
        self.headmaster = headmaster
        self.location = location
        self.region = region
        self.tel = tel
        self.stage = stage
        self.zipcode = zipcode
        self.pupil_num = pupil_num
        self.middler_num = middler_num
        self.senior_num = senior_num

    def __str__(self):
        """Render all fields as ``{ a ;b ;... }`` for log output."""
        fields = (
            str(self.name), self.href, str(self.headmaster), str(self.location),
            self.region, self.tel, self.stage, self.zipcode,
            self.pupil_num, self.middler_num, self.senior_num,
        )
        return "{ " + " ;".join(fields) + " }"
def read_school_names(excel_path):
    """Return the values of the '名称' (name) column of Sheet1 as a list.

    :param excel_path: path to the .xls/.xlsx workbook to read
    :return: list of cell values from the '名称' column
    """
    # BUGFIX: pandas renamed ``sheetname`` to ``sheet_name`` in 0.21 and
    # removed the old keyword in 1.0, so the previous call raises TypeError
    # on any modern pandas.
    df = pd.read_excel(excel_path, sheet_name="Sheet1", index_col=False)['名称']
    return df.tolist()
def crawl(max_pn):
    """Scrape list pages 1..max_pn of the school directory and store each
    school row in MySQL via :func:`insert_to_mysql`.

    :param max_pn: number of list pages to fetch
    """
    headers = {
        "Host": "xuexiao.pinwaiyi.com",
        "Referer": "http://xuexiao.pinwaiyi.com/hy/list.php?fid=1&page=1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
    }
    request_url_list = ["http://xuexiao.pinwaiyi.com/hy/list.php?fid=1&page=%d" % (pn + 1) for pn in range(max_pn)]
    connection = open_mysql()
    for request_url in request_url_list:
        school = School()
        response = requests.get(request_url, timeout=10, headers=headers)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, "html5lib")
            for table in soup.find_all("table", class_="list"):
                try:
                    href = table.find_all(class_="td2")[0].a['href']  # school detail page URL
                    name = table.find_all(class_="td2")[0].a.text.strip()  # school name
                    headmaster_and_location = table.find_all(class_="td2")[0].find_all('div', class_="other2")[
                        0].get_text().strip()
                    headmaster = headmaster_and_location.split(" ")[0].strip()  # headmaster
                    location = headmaster_and_location.split(" ")[1].strip()  # school address
                    # The detail page supplies the remaining fields in a fixed order.
                    detail = crawl_school_detail(href)
                    region = detail[0]        # region
                    tel = detail[1]           # contact phone
                    stage = detail[2]         # school stage
                    zipcode = detail[3]       # postal code
                    headmaster = detail[4]    # headmaster (detail page wins over list page)
                    pupil_num = detail[5]     # number of primary-school pupils
                    middler_num = detail[6]   # number of junior-high students
                    senior_num = detail[7]    # number of senior-high students
                    school.name = name
                    school.href = href
                    school.headmaster = headmaster
                    school.location = location
                    school.region = region
                    school.tel = tel
                    school.stage = stage
                    school.zipcode = zipcode
                    school.pupil_num = pupil_num
                    school.middler_num = middler_num
                    school.senior_num = senior_num
                    if school is not None and school.name != "":
                        insert_to_mysql(school, connection)
                        print(str(school) + " has been inserted successfully~")
                except Exception:
                    # BUGFIX: was a bare ``except: pass`` which silently hid
                    # every parse/detail failure (and even KeyboardInterrupt).
                    # Keep the best-effort behaviour but record the failure.
                    logger.exception("failed to parse a school entry on %s", request_url)
        else:
            # Use the configured module logger (was the root ``logging.error``)
            # so failures end up in the rotating log file.
            logger.error("ERROR %s", request_url)
    close_mysql(connection)
def crawl_school_detail(school_href):
    """Fetch a school's detail page and return its attribute values.

    Returns the text of every second <td> cell (the value cells) of the
    second detail table, in page order.  Returns an empty list when the
    request does not come back with HTTP 200 — NOTE(review): the caller
    indexes ``detail[0]``..``detail[7]``, so an empty result raises
    IndexError upstream (currently swallowed there); confirm intended.
    """
    detail = []
    headers = {
        "Host": "xuexiao.pinwaiyi.com",
        "Referer": "http://xuexiao.pinwaiyi.com/hy/list.php?fid=1&page=1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
    }
    response = requests.get(school_href, headers=headers, timeout=10)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, "html5lib")
        root_td = soup.find_all(class_="content")[-1]
        index = 0
        # Cells alternate label/value; keep only the odd-indexed value cells.
        for each_td in root_td.find_all("tbody")[1].find_all("td"):
            if index % 2 == 1:
                detail.append(each_td.get_text().strip())
            index += 1
    else:
        logging.error(school_href)
    return detail
def open_mysql():
    """Open and return a pymysql connection to the local ``hzau`` database.

    NOTE(review): credentials are hard-coded in source; move them to
    environment variables or a config file before sharing this script.
    """
    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='19930620',
                                 db='hzau',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    return connection
def insert_to_mysql(school, connection):
    """Insert one ``School`` row into ``school_info`` and commit.

    Failures are logged (previously a bare ``except: pass`` hid every DB
    error) but not re-raised, so a single bad row does not abort the crawl.

    :param school: populated ``School`` instance
    :param connection: open pymysql connection
    """
    try:
        with connection.cursor() as cursor:
            # Parameterized statement — never interpolate scraped values.
            sql = "INSERT INTO `school_info` (`name`, `href`, `headmaster`, `location`, `region`, `tel`, `stage`, " \
                  "`zipcode`, `pupil_num`, `middler_num`, `senior_num`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"
            cursor.execute(sql, (
                school.name, school.href, school.headmaster, school.location, school.region, school.tel,
                school.stage, school.zipcode, school.pupil_num, school.middler_num, school.senior_num))
            connection.commit()
    except Exception:
        # BUGFIX: keep best-effort semantics, but record what went wrong.
        logging.exception("failed to insert school %s", school.name)
def close_mysql(connection):
    """Close the given pymysql connection."""
    connection.close()
if __name__ == '__main__':
    # 147 list pages were available on the site at crawl time; adjust as needed.
    crawl(147)
| 37.609756 | 139 | 0.554475 |
abb08b442f09f7bf4cfb96474e67bf0b245f9f5b | 3,236 | py | Python | main.py | mkrum/das | 76559ffa3055d718590e9c457675cdf9a885f656 | [
"MIT"
] | null | null | null | main.py | mkrum/das | 76559ffa3055d718590e9c457675cdf9a885f656 | [
"MIT"
] | null | null | null | main.py | mkrum/das | 76559ffa3055d718590e9c457675cdf9a885f656 | [
"MIT"
] | null | null | null | import pickle as pkl
from dataclasses import dataclass
import torch
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from data import make_binary_datasets
from mllg import LogWriter, TestInfo, TrainInfo, ValidationInfo
from yamlargs.parser import load_config_and_create_parser, parse_args_into_config
import config
def eval_model(model, test_dl, sample_size):
    """Estimate top-1 accuracy of *model* over roughly *sample_size* examples.

    Iterates *test_dl* until more than ``sample_size`` predictions have been
    scored (assumes the loader yields at least one batch), then returns
    correct / total.
    """
    n_correct = 0.0
    n_seen = 0.0
    with torch.no_grad():
        for inputs, targets in test_dl:
            logits = model(inputs)
            predictions = torch.argmax(logits, dim=1).cpu()
            n_correct += (predictions == targets).sum().item()
            n_seen += predictions.shape[0]
            if n_seen > sample_size:
                break
    return n_correct / n_seen
@dataclass
class StandardTrain:
    """Plain supervised training loop with periodic held-out evaluation.

    Hyper-parameters are the dataclass fields; the instance is invoked as a
    callable with the model/data to run training.  Assumes a CUDA device is
    available (labels are moved with ``.cuda()`` and the model is expected to
    already be on the GPU).
    """
    batch_size: int = 1024   # per-step batch size for both loaders
    num_epochs: int = 1      # full passes over the training data
    eval_freq: int = 1000    # evaluate every this many training batches
    lr: float = 1e-4         # Adam learning rate

    def __call__(self, dfa, model, tokenizer, train_data, test_data, logger):
        train_dl = DataLoader(
            train_data,
            batch_size=self.batch_size,
            collate_fn=train_data.create_collate(tokenizer),
            shuffle=True,
        )
        test_dl = DataLoader(
            test_data,
            batch_size=self.batch_size,
            collate_fn=test_data.create_collate(tokenizer),
            shuffle=True,
        )

        opt = optim.Adam(model.parameters(), lr=self.lr)

        # Baseline accuracy before any training step.
        eval_acc = eval_model(model, test_dl, 1000)
        logger.log_info(ValidationInfo(0, 0, [TestInfo("ACC", eval_acc)]))
        for epoch in range(self.num_epochs):
            for (batch_idx, (x, y)) in enumerate(train_dl):
                opt.zero_grad()
                out = model(x)
                loss = F.cross_entropy(out, y.cuda())
                loss.backward()
                logger.log_info(TrainInfo(epoch, batch_idx, loss.item()))
                opt.step()

                # Periodic evaluation (skipped at batch 0: baseline above).
                if batch_idx % self.eval_freq == 0 and batch_idx > 0:
                    eval_acc = eval_model(model, test_dl, 1000)
                    logger.log_info(
                        ValidationInfo(epoch, batch_idx, [TestInfo("ACC", eval_acc)])
                    )

            # End-of-epoch evaluation.
            eval_acc = eval_model(model, test_dl, 1000)
            logger.log_info(
                ValidationInfo(epoch, batch_idx, [TestInfo("ACC", eval_acc)])
            )
if __name__ == "__main__":
    # Parse the YAML config plus CLI overrides; log_path doubles as run dir.
    config, parser = load_config_and_create_parser()
    parser.add_argument("log_path")
    args = parser.parse_args()

    config = parse_args_into_config(config, args)
    logger = LogWriter(args.log_path)
    config_data = config.to_json()
    config_data["type"] = "config"
    logger.log_str(str(config_data))
    dfa = config["DFA"](config["datasets"]["max_size"])
    # Persist the resolved config, the DFA diagram and the DFA itself so the
    # run can be reproduced/inspected later.
    with open(f"{args.log_path}/config.yml", "w") as cfg_save:
        cfg_save.write(config.to_yaml())
    dfa.show_diagram(path=f"{args.log_path}/dfa.png")
    # BUGFIX: the pickle file handle was opened inline and never closed;
    # use a context manager so it is flushed and closed deterministically.
    with open(f"{args.log_path}/dfa.pkl", "wb") as dfa_file:
        pkl.dump(dfa, dfa_file)
    train_data, test_data = config["datasets"](dfa)
    model = config["model"]()
    model = model.cuda()
    tokenizer = config["tokenizer"](dfa)
    StandardTrain()(dfa, model, tokenizer, train_data, test_data, logger)
| 28.637168 | 85 | 0.615266 |
877ecf1a02d45559b8d28c871dc33786eb727bda | 1,126 | py | Python | src/main/python/tests/test_ts.py | marregui/mygupsql | 2adca0da4bd95932fa08e2b122aa4f6f6713ad0c | [
"Apache-2.0"
] | null | null | null | src/main/python/tests/test_ts.py | marregui/mygupsql | 2adca0da4bd95932fa08e2b122aa4f6f6713ad0c | [
"Apache-2.0"
] | 1 | 2020-12-29T18:03:45.000Z | 2020-12-29T18:03:45.000Z | src/main/python/tests/test_ts.py | marregui/mygupsql | 2adca0da4bd95932fa08e2b122aa4f6f6713ad0c | [
"Apache-2.0"
] | null | null | null | # Licensed to Miguel Arregui ("marregui") under one or more contributor
# license agreements. See the LICENSE file distributed with this work
# for additional information regarding copyright ownership. You may
# obtain a copy at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2019 - 2022, Miguel Arregui a.k.a. marregui
#
import unittest
from quest import (
to_timestamp,
from_timestamp,
to_date,
from_date
)
class TimestampTest(unittest.TestCase):
    """Round-trip tests for quest's timestamp/date conversion helpers."""

    def test_timestamp(self):
        # to_timestamp/from_timestamp should round-trip at microsecond precision.
        timestamp_value = to_timestamp('2021-10-01 09:38:42.123456')
        self.assertEqual('2021-10-01 09:38:42.123456', from_timestamp(timestamp_value))

    def test_date(self):
        # to_date/from_date should round-trip an ISO date string.
        date_value = to_date('2021-10-01')
        self.assertEqual('2021-10-01', from_date(date_value))
c25988b9e6d8bb9069ee293a3fe1a1caa7bf3fe1 | 138 | py | Python | zk/datadog_checks/zk/__about__.py | flowcommerce/integrations-core | c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72 | [
"BSD-3-Clause"
] | 1 | 2021-01-28T01:45:37.000Z | 2021-01-28T01:45:37.000Z | zk/datadog_checks/zk/__about__.py | flowcommerce/integrations-core | c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72 | [
"BSD-3-Clause"
] | 3 | 2021-01-27T04:56:40.000Z | 2021-02-26T06:29:22.000Z | zk/datadog_checks/zk/__about__.py | flowcommerce/integrations-core | c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72 | [
"BSD-3-Clause"
] | 1 | 2021-04-07T16:58:27.000Z | 2021-04-07T16:58:27.000Z | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '4.0.0'
| 23 | 59 | 0.717391 |
12f60063c35ca7aa7ad55da1ab46b66839f1fd3b | 335 | py | Python | reboot.py | klaslofstedt/alfred_home_assistent | d7f79786336f53a2867037de3b64d6fe375062a0 | [
"Apache-2.0"
] | 1 | 2016-02-14T23:53:37.000Z | 2016-02-14T23:53:37.000Z | reboot.py | klaslofstedt/alfred_home_assistent_webapp | d7f79786336f53a2867037de3b64d6fe375062a0 | [
"Apache-2.0"
] | null | null | null | reboot.py | klaslofstedt/alfred_home_assistent_webapp | d7f79786336f53a2867037de3b64d6fe375062a0 | [
"Apache-2.0"
] | null | null | null | import datetime
import time
import os
# Poll the wall clock twice a second until it reads exactly 06:00:00, then
# reboot the machine.  The 0.5 s period guarantees at least one sample falls
# inside the one-second match window.
reboot_time = datetime.datetime.now().time().strftime("%H:%M:%S")
while reboot_time != "06:00:00":
    reboot_time = datetime.datetime.now().time().strftime("%H:%M:%S")
    # BUGFIX: these were Python 2 ``print`` statements (a SyntaxError under
    # Python 3); the single-argument call form works on both interpreters.
    print(reboot_time)
    time.sleep(0.5)
print("rebooting")
os.system("/usr/bin/sudo /sbin/shutdown -r now")
# os.system("sudo reboot")
| 25.769231 | 66 | 0.707463 |
01033ad82d44cee3b4e64f780e182ed97103db13 | 7,214 | py | Python | sa_rbac_aggregator.py | nezdali/k8s-api | f75b6ff648fccd8fb4ef8d0aa1b9c09e9c4b22cd | [
"MIT"
] | null | null | null | sa_rbac_aggregator.py | nezdali/k8s-api | f75b6ff648fccd8fb4ef8d0aa1b9c09e9c4b22cd | [
"MIT"
] | null | null | null | sa_rbac_aggregator.py | nezdali/k8s-api | f75b6ff648fccd8fb4ef8d0aa1b9c09e9c4b22cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Scrape ClusterRole/Binding, ServiceAccount data
and aggregate it per Pod
"""
import logging
from kubernetes import client, config
from kubernetes.client.rest import ApiException
class GetMetadata:
"""Get metadata via k8s api class"""
    def __init__(self):
        """Load kubeconfig and build the Core and RBAC API clients."""
        # Swap to load_incluster_config() when running inside a cluster pod.
        #config.load_incluster_config()
        config.load_kube_config()
        self.v1 = client.CoreV1Api()
        self.v2 = client.RbacAuthorizationV1Api()
def get_role_list(self):
"""Get Roles list from k8s api"""
try:
thread = self.v2.list_role_for_all_namespaces(
watch=False, async_req=True)
list_roles = thread.get()
except ApiException as err:
logging.debug(f"Exception when calling CoreV1Api->get_api_group: {err}\n")
return list_roles
def get_role_binding_list(self):
"""Get Role Binding list from k8s api"""
try:
thread = self.v2.list_role_binding_for_all_namespaces(
watch=False, async_req=True)
list_role_binding = thread.get()
except ApiException as err:
logging.debug(f"Exception when calling CoreV1Api->get_api_group: {err}\n")
return list_role_binding
def get_cluster_role_list(self):
"""Get Cluster roles list from k8s api"""
try:
thread = self.v2.list_cluster_role(
watch=False, async_req=True)
list_cluster_roles = thread.get()
except ApiException as err:
logging.debug(f"Exception when calling CoreV1Api->get_api_group: {err}\n")
return list_cluster_roles
def get_cluster_role_binding_list(self):
"""Get Cluster role Binding list from k8s api"""
try:
thread = self.v2.list_cluster_role_binding(
watch=False, async_req=True)
list_cluster_role_binding = thread.get()
except ApiException as err:
logging.debug(f"Exception when calling CoreV1Api->get_api_group: {err}\n")
return list_cluster_role_binding
def get_pods_list(self):
"""Get Pods list from k8s api"""
try:
thread = self.v1.list_pod_for_all_namespaces(
watch=False, async_req=True)
list_pods = thread.get()
except ApiException as err:
logging.debug(f"Exception when calling CoreV1Api->get_api_group: {err}\n")
list_pods = {}
return list_pods
def get_pod_metadata(self, list_pods):
"""Get Pod's metadata from Pods list"""
pods_dict = {}
for i in list_pods.items:
service_account = i.spec.service_account
pod_name = i.metadata.name
if pod_name not in pods_dict:
pods_dict[pod_name] = service_account
return pods_dict
def get_role_binding_metadata(self):
"""Get Role Binding metadata from k8s api"""
role_binding_metadata_dict = {}
list_role_binding = self.get_role_binding_list()
for i in list_role_binding.items:
crb_role_ref = i.role_ref
crb_subjects = i.subjects
role_binding_name = vars(crb_role_ref)
role_binding_name = role_binding_name['_name']
if crb_subjects:
for sa in crb_subjects:
crb_service_account = vars(sa)
crb_service_account_name = crb_service_account['_name']
role_binding_metadata_dict[role_binding_name] = crb_service_account_name
return role_binding_metadata_dict
    def get_role_metadata(self, list_pods):
        """Aggregate data from RoleBinding and Role,
        then compare this data with Pod's serviceAccount

        Prints one line per (pod, rule) whose serviceAccount is bound to a
        Role via a RoleBinding of the same name.
        """
        pods_dict = self.get_pod_metadata(list_pods)
        list_roles = self.get_role_list()
        role_binding_metadata_dict = self.get_role_binding_metadata()
        for role in list_roles.items:
            role_name = role.metadata.name
            # Check if there is a RoleBinding bind to the Role
            for role_binding_name, crb_service_account_name in role_binding_metadata_dict.items():
                if role_binding_name != role_name:
                    continue
                cr_rules = role.rules
                for rule in cr_rules:
                    # vars() exposes the client object's private fields
                    # (_resources, _verbs) used in the report line below.
                    rules = vars(rule)
                    for pod_name, pod_service_account_name in pods_dict.items():
                        if pod_name:
                            if pod_service_account_name == crb_service_account_name:
                                print(f"Pod: {pod_name}, ServiceAccount:{crb_service_account_name},Resources:{rules['_resources']},Verbs:{rules['_verbs']}")
def get_cluster_role_binding_metadata(self):
"""Get Cluster Role Binding metadata from k8s api"""
cluster_role_binding_metadata_dict = {}
list_cluster_role_binding = self.get_cluster_role_binding_list()
for i in list_cluster_role_binding.items:
crb_role_ref = i.role_ref
crb_subjects = i.subjects
role_binding_name = vars(crb_role_ref)
role_binding_name = role_binding_name['_name']
if crb_subjects:
for sa in crb_subjects:
crb_service_account = vars(sa)
crb_service_account_name = crb_service_account['_name']
cluster_role_binding_metadata_dict[role_binding_name] = crb_service_account_name
return cluster_role_binding_metadata_dict
    def get_cluster_role_metadata(self, list_pods):
        """Aggregate data from ClusterRoleBinding and ClusterRole,
        then compare this data with Pod's serviceAccount

        Prints one line per (pod, rule) whose serviceAccount is bound to a
        ClusterRole via a ClusterRoleBinding of the same name.
        """
        pods_dict = self.get_pod_metadata(list_pods)
        list_cluster_roles = self.get_cluster_role_list()
        cluster_role_binding_metadata_dict = self.get_cluster_role_binding_metadata()
        for role in list_cluster_roles.items:
            cluster_role_name = role.metadata.name
            # Check if there is a ClusterRoleBinding bind to the ClusterRole
            for role_binding_name, crb_service_account_name in cluster_role_binding_metadata_dict.items():
                if role_binding_name != cluster_role_name:
                    continue
                cr_rules = role.rules
                for rule in cr_rules:
                    # vars() exposes the client object's private fields
                    # (_resources, _verbs) used in the report line below.
                    rules = vars(rule)
                    for pod_name, pod_service_account_name in pods_dict.items():
                        if pod_name:
                            if pod_service_account_name == crb_service_account_name:
                                print(f"Pod: {pod_name}, ServiceAccount:{crb_service_account_name},Resources:{rules['_resources']},Verbs:{rules['_verbs']}")
def main():
    """Entry point: fetch pods once, then print both RBAC aggregations."""
    metadata = GetMetadata()
    pods = metadata.get_pods_list()
    print(f"####################### Roles and RoleBinding aggregation #######################")
    metadata.get_role_metadata(pods)
    print(f"####################### ClusterRoles and ClusterRoleBinding aggregation #######################")
    metadata.get_cluster_role_metadata(pods)
# Run the report when executed as a script (importing has no side effects).
if __name__ == "__main__":
    main()
| 44.257669 | 156 | 0.626421 |
4a8d3bf8fbd403fd8ac1e39184a7e0aec0ec0f5a | 7,180 | py | Python | openGaussBase/testcase/SQL/DDL/gin_index/Opengauss_Function_DDL_Gin_Index_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DDL/gin_index/Opengauss_Function_DDL_Gin_Index_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DDL/gin_index/Opengauss_Function_DDL_Gin_Index_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 功能
Case Name : 分区表创建索引后dump, 恢复数据
Description :
1.执行create_partition.sql创建数据
2.创建索引
3.dump数据
4.恢复数据
5.查询索引
6.查询
Expect :
1.创建表并插入数据成功
2.创建索引成功
3.dump成功
4.恢复数据成功
5.索引存在
6.正常使用索引且查询结果正确
History :
"""
import os
import unittest
from yat.test import Node
from yat.test import macro
import sys
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
logger = Logger()
common = Common()
class set_search_limit(unittest.TestCase):
    """Opengauss_Function_DDL_Gin_Index_Case0037.

    Creates a partitioned table with a local GIN index, dumps the database
    with gs_dump, restores it, and verifies the index survives and is used.
    Requires a live openGauss cluster reachable via the yat Node config.
    """

    # Cluster handles and paths shared by all test phases.
    dbPrimaryUserNode = Node(node='PrimaryDbUser')
    dbPrimaryRootNode = Node(node='PrimaryRoot')
    DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
    DB_ENV_PATH = macro.DB_ENV_PATH
    commsh = CommonSH('PrimaryDbUser')
    target_path = os.path.join(macro.DB_INSTANCE_PATH,'testscript')
    sql_path = os.path.join(target_path, 'create_partition.sql')
    dump_file_name = 'Opengauss_Gin_Index_0037.tar'

    def setUp(self):
        """Vacuum the cluster and create a fresh database for the restore."""
        logger.info("-----------this is setup-----------")
        logger.info("-----------Opengauss_Gin_Index_0037 start-----------")
        self.Constant = Constant()
        self.nodelist = ['Standby1DbUser', 'Standby2DbUser']
        result = self.commsh.get_db_cluster_status('detail')
        logger.info(result)
        # Count standbys from the status text; +1 for the primary.
        self.node_num = result.count('Standby Normal') + 1
        self.comshsta = []
        logger.info(self.node_num)
        for i in range(int(self.node_num) - 1):
            self.comshsta.append(CommonSH(self.nodelist[i]))
        logger.info("-------------------------vacuum-----------------------------")
        result = self.commsh.execut_db_sql('vacuum;')
        logger.info(result)
        self.assertIn('VACUUM', result)
        self.dump_file = os.path.join(macro.DB_INSTANCE_PATH, self.dump_file_name)
        self.dbname = 'dbsys_restore_db'
        logger.info("---------------------create database--------------------")
        result = self.commsh.execut_db_sql(f'drop database if exists {self.dbname};create database {self.dbname};')
        logger.info(result)
        self.assertIn(self.Constant.CREATE_DATABASE_SUCCESS, result)

    def test_set_search_limit(self):
        """Create data + GIN index, dump, restore, then verify index usage."""
        logger.info('---------------execute create_partition.sql---------------------')
        common.scp_file(self.dbPrimaryUserNode, 'create_partition.sql', self.target_path)
        sql_bx_cmd = f'''
        source {macro.DB_ENV_PATH};
        gsql -d {self.dbname} -p {self.dbPrimaryUserNode.db_port} -f {self.sql_path}
        '''
        logger.info(sql_bx_cmd)
        sql_bx_msg = self.dbPrimaryUserNode.sh(sql_bx_cmd).result()
        logger.info(sql_bx_msg)
        logger.info('------------------create index-------------------------')
        sql = 'CREATE INDEX test_gin_student_index_row2 ON test_gin_student_row USING GIN(to_tsvector(\'english\', data1)) LOCAL ( PARTITION data2_index_1, PARTITION data2_index_2, PARTITION data2_index_3 ) ;'
        msg = self.commsh.execut_db_sql(sql, dbname=self.dbname)
        logger.info(msg)
        self.assertTrue(msg.find(self.Constant.CREATE_INDEX_SUCCESS_MSG)>-1)
        logger.info('--------------------dump file-------------------------')
        dumpCmd = '''
        source {source_path};
        gs_dump {dbname} -p {port} -f {file_name} -F t'''.format(source_path = self.DB_ENV_PATH, dbname = self.dbname,port=self.dbPrimaryUserNode.db_port, file_name=self.dump_file)
        logger.info(dumpCmd)
        dumpMsg = self.dbPrimaryUserNode.sh(dumpCmd).result()
        logger.info(dumpMsg)
        flag ='dump database ' + self.dbname +' successfully'
        self.assertIn(flag, dumpMsg)
        logger.info('--------------------restore file without -c--------------------------------------------------')
        dumpResult = self.commsh.restore_file(self.dump_file, ' ', self.dbname)
        logger.info(dumpResult)
        self.assertTrue(dumpResult.find(self.Constant.RESTORE_SUCCESS_MSG)>-1)
        logger.info('------------------check index-------------------------')
        sql = '\di'
        msg = self.commsh.execut_db_sql(sql, dbname=self.dbname)
        logger.info(msg)
        self.assertTrue(msg.find('test_gin_student_index_row2')>-1)
        logger.info('------------------querry-------------------------')
        # Disable seqscan so the planner is forced to consider the GIN index.
        sql = "SET ENABLE_SEQSCAN=off;explain SELECT * FROM test_gin_student_row WHERE to_tsvector(\'english\', data1) @@ to_tsquery(\'english\', 'Mexico') ORDER BY num, data1, data2;SELECT * FROM test_gin_student_row WHERE to_tsvector(\'english\', data1) @@ to_tsquery(\'english\', \'Mexico\') ORDER BY num, data1, data2;"
        msg = self.commsh.execut_db_sql(sql, dbname=self.dbname)
        logger.info(msg)
        self.assertTrue(msg.find('13 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico')>-1)
        self.assertTrue(msg.find('14 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico')>-1)
        self.assertTrue(msg.find('5001 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico')>-1)
        self.assertTrue(msg.find('Bitmap Index Scan on test_gin_student_index_row2')>-1)

    def tearDown(self):
        """Rebuild standbys and remove every artifact created by the test."""
        logger.info('----------------this is tearDown-----------------------')
        logger.info("-----------------build standby------------------------")
        for i in range(int(self.node_num) - 1):
            result = self.comshsta[i].execute_gsctl('build', self.Constant.BUILD_SUCCESS_MSG, '-b full')
            logger.info(result)
        logger.info('----------------drop table-----------------------')
        sql = 'drop table test_gin_student_row;'
        msg = self.commsh.execut_db_sql(sql, dbname=self.dbname)
        logger.info(msg)
        logger.info("-----------delete scripts-----------")
        cmd = 'rm -rf ' + self.target_path
        self.dbPrimaryRootNode.sh(cmd)
        logger.info("-----------delete dumpfile-----------")
        cmd = 'rm -rf ' + self.dump_file
        self.dbPrimaryRootNode.sh(cmd)
        logger.info("---------------------drop database--------------------")
        result = self.commsh.execut_db_sql(f'drop database {self.dbname};')
        logger.info(result)
        logger.info("-----------Opengauss_Gin_Index_0037 end-----------")
| 47.236842 | 323 | 0.605292 |
308d47621e1139226234acd8c7a722408efbbe86 | 753 | py | Python | new/tidebilling/tidebilling/urls.py | ashraful88/tide-billing | cd11d273889c39bbb4768060fc61e9dfef1972f9 | [
"MIT"
] | 1 | 2020-07-27T17:47:26.000Z | 2020-07-27T17:47:26.000Z | new/tidebilling/tidebilling/urls.py | ashraful88/tide-billing | cd11d273889c39bbb4768060fc61e9dfef1972f9 | [
"MIT"
] | 4 | 2021-06-04T22:32:50.000Z | 2022-03-12T00:17:15.000Z | new/tidebilling/tidebilling/urls.py | ashraful88/tide-billing | cd11d273889c39bbb4768060fc61e9dfef1972f9 | [
"MIT"
] | 1 | 2020-03-16T22:10:06.000Z | 2020-03-16T22:10:06.000Z | """tidebilling URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Route table: only the Django admin site is exposed by this project.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.227273 | 77 | 0.710491 |
85e2bd96f56b68de80e799edc3797a14c41282a3 | 2,172 | py | Python | tuprolog/solve/flags/__init__.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 1 | 2021-08-07T06:29:28.000Z | 2021-08-07T06:29:28.000Z | tuprolog/solve/flags/__init__.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 14 | 2021-09-16T13:25:12.000Z | 2022-01-03T10:12:22.000Z | tuprolog/solve/flags/__init__.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 1 | 2021-12-22T00:25:32.000Z | 2021-12-22T00:25:32.000Z | from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.solve.flags as _flags
from tuprolog.core import Term
from tuprolog.jvmutils import kpair, jmap, jarray, Pair
from typing import Iterable, Union, Mapping
# Re-export the JVM flag classes under their Python names.
DoubleQuotes = _flags.DoubleQuotes
FlagStore = _flags.FlagStore
LastCallOptimization = _flags.LastCallOptimization
MaxArity = _flags.MaxArity
NotableFlag = _flags.NotableFlag
Unknown = _flags.Unknown

# A flag is simply a (name, value) Kotlin Pair.
Flag = Pair

EMPTY_FLAG_STORE: FlagStore = FlagStore.EMPTY
DEFAULT_FLAG_STORE: FlagStore = FlagStore.DEFAULT

# NOTE: the names below are deliberately rebound from the classes above to
# their singleton INSTANCEs, shadowing the class bindings.
DoubleQuotes: NotableFlag = DoubleQuotes.INSTANCE
LastCallOptimization: NotableFlag = LastCallOptimization.INSTANCE
MaxArity: NotableFlag = MaxArity.INSTANCE
Unknown: NotableFlag = Unknown.INSTANCE
def flag(first: Union[str, NotableFlag, Iterable], value: Term = None) -> Flag:
    """Build a flag (name, value) pair.

    Accepts one of:
    * a ``NotableFlag`` (optionally with an explicit ``value`` override),
    * a flag-name string together with a ``value``,
    * an iterable pair with no ``value`` argument (converted via ``kpair``).

    Raises:
        ValueError: if the arguments do not match one of the forms above.
    """
    if isinstance(first, NotableFlag):
        # NotableFlags carry their own default; `value` overrides it.
        return first.toPair() if value is None else first.to(value)
    if isinstance(first, str):
        if value is None:
            raise ValueError("Argument value is None")
        return Flag(first, value)
    if isinstance(first, Iterable):
        if value is not None:
            # Previously this case fell through to a misleading
            # "not iterable nor str" error even though `first` IS iterable.
            raise ValueError("Argument value must be None when first is an iterable")
        return kpair(first)
    raise ValueError("Argument first is not a NotableFlag, str, or iterable")
def flag_store(*flags: Union[NotableFlag, Flag, Iterable, FlagStore], **kwargs: Mapping[str, Term]):
    """Assemble a FlagStore from notable flags, flag pairs, iterables,
    other stores, and keyword (name=value) entries."""
    plain = []
    notable = []
    stores = []
    for item in flags:
        if isinstance(item, NotableFlag):
            notable.append(item)
        elif isinstance(item, Flag):
            plain.append(item)
        elif isinstance(item, FlagStore):
            stores.append(item)
        else:
            # Anything else is coerced through flag() (e.g. an iterable pair).
            plain.append(flag(item))
    combined = FlagStore.of(jarray(NotableFlag)@notable)
    combined = combined.plus(FlagStore.of(jarray(Flag)@plain))
    combined = combined.plus(FlagStore.of(jmap(kwargs)))
    for extra in stores:
        combined = combined.plus(extra)
    return combined
logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.flags.*")
| 28.207792 | 100 | 0.708103 |
de19f3beaa3567f4fcd26eeb01ae84659a72c7a4 | 10,676 | py | Python | NEST-14.0-FPGA/examples/neuronview/neuronview.py | OpenHEC/SNN-simulator-on-PYNQcluster | 14f86a76edf4e8763b58f84960876e95d4efc43a | [
"MIT"
] | 45 | 2019-12-09T06:45:53.000Z | 2022-01-29T12:16:41.000Z | NEST-14.0-FPGA/examples/neuronview/neuronview.py | zlchai/SNN-simulator-on-PYNQcluster | 14f86a76edf4e8763b58f84960876e95d4efc43a | [
"MIT"
] | 2 | 2020-05-23T05:34:21.000Z | 2021-09-08T02:33:46.000Z | NEST-14.0-FPGA/examples/neuronview/neuronview.py | OpenHEC/SNN-simulator-on-PYNQcluster | 14f86a76edf4e8763b58f84960876e95d4efc43a | [
"MIT"
] | 10 | 2019-12-09T06:45:59.000Z | 2021-03-25T09:32:56.000Z | # -*- coding: utf-8 -*-
#
# neuronview.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk # noqa
import pango # noqa
import gobject # noqa
from matplotlib.figure import Figure # noqa
from matplotlib.backends.backend_gtkagg import \
FigureCanvasGTKAgg as FigureCanvas # noqa
import matplotlib.gridspec as gridspec # noqa
import os # noqa
import nest # noqa
# Models pre-selected in the combo boxes when the GUI starts.
default_neuron = "iaf_psc_alpha"
default_stimulator = "dc_generator"
class Main():
    """GTK main window: pick a stimulator/neuron pair, edit their parameters,
    run a NEST simulation, and plot the resulting spikes and membrane
    potential with matplotlib.

    Constructing the class builds the UI and enters the GTK main loop.
    """

    def __init__(self):
        self._gladefile = "neuronview.glade"
        self._builder = gtk.Builder()
        self._builder.add_from_file(self._gladefile)
        self._builder.connect_signals(self)
        self._win = self._builder.get_object("mainwindow")
        self._win.resize(900, 700)

        box = self._builder.get_object("box5")
        self._stimulatordictview = DictView()
        self._builder.get_object("scrolledwindow2").add(
            self._stimulatordictview)

        box = self._builder.get_object("box4")
        self._neurondictview = DictView()
        self._builder.get_object("scrolledwindow3").add(self._neurondictview)

        self.populate_comboboxes()

        # Embed a matplotlib canvas, matching its background to the GTK theme.
        self._figure = Figure(figsize=(5, 4), dpi=100)
        canvas = FigureCanvas(self._figure)
        canvas.set_size_request(200, 250)
        canvas.show()

        box = self._builder.get_object("box3")
        bg_style = box.get_style().bg[gtk.STATE_NORMAL]
        gtk_color = (bg_style.red_float, bg_style.green_float,
                     bg_style.blue_float)
        self._figure.set_facecolor(gtk_color)
        box.pack_start(canvas)

        self._win.show()
        gtk.main()

    def update_figure(self, spikes, potentials):
        """Redraw the spike raster (top) and membrane potential (bottom)."""
        if nest.GetKernelStatus("time") != 0.0:
            self._figure.clear()

            gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])

            ax0 = self._figure.add_subplot(gs[0])
            ax0.plot(spikes[0]["times"], [1] * len(spikes[0]["times"]), ".")
            ax0.set_yticks([])
            ax0.set_xticks([])

            ax1 = self._figure.add_subplot(gs[1])
            ax1.plot(potentials[0]["times"], potentials[0]["V_m"], "r-")
            ax1.set_ylabel("$V_m$ (mV)")
            ax1.set_xlabel("time (s)")

            # plt.tight_layout()

            self._figure.canvas.draw()

    def filter_statusdict(self, params):
        """Remove read-only/internal entries so only editable params remain."""
        for key in ["archiver_length", "available", "capacity",
                    "elementsize", "frozen", "global_id",
                    "instantiations", "is_refractory", "local",
                    "model", "element_type", "offset", "origin",
                    "receptor_types", "recordables",
                    "refractory_input", "rmax", "state", "t_spike",
                    "thread", "tlast", "tspike", "type_id", "vp",
                    "ymod"]:
            if key in params.keys():
                params.pop(key)

    def populate_comboboxes(self):
        """Fill the model and documentation combo boxes from NEST's models."""
        neuronmodels = self._builder.get_object("neuronmodels")
        neuronmodelsliststore = neuronmodels.get_model()

        stimulatormodels = self._builder.get_object("stimulatormodels")
        stimulatormodelsliststore = stimulatormodels.get_model()

        neuron_it = None
        stimulator_it = None

        models = nest.Models("nodes")
        models = [x for x in models if
                  x not in ["correlation_detector", "sli_neuron",
                            "iaf_psc_alpha_norec", "parrot_neuron",
                            "parrot_neuron_ps"]]

        for entry in models:
            try:
                entrytype = nest.GetDefaults(entry)["element_type"]
            except Exception:
                # Narrowed from a bare `except:`, which also swallowed
                # SystemExit and KeyboardInterrupt.
                entrytype = "unknown"

            if entrytype == "neuron":
                it = neuronmodelsliststore.append([entry])
                if entry == default_neuron:
                    neuron_it = it
            elif entrytype == "stimulator":
                it = stimulatormodelsliststore.append([entry])
                if entry == default_stimulator:
                    stimulator_it = it

        cell = gtk.CellRendererText()

        neuronmodels.pack_start(cell, True)
        neuronmodels.add_attribute(cell, 'text', 0)
        neuronmodels.set_active_iter(neuron_it)

        stimulatormodels.pack_start(cell, True)
        stimulatormodels.add_attribute(cell, 'text', 0)
        stimulatormodels.set_active_iter(stimulator_it)

        docviewcombo = self._builder.get_object("docviewcombo")
        docviewcomboliststore = docviewcombo.get_model()
        docviewcomboliststore.append(["Stimulating device"])
        it = docviewcomboliststore.append(["Neuron"])
        docviewcombo.pack_start(cell, True)
        docviewcombo.add_attribute(cell, 'text', 0)
        docviewcombo.set_active_iter(it)

    def get_help_text(self, name):
        """Return the contents of NEST's .hlp file for `name`, if present."""
        nest.sli_run("statusdict /prgdocdir get")
        docdir = nest.sli_pop()

        helptext = "No documentation available"

        for subdir in ["cc", "sli"]:
            filename = os.path.join(docdir, "help", subdir, name + ".hlp")
            if os.path.isfile(filename):
                # Context manager added: the original leaked the file handle.
                with open(filename, 'r') as f:
                    helptext = f.read()

        return helptext

    def on_model_selected(self, widget):
        """Combo-box callback: show the selected model's editable params."""
        liststore = widget.get_model()
        model = liststore.get_value(widget.get_active_iter(), 0)

        statusdict = nest.GetDefaults(model)
        self.filter_statusdict(statusdict)

        if widget == self._builder.get_object("neuronmodels"):
            self._neurondictview.set_params(statusdict)

        if widget == self._builder.get_object("stimulatormodels"):
            self._stimulatordictview.set_params(statusdict)

        self.on_doc_selected(self._builder.get_object("docviewcombo"))

    def on_doc_selected(self, widget):
        """Combo-box callback: display help text for the chosen model kind."""
        liststore = widget.get_model()
        doc = liststore.get_value(widget.get_active_iter(), 0)

        docview = self._builder.get_object("docview")
        docbuffer = gtk.TextBuffer()

        if doc == "Neuron":
            combobox = self._builder.get_object("neuronmodels")

        if doc == "Stimulating device":
            combobox = self._builder.get_object("stimulatormodels")

        liststore = combobox.get_model()
        model = liststore.get_value(combobox.get_active_iter(), 0)
        docbuffer.set_text(self.get_help_text(model))
        docview.set_buffer(docbuffer)

        docview.modify_font(pango.FontDescription("monospace 10"))

    def on_simulate_clicked(self, widget):
        """Build a stimulator->neuron network, simulate, and plot results."""
        nest.ResetKernel()

        combobox = self._builder.get_object("stimulatormodels")
        liststore = combobox.get_model()
        stimulatormodel = liststore.get_value(combobox.get_active_iter(), 0)
        params = self._stimulatordictview.get_params()
        stimulator = nest.Create(stimulatormodel, params=params)

        combobox = self._builder.get_object("neuronmodels")
        liststore = combobox.get_model()
        neuronmodel = liststore.get_value(combobox.get_active_iter(), 0)
        neuron = nest.Create(neuronmodel,
                             params=self._neurondictview.get_params())

        weight = self._builder.get_object("weight").get_value()
        delay = self._builder.get_object("delay").get_value()
        nest.Connect(stimulator, neuron, weight, delay)

        # Record spikes and membrane potential in memory for plotting.
        sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
        nest.Connect(neuron, sd)

        vm = nest.Create("voltmeter", params={"record_to": ["memory"],
                                              "interval": 0.1})
        nest.Connect(vm, neuron)

        simtime = self._builder.get_object("simtime").get_value()
        nest.Simulate(simtime)

        self.update_figure(nest.GetStatus(sd, "events"),
                           nest.GetStatus(vm, "events"))

    def on_delete_event(self, widget, event):
        """Window-close handler; delegates to on_quit."""
        self.on_quit(widget)
        return True

    def on_quit(self, project):
        self._builder.get_object("mainwindow").hide()
        gtk.main_quit()
class DictView(gtk.TreeView):
    """Two-column (name, value) tree view for editing a parameter dict.

    Edited values are coerced back to each parameter's original type; an
    invalid edit reverts the cell to the previous value.
    """

    def __init__(self, params=None):
        gtk.TreeView.__init__(self)

        if params:
            self.params = params
            self.repopulate()

        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn("Name", renderer, text=1)
        self.append_column(column)

        renderer = gtk.CellRendererText()
        renderer.set_property("mode", gtk.CELL_RENDERER_MODE_EDITABLE)
        renderer.set_property("editable", True)
        column = gtk.TreeViewColumn("Value", renderer, text=2)
        self.append_column(column)

        self.set_size_request(200, 150)
        renderer.connect("edited", self.check_value)
        self.show()

    def repopulate(self):
        """Rebuild the tree model from ``self.params``."""
        model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING,
                              gobject.TYPE_STRING)
        for key in sorted(self.params.keys()):
            pos = model.insert_after(None, None)
            # Remember the key and its original type so edits can be
            # coerced back in check_value.
            data = {"key": key, "element_type": type(self.params[key])}
            model.set_value(pos, 0, data)
            model.set_value(pos, 1, str(key))
            model.set_value(pos, 2, str(self.params[key]))
        self.set_model(model)

    def check_value(self, widget, path, new_text):
        """Cell-edit callback: coerce to the original type, or revert."""
        model = self.get_model()
        data = model[path][0]
        try:
            typename = data["element_type"].__name__
            # SECURITY NOTE: eval of user-typed text. Acceptable for a local
            # GUI tool, but do not reuse this pattern on untrusted input.
            new_value = eval("%s('%s')" % (typename, new_text))
            if typename == "bool" and new_text.lower() in ["false", "0"]:
                new_value = False
            self.params[data["key"]] = new_value
            model[path][2] = str(new_value)
        except Exception:
            # Broadened from ValueError: eval can also raise SyntaxError or
            # NameError on malformed input, which previously crashed the
            # callback instead of reverting the cell.
            old_value = self.params[data["key"]]
            model[path][2] = str(old_value)

    def get_params(self):
        return self.params

    def set_params(self, params):
        self.params = params
        self.repopulate()
if __name__ == "__main__":
    # Constructing Main builds the GUI and blocks in the GTK main loop.
    Main()
| 32.351515 | 77 | 0.610809 |
adff8ec59fd16afc2173c12cf3cb24e315854461 | 52,965 | py | Python | cogs/utility.py | drumman22/modmail | a7126674aaa23d01846849b7407a98daa64dc85a | [
"MIT"
] | 1 | 2019-05-22T23:23:19.000Z | 2019-05-22T23:23:19.000Z | cogs/utility.py | AliUltimate/modmail | dcd5a583b142dc629b414f287611c04c81beca2f | [
"MIT"
] | null | null | null | cogs/utility.py | AliUltimate/modmail | dcd5a583b142dc629b414f287611c04c81beca2f | [
"MIT"
] | null | null | null | import inspect
import logging
import os
import traceback
from contextlib import redirect_stdout
from datetime import datetime
from difflib import get_close_matches
from io import StringIO
from typing import Union
from types import SimpleNamespace as param
from json import JSONDecodeError
from textwrap import indent
from discord import Embed, Color, Activity, Role
from discord.enums import ActivityType, Status
from discord.ext import commands
from aiohttp import ClientResponseError
from pkg_resources import parse_version
from core import checks
from core.changelog import Changelog
from core.decorators import github_access_token_required, trigger_typing
from core.models import InvalidConfigError, PermissionLevel
from core.paginator import PaginatorSession, MessagePaginatorSession
from core.utils import cleanup_code, info, error, User, get_perm_level
logger = logging.getLogger('Modmail')
class ModmailHelpCommand(commands.HelpCommand):
    """Custom discord.py help command that renders commands per-cog as
    paginated embeds, annotated with each command's permission level."""

    async def format_cog_help(self, cog):
        """Build one or more embeds listing a cog's visible commands.

        Command lines are chunked so no embed field exceeds the 1024-char
        field-value limit; later chunks get a "(Continued)" author title.
        """
        bot = self.context.bot
        prefix = self.clean_prefix

        formats = ['']
        for cmd in await self.filter_commands(cog.get_commands(), sort=True, key=get_perm_level):
            perm_level = get_perm_level(cmd)
            if perm_level is PermissionLevel.INVALID:
                format_ = f'`{prefix + cmd.qualified_name}` '
            else:
                format_ = f'`[{perm_level}] {prefix + cmd.qualified_name}` '

            format_ += f'- {cmd.short_doc}\n'
            if not format_.strip():
                continue
            # Start a new chunk if appending would overflow the field limit.
            if len(format_) + len(formats[-1]) >= 1024:
                formats.append(format_)
            else:
                formats[-1] += format_

        embeds = []
        for format_ in formats:
            embed = Embed(
                description=f'*{cog.description or "No description."}*',
                color=bot.main_color
            )
            embed.add_field(name='Commands', value=format_ or 'No commands.')
            continued = ' (Continued)' if embeds else ''
            embed.set_author(name=cog.qualified_name + ' - Help' + continued,
                             icon_url=bot.user.avatar_url)

            embed.set_footer(text=f'Type "{prefix}{self.command_attrs["name"]} command" '
                                  'for more info on a specific command.')
            embeds.append(embed)
        return embeds

    def process_help_msg(self, help_: str):
        """Substitute the live prefix into a command's help text."""
        return help_.format(prefix=self.clean_prefix) if help_ else 'No help message.'

    async def send_bot_help(self, mapping):
        """Paginate help for every cog, sorted by cog name."""
        embeds = []
        # TODO: Implement for no cog commands
        for cog in sorted((key for key in mapping.keys() if key is not None),
                          key=lambda c: c.qualified_name):
            embeds.extend(await self.format_cog_help(cog))

        p_session = PaginatorSession(self.context, *embeds, destination=self.get_destination())
        return await p_session.run()

    async def send_cog_help(self, cog):
        """Paginate help for a single cog."""
        embeds = await self.format_cog_help(cog)
        p_session = PaginatorSession(self.context, *embeds, destination=self.get_destination())
        return await p_session.run()

    async def send_command_help(self, command):
        """Send a single embed describing one command."""
        if not await self.filter_commands([command]):
            return
        perm_level = get_perm_level(command)
        if perm_level is not PermissionLevel.INVALID:
            perm_level = f'{perm_level.name} [{perm_level}]'
        else:
            perm_level = ''

        embed = Embed(
            title=f'`{self.get_command_signature(command)}`',
            color=self.context.bot.main_color,
            description=self.process_help_msg(command.help)
        )
        embed.set_footer(text=f'Permission level: {perm_level}')
        await self.get_destination().send(embed=embed)

    async def send_group_help(self, group):
        """Send an embed describing a command group and its subcommands."""
        if not await self.filter_commands([group]):
            return

        perm_level = get_perm_level(group)
        if perm_level is not PermissionLevel.INVALID:
            perm_level = f'{perm_level.name} [{perm_level}]'
        else:
            perm_level = ''

        embed = Embed(
            title=f'`{self.get_command_signature(group)}`',
            color=self.context.bot.main_color,
            description=self.process_help_msg(group.help)
        )

        embed.add_field(name='Permission level', value=perm_level, inline=False)

        format_ = ''
        length = len(group.commands)

        for i, command in enumerate(
                await self.filter_commands(group.commands, sort=True, key=lambda c: c.name)
        ):
            # BUG: fmt may run over the embed limit
            # TODO: paginate this
            if length == i + 1:  # last
                branch = '└─'
            else:
                branch = '├─'
            format_ += f'`{branch} {command.name}` - {command.short_doc}\n'

        embed.add_field(name='Sub Commands', value=format_, inline=False)
        embed.set_footer(
            text=f'Type "{self.clean_prefix}{self.command_attrs["name"]} command" '
                 'for more info on a command.'
        )
        await self.get_destination().send(embed=embed)

    async def send_error_message(self, msg):  # pylint: disable=W0221
        """Reply to an unknown command/category, suggesting close matches."""
        logger.warning(error(f'CommandNotFound: {msg}'))
        embed = Embed(
            color=Color.red()
        )
        embed.set_footer(text=f'Command/Category "{self.context.kwargs.get("command")}" not found.')

        choices = set()

        for name, cmd in self.context.bot.all_commands.items():
            if not cmd.hidden:
                choices.add(name)

        command = self.context.kwargs.get('command')
        closest = get_close_matches(command, choices)
        if closest:
            embed.add_field(name=f'Perhaps you meant:', value="\n".join(f'`{x}`' for x in closest))
        else:
            embed.title = 'Cannot find command or category'
            embed.set_footer(text=f'Type "{self.clean_prefix}{self.command_attrs["name"]}" '
                                  'for a list of all available commands.')
        await self.get_destination().send(embed=embed)
class Utility(commands.Cog):
"""General commands that provide utility."""
    def __init__(self, bot):
        self.bot = bot
        # Swap in the custom help command, remembering the default so
        # cog_unload can restore it.
        self._original_help_command = bot.help_command
        self.bot.help_command = ModmailHelpCommand(
            verify_checks=False,
            command_attrs={'help': 'Shows this help message.'}
        )
        # Looks a bit ugly
        # Gate the generated help command behind the REGULAR permission level.
        self.bot.help_command._command_impl = checks.has_permissions(
            PermissionLevel.REGULAR
        )(self.bot.help_command._command_impl)
        self.bot.help_command.cog = self
    def cog_unload(self):
        # Restore the help command that __init__ replaced.
        self.bot.help_command = self._original_help_command
@commands.command()
@checks.has_permissions(PermissionLevel.REGULAR)
@trigger_typing
async def changelog(self, ctx):
"""Shows the changelog of the Modmail."""
changelog = await Changelog.from_url(self.bot)
try:
paginator = PaginatorSession(ctx, *changelog.embeds)
await paginator.run()
except:
await ctx.send(changelog.CHANGELOG_URL)
    @commands.command(aliases=['bot', 'info'])
    @checks.has_permissions(PermissionLevel.REGULAR)
    @trigger_typing
    async def about(self, ctx):
        """Shows information about this bot."""
        embed = Embed(color=self.bot.main_color,
                      timestamp=datetime.utcnow())
        embed.set_author(name='Modmail - About',
                         icon_url=self.bot.user.avatar_url)
        embed.set_thumbnail(url=self.bot.user.avatar_url)

        desc = 'This is an open source Discord bot that serves as a means for '
        desc += 'members to easily communicate with server administrators in '
        desc += 'an organised manner.'
        embed.description = desc

        embed.add_field(name='Uptime', value=self.bot.uptime)
        embed.add_field(name='Latency', value=f'{self.bot.latency * 1000:.2f} ms')
        embed.add_field(name='Version',
                        value=f'`{self.bot.version}`')
        embed.add_field(name='Author',
                        value='[`kyb3r`](https://github.com/kyb3r)')

        # Compare the running version with the latest changelog entry.
        changelog = await Changelog.from_url(self.bot)
        latest = changelog.latest_version

        if parse_version(self.bot.version) < parse_version(latest.version):
            footer = f"A newer version is available v{latest.version}"
        else:
            footer = 'You are up to date with the latest version.'

        embed.add_field(name='GitHub',
                        value='https://github.com/kyb3r/modmail',
                        inline=False)

        # '\u200b' is a zero-width space: renders as a blank field name.
        embed.add_field(name='\u200b',
                        value='Support this bot on [Patreon](https://patreon.com/kyber).')

        embed.set_footer(text=footer)
        await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
@trigger_typing
async def debug(self, ctx):
"""Shows the recent application-logs of the bot."""
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../temp/logs.log'), 'r+') as f:
logs = f.read().strip()
if not logs:
embed = Embed(
color=self.bot.main_color,
title='Debug Logs:',
description='You don\'t have any logs at the moment.'
)
embed.set_footer(text='Go to Heroku to see your logs.')
return await ctx.send(embed=embed)
messages = []
# Using Scala formatting because it's similar to Python for exceptions
# and it does a fine job formatting the logs.
msg = '```Scala\n'
for line in logs.splitlines(keepends=True):
if msg != '```Scala\n':
if len(line) + len(msg) + 3 > 2000:
msg += '```'
messages.append(msg)
msg = '```Scala\n'
msg += line
if len(msg) + 3 > 2000:
msg = msg[:1993] + '[...]```'
messages.append(msg)
msg = '```Scala\n'
if msg != '```Scala\n':
msg += '```'
messages.append(msg)
embed = Embed(
color=self.bot.main_color
)
embed.set_footer(text='Debug logs - Navigate using the reactions below.')
session = MessagePaginatorSession(ctx, *messages, embed=embed)
session.current = len(messages) - 1
return await session.run()
@debug.command(name='hastebin', aliases=['haste'])
@checks.has_permissions(PermissionLevel.OWNER)
@trigger_typing
async def debug_hastebin(self, ctx):
    """Posts application-logs to Hastebin."""
    # Allow a self-hosted haste server via the HASTE_URL env var.
    haste_url = os.environ.get('HASTE_URL', 'https://hasteb.in')
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           '../temp/logs.log'), 'r+') as f:
        logs = f.read().strip()
    try:
        async with self.bot.session.post(haste_url + '/documents',
                                         data=logs) as resp:
            # A successful upload returns {"key": "<document id>"}.
            key = (await resp.json())["key"]
        embed = Embed(
            title='Debug Logs',
            color=self.bot.main_color,
            description=f'{haste_url}/' + key
        )
    except (JSONDecodeError, ClientResponseError, IndexError):
        # Upload failed (malformed response / service unreachable):
        # fall back to a notice pointing at the Heroku dashboard.
        embed = Embed(
            title='Debug Logs',
            color=self.bot.main_color,
            description='Something\'s wrong. '
                        'We\'re unable to upload your logs to hastebin.'
        )
        embed.set_footer(text='Go to Heroku to see your logs.')
    await ctx.send(embed=embed)
@debug.command(name='clear', aliases=['wipe'])
@checks.has_permissions(PermissionLevel.OWNER)
@trigger_typing
async def debug_clear(self, ctx):
    """Clears the locally cached logs."""
    # Opening the log file in write mode truncates it to zero length.
    log_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            '../temp/logs.log')
    with open(log_path, 'w'):
        pass
    confirmation = Embed(
        color=self.bot.main_color,
        description='Cached logs are now cleared.'
    )
    await ctx.send(embed=confirmation)
@commands.command()
@checks.has_permissions(PermissionLevel.OWNER)
@github_access_token_required
@trigger_typing
async def github(self, ctx):
    """Shows the GitHub user your Github_Access_Token is linked to."""
    data = await self.bot.api.get_user_info()
    user = data['user']
    embed = Embed(
        title='GitHub',
        description='Current User',
        color=self.bot.main_color
    )
    # Display the linked account's name, avatar and profile link.
    embed.set_author(name=user['username'],
                     icon_url=user['avatar_url'],
                     url=user['url'])
    embed.set_thumbnail(url=user['avatar_url'])
    await ctx.send(embed=embed)
@commands.command()
@checks.has_permissions(PermissionLevel.OWNER)
@github_access_token_required
@trigger_typing
async def update(self, ctx, *, flag: str = ''):
    """
    Update Modmail.
    This only works for Heroku users who have configured their bot for updates.
    To stay up-to-date with the latest commit
    from GitHub, specify "force" as the flag.
    """
    changelog = await Changelog.from_url(self.bot)
    latest = changelog.latest_version
    desc = (f'The latest version is [`{self.bot.version}`]'
            '(https://github.com/kyb3r/modmail/blob/master/bot.py#L25)')
    # Skip the update unless a newer version exists or "force" was passed.
    if parse_version(self.bot.version) >= parse_version(latest.version) and flag.lower() != 'force':
        embed = Embed(
            title='Already up to date',
            description=desc,
            color=self.bot.main_color
        )
        data = await self.bot.api.get_user_info()
        if not data.get('error'):
            # Attribute the message to the linked GitHub account.
            user = data['user']
            embed.set_author(name=user['username'],
                             icon_url=user['avatar_url'],
                             url=user['url'])
    else:
        # Trigger the update by merging upstream into the user's repository.
        data = await self.bot.api.update_repository()
        commit_data = data['data']
        user = data['user']
        if commit_data:
            # A merge commit was created: show the changelog entries
            # and link to the commit.
            embed = Embed(color=self.bot.main_color)
            embed.set_footer(text=f'Updating Modmail v{self.bot.version} '
                                  f'-> v{latest.version}')
            embed.set_author(name=user['username'] + ' - Updating bot',
                             icon_url=user['avatar_url'],
                             url=user['url'])
            embed.description = latest.description
            for name, value in latest.fields.items():
                embed.add_field(name=name, value=value)
            # message = commit_data['commit']['message']
            html_url = commit_data["html_url"]
            short_sha = commit_data['sha'][:6]
            embed.add_field(name='Merge Commit',
                            value=f'[`{short_sha}`]({html_url})')
        else:
            # Repository already matches upstream master; nothing to merge.
            embed = Embed(
                title='Already up to date with master repository.',
                description='No further updates required',
                color=self.bot.main_color
            )
            embed.set_author(name=user['username'],
                             icon_url=user['avatar_url'],
                             url=user['url'])
    return await ctx.send(embed=embed)
@commands.command(aliases=['presence'])
@checks.has_permissions(PermissionLevel.ADMINISTRATOR)
async def activity(self, ctx, activity_type: str.lower, *, message: str = ''):
    """
    Set an activity status for the bot.
    Possible activity types:
    - `playing`
    - `streaming`
    - `listening`
    - `watching`
    When activity type is set to `listening`,
    it must be followed by a "to": "listening to..."
    When activity type is set to `streaming`, you can set
    the linked twitch page:
    - `{prefix}config set twitch_url https://www.twitch.tv/somechannel/`
    To remove the current activity status:
    - `{prefix}activity clear`
    """
    if activity_type == 'clear':
        # Wipe the stored activity and re-apply presence without one.
        self.bot.config['activity_type'] = None
        self.bot.config['activity_message'] = None
        await self.bot.config.update()
        await self.set_presence()
        embed = Embed(
            title='Activity Removed',
            color=self.bot.main_color
        )
        return await ctx.send(embed=embed)
    if not message:
        raise commands.MissingRequiredArgument(param(name='message'))
    activity, msg = (await self.set_presence(
        activity_identifier=activity_type,
        activity_by_key=True,
        activity_message=message
    ))['activity']
    if activity is None:
        # set_presence could not resolve the given activity type.
        raise commands.MissingRequiredArgument(param(name='activity'))
    # Persist so the activity survives restarts.
    self.bot.config['activity_type'] = activity.type.value
    self.bot.config['activity_message'] = message
    await self.bot.config.update()
    embed = Embed(
        title='Activity Changed',
        description=msg,
        color=self.bot.main_color
    )
    return await ctx.send(embed=embed)
@commands.command()
@checks.has_permissions(PermissionLevel.ADMINISTRATOR)
async def status(self, ctx, *, status_type: str.lower):
    """
    Set a status for the bot.
    Possible status types:
    - `online`
    - `idle`
    - `dnd`
    - `do_not_disturb` or `do not disturb`
    - `invisible` or `offline`
    To remove the current status:
    - `{prefix}status clear`
    """
    if status_type == 'clear':
        # Wipe the stored status and re-apply the default presence.
        self.bot.config['status'] = None
        await self.bot.config.update()
        await self.set_presence()
        embed = Embed(
            title='Status Removed',
            color=self.bot.main_color
        )
        return await ctx.send(embed=embed)
    # Allow "do not disturb" (with spaces) to match the enum member name.
    status_type = status_type.replace(' ', '_')
    status, msg = (await self.set_presence(
        status_identifier=status_type,
        status_by_key=True
    ))['status']
    if status is None:
        # set_presence could not resolve the given status type.
        raise commands.MissingRequiredArgument(param(name='status'))
    # Persist so the status survives restarts.
    self.bot.config['status'] = status.value
    await self.bot.config.update()
    embed = Embed(
        title='Status Changed',
        description=msg,
        color=self.bot.main_color
    )
    return await ctx.send(embed=embed)
async def set_presence(self, *,
                       status_identifier=None,
                       status_by_key=True,
                       activity_identifier=None,
                       activity_by_key=True,
                       activity_message=None):
    """Resolve and apply the bot's Discord presence (status + activity).

    Falls back to stored config values when no identifiers are given.
    ``*_by_key`` selects lookup by enum name (``Status['online']``) instead
    of by raw value. Invalid identifiers are logged and skipped rather
    than raised.

    Returns:
        dict with keys ``'activity'`` and ``'status'``, each a
        ``(object_or_None, human_readable_message)`` tuple.

    Raises:
        ValueError: if ``activity_message`` is given without an
            ``activity_identifier``.
    """
    activity = status = None
    if status_identifier is None:
        # No explicit status: fall back to the configured raw value.
        status_identifier = self.bot.config.get('status', None)
        status_by_key = False
    try:
        if status_by_key:
            status = Status[status_identifier]
        else:
            status = Status(status_identifier)
    except (KeyError, ValueError):
        if status_identifier is not None:
            msg = f'Invalid status type: {status_identifier}'
            logger.warning(error(msg))
    if activity_identifier is None:
        if activity_message is not None:
            raise ValueError('activity_message must be None '
                             'if activity_identifier is None.')
        # Fall back to the configured activity type (stored as raw value).
        activity_identifier = self.bot.config.get('activity_type', None)
        activity_by_key = False
    try:
        if activity_by_key:
            activity_type = ActivityType[activity_identifier]
        else:
            activity_type = ActivityType(activity_identifier)
    except (KeyError, ValueError):
        if activity_identifier is not None:
            msg = f'Invalid activity type: {activity_identifier}'
            logger.warning(error(msg))
    else:
        url = None
        activity_message = (
            activity_message or
            self.bot.config.get('activity_message', '')
        ).strip()
        if activity_type == ActivityType.listening:
            if activity_message.lower().startswith('to '):
                # The actual message is after listening to [...]
                # discord automatically add the "to"
                activity_message = activity_message[3:].strip()
        elif activity_type == ActivityType.streaming:
            # Streaming activities carry a URL; use the configured twitch page.
            url = self.bot.config.get(
                'twitch_url', 'https://www.twitch.tv/discord-modmail/'
            )
        if activity_message:
            activity = Activity(type=activity_type,
                                name=activity_message,
                                url=url)
        else:
            msg = 'You must supply an activity message to use custom activity.'
            logger.warning(error(msg))
    await self.bot.change_presence(activity=activity, status=status)
    # Build human-readable summaries for logging / command feedback.
    presence = {'activity': (None, 'No activity has been set.'),
                'status': (None, 'No status has been set.')}
    if activity is not None:
        use_to = 'to ' if activity.type == ActivityType.listening else ''
        msg = f'Activity set to: {activity.type.name.capitalize()} '
        msg += f'{use_to}{activity.name}.'
        presence['activity'] = (activity, msg)
    if status is not None:
        msg = f'Status set to: {status.value}.'
        presence['status'] = (status, msg)
    return presence
@commands.Cog.listener()
async def on_ready(self):
    """Apply the saved presence once the config cache has loaded."""
    # Wait until config cache is populated with stuff from db
    await self.bot.config.wait_until_ready()
    presence = await self.set_presence()
    # Log the human-readable summaries returned by set_presence.
    logger.info(info(presence['activity'][1]))
    logger.info(info(presence['status'][1]))
@commands.command()
@checks.has_permissions(PermissionLevel.ADMINISTRATOR)
@trigger_typing
async def ping(self, ctx):
    """Pong! Returns your websocket latency."""
    # Websocket latency is reported in seconds; show milliseconds.
    latency_ms = self.bot.ws.latency * 1000
    response = Embed(
        title='Pong! Websocket Latency:',
        description=f'{latency_ms:.4f} ms',
        color=self.bot.main_color
    )
    return await ctx.send(embed=response)
@commands.command()
@checks.has_permissions(PermissionLevel.ADMINISTRATOR)
async def mention(self, ctx, *, mention: str = None):
    """
    Change what the bot mentions at the start of each thread.
    Type only `{prefix}mention` to retrieve your current "mention" message.
    """
    current = self.bot.config.get('mention', '@here')
    if mention is None:
        # No argument: just report the currently configured mention text.
        embed = Embed(
            title='Current text',
            color=self.bot.main_color,
            description=str(current)
        )
    else:
        # Persist the new mention text, then confirm.
        self.bot.config['mention'] = mention
        await self.bot.config.update()
        embed = Embed(
            title='Changed mention!',
            description=f'On thread creation the bot now says {mention}.',
            color=self.bot.main_color
        )
    return await ctx.send(embed=embed)
@commands.command()
@checks.has_permissions(PermissionLevel.ADMINISTRATOR)
async def prefix(self, ctx, *, prefix=None):
    """
    Change the prefix of the bot.
    Type only `{prefix}prefix` to retrieve your current bot prefix.
    """
    current = self.bot.prefix
    embed = Embed(
        title='Current prefix',
        color=self.bot.main_color,
        description=f'{current}'
    )
    if prefix is None:
        # No argument given: report the prefix currently in use.
        await ctx.send(embed=embed)
        return
    # Re-purpose the embed as a confirmation and persist the new prefix.
    embed.title = 'Changed prefix!'
    embed.description = f'Set prefix to `{prefix}`'
    self.bot.config['prefix'] = prefix
    await self.bot.config.update()
    await ctx.send(embed=embed)
@commands.group(aliases=['configuration'], invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def config(self, ctx):
    """
    Modify changeable configuration variables for this bot.
    Type `{prefix}config options` to view a list
    of valid configuration variables.
    To set a configuration variable:
    - `{prefix}config set varname value here`
    To remove a configuration variable:
    - `{prefix}config remove varname`
    """
    # Bare `config` invocation with no subcommand: show the group help.
    await ctx.send_help(ctx.command)
@config.command(name='options', aliases=['list'])
@checks.has_permissions(PermissionLevel.OWNER)
async def config_options(self, ctx):
    """Return a list of valid configuration names you can change."""
    allowed = self.bot.config.allowed_to_change_in_command
    # Render each key in inline-code style, comma separated.
    formatted = ', '.join(f'`{key}`' for key in allowed)
    return await ctx.send(embed=Embed(title='Valid Keys',
                                      description=formatted,
                                      color=self.bot.main_color))
@config.command(name='set', aliases=['add'])
@checks.has_permissions(PermissionLevel.OWNER)
async def config_set(self, ctx, key: str.lower, *, value: str):
    """Set a configuration variable and its value."""
    keys = self.bot.config.allowed_to_change_in_command
    if key in keys:
        try:
            # Validate/normalise the raw value for this key.
            value, value_text = await self.bot.config.clean_data(key, value)
        except InvalidConfigError as exc:
            # The validator supplies a ready-made error embed.
            embed = exc.embed
        else:
            await self.bot.config.update({key: value})
            embed = Embed(
                title='Success',
                color=self.bot.main_color,
                description=f'Set `{key}` to `{value_text}`'
            )
    else:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description=f'{key} is an invalid key.'
        )
        valid_keys = [f'`{k}`' for k in keys]
        embed.add_field(name='Valid keys', value=', '.join(valid_keys))
    return await ctx.send(embed=embed)
@config.command(name='remove', aliases=['del', 'delete', 'rm'])
@checks.has_permissions(PermissionLevel.OWNER)
async def config_remove(self, ctx, key: str.lower):
    """Delete a set configuration variable."""
    keys = self.bot.config.allowed_to_change_in_command
    if key in keys:
        try:
            del self.bot.config.cache[key]
            await self.bot.config.update()
        except KeyError:
            # when no values were set
            pass
        embed = Embed(
            title='Success',
            color=self.bot.main_color,
            # Fixed tense in the user-facing message ("had" -> "has").
            description=f'`{key}` has been deleted from the config.'
        )
    else:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description=f'{key} is an invalid key.'
        )
        valid_keys = [f'`{k}`' for k in keys]
        embed.add_field(name='Valid keys', value=', '.join(valid_keys))
    return await ctx.send(embed=embed)
@config.command(name='get')
@checks.has_permissions(PermissionLevel.OWNER)
async def config_get(self, ctx, key: str.lower = None):
    """
    Show the configuration variables that are currently set.
    Leave `key` empty to show all currently set configuration variables.
    """
    keys = self.bot.config.allowed_to_change_in_command
    if key:
        if key in keys:
            desc = f'`{key}` is set to `{self.bot.config.get(key)}`'
            embed = Embed(
                color=self.bot.main_color,
                description=desc
            )
            embed.set_author(name='Config variable',
                             icon_url=self.bot.user.avatar_url)
        else:
            embed = Embed(
                title='Error',
                color=Color.red(),
                description=f'`{key}` is an invalid key.'
            )
            valid_keys = [f'`{k}`' for k in keys]
            embed.add_field(name='Valid keys', value=', '.join(valid_keys))
    else:
        # No key given: list every user-changeable variable with a value.
        embed = Embed(
            color=self.bot.main_color,
            description='Here is a list of currently '
                        'set configuration variables.'
        )
        embed.set_author(name='Current config',
                         icon_url=self.bot.user.avatar_url)
        config = {
            key: val for key, val in self.bot.config.cache.items()
            if val and key in keys
        }
        for name, value in reversed(list(config.items())):
            embed.add_field(name=name, value=f'`{value}`', inline=False)
    return await ctx.send(embed=embed)
@commands.group(aliases=['aliases'], invoke_without_command=True)
@checks.has_permissions(PermissionLevel.MODERATOR)
async def alias(self, ctx):
    """
    Create shortcuts to bot commands.
    When `?alias` is used by itself, this will retrieve
    a list of alias that are currently set.
    To use alias:
    First create a snippet using:
    - `{prefix}alias add alias-name other-command`
    For example:
    - `{prefix}alias add r reply`
    - Now you can use `{prefix}r` as an replacement for `{prefix}reply`.
    See also `{prefix}snippets`.
    """
    embeds = []
    desc = 'Here is a list of aliases that are currently configured.'
    if self.bot.aliases:
        embed = Embed(
            color=self.bot.main_color,
            description=desc
        )
    else:
        embed = Embed(
            color=self.bot.main_color,
            description='You dont have any aliases at the moment.'
        )
    embed.set_author(name='Command aliases', icon_url=ctx.guild.icon_url)
    embed.set_footer(text=f'Do {self.bot.prefix}'
                          'help aliases for more commands.')
    embeds.append(embed)
    # Paginate: at most 5 alias fields per embed page.
    for name, value in self.bot.aliases.items():
        if len(embed.fields) == 5:
            embed = Embed(color=self.bot.main_color, description=desc)
            embed.set_author(name='Command aliases',
                             icon_url=ctx.guild.icon_url)
            embed.set_footer(text=f'Do {self.bot.prefix}help '
                                  'aliases for more commands.')
            embeds.append(embed)
        embed.add_field(name=name, value=value, inline=False)
    session = PaginatorSession(ctx, *embeds)
    return await session.run()
@alias.command(name='add')
@checks.has_permissions(PermissionLevel.MODERATOR)
async def alias_add(self, ctx, name: str.lower, *, value):
    """Add an alias."""
    # Lazily create the aliases mapping on first use.
    if 'aliases' not in self.bot.config.cache:
        self.bot.config['aliases'] = {}
    # Refuse to shadow an existing command or alias.
    if self.bot.get_command(name) or self.bot.config.aliases.get(name):
        embed = Embed(
            title='Error',
            color=Color.red(),
            description='A command or alias already exists '
                        f'with the same name: `{name}`.'
        )
        return await ctx.send(embed=embed)
    # The alias target (first word of `value`) must be a real command.
    if not self.bot.get_command(value.split()[0]):
        embed = Embed(
            title='Error',
            color=Color.red(),
            description='The command you are attempting to point '
                        f'to does not exist: `{value.split()[0]}`.'
        )
        return await ctx.send(embed=embed)
    self.bot.config.aliases[name] = value
    await self.bot.config.update()
    embed = Embed(
        title='Added alias',
        color=self.bot.main_color,
        description=f'`{name}` points to: {value}'
    )
    return await ctx.send(embed=embed)
@alias.command(name='remove', aliases=['del', 'delete', 'rm'])
@checks.has_permissions(PermissionLevel.MODERATOR)
async def alias_remove(self, ctx, *, name: str.lower):
    """Remove an alias."""
    # Lazily create the aliases mapping so the lookup below can't fail.
    if 'aliases' not in self.bot.config.cache:
        self.bot.config['aliases'] = {}
    if self.bot.config.aliases.get(name):
        del self.bot.config['aliases'][name]
        await self.bot.config.update()
        embed = Embed(
            title='Removed alias',
            color=self.bot.main_color,
            description=f'`{name}` no longer exists.'
        )
    else:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description=f'Alias `{name}` does not exist.'
        )
    return await ctx.send(embed=embed)
@commands.group(aliases=['perms'], invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions(self, ctx):
    """
    Set the permissions for Modmail commands.
    You may set permissions based on individual command names, or permission
    levels.
    Acceptable permission levels are:
    - **Owner** [5] (absolute control over the bot)
    - **Administrator** [4] (administrative powers such as setting activities)
    - **Moderator** [3] (ability to block)
    - **Supporter** [2] (access to core Modmail supporting functions)
    - **Regular** [1] (most basic interactions such as help and about)
    By default, owner is set to the absolute bot owner and regular is `@everyone`.
    Note: You will still have to manually give/take permission to the Modmail
    category to users/roles.
    """
    # Bare `permissions` invocation with no subcommand: show the group help.
    await ctx.send_help(ctx.command)
@permissions.group(name='add', invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_add(self, ctx):
    """Add a permission to a command or a permission level."""
    # Bare `permissions add` invocation: show the subcommand help.
    await ctx.send_help(ctx.command)
@permissions_add.command(name='command')
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_add_command(self, ctx, command: str, *,
                                  user_or_role: Union[User, Role, str]):
    """
    Add a user, role, or everyone permission to use a command.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    if command not in self.bot.all_commands:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description='The command you are attempting to point '
                        f'to does not exist: `{command}`.'
        )
        return await ctx.send(embed=embed)
    # Resolve the grantee: a user/role snowflake id, or -1 as the
    # sentinel meaning "everyone".
    if hasattr(user_or_role, 'id'):
        value = user_or_role.id
    elif user_or_role in {'everyone', 'all'}:
        value = -1
    else:
        raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
    # Store against the canonical command name (resolves aliases).
    await self.bot.update_perms(self.bot.all_commands[command].name, value)
    embed = Embed(
        title='Success',
        color=self.bot.main_color,
        description=f'Permission for {command} was successfully updated.'
    )
    return await ctx.send(embed=embed)
@permissions_add.command(name='level', aliases=['group'])
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_add_level(self, ctx, level: str, *,
                                user_or_role: Union[User, Role, str]):
    """
    Add a user, role, or everyone permission to use commands of a permission level.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Level names are stored upper-case in the PermissionLevel enum.
    if level.upper() not in PermissionLevel.__members__:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description='The permission level you are attempting to point '
                        f'to does not exist: `{level}`.'
        )
        return await ctx.send(embed=embed)
    # Resolve the grantee: a user/role snowflake id, or -1 as the
    # sentinel meaning "everyone".
    if hasattr(user_or_role, 'id'):
        value = user_or_role.id
    elif user_or_role in {'everyone', 'all'}:
        value = -1
    else:
        raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
    await self.bot.update_perms(PermissionLevel[level.upper()], value)
    embed = Embed(
        title='Success',
        color=self.bot.main_color,
        description=f'Permission for {level} was successfully updated.'
    )
    return await ctx.send(embed=embed)
@permissions.group(name='remove', aliases=['del', 'delete', 'rm', 'revoke'],
                   invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_remove(self, ctx):
    """Remove permission to use a command or permission level."""
    # Bare `permissions remove` invocation: show the subcommand help.
    await ctx.send_help(ctx.command)
@permissions_remove.command(name='command')
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_remove_command(self, ctx, command: str, *,
                                     user_or_role: Union[User, Role, str]):
    """
    Remove a user, role, or everyone permission to use a command.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    if command not in self.bot.all_commands:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description='The command you are attempting to point '
                        f'to does not exist: `{command}`.'
        )
        return await ctx.send(embed=embed)
    # Resolve the grantee: a user/role snowflake id, or -1 as the
    # sentinel meaning "everyone".
    if hasattr(user_or_role, 'id'):
        value = user_or_role.id
    elif user_or_role in {'everyone', 'all'}:
        value = -1
    else:
        raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
    # add=False revokes the entry instead of granting it.
    await self.bot.update_perms(self.bot.all_commands[command].name, value, add=False)
    embed = Embed(
        title='Success',
        color=self.bot.main_color,
        description=f'Permission for {command} was successfully updated.'
    )
    return await ctx.send(embed=embed)
@permissions_remove.command(name='level', aliases=['group'])
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_remove_level(self, ctx, level: str, *,
                                   user_or_role: Union[User, Role, str]):
    """
    Remove a user, role, or everyone permission to use commands of a permission level.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Level names are stored upper-case in the PermissionLevel enum.
    if level.upper() not in PermissionLevel.__members__:
        embed = Embed(
            title='Error',
            color=Color.red(),
            description='The permission level you are attempting to point '
                        f'to does not exist: `{level}`.'
        )
        return await ctx.send(embed=embed)
    # Resolve the grantee: a user/role snowflake id, or -1 as the
    # sentinel meaning "everyone".
    if hasattr(user_or_role, 'id'):
        value = user_or_role.id
    elif user_or_role in {'everyone', 'all'}:
        value = -1
    else:
        raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
    # add=False revokes the entry instead of granting it.
    await self.bot.update_perms(PermissionLevel[level.upper()], value, add=False)
    embed = Embed(
        title='Success',
        color=self.bot.main_color,
        description=f'Permission for {level} was successfully updated.'
    )
    return await ctx.send(embed=embed)
@permissions.group(name='get', invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_get(self, ctx, *, user_or_role: Union[User, Role, str]):
    """
    View the currently-set permissions.
    You can specify `user_or_role` as an alternative to get-by-command or get-by-level.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Resolve the subject: a user/role snowflake id, or -1 ("everyone").
    if hasattr(user_or_role, 'id'):
        value = user_or_role.id
    elif user_or_role in {'everyone', 'all'}:
        value = -1
    else:
        raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
    # Collect every command and every permission level this id appears in.
    cmds = []
    levels = []
    for cmd in self.bot.commands:
        permissions = self.bot.config.command_permissions.get(cmd.name, [])
        if value in permissions:
            cmds.append(cmd.name)
    for level in PermissionLevel:
        permissions = self.bot.config.level_permissions.get(level.name, [])
        if value in permissions:
            levels.append(level.name)
    mention = user_or_role.name if hasattr(user_or_role, 'name') else user_or_role
    desc_cmd = ', '.join(map(lambda x: f'`{x}`', cmds)) if cmds else 'No permission entries found.'
    desc_level = ', '.join(map(lambda x: f'`{x}`', levels)) if levels else 'No permission entries found.'
    # Two pages: per-command grants, then per-level grants.
    embeds = [
        Embed(
            title=f'{mention} has permission with the following commands:',
            description=desc_cmd,
            color=self.bot.main_color
        ),
        Embed(
            title=f'{mention} has permission with the following permission groups:',
            description=desc_level,
            color=self.bot.main_color
        )
    ]
    p_session = PaginatorSession(ctx, *embeds)
    return await p_session.run()
@permissions_get.command(name='command')
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_get_command(self, ctx, *, command: str = None):
    """View currently-set permissions for a command."""
    def get_command(cmd):
        # Build one embed page describing the grants for a single command.
        permissions = self.bot.config.command_permissions.get(cmd.name, [])
        if not permissions:
            embed = Embed(
                title=f'Permission entries for command `{cmd.name}`:',
                description='No permission entries found.',
                color=self.bot.main_color,
            )
        else:
            values = []
            for perm in permissions:
                if perm == -1:
                    # -1 is the "everyone" sentinel; list it first.
                    values.insert(0, '**everyone**')
                    continue
                # Resolve the id as member -> user -> role, falling back
                # to the raw id when nothing matches.
                member = ctx.guild.get_member(perm)
                if member is not None:
                    values.append(member.mention)
                    continue
                user = self.bot.get_user(perm)
                if user is not None:
                    values.append(user.mention)
                    continue
                role = ctx.guild.get_role(perm)
                if role is not None:
                    values.append(role.mention)
                else:
                    values.append(str(perm))
            embed = Embed(
                title=f'Permission entries for command `{cmd.name}`:',
                description=', '.join(values),
                color=self.bot.main_color
            )
        return embed
    embeds = []
    if command is not None:
        if command not in self.bot.all_commands:
            embed = Embed(
                title='Error',
                color=Color.red(),
                description='The command you are attempting to point '
                            f'to does not exist: `{command}`.'
            )
            return await ctx.send(embed=embed)
        embeds.append(get_command(self.bot.all_commands[command]))
    else:
        # No command given: one page per registered command.
        for cmd in self.bot.commands:
            embeds.append(get_command(cmd))
    p_session = PaginatorSession(ctx, *embeds)
    return await p_session.run()
@permissions_get.command(name='level', aliases=['group'])
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_get_level(self, ctx, *, level: str = None):
    """View currently-set permissions for commands of a permission level."""
    def get_level(perm_level):
        # Build one embed page describing the grants for a single level.
        permissions = self.bot.config.level_permissions.get(perm_level.name, [])
        if not permissions:
            embed = Embed(
                title='Permission entries for permission '
                      f'level `{perm_level.name}`:',
                description='No permission entries found.',
                color=self.bot.main_color,
            )
        else:
            values = []
            for perm in permissions:
                if perm == -1:
                    # -1 is the "everyone" sentinel; list it first.
                    values.insert(0, '**everyone**')
                    continue
                # Resolve the id as member -> user -> role, falling back
                # to the raw id when nothing matches.
                member = ctx.guild.get_member(perm)
                if member is not None:
                    values.append(member.mention)
                    continue
                user = self.bot.get_user(perm)
                if user is not None:
                    values.append(user.mention)
                    continue
                role = ctx.guild.get_role(perm)
                if role is not None:
                    values.append(role.mention)
                else:
                    values.append(str(perm))
            embed = Embed(
                title=f'Permission entries for permission level `{perm_level.name}`:',
                description=', '.join(values),
                color=self.bot.main_color,
            )
        return embed
    embeds = []
    if level is not None:
        # Level names are stored upper-case in the PermissionLevel enum.
        if level.upper() not in PermissionLevel.__members__:
            embed = Embed(
                title='Error',
                color=Color.red(),
                description='The permission level you are attempting to point '
                            f'to does not exist: `{level}`.'
            )
            return await ctx.send(embed=embed)
        embeds.append(get_level(PermissionLevel[level.upper()]))
    else:
        # No level given: one page per defined permission level.
        for perm_level in PermissionLevel:
            embeds.append(get_level(perm_level))
    p_session = PaginatorSession(ctx, *embeds)
    return await p_session.run()
@commands.group(invoke_without_command=True, aliases=['oauth2', 'auth', 'authentication'])
@checks.has_permissions(PermissionLevel.OWNER)
async def oauth(self, ctx):
    """Commands relating to Logviewer oauth2 login authentication."""
    # Bare `oauth` invocation with no subcommand: show the group help.
    await ctx.send_help(ctx.command)
@oauth.command(name='whitelist')
@checks.has_permissions(PermissionLevel.OWNER)
async def oauth_whitelist(self, ctx, target: Union[User, Role]):
    """
    Whitelist or un-whitelist a user or role to have access to logs.
    `target` may be a role ID, name, mention, user ID, name, or mention.
    """
    whitelisted = self.bot.config['oauth_whitelist']
    # Toggle semantics: remove the id when present, otherwise add it.
    if target.id in whitelisted:
        whitelisted.remove(target.id)
        removed = True
    else:
        whitelisted.append(target.id)
        removed = False
    await self.bot.config.update()
    embed = Embed(color=self.bot.main_color)
    embed.title = 'Success'
    embed.description = (
        f"{'Un-w' if removed else 'W'}hitelisted "
        f"{target.mention} to view logs."
    )
    await ctx.send(embed=embed)
@oauth.command(name='show', aliases=['get', 'list', 'view'])
@checks.has_permissions(PermissionLevel.OWNER)
async def oauth_show(self, ctx):
    """Shows a list of users and roles that are whitelisted to view logs."""
    whitelisted = self.bot.config['oauth_whitelist']
    users = []
    roles = []
    # Each stored id may belong to a user or a role; try resolving both.
    for id_ in whitelisted:
        user = self.bot.get_user(id_)
        if user:
            users.append(user)
        role = self.bot.modmail_guild.get_role(id_)
        if role:
            roles.append(role)
    embed = Embed(color=self.bot.main_color)
    embed.title = 'Oauth Whitelist'
    embed.add_field(name='Users', value=' '.join(u.mention for u in users) or 'None')
    embed.add_field(name='Roles', value=' '.join(r.mention for r in roles) or 'None')
    await ctx.send(embed=embed)
@commands.command(hidden=True, name='eval')
@checks.has_permissions(PermissionLevel.OWNER)
async def eval_(self, ctx, *, body: str):
    """Evaluates Python code."""
    # Execution namespace exposed to the evaluated code. NOTE: this runs
    # arbitrary code and is deliberately gated to PermissionLevel.OWNER.
    env = {
        'ctx': ctx,
        'bot': self.bot,
        'channel': ctx.channel,
        'author': ctx.author,
        'guild': ctx.guild,
        'message': ctx.message,
        'source': inspect.getsource,
        'discord': __import__('discord')
    }
    env.update(globals())
    # cleanup_code presumably strips code fences from the message — TODO confirm.
    body = cleanup_code(body)
    stdout = StringIO()
    # Wrap the body in an async function so `await` can be used inside it.
    to_compile = f'async def func():\n{indent(body, "  ")}'
    def paginate(text: str):
        """Simple generator that paginates text."""
        last = 0
        pages = []
        appd_index = curr = None
        # Cut a new page every 1980 characters (headroom for code fences).
        for curr in range(0, len(text)):
            if curr % 1980 == 0:
                pages.append(text[last:curr])
                last = curr
                appd_index = curr
        if appd_index != len(text) - 1:
            # Append the trailing remainder after the last full page.
            pages.append(text[last:curr])
        return list(filter(lambda a: a != '', pages))
    try:
        exec(to_compile, env)  # pylint: disable=exec-used
    except Exception as exc:  # pylint: disable=broad-except
        # Compilation failed: report the error and flag the message.
        await ctx.send(f'```py\n{exc.__class__.__name__}: {exc}\n```')
        return await ctx.message.add_reaction('\u2049')
    func = env['func']
    try:
        # Capture everything the evaluated code prints.
        with redirect_stdout(stdout):
            ret = await func()
    except Exception:  # pylint: disable=broad-except
        # Runtime failure: send captured stdout plus the traceback.
        value = stdout.getvalue()
        await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
        return await ctx.message.add_reaction('\u2049')
    else:
        value = stdout.getvalue()
        if ret is None:
            if value:
                try:
                    await ctx.send(f'```py\n{value}\n```')
                except Exception:  # pylint: disable=broad-except
                    # Output too long for one message: paginate it.
                    paginated_text = paginate(value)
                    for page in paginated_text:
                        if page == paginated_text[-1]:
                            await ctx.send(f'```py\n{page}\n```')
                            break
                        await ctx.send(f'```py\n{page}\n```')
        else:
            # Non-None return value: include it after the captured output.
            try:
                await ctx.send(f'```py\n{value}{ret}\n```')
            except Exception:  # pylint: disable=broad-except
                paginated_text = paginate(f"{value}{ret}")
                for page in paginated_text:
                    if page == paginated_text[-1]:
                        await ctx.send(f'```py\n{page}\n```')
                        break
                    await ctx.send(f'```py\n{page}\n```')
def setup(bot):
    """Entry point for discord.py's extension loader: register the cog."""
    bot.add_cog(Utility(bot))
| 37.886266 | 109 | 0.554479 |
b32084de90e11d15b938eceb3d9bc7d32a916e60 | 4,331 | py | Python | src/robot/utils/ordereddict.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 7 | 2015-02-25T10:55:02.000Z | 2015-11-04T03:20:05.000Z | src/robot/utils/ordereddict.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 12 | 2015-02-24T17:00:06.000Z | 2015-07-31T08:32:07.000Z | src/robot/utils/ordereddict.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 2 | 2015-12-15T11:00:35.000Z | 2018-02-24T18:11:24.000Z | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from six import PY3
if PY3:
    # BUG FIX: the bare-`collections` ABC aliases were removed in Python 3.10;
    # the ABCs have lived in collections.abc since Python 3.3, so prefer that
    # and keep the old location only as a fallback for very old interpreters.
    try:
        from collections.abc import MutableMapping as DictMixin
    except ImportError:  # Python < 3.3
        from collections import MutableMapping as DictMixin
else:
    from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """Dict that remembers insertion order (Raymond Hettinger's recipe).

    Order is kept in a circular doubly linked list of ``[key, prev, next]``
    cells rooted at a sentinel node (``self.__end``); ``self.__map`` indexes
    the cells by key, and the inherited dict stores ``key -> value``.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            self.clear()  # first construction: set up the linked list
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.__map = {}          # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Splice a new cell in just before the sentinel (i.e. at the end).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, skipping the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when *last*, else FIFO."""
        if not self:
            raise KeyError('dictionary is empty')
        # BUG FIX: the original called iterator.next(), which exists only on
        # Python 2 and crashed under PY3; the next() builtin works on both.
        if last:
            key = next(reversed(self))
        else:
            key = next(iter(self))
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle support: serialize as (class, items, instance dict) while
        # temporarily hiding the unpicklable linked-list internals.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # Generic implementations inherited from the mapping mixin.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items

    if PY3:
        iterkeys = DictMixin.keys
        itervalues = DictMixin.values
        iteritems = DictMixin.items
    else:
        iterkeys = DictMixin.iterkeys
        itervalues = DictMixin.itervalues
        iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive comparison against other OrderedDicts; plain
        # (order-insensitive) dict comparison otherwise.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| 31.384058 | 79 | 0.596398 |
cef1baa7fc3017b609649a7233b32714d3eff769 | 2,246 | py | Python | src/envee/virtual_environments.py | Peilonrayz/envee | 66f5b6b1ff7f5966be794e1e3878418c560c1f65 | [
"MIT"
] | null | null | null | src/envee/virtual_environments.py | Peilonrayz/envee | 66f5b6b1ff7f5966be794e1e3878418c560c1f65 | [
"MIT"
] | null | null | null | src/envee/virtual_environments.py | Peilonrayz/envee | 66f5b6b1ff7f5966be794e1e3878418c560c1f65 | [
"MIT"
] | null | null | null | import collections
import functools
import io
import logging
import os.path
import pathlib
import shutil
import subprocess
import sys
import tempfile
import textwrap
from typing import Dict, Optional, Type, Union
import teetime
from . import core, package_managers
# Public API of this module.
__all__ = [
    "VirtualEnvironment",
    "VirtualEnv",
]
class VirtualEnvironment:
    """Base wrapper around an on-disk environment directory.

    Subclasses override the private hooks (``_exists``/``_make``/``_env``/
    ``_remove``) to implement a concrete environment flavour; the public
    methods delegate to those hooks.
    """

    __slots__ = (
        "_path",
        "_program",
        "_flags",
        "_package_manager",
        "_expand_program",
    )

    def __init__(
        self,
        path: Union[str, pathlib.PurePath],
        package_manager: Type[package_managers.PackageManager],
        program: Optional[str] = None,
        flags: Optional[core.Flags] = None,
    ) -> None:
        self._path = path
        self._program = program
        self._flags = flags
        self._package_manager = package_manager
        # Memoized program-path expansion, one cache per environment instance.
        self._expand_program = functools.lru_cache(8)(core._expand_program)

    def exists(self) -> bool:
        """Whether the environment directory is already present on disk."""
        return self._exists()

    def make(self, force: bool = False) -> None:
        """Create the environment; with *force*, tear it down and recreate it."""
        present = self.exists()
        if present and not force:
            return
        if present:
            self.remove()
        self._make()

    def load(self, force: bool = False) -> core.Environment:
        """Ensure the environment exists, then return a usable Environment."""
        self.make(force=force)
        return core.Environment(
            self._env(), self._package_manager(), self._expand_program,
        )

    def remove(self) -> None:
        """Delete the environment from disk."""
        self._remove()

    def _exists(self) -> bool:
        return os.path.exists(self._path)

    def _make(self) -> None:
        if self.exists():
            return
        os.mkdir(self._path)

    def _env(self) -> Dict[str, str]:
        # Start from a copy of the current process environment.
        return os.environ.copy()

    def _remove(self) -> None:
        shutil.rmtree(self._path, ignore_errors=True)
class VirtualEnv(VirtualEnvironment):
    """Environment backed by the ``virtualenv`` tool."""

    def _make(self) -> None:
        # Build the virtualenv, optionally pinning the interpreter via
        # --python=<program>.
        flags = self._flags or core.Flags()
        if self._program is not None:
            flags = flags(f"--python={self._program}")
        core.popen(["python", "-m", "virtualenv", self._path] + flags).log()

    def _env(self) -> Dict[str, str]:
        env = super()._env()
        # BUG FIX: the original joined PATH entries with a literal ";", which
        # is only correct on Windows; os.pathsep is ":" on POSIX.
        # NOTE(review): "Scripts" is the Windows virtualenv layout (POSIX
        # venvs use "bin") -- confirm whether POSIX support is intended here.
        env["PATH"] = (
            os.path.abspath(os.path.join(self._path, "Scripts"))
            + os.pathsep
            + env["PATH"]
        )
        return env
| 24.413043 | 84 | 0.596171 |
3fb79c42b5ad107658366517b7a85c0640545cf2 | 12,126 | py | Python | attention_decoder.py | AngledLuffa/pointer-generator | 7929237250aecffbee8a02c82c0cb9b0b9508f63 | [
"Apache-2.0"
] | null | null | null | attention_decoder.py | AngledLuffa/pointer-generator | 7929237250aecffbee8a02c82c0cb9b0b9508f63 | [
"Apache-2.0"
] | null | null | null | attention_decoder.py | AngledLuffa/pointer-generator | 7929237250aecffbee8a02c82c0cb9b0b9508f63 | [
"Apache-2.0"
] | 1 | 2021-04-26T18:18:48.000Z | 2021-04-26T18:18:48.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file defines the decoder"""
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import variable_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import math_ops
# Note: this function is based on tf.contrib.legacy_seq2seq_attention_decoder, which is now outdated.
# In the future, it would make more sense to write variants on the attention mechanism using the new seq2seq library for tensorflow 1.0: https://www.tensorflow.org/api_guides/python/contrib.seq2seq#Attention
def attention_decoder(decoder_inputs, initial_state, encoder_states, enc_padding_mask, cell, initial_state_attention=False, pointer_gen=True, use_coverage=False, prev_coverage=None):
  """
  Args:
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    initial_state: 2D Tensor [batch_size x cell.state_size].
    encoder_states: 3D Tensor [batch_size x attn_length x attn_size].
    enc_padding_mask: 2D Tensor [batch_size x attn_length] containing 1s and 0s; indicates which of the encoder locations are padding (0) or a real token (1).
    cell: rnn_cell.RNNCell defining the cell function and size.
    initial_state_attention:
      Note that this attention decoder passes each decoder input through a linear layer with the previous step's context vector to get a modified version of the input. If initial_state_attention is False, on the first decoder step the "previous context vector" is just a zero vector. If initial_state_attention is True, we use initial_state to (re)calculate the previous step's context vector. We set this to False for train/eval mode (because we call attention_decoder once for all decoder steps) and True for decode mode (because we call attention_decoder once for each decoder step).
    pointer_gen: boolean. If True, calculate the generation probability p_gen for each decoder step.
    use_coverage: boolean. If True, use coverage mechanism.
    prev_coverage:
      If not None, a tensor with shape (batch_size, attn_length). The previous step's coverage vector. This is only not None in decode mode when using coverage.

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors of
      shape [batch_size x cell.output_size]. The output vectors.
    state: The final state of the decoder. A tensor shape [batch_size x cell.state_size].
    attn_dists: A list containing tensors of shape (batch_size,attn_length).
      The attention distributions for each decoder step.
    p_gens: List of length input_size, containing tensors of shape [batch_size, 1]. The values of p_gen for each decoder step. Empty list if pointer_gen=False.
    coverage: Coverage vector on the last step computed. None if use_coverage=False.
  """
  with variable_scope("attention_decoder") as scope:
    batch_size = encoder_states.get_shape()[0].value # if this line fails, it's because the batch size isn't defined
    attn_size = encoder_states.get_shape()[2].value # if this line fails, it's because the attention length isn't defined

    # Reshape encoder_states (need to insert a dim)
    encoder_states = tf.expand_dims(encoder_states, axis=2) # now is shape (batch_size, attn_len, 1, attn_size)

    # To calculate attention, we calculate
    #   v^T tanh(W_h h_i + W_s s_t + b_attn)
    # where h_i is an encoder state, and s_t a decoder state.
    # attn_vec_size is the length of the vectors v, b_attn, (W_h h_i) and (W_s s_t).
    # We set it to be equal to the size of the encoder states.
    attention_vec_size = attn_size

    # Get the weight matrix W_h and apply it to each encoder state to get (W_h h_i), the encoder features
    W_h = tf.get_variable("W_h", [1, 1, attn_size, attention_vec_size])
    encoder_features = nn_ops.conv2d(encoder_states, W_h, [1, 1, 1, 1], "SAME") # shape (batch_size,attn_length,1,attention_vec_size)

    # Get the weight vectors v and w_c (w_c is for coverage)
    v = tf.get_variable("v", [attention_vec_size])
    if use_coverage:
      # BUG FIX: `variable_scope` is imported in this module as the
      # tf.compat.v1.variable_scope callable itself (not the variable_scope
      # module), so the original `variable_scope.variable_scope("coverage")`
      # raised AttributeError whenever use_coverage was True. Call it
      # directly, exactly as "attention_decoder" is opened above.
      with variable_scope("coverage"):
        w_c = tf.get_variable("w_c", [1, 1, 1, attention_vec_size])

    if prev_coverage is not None: # for beam search mode with coverage
      # reshape from (batch_size, attn_length) to (batch_size, attn_len, 1, 1)
      prev_coverage = tf.expand_dims(tf.expand_dims(prev_coverage,2),3)

    def attention(decoder_state, coverage=None):
      """Calculate the context vector and attention distribution from the decoder state.

      Args:
        decoder_state: state of the decoder
        coverage: Optional. Previous timestep's coverage vector, shape (batch_size, attn_len, 1, 1).

      Returns:
        context_vector: weighted sum of encoder_states
        attn_dist: attention distribution
        coverage: new coverage vector. shape (batch_size, attn_len, 1, 1)
      """
      with variable_scope("Attention"):
        # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)
        decoder_features = linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)
        decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)

        def masked_attention(e):
          """Take softmax of e then apply enc_padding_mask and re-normalize"""
          attn_dist = nn_ops.softmax(e) # take softmax. shape (batch_size, attn_length)
          attn_dist *= enc_padding_mask # apply mask
          masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)
          return attn_dist / tf.reshape(masked_sums, [-1, 1]) # re-normalize

        if use_coverage and coverage is not None: # non-first step of coverage
          # Multiply coverage vector by w_c to get coverage_features.
          coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], "SAME") # c has shape (batch_size, attn_length, 1, attention_vec_size)

          # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)
          e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # shape (batch_size,attn_length)

          # Calculate attention distribution
          attn_dist = masked_attention(e)

          # Update coverage vector
          coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1])
        else:
          # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)
          e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features), [2, 3]) # calculate e

          # Calculate attention distribution
          attn_dist = masked_attention(e)

          if use_coverage: # first step of training
            coverage = tf.expand_dims(tf.expand_dims(attn_dist,2),2) # initialize coverage

        # Calculate the context vector from attn_dist and encoder_states
        context_vector = math_ops.reduce_sum(array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) * encoder_states, [1, 2]) # shape (batch_size, attn_size).
        context_vector = array_ops.reshape(context_vector, [-1, attn_size])

      return context_vector, attn_dist, coverage

    outputs = []
    attn_dists = []
    p_gens = []
    state = initial_state
    coverage = prev_coverage # initialize coverage to None or whatever was passed in
    context_vector = array_ops.zeros([batch_size, attn_size])
    context_vector.set_shape([None, attn_size]) # Ensure the second shape of attention vectors is set.
    if initial_state_attention: # true in decode mode
      # Re-calculate the context vector from the previous step so that we can pass it through a linear layer with this step's input to get a modified version of the input
      context_vector, _, coverage = attention(initial_state, coverage) # in decode mode, this is what updates the coverage vector
    for i, inp in enumerate(decoder_inputs):
      tf.compat.v1.logging.info("Adding attention_decoder timestep %i of %i", i, len(decoder_inputs))
      if i > 0:
        tf.get_variable_scope().reuse_variables()

      # Merge input and previous attentions into one vector x of the same size as inp
      input_size = inp.get_shape().with_rank(2)[1]
      if input_size.value is None:
        raise ValueError("Could not infer input size from input: %s" % inp.name)
      x = linear([inp] + [context_vector], input_size, True)

      # Run the decoder RNN cell. cell_output = decoder state
      cell_output, state = cell(x, state)

      # Run the attention mechanism.
      if i == 0 and initial_state_attention: # always true in decode mode
        with variable_scope(tf.get_variable_scope(), reuse=True): # you need this because you've already run the initial attention(...) call
          context_vector, attn_dist, _ = attention(state, coverage) # don't allow coverage to update
      else:
        context_vector, attn_dist, coverage = attention(state, coverage)
      attn_dists.append(attn_dist)

      # Calculate p_gen
      if pointer_gen:
        with tf.variable_scope('calculate_pgen'):
          p_gen = linear([context_vector, state.c, state.h, x], 1, True) # Tensor shape (batch_size, 1)
          p_gen = tf.sigmoid(p_gen)
          p_gens.append(p_gen)

      # Concatenate the cell_output (= decoder state) and the context vector, and pass them through a linear layer
      # This is V[s_t, h*_t] + b in the paper
      with variable_scope("AttnOutputProjection"):
        output = linear([cell_output] + [context_vector], cell.output_size, True)
      outputs.append(output)

    # If using coverage, reshape it
    if coverage is not None:
      coverage = array_ops.reshape(coverage, [batch_size, -1])

    return outputs, state, attn_dists, p_gens, coverage
def linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (isinstance(args, (list, tuple)) and not args):
    raise ValueError("`args` must be specified")
  if not isinstance(args, (list, tuple)):
    args = [args]

  # Validate that every argument is a rank-2 tensor with a known second
  # dimension, accumulating the widths to size the weight matrix.
  shapes = [a.get_shape().as_list() for a in args]
  total_arg_size = 0
  for shape in shapes:
    if len(shape) != 2:
      raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
    if not shape[1]:
      raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
    total_arg_size += shape[1]

  # Now the computation.
  with tf.variable_scope(scope or "Linear"):
    matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
    stacked = args[0] if len(args) == 1 else tf.concat(axis=1, values=args)
    res = tf.matmul(stacked, matrix)
    if not bias:
      return res
    bias_term = tf.get_variable(
        "Bias", [output_size], initializer=tf.constant_initializer(bias_start))
    return res + bias_term
| 52.951965 | 586 | 0.708065 |
e054daade03b94300ff2ac909db8dae36ef2162e | 1,912 | py | Python | pygpu/basic.py | heha180/array | 9fa8fe6e344acf98e53fcece337e2008e15d6279 | [
"0BSD"
] | null | null | null | pygpu/basic.py | heha180/array | 9fa8fe6e344acf98e53fcece337e2008e15d6279 | [
"0BSD"
] | null | null | null | pygpu/basic.py | heha180/array | 9fa8fe6e344acf98e53fcece337e2008e15d6279 | [
"0BSD"
] | 1 | 2021-05-14T00:32:33.000Z | 2021-05-14T00:32:33.000Z | from string import Template
from .gpuarray import GpuArray, GpuKernel, SIZE
def _generate_kernel(ctx, cols, upper=True):
    """Compile a GPU kernel that zeroes one triangle of a float matrix.

    ``upper=True`` zeroes elements with row > col (below the diagonal),
    leaving the upper triangle; ``upper=False`` does the opposite.
    ``cols`` is the length of the fastest-varying dimension.
    """
    tmpl = Template("""
    #include "cluda.h"

    KERNEL void extract_tri(GLOBAL_MEM ga_float *a, ga_size a_off, ga_uint N) {
      a = (GLOBAL_MEM ga_float *)(((GLOBAL_MEM char *)a) + a_off);
      unsigned int idx = GID_1 * LDIM_0 * GDIM_0 +
                         GID_0 * LDIM_0 + LID_0;
      unsigned int ix = idx/${cols};
      unsigned int iy = idx%${cols};
      if (idx < N) {
        if (ix ${le} iy)
          a[idx] = 0.0;
      }
    }
    """)
    comparison = '>' if upper else '<'
    src = tmpl.substitute(cols=cols, le=comparison)
    spec = [GpuArray, SIZE, 'uint32']
    return GpuKernel(src, "extract_tri", spec, context=ctx)
def triu(A, inplace=True):
    """Zero the strictly lower triangle of a 2d contiguous GpuArray."""
    if A.ndim != 2:
        raise ValueError("triu only works for 2d arrays")
    if A.flags.c_contiguous is A.flags.f_contiguous is False:
        raise ValueError("triu only works for contiguous arrays")
    if not inplace:
        A = A.copy()
    # For Fortran order the kernel's row/column roles are swapped, so flip
    # the triangle selector and take the contiguous dimension accordingly.
    fortran = A.flags['F_CONTIGUOUS']
    upper = not fortran
    cols = A.shape[0] if fortran else A.shape[1]
    n = A.shape[0] * A.shape[1]
    kernel = _generate_kernel(A.context, cols, upper)
    kernel(A, A.offset, n, n=n)
    return A
def tril(A, inplace=True):
    """Zero the strictly upper triangle of a 2d contiguous GpuArray."""
    if A.ndim != 2:
        raise ValueError("tril only works for 2d arrays")
    if A.flags.c_contiguous is A.flags.f_contiguous is False:
        raise ValueError("tril only works for contiguous arrays")
    if not inplace:
        A = A.copy()
    # Mirror of triu: in Fortran order the kernel must zero the "upper"
    # triangle of the transposed view to hit the upper triangle of A.
    fortran = A.flags['F_CONTIGUOUS']
    upper = fortran
    cols = A.shape[0] if fortran else A.shape[1]
    n = A.shape[0] * A.shape[1]
    kernel = _generate_kernel(A.context, cols, upper)
    kernel(A, A.offset, n, n=n)
    return A
| 28.969697 | 79 | 0.566946 |
a1a2bf9b9c6fd0e77c83da38bba977263b9a0ae7 | 8,553 | py | Python | frappe/tests/test_twofactor.py | bahaou/frappe | 53d4ddca43b114414f1b81c58944064559a3e823 | [
"MIT"
] | null | null | null | frappe/tests/test_twofactor.py | bahaou/frappe | 53d4ddca43b114414f1b81c58944064559a3e823 | [
"MIT"
] | 1 | 2021-10-01T13:08:45.000Z | 2021-10-01T13:08:45.000Z | frappe/tests/test_twofactor.py | bahaou/frappe | 53d4ddca43b114414f1b81c58944064559a3e823 | [
"MIT"
] | null | null | null | # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest, frappe, pyotp
from frappe.auth import HTTPRequest
from frappe.utils import cint
from frappe.utils import set_request
from frappe.auth import validate_ip_address, get_login_attempt_tracker
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor, get_cached_user_pass,
two_factor_is_enabled_for_, confirm_otp_token, get_otpsecret_for_, get_verification_obj, ExpiredLoginException)
from . import update_system_settings, get_system_setting
import time
class TestTwoFactor(unittest.TestCase):
    """Exercises frappe's two-factor (OTP) authentication flow end to end."""

    def __init__(self, *args, **kwargs):
        super(TestTwoFactor, self).__init__(*args, **kwargs)
        # Remember the site-wide login-attempt limit so tearDown can restore it.
        self.default_allowed_login_attempts = get_system_setting('allow_consecutive_login_attempts')

    def setUp(self):
        # Build a fake POST /login request with 2FA enabled (see helper below).
        self.http_requests = create_http_request()
        self.login_manager = frappe.local.login_manager
        self.user = self.login_manager.user
        update_system_settings({
            'allow_consecutive_login_attempts': 2
        })

    def tearDown(self):
        # Drop per-request 2FA state and disable 2FA so tests stay independent.
        frappe.local.response['verification'] = None
        frappe.local.response['tmp_id'] = None
        disable_2fa()
        frappe.clear_cache(user=self.user)
        update_system_settings({
            'allow_consecutive_login_attempts': self.default_allowed_login_attempts
        })

    def test_should_run_2fa(self):
        '''Should return true if enabled.'''
        toggle_2fa_all_role(state=True)
        self.assertTrue(should_run_2fa(self.user))
        toggle_2fa_all_role(state=False)
        self.assertFalse(should_run_2fa(self.user))

    def test_get_cached_user_pass(self):
        '''Cached data should not contain user and pass before 2fa.'''
        user, pwd = get_cached_user_pass()
        self.assertTrue(all([not user, not pwd]))

    def test_authenticate_for_2factor(self):
        '''Verification obj and tmp_id should be set in frappe.local.'''
        authenticate_for_2factor(self.user)
        verification_obj = frappe.local.response['verification']
        tmp_id = frappe.local.response['tmp_id']
        self.assertTrue(verification_obj)
        self.assertTrue(tmp_id)
        # Username, password and OTP secret are cached under the tmp_id prefix.
        for k in ['_usr', '_pwd', '_otp_secret']:
            self.assertTrue(frappe.cache().get('{0}{1}'.format(tmp_id, k)),
                '{} not available'.format(k))

    def test_two_factor_is_enabled(self):
        '''
        1. Should return true, if enabled and not bypass_2fa_for_retricted_ip_users
        2. Should return false, if not enabled
        3. Should return true, if enabled and not bypass_2fa_for_retricted_ip_users and ip in restrict_ip
        4. Should return true, if enabled and bypass_2fa_for_retricted_ip_users and not restrict_ip
        5. Should return false, if enabled and bypass_2fa_for_retricted_ip_users and ip in restrict_ip
        '''
        # Scenario 1
        enable_2fa()
        self.assertTrue(should_run_2fa(self.user))

        # Scenario 2
        disable_2fa()
        self.assertFalse(should_run_2fa(self.user))

        # Scenario 3
        enable_2fa()
        user = frappe.get_doc('User', self.user)
        user.restrict_ip = frappe.local.request_ip
        user.save()
        self.assertTrue(should_run_2fa(self.user))

        # Scenario 4
        user = frappe.get_doc('User', self.user)
        user.restrict_ip = ""
        user.save()
        enable_2fa(1)
        self.assertTrue(should_run_2fa(self.user))

        # Scenario 5
        user = frappe.get_doc('User', self.user)
        user.restrict_ip = frappe.local.request_ip
        user.save()
        enable_2fa(1)
        self.assertFalse(should_run_2fa(self.user))

    def test_two_factor_is_enabled_for_user(self):
        '''Should return true if enabled for user.'''
        toggle_2fa_all_role(state=True)
        self.assertTrue(two_factor_is_enabled_for_(self.user))
        # Administrator is not part of the toggled role, so 2FA stays off.
        self.assertFalse(two_factor_is_enabled_for_("Administrator"))
        toggle_2fa_all_role(state=False)
        self.assertFalse(two_factor_is_enabled_for_(self.user))

    def test_get_otpsecret_for_user(self):
        '''OTP secret should be set for user.'''
        self.assertTrue(get_otpsecret_for_(self.user))
        self.assertTrue(frappe.db.get_default(self.user + '_otpsecret'))

    def test_confirm_otp_token(self):
        '''Ensure otp is confirmed'''
        # Use a short expiry so the test can observe token expiration quickly.
        frappe.flags.otp_expiry = 2
        authenticate_for_2factor(self.user)
        tmp_id = frappe.local.response['tmp_id']
        otp = 'wrongotp'
        with self.assertRaises(frappe.AuthenticationError):
            confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id)
        otp = get_otp(self.user)
        self.assertTrue(confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id))
        frappe.flags.otp_expiry = None
        if frappe.flags.tests_verbose:
            print('Sleeping for 2 secs to confirm token expires..')
        time.sleep(2)
        with self.assertRaises(ExpiredLoginException):
            confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id)

    def test_get_verification_obj(self):
        '''Confirm verification object is returned.'''
        otp_secret = get_otpsecret_for_(self.user)
        token = int(pyotp.TOTP(otp_secret).now())
        self.assertTrue(get_verification_obj(self.user, token, otp_secret))

    def test_render_string_template(self):
        '''String template renders as expected with variables.'''
        args = {'issuer_name': 'Frappe Technologies'}
        _str = 'Verification Code from {{issuer_name}}'
        _str = frappe.render_template(_str, args)
        self.assertEqual(_str, 'Verification Code from Frappe Technologies')

    def test_bypass_restict_ip(self):
        '''
        1. Raise error if user not login from one of the restrict_ip, Bypass restrict ip check disabled by default
        2. Bypass restrict ip check enabled in System Settings
        3. Bypass restrict ip check enabled for User
        '''
        # 1
        user = frappe.get_doc('User', self.user)
        user.restrict_ip = "192.168.255.254"  # Dummy IP
        user.bypass_restrict_ip_check_if_2fa_enabled = 0
        user.save()
        enable_2fa(bypass_restrict_ip_check=0)
        with self.assertRaises(frappe.AuthenticationError):
            validate_ip_address(self.user)

        # 2
        enable_2fa(bypass_restrict_ip_check=1)
        self.assertIsNone(validate_ip_address(self.user))

        # 3
        user = frappe.get_doc('User', self.user)
        user.bypass_restrict_ip_check_if_2fa_enabled = 1
        user.save()
        enable_2fa()
        self.assertIsNone(validate_ip_address(self.user))

    def test_otp_attempt_tracker(self):
        """Check that OTP login attempts are tracked.
        """
        authenticate_for_2factor(self.user)
        tmp_id = frappe.local.response['tmp_id']
        otp = 'wrongotp'
        with self.assertRaises(frappe.AuthenticationError):
            confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id)

        with self.assertRaises(frappe.AuthenticationError):
            confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id)

        # REMOVE ME: current logic allows allow_consecutive_login_attempts+1 attempts
        # before raising security exception, remove below line when that is fixed.
        with self.assertRaises(frappe.AuthenticationError):
            confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id)

        with self.assertRaises(frappe.SecurityException):
            confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id)

        # Remove tracking cache so that user can try loging in again
        tracker = get_login_attempt_tracker(self.user, raise_locked_exception=False)
        tracker.add_success_attempt()
        otp = get_otp(self.user)
        self.assertTrue(confirm_otp_token(self.login_manager, otp=otp, tmp_id=tmp_id))
def create_http_request():
    """Prepare a fake POST /login request (with 2FA on) and return the HTTPRequest."""
    set_request(method='POST', path='login')
    enable_2fa()
    credentials = {'usr': 'test@example.com', 'pwd': 'Eastern_43A1W'}
    for field, value in credentials.items():
        frappe.form_dict[field] = value
    frappe.local.form_dict['cmd'] = 'login'
    return HTTPRequest()
def enable_2fa(bypass_two_factor_auth=0, bypass_restrict_ip_check=0):
    """Turn on two-factor auth (OTP App) in System Settings, with optional bypasses."""
    settings = frappe.get_doc('System Settings')
    settings.enable_two_factor_auth = 1
    settings.two_factor_method = 'OTP App'
    settings.bypass_2fa_for_retricted_ip_users = cint(bypass_two_factor_auth)
    settings.bypass_restrict_ip_check_if_2fa_enabled = cint(bypass_restrict_ip_check)
    settings.flags.ignore_mandatory = True
    settings.save(ignore_permissions=True)
    frappe.db.commit()
def disable_2fa():
    """Switch two-factor auth off in System Settings."""
    settings = frappe.get_doc('System Settings')
    settings.enable_two_factor_auth = 0
    settings.flags.ignore_mandatory = True
    settings.save(ignore_permissions=True)
    frappe.db.commit()
def toggle_2fa_all_role(state=None):
    """Enable or disable 2FA for the 'All' role.

    When *state* is None the current setting is flipped; otherwise *state*
    is applied directly. Non-boolean states are ignored.
    """
    all_role = frappe.get_doc('Role', 'All')
    if state is None:
        # BUG FIX: the original computed `False if enabled else False`, which
        # is always False and could never turn the flag on; toggling means
        # negating the current value.
        state = not all_role.two_factor_auth
    if state not in [True, False]:
        return
    all_role.two_factor_auth = cint(state)
    all_role.save(ignore_permissions=True)
    frappe.db.commit()
def get_otp(user):
    """Return the current TOTP code for *user*'s stored OTP secret."""
    secret = get_otpsecret_for_(user)
    return pyotp.TOTP(secret).now()
| 36.241525 | 112 | 0.778557 |
5b04fe92b12f5a747062a312ea36540ed0d99b8c | 57,383 | py | Python | test/test_remote.py | ccacciari/bdbag | f9e9eb257c43c9cc1c6ad83519070d7bdd8bab1e | [
"Apache-2.0"
] | 39 | 2018-04-13T16:36:39.000Z | 2022-02-11T08:59:35.000Z | test/test_remote.py | ccacciari/bdbag | f9e9eb257c43c9cc1c6ad83519070d7bdd8bab1e | [
"Apache-2.0"
] | 30 | 2018-03-30T23:05:41.000Z | 2022-02-24T13:38:52.000Z | test/test_remote.py | ccacciari/bdbag | f9e9eb257c43c9cc1c6ad83519070d7bdd8bab1e | [
"Apache-2.0"
] | 18 | 2018-05-14T12:50:41.000Z | 2021-11-25T15:50:19.000Z | # encoding: utf-8
#
# Copyright 2016 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import io
import sys
import logging
import mock
import json
import unittest
import requests
import tempfile
import bdbag
import bdbag.bdbagit as bdbagit
import bdbag.bdbagit_profile as bdbagit_profile
from os.path import join as ospj
from os.path import exists as ospe
from os.path import isfile as ospif
from bdbag import bdbag_api as bdb, bdbag_config as bdbcfg
from bdbag.fetch import fetcher
from bdbag.fetch.transports.fetch_http import BaseFetchTransport, HTTPFetchTransport
from bdbag.fetch.auth import cookies
from bdbag.fetch.auth.keychain import read_keychain, update_keychain, get_auth_entries
from test.test_common import BaseTest
# Py2/Py3 compatibility: StringIO moved into the io module in Python 3.
if sys.version_info > (3,):
    from io import StringIO
else:
    from StringIO import StringIO

logger = logging.getLogger()  # root logger; tests attach/remove a capture handler
class CustomTestFetchTransport(HTTPFetchTransport):
    # Test double: a pluggable fetch transport used to verify that custom
    # transport classes can be registered and receive the keychain.

    def __init__(self, config, keychain, **kwargs):
        super(CustomTestFetchTransport, self).__init__(config, keychain, **kwargs)
        if self.keychain:
            # Log the propagated keychain so tests can assert it arrived intact.
            logging.debug("Got propagated keychain: %s" % json.dumps(self.keychain))

    def fetch(self, url, output_path, **kwargs):
        # Delegate to the stock HTTP transport; only construction is customized.
        return super(CustomTestFetchTransport, self).fetch(url, output_path, **kwargs)

    def cleanup(self):
        super(CustomTestFetchTransport, self).cleanup()
class BadCustomTestFetchTransport(BaseFetchTransport):
    # Deliberately incomplete transport (no fetch() override); presumably used
    # to test error handling for misbehaving transport plugins -- TODO confirm.

    def __init__(self, config, keychain, **kwargs):
        super(BadCustomTestFetchTransport, self).__init__(config, keychain, **kwargs)
class TestRemoteAPI(BaseTest):
    def setUp(self):
        super(TestRemoteAPI, self).setUp()
        # Capture log output in-memory so tests can assert on emitted messages.
        self.stream = StringIO()
        self.handler = logging.StreamHandler(self.stream)
        logger.addHandler(self.handler)
    def tearDown(self):
        # Detach the log-capture handler installed in setUp.
        self.stream.close()
        logger.removeHandler(self.handler)
        super(TestRemoteAPI, self).tearDown()
    def _test_bag_with_remote_file_manifest(self, update=False, use_json_stream=False):
        # Shared driver: create (or update) a bag whose payload references come
        # from a remote-file-manifest, then check fetch.txt and bag structure.
        try:
            bag_dir = self.test_data_dir if not update else self.test_bag_dir
            filename = 'test-fetch-manifest.json' if not use_json_stream else 'test-fetch-manifest-2.json'
            bag = bdb.make_bag(bag_dir,
                               algs=["md5", "sha1", "sha256", "sha512"],
                               update=update,
                               remote_file_manifest=ospj(self.test_config_dir, filename))
            output = self.stream.getvalue()
            self.assertIsInstance(bag, bdbagit.BDBag)
            self.assertExpectedMessages(['Generating remote file references from', filename], output)
            fetch_file = ospj(bag_dir, 'fetch.txt')
            self.assertTrue(ospif(fetch_file))
            with open(fetch_file) as ff:
                fetch_txt = ff.read()
            # fetch.txt lines are URL<TAB>length<TAB>path; check both an HTTP
            # URL entry and an identifier (ARK) entry made it in.
            self.assertIn(
                'https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/'
                'test-fetch-http.txt\t201\tdata/test-fetch-http.txt', fetch_txt)
            self.assertIn(
                'ark:/57799/b9dd5t\t223\tdata/test-fetch-identifier.txt', fetch_txt)
            bdb.validate_bag_structure(bag_dir, True)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
def test_create_bag_from_remote_file_manifest(self):
logger.info(self.getTestHeader('create bag add remote file manifest'))
self._test_bag_with_remote_file_manifest()
def test_update_bag_from_remote_file_manifest(self):
logger.info(self.getTestHeader('update bag add remote file manifest'))
self._test_bag_with_remote_file_manifest(True)
def test_create_bag_from_remote_file_manifest_json_stream(self):
logger.info(self.getTestHeader('create bag add remote file manifest with json stream format'))
self._test_bag_with_remote_file_manifest(use_json_stream=True)
    def test_bag_with_unencoded_utf8_remote_file_manifest(self):
        """Create a bag from a manifest whose URLs/paths contain raw UTF-8 characters.

        Verifies that fetch.txt entries are written with the expected mix of
        percent-encoded and literal non-ASCII characters.
        """
        try:
            bag_dir = self.test_data_dir
            filename = 'test-fetch-manifest-encoding.json'
            bag = bdb.make_bag(bag_dir,
                               algs=["md5", "sha1", "sha256", "sha512"],
                               remote_file_manifest=ospj(self.test_config_dir, filename))
            output = self.stream.getvalue()
            self.assertIsInstance(bag, bdbagit.BDBag)
            self.assertExpectedMessages(['Generating remote file references from', filename], output)
            fetch_file = ospj(bag_dir, 'fetch.txt')
            self.assertTrue(ospif(fetch_file))
            # Read as UTF-8 so the literal "®" characters round-trip.
            with io.open(fetch_file, encoding='utf-8') as ff:
                fetch_txt = ff.read()
            self.assertIn(
                u'https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http-encoded/'
                u'test%20fetch%25http®.txt\t201\tdata/test%0Afetch%0Ahttp®.txt', fetch_txt)
            self.assertIn(
                u'https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http-encoded/'
                u'test%20fetch%25http®.txt\t201\tdata/test fetch http®.txt', fetch_txt)
            self.assertIn(
                u'https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http-encoded/'
                u'test%20fetch%25http®.txt\t201\tdata/test%25fetch http®%0D%0A.txt', fetch_txt)
            bdb.validate_bag_structure(bag_dir, True)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_validate_profile(self):
        """Validate the test bag against its BagIt profile."""
        logger.info(self.getTestHeader('validate profile'))
        try:
            profile = bdb.validate_bag_profile(self.test_bag_dir)
            self.assertIsInstance(profile, bdbagit_profile.Profile)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
def test_validate_profile_serialization_zip(self):
logger.info(self.getTestHeader('validate profile serialization zip'))
try:
bag_path = ospj(self.test_archive_dir, 'test-bag.zip')
bdb.validate_bag_serialization(
bag_path,
bag_profile_path=
'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-profile.json')
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
def test_validate_profile_serialization_tar(self):
logger.info(self.getTestHeader('validate profile serialization tar'))
try:
bag_path = ospj(self.test_archive_dir, 'test-bag.tar')
bdb.validate_bag_serialization(
bag_path,
bag_profile_path=
'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-profile.json')
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
def test_validate_profile_serialization_tgz(self):
logger.info(self.getTestHeader('validate profile serialization tgz'))
try:
bag_path = ospj(self.test_archive_dir, 'test-bag.tgz')
bdb.validate_bag_serialization(
bag_path,
bag_profile_path=
'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-profile.json')
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
def test_validate_invalid_profile_serialization_bag_dir(self):
logger.info(self.getTestHeader('validate invalid profile serialization on a bag dir'))
try:
bag_path = ospj(self.test_bag_dir)
self.assertRaises(bdbagit_profile.ProfileValidationError,
bdb.validate_bag_serialization,
bag_path,
bag_profile_path=
'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/'
'bdbag-profile.json')
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
def test_validate_invalid_profile_serialization_unsupported_format(self):
logger.info(self.getTestHeader('validate invalid profile serialization on an unsupported archive format'))
try:
bag_path = ospj(self.test_archive_dir, 'test-bag.7z')
self.assertRaises(bdbagit_profile.ProfileValidationError,
bdb.validate_bag_serialization,
bag_path,
bag_profile_path=
'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/'
'bdbag-profile.json')
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
    def test_validate_remote_bag_from_rfm(self):
        """Create a bag from a remote file manifest, resolve its fetch entries, then validate."""
        logger.info(self.getTestHeader('create, resolve, and validate bag from remote file manifest'))
        try:
            self._test_bag_with_remote_file_manifest()
            self.assertTrue(bdb.resolve_fetch(self.test_data_dir), "Fetch incomplete")
            bdb.validate_bag(self.test_data_dir, fast=True)
            bdb.validate_bag(self.test_data_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http(self):
        """Resolve fetch.txt entries over plain HTTP and validate the result."""
        logger.info(self.getTestHeader('test resolve fetch http'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir, cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_no_redirects(self):
        """Fetch should fail when redirects are disabled by configuration."""
        logger.info(self.getTestHeader('test resolve fetch http no redirects'))
        try:
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_http_no_redirect_dir,
                                               config_file=ospj(self.test_config_dir, 'test-config-6.json'),
                                               cookie_scan=False), "Fetch complete")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_unexpected_filesize(self):
        """A size mismatch between fetch.txt and the transfer should only warn, not fail."""
        logger.info(self.getTestHeader('test resolve fetch http unexpected filesize warning'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_unexpected_filesize_dir, cookie_scan=False),
                            "Fetch incomplete")
            output = self.stream.getvalue()
            self.assertExpectedMessages(["transfer size mismatch. Expected 200 bytes but received 201 bytes"], output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_encoded_filename(self):
        """Resolve fetch entries whose target filenames are percent-encoded."""
        logger.info(self.getTestHeader('test resolve fetch http with encoded filename'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_encoded_filename_dir, cookie_scan=False),
                            "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_encoded_filename_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_encoded_filename_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
def test_resolve_fetch_http_with_callback_cancel(self):
logger.info(self.getTestHeader('test resolve fetch http'))
try:
def callback(current, total):
if current < total - 1:
return True
else:
return False
self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_http_dir, callback=callback, cookie_scan=False))
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_bad_request(self):
        """A bad URL path should fail the fetch and log protocol/request errors."""
        logger.info(self.getTestHeader('test resolve fetch http bad url path'))
        try:
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_http_bad_dir,
                                               config_file=ospj(self.test_config_dir, 'test-config-3.json'),
                                               cookie_scan=False))
            output = self.stream.getvalue()
            self.assertExpectedMessages(["HTTP GET Failed for URL",
                                         "HTTP Request Exception",
                                         "Transfer protocol",
                                         "is not supported"],
                                        output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_bypass_ssl_cert_verify_global(self):
        """The global config flag should disable SSL certificate verification."""
        logger.info(self.getTestHeader('test resolve fetch http bypass ssl cert verify global'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              config_file=ospj(self.test_config_dir, 'test-config-11.json'),
                                              cookie_scan=False))
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Bypassing SSL certificate verification due to global configuration setting."],
                                        output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_bypass_ssl_cert_verify_whitelisted_uri(self):
        """A whitelisted URI should disable SSL certificate validation for that URL only."""
        logger.info(self.getTestHeader('test resolve fetch http bypass ssl cert verify whitelisted uri'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              config_file=ospj(self.test_config_dir, 'test-config-12.json'),
                                              cookie_scan=False))
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Bypassing SSL certificate validation for URL",
                                         "due to matching whitelist entry"],
                                        output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_incomplete(self):
        """Resuming a partially fetched bag (force=False) should complete it."""
        logger.info(self.getTestHeader('test resolve fetch incomplete'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_incomplete_fetch_dir,
                                              force=False, cookie_scan=False, quiet=False),
                            "Fetch incomplete")
            bdb.validate_bag(self.test_bag_incomplete_fetch_dir, fast=True)
            bdb.validate_bag(self.test_bag_incomplete_fetch_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
def _test_resolve_fetch_http_with_filter(self, expr, files):
logger.info(self.getTestHeader('test resolve fetch http with filter expression "%s"' % expr))
try:
self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
filter_expr=expr,
cookie_scan=False),
"Fetch incomplete")
for test_file in files:
self.assertTrue(ospif(ospj(self.test_bag_fetch_http_dir, test_file)))
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_with_filter1(self):
        """Filter fetch entries by length comparison."""
        self._test_resolve_fetch_http_with_filter("length<500", ["data/test-fetch-http.txt"])
    def test_resolve_fetch_http_with_filter2(self):
        """Filter fetch entries by exact filename match."""
        self._test_resolve_fetch_http_with_filter("filename==data/test-fetch-http.txt", ["data/test-fetch-http.txt"])
    def test_resolve_fetch_http_with_filter3(self):
        """Filter fetch entries by URL substring match."""
        self._test_resolve_fetch_http_with_filter("url=*/test-data/test-http/",
                                                  ["data/test-fetch-http.txt", "data/test-fetch-identifier.txt"])
    def test_resolve_fetch_http_basic_auth_get(self):
        """Resolve fetch using HTTP basic-auth GET, with requests.Session mocked."""
        logger.info(self.getTestHeader('test resolve fetch http basic auth GET'))
        try:
            patched_requests_get = None
            def mocked_request_auth_get_success(*args, **kwargs):
                # First (authenticated) call: clear session auth, stop the patch
                # so later requests go through unmocked, and report success.
                args[0].auth = None
                patched_requests_get.stop()
                return BaseTest.MockResponse({}, 200)
            patched_requests_get = mock.patch.multiple("bdbag.fetch.transports.fetch_http.requests.Session",
                                                       get=mocked_request_auth_get_success,
                                                       auth=None,
                                                       create=True)
            patched_requests_get.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-1.json'),
                                              cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_basic_auth_get_bad_key(self):
        """A keychain entry missing username/password should log an error but not abort."""
        logger.info(self.getTestHeader('test resolve fetch http basic auth GET with bad key'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-bad-1.json'),
                                              cookie_scan=False))
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Missing required parameters [username, password]"], output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def _test_resolve_fetch_http_auth_post(self, keychain_file):
        """Shared driver for POST-style auth tests; mocks Session.post to succeed once."""
        try:
            def mocked_request_auth_post_success(*args, **kwargs):
                # Clear session auth and stop the patch after the first POST.
                args[0].auth = None
                patched_requests_post.stop()
                return BaseTest.MockResponse({}, 201)
            patched_requests_post = mock.patch.multiple("bdbag.fetch.transports.fetch_http.requests.Session",
                                                        post=mocked_request_auth_post_success,
                                                        auth=None,
                                                        create=True)
            patched_requests_post.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, keychain_file),
                                              cookie_scan=False),
                            "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_basic_auth_post(self):
        """Resolve fetch using HTTP basic-auth POST."""
        logger.info(self.getTestHeader('test resolve fetch http basic auth POST'))
        self._test_resolve_fetch_http_auth_post("test-keychain-2.json")
    def test_resolve_fetch_http_form_auth_post(self):
        """Resolve fetch using HTTP form-based auth POST."""
        logger.info(self.getTestHeader('test resolve fetch http form auth POST'))
        self._test_resolve_fetch_http_auth_post("test-keychain-3.json")
    def test_resolve_fetch_http_cookie_auth(self):
        """Resolve fetch using cookie-based auth from the keychain."""
        logger.info(self.getTestHeader('test resolve fetch http cookie auth'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-4.json'),
                                              cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_auth_token_get(self):
        """Resolve fetch using bearer-token auth; the mock strips auth then re-dispatches GET."""
        logger.info(self.getTestHeader('test resolve fetch http token auth'))
        try:
            patched_requests_get_auth = None
            def mocked_request_auth_token_get_success(*args, **kwargs):
                # Drop auth/headers, un-patch, then perform the real GET.
                args[0].auth = None
                args[0].headers = {}
                patched_requests_get_auth.stop()
                return args[0].get(args[1], **kwargs)
            patched_requests_get_auth = mock.patch.multiple("bdbag.fetch.transports.fetch_http.requests.Session",
                                                            get=mocked_request_auth_token_get_success,
                                                            auth=None,
                                                            create=True)
            patched_requests_get_auth.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-6.json'),
                                              cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_auth_token_get_with_allowed_redirects(self):
        """Bearer-token fetch should follow a 302 redirect when token propagation is allowed."""
        logger.info(self.getTestHeader('test resolve fetch http token auth with allowed redirect'))
        try:
            patched_requests_get_auth = None
            def mocked_request_auth_token_get_success(*args, **kwargs):
                # Simulate a redirect back to the same URL, then un-patch.
                headers = args[0].headers or {}
                headers.update({"Location": args[1]})
                args[0].auth = None
                args[0].headers = {}
                patched_requests_get_auth.stop()
                return BaseTest.MockResponse({}, 302, headers=headers)
            patched_requests_get_auth = mock.patch.multiple("bdbag.fetch.transports.fetch_http.requests.Session",
                                                            get=mocked_request_auth_token_get_success,
                                                            auth=None,
                                                            create=True)
            patched_requests_get_auth.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-6.json'),
                                              cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_http_auth_token_get_with_disallowed_redirects(self):
        """Bearer-token fetch on redirect should drop the token when propagation is disabled."""
        logger.info(self.getTestHeader('test resolve fetch http token auth with disallowed redirect'))
        try:
            patched_requests_get_auth = None
            def mocked_request_auth_token_get_success(*args, **kwargs):
                # Simulate a redirect back to the same URL, then un-patch.
                headers = args[0].headers or {}
                headers.update({"Location": args[1]})
                args[0].auth = None
                args[0].headers = {}
                patched_requests_get_auth.stop()
                return BaseTest.MockResponse({}, 302, headers=headers)
            patched_requests_get_auth = mock.patch.multiple("bdbag.fetch.transports.fetch_http.requests.Session",
                                                            get=mocked_request_auth_token_get_success,
                                                            auth=None,
                                                            create=True)
            patched_requests_get_auth.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-7.json'),
                                              cookie_scan=False),
                            "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Authorization bearer token propagation on redirect is disabled"], output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_ark(self):
        """Resolve fetch entries referenced by ARK identifiers (live resolver)."""
        logger.info(self.getTestHeader('test resolve fetch ark'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_ark_dir, cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_ark_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_ark_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_ark2(self):
        """Resolve an ARK identifier with the resolver's HTTP layer mocked to a canned response."""
        logger.info(self.getTestHeader('test resolve fetch ark2'))
        try:
            # Canned identifier-service response: provides checksums and two
            # candidate download locations for the identified file.
            mock_response = {
                "admins": [
                    "urn:globus:auth:identity:7b315147-d8f6-4a80-853d-78b65826d734",
                    "urn:globus:groups:id:23acce4c-733f-11e8-a40d-0e847f194132",
                    "urn:globus:auth:identity:b2541312-d274-11e5-9131-bbb9500ff459",
                    "urn:globus:auth:identity:88204dba-e812-432a-abcd-ec631583a98c",
                    "urn:globus:auth:identity:58b31676-ef95-11e5-8ff7-5783aaa8fce7"
                ],
                "checksums": [
                    {
                        "function": "sha256",
                        "value": "59e6e0b91b51d49a5fb0e1068980d2e7d2b2001a6d11c59c64156d32e197a626"
                    }
                ],
                "identifier": "ark:/57799/b91FmdtR3Pf4Ct7",
                "landing_page": "https://identifiers.globus.org/ark:/57799/b91FmdtR3Pf4Ct7/landingpage",
                "location": [
                    "https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/test-fetch-identifier.txt",
                    "http://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/test-fetch-identifier.txt"
                ],
                "metadata": {
                    "title": "BDBag identifier unit test file"
                },
                "visible_to": [
                    "public"
                ]
            }
            patched_resolve_ark_get = None
            def mocked_request_resolver_ark_get_success(*args, **kwargs):
                # Single-shot mock: un-patch after the first resolver GET.
                args[0].auth = None
                patched_resolve_ark_get.stop()
                return BaseTest.MockResponse(mock_response, 200)
            patched_resolve_ark_get = mock.patch.multiple("bdbag.fetch.resolvers.base_resolver.requests.Session",
                                                          get=mocked_request_resolver_ark_get_success,
                                                          auth=None,
                                                          create=True)
            patched_resolve_ark_get.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_ark2_dir, cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_ark2_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_ark2_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_bad_ark(self):
        """An unresolvable ARK should fail the fetch and log a no-locations message."""
        logger.info(self.getTestHeader('test resolve fetch bad ark'))
        try:
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_ark_bad_dir,
                                               config_file=ospj(self.test_config_dir, 'test-config-3.json'),
                                               cookie_scan=False))
            output = self.stream.getvalue()
            self.assertExpectedMessages(["No file locations were found for identifier"], output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_ark_bad_response(self):
        """A bad resolver response (no mock installed here) should fail the fetch."""
        logger.info(self.getTestHeader('test resolve fetch ark bad response'))
        try:
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_ark2_dir,
                                               config_file=ospj(self.test_config_dir, 'test-config-3.json'),
                                               cookie_scan=False), "Fetch complete")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_doi(self):
        """Resolve a DOI identifier with the resolver's HTTP layer mocked to a schema.org payload."""
        logger.info(self.getTestHeader('test resolve fetch doi'))
        try:
            # Canned DataCite/schema.org response carrying contentUrl download
            # locations and a sha256 checksum for the identified file.
            mock_response = {
                "@context": "http://schema.org",
                "@type": "Dataset",
                "@id": "https://doi.org/10.23725/9999-9999",  # fake DOI
                "identifier": [
                    {
                        "@type": "PropertyValue",
                        "propertyID": "doi",
                        "value": "https://doi.org/10.23725/9999-9999"  # fake DOI
                    },
                    {
                        "@type": "PropertyValue",
                        "propertyID": "minid",
                        "value": "ark:/57799/b91FmdtR3Pf4Ct7"
                    },
                    {
                        "@type": "PropertyValue",
                        "propertyID": "sha256",
                        "value": "59e6e0b91b51d49a5fb0e1068980d2e7d2b2001a6d11c59c64156d32e197a626"
                    }
                ],
                "url": "https://ors.datacite.org/doi:/10.23725/9999-9999",  # fake DOI
                "additionalType": "BDBAG Test file",
                "name": "test-fetch-identifier.txt",
                "author": {
                    "name": "BDBag"
                },
                "description": "BDBag identifier unit test file",
                "keywords": "bdbag, unit test",
                "datePublished": "2018-09-20",
                "contentUrl": [
                    "https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/test-fetch-identifier.txt",
                    "http://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/test-fetch-identifier.txt"
                ],
                "schemaVersion": "http://datacite.org/schema/kernel-4",
                "publisher": {
                    "@type": "Organization",
                    "name": "fair-research.org"
                },
                "fileFormat": [
                    "text/plain "
                ]
            }
            patched_resolve_doi_get = None
            def mocked_request_resolver_doi_get_success(*args, **kwargs):
                # Single-shot mock: un-patch after the first resolver GET.
                args[0].auth = None
                patched_resolve_doi_get.stop()
                return BaseTest.MockResponse(mock_response, 200)
            patched_resolve_doi_get = mock.patch.multiple("bdbag.fetch.resolvers.base_resolver.requests.Session",
                                                          get=mocked_request_resolver_doi_get_success,
                                                          auth=None,
                                                          create=True)
            patched_resolve_doi_get.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_doi_dir, cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_doi_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_doi_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_dataguid(self):
        """Resolve a DataGUID identifier with the resolver's HTTP layer mocked to a data_object payload."""
        logger.info(self.getTestHeader('test resolve fetch dataguid'))
        try:
            # Canned DataGUID (GA4GH DOS-style) response with checksum, size,
            # and two candidate download URLs.
            mock_response = {
                "data_object": {
                    "checksums": [
                        {
                            "checksum": "59e6e0b91b51d49a5fb0e1068980d2e7d2b2001a6d11c59c64156d32e197a626",
                            "type": "sha256"
                        }
                    ],
                    "created": "2018-09-20T17:00:21.428857",
                    "description": "BDBag identifier unit test file",
                    "id": "dg.4503/a5d79375-1ba8-418f-9dda-eb981375e599",  # fake DataGUID
                    "mime_type": "text/plain",
                    "name": "test-fetch-identifier.txt",
                    "size": 223,
                    "updated": "2018-09-20T17:00:21.428866",
                    "urls": [
                        {
                            "url": "https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/test-fetch-identifier.txt"
                        },
                        {
                            "url": "http://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/test-fetch-identifier.txt"
                        }
                    ],
                    "version": "0d318219"
                }
            }
            patched_resolve_dataguid_get = None
            def mocked_request_resolver_dataguid_get_success(*args, **kwargs):
                # Single-shot mock: un-patch after the first resolver GET.
                args[0].auth = None
                patched_resolve_dataguid_get.stop()
                return BaseTest.MockResponse(mock_response, 200)
            patched_resolve_dataguid_get = mock.patch.multiple("bdbag.fetch.resolvers.base_resolver.requests.Session",
                                                               get=mocked_request_resolver_dataguid_get_success,
                                                               auth=None,
                                                               create=True)
            patched_resolve_dataguid_get.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_dataguid_dir, cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_dataguid_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_dataguid_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_minid(self):
        """Resolve fetch entries referenced by minid identifiers (live resolver)."""
        logger.info(self.getTestHeader('test resolve fetch minid'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_minid_dir, cookie_scan=False), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_minid_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_minid_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_resolver_handler_not_found(self):
        """A config naming a nonexistent resolver handler should fail the fetch."""
        logger.info(self.getTestHeader('test resolve fetch with resolver handler not found'))
        try:
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_minid_dir,
                                               config_file=ospj(self.test_config_dir, 'test-config-4.json'),
                                               cookie_scan=False), "Fetch complete")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_resolver_handler_unspecified(self):
        """An unspecified resolver handler should fall back and still complete the fetch."""
        logger.info(self.getTestHeader('test resolve fetch with resolver handler unspecified'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_minid_dir,
                                              config_file=ospj(self.test_config_dir, 'test-config-5.json'),
                                              cookie_scan=False), "Fetch incomplete")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_resolver_scheme_not_supported(self):
        """An unsupported resolver URL scheme should fail the fetch."""
        logger.info(self.getTestHeader('test resolve fetch with resolver scheme not supported'))
        try:
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_minid_dir,
                                               config_file=ospj(self.test_config_dir, 'test-config-6.json'),
                                               cookie_scan=False), "Fetch complete")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_ftp_no_auth(self):
        """Resolve FTP fetch entries without credentials; urlretrieve is mocked to succeed."""
        logger.info(self.getTestHeader('test resolve fetch ftp'))
        try:
            patched_urlretrieve = None
            def mocked_urlretrieve_success(*args, **kwargs):
                # Single-shot mock: un-patch on first call and pretend success.
                patched_urlretrieve.stop()
                return
            patched_urlretrieve = mock.patch.multiple("bdbag.fetch.transports.fetch_ftp",
                                                      urlretrieve=mocked_urlretrieve_success)
            patched_urlretrieve.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_ftp_dir, force=True), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_ftp_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_ftp_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_ftp_auth(self):
        """Resolve FTP fetch entries using keychain credentials; urlretrieve is mocked."""
        logger.info(self.getTestHeader('test resolve fetch ftp with auth'))
        try:
            patched_urlretrieve = None
            def mocked_urlretrieve_success(*args, **kwargs):
                # Single-shot mock: un-patch on first call and pretend success.
                patched_urlretrieve.stop()
                return
            patched_urlretrieve = mock.patch.multiple("bdbag.fetch.transports.fetch_ftp",
                                                      urlretrieve=mocked_urlretrieve_success)
            patched_urlretrieve.start()
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_auth_dir, force=True,
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-5.json')), "Fetch incomplete")
            bdb.validate_bag(self.test_bag_fetch_auth_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_auth_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_ftp_bad_request(self):
        """An FTP transfer error (mocked urlretrieve raising) should fail the fetch."""
        logger.info(self.getTestHeader('test resolve fetch ftp bad requests'))
        try:
            patched_urlretrieve = None
            def mocked_urlretrieve_success(*args, **kwargs):
                # Single-shot mock: un-patch, then simulate a transfer failure.
                patched_urlretrieve.stop()
                raise Exception("Mocked FTP urlretrieve error")
            patched_urlretrieve = mock.patch.multiple("bdbag.fetch.transports.fetch_ftp",
                                                      urlretrieve=mocked_urlretrieve_success)
            patched_urlretrieve.start()
            self.assertFalse(bdb.resolve_fetch(self.test_bag_fetch_ftp_dir, force=True), "Fetch complete")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_tag(self):
        """tag: URIs in fetch.txt should be skipped with an informational message."""
        logger.info(self.getTestHeader('test resolve fetch tag'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_tag_dir, cookie_scan=False), "Fetch incomplete")
            output = self.stream.getvalue()
            self.assertExpectedMessages(["The fetch entry for file", "specifies the tag URI"], output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_fetch_single(self):
        """Fetch a single remote file directly to a local path via the fetcher API."""
        logger.info(self.getTestHeader('test fetch single file'))
        try:
            output_path = ospj(self.test_bag_fetch_http_dir, "test-fetch-http.txt")
            fetcher.fetch_single_file(
                "https://raw.githubusercontent.com/fair-research/bdbag/master/test/test-data/test-http/"
                "test-fetch-http.txt",
                output_path)
            self.assertTrue(os.path.exists(output_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_materialize_non_bag(self):
        """Materializing a plain (non-bag) directory should not produce a bag."""
        logger.info(self.getTestHeader('test materialize non-bag'))
        # materialize() operates relative to CWD here; restore it afterwards.
        curdir = os.getcwd()
        os.chdir(self.tmpdir)
        try:
            bag_path = bdb.materialize(self.test_data_dir)
            self.assertFalse(bdb.is_bag(bag_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
        finally:
            os.chdir(curdir)
    def test_materialize_from_dir(self):
        """Materialize a bag from a local bag directory."""
        logger.info(self.getTestHeader('test materialize from dir'))
        # materialize() operates relative to CWD here; restore it afterwards.
        curdir = os.getcwd()
        os.chdir(self.tmpdir)
        try:
            bag_path = bdb.materialize(self.test_bag_fetch_http_dir)
            self.assertTrue(bdb.is_bag(bag_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
        finally:
            os.chdir(curdir)
    def test_materialize_from_file(self):
        """Materialize a bag from a local zip archive."""
        logger.info(self.getTestHeader('test materialize from file'))
        # materialize() operates relative to CWD here; restore it afterwards.
        curdir = os.getcwd()
        os.chdir(self.tmpdir)
        try:
            bag_path = bdb.materialize(ospj(self.test_archive_dir, 'test-bag-fetch-http.zip'))
            self.assertTrue(bdb.is_bag(bag_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
        finally:
            os.chdir(curdir)
    def test_materialize_from_file_to_target_dir(self):
        """Materialize a bag from a local zip archive into an explicit target dir."""
        logger.info(self.getTestHeader('test materialize from file to target dir'))
        try:
            bag_path = bdb.materialize(ospj(self.test_archive_dir, 'test-bag-fetch-http.zip'), self.tmpdir)
            self.assertTrue(bdb.is_bag(bag_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_materialize_from_url(self):
        """Materialize a bag by downloading an archive from a URL."""
        logger.info(self.getTestHeader('test materialize from URL'))
        # materialize() operates relative to CWD here; restore it afterwards.
        curdir = os.getcwd()
        os.chdir(self.tmpdir)
        try:
            bag_path = bdb.materialize("https://github.com/fair-research/bdbag/raw/master/test/test-data/test-archives/"
                                       "test-bag.zip")
            self.assertTrue(bdb.is_bag(bag_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
        finally:
            os.chdir(curdir)
    def test_materialize_from_url_to_target_dir(self):
        """Materialize a bag from a URL into an explicit target dir."""
        logger.info(self.getTestHeader('test materialize from URL to target dir'))
        try:
            bag_path = bdb.materialize("https://github.com/fair-research/bdbag/raw/master/test/test-data/test-archives/"
                                       "test-bag.zip", self.tmpdir)
            self.assertTrue(bdb.is_bag(bag_path))
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_cookie_load_and_merge(self):
        """Merging two valid cookie jar files should succeed."""
        logger.info(self.getTestHeader('test cookie load and merge'))
        try:
            cookie_jar_paths = [ospj(self.test_config_dir, "test-cookies-1.txt"),
                                ospj(self.test_config_dir, "test-cookies-2.txt")]
            cookies.load_and_merge_cookie_jars(cookie_jar_paths)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_cookie_load_and_merge_failure(self):
        """A malformed cookie file should be skipped with a warning, not raise."""
        logger.info(self.getTestHeader('test cookie load and merge'))
        try:
            cookie_jar_paths = [ospj(self.test_config_dir, "test-cookies-bad.txt"),
                                ospj(self.test_config_dir, "test-cookies-2.txt")]
            cookies.load_and_merge_cookie_jars(cookie_jar_paths)
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Unable to load cookie file"], output)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_materialize_from_identifier(self):
        """Materialize a bag directly from a minid identifier (live resolver)."""
        logger.info(self.getTestHeader('test materialize from identifier'))
        # materialize() operates relative to CWD here; restore it afterwards.
        curdir = os.getcwd()
        os.chdir(self.tmpdir)
        try:
            bdb.materialize("minid:b91H6JHBS1u2FTG")
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
        finally:
            os.chdir(curdir)
def test_update_keychain_single(self):
logger.info(self.getTestHeader('test update keychain single'))
keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
updated_entry = {
"uri": "https://raw.githubusercontent.com/",
"auth_type": "http-basic",
"auth_params": {
"auth_method": "get",
"username": "foo",
"password": "bar!"
}
}
try:
updated_keychain = update_keychain(updated_entry, keychain_file=keychain_file)
logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
entries = get_auth_entries("https://raw.githubusercontent.com/", updated_keychain)
found = False
for entry in entries:
if entry["auth_type"] == "http-basic":
if entry["auth_params"]["username"] == "foo" and entry["auth_params"]["password"] == "bar!":
found = True
break
self.assertTrue(found)
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
def test_update_keychain_multi(self):
logger.info(self.getTestHeader('test update keychain multi'))
keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
updated_entries = [
{
"uri": "https://raw.githubusercontent.com/",
"auth_type": "http-basic",
"auth_params": {
"auth_method": "get",
"username": "fake",
"password": "bar!"
}
},
{
"uri": "https://raw.githubusercontent.com/",
"auth_type": "bearer-token",
"auth_params": {
"token": "bar",
"allow_redirects_with_token": "True",
"additional_request_headers": {
"X-Requested-With": "XMLHttpRequest"
}
}
}
]
try:
updated_keychain = update_keychain(updated_entries, keychain_file=keychain_file)
logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
entries = get_auth_entries("https://raw.githubusercontent.com/", updated_keychain)
found1 = found2 = False
for entry in entries:
if entry["auth_type"] == "http-basic":
if entry["auth_params"]["password"] == "bar!":
found1 = True
if entry["auth_type"] == "bearer-token":
if entry["auth_params"]["allow_redirects_with_token"] == "True":
found2 = True
self.assertTrue(found1 and found2)
except Exception as e:
self.fail(bdbag.get_typed_exception(e))
    def test_update_keychain_add_single(self):
        """Adding a single entry for a new URI should create exactly one keychain entry."""
        logger.info(self.getTestHeader('test update keychain add single'))
        keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
        added_entry = {
            "uri": "https://foo.bar.com/",
            "auth_type": "http-basic",
            "auth_params": {
                "auth_method": "get",
                "username": "foo",
                "password": "bar!"
            }
        }
        try:
            # Precondition: the URI is not yet present in the keychain.
            keychain = read_keychain(keychain_file, create_default=False)
            entries = get_auth_entries("https://foo.bar.com/", keychain)
            self.assertFalse(entries)
            updated_keychain = update_keychain(added_entry, keychain_file=keychain_file)
            logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
            entries = get_auth_entries("https://foo.bar.com/", updated_keychain)
            self.assertTrue(len(entries) == 1)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_update_keychain_add_multi(self):
        # Adding two entries (different auth types) for a new URI in one call
        # should result in exactly two entries for that URI.
        logger.info(self.getTestHeader('test update keychain add multi'))
        keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
        added_entries = [
            {
                "uri": "https://foo.bar.com/",
                "auth_type": "http-basic",
                "auth_params": {
                    "auth_method": "get",
                    "username": "fake",
                    "password": "bar!"
                }
            },
            {
                "uri": "https://foo.bar.com/",
                "auth_type": "bearer-token",
                "auth_params": {
                    "token": "bar",
                    "allow_redirects_with_token": "True",
                    "additional_request_headers": {
                        "X-Requested-With": "XMLHttpRequest"
                    }
                }
            }
        ]
        try:
            keychain = read_keychain(keychain_file, create_default=False)
            entries = get_auth_entries("https://foo.bar.com/", keychain)
            self.assertFalse(entries)  # precondition: URI not present yet
            updated_keychain = update_keychain(added_entries, keychain_file=keychain_file)
            logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
            entries = get_auth_entries("https://foo.bar.com/", updated_keychain)
            self.assertTrue(len(entries) == 2)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_update_keychain_del_single(self):
        # Deleting the single entry for a URI (matched by uri + auth_type)
        # should leave no entries for that URI.
        logger.info(self.getTestHeader('test update keychain del single'))
        keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
        deleted_entry = {
            "uri": "ftp://ftp.nist.gov/",
            "auth_type": "ftp-basic"
        }
        try:
            keychain = read_keychain(keychain_file, create_default=False)
            entries = get_auth_entries("ftp://ftp.nist.gov/", keychain)
            self.assertTrue(len(entries) == 1)  # precondition: exactly one entry
            updated_keychain = update_keychain(deleted_entry, keychain_file=keychain_file, delete=True)
            logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
            entries = get_auth_entries("ftp://ftp.nist.gov/", updated_keychain)
            self.assertFalse(entries)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_update_keychain_del_multi(self):
        # Deleting both entry types for a URI in one call should leave no
        # entries for that URI.
        logger.info(self.getTestHeader('test update keychain del multi'))
        keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
        deleted_entries = [
            {
                "uri": "https://raw.githubusercontent.com/",
                "auth_type": "http-basic"
            },
            {
                "uri": "https://raw.githubusercontent.com/",
                "auth_type": "bearer-token"
            }
        ]
        try:
            keychain = read_keychain(keychain_file, create_default=False)
            entries = get_auth_entries("https://raw.githubusercontent.com/", keychain)
            self.assertTrue(entries)  # precondition: entries exist
            updated_keychain = update_keychain(deleted_entries, keychain_file=keychain_file, delete=True)
            logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
            entries = get_auth_entries("https://raw.githubusercontent.com/", updated_keychain)
            self.assertFalse(entries)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_update_keychain_del_by_tag(self):
        # Entries can also be deleted by their "tag" attribute alone.
        logger.info(self.getTestHeader('test update keychain del by tag'))
        keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
        deleted_entries = {"tag": "unit test"}
        try:
            keychain = read_keychain(keychain_file, create_default=False)
            entries = get_auth_entries("https://raw.githubusercontent.com/", keychain)
            self.assertTrue(entries)  # precondition: entries exist
            updated_keychain = update_keychain(deleted_entries, keychain_file=keychain_file, delete=True)
            logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
            entries = get_auth_entries("https://raw.githubusercontent.com/", updated_keychain)
            self.assertFalse(entries)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_update_keychain_invalid_params(self):
        # Malformed delete specs (missing uri, missing auth_type, or a tag
        # that matches nothing) must be ignored: the keychain keeps all 3
        # entries.
        logger.info(self.getTestHeader('test update keychain invalid params'))
        keychain_file = ospj(self.test_config_dir, 'test-keychain-8.json')
        deleted_entries = [
            {
                "uri": "https://raw.githubusercontent.com/",
            },
            {
                "auth_type": "bearer-token"
            },
            {
                "uri": "ftp://ftp.nist.gov/",
                "tag": "invalid"
            }
        ]
        try:
            keychain = read_keychain(keychain_file, create_default=False)
            entries = get_auth_entries("https://raw.githubusercontent.com/", keychain)
            self.assertTrue(entries)
            updated_keychain = update_keychain(deleted_entries, keychain_file=keychain_file, delete=True)
            logger.info("Updated keychain: %s" % json.dumps(updated_keychain))
            self.assertTrue(len(updated_keychain) == 3)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_custom_handler_no_keychain(self):
        # A custom fetch handler configured without keychain propagation must
        # still complete the fetch, and log that the keychain was withheld.
        logger.info(self.getTestHeader('test resolve fetch custom handler no keychain'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              config_file=ospj(self.test_config_dir, 'test-config-7.json'),
                                              cookie_scan=False),
                            "Fetch incomplete")
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Keychain will not be passed to fetch handler class"], output)
            # Validate the fetched bag both by manifest size (fast) and by
            # full checksum verification.
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_custom_handler_with_keychain(self):
        # With keychain propagation enabled, the handler should log that it
        # received the keychain contents.
        logger.info(self.getTestHeader('test resolve fetch custom handler with keychain'))
        try:
            self.assertTrue(bdb.resolve_fetch(self.test_bag_fetch_http_dir,
                                              config_file=ospj(self.test_config_dir, 'test-config-8.json'),
                                              keychain_file=ospj(self.test_config_dir, 'test-keychain-9.json'),
                                              cookie_scan=False),
                            "Fetch incomplete")
            output = self.stream.getvalue()
            self.assertExpectedMessages(["Got propagated keychain: ", "ftp://ftp.nist.gov/"], output)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=True)
            bdb.validate_bag(self.test_bag_fetch_http_dir, fast=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_bad_custom_handler(self):
        # A handler class that does not implement the required method should
        # surface a NotImplementedError from resolve_fetch.
        logger.info(self.getTestHeader('test resolve fetch bad custom handler'))
        try:
            self.assertRaisesRegex(NotImplementedError,
                                   "Method must be implemented by subclass",
                                   bdb.resolve_fetch,
                                   self.test_bag_fetch_http_dir,
                                   config_file=ospj(self.test_config_dir, 'test-config-9.json'),
                                   cookie_scan=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    def test_resolve_fetch_invalid_custom_handler(self):
        # A handler class path that cannot be imported should surface a
        # RuntimeError naming the offending class.
        logger.info(self.getTestHeader('test resolve fetch invalid custom handler'))
        try:
            self.assertRaisesRegex(RuntimeError,
                                   "Unable to import specified fetch handler class: \\[foo\\.bar\\.InvalidHandler\\]",
                                   bdb.resolve_fetch,
                                   self.test_bag_fetch_http_dir,
                                   config_file=ospj(self.test_config_dir, 'test-config-10.json'),
                                   cookie_scan=False)
        except Exception as e:
            self.fail(bdbag.get_typed_exception(e))
    @unittest.skip("Not implemented")
    def test_resolve_fetch_globus(self):
        # TODO
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 48.919864 | 148 | 0.590837 |
f2de3139114025b512d7610ed49160f1924fa4df | 2,774 | py | Python | roulette/builder/utils.py | miararoy/bliz | 0d197b6790ccfa0c71682abf551aa4df83a9b589 | [
"MIT"
] | null | null | null | roulette/builder/utils.py | miararoy/bliz | 0d197b6790ccfa0c71682abf551aa4df83a9b589 | [
"MIT"
] | null | null | null | roulette/builder/utils.py | miararoy/bliz | 0d197b6790ccfa0c71682abf551aa4df83a9b589 | [
"MIT"
] | null | null | null | import numpy as np
from collections import namedtuple
from sklearn.metrics import classification_report, confusion_matrix
# Lightweight record used to describe a trained model when generating its
# documentation (see generate_model_documentation below for the field meanings).
Doc = namedtuple(
    'Doc',
    [
        "version",
        "type",
        "algo",
        "param",
        "cv",
    ]
)
def compress_regression_results(l, true_condition=lambda x: x >= 0.6):
    """Binarize a sequence of regression scores.

    Each element maps to 1 when ``true_condition`` holds for it and to 0
    otherwise.  Returns a new list; the input is not modified.
    """
    return [1 if true_condition(value) else 0 for value in list(l)]
def generate_model_documentation(
    model_version: str,
    model_type: str,
    model_algorithm: str,
    model_parameter_tuning: str = None,
    model_cv: str = None,
):
    """Return a human-readable, multi-line summary of a model's details.

    Optional fields that were not supplied are rendered as ``None``.
    """
    return (
        "\n---------- Model Details:\n\n"
        f"Model Version == {model_version}\n"
        f"Model Type == {model_type}\n"
        f"Model Algorithm == {model_algorithm}\n"
        f"Model Parameter Tuning == {model_parameter_tuning}\n"
        f"Model CV == {model_cv}\n"
    )
def create_classification_report(y_real, y_pred):
    """Print a binary classification report for regression outputs.

    Both inputs are binarized with compress_regression_results (default
    threshold 0.6) before printing sklearn's classification report, the
    normalized confusion-matrix counts, and precision/recall.
    """
    p = compress_regression_results(list(y_pred))
    r = compress_regression_results(list(y_real))
    # Echo every (prediction, ground truth) pair, binarized and raw.
    for y_p, y_r_p, y_r, y_r_r in zip(
        p,
        list(y_pred),
        r,
        list(y_real),
    ):
        print("Predicted {rp} ~ {p} for result {rr} ~ {r}".format(
            p=y_p,
            rp=y_r_p,
            r=y_r,
            rr=y_r_r,
        ))
    print("\n{}".format(
        classification_report(
            r,
            p,
            target_names=["bottom_tier", "top_tier"],
        )
    )
    )
    # BUG FIX: sklearn's signature is confusion_matrix(y_true, y_pred); the
    # previous call passed (p, r), which swapped the false-positive and
    # false-negative counts.  Ground truth goes first, matching the
    # classification_report call above.
    tn, fp, fn, tp = confusion_matrix(r, p).ravel()
    print(
        "tn = {} \n".format(tn / len(p)) +
        "tp = {} \n".format(tp / len(p)) +
        "fn = {} \n".format(fn / len(p)) +
        "fp = {} \n".format(fp / len(p))
    )
    print(
        "Precision = {}\n".format(round(tp / (tp + fp), 2)) +
        "Recall = {}\n".format(round(tp / (tp + fn), 2))
    )
def min_max_norm(y: np.ndarray) -> np.ndarray:
    """Rescale *y* linearly so its values span the unit interval [0, 1]."""
    lo = y.min()
    hi = y.max()
    return (y - lo) / (hi - lo)
def is_regression_metric(metric: callable) -> bool:
    """Sanity-check that *metric* behaves like a regression error metric.

    Accepts the metric when it scores a perfect prediction as 0.0, or when
    its score grows as predictions drift further from the targets (both
    checks failing is the only rejection case — original semantics kept).
    """
    real = np.asarray([0.1, 0.33, 0.44])
    pred_close = np.asarray([0.11, 0.34, 0.45])
    pred_far = np.asarray([0.3, 0.6, 0.9])
    zero_on_identity = metric(real, real) == 0.0
    grows_with_error = metric(real, real) < metric(real, pred_close) < metric(real, pred_far)
    if zero_on_identity or grows_with_error:
        return True
    return False
def is_binary_classification_metric(metric: callable) -> bool:
    """Sanity-check that *metric* behaves like a binary classification error.

    Accepts the metric when it scores a perfect prediction as 0.0, or when
    its score grows as predictions disagree more with the labels (both
    checks failing is the only rejection case — original semantics kept).
    """
    real = np.asarray([1, 1, 0])
    pred_close = np.asarray([1, 0, 0])
    pred_far = np.asarray([0, 0, 1])
    zero_on_identity = metric(real, real) == 0.0
    grows_with_error = metric(real, real) < metric(real, pred_close) < metric(real, pred_far)
    if zero_on_identity or grows_with_error:
        return True
    return False
| 25.925234 | 88 | 0.540014 |
b23eb4175ade8c5b44dd2bd2a1ace6d541e6540f | 1,494 | py | Python | ai_toolkit/models/cnn.py | TylerYep/ml-toolkit | 095bdce961133acc720f90b6d1bbb0a7becbfc9f | [
"MIT"
] | 7 | 2020-04-07T06:10:29.000Z | 2021-10-30T06:31:46.000Z | ai_toolkit/models/cnn.py | TylerYep/ml-toolkit | 095bdce961133acc720f90b6d1bbb0a7becbfc9f | [
"MIT"
] | 2 | 2021-09-08T01:48:49.000Z | 2022-01-16T19:31:35.000Z | ai_toolkit/models/cnn.py | TylerYep/ml-toolkit | 095bdce961133acc720f90b6d1bbb0a7becbfc9f | [
"MIT"
] | 2 | 2021-04-27T22:45:14.000Z | 2021-07-16T06:40:30.000Z | import torch
from torch import nn
from torch.nn import functional as F
class BasicCNN(nn.Module):
    """Small two-conv / two-fc network producing log-probabilities over 10
    classes.  Expects 1-channel 28x28 inputs (fc1's 9216 = 64 * 12 * 12)."""
    def __init__(self, input_shape):
        super().__init__()
        self.input_shape = input_shape
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropx1 = nn.Dropout2d(0.25)
        self.dropx2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Run the network; returns log-softmax scores of shape (N, 10)."""
        # conv1 -> relu -> conv2 -> maxpool -> dropout -> flatten
        features = F.max_pool2d(self.conv2(F.relu(self.conv1(x))), 2)
        features = torch.flatten(self.dropx1(features), 1)
        # fc1 -> relu -> dropout -> fc2 -> log-softmax
        hidden = self.dropx2(F.relu(self.fc1(features)))
        return F.log_softmax(self.fc2(hidden), dim=1)

    def forward_with_activations(self, x):
        """Same as forward, but also returns the four early-layer activations:
        conv1 output, its relu, conv2 output, and the max-pooled map."""
        act_conv1 = self.conv1(x)
        act_relu1 = F.relu(act_conv1)
        act_conv2 = self.conv2(act_relu1)
        act_pool = F.max_pool2d(act_conv2, 2)
        out = torch.flatten(self.dropx1(act_pool), 1)
        out = self.dropx2(F.relu(self.fc1(out)))
        out = F.log_softmax(self.fc2(out), dim=1)
        return out, [
            act_conv1,
            act_relu1,
            act_conv2,
            act_pool,
        ]
| 26.678571 | 64 | 0.522758 |
66b35518304691850f9b54f6bdc530c6aaddb351 | 2,413 | py | Python | tests/precalculations/test_count_vectorizer.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | 3 | 2019-06-21T11:41:08.000Z | 2019-10-24T06:41:51.000Z | tests/precalculations/test_count_vectorizer.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | 63 | 2019-05-16T12:09:57.000Z | 2022-02-10T00:21:01.000Z | tests/precalculations/test_count_vectorizer.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | null | null | null | import unittest
from shift_detector.precalculations.count_vectorizer import CountVectorizer
from shift_detector.precalculations.store import Store
import pandas as pd
import numpy as np
class TestCountVectorizer(unittest.TestCase):
    """Tests for CountVectorizer equality, hashing, parameter validation and
    the vectorization result on a two-dataframe Store."""
    def setUp(self):
        # count1 and count2 share identical parameters; count3 differs only
        # in max_features, so it must compare (and hash) unequal.
        self.count1 = CountVectorizer(columns=['col1'], stop_words='english', max_features=2)
        self.count2 = CountVectorizer(columns=['col1'], stop_words='english', max_features=2)
        self.count3 = CountVectorizer(columns=['col1'], stop_words='english', max_features=3)
        # df1 is 9x 'duck' + 1x 'goose'; df2 is the mirror image.
        self.df1 = pd.DataFrame({'col1':
                                     ['duck', 'duck', 'duck', 'duck', 'duck',
                                      'duck', 'duck', 'duck', 'duck', 'goose']})
        self.df2 = pd.DataFrame({'col1':
                                     ['goose', 'goose', 'goose', 'goose', 'goose',
                                      'goose', 'goose', 'goose', 'goose', 'duck']})
        self.store = Store(self.df1, self.df2)
    def test_eq(self):
        self.assertEqual(self.count1, self.count2)
        self.assertNotEqual(self.count1, self.count3)
    def test_exception_for_max_features(self):
        # max_features must be a positive integer.
        self.assertRaises(ValueError, lambda: CountVectorizer(columns=[''], max_features=0))
        self.assertRaises(TypeError, lambda: CountVectorizer(columns=[''], max_features=3.5))
    def test_exception_for_stop_words(self):
        # stop_words must be a known language name or a list of strings.
        self.assertRaises(Exception, lambda: CountVectorizer(columns=[''], stop_words='abcd'))
        self.assertRaises(Exception, lambda: CountVectorizer(columns=[''], stop_words=['english', ' abcd']))
        self.assertRaises(TypeError, lambda: CountVectorizer(columns=[''], stop_words=['english', 42]))
    def test_hash(self):
        self.assertEqual(hash(self.count1), hash(self.count2))
        self.assertNotEqual(hash(self.count1), hash(self.count3))
    def test_process(self):
        res1, res2, feature_names, all_vecs = self.count1.process(self.store)
        # Row i is the one-hot count vector for df row i: [duck, goose]
        # columns — the 10th row (i == 9) holds the minority token.
        expected_dict1 = {'col1': np.array([[1,0] if not i == 9 else [0,1] for i in range(10)])}
        expected_dict2 = {'col1': np.array([[0,1] if not i == 9 else [1,0] for i in range(10)])}
        self.assertEqual(res1.keys(), expected_dict1.keys())
        np.testing.assert_equal(res1['col1'], expected_dict1['col1'])
        self.assertEqual(res2.keys(), expected_dict2.keys())
        np.testing.assert_equal(res2['col1'], expected_dict2['col1'])
| 47.313725 | 108 | 0.640282 |
7fd0d86de9e981bfc074cb2f99263eaa21d92b2a | 4,911 | py | Python | App/CSJam2015/Content/PrebuiltResources/Scripts/texture_atlas_builder.py | KamiGrave/CSJam2015 | e96db2f0797e4e9f33f949b4c36955243c6bbc23 | [
"MIT"
] | null | null | null | App/CSJam2015/Content/PrebuiltResources/Scripts/texture_atlas_builder.py | KamiGrave/CSJam2015 | e96db2f0797e4e9f33f949b4c36955243c6bbc23 | [
"MIT"
] | null | null | null | App/CSJam2015/Content/PrebuiltResources/Scripts/texture_atlas_builder.py | KamiGrave/CSJam2015 | e96db2f0797e4e9f33f949b4c36955243c6bbc23 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# texture_atlas_builder.py
# CSJam2015
# Created by Scott Downie on 30/06/2014.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Tag Games Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys
import os
import subprocess
import shutil
#----------------------------------------
# @param the file path.
# @param the extension.
#
# @author Ian Copland
#
# @return whether or not the path has
# the extension.
#----------------------------------------
def HasExtension(filepath, extension):
    """Case-insensitively test whether *filepath* ends with *extension*."""
    return filepath.lower().endswith(extension.lower())
#----------------------------------------
# Walks the input directory and packs
# all pngs in each folder onto an atlas
# for that folder
#
# @author S Downie
#
# @param Input path
# @param Output path
#----------------------------------------
def BuildTextureAtlases(input_path, output_path):
    """Walk *input_path* and build one atlas per leaf folder that holds PNGs,
    mirroring the folder layout under *output_path*."""
    print("Building atlases...")
    # Normalize both paths to end with a slash so slicing below lines up.
    if(input_path.endswith("/") == False):
        input_path = input_path + "/"
    if(output_path.endswith("/") == False):
        output_path = output_path + "/"
    for directory, sub_dirs, filenames in os.walk(input_path):
        for sub_dir in sub_dirs:
            input_dir = os.path.join(directory, sub_dir)
            # Re-root the subdirectory path under output_path.
            output_dir = os.path.join(output_path, input_dir[len(input_path):len(input_dir)]);
        if len(sub_dirs) == 0:
            # Leaf directory: only build an atlas when it contains a PNG.
            # NOTE(review): output_dir here is left over from the last
            # directory that had subfolders — confirm this mapping is the
            # intended one (it would be unbound if the very first walked
            # directory had no subfolders).
            contains_png = False
            for filename in filenames:
                if HasExtension(filename, ".png") == True:
                    contains_png = True
                    break
            if contains_png == True:
                # Drop the last (tag) path component; tags are folded into
                # the atlas file name by GenerateAtlasName instead.
                split_dirs = output_dir.split("/")
                split_dirs.pop()
                final_output_dir = "/".join(split_dirs)
                if os.path.exists(final_output_dir) == False:
                    os.makedirs(final_output_dir);
                BuildTextureAtlas(directory, final_output_dir)
    print ("Atlas building finished")
    print("-----------------------------------------")
    print("-----------------------------------------")
#----------------------------------------
# Depending on the file path and the tags
# build a suitable filename: i.e.
# Texture/GUI/Med -> GUI.med.csatlas
# Texture/GUI/Med.Wide -> GUI.med.wide.csatlas
#
# @author S Downie
#
# @param Input path
#
# @return Output name
#----------------------------------------
def GenerateAtlasName(input_filepath):
    """Build an atlas file name from a tagged texture path.

    "Texture/GUI/Med" -> "GUI.med.csatlas";
    "Texture/GUI/Med.Wide" -> "GUI.med.wide.csatlas".
    The tag "common" (any case) is dropped from the name.
    """
    parts = input_filepath.split("/")
    if(len(parts) < 2):
        print("ERROR: Path has no Tag folders i.e. (Med, High, Low)")
    # Keep the original indexing form: for a single-component path this
    # resolves to parts[-1] rather than raising.
    pieces = [parts[len(parts) - 2]]
    for tag in parts[len(parts) - 1].split("."):
        if tag.lower() != "common":
            pieces.append(tag.lower())
    return ".".join(pieces) + ".csatlas"
#----------------------------------------
# Builds a single atlas from the pngs
# in the given directory.
#
# @author S Downie
#
# @param Input path
# @param Output path
#----------------------------------------
def BuildTextureAtlas(input_filepath, output_filepath):
    """Invoke the CSAtlasBuilder jar to pack one folder of PNGs into a
    .csatlas file inside *output_filepath*."""
    name = GenerateAtlasName(input_filepath)
    taggedoutput_filepath = os.path.join(output_filepath, name)
    print("Building atlas: " + taggedoutput_filepath)
    # Tool path is relative to this script's expected location in the tree.
    tool_path = os.path.join("..", "..", "ChilliSource", "Tools", "CSAtlasBuilder.jar")
    subprocess.call(["java", "-Djava.awt.headless=true", "-Xmx512m", "-jar", tool_path, "--input", input_filepath, "--output", taggedoutput_filepath, "--maxwidth", "4096", "--maxheight", "4096", "--padding", "2"]);
#----------------------------------------------------------------------
# The entry point into the script.
#
# @author S Downie
#
# @param The list of arguments.
#----------------------------------------------------------------------
def main(args):
    """Entry point: args = [script, input_path, output_path].

    Clears any previous output directory, then rebuilds all atlases.
    """
    # BUG FIX: the original used "len(args) is 3", which identity-compares
    # an int (implementation-defined for small ints and wrong in intent);
    # use an equality comparison instead.
    if len(args) != 3:
        print("ERROR: Missing input and output paths")
        return
    input_path = args[1]
    output_path = args[2]
    # Start from a clean output tree so stale atlases never survive.
    if os.path.exists(output_path) == True:
        shutil.rmtree(output_path)
    BuildTextureAtlases(input_path, output_path)
# Run only when executed as a script, forwarding the raw argv.
if __name__ == "__main__":
    main(sys.argv)
| 29.945122 | 211 | 0.635919 |
92cdb9e7de0436a6771577c5dc7cf2e0a0c64bbf | 1,210 | py | Python | main.py | crrapi/Rin | 41f3ae7421acd03ad214b2f3689d8043a01ae1df | [
"MIT"
] | null | null | null | main.py | crrapi/Rin | 41f3ae7421acd03ad214b2f3689d8043a01ae1df | [
"MIT"
] | null | null | null | main.py | crrapi/Rin | 41f3ae7421acd03ad214b2f3689d8043a01ae1df | [
"MIT"
] | null | null | null | import json
import discord
from discord.ext import commands
# Put your token in a file called config.json, if you want to self-host
with open('config.json') as file:
config = json.load(file)
async def get_prefix(_bot, message):
prefixes = ['rin ']
return commands.when_mentioned_or(*prefixes)(_bot, message)
extensions = ['modules.API.rin_danbooru',
'modules.API.rin_zerochan',
'modules.API.rin_aur',
'modules.discord.moderation',
'modules.utils.errors',
'modules.utils.information']
bot = commands.Bot(command_prefix=get_prefix)
if __name__ == '__main__':
for extension in extensions:
try:
bot.load_extension(extension)
print(f'Loaded {extension}!')
except discord.ClientException:
print(f'{extension} does not have a setup...')
except (ImportError, Exception):
print(f'Failed to load {extension}...')
@bot.event
async def on_ready():
print(f'Hello World, I\'m {bot.user.name}!')
await bot.change_presence(status=discord.Status.idle, activity=discord.Game('In development!'))
bot.load_extension('jishaku')
bot.run(config['token'])
| 27.5 | 99 | 0.65124 |
c950a78eed5b346d273b2e398fa8b3345fe51152 | 4,414 | py | Python | slimelearnpy/slimelearn.py | javierburgosv/slimelearn.py | 2a5f8bd43e52e199f052d51a79c4f097c6f6b547 | [
"MIT"
] | 1 | 2021-06-22T11:40:17.000Z | 2021-06-22T11:40:17.000Z | slimelearnpy/slimelearn.py | javierburgosv/slimelearn.py | 2a5f8bd43e52e199f052d51a79c4f097c6f6b547 | [
"MIT"
] | null | null | null | slimelearnpy/slimelearn.py | javierburgosv/slimelearn.py | 2a5f8bd43e52e199f052d51a79c4f097c6f6b547 | [
"MIT"
] | null | null | null | from asyncio.events import get_event_loop
from asyncio.tasks import all_tasks
from typing import final
import websockets
import asyncio
import json
import nest_asyncio
nest_asyncio.apply()
class SlimeLearn:
    """Blocking websocket client for the JumpSlimeLearn game server.

    All public methods drive the asyncio event loop synchronously via
    run_until_complete, so callers can stay single-threaded.
    """
    # Active websocket connection (set by run()); None until connected.
    ws = None
    # Shape of a request packet sent to the server.
    req_template = {
        "req": "",
        "payload": {}
    }
    def __init__(self):
        pass
    ###################################################################
    #                   Mono-thread implementation                    #
    ###################################################################
    def run(self, uri, config, function):
        """
        Handles the connection, configuration and subscription to the server all at once
        :param uri: The url:port of the server. By default localhost:8080
        :param config: The json containing all the server required configuration
        :param function: The callback function with the logic for the AI
        """
        print(">> Connecting to server...")
        self.ws = asyncio.get_event_loop().run_until_complete(self._connect(uri))
        if self.ws != None:
            print(">> Configuring server...")
            cstatus = asyncio.get_event_loop().run_until_complete(self._configure(config))
            if cstatus == 200:
                print(">> Listening to server...")
                asyncio.get_event_loop().run_until_complete(self._listen(function))
    async def _connect(self, dir):
        # Open a websocket to "ws:<dir>"; returns None (implicitly) on failure.
        try:
            uri = "ws:" + dir
            ws = await websockets.connect(uri)
            return ws
        except Exception as e:
            print("Error. Please make sure JumpSlimeLearn is open and running correctly: ")
            print(e)
    async def _configure(self, config):
        # Send the configuration JSON and return the server's response code
        # (or None if no code was received).
        res_code = None
        try:
            await self.ws.send(json.dumps(config))
            res = json.loads(await self.ws.recv())
            res_code = res["code"]
            if res_code == 400:
                raise Exception(res["data"]["message"])
        except Exception as e:
            print("Error. Impossible to configure server")
            print(e)
        finally:
            # NOTE(review): returning from finally swallows any in-flight
            # exception, including the 400 raise above — the caller only ever
            # sees res_code.
            return res_code
    async def _listen(self, callback):
        # Forward the "data" field of every incoming JSON message to callback.
        try:
            async for msg in self.ws:
                js = json.loads(msg)
                callback(js["data"])
        except Exception as e:
            print(e)
    async def _disconnect(self):
        # Close the websocket, logging (not raising) any failure.
        try:
            await self.ws.close()
        except Exception as e:
            print("Error. Impossible to disconnect:")
            print(e)
    ###################################################################
    #                       Requests to server                        #
    ###################################################################
    def jump(self, angle = None):
        """
        Sends a basic action 'JUMP' to the server
        :param angle (optional): The angle for the direction of the jump vector
        """
        temp = {
            "req": "jump"
        }
        if angle != None:
            temp["payload"] = {
                "angle": angle
            }
        packet = json.dumps(temp)
        asyncio.get_event_loop().run_until_complete(self._askForAction(packet))
    def reset(self):
        """
        Sends a basic action 'Reset' to the server
        """
        temp = {
            "req": "reset"
        }
        packet = json.dumps(temp)
        asyncio.get_event_loop().run_until_complete(self._askForAction(packet))
    async def _askForAction(self, act: str):
        # Send a pre-serialized action packet over the websocket.
        try:
            await self.ws.send(act)
        except Exception as e:
            print("Error. Impossible to send action " + act)
            print(e)
    ###################################################################
    #                         JSON utilities                          #
    ###################################################################
    def load_config_file(self, file="config.json"):
        """
        Loads the .json file as a Python dict
        :param file: The route of the config file.
        """
        try:
            with open(file) as config_file:
                return json.load(config_file)
        except Exception as e:
            print("Error. Something went wrong opening the configuration file")
            print(e)
2056de4c8fc4c155ab7bc0258f2005699183a8cb | 1,742 | py | Python | backend/match_games/models.py | flaviogf/match_games | 7d2c2f35749715f36855031911ba67690cea8cb1 | [
"MIT"
] | null | null | null | backend/match_games/models.py | flaviogf/match_games | 7d2c2f35749715f36855031911ba67690cea8cb1 | [
"MIT"
] | null | null | null | backend/match_games/models.py | flaviogf/match_games | 7d2c2f35749715f36855031911ba67690cea8cb1 | [
"MIT"
] | null | null | null | from match_games import db
from sqlalchemy.orm import relationship
class User(db.Model):
    """Application account; `role` is a free-form permission label.

    NOTE(review): `password` is stored as a plain String column here —
    confirm hashing happens before assignment elsewhere.
    """
    id = db.Column(db.Integer,
                   primary_key=True)
    name = db.Column(db.String(250),
                     nullable=False)
    email = db.Column(db.String(250),
                      nullable=False)
    password = db.Column(db.String(250),
                         nullable=False)
    # Avatar file name; falls back to a bundled default image.
    image = db.Column(db.String(250),
                      nullable=False,
                      default='default.jpg')
    role = db.Column(db.String(250),
                     nullable=False)
class Game(db.Model):
    """A game; linked to the stores that sell it via GameStore rows."""
    id = db.Column(db.Integer,
                   primary_key=True)
    name = db.Column(db.String(250),
                     nullable=False)
    # Cover image file name; falls back to a bundled default image.
    image = db.Column(db.String(250),
                      nullable=False,
                      default='default.jpg')
    # Association rows pointing at the stores carrying this game.
    stores = relationship('GameStore', back_populates='game')
class Store(db.Model):
    """A store; linked to the games it sells via GameStore rows."""
    id = db.Column(db.Integer,
                   primary_key=True)
    name = db.Column(db.String(250),
                     nullable=False)
    # Logo file name; falls back to a bundled default image.
    image = db.Column(db.String(250),
                      nullable=False,
                      default='default.jpg')
    # Association rows pointing at the games this store carries.
    games = relationship('GameStore', back_populates='store')
class GameStore(db.Model):
    """Association object between Game and Store, carrying the price
    (`value`) of that game at that store."""
    id = db.Column(db.Integer,
                   primary_key=True)
    game_id = db.Column(db.Integer,
                        db.ForeignKey('game.id'))
    store_id = db.Column(db.Integer,
                         db.ForeignKey('store.id'))
    value = db.Column(db.Float,
                      nullable=False)
    store = relationship('Store', back_populates='games')
    game = relationship('Game', back_populates='stores')
| 32.259259 | 61 | 0.537887 |
1d5ba429fc4f0cc992f0bea6256d036468c81764 | 2,888 | py | Python | core/management/commands/createOrder.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | [
"Apache-2.0"
] | null | null | null | core/management/commands/createOrder.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | [
"Apache-2.0"
] | 9 | 2020-01-10T14:10:02.000Z | 2022-03-12T00:08:19.000Z | core/management/commands/createOrder.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | [
"Apache-2.0"
] | null | null | null | import random, time, datetime, math
from django.core.management.base import BaseCommand
from django_seed import Seed
from reviews.models import Review
from products.models import Product
from orders.models import Order, Step
from users.models import User
# Management command for creating test data
def str_time_prop(start, end, format, prop):
    """Return a timestamp string at fraction *prop* between *start* and *end*.

    Both endpoints are parsed with *format* and interpreted in local time;
    prop=0 yields *start* and prop=1 yields *end* (truncated to whole
    seconds via floor).
    """
    # FIX: removed stray debug print() calls that polluted stdout on every
    # invocation; the computation is unchanged.
    stime = time.mktime(time.strptime(start, format))
    etime = time.mktime(time.strptime(end, format))
    ptime = math.floor(stime + prop * (etime - stime))
    return time.strftime(format, time.localtime(ptime))
def random_date(start, end, prop):
    """Interpolate between two '%Y-%m-%d %H:%M:%S' timestamps at *prop*."""
    fmt = "%Y-%m-%d %H:%M:%S"
    return str_time_prop(start, end, fmt, prop)
class Command(BaseCommand):
    """Management command that seeds the database with randomized Order rows
    for testing (random user, step, amount, Japanese orderer name, and an
    order date between 2020-01-01 and 2020-03-03)."""
    help = "This command creates orders."
    def add_arguments(self, parser):
        parser.add_argument(
            "--number", default=2, type=int, help="How many orders you want to create"
        )
    def handle(self, *args, **options):
        number = options.get("number")
        # Pools to draw random values from.
        users = User.objects.all()
        steps = Step.objects.all()
        amounts = [12000, 9000, 13000, 21000]
        # Japanese family names for the orderer field.
        kanji_last = [
            "北口",
            "宮岡",
            "深堀",
            "本宮",
            "溝田",
            "茅野",
            "渡会",
            "河部",
            "関屋",
            "徳弘",
            "佐渡",
            "有友",
            "稲津",
            "笠見",
            "宗本",
            "池袋",
            "國司",
            "込宮",
            "羽沢",
            "磯谷",
            "蘇原",
            "南田",
            "吉間",
            "粟田",
            "時森",
        ]
        # Japanese given names for the orderer field.
        kanji_first = [
            "正宏",
            "克美",
            "達弥",
            "悠希",
            "真広",
            "保之",
            "昭人",
            "将人",
            "敏則",
            "康晃",
            "知一",
            "佑輝",
            "安雄",
            "信平",
            "昌輝",
            "直巳",
            "克英",
            "禎久",
            "勝平",
            "典行",
            "明英",
            "宏至",
            "泰晴",
            "光人",
            "博之",
        ]
        seeder = Seed.seeder()
        # NOTE(review): order_date is computed once here, so every seeded row
        # in a run shares the same random date; the lambda-valued fields are
        # re-evaluated per row.
        seeder.add_entity(
            Order,
            number,
            {
                "guest": None,
                "user": lambda x: random.choice(users),
                "step": lambda x: random.choice(steps),
                "amount": lambda x: random.choice(amounts),
                "last_name_orderer": lambda x: random.choice(kanji_last),
                "first_name_orderer": lambda x: random.choice(kanji_first),
                "order_date": random_date(
                    str(datetime.datetime(2020, 1, 1)),
                    str(datetime.datetime(2020, 3, 3)),
                    random.random(),
                ),
            },
        )
        seeder.execute()
        self.stdout.write(self.style.SUCCESS(f"{number} orders created!"))
| 25.557522 | 86 | 0.437673 |
8080782e2fe627930333f1566a9f55a55d2727dd | 1,845 | py | Python | setup.py | cordalace/nsjwt | 424b85f9ec003e965db4da2089efc47602566c16 | [
"Apache-2.0"
] | 11 | 2018-02-01T11:29:45.000Z | 2020-05-28T17:39:41.000Z | setup.py | cordalace/nsjwt | 424b85f9ec003e965db4da2089efc47602566c16 | [
"Apache-2.0"
] | null | null | null | setup.py | cordalace/nsjwt | 424b85f9ec003e965db4da2089efc47602566c16 | [
"Apache-2.0"
] | 1 | 2018-05-10T09:25:49.000Z | 2018-05-10T09:25:49.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setuptools script."""
import codecs
from setuptools import setup
# The PyPI long description is taken verbatim from the README.
with codecs.open('README.rst') as readme:
    LONG_DESCRIPTION = readme.read()
setup(
    name='nsjwt',
    version='0.2.1',
    author='Azat Kurbanov',
    author_email='cordalace@gmail.com',
    description='No shit JWT implementation',
    long_description=LONG_DESCRIPTION,
    license='Apache License 2.0',
    url='https://github.com/cordalace/nsjwt',
    # Fast JSON and base64 backends used by the module.
    install_requires=[
        'ujson',
        'pybase64',
    ],
    keywords='jwt json web token',
    # Single-module distribution (nsjwt.py), no package directory.
    py_modules=['nsjwt'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet :: WWW/HTTP :: Session',
    ],
)
| 34.166667 | 71 | 0.681301 |
215b1bcbb0694d0c75e1e2a7f6eeb82529e4df94 | 16,168 | py | Python | sensors/kalman/auv-kalmand.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 70 | 2015-11-16T18:04:01.000Z | 2022-03-05T09:04:02.000Z | sensors/kalman/auv-kalmand.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 1 | 2016-08-03T05:13:19.000Z | 2016-08-03T06:19:39.000Z | sensors/kalman/auv-kalmand.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 34 | 2015-12-15T17:29:23.000Z | 2021-11-18T14:15:12.000Z | #!/usr/bin/env python3
'''
The daemon that runs Kalman filters for orientation and position.
'''
import math
import time
from functools import reduce
import numpy as np
import shm
from auv_math.quat import Quaternion
from auv_python_helpers.angles import abs_heading_sub_degrees
from conf.vehicle import sensors, dvl_present, gx_hpr, dvl_offset
from settings import dt
from kalman_unscented import UnscentedKalmanFilter
from kalman_position import PositionFilter
from conf.vehicle import is_mainsub
# Offset for GX4 mounting orientation
GX_ORIENTATION_OFFSET = Quaternion(hpr=gx_hpr)
rec_get_attr = lambda s: \
reduce(lambda acc, e: getattr(acc, e),
s.split('.'), shm)
# Thruster array allows access to thruster values
thrusters = ['port', 'starboard', 'sway_fore', 'sway_aft']
heading_rate_var = rec_get_attr(sensors["heading_rate"])
pitch_rate_var = rec_get_attr(sensors["pitch_rate"])
roll_rate_var = rec_get_attr(sensors["roll_rate"])
depth_var = rec_get_attr(sensors["depth"])
depth_offset_var = rec_get_attr(sensors["depth_offset"])
quat_group = rec_get_attr(sensors["quaternion"])
# Use velocity from the DVL if the vehicle has one
vel_vars = {}
for vel_var in ["velx", "vely", "velz"]:
if dvl_present:
if vel_var in sensors:
vel_vars[vel_var] = rec_get_attr(sensors[vel_var])
else:
raise LookupError("vehicle.dvl_present is True but vehicle.sensors.%s is "
"not defined" % vel_var)
else:
vel_vars[vel_var] = rec_get_attr("kalman." + vel_var)
# DVL Beam vars
beam_vars = [shm.dvl.low_amp_1,
shm.dvl.low_amp_2,
shm.dvl.low_amp_3,
shm.dvl.low_amp_4,
shm.dvl.low_correlation_1,
shm.dvl.low_correlation_2,
shm.dvl.low_correlation_3,
shm.dvl.low_correlation_4]
control_wrench = shm.control_internal_wrench
quat_mode = shm.settings_control.quat_pid.get()
pass_through = False
def fx_quat(x, dt):
    """State-transition model for the quaternion UKF.

    State layout: x[0:4] = orientation quaternion, x[4:7] = angular rates.
    Propagation of the quaternion by the angular rates is currently
    disabled (see commented-out disp_quat below), so the state passes
    through unchanged; the ``dt`` argument is unused as a result.
    """
    q_initial, ang_vel = Quaternion(q=x[:4], unit=False), x[4:]
    # I don't think the below is correct.
    # Because HPR != axis of angular velocity
    # TODO XXX FIX
    # disp_quat = Quaternion(hpr=([math.degrees(vel * dt) for vel in ang_vel]))
    q_final = q_initial # * disp_quat
    # Copy the (unchanged) quaternion components back into the state vector.
    x[0] = q_final[0]
    x[1] = q_final[1]
    x[2] = q_final[2]
    x[3] = q_final[3]
    return x
def hx_quat(x):
    """Measurement model for the quaternion UKF.

    The filter state (quaternion + angular rates) is observed directly,
    so the predicted measurement is simply the state itself.
    """
    return x
def fx_euler(x, dt):
    """State-transition model for the Euler-angle UKF.

    State layout: x[0:3] = heading/pitch/roll, x[3:6] = their rates.
    Integrating the angles by their rates over ``dt`` is currently
    disabled, so the state passes through unchanged and ``dt`` is unused.
    The disabled propagation was:
        x[0:3] += x[3:6] * dt; x[0] %= 360
    """
    return x
def hx_euler(x):
    """Measurement model for the Euler-angle UKF.

    Angles and angular rates are measured directly, so the predicted
    measurement equals the state.
    """
    return x
def get_orientation_shm(quat_values):
    """Convert a shm quaternion group snapshot (with q0..q3 fields) into a
    heading-offset-corrected Quaternion via get_orientation()."""
    return get_orientation([quat_values.q0, quat_values.q1, quat_values.q2, quat_values.q3])
def get_orientation(quat_values):
    """Apply the operator-configured heading offset (a pure yaw rotation read
    from shm.kalman_settings.heading_offset, in degrees) to the raw sensor
    quaternion given as a length-4 sequence, returning a Quaternion."""
    q_offset = Quaternion(hpr=(shm.kalman_settings.heading_offset.get(), 0, 0))
    quat = Quaternion(q=quat_values)
    return q_offset * quat
# --- Quaternion-state UKF setup -------------------------------------------
# Seed the filter state from the current sensor quaternion, corrected for
# heading offset and GX4 mounting orientation.
quat_values = quat_group.get()
quat = get_orientation_shm(quat_values).q
quat = (Quaternion(q=[quat[0], quat[1], quat[2], quat[3]]) * GX_ORIENTATION_OFFSET).q
# 7-state filter: [q0, q1, q2, q3, heading_rate, pitch_rate, roll_rate].
quat_orientation_filter = UnscentedKalmanFilter(7, fx_quat, 7, hx_quat, dt, .1)
quat_orientation_filter.x_hat = np.array([quat[0], quat[1], quat[2], quat[3], 0, 0, 0])
quat_orientation_filter.P *= .5
# Measurement noise: large variance on the quaternion components, smaller
# on the angular rates.
quat_orientation_filter.R = np.array([[90, 0, 0, 0, 0, 0, 0],
                                      [0, 90, 0, 0, 0, 0, 0],
                                      [0, 0, 90, 0, 0, 0, 0],
                                      [0, 0, 0, 90, 0, 0, 0],
                                      [0, 0, 0, 0, 1.5, 0, 0],
                                      [0, 0, 0, 0, 0, 1.7, 0],
                                      [0, 0, 0, 0, 0, 0, 1.5]])
# --- Euler-angle UKF setup ------------------------------------------------
# NOTE(review): this snapshot of quat_group does not get the
# GX_ORIENTATION_OFFSET applied, unlike the quaternion filter seed above —
# confirm whether that asymmetry is intentional.
quat_values = quat_group.get()
hpr = get_orientation_shm(quat_values).hpr()
# 6-state filter: [heading, pitch, roll, heading_rate, pitch_rate, roll_rate].
euler_orientation_filter = UnscentedKalmanFilter(6, fx_euler, 6, hx_euler, dt, .1)
euler_orientation_filter.x_hat = np.array([hpr[0], hpr[1], hpr[2], 0, 0, 0])
euler_orientation_filter.P *= .5
#TODO Fill in covariances-- ordering is H P R Hrate Prate Rrate
euler_orientation_filter.R = np.array([[1, 0, 0, 0, 0, 0],
                                       [0, 1, 0, 0, 0, 0],
                                       [0, 0, 1, 0, 0, 0],
                                       [0, 0, 0, 1, 0, 0],
                                       [0, 0, 0, 0, 1, 0],
                                       [0, 0, 0, 0, 0, 1]])
def convert_dvl_velocities(sub_quat, dvl_vel):
    """Rotate a DVL velocity vector into the heading-relative ("spitz") frame.

    sub_quat: current vehicle orientation as a Quaternion.
    dvl_vel:  length-3 velocity vector in the DVL body frame.

    First flips the DVL frame 180 degrees in roll, then removes the
    heading component of the orientation so only pitch/roll are applied.
    """
    # TODO This transform should maybe be in a configuration file.
    # Or perhaps we should configure the DVL to do it for us.
    vel_body_frame = Quaternion(hpr=(0, 0, 180)) * dvl_vel
    hpr = sub_quat.hpr()
    vel_spitz_frame = (Quaternion(hpr=(hpr[0]%360, 0, 0)).conjugate() * sub_quat) * vel_body_frame
    return vel_spitz_frame
def get_velocity(sub_quat, heading_rate):
    """Read the current (negated) velocity vector from shm and, when a DVL
    is present, re-axis it, compensate for the DVL's lever arm, and rotate
    it into the heading-relative frame.

    sub_quat:     current orientation Quaternion.
    heading_rate: heading rate in rad/s, used for the lever-arm correction.
    Returns a length-3 array [vx, vy, vz].
    """
    vel = np.array([-vel_vars[vel_var].get() for vel_var in \
                    ["velx", "vely", "velz"]])
    if dvl_present:
        # Rotate DVL velocities - swap x and y axes
        vel[0], vel[1] = vel[1], -vel[0]
        # Invert z axis, so that we measure depth rate instead of altitude rate
        vel[2] = -vel[2]
        # Offset velocity to account for misaligned reference point and DVL position
        # NOTE(review): the 2*pi/360 factor looks like a degrees->radians
        # conversion applied to a distance (dvl_offset); expected lever-arm
        # correction is offset * angular_rate — confirm units of dvl_offset.
        skew_factor = dvl_offset * 2 * math.pi / 360
        dy = skew_factor * heading_rate
        vel[1] -= dy
        vel = convert_dvl_velocities(sub_quat, vel)
    return vel
def get_depth():
    """Return the calibrated depth: raw sensor depth minus the configured
    surface offset (both read from shm)."""
    return depth_var.get() - depth_offset_var.get()
# --- Position filter setup ------------------------------------------------
# Seed the position filter with the current velocity (rotated through the
# GX4 mounting offset) and depth.
sub_quat = Quaternion(q=quat_orientation_filter.x_hat[:4])
hpr_rate_vec = np.array([roll_rate_var.get(), pitch_rate_var.get(), heading_rate_var.get()])
hpr_rate_vec = np.eye(3, 3).dot(GX_ORIENTATION_OFFSET.matrix()).dot(hpr_rate_vec)
heading_rate_in = math.radians(hpr_rate_vec[2])
x_vel, y_vel, z_vel = get_velocity(sub_quat, heading_rate_in)
depth = get_depth()
# 8-state column vector: [vx, ax, vy, ay, ?, ?, depth, depth_rate] —
# exact layout is defined by kalman_position.PositionFilter.
kalman_xHat = np.array([[ x_vel, 0, y_vel, 0, 0, 0, depth, 0]]).reshape(8, 1)
# Pass in ftarray, shared memory handle to controller
position_filter = PositionFilter(kalman_xHat)
# Loop bookkeeping (start/last_start pace the loop; iteration/real_start
# feed the optional rate printout when show_rate is True).
start = time.time()
show_rate = False
real_start = time.time()
last_start = 0
start = 0
iteration = 0
# Main filter loop: each pass reads sensors from shm, runs whichever
# orientation UKF is selected (quaternion or Euler), runs the position
# filter, and publishes the combined estimate to shm.kalman.
while True:
    # TODO Should we wait on gx4 group write?
    last_start = start
    # NOTE(review): last_start is assigned from start immediately above, so
    # (start - last_start) is always 0 and this always sleeps a full dt,
    # regardless of how long the previous iteration took — confirm intent.
    time.sleep(max(0, dt-(start - last_start)))
    start = time.time()
    if True: # Pls forgive iteration*dt < time.time() - start:
        # Avoid timing errors due to time jumps on startup.
        # if time.time() - start - iteration*dt > 60:
        # start = time.time()
        # iteration = 0
        # Sensor angular rates, rotated through the GX4 mounting offset and
        # converted to rad/s. Vector ordering is [roll, pitch, heading].
        hpr_rate_vec = np.array([roll_rate_var.get(), pitch_rate_var.get(), heading_rate_var.get()])
        hpr_rate_vec = np.eye(3, 3).dot(GX_ORIENTATION_OFFSET.matrix()).dot(hpr_rate_vec)
        heading_rate_in = math.radians(hpr_rate_vec[2])
        pitch_rate_in = math.radians(hpr_rate_vec[1])
        roll_rate_in = math.radians(hpr_rate_vec[0])
        # Bugs arise due to quaternion aliasing, so we choose the quaternion
        # closest to the actual state
        quat_values = quat_group.get()
        quat_values = (Quaternion(q=[quat_values.q0, quat_values.q1, quat_values.q2, quat_values.q3]) * GX_ORIENTATION_OFFSET).q
        actual_quat = get_orientation(quat_values).q
        # q and -q represent the same rotation; pick whichever is nearer the
        # filter's current estimate to avoid sign flips in the update.
        negated_quat = [-i for i in actual_quat]
        kalman_quat = None
        if quat_mode:
            kalman_quat = quat_orientation_filter.x_hat[:4]
        else:
            kalman_quat = Quaternion(hpr=euler_orientation_filter.x_hat[:3])
            kalman_quat = kalman_quat.q
        actual_delta = [kalman_quat[i] - actual_quat[i] for i in range(4)]
        negated_delta = [kalman_quat[i] - negated_quat[i] for i in range(4)]
        quat_in = actual_quat
        if np.linalg.norm(actual_delta) > np.linalg.norm(negated_delta):
            quat_in = negated_quat
        outputs = shm.kalman.get()
        # Pass-through mode: skip the filters entirely and dead-reckon the
        # outputs directly from raw sensor values.
        if shm.settings_kalman.pass_through.get():
            print("Just passin through...")
            pass_through = True
            old_depth = outputs.depth
            old_east = outputs.east
            old_north = outputs.north
            sub_quat = Quaternion(q=quat_in)
            vels = get_velocity(sub_quat, heading_rate_in)
            hpr = sub_quat.hpr()
            # Rotate body-frame velocity into world north/east components.
            c = math.cos(math.radians(hpr[0]))
            s = math.sin(math.radians(hpr[0]))
            north_vel = vels[0]*c - vels[1]*s
            east_vel = vels[0]*s + vels[1]*c
            outputs.accelx = 0
            outputs.accely = 0
            outputs.accelz = 0
            outputs.depth = get_depth()
            outputs.depth_rate = (outputs.depth - old_depth)/dt
            outputs.east = outputs.east + east_vel*dt
            outputs.forward = outputs.forward
            outputs.heading = hpr[0]
            outputs.heading_cumulative = 0
            outputs.heading_rate = heading_rate_in
            outputs.north = outputs.north + north_vel*dt
            outputs.pitch = hpr[1]
            outputs.pitch_rate = pitch_rate_in
            outputs.q0= quat_in[0]
            outputs.q1= quat_in[1]
            outputs.q2= quat_in[2]
            outputs.q3= quat_in[3]
            outputs.roll = hpr[2]
            outputs.roll_rate = roll_rate_in
            outputs.sway = outputs.sway
            outputs.velx = vels[0]
            outputs.vely = vels[1]
            outputs.velz = vels[2]
            shm.kalman.set(outputs)
            continue
        else:
            # First iteration after leaving pass-through: recompute the
            # derived quantities the filters expect.
            if pass_through:
                pass_through = False
                sub_quat = Quaternion(q=quat_in)
                vels = get_velocity(sub_quat, heading_rate_in)
                hpr = sub_quat.hpr()
                c = math.cos(math.radians(hpr[0]))
                s = math.sin(math.radians(hpr[0]))
                north_vel = vels[0]*c - vels[1]*s
                east_vel = vels[0]*s + vels[1]*c
                # euler_orientation_filter.x_hat = np.array([hpr[0], hpr[1], hpr[2], heading_rate_in, pitch_rate_in, roll_rate_in])
                # quat_orientation_filter.x_hat = np.array([quat_in[0], quat_in[1], quat_in[2], quat_in[3], heading_rate_in, pitch_rate_in, roll_rate_in])
                # position_filter.xHat = np.array([[ north_vel, 0, east_vel, 0, 0, 0, get_depth(), outputs.depth_rate]]).reshape(8, 1)
        # Quaternion-filter branch.
        if shm.settings_control.quat_pid.get():
            if not quat_mode:
                # If we just switched, need to ensure that states between filters agree!
                q = Quaternion(hpr=euler_orientation_filter.x_hat[:3])
                q_state = list(q.q) + list(euler_orientation_filter.x_hat[3:])
                quat_orientation_filter.x_hat = q_state
                quat_mode = True
            quat_orientation_filter.predict()
            # TODO: It doesn't make sense to update regardless of whether there is new sensor data
            quat_orientation_filter.update(list(quat_in) + [heading_rate_in, pitch_rate_in, roll_rate_in])
            data = quat_orientation_filter.x_hat
            sub_quat = Quaternion(q=data[:4])
            # Stands for heading-pitch-roll
            hpr = sub_quat.hpr()
            keys = ['q0', 'q1', 'q2', 'q3', 'heading_rate', 'pitch_rate', 'roll_rate']
            output = dict(zip(keys, data))
            outputs.update(**output)
            outputs.heading_rate = math.degrees(outputs.heading_rate)
            outputs.pitch_rate = math.degrees(outputs.pitch_rate)
            outputs.roll_rate = math.degrees(outputs.roll_rate)
            outputs.update(**{'heading': hpr[0] % 360,
                              'pitch': hpr[1],
                              'roll': hpr[2]})
        # Euler-filter branch.
        else:
            if quat_mode:
                # If we just switched, need to ensure that states between filters agree!
                hpr = Quaternion(q=quat_orientation_filter.x_hat[:4])
                hpr_state = list(hpr.hpr()) + list(quat_orientation_filter.x_hat[4:])
                euler_orientation_filter.x_hat = hpr_state
                quat_mode = False
            euler_orientation_filter.predict()
            quat_in_old = quat_in
            quat_in = Quaternion(q=list(quat_in))
            hpr_in = quat_in.hpr()
            euler_orientation_filter.update([hpr_in[0] % 360, hpr_in[1], hpr_in[2], heading_rate_in, pitch_rate_in, roll_rate_in])
            data = euler_orientation_filter.x_hat
            hpr_quat = Quaternion(hpr=data[:3])
            quat = hpr_quat.q
            outputs = shm.kalman.get()
            keys = ['heading', 'pitch', 'roll', 'heading_rate', 'pitch_rate', 'roll_rate']
            # Near the 0/360 heading wrap the Euler filter misbehaves, so
            # inside this deadband the raw measurements are published instead
            # of the filtered state.
            deadband = 50
            in_bad_zone = hpr_in[0] % 360 < (0 + deadband) or hpr_in[0] % 360 > (360 - deadband)
            if in_bad_zone:
                data = [hpr_in[0] % 360, hpr_in[1], hpr_in[2], heading_rate_in, pitch_rate_in, roll_rate_in]
                outputs.update(**{'q0': quat_in_old[0],
                                  'q1': quat_in_old[1],
                                  'q2': quat_in_old[2],
                                  'q3': quat_in_old[3]})
            else:
                outputs.heading_rate = math.degrees(outputs.heading_rate)
                outputs.pitch_rate = math.degrees(outputs.pitch_rate)
                outputs.roll_rate = math.degrees(outputs.roll_rate)
                outputs.update(**{'q0': quat[0],
                                  'q1': quat[1],
                                  'q2': quat[2],
                                  'q3': quat[3]})
            output = dict(zip(keys, data))
            outputs.update(**output)
        # NOTE(review): in the Euler branch above, sub_quat is not
        # reassigned, so this call uses the value from a previous
        # iteration (or the pre-loop seed) — confirm intent.
        x_vel, y_vel, z_vel = get_velocity(sub_quat, heading_rate_in)
        # Compensate for gravitational acceleration
        #grav_x = math.sin( math.radians(outputs.pitch) )*9.8 # XXX: CHRIS DOES NOT LIKE (small angle approx??)
        #grav_y = -math.sin( math.radians(outputs.roll) )*9.8
        #gx4_grav_y = np.tan(math.radians(outputs.pitch))*np.sqrt(shm.gx4.accelx.get()**2 + shm.gx4.accelz.get()**2)
        #gx4_grav_x = -1*np.tan(math.radians(outputs.roll))*shm.gx4.accelz.get()
        #him_grav_y = np.tan(math.radians(outputs.pitch))*np.sqrt(shm.him.x_accel.get()**2 + shm.him.z_accel.get()**2)
        #him_grav_x = -1*np.tan(math.radians(outputs.roll))*shm.him.z_accel.get()
        #x_acc = x_acc - grav_x
        #y_acc = y_acc - grav_y
        x_acc, y_acc = [0, 0] # temporary
        # Check whether the DVL beams are good
        beams_good = sum( [not var.get() for var in beam_vars] ) >= 2
        # And if not, disable them
        if not beams_good and dvl_present:
            # This multiplies x and y velocity by 0 in the measurement vector,
            # but leaves x and y acceleration, and depth
            active_measurements = np.array([0,1,0,1,1]).reshape((5,1))
        else:
            active_measurements = None
        # Thruster outputs are zeroed when soft-killed.
        soft_kill = shm.switches.soft_kill.get()
        curr_thrusters = dict((t,(1-soft_kill)*shm.motor_desires.__getattribute__(t).get()) for t in thrusters)
        # Control wrench (forces and torques) used as the filter's input term.
        u = np.array((control_wrench.f_x.get(), control_wrench.f_y.get(), \
                      control_wrench.f_z.get(), control_wrench.t_x.get(), \
                      control_wrench.t_y.get(), control_wrench.t_z.get()))
        depth = get_depth()
        # Update
        # TODO: It doesn't make sense to update regardless of whether there is new sensor data
        outputs.update(**position_filter.update(outputs.heading, x_vel, x_acc,
                                                y_vel, y_acc, depth, u,
                                                active_measurements,
                                                curr_thrusters,
                                                outputs.pitch, outputs.roll))
        outputs.velz = z_vel
        # This really shouldn't be necessary when kalman has a u term (which it does)
        if not beams_good and dvl_present:
            outputs.velx = 0.0
            outputs.vely = 0.0
        # Write outputs as group, notify only once
        shm.kalman.set(outputs)
        iteration += 1
        if show_rate:
            if (iteration % 100 == 0):
                # NOTE(review): iteration is reset to 0 before the print and
                # (real_start - time.time()) is <= 0, so this always prints
                # 0.0 / -0.0 rather than a loop rate — confirm intent.
                iteration = 0
                real_start = time.time()
                print(iteration/(real_start-time.time()))
| 38.679426 | 154 | 0.58925 |
e0385727acf47b905910e086e86fee8b1c04a482 | 23,153 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/saml.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/saml.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/saml.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Slightly customized python-social-auth backend for SAML 2.0 support
"""
import logging
from copy import deepcopy
import requests
from django.contrib.sites.models import Site
from django.http import Http404
from django.utils.functional import cached_property
from django_countries import countries
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from social_core.backends.saml import OID_EDU_PERSON_ENTITLEMENT, SAMLAuth, SAMLIdentityProvider
from social_core.exceptions import AuthForbidden
from openedx.core.djangoapps.theming.helpers import get_current_request
from common.djangoapps.third_party_auth.exceptions import IncorrectConfigurationException
STANDARD_SAML_PROVIDER_KEY = 'standard_saml_provider'
SAP_SUCCESSFACTORS_SAML_KEY = 'sap_success_factors'
log = logging.getLogger(__name__)
class SAMLAuthBackend(SAMLAuth): # pylint: disable=abstract-method
    """
    Customized version of SAMLAuth that gets the list of IdPs from common.djangoapps.third_party_auth's list of
    enabled providers.
    """
    name = "tpa-saml"
    def get_idp(self, idp_name):
        """ Given the name of an IdP, get a SAMLIdentityProvider instance """
        from .models import SAMLProviderConfig
        return SAMLProviderConfig.current(idp_name).get_config()
    def setting(self, name, default=None):
        """ Get a setting, from SAMLConfiguration """
        try:
            return self._config.get_setting(name)
        except KeyError:
            # Fall back to the strategy's (Django settings) value.
            return self.strategy.setting(name, default, backend=self)
    def get_idp_setting(self, idp, name, default=None):
        """Get a setting from the IdP's own SAMLConfiguration, falling back
        to the backend-level setting() lookup when the IdP doesn't define it."""
        try:
            return idp.saml_sp_configuration.get_setting(name)
        except KeyError:
            return self.setting(name, default)
    def generate_saml_config(self, idp=None):
        """
        Override of SAMLAuth.generate_saml_config to use an idp's configured saml_sp_configuration if given.

        When ``idp`` is None, defers entirely to the upstream implementation.
        """
        if idp:
            abs_completion_url = self.redirect_uri
            config = {
                'contactPerson': {
                    'technical': self.get_idp_setting(idp, 'TECHNICAL_CONTACT'),
                    'support': self.get_idp_setting(idp, 'SUPPORT_CONTACT')
                },
                'debug': True,
                'idp': idp.saml_config_dict if idp else {},
                'organization': self.get_idp_setting(idp, 'ORG_INFO'),
                'security': {
                    'metadataValidUntil': '',
                    'metadataCacheDuration': 'P10D', # metadata valid for ten days
                },
                'sp': {
                    'assertionConsumerService': {
                        'url': abs_completion_url,
                        # python-saml only supports HTTP-POST
                        'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
                    },
                    'entityId': self.get_idp_setting(idp, 'SP_ENTITY_ID'),
                    'x509cert': self.get_idp_setting(idp, 'SP_PUBLIC_CERT'),
                    'privateKey': self.get_idp_setting(idp, 'SP_PRIVATE_KEY'),
                },
                'strict': True, # We must force strict mode - for security
            }
            # Per-IdP overrides are merged on top of the defaults above.
            config["security"].update(self.get_idp_setting(idp, "SECURITY_CONFIG", {}))
            config["sp"].update(self.get_idp_setting(idp, "SP_EXTRA", {}))
            return config
        else:
            return super().generate_saml_config()
    def get_user_id(self, details, response):
        """
        Calling the parent function and handling the exception properly.

        Raises IncorrectConfigurationException when the upstream lookup
        fails with a KeyError (typically a misconfigured attribute mapping).
        """
        try:
            return super().get_user_id(details, response)
        except KeyError as ex:
            # NOTE(review): KeyError has no ``.message`` attribute on
            # Python 3 (hence the pylint disable); if this branch is hit,
            # the format() call itself would raise AttributeError — confirm.
            log.warning(
                '[THIRD_PARTY_AUTH] Error in SAML authentication flow. '
                'Provider: {idp_name}, Message: {message}'.format(
                    message=ex.message, # lint-amnesty, pylint: disable=no-member, exception-message-attribute
                    idp_name=response.get('idp_name')
                )
            )
            raise IncorrectConfigurationException(self) # lint-amnesty, pylint: disable=raise-missing-from
    def generate_metadata_xml(self, idp_name=None): # pylint: disable=arguments-differ
        """
        Override of SAMLAuth.generate_metadata_xml to accept an optional idp parameter.

        Returns a (metadata, errors) tuple from python3-saml's settings object.
        """
        idp = self.get_idp(idp_name) if idp_name else None
        config = self.generate_saml_config(idp)
        saml_settings = OneLogin_Saml2_Settings(
            config,
            sp_validation_only=True
        )
        metadata = saml_settings.get_sp_metadata()
        errors = saml_settings.validate_metadata(metadata)
        return metadata, errors
    def auth_url(self):
        """
        Check that SAML is enabled and that the request includes an 'idp'
        parameter before getting the URL to which we must redirect in order to
        authenticate the user.
        raise Http404 if SAML authentication is disabled.
        """
        if not self._config.enabled:
            log.error('[THIRD_PARTY_AUTH] SAML authentication is not enabled')
            raise Http404
        return super().auth_url()
    def disconnect(self, *args, **kwargs):
        """
        Override of SAMLAuth.disconnect to unlink the learner from enterprise customer if associated.
        """
        from openedx.features.enterprise_support.api import unlink_enterprise_user_from_idp
        user = kwargs.get('user', None)
        unlink_enterprise_user_from_idp(self.strategy.request, user, self.name)
        return super().disconnect(*args, **kwargs)
    def _check_entitlements(self, idp, attributes):
        """
        Check if we require the presence of any specific eduPersonEntitlement.
        raise AuthForbidden if the user should not be authenticated, or do nothing
        to allow the login pipeline to continue.
        """
        if "requiredEntitlements" in idp.conf:
            entitlements = attributes.get(OID_EDU_PERSON_ENTITLEMENT, [])
            for expected in idp.conf['requiredEntitlements']:
                if expected not in entitlements:
                    log.warning(
                        '[THIRD_PARTY_AUTH] SAML user rejected due to missing eduPersonEntitlement. '
                        'Provider: {provider}, Entitlement: {entitlement}'.format(
                            provider=idp.name,
                            entitlement=expected)
                    )
                    raise AuthForbidden(self)
    def _create_saml_auth(self, idp):
        """
        Get an instance of OneLogin_Saml2_Auth
        idp: The Identity Provider - a social_core.backends.saml.SAMLIdentityProvider instance
        """
        # We only override this method so that we can add extra debugging when debug_mode is True
        # Note that auth_inst is instantiated just for the current HTTP request, then is destroyed
        auth_inst = super()._create_saml_auth(idp)
        from .models import SAMLProviderConfig
        if SAMLProviderConfig.current(idp.name).debug_mode:
            def wrap_with_logging(method_name, action_description, xml_getter, request_data, next_url):
                """ Wrap the request and response handlers to add debug mode logging """
                method = getattr(auth_inst, method_name)
                def wrapped_method(*args, **kwargs):
                    """ Wrapped login or process_response method """
                    result = method(*args, **kwargs)
                    log.info(
                        "SAML login %s for IdP %s. Data: %s. Next url %s. XML is:\n%s",
                        action_description, idp.name, request_data, next_url, xml_getter()
                    )
                    return result
                setattr(auth_inst, method_name, wrapped_method)
            request_data = self.strategy.request_data()
            next_url = self.strategy.session_get('next')
            wrap_with_logging("login", "request", auth_inst.get_last_request_xml, request_data, next_url)
            wrap_with_logging("process_response", "response", auth_inst.get_last_response_xml, request_data, next_url)
        return auth_inst
    @cached_property
    def _config(self):
        # SAMLConfiguration for the current site; cached for the lifetime of
        # this backend instance.
        from .models import SAMLConfiguration
        return SAMLConfiguration.current(Site.objects.get_current(get_current_request()), 'default')
class EdXSAMLIdentityProvider(SAMLIdentityProvider):
    """
    Customized version of SAMLIdentityProvider that can retrieve details beyond the standard
    details supported by the canonical upstream version.
    """
    def get_user_details(self, attributes):
        """
        Overrides `get_user_details` from the base class; retrieves those details,
        then updates the dict with values from whatever additional fields are desired.

        Extra fields are declared in self.conf['extra_field_definitions'] as
        dicts with 'name' (output key) and 'urn' (SAML attribute) entries;
        missing attributes yield None.
        """
        details = super().get_user_details(attributes)
        extra_field_definitions = self.conf.get('extra_field_definitions', [])
        details.update({
            field['name']: attributes[field['urn']][0] if field['urn'] in attributes else None
            for field in extra_field_definitions
        })
        return details
    def get_attr(self, attributes, conf_key, default_attribute):
        """
        Internal helper method.
        Get the attribute 'default_attribute' out of the attributes,
        unless self.conf[conf_key] overrides the default by specifying
        another attribute to use.

        Falls back to self.conf['attr_defaults'][conf_key] (or None) when
        the attribute is absent or has an empty value list.
        """
        key = self.conf.get(conf_key, default_attribute)
        if key in attributes:
            try:
                return attributes[key][0]
            except IndexError:
                log.warning('[THIRD_PARTY_AUTH] SAML attribute value not found. '
                            'SamlAttribute: {attribute}'.format(attribute=key))
        return self.conf['attr_defaults'].get(conf_key) or None
    @property
    def saml_sp_configuration(self):
        """Get the SAMLConfiguration for this IdP"""
        return self.conf['saml_sp_configuration']
class SapSuccessFactorsIdentityProvider(EdXSAMLIdentityProvider):
    """
    Customized version of EdXSAMLIdentityProvider that knows how to retrieve user details
    from the SAPSuccessFactors OData API, rather than parse them directly off the
    SAML assertion that we get in response to a login attempt.
    """
    # Keys that must be present in self.conf for the OData enrichment to run;
    # see invalid_configuration().
    required_variables = (
        'sapsf_oauth_root_url',
        'sapsf_private_key',
        'odata_api_root_url',
        'odata_company_id',
        'odata_client_id',
    )
    # Define the relationships between SAPSF record fields and Open edX logistration fields.
    default_field_mapping = {
        'firstName': ['username', 'first_name'],
        'lastName': 'last_name',
        'defaultFullName': 'fullname',
        'email': 'email',
        'country': 'country',
    }
    # Maps SAPSF field names to the attr_defaults keys used when a field is
    # missing from the OData response (see get_registration_fields).
    defaults_value_mapping = {
        'defaultFullName': 'attr_full_name',
        'firstName': 'attr_first_name',
        'lastName': 'attr_last_name',
        'username': 'attr_username',
        'email': 'attr_email',
    }
    # Define a simple mapping to relate SAPSF values to Open edX-compatible values for
    # any given field. By default, this only contains the Country field, as SAPSF supplies
    # a country name, which has to be translated to a country code.
    default_value_mapping = {
        'country': {name: code for code, name in countries}
    }
    # Unfortunately, not everything has a 1:1 name mapping between Open edX and SAPSF, so
    # we need some overrides. TODO: Fill in necessary mappings
    # NOTE(review): this inserts 'United States' as a *top-level* key of
    # default_value_mapping rather than into the 'country' sub-mapping;
    # get_registration_fields looks values up by field name ('country'),
    # so this entry appears to have no effect — confirm intent.
    default_value_mapping.update({
        'United States': 'US',
    })
    def get_registration_fields(self, response):
        """
        Get a dictionary mapping registration field names to default values.

        ``response`` is the parsed OData JSON ({'d': {...}}); field and value
        mappings come from field_mappings / value_mappings, with per-field
        fallbacks from self.conf['attr_defaults'].
        """
        field_mapping = self.field_mappings
        value_defaults = self.conf.get('attr_defaults', {})
        value_defaults = {key: value_defaults.get(value, '') for key, value in self.defaults_value_mapping.items()}
        registration_fields = {}
        for odata_name, edx_name in field_mapping.items():
            # A single SAPSF field may feed several edX fields (list case).
            if isinstance(edx_name, list):
                for value in edx_name:
                    registration_fields[value] = response['d'].get(odata_name, value_defaults.get(odata_name, ''))
            else:
                registration_fields[edx_name] = response['d'].get(odata_name, value_defaults.get(odata_name, ''))
        value_mapping = self.value_mappings
        for field, value in registration_fields.items():
            if field in value_mapping and value in value_mapping[field]:
                registration_fields[field] = value_mapping[field][value]
        return registration_fields
    @property
    def field_mappings(self):
        """
        Get a dictionary mapping the field names returned in an SAP SuccessFactors
        user entity to the field names with which those values should be used in
        the Open edX registration form.
        """
        overrides = self.conf.get('sapsf_field_mappings', {})
        base = self.default_field_mapping.copy()
        base.update(overrides)
        return base
    @property
    def value_mappings(self):
        """
        Get a dictionary mapping of field names to override objects which each
        map values received from SAP SuccessFactors to values expected in the
        Open edX platform registration form.
        """
        overrides = self.conf.get('sapsf_value_mappings', {})
        base = deepcopy(self.default_value_mapping)
        for field, override in overrides.items():
            if field in base:
                base[field].update(override)
            else:
                base[field] = override[field]
        return base
    @property
    def timeout(self):
        """
        The number of seconds OData API requests should wait for a response before failing.
        """
        return self.conf.get('odata_api_request_timeout', 10)
    @property
    def sapsf_idp_url(self):
        # OAuth endpoint that issues SAML assertions.
        return self.conf['sapsf_oauth_root_url'] + 'idp'
    @property
    def sapsf_token_url(self):
        # OAuth endpoint that exchanges a SAML assertion for an access token.
        return self.conf['sapsf_oauth_root_url'] + 'token'
    @property
    def sapsf_private_key(self):
        return self.conf['sapsf_private_key']
    @property
    def odata_api_root_url(self):
        return self.conf['odata_api_root_url']
    @property
    def odata_company_id(self):
        return self.conf['odata_company_id']
    @property
    def odata_client_id(self):
        return self.conf['odata_client_id']
    @property
    def oauth_user_id(self):
        # Optional fixed user id to authenticate OAuth requests as; when
        # absent, the logging-in user's own id is used.
        return self.conf.get('oauth_user_id')
    def invalid_configuration(self):
        """
        Check that we have all the details we need to properly retrieve rich data from the
        SAP SuccessFactors BizX OData API. If we don't, then we should log a warning indicating
        the specific variables that are missing.

        Returns the list of missing keys (empty/falsy when configuration is complete).
        """
        if not all(var in self.conf for var in self.required_variables):
            missing = [var for var in self.required_variables if var not in self.conf]
            log.warning(
                '[THIRD_PARTY_AUTH] To retrieve rich user data for a SAP SuccessFactors identity provider, '
                'the following keys in other_settings are required, but were missing. MissingKeys: {keys}'.format(
                    keys=missing
                )
            )
            return missing
    def log_bizx_api_exception(self, transaction_data, err): # lint-amnesty, pylint: disable=missing-function-docstring
        # Log a BizX API failure with as much context as is available on the
        # requests exception; attribute accesses are guarded because not all
        # RequestException instances carry a response.
        try:
            sys_msg = err.response.content
        except AttributeError:
            sys_msg = 'Not available'
        try:
            headers = err.response.headers
        except AttributeError:
            headers = 'Not available'
        token_data = transaction_data.get('token_data')
        token_data = token_data if token_data else 'Not available'
        log_msg_template = (
            'SAPSuccessFactors exception received for {operation_name} request. ' +
            'URL: {url} ' +
            'Company ID: {company_id}. ' +
            'User ID: {user_id}. ' +
            'Error message: {err_msg}. ' +
            'System message: {sys_msg}. ' +
            'Headers: {headers}. ' +
            'Token Data: {token_data}.'
        )
        log_msg = log_msg_template.format(
            operation_name=transaction_data['operation_name'],
            url=transaction_data['endpoint_url'],
            company_id=transaction_data['company_id'],
            user_id=transaction_data['user_id'],
            err_msg=str(err),
            sys_msg=sys_msg,
            headers=headers,
            token_data=token_data,
        )
        log.warning(log_msg, exc_info=True)
    def generate_bizx_oauth_api_saml_assertion(self, user_id):
        """
        Obtain a SAML assertion from the SAP SuccessFactors BizX OAuth2 identity provider service using
        information specified in the third party authentication configuration "Advanced Settings" section.
        Utilizes the OAuth user_id if defined in Advanced Settings in order to generate the SAML assertion,
        otherwise utilizes the user_id for the current user in context.

        Returns the assertion text, or None on any request failure.
        """
        session = requests.Session()
        oauth_user_id = self.oauth_user_id if self.oauth_user_id else user_id
        transaction_data = {
            'token_url': self.sapsf_token_url,
            'client_id': self.odata_client_id,
            'user_id': oauth_user_id,
            'private_key': self.sapsf_private_key,
        }
        try:
            assertion = session.post(
                self.sapsf_idp_url,
                data=transaction_data,
                timeout=self.timeout,
            )
            assertion.raise_for_status()
        except requests.RequestException as err:
            transaction_data['operation_name'] = 'generate_bizx_oauth_api_saml_assertion'
            transaction_data['endpoint_url'] = self.sapsf_idp_url
            transaction_data['company_id'] = self.odata_company_id
            self.log_bizx_api_exception(transaction_data, err)
            return None
        return assertion.text
    def generate_bizx_oauth_api_access_token(self, user_id):
        """
        Request a new access token from the SuccessFactors BizX OAuth2 identity provider service
        using a valid SAML assertion (see generate_bizx_api_saml_assertion) and the infomration specified
        in the third party authentication configuration "Advanced Settings" section.

        Returns the parsed token JSON, or None on any failure.
        """
        session = requests.Session()
        transaction_data = {
            'client_id': self.odata_client_id,
            'company_id': self.odata_company_id,
            'grant_type': 'urn:ietf:params:oauth:grant-type:saml2-bearer',
        }
        assertion = self.generate_bizx_oauth_api_saml_assertion(user_id)
        if not assertion:
            return None
        try:
            transaction_data['assertion'] = assertion
            token_response = session.post(
                self.sapsf_token_url,
                data=transaction_data,
                timeout=self.timeout,
            )
            token_response.raise_for_status()
        except requests.RequestException as err:
            transaction_data['operation_name'] = 'generate_bizx_oauth_api_access_token'
            transaction_data['endpoint_url'] = self.sapsf_token_url
            transaction_data['user_id'] = user_id
            self.log_bizx_api_exception(transaction_data, err)
            return None
        return token_response.json()
    def get_bizx_odata_api_client(self, user_id): # lint-amnesty, pylint: disable=missing-function-docstring
        # Build a requests.Session pre-authorized with a fresh BizX bearer
        # token; returns None when the token could not be obtained. The raw
        # token payload is stashed on the session for later error logging.
        session = requests.Session()
        access_token_data = self.generate_bizx_oauth_api_access_token(user_id)
        if not access_token_data:
            return None
        token_string = access_token_data['access_token']
        session.headers.update({'Authorization': f'Bearer {token_string}', 'Accept': 'application/json'})
        session.token_data = access_token_data
        return session
    def get_user_details(self, attributes):
        """
        Attempt to get rich user details from the SAP SuccessFactors OData API. If we're missing any
        of the info we need to do that, or if the request triggers an exception, then fail nicely by
        returning the basic user details we're able to extract from just the SAML response.
        """
        basic_details = super().get_user_details(attributes)
        if self.invalid_configuration():
            return basic_details
        user_id = basic_details['username']
        # endpoint_url is constructed from field_mappings setting of SAML Provider config.
        # We convert field_mappings to make comma separated list of the fields which needs to be pulled from BizX
        fields = ','.join(self.field_mappings)
        endpoint_url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
            root_url=self.odata_api_root_url,
            user_id=user_id,
            fields=fields,
        )
        client = self.get_bizx_odata_api_client(user_id=user_id)
        if not client:
            return basic_details
        try:
            response = client.get(
                endpoint_url,
                timeout=self.timeout,
            )
            response.raise_for_status()
            response = response.json()
        except requests.RequestException as err:
            transaction_data = {
                'operation_name': 'get_user_details',
                'endpoint_url': endpoint_url,
                'user_id': user_id,
                'company_id': self.odata_company_id,
                'token_data': client.token_data,
            }
            self.log_bizx_api_exception(transaction_data, err)
            return basic_details
        log.info('[THIRD_PARTY_AUTH] BizX Odata response for user [%s] %s', user_id, response)
        return self.get_registration_fields(response)
def get_saml_idp_choices():
    """
    Get a list of the available SAMLIdentityProvider subclasses that can be used to process
    SAML requests, for use in the Django administration form.
    """
    # (value, human-readable label) pairs, in Django choices format.
    choices = [
        (STANDARD_SAML_PROVIDER_KEY, 'Standard SAML provider'),
        (SAP_SUCCESSFACTORS_SAML_KEY, 'SAP SuccessFactors provider'),
    ]
    return tuple(choices)
def get_saml_idp_class(idp_identifier_string):
    """
    Given a string ID indicating the type of identity provider in use during a given request, return
    the SAMLIdentityProvider subclass able to handle requests for that type of identity provider.

    Unrecognized identifiers are logged and fall back to EdXSAMLIdentityProvider.
    """
    mapping = {
        STANDARD_SAML_PROVIDER_KEY: EdXSAMLIdentityProvider,
        SAP_SUCCESSFACTORS_SAML_KEY: SapSuccessFactorsIdentityProvider,
    }
    try:
        return mapping[idp_identifier_string]
    except KeyError:
        log.error(
            '[THIRD_PARTY_AUTH] Invalid EdXSAMLIdentityProvider subclass--'
            'using EdXSAMLIdentityProvider base class. Provider: {provider}'.format(provider=idp_identifier_string)
        )
        return EdXSAMLIdentityProvider
| 41.492832 | 120 | 0.638319 |
2a29a06cb682f4030b8aa3e1fde4f7ee3bc42be4 | 1,465 | py | Python | Serial.py | flasonil/Deep-Neural-Network-for-CS-based-signal-reconstruction-on-STM32-MCU-board | 57cc1e57e4497bea93a12b71c18db8f472ae4295 | [
"MIT"
] | 11 | 2020-07-06T13:23:35.000Z | 2021-09-04T17:41:00.000Z | Serial.py | flasonil/Deep-Neural-Network-for-CS-based-signal-reconstruction-on-STM32-MCU-board | 57cc1e57e4497bea93a12b71c18db8f472ae4295 | [
"MIT"
] | null | null | null | Serial.py | flasonil/Deep-Neural-Network-for-CS-based-signal-reconstruction-on-STM32-MCU-board | 57cc1e57e4497bea93a12b71c18db8f472ae4295 | [
"MIT"
] | 1 | 2021-04-04T23:05:29.000Z | 2021-04-04T23:05:29.000Z | from scipy import signal
from scipy import io
from scipy import random
import os
import serial
import sys
import numpy as np
import time
import struct
from dnnCS_functions import *
# The data, split between train and test sets:
# Load y_test float32 type
data = io.loadmat('float_y.mat')
y_test = data['ans'][:32,:].T
# sparsity basis
D = io.loadmat('D.mat')['D']
# sensing matrix
A = io.loadmat('A.mat')['A'][:32,:]
# dataset
data = io.loadmat('test_set.mat')
X_test, S_test = data['X_test'], data['S_test']
B = A@D
#Check the correct COMx port associated to the STLink and choose a consistend baud rate
port = 'COM6'
baud = 115200
ser = serial.Serial(port, baud, timeout=0)
for i in range(100):
for j in range(32):
#Iterations over the 32 elements of an y_test vector
ser.write(bytearray(y_test[i][j]))
time.sleep(.1)
ser.flushOutput()
#Reading the sent data. Note the output is an hexadecimal string of 128 bytes filled with 0s and 1s
reading = ser.readline()
ser.flushOutput()
#Converting the data into an numpy boolean array type, required by xi_estimation function
s_hat = struct.unpack('????????????????????????????????????????????????????????????????', reading)
s_hat = np.asarray(s_hat)
xi = xi_estimation(y_test[i], s_hat, B)
x_hat = D@xi
print('RSNR = {r:6.2f} with support missmatch = {ms:d}'.format(r=RSNR(X_test[i],x_hat),ms=int(sum(np.abs(S_test[i]-s_hat)))))
ser.close()
| 31.170213 | 129 | 0.659386 |
e7b6ca5c579e2636abe80ec265cea7682ae93534 | 1,049 | py | Python | packages/pyright-internal/src/tests/samples/classVar1.py | lipovsek/pytea | c536515a5e5947fac8871784323ba7eddc58956d | [
"MIT"
] | null | null | null | packages/pyright-internal/src/tests/samples/classVar1.py | lipovsek/pytea | c536515a5e5947fac8871784323ba7eddc58956d | [
"MIT"
] | null | null | null | packages/pyright-internal/src/tests/samples/classVar1.py | lipovsek/pytea | c536515a5e5947fac8871784323ba7eddc58956d | [
"MIT"
] | null | null | null | # This sample tests the type checker's handling of ClassVar
# as described in PEP 526.
from typing import Any, ClassVar, Dict
class MyDescriptor:
    """Minimal data descriptor (defines both __get__ and __set__).

    Used below to show that a ClassVar holding a descriptor instance may
    still be assigned through an instance.
    """
    def __get__(self, *args: Any) -> str:
        return ""
    def __set__(self, obj: Any, value: str):
        pass
class Starship:
    """Sample class mixing plain instance/class attributes with ClassVars."""
    # Class-level default that an instance may shadow.
    captain: str = "Picard"
    # Pure instance attribute, assigned in __init__.
    damage: int
    # ClassVar: shared across instances; must not be set via an instance.
    stats: ClassVar[Dict[str, int]] = {}
    # ClassVar holding a data descriptor; instance assignment routes through __set__.
    desc: ClassVar[MyDescriptor] = MyDescriptor()
    def __init__(self, damage: int, captain: str = None):
        self.damage = damage
        if captain:
            self.captain = captain  # Else keep the default
    def hit(self):
        # Mutate the shared ClassVar dict through the class, not the instance.
        Starship.stats["hits"] = Starship.stats.get("hits", 0) + 1
# Exercise the class: ClassVar access through the class is fine.
enterprise_d = Starship(3000)
Starship.stats = {}
a = enterprise_d.stats
# This should be flagged as an error because stats cannot
# be set via a class instance because it's a ClassVar.
enterprise_d.stats = {}
# This should not generate an error because "desc" is a
# descriptor instance on the class.
enterprise_d.desc = "OK"
| 24.97619 | 67 | 0.63775 |
11a960c94e7eecc7c10896bae1eaab08ab8d5447 | 5,548 | py | Python | app.py | david-fried/sqlalchemy-challenge | 3886f1f701f38344fe740d8d9e2a07178754a901 | [
"ADSL"
] | 1 | 2020-09-23T22:40:42.000Z | 2020-09-23T22:40:42.000Z | app.py | david-fried/SQL_Alchemy | 3886f1f701f38344fe740d8d9e2a07178754a901 | [
"ADSL"
] | null | null | null | app.py | david-fried/SQL_Alchemy | 3886f1f701f38344fe740d8d9e2a07178754a901 | [
"ADSL"
] | null | null | null | from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
from flask import Flask, jsonify, request
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
import pandas as pd
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from scipy.stats import ttest_ind
# Connect to the SQLite climate database used by all routes below.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Flask application serving the climate API.
app = Flask(__name__)
# When user hits the index route
@app.route("/")
def welcome():
    """List all available api routes."""
    routes = [
        "Available Routes:",
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/[start_date]",
        "/api/v1.0/[start_date]/[end_date]",
    ]
    # Joining with "<br/>" reproduces the original concatenated HTML exactly
    # (no trailing separator after the last route).
    return "<br/>".join(routes)
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return all precipitation observations as JSON.

    Each element of the returned list is a one-entry dict mapping an
    observation date string to its precipitation value, ordered by date
    ascending (same shape as the original implementation).
    """
    session = Session(engine)
    # `jsonify` is already imported at module level; the original's local
    # `from flask import jsonify` was redundant and has been removed.
    results = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.asc()).all()
    session.close()
    # One {date: prcp} dict per row, preserving the query's date order.
    return jsonify([{date: prcp} for date, prcp in results])
@app.route("/api/v1.0/stations")
def stations():
    """Return the list of station identifiers as JSON."""
    session = Session(engine)
    rows = session.query(Station.station).all()
    session.close()
    # Each row is a one-element tuple; unwrap to a flat list of station ids.
    return jsonify([row[0] for row in rows])
@app.route("/api/v1.0/tobs")
def tobs():
    """Temperature observations from the most active station over its final year of data."""
    session = Session(engine)
    # Stations ranked by observation count, most active first.
    station_data = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
    most_active = station_data[0][0]
    # Most recent observation date in the dataset (a '%Y-%m-%d' string).
    last_date = session.query(Measurement.date).order_by(Measurement.date.asc())[-1][0]
    # 365 days before the last observation.
    past_year = (dt.datetime.strptime(last_date, '%Y-%m-%d') - dt.timedelta(days=365)).date()
    results = session.query(Measurement.tobs)\
        .filter((Measurement.station == most_active) & \
        (Measurement.date >= past_year))\
        .order_by(Measurement.date.asc()).all()
    session.close()
    # Unwrap the one-column rows into a flat list of temperatures.
    temps = []
    for row in results:
        temps.append(row[0])
    return jsonify(temps)
@app.route("/api/v1.0/<start>")
def start_date(start):
    """TMIN, TAVG, and TMAX for all dates from a start date onwards.

    Args:
        start (string): A date string in the format %Y-%m-%d

    Returns:
        JSON list [TMIN, TAVG, TMAX] over the range, a plain-text prompt if
        the date is outside the dataset, or a JSON error with HTTP 404 if
        the date is malformed.
    """
    session = Session(engine)
    ordered_dates = session.query(Measurement.date).order_by(Measurement.date.asc()).all()
    first_date = dt.datetime.strptime(ordered_dates[0][0], '%Y-%m-%d').date()
    last_date = dt.datetime.strptime(ordered_dates[-1][0], '%Y-%m-%d').date()
    try:
        # Parse once (the original parsed the same string twice); ValueError
        # means the URL segment was malformed. Keep the raw string in
        # `start` so the error message below matches the user's input.
        start_dt = dt.datetime.strptime(start, '%Y-%m-%d').date()
        if first_date <= start_dt <= last_date:
            results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs),
                                    func.max(Measurement.tobs)).filter(
                                        (Measurement.date >= start_dt) &
                                        (Measurement.date <= last_date)).all()
            return jsonify(list(results)[0])
        else:
            return f"Please enter a date between {first_date} and {last_date}."
    except ValueError:
        return jsonify({"error": f"Your response, {start}, was not formatted correctly"}), 404
    finally:
        # Close the session on every exit path; the original leaked it on
        # out-of-range and malformed input.
        session.close()
@app.route("/api/v1.0/<start>/<end>")
def date_range(start, end):
    """TMIN, TAVG, and TMAX for an inclusive range of dates.

    Args:
        start (string): A date string in the format %Y-%m-%d
        end (string): A date string in the format %Y-%m-%d

    Returns:
        JSON list [TMIN, TAVG, TMAX] over the range, a plain-text prompt if
        either date is outside the dataset, or a JSON error with HTTP 404 if
        either date is malformed.
    """
    session = Session(engine)
    ordered_dates = session.query(Measurement.date).order_by(Measurement.date.asc()).all()
    first_date = dt.datetime.strptime(ordered_dates[0][0], '%Y-%m-%d').date()
    last_date = dt.datetime.strptime(ordered_dates[-1][0], '%Y-%m-%d').date()
    try:
        # Parse each date exactly once (the original parsed them twice) and
        # keep the raw strings for the error message below.
        start_dt = dt.datetime.strptime(start, '%Y-%m-%d').date()
        end_dt = dt.datetime.strptime(end, '%Y-%m-%d').date()
        if (first_date <= start_dt <= last_date) and \
                (first_date <= end_dt <= last_date):
            results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs),
                                    func.max(Measurement.tobs)).filter(
                                        Measurement.date >= start_dt).filter(
                                        Measurement.date <= end_dt).all()
            return jsonify(list(results)[0])
        else:
            return f"Please enter dates between {first_date} and {last_date}."
    except ValueError:
        return jsonify({"error": f"Your responses, {start} and {end}, were not formatted correctly"}), 404
    finally:
        # Close the session on every exit path; the original leaked it on
        # out-of-range and malformed input.
        session.close()
# Start the Flask development server when this module is run directly.
if __name__ == '__main__':
    app.run(debug=True)
| 32.635294 | 172 | 0.616078 |
11cc0cafc28fbfb9ddc05957964d4cb83f059ad6 | 1,538 | py | Python | setup.py | csdms/standard_names | 385e8700125c9823720ca16f112ef5e1c3903b1f | [
"MIT"
] | 1 | 2017-12-19T04:01:36.000Z | 2017-12-19T04:01:36.000Z | setup.py | csdms/standard_names | 385e8700125c9823720ca16f112ef5e1c3903b1f | [
"MIT"
] | 1 | 2019-04-09T16:02:47.000Z | 2019-04-09T16:02:47.000Z | setup.py | csdms/standard_names | 385e8700125c9823720ca16f112ef5e1c3903b1f | [
"MIT"
] | 2 | 2015-04-15T23:23:46.000Z | 2017-08-28T20:35:35.000Z | #!/usr/bin/env python
from setuptools import setup
import versioneer
setup(
name="standard_names",
version=versioneer.get_version(),
description="CSDMS standard names",
author="Eric Hutton",
author_email="eric.hutton@colorado.edu",
url="https://csdms.colorado.edu",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Cython",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Physics",
],
install_requires=[
"pyyaml",
"six",
"packaging",
"jinja2",
"py-scripting",
"binaryornot",
],
packages=[
"standard_names",
"standard_names.cmd",
"standard_names.utilities",
"standard_names.tests",
],
cmdclass=versioneer.get_cmdclass(),
entry_points={
"console_scripts": [
"snbuild = standard_names.cmd.snbuild:run",
"sndump = standard_names.cmd.sndump:run",
"snscrape = standard_names.cmd.snscrape:run",
"snsql = standard_names.cmd.snsql:run",
"snvalidate = standard_names.cmd.snvalidate:run",
]
},
package_data={"": ["data/*txt"]},
test_suite="standard_names.tests",
)
| 28.481481 | 70 | 0.592328 |
660ec3e96506fb2783a47308fb24478f564ced8d | 3,948 | py | Python | selfdrive/controls/lib/lane_planner.py | vincentw56/openpilot | 7eca4d62078b14de62c5f2ce08e382eb31cc7076 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lane_planner.py | vincentw56/openpilot | 7eca4d62078b14de62c5f2ce08e382eb31cc7076 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lane_planner.py | vincentw56/openpilot | 7eca4d62078b14de62c5f2ce08e382eb31cc7076 | [
"MIT"
] | null | null | null | import numpy as np
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.realtime import DT_MDL
from selfdrive.hardware import EON, TICI
from selfdrive.swaglog import cloudlog
TRAJECTORY_SIZE = 33
# camera offset is meters from center car to camera
if EON:
CAMERA_OFFSET = 0.06
PATH_OFFSET = 0.0
elif TICI:
CAMERA_OFFSET = -0.04
PATH_OFFSET = -0.04
else:
CAMERA_OFFSET = 0.0
PATH_OFFSET = 0.0
class LanePlanner:
    """Fuses model lane-line predictions into a single desired lateral path.

    Blends the model's own path with a lane-centered path, weighted by how
    trustworthy the left/right lane-line detections are.
    """
    def __init__(self, wide_camera=False):
        # Lane-line time and longitudinal-distance knots (filled by parse_model).
        self.ll_t = np.zeros((TRAJECTORY_SIZE,))
        self.ll_x = np.zeros((TRAJECTORY_SIZE,))
        # Lateral positions of the left/right lane lines.
        self.lll_y = np.zeros((TRAJECTORY_SIZE,))
        self.rll_y = np.zeros((TRAJECTORY_SIZE,))
        # FirstOrderFilter instances smoothing the lane-width estimate and
        # the certainty attached to it.
        self.lane_width_estimate = FirstOrderFilter(3.7, 9.95, DT_MDL)
        self.lane_width_certainty = FirstOrderFilter(1.0, 0.95, DT_MDL)
        self.lane_width = 3.7
        # Left/right lane-line probabilities and standard deviations.
        self.lll_prob = 0.
        self.rll_prob = 0.
        self.d_prob = 0.
        self.lll_std = 0.
        self.rll_std = 0.
        self.l_lane_change_prob = 0.
        self.r_lane_change_prob = 0.
        # Wide-camera mounts use mirrored lateral offsets.
        self.camera_offset = -CAMERA_OFFSET if wide_camera else CAMERA_OFFSET
        self.path_offset = -PATH_OFFSET if wide_camera else PATH_OFFSET
    def parse_model(self, md):
        """Extract lane-line geometry, probabilities and desire state from a model message."""
        lane_lines = md.laneLines
        if len(lane_lines) == 4 and len(lane_lines[0].t) == TRAJECTORY_SIZE:
            self.ll_t = (np.array(lane_lines[1].t) + np.array(lane_lines[2].t))/2
            # left and right ll x is the same
            self.ll_x = lane_lines[1].x
            # only offset left and right lane lines; offsetting path does not make sense
            self.lll_y = np.array(lane_lines[1].y) - self.camera_offset
            self.rll_y = np.array(lane_lines[2].y) - self.camera_offset
            self.lll_prob = md.laneLineProbs[1]
            self.rll_prob = md.laneLineProbs[2]
            self.lll_std = md.laneLineStds[1]
            self.rll_std = md.laneLineStds[2]
        desire_state = md.meta.desireState
        if len(desire_state):
            self.l_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeLeft]
            self.r_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeRight]
    def get_d_path(self, v_ego, path_t, path_xyz):
        """Blend path_xyz's lateral component toward the lane-centered path.

        NOTE(review): mutates ``path_xyz`` in place and also returns it.
        """
        # Reduce reliance on lanelines that are too far apart or
        # will be in a few seconds
        path_xyz[:, 1] -= self.path_offset
        l_prob, r_prob = self.lll_prob, self.rll_prob
        width_pts = self.rll_y - self.lll_y
        prob_mods = []
        # Down-weight both lines if the lane looks wider than ~4-5 m now,
        # in 1.5 s, or in 3 s (distance approximated as t * (v_ego + 7)).
        for t_check in (0.0, 1.5, 3.0):
            width_at_t = interp(t_check * (v_ego + 7), self.ll_x, width_pts)
            prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
        mod = min(prob_mods)
        l_prob *= mod
        r_prob *= mod
        # Reduce reliance on uncertain lanelines
        l_std_mod = interp(self.lll_std, [.15, .3], [1.0, 0.0])
        r_std_mod = interp(self.rll_std, [.15, .3], [1.0, 0.0])
        l_prob *= l_std_mod
        r_prob *= r_std_mod
        # Find current lanewidth; blend the measured width with a
        # speed-dependent prior according to the filtered certainty.
        self.lane_width_certainty.update(l_prob * r_prob)
        current_lane_width = abs(self.rll_y[0] - self.lll_y[0])
        self.lane_width_estimate.update(current_lane_width)
        speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
        self.lane_width = self.lane_width_certainty.x * self.lane_width_estimate.x + \
                          (1 - self.lane_width_certainty.x) * speed_lane_width
        clipped_lane_width = min(4.0, self.lane_width)
        # Candidate paths half a lane-width inside each lane line, mixed by
        # the per-line probabilities.
        path_from_left_lane = self.lll_y + clipped_lane_width / 2.0
        path_from_right_lane = self.rll_y - clipped_lane_width / 2.0
        self.d_prob = l_prob + r_prob - l_prob * r_prob
        lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
        safe_idxs = np.isfinite(self.ll_t)
        if safe_idxs[0]:
            # Resample the lane path onto the planner's time grid, then mix
            # it with the model path by the combined lane probability.
            lane_path_y_interp = np.interp(path_t, self.ll_t[safe_idxs], lane_path_y[safe_idxs])
            path_xyz[:,1] = self.d_prob * lane_path_y_interp + (1.0 - self.d_prob) * path_xyz[:,1]
        else:
            cloudlog.warning("Lateral mpc - NaNs in laneline times, ignoring")
        return path_xyz
| 37.245283 | 109 | 0.692756 |
d878d3dcc19441f3b92683d1156ec712354d222a | 2,232 | py | Python | being/logging.py | andsteing/being | 0d0dca71edc512df47fe5ff3bea692e728f90924 | [
"MIT"
] | 2 | 2021-11-11T12:16:43.000Z | 2022-01-13T06:06:20.000Z | being/logging.py | andsteing/being | 0d0dca71edc512df47fe5ff3bea692e728f90924 | [
"MIT"
] | 5 | 2022-01-13T08:01:54.000Z | 2022-02-22T12:28:02.000Z | being/logging.py | andsteing/being | 0d0dca71edc512df47fe5ff3bea692e728f90924 | [
"MIT"
] | 3 | 2022-01-11T18:16:35.000Z | 2022-01-13T13:14:26.000Z | """Being logging.
Resources:
- https://stackoverflow.com/questions/7016056/python-logging-not-outputting-anything
"""
import logging
import logging.handlers
import os
from typing import Optional
from logging import Logger
from being.configuration import CONFIG
from being.constants import MB
LEVEL = CONFIG['Logging']['LEVEL']
DIRECTORY = CONFIG['Logging']['DIRECTORY']
FILENAME = CONFIG['Logging']['FILENAME']
BEING_LOGGER = logging.getLogger('being')
"""Being root logger."""
DEFAULT_EXCLUDES = ['parso', 'matplotlib', 'can', 'canopen', 'aiohttp',]
def get_logger(name: Optional[str] = None, parent: Optional[Logger] = BEING_LOGGER) -> Logger:
    """Get logger. Wrap for `logging.getLogger` in order to keep track of being
    loggers (via evil global variable BEING_LOGGERS).

    Args:
        name: Logger name. None for root logger if not parent logger.
        parent: Parent logger. BEING_LOGGER by default.
    """
    if name is None:
        return BEING_LOGGER
    # Attach to the parent when one is given, otherwise fall back to a
    # plain module-level logger.
    return parent.getChild(name) if parent else logging.getLogger(name)
def suppress_other_loggers(*excludes):
    """Suppress log messages from some of the other common loggers."""
    patterns = excludes if excludes else DEFAULT_EXCLUDES
    # Disable every registered logger whose name contains one of the patterns.
    for logger_name in logging.root.manager.loggerDict:
        if any(pattern in logger_name for pattern in patterns):
            logging.getLogger(logger_name).disabled = True
def setup_logging(level=LEVEL):
    """Setup being loggers.

    Args:
        level: Log level for the root logger (configured ``LEVEL`` by default).

    Installs a rotating file handler under ``DIRECTORY`` when one is
    configured, otherwise a plain stream handler.
    """
    # Note using logging.basicConfig(level=level) would route all the other
    # loggers to stdout
    logging.root.setLevel(level)
    formatter = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d - %(levelname)5s - %(name)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
    if DIRECTORY:
        os.makedirs(DIRECTORY, exist_ok=True)
        filename = os.path.join(DIRECTORY, FILENAME)
        print(f'Logging to {filename!r}')
        # Rotate at 100 MB, keeping up to 5 old log files.
        handler = logging.handlers.RotatingFileHandler(
            filename,
            maxBytes=100 * MB,
            backupCount=5,
        )
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    #handler.setLevel(level)
    logging.root.addHandler(handler)
| 27.9 | 94 | 0.667115 |
bb74e00c63f7f77d8cef2ac22b8fecc46549e816 | 18,812 | py | Python | ppcls/arch/backbone/model_zoo/levit.py | qili93/PaddleClas | 21a89ee365613890b601001343a6bef2cbd99c2c | [
"Apache-2.0"
] | 2 | 2021-06-22T06:28:20.000Z | 2021-06-22T06:28:23.000Z | ppcls/arch/backbone/model_zoo/levit.py | sunjianfengHub/PaddleClas | dad9fa8b54da97691d2c7f2b6e0c2b4f077177b7 | [
"Apache-2.0"
] | null | null | null | ppcls/arch/backbone/model_zoo/levit.py | sunjianfengHub/PaddleClas | dad9fa8b54da97691d2c7f2b6e0c2b4f077177b7 | [
"Apache-2.0"
] | 1 | 2021-06-25T17:50:30.000Z | 2021-06-25T17:50:30.000Z | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
import warnings
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import TruncatedNormal, Constant
from paddle.regularizer import L2Decay
from .vision_transformer import trunc_normal_, zeros_, ones_, Identity
from ppcls.utils.save_load import load_dygraph_pretrain, load_dygraph_pretrain_from_url
MODEL_URLS = {
"LeViT_128S": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128S_pretrained.pdparams",
"LeViT_128": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_128_pretrained.pdparams",
"LeViT_192": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_192_pretrained.pdparams",
"LeViT_256": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_256_pretrained.pdparams",
"LeViT_384": "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/LeViT_384_pretrained.pdparams",
}
__all__ = list(MODEL_URLS.keys())
def cal_attention_biases(attention_biases, attention_bias_idxs):
    """Gather per-head relative-position biases for every query/key pair.

    Args:
        attention_biases: learnable bias table of shape
            (num_heads, num_offsets) -- see the `create_parameter` calls in
            Attention/AttentionSubsample.
        attention_bias_idxs: int64 tensor of shape (N_q, N_k) mapping each
            (query, key) position pair to an offset index.

    Returns:
        Bias tensor reshaped to (num_heads, N_q, N_k) -- the leading 0 in
        `reshape` keeps that dimension unchanged -- ready to be added to the
        attention logits.
    """
    gather_list = []
    attention_bias_t = paddle.transpose(attention_biases, (1, 0))
    # One gather per query row of the index table.
    for idx in attention_bias_idxs:
        gather = paddle.gather(attention_bias_t, idx)
        gather_list.append(gather)
    shape0, shape1 = attention_bias_idxs.shape
    return paddle.transpose(paddle.concat(gather_list), (1, 0)).reshape(
        (0, shape0, shape1))
class Conv2d_BN(nn.Sequential):
    """Conv2D (bias-free) followed by BatchNorm2D.

    NOTE(review): ``bn_weight_init`` and ``resolution`` are accepted but
    never used -- the BN weight is always initialized to ones. Confirm
    whether bn_weight_init was meant to seed the BN weight (as in the
    reference LeViT implementation).
    """
    def __init__(self,
                 a,
                 b,
                 ks=1,
                 stride=1,
                 pad=0,
                 dilation=1,
                 groups=1,
                 bn_weight_init=1,
                 resolution=-10000):
        super().__init__()
        self.add_sublayer(
            'c',
            nn.Conv2D(
                a, b, ks, stride, pad, dilation, groups, bias_attr=False))
        bn = nn.BatchNorm2D(b)
        ones_(bn.weight)
        zeros_(bn.bias)
        self.add_sublayer('bn', bn)
class Linear_BN(nn.Sequential):
    """Linear (bias-free) followed by BatchNorm1D over flattened tokens.

    NOTE(review): ``bn_weight_init`` is accepted but never used -- the BN
    weight is always initialized to ones.
    """
    def __init__(self, a, b, bn_weight_init=1):
        super().__init__()
        self.add_sublayer('c', nn.Linear(a, b, bias_attr=False))
        bn = nn.BatchNorm1D(b)
        ones_(bn.weight)
        zeros_(bn.bias)
        self.add_sublayer('bn', bn)
    def forward(self, x):
        # Apply the linear layer, run BatchNorm1D over the flattened
        # (batch * tokens, features) view, then restore the input shape.
        l, bn = self._sub_layers.values()
        x = l(x)
        return paddle.reshape(bn(x.flatten(0, 1)), x.shape)
class BN_Linear(nn.Sequential):
    """BatchNorm1D followed by Linear; used as the classifier head.

    NOTE(review): ``std`` is accepted but never used -- the linear weight is
    initialized with the module-level ``trunc_normal_`` regardless.
    """
    def __init__(self, a, b, bias=True, std=0.02):
        super().__init__()
        self.add_sublayer('bn', nn.BatchNorm1D(a))
        l = nn.Linear(a, b, bias_attr=bias)
        trunc_normal_(l.weight)
        if bias:
            zeros_(l.bias)
        self.add_sublayer('l', l)
self.add_sublayer('l', l)
def b16(n, activation, resolution=224):
    """Convolutional patch-embedding stem.

    Four stride-2 Conv+BN stages with activations in between: the spatial
    resolution shrinks by a factor of 16 while channels widen 3 -> n.
    """
    return nn.Sequential(
        Conv2d_BN(
            3, n // 8, 3, 2, 1, resolution=resolution),
        activation(),
        Conv2d_BN(
            n // 8, n // 4, 3, 2, 1, resolution=resolution // 2),
        activation(),
        Conv2d_BN(
            n // 4, n // 2, 3, 2, 1, resolution=resolution // 4),
        activation(),
        Conv2d_BN(
            n // 2, n, 3, 2, 1, resolution=resolution // 8))
class Residual(nn.Layer):
    """Residual wrapper: returns ``x + m(x)``, with optional stochastic depth.

    Args:
        m: the wrapped sub-layer.
        drop: per-sample drop-path probability applied during training
            (0 disables stochastic depth).
    """
    def __init__(self, m, drop):
        super().__init__()
        self.m = m
        self.drop = drop
    def forward(self, x):
        if self.training and self.drop > 0:
            # Per-sample stochastic depth: keep the branch with probability
            # (1 - drop) and rescale so the expected output is unchanged.
            # The original used torch-style calls (x.size(0),
            # paddle.rand(..., device=...), Tensor.ge_/div) that do not exist
            # in paddle and would raise at runtime; this is the paddle
            # equivalent of the same computation.
            keep = paddle.rand([x.shape[0], 1, 1]) >= self.drop
            mask = (keep.astype(x.dtype) / (1 - self.drop)).detach()
            return x + self.m(x) * mask
        else:
            return x + self.m(x)
class Attention(nn.Layer):
    """LeViT multi-head self-attention with learned relative-position biases.

    The bias table is indexed by the absolute offset (|dx|, |dy|) between
    query and key positions on a ``resolution x resolution`` grid.
    """
    def __init__(self,
                 dim,
                 key_dim,
                 num_heads=8,
                 attn_ratio=4,
                 activation=None,
                 resolution=14):
        super().__init__()
        self.num_heads = num_heads
        self.scale = key_dim**-0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        # Per-head value dim is attn_ratio * key_dim.
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * num_heads
        self.attn_ratio = attn_ratio
        # Total q/k/v projection width: values + 2x (queries + keys).
        self.h = self.dh + nh_kd * 2
        self.qkv = Linear_BN(dim, self.h)
        self.proj = nn.Sequential(
            activation(), Linear_BN(
                self.dh, dim, bn_weight_init=0))
        # Build the (N, N) table mapping each position pair to a shared
        # offset index; symmetric offsets share one learnable bias.
        points = list(itertools.product(range(resolution), range(resolution)))
        N = len(points)
        attention_offsets = {}
        idxs = []
        for p1 in points:
            for p2 in points:
                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        # Learnable bias table, (num_heads, num_offsets); weight decay disabled.
        self.attention_biases = self.create_parameter(
            shape=(num_heads, len(attention_offsets)),
            default_initializer=zeros_,
            attr=paddle.ParamAttr(regularizer=L2Decay(0.0)))
        tensor_idxs = paddle.to_tensor(idxs, dtype='int64')
        self.register_buffer('attention_bias_idxs',
                             paddle.reshape(tensor_idxs, [N, N]))
    @paddle.no_grad()
    def train(self, mode=True):
        # When switching to eval (mode=False), pre-compute and cache the
        # gathered biases in self.ab; when back in train mode, drop the cache.
        if mode:
            super().train()
        else:
            super().eval()
        if mode and hasattr(self, 'ab'):
            del self.ab
        else:
            self.ab = cal_attention_biases(self.attention_biases,
                                           self.attention_bias_idxs)
    def forward(self, x):
        # NOTE(review): forcing self.training = True here makes the branch
        # below always recompute the biases, so the cached `self.ab` path is
        # effectively dead even in eval mode -- confirm this is intentional.
        self.training = True
        B, N, C = x.shape
        # Joint q/k/v projection, split per head into (key, key, value) dims.
        qkv = self.qkv(x)
        qkv = paddle.reshape(qkv,
                             [B, N, self.num_heads, self.h // self.num_heads])
        q, k, v = paddle.split(
            qkv, [self.key_dim, self.key_dim, self.d], axis=3)
        # Move heads ahead of tokens: (B, heads, N, dim_per_head).
        q = paddle.transpose(q, perm=[0, 2, 1, 3])
        k = paddle.transpose(k, perm=[0, 2, 1, 3])
        v = paddle.transpose(v, perm=[0, 2, 1, 3])
        k_transpose = paddle.transpose(k, perm=[0, 1, 3, 2])
        if self.training:
            attention_biases = cal_attention_biases(self.attention_biases,
                                                    self.attention_bias_idxs)
        else:
            attention_biases = self.ab
        # Scaled dot-product logits plus the relative-position biases.
        attn = ((q @k_transpose) * self.scale + attention_biases)
        attn = F.softmax(attn)
        x = paddle.transpose(attn @v, perm=[0, 2, 1, 3])
        x = paddle.reshape(x, [B, N, self.dh])
        x = self.proj(x)
        return x
class Subsample(nn.Layer):
    """Spatially subsample a flattened token sequence by ``stride``."""
    def __init__(self, stride, resolution):
        super().__init__()
        self.stride = stride
        self.resolution = resolution
    def forward(self, x):
        # Unflatten tokens to (B, H, W, C), keep every `stride`-th row and
        # column, then flatten back to (B, N', C).
        B, N, C = x.shape
        x = paddle.reshape(x, [B, self.resolution, self.resolution,
                               C])[:, ::self.stride, ::self.stride]
        x = paddle.reshape(x, [B, -1, C])
        return x
class AttentionSubsample(nn.Layer):
    """Downsampling attention block: queries come from a strided token grid.

    Keys/values are computed on the full-resolution tokens while queries are
    taken from the spatially subsampled grid, so the output sequence length
    shrinks to ``resolution_ ** 2`` and the channel width changes
    ``in_dim -> out_dim``.
    """
    def __init__(self,
                 in_dim,
                 out_dim,
                 key_dim,
                 num_heads=8,
                 attn_ratio=2,
                 activation=None,
                 stride=2,
                 resolution=14,
                 resolution_=7):
        super().__init__()
        self.num_heads = num_heads
        self.scale = key_dim**-0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        # Per-head value dim and total value width.
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * self.num_heads
        self.attn_ratio = attn_ratio
        self.resolution_ = resolution_
        self.resolution_2 = resolution_**2
        self.training = True
        # Joint key/value projection on the full-resolution tokens.
        h = self.dh + nh_kd
        self.kv = Linear_BN(in_dim, h)
        # Queries: subsample the token grid first, then project.
        self.q = nn.Sequential(
            Subsample(stride, resolution), Linear_BN(in_dim, nh_kd))
        self.proj = nn.Sequential(activation(), Linear_BN(self.dh, out_dim))
        self.stride = stride
        self.resolution = resolution
        # Relative-position offsets between the strided query grid and the
        # full key grid; symmetric offsets share one learnable bias.
        points = list(itertools.product(range(resolution), range(resolution)))
        points_ = list(
            itertools.product(range(resolution_), range(resolution_)))
        N = len(points)
        N_ = len(points_)
        attention_offsets = {}
        idxs = []
        i = 0
        j = 0
        for p1 in points_:
            i += 1
            for p2 in points:
                j += 1
                size = 1
                offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2),
                          abs(p1[1] * stride - p2[1] + (size - 1) / 2))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        # Learnable bias table, (num_heads, num_offsets); weight decay disabled.
        self.attention_biases = self.create_parameter(
            shape=(num_heads, len(attention_offsets)),
            default_initializer=zeros_,
            attr=paddle.ParamAttr(regularizer=L2Decay(0.0)))
        tensor_idxs_ = paddle.to_tensor(idxs, dtype='int64')
        self.register_buffer('attention_bias_idxs',
                             paddle.reshape(tensor_idxs_, [N_, N]))
    @paddle.no_grad()
    def train(self, mode=True):
        # When switching to eval (mode=False), pre-compute and cache the
        # gathered biases in self.ab; when back in train mode, drop the cache.
        if mode:
            super().train()
        else:
            super().eval()
        if mode and hasattr(self, 'ab'):
            del self.ab
        else:
            self.ab = cal_attention_biases(self.attention_biases,
                                           self.attention_bias_idxs)
    def forward(self, x):
        # NOTE(review): forcing self.training = True makes the branch below
        # always recompute the biases; the cached `self.ab` path is
        # effectively dead even in eval mode -- confirm this is intentional.
        self.training = True
        B, N, C = x.shape
        kv = self.kv(x)
        kv = paddle.reshape(kv, [B, N, self.num_heads, -1])
        k, v = paddle.split(kv, [self.key_dim, self.d], axis=3)
        k = paddle.transpose(k, perm=[0, 2, 1, 3])  # BHNC
        v = paddle.transpose(v, perm=[0, 2, 1, 3])
        # Queries from the subsampled grid: (B, heads, resolution_^2, key_dim).
        q = paddle.reshape(
            self.q(x), [B, self.resolution_2, self.num_heads, self.key_dim])
        q = paddle.transpose(q, perm=[0, 2, 1, 3])
        if self.training:
            attention_biases = cal_attention_biases(self.attention_biases,
                                                    self.attention_bias_idxs)
        else:
            attention_biases = self.ab
        # Scaled dot-product logits plus the relative-position biases.
        attn = (q @paddle.transpose(
            k, perm=[0, 1, 3, 2])) * self.scale + attention_biases
        attn = F.softmax(attn)
        x = paddle.reshape(
            paddle.transpose(
                (attn @v), perm=[0, 2, 1, 3]), [B, -1, self.dh])
        x = self.proj(x)
        return x
class LeViT(nn.Layer):
    """ Vision Transformer with support for patch or hybrid CNN input stage.

    Builds the stages described by the parallel per-stage lists (embed_dim,
    key_dim, depth, num_heads, attn_ratio, mlp_ratio), inserting an
    AttentionSubsample block between stages as directed by ``down_ops``.
    When ``distillation`` is set, a second classification head is added and
    the two head outputs are averaged at inference time.
    """
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 class_dim=1000,
                 embed_dim=[192],
                 key_dim=[64],
                 depth=[12],
                 num_heads=[3],
                 attn_ratio=[2],
                 mlp_ratio=[2],
                 hybrid_backbone=None,
                 down_ops=[],
                 attention_activation=nn.Hardswish,
                 mlp_activation=nn.Hardswish,
                 distillation=True,
                 drop_path=0):
        super().__init__()
        self.class_dim = class_dim
        self.num_features = embed_dim[-1]
        self.embed_dim = embed_dim
        self.distillation = distillation
        self.patch_embed = hybrid_backbone
        self.blocks = []
        # Copy before appending: the original appended the sentinel directly
        # to the (mutable default) argument, so constructing LeViT repeatedly
        # without an explicit `down_ops` would accumulate [''] entries across
        # instances, and callers' lists were mutated.
        down_ops = list(down_ops)
        down_ops.append([''])
        resolution = img_size // patch_size
        for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate(
                zip(embed_dim, key_dim, depth, num_heads, attn_ratio,
                    mlp_ratio, down_ops)):
            # `dpth` residual attention blocks, each optionally followed by
            # a residual MLP when the stage's mlp_ratio is positive.
            for _ in range(dpth):
                self.blocks.append(
                    Residual(
                        Attention(
                            ed,
                            kd,
                            nh,
                            attn_ratio=ar,
                            activation=attention_activation,
                            resolution=resolution, ),
                        drop_path))
                if mr > 0:
                    h = int(ed * mr)
                    self.blocks.append(
                        Residual(
                            nn.Sequential(
                                Linear_BN(ed, h),
                                mlp_activation(),
                                Linear_BN(
                                    h, ed, bn_weight_init=0), ),
                            drop_path))
            if do[0] == 'Subsample':
                #('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride)
                resolution_ = (resolution - 1) // do[5] + 1
                self.blocks.append(
                    AttentionSubsample(
                        *embed_dim[i:i + 2],
                        key_dim=do[1],
                        num_heads=do[2],
                        attn_ratio=do[3],
                        activation=attention_activation,
                        stride=do[5],
                        resolution=resolution,
                        resolution_=resolution_))
                resolution = resolution_
                if do[4] > 0: # mlp_ratio
                    h = int(embed_dim[i + 1] * do[4])
                    self.blocks.append(
                        Residual(
                            nn.Sequential(
                                Linear_BN(embed_dim[i + 1], h),
                                mlp_activation(),
                                Linear_BN(
                                    h, embed_dim[i + 1], bn_weight_init=0), ),
                            drop_path))
        self.blocks = nn.Sequential(*self.blocks)
        # Classifier head
        self.head = BN_Linear(embed_dim[-1],
                              class_dim) if class_dim > 0 else Identity()
        if distillation:
            self.head_dist = BN_Linear(
                embed_dim[-1], class_dim) if class_dim > 0 else Identity()
    def forward(self, x):
        # Stem -> flatten spatial dims -> (B, tokens, channels) for attention.
        x = self.patch_embed(x)
        x = x.flatten(2)
        x = paddle.transpose(x, perm=[0, 2, 1])
        x = self.blocks(x)
        # Global average pooling over tokens.
        x = x.mean(1)
        if self.distillation:
            x = self.head(x), self.head_dist(x)
            if not self.training:
                # At inference, average the classification and distillation heads.
                x = (x[0] + x[1]) / 2
        else:
            x = self.head(x)
        return x
def model_factory(C, D, X, N, drop_path, class_dim, distillation):
    """Build a LeViT model from underscore-separated per-stage specs.

    Args:
        C: embedding dims per stage, e.g. '128_256_384'.
        D: key dim shared by all three stages.
        X: depths per stage, e.g. '2_3_4'.
        N: head counts per stage, e.g. '4_6_8'.
        drop_path: stochastic-depth rate passed to every residual block.
        class_dim: number of output classes (0 disables the head).
        distillation: add a second (distillation) classification head.
    """
    embed_dim = [int(x) for x in C.split('_')]
    num_heads = [int(x) for x in N.split('_')]
    depth = [int(x) for x in X.split('_')]
    act = nn.Hardswish
    model = LeViT(
        patch_size=16,
        embed_dim=embed_dim,
        num_heads=num_heads,
        key_dim=[D] * 3,
        depth=depth,
        attn_ratio=[2, 2, 2],
        mlp_ratio=[2, 2, 2],
        down_ops=[
            #('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride)
            ['Subsample', D, embed_dim[0] // D, 4, 2, 2],
            ['Subsample', D, embed_dim[1] // D, 4, 2, 2],
        ],
        attention_activation=act,
        mlp_activation=act,
        hybrid_backbone=b16(embed_dim[0], activation=act),
        class_dim=class_dim,
        drop_path=drop_path,
        distillation=distillation)
    return model
specification = {
'LeViT_128S': {
'C': '128_256_384',
'D': 16,
'N': '4_6_8',
'X': '2_3_4',
'drop_path': 0
},
'LeViT_128': {
'C': '128_256_384',
'D': 16,
'N': '4_8_12',
'X': '4_4_4',
'drop_path': 0
},
'LeViT_192': {
'C': '192_288_384',
'D': 32,
'N': '3_5_6',
'X': '4_4_4',
'drop_path': 0
},
'LeViT_256': {
'C': '256_384_512',
'D': 32,
'N': '4_6_8',
'X': '4_4_4',
'drop_path': 0
},
'LeViT_384': {
'C': '384_512_768',
'D': 32,
'N': '6_9_12',
'X': '4_4_4',
'drop_path': 0.1
},
}
def _load_pretrained(pretrained, model, model_url, use_ssld=False):
if pretrained is False:
pass
elif pretrained is True:
load_dygraph_pretrain_from_url(model, model_url, use_ssld=use_ssld)
elif isinstance(pretrained, str):
load_dygraph_pretrain(model, pretrained)
else:
raise RuntimeError(
"pretrained type is not available. Please use `string` or `boolean` type."
)
def LeViT_128S(pretrained=False, use_ssld=False, class_dim=1000, distillation=False, **kwargs):
    """Build LeViT-128S and optionally load pretrained weights (see ``_load_pretrained``)."""
    model = model_factory(
        **specification['LeViT_128S'],
        class_dim=class_dim,
        distillation=distillation)
    _load_pretrained(pretrained, model, MODEL_URLS["LeViT_128S"], use_ssld=use_ssld)
    return model
def LeViT_128(pretrained=False, use_ssld=False, class_dim=1000, distillation=False, **kwargs):
    """Build LeViT-128 and optionally load pretrained weights (see ``_load_pretrained``)."""
    model = model_factory(
        **specification['LeViT_128'],
        class_dim=class_dim,
        distillation=distillation)
    _load_pretrained(pretrained, model, MODEL_URLS["LeViT_128"], use_ssld=use_ssld)
    return model
def LeViT_192(pretrained=False, use_ssld=False, class_dim=1000, distillation=False, **kwargs):
    """Build LeViT-192 and optionally load pretrained weights (see ``_load_pretrained``)."""
    model = model_factory(
        **specification['LeViT_192'],
        class_dim=class_dim,
        distillation=distillation)
    _load_pretrained(pretrained, model, MODEL_URLS["LeViT_192"], use_ssld=use_ssld)
    return model
def LeViT_256(pretrained=False, use_ssld=False, class_dim=1000, distillation=False, **kwargs):
    """Build LeViT-256 and optionally load pretrained weights (see ``_load_pretrained``)."""
    model = model_factory(
        **specification['LeViT_256'],
        class_dim=class_dim,
        distillation=distillation)
    _load_pretrained(pretrained, model, MODEL_URLS["LeViT_256"], use_ssld=use_ssld)
    return model
def LeViT_384(pretrained=False, use_ssld=False, class_dim=1000, distillation=False, **kwargs):
    """Build LeViT-384 and optionally load pretrained weights (see ``_load_pretrained``)."""
    model = model_factory(
        **specification['LeViT_384'],
        class_dim=class_dim,
        distillation=distillation)
    _load_pretrained(pretrained, model, MODEL_URLS["LeViT_384"], use_ssld=use_ssld)
    return model
| 34.328467 | 119 | 0.536253 |
1e45fd48aa214d10ea9511ba99af204eaedc136a | 3,704 | py | Python | log_queries.py | calpoly-csai/swanson | 8f5aed72ebabf06daaf37ba6a6feefec06654802 | [
"MIT"
] | 3 | 2020-07-26T21:08:58.000Z | 2021-04-26T04:07:25.000Z | log_queries.py | calpoly-csai/swanson | 8f5aed72ebabf06daaf37ba6a6feefec06654802 | [
"MIT"
] | 34 | 2020-07-26T20:32:13.000Z | 2021-02-02T01:07:13.000Z | log_queries.py | calpoly-csai/swanson | 8f5aed72ebabf06daaf37ba6a6feefec06654802 | [
"MIT"
] | 2 | 2020-08-25T23:10:15.000Z | 2021-11-08T21:24:34.000Z | import datetime
import pickle
from flask import Flask, request
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from os import path, environ
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets'] # Allows read and write access
# Needs to be up here, since we call it on the next line.
def get_spreadsheet_id():
    """Read the target Google Sheet ID from ``id.txt``.

    Returns the first line with surrounding whitespace stripped: a raw
    ``readline()`` keeps the trailing newline, which would corrupt the
    spreadsheet ID passed to the Sheets API.
    """
    with open("id.txt") as id_file:
        return id_file.readline().strip()
# The ID, range, and auth path for appending to the spreadsheet.
SPREADSHEET_ID = environ.get("SPREADSHEET_ID", get_spreadsheet_id())
RANGE_NAME = 'A1' # Should always place the new query correctly at the bottom of the table
AUTH_PATH = 'credentials.json'
BAD_REQUEST = 400
SUCCESS = 200
SERVER_ERROR = 500
app = Flask(__name__)
def config_api():
    """
    Configures the Google Sheets API service to be used for appending data
    :return: The service as a Resource object, or None if there is no pickle and the authentication JSON can't be found
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    # SECURITY NOTE(review): pickle.load executes arbitrary code from the file;
    # token.pickle must never come from an untrusted source.
    if path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            try:
                flow = InstalledAppFlow.from_client_secrets_file(AUTH_PATH, SCOPES)
            except FileNotFoundError as e:
                # Without credentials.json the OAuth flow cannot start; report
                # and let the caller deal with a None service.
                print(e)
                return None
            # Interactive step: opens a browser for the user to authorize.
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return build('sheets', 'v4', credentials=creds)
def log_query(service, question: str, answer: str, sentiment: str = "N/A", spreadsheet_id: str = SPREADSHEET_ID) -> int:
    """Append one question/answer record (plus sentiment and timestamp) to the sheet.

    :param service: Google Sheets API service
    :param question: User question
    :param answer: Chat bot answer
    :param sentiment: Positive or negative sentiment for the exchange, defaults to "N/A"
    :param spreadsheet_id: ID of the spreadsheet to append to
    :return: 0 on success, 1 on failure
    """
    row = [question, answer, sentiment, str(datetime.datetime.now())]
    request_body = {
        'values': [row]
    }
    try:
        service.spreadsheets().values().append(
            spreadsheetId=spreadsheet_id,
            range=RANGE_NAME,
            valueInputOption='RAW',
            body=request_body
        ).execute()
    except Exception as exc:
        # Best-effort logging: report the failure and signal it via the return code.
        print(exc)
        return 1
    return 0
@app.route("/query", methods=["POST"])
def log_route():
    """Handle POST /query: validate the JSON payload and log it to the sheet."""
    payload = request.get_json()
    fields = [payload.get(key) for key in ("question", "answer", "sentiment")]
    if any(value is None for value in fields):
        return "Request was missing a required parameter", BAD_REQUEST
    question, answer, sentiment = fields
    if log_query(config_api(), question, answer, sentiment) == 0:
        return "Success", SUCCESS
    return "Failed to log the query", SERVER_ERROR
if __name__ == "__main__":
app.run(host="0.0.0.0")
| 32.778761 | 120 | 0.674946 |
cafa01eebf61dad95766432bc5b7bbda89b102b2 | 16,983 | py | Python | Savethemblobs/PythonistaKit.framework/pylib/HTMLParser.py | iApeiron/Savethemblobs_app | 38184facf78b55ba89a727be7b1fc08d6085f20c | [
"MIT"
] | 19 | 2017-05-17T16:48:02.000Z | 2020-08-18T18:21:45.000Z | Savethemblobs/PythonistaKit.framework/pylib/HTMLParser.py | iApeiron/Savethemblobs_app | 38184facf78b55ba89a727be7b1fc08d6085f20c | [
"MIT"
] | 2 | 2017-05-17T06:41:47.000Z | 2017-05-17T17:27:13.000Z | Savethemblobs/PythonistaKit.framework/pylib/HTMLParser.py | iApeiron/Savethemblobs_app | 38184facf78b55ba89a727be7b1fc08d6085f20c | [
"MIT"
] | 4 | 2017-05-17T03:56:25.000Z | 2018-11-09T00:00:20.000Z |
"""A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
attrfind = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Raised for all parse errors, carrying an optional (line, column) position."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno, self.offset = position

    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # Column is reported 1-based for human consumption.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag(). The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks). Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument. Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """
    # Elements whose content is treated as raw character data: no nested
    # markup is recognized until the matching close tag.
    CDATA_CONTENT_ELEMENTS = ("script", "style")
    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()
    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        markupbase.ParserBase.reset(self)
    def feed(self, data):
        r"""Feed data to the parser.
        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)
    def close(self):
        """Handle any buffered data."""
        self.goahead(1)
    def error(self, message):
        # Raise HTMLParseError annotated with the current (line, column).
        raise HTMLParseError(message, self.getpos())
    __starttag_text = None
    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text
    def set_cdata_mode(self, elem):
        # Enter raw-text mode: only the matching close tag is "interesting".
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None
    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                # In CDATA mode we must not flush data until the close tag
                # arrives; in normal mode everything left is plain data.
                if self.cdata_elem:
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # Construct is incomplete; wait for more data unless this
                    # is the final call, in which case emit it as best we can.
                    if not end:
                        break
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: #bail by consuming &#
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]
    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<!':
            self.error('unexpected call to parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)
    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+2] not in ('<!', '</'):
            self.error('unexpected call to parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1
    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j
    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                # Attribute without a value, e.g. <input disabled>.
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Malformed tail: report the whole span as data, after fixing up
            # the position bookkeeping for multi-line start tags.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos
    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")
    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                # Inside raw text a malformed close tag is just data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after tha name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1
        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                # Close tag for some other element: still raw data here.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
        self.handle_endtag(elem)
        self.clear_cdata_mode()
        return gtpos
    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)
    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass
    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass
    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass
    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass
    # Overridable -- handle data
    def handle_data(self, data):
        pass
    # Overridable -- handle comment
    def handle_comment(self, data):
        pass
    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass
    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass
    def unknown_decl(self, data):
        pass
    # Internal -- helper to remove special character quoting
    # Lazily populated in unescape(): maps entity name -> unicode character.
    entitydefs = None
    def unescape(self, s):
        # NOTE: Python 2 implementation (unichr / iteritems); returns s
        # unchanged when it contains no '&' at all.
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return unichr(c)
            except ValueError:
                # Not a valid numeric reference: leave it as literal text.
                return '&#'+s+';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser supports apos,
                # which is not part of HTML 4
                import htmlentitydefs
                if HTMLParser.entitydefs is None:
                    entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
                    for k, v in htmlentitydefs.name2codepoint.iteritems():
                        entitydefs[k] = unichr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    return '&'+s+';'
        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
| 35.829114 | 87 | 0.508626 |
07deb9b9939240c6fc5e284d4562317b41b5a3b7 | 1,669 | py | Python | examples/keras_example.py | vittot/pyCeterisParibus | efe5835574026fe6b1a6993cc08cc34e67b8e018 | [
"Apache-2.0"
] | 22 | 2019-04-06T17:33:12.000Z | 2021-12-13T21:46:47.000Z | examples/keras_example.py | vittot/pyCeterisParibus | efe5835574026fe6b1a6993cc08cc34e67b8e018 | [
"Apache-2.0"
] | 15 | 2018-11-27T17:50:16.000Z | 2019-04-23T17:07:43.000Z | examples/keras_example.py | vittot/pyCeterisParibus | efe5835574026fe6b1a6993cc08cc34e67b8e018 | [
"Apache-2.0"
] | 8 | 2018-12-12T12:24:21.000Z | 2022-02-06T21:09:55.000Z | from keras.layers import Dense, Activation
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from ceteris_paribus.explainer import explain
from ceteris_paribus.plots.plots import plot
from ceteris_paribus.profiles import individual_variable_profile
# Load the Boston housing dataset and hold out a third of it for evaluation.
boston = load_boston()
x = boston.data
y = boston.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
def network_architecture():
    """Return a compiled MLP regressor: two tanh hidden layers (640, 320) and
    a single linear output, trained with MSE/Adam."""
    layers = [
        Dense(640, input_dim=x.shape[1]),
        Activation('tanh'),
        Dense(320),
        Activation('tanh'),
        Dense(1),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    net.compile(loss='mean_squared_error', optimizer='adam')
    return net
def keras_model():
    """Fit a scaler+Keras-MLP pipeline on the training split.

    Returns the fitted pipeline together with the training data and the
    feature names, matching what the explainer below expects.
    """
    pipeline = Pipeline([
        ('scaler', StandardScaler()),
        ('mlp', KerasRegressor(build_fn=network_architecture, epochs=200)),
    ])
    pipeline.fit(x_train, y_train)
    return pipeline, x_train, y_train, boston.feature_names
if __name__ == "__main__":
    # Train the model, build ceteris-paribus profiles for the first ten
    # training observations, and plot a subset of the variables.
    model, x_train, y_train, var_names = keras_model()
    explainer_keras = explain(model, var_names, x_train, y_train, label='KerasMLP')
    cp = individual_variable_profile(explainer_keras, x_train[:10], y=y_train[:10],
                                     variables=["CRIM", "ZN", "AGE", "INDUS", "B"])
    plot(cp, show_residuals=True, selected_variables=["CRIM", "ZN", "AGE", "B"], show_observations=True,
         show_rugs=True)
| 35.510638 | 104 | 0.724985 |
f7affd78d47b0ca00a5ca32636b890ef6eea5516 | 1,941 | py | Python | config/wsgi.py | stephenaiesi/customcreations.site | 6c47f033c740b180d7598ef2bfdbbe8a0f71447c | [
"MIT"
] | null | null | null | config/wsgi.py | stephenaiesi/customcreations.site | 6c47f033c740b180d7598ef2bfdbbe8a0f71447c | [
"MIT"
] | null | null | null | config/wsgi.py | stephenaiesi/customcreations.site | 6c47f033c740b180d7598ef2bfdbbe8a0f71447c | [
"MIT"
] | null | null | null | """
WSGI config for Custom Creations project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# ccreations directory.
# NOTE(review): the '/config' string replace assumes POSIX path separators;
# presumably fine for the deployment targets, but verify if ever run on Windows.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'ccreations'))
# Sentry's WSGI middleware is only needed (and only importable) in production.
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
    from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
    application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
1b6dec3580eab314335f2dc2e82c74a5031177d9 | 1,309 | py | Python | Lab 3/word_count_system_architecture_client.py | lucca30/distributed-systems | e463a894f679b8ac6db6cddfc0e0302d2f08927f | [
"MIT"
] | null | null | null | Lab 3/word_count_system_architecture_client.py | lucca30/distributed-systems | e463a894f679b8ac6db6cddfc0e0302d2f08927f | [
"MIT"
] | null | null | null | Lab 3/word_count_system_architecture_client.py | lucca30/distributed-systems | e463a894f679b8ac6db6cddfc0e0302d2f08927f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from socket import *
import time
import json
# Server endpoint and retry behaviour for the word-count client.
host = 'localhost'
port = 7001
retry_time_secs = 10
# NOTE(review): despite the name, this value is used as recv()'s *buffer size*
# in bytes (see wait_for_message), not as a timeout in milliseconds.
wait_time_msecs = 1000
# NOTE(review): this module-level msg appears unused (wait_for_message uses a local).
msg = ''
sckt = socket()
def acquire_connection():
    """Block until a TCP connection to (host, port) is established.

    Retries every ``retry_time_secs`` seconds on connection failure.  Only
    OS-level socket errors are swallowed: the previous bare ``except:`` also
    trapped KeyboardInterrupt/SystemExit, which made the retry loop
    impossible to abort with Ctrl+C.
    """
    connected = False
    # Retry until connected.
    while not connected:
        try:
            print ("Trying to acquire connection in " + str(host) + ":" + str(port))
            sckt.connect((host, port))
            print (" Successfully connected!")
            connected = True
        except OSError:
            print ("Failed acquire connection in " + str(host) + ":" + str(port))
            print (" Trying again in " + str(retry_time_secs) + " seconds...")
            time.sleep(retry_time_secs)
def wait_for_message():
    """Receive one chunk from the server socket and print it.

    NOTE(review): ``wait_time_msecs`` (1000) is passed as ``recv``'s *buffer
    size* in bytes, not a timeout — responses longer than 1000 bytes arrive
    truncated/split, and an empty result (peer closed) is not handled.
    """
    msg = sckt.recv(wait_time_msecs)
    print(" Content:\n" + msg.decode("utf-8"))
def user_interaction():
    """Prompt for file/word pairs, send each request as JSON, print each reply.

    The loop ends when the user enters '-q' as the file name.
    """
    # iter(callable, sentinel) keeps prompting until the sentinel '-q' appears.
    for file_name in iter(lambda: input("Enter the file name, -q to quit:"), '-q'):
        word = input("Enter the word to be counted:")
        request = {'file_name': file_name, 'word': word}
        sckt.send(bytes(json.dumps(request), encoding='utf8'))
        wait_for_message()
    print ('The connection was closed by the user')
acquire_connection()
user_interaction()
sckt.close() | 25.666667 | 91 | 0.588235 |
64e6fa8a3bac37ed29fd74a76fc6c7a78b766e6d | 1,760 | py | Python | assistant/core/management/commands/consume.py | kapiak/ware_prod | ae61256890834c434d2e38cc2ccacf00b638665a | [
"MIT"
] | null | null | null | assistant/core/management/commands/consume.py | kapiak/ware_prod | ae61256890834c434d2e38cc2ccacf00b638665a | [
"MIT"
] | null | null | null | assistant/core/management/commands/consume.py | kapiak/ware_prod | ae61256890834c434d2e38cc2ccacf00b638665a | [
"MIT"
] | null | null | null | from django.utils.translation import gettext_lazy as _
from django.core.management.base import BaseCommand
from assistant.core.pub_sub import Consumer
class Command(BaseCommand):
"""A management command to run a worker to consume events from rabbitmq."""
help = _("Consumer Worker from RabbitMQ")
callback_functions = {"update_user": "update_user"}
def add_arguments(self, parser):
parser.add_argument(
"exchange", nargs="+", type=str, help=_("The exchange to consume from."),
)
parser.add_argument(
"queue", nargs="+", type=str, help=_("The queue to consume from.")
)
parser.add_argument(
"routing_key",
nargs="+",
type=str,
help=_("The routing key to consume from."),
)
# parser.add_argument('action', nargs='+', type=str, help=_("The callback function key in the dictionary."))
def _callback(self, channel, method, properties, body):
self.stdout.write(
self.style.SUCCESS(f"{channel} - {method} - {properties} - {body}")
)
def _onerror_callback(self, error):
self.stdout.write(self.style.ERROR(f"{error}"))
def handle(self, *args, **options):
self.stdout.write(self.style.SUCCESS(f"{options['exchange'][0]}"))
self.stdout.write(self.style.SUCCESS(f"{options['queue'][0]}"))
self.stdout.write(self.style.SUCCESS(f"{options['routing_key'][0]}"))
consumer = Consumer(
exchange_name=options["exchange"],
queue_name=options["queue"],
routing_key=options["routing_key"],
callback=self._callback,
error_callback=self._onerror_callback,
)
consumer.start()
| 35.918367 | 116 | 0.615341 |
85f0db23b028addcd806bcdcd286d89eccf8bbe8 | 4,685 | py | Python | bot/cogs/owner.py | jnpoJuwan/Just-a-bot | 31fc52d61312bcca75515b988de68676573ea401 | [
"MIT"
] | null | null | null | bot/cogs/owner.py | jnpoJuwan/Just-a-bot | 31fc52d61312bcca75515b988de68676573ea401 | [
"MIT"
] | null | null | null | bot/cogs/owner.py | jnpoJuwan/Just-a-bot | 31fc52d61312bcca75515b988de68676573ea401 | [
"MIT"
] | null | null | null | import io
import textwrap
import traceback
from contextlib import redirect_stdout
from pathlib import Path
from discord.ext import commands
from ..utils import checks
class Owner(commands.Cog, command_attrs=dict(hidden=True)):
    """Owner-only maintenance commands: shutdown, code evaluation, and
    extension (cog) management.  Every command is hidden from the help list
    and gated behind ``checks.is_bot_owner()``."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name='quit', aliases=['die', 'logout', 'sleep'])
    @checks.is_bot_owner()
    async def _quit(self, ctx):
        """Logout from Discord."""
        await ctx.send('**change da world**\n**my final message. Goodb ye**')
        await self.bot.logout()

    @staticmethod
    def cleanup_code(content):
        """Strip a Markdown code fence (```py ... ```) or inline backticks."""
        # Remove ```py\n```.
        if content.startswith('```') and content.endswith('```'):
            return '\n'.join(content.split('\n')[1:-1])
        # Remove `foo`.
        return content.strip('` \n')

    # CRED: @Rapptz (https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/admin.py#L216)
    @commands.command(name='eval', pass_context=True)
    @checks.is_bot_owner()
    async def eval_(self, ctx, *, body: str):
        """Evaluates Python code."""
        # Expose useful context objects to the evaluated snippet.
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'channel': ctx.channel,
            'author': ctx.author,
            'guild': ctx.guild,
            'message': ctx.message
        }
        env.update(globals())
        body = self.cleanup_code(body)
        stdout = io.StringIO()
        # Wrap the body in an async function so 'await' works inside it.
        to_compile = f'async def func():\n{textwrap.indent(body, "  ")}'
        try:
            exec(to_compile, env)
        except Exception as e:
            return await ctx.send(f'```\n{e.__class__.__name__}: {e}\n```')
        func = env['func']
        try:
            with redirect_stdout(stdout):
                ret = await func()
        except:  # noqa: E722 -- deliberately catch anything the user code raises
            value = stdout.getvalue()
            await ctx.send(f'```\n{value}{traceback.format_exc()}\n```')
        else:
            value = stdout.getvalue()
            if ret is None:
                # Nothing returned: only report captured stdout, if any.
                if value:
                    await ctx.send(f'```py\n{value}\n```')
            else:
                # Remember the last non-None result for later inspection.
                self._last_result = ret
                await ctx.send(f'```\n{value}{ret}\n```')

    # CRED: @Rapptz (https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/admin.py#L116)
    @commands.command()
    @checks.is_bot_owner()
    async def load(self, ctx, module):
        """Loads a module."""
        try:
            self.bot.load_extension(module)
        except Exception as e:
            traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
            await ctx.send(f'Failed to load cog {module}. Traceback:\n```{traceback_msg}```')
        else:
            await ctx.send(f'`{module}` has been loaded.')

    @commands.command()
    @checks.is_bot_owner()
    async def unload(self, ctx, module):
        """Unloads a module."""
        try:
            self.bot.unload_extension(module)
        except Exception as e:
            traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
            # Fixed copy-paste: the message previously said "Failed to load".
            await ctx.send(f'Failed to unload cog {module}. Traceback:\n```{traceback_msg}```')
        else:
            await ctx.send(f'`{module}` has been unloaded.')

    @commands.command()
    @checks.is_bot_owner()
    async def reload(self, ctx, module):
        """Reloads a module."""
        try:
            self.bot.reload_extension(module)
        except Exception as e:
            traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
            # Fixed copy-paste: the message previously said "Failed to load".
            await ctx.send(f'Failed to reload cog {module}. Traceback:\n```{traceback_msg}```')
        else:
            await ctx.send(f'`{module}` has been reloaded.')

    @commands.command()
    @checks.is_bot_owner()
    async def reload_all(self, ctx):
        """Reloads all extensions."""
        content = 'Reloading modules...'
        message = await ctx.send('Reloading modules...')
        for extension_path in Path('bot/cogs').glob('*.py'):
            extension_name = extension_path.stem
            dotted_path = f'bot.cogs.{extension_name}'
            try:
                self.bot.reload_extension(dotted_path)
                content += f'\nReloaded `{dotted_path}`.'
                await message.edit(content=content)
            except Exception as e:
                traceback_msg = ''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))
                # Fixed copy-paste: the message previously said "Failed to load".
                await ctx.send(f'Failed to reload cog {dotted_path}. Traceback:\n```{traceback_msg}```')
        content += '\nSuccessfully reloaded all extensions.'
        await message.edit(content=content)
def setup(bot):
    # Extension entry point called by discord.py: register the Owner cog.
    bot.add_cog(Owner(bot))
| 34.19708 | 111 | 0.573533 |
448c5968b1c12540fa3ddba1e372f1a591079285 | 849 | py | Python | scipy_central/feeds/urls.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | scipy_central/feeds/urls.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | scipy_central/feeds/urls.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, url
from django.contrib.comments.feeds import LatestCommentFeed
import feeds
# NOTE: order matters — Django uses the first matching pattern.  The atom feed
# must precede the greedy tag pattern, which would otherwise capture requests
# for /atom/ as tag_slug="atom" and make the atom feed unreachable.
urlpatterns = patterns('scipy_central.feeds.views',
    # latest comments
    url(r'^comments/$', LatestCommentFeed(), name="spc-rss-latest-comments"),
    # all revision comments
    url(r'^comments/(?P<item_id>\d+)/(?P<rev_id>\d+)/$', feeds.RssCommentFeed(), name="spc-rss-comment-feed"),
    # recent submission feed in rss
    url(r'^$', feeds.RssSiteFeed(), name='spc-rss-recent-submissions'),
    # recent submission feed in atom
    url(r'^atom/$', feeds.AtomSiteFeed(), name="spc-atom-recent-submissions"),
    # submission feed in rss
    url(r'^(?P<item_id>\d+)/$', feeds.RssSubmissionFeed(), name='spc-rss-submission-feed'),
    # show tag feeds in rss (greedy catch-all: keep last)
    url(r'^(?P<tag_slug>.+)/$', feeds.RssTagFeed(), name='spc-rss-tag-feed'),
)
975fff16ecb812ee2d92144b2486c964a9da73e4 | 326 | py | Python | day02/python/murku/tests/test_solution.py | murku/aoc-2020 | bc74b78905524e185b10b2afabcd89ee9f3c2fa7 | [
"MIT"
] | null | null | null | day02/python/murku/tests/test_solution.py | murku/aoc-2020 | bc74b78905524e185b10b2afabcd89ee9f3c2fa7 | [
"MIT"
] | null | null | null | day02/python/murku/tests/test_solution.py | murku/aoc-2020 | bc74b78905524e185b10b2afabcd89ee9f3c2fa7 | [
"MIT"
] | null | null | null | import pytest
# from solution import multiply_list
multiply_list_testdata = [
([1], 1),
([2, 3], 3),
([2, 5, 6], 60)
]
@pytest.mark.parametrize("numbers,expected", multiply_list_testdata)
def test_multiply_list(numbers, expected):
actual = multiply_list(numbers)
assert actual == expected, "test failed"
| 23.285714 | 68 | 0.690184 |
f5cf542b13af485c545f526728a79993112b594f | 8,230 | py | Python | server/planarserver.py | Kruptein/PlanarAlly | 6c7dd9d02474a86610f0aa0b2afb92f400438818 | [
"MIT"
] | 300 | 2018-03-22T12:09:15.000Z | 2022-03-30T05:33:39.000Z | server/planarserver.py | Kruptein/PlanarAlly | 6c7dd9d02474a86610f0aa0b2afb92f400438818 | [
"MIT"
] | 482 | 2018-03-08T23:44:56.000Z | 2022-03-30T18:06:04.000Z | server/planarserver.py | Kruptein/PlanarAlly | 6c7dd9d02474a86610f0aa0b2afb92f400438818 | [
"MIT"
] | 79 | 2018-05-12T18:42:59.000Z | 2022-03-29T17:12:22.000Z | """
PlanarAlly backend server code.
This is the code responsible for starting the backend and reacting to socket IO events.
"""
# Check for existence of './templates/' as it is not present if client was not built before
from argparse import ArgumentParser
import getpass
import os
import sys
from urllib.parse import quote, unquote
from export.campaign import import_campaign
from utils import FILE_DIR
from types import SimpleNamespace
# Mimetype recognition for js files apparently is not always properly setup out of the box for some users out there.
import mimetypes
import save
# Check the save file before importing anything that touches the database;
# True when a brand-new save was just created (consumed later by server_main).
save_newly_created = save.check_existence()
import asyncio
import configparser
from aiohttp import web
import api.http
import routes
from state.asset import asset_state
from state.game import game_state
# Force loading of socketio routes
from api.socket import *
from api.socket.constants import GAME_NS
from app import api_app, app as main_app, runners, setup_runner, sio
from config import config
from models import User, Room
from utils import logger
loop = asyncio.get_event_loop()
# This is a fix for asyncio problems on windows that make it impossible to do ctrl+c
if sys.platform.startswith("win"):
    # Schedule a no-op callback every 100ms so the Windows event loop wakes up
    # often enough to notice KeyboardInterrupt.
    def _wakeup():
        loop.call_later(0.1, _wakeup)
    loop.call_later(0.1, _wakeup)
async def on_shutdown(_):
    """aiohttp shutdown hook: disconnect every connected game and asset client."""
    connected_sids = list(game_state._sid_map) + list(asset_state._sid_map)
    for sid in connected_sids:
        await sio.disconnect(sid, namespace=GAME_NS)
async def start_http(app: web.Application, host, port):
    # Serve *app* over plain, unencrypted HTTP on host:port.
    logger.warning(" RUNNING IN NON SSL CONTEXT ")
    await setup_runner(app, web.TCPSite, host=host, port=port)
async def start_https(app: web.Application, host, port, chain, key):
    """Serve *app* over HTTPS on host:port using the given cert chain and key.

    Aborts the process (exit code 2) when the certificate files are missing.
    """
    import ssl

    # PROTOCOL_TLS_SERVER selects server-side defaults and auto-negotiates the
    # highest TLS version both sides support; protocol-less SSLContext() is
    # deprecated and does not configure server-appropriate defaults.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    try:
        ctx.load_cert_chain(chain, key)
    except FileNotFoundError:
        logger.critical("SSL FILES ARE NOT FOUND. ABORTING LAUNCH.")
        sys.exit(2)
    await setup_runner(
        app,
        web.TCPSite,
        host=host,
        port=port,
        ssl_context=ctx,
    )
async def start_socket(app: web.Application, sock):
    """Serve *app* on a Unix domain socket at filesystem path *sock*."""
    await setup_runner(app, web.UnixSite, path=sock)
async def start_server(server_section: str):
    """Start the server described by *server_section* of the config file.

    Chooses, in order: a Unix socket if configured, then HTTPS if ssl is
    enabled, otherwise plain HTTP, and prints where the server is listening.
    """
    app = api_app if server_section == "APIserver" else main_app
    unix_socket = config.get(server_section, "socket", fallback=None)

    if unix_socket:
        await start_socket(app, unix_socket)
        method = unix_socket
    else:
        host = config.get(server_section, "host")
        port = config.getint(server_section, "port")
        environ = os.environ.get("PA_BASEPATH", "/")
        if config.getboolean(server_section, "ssl"):
            try:
                chain = config.get(server_section, "ssl_fullchain")
                key = config.get(server_section, "ssl_privkey")
            except configparser.NoOptionError:
                logger.critical(
                    "SSL CONFIGURATION IS NOT CORRECTLY CONFIGURED. ABORTING LAUNCH."
                )
                sys.exit(2)
            await start_https(app, host, port, chain, key)
            scheme = "https"
        else:
            await start_http(app, host, port)
            scheme = "http"
        method = f"{scheme}://{host}:{port}{environ}"

    print(f"======== Starting {server_section} on {method} ========")
async def start_servers():
    """Launch the main web server and, when enabled in config, the API server."""
    print()
    await start_server("Webserver")
    print()
    if config.getboolean("APIserver", "enabled"):
        await start_server("APIserver")
    else:
        print("API Server disabled")
    print()
    print("(Press CTRL+C to quit)")
    print()
def server_main(args):
    """Start the PlanarAlly server.

    Verifies the client has been built, registers mimetypes, checks the save
    file, then runs the asyncio loop until interrupted.
    """
    if (not (FILE_DIR / "templates").exists()) and args.dev:
        print(
            "You must gather your par— you must build the client, before starting the server.\nSee https://www.planarally.io/server/setup/self-hosting/ on how to build the client or import a pre-built client."
        )
        sys.exit(1)

    # Some platforms ship without a proper mapping for .js files.
    mimetypes.init()
    mimetypes.types_map[".js"] = "application/javascript; charset=utf-8"

    if not save_newly_created:
        save.check_outdated()

    loop.create_task(start_servers())
    try:
        main_app.on_shutdown.append(on_shutdown)
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to stop the server; a bare `except` here
        # would also silently hide genuine startup/runtime errors.
        pass
    finally:
        for runner in runners:
            loop.run_until_complete(runner.cleanup())
def list_main(args):
    """List all of the requested resource type."""
    resource = args.resource.lower()
    if resource == "user":
        for user in User.select():
            print(user.name)
    elif resource == "room":
        # Rooms print as "creator/room" with both parts URL-quoted, matching
        # the format accepted by get_room().
        for room in Room.select():
            creator = quote(room.creator.name, safe='')
            room_name = quote(room.name, safe='')
            print(f"{creator}/{room_name}")
def get_room(path) -> Room:
    """Resolve a 'creator/room' path (both parts URL-quoted) to a Room row.

    Exits with code 1 when the path does not contain exactly one '/'.
    """
    parts = path.split("/")
    if len(parts) != 2:
        print("Invalid room. The room should have a single '/'")
        sys.exit(1)
    owner_name, room_name = parts
    owner = User.by_name(unquote(owner_name))
    return Room.get(name=unquote(room_name), creator=owner)
def remove_main(args):
    """Remove a requested resource."""
    resource = args.resource.lower()
    if resource == "user":
        User.by_name(args.name).delete_instance()
    elif resource == "room":
        # args.name is a "creator/room" path; see get_room().
        get_room(args.name).delete_instance()
def reset_password_main(args):
    """Reset a users password. Will prompt for the new password if not provided."""
    user = User.by_name(args.name)
    if not user:
        print(f"User with name {args.name} not found.")
        sys.exit(1)

    password = args.password
    if not password:
        # Prompt (twice) until both entries agree.
        while True:
            password = getpass.getpass()
            confirmation = getpass.getpass("Retype password:")
            if password == confirmation:
                break
            print("Passwords do not match.")

    user.set_password(password)
    user.save()
def import_main(args):
    """Import a previously exported campaign from args.file."""
    import_campaign(args.file)
def add_subcommand(name, func, parent_parser, args):
    """Register *func* as sub-command *name* on an argparse subparsers object.

    *args* is a sequence of (flag, kwargs) pairs forwarded to add_argument;
    the handler's docstring becomes the sub-command's help text.
    """
    sub_parser = parent_parser.add_parser(name, help=func.__doc__)
    for flag, options in args:
        sub_parser.add_argument(flag, **options)
    sub_parser.set_defaults(func=func)
def main():
    """CLI entry point: dispatch to serve/list/remove/reset/import sub-commands."""
    if len(sys.argv) < 2 or (len(sys.argv) == 2 and sys.argv[1] == "dev"):
        # To keep the previous syntax, if this script is called with no args,
        # Or with just dev, we should start the server.
        args = SimpleNamespace(dev=len(sys.argv) == 2)
        server_main(args)
        return

    parser = ArgumentParser()
    subparsers = parser.add_subparsers()

    add_subcommand(
        "serve",
        server_main,
        subparsers,
        [
            (
                "dev",
                {
                    "nargs": "?",
                    "choices": ["dev"],
                    "help": "Start the server with a development version of the client.",
                },
            )
        ],
    )

    resource_names = ["room", "user"]
    add_subcommand(
        "list",
        list_main,
        subparsers,
        [("resource", {"choices": resource_names, "help": "The resource to list."})],
    )
    add_subcommand(
        "remove",
        remove_main,
        subparsers,
        [
            (
                "resource",
                {"choices": resource_names, "help": "The type of resource to remove"},
            ),
            ("name", {"help": "The name of the resource to remove"}),
        ],
    )
    add_subcommand(
        "reset",
        reset_password_main,
        subparsers,
        [
            ("name", {"help": "The name of the user."}),
            (
                "--password",
                {"help": "The new password. Will be prompted for if not provided."},
            ),
        ],
    )
    add_subcommand(
        "import",
        import_main,
        subparsers,
        [
            (
                "--file",
                # Fixed: this help text was copy-pasted from the "reset" command.
                {"help": "The campaign export file to import."},
            ),
        ],
    )

    options = parser.parse_args()
    options.func(options)
if __name__ == "__main__":
    # Entry point when run directly as a script.
    main()
| 26.807818 | 209 | 0.608019 |
6cb9bbb0555ba2a5c7a5a94cfaaf5f2a82d1f4f2 | 1,154 | py | Python | stack/stack_fixed_listSize.py | vsjadhav/DSA_python | 1dd71b418bb604ebfd561c3dc9b8e123486ad8ef | [
"MIT"
] | null | null | null | stack/stack_fixed_listSize.py | vsjadhav/DSA_python | 1dd71b418bb604ebfd561c3dc9b8e123486ad8ef | [
"MIT"
] | null | null | null | stack/stack_fixed_listSize.py | vsjadhav/DSA_python | 1dd71b418bb604ebfd561c3dc9b8e123486ad8ef | [
"MIT"
] | null | null | null |
class stack:
    """A fixed-capacity LIFO stack backed by a Python list.

    Pushing onto a full stack and popping from an empty one are no-ops that
    print a message instead of raising.
    """

    def __init__(self, maxsize):
        # Upper bound on the number of stored elements.
        self.maxsize = maxsize
        self.list = []

    def __str__(self):
        # Render top-to-bottom, one element per line.
        return "\n".join(str(item) for item in reversed(self.list))

    def isempty(self):
        """Return True when the stack holds no elements."""
        return not self.list

    def isfull(self):
        """Return True when the stack is at capacity."""
        return len(self.list) == self.maxsize

    def push(self, value):
        """Place *value* on top of the stack; refuse (with a message) when full."""
        if self.isfull():
            print("stack is full")
            return
        self.list.append(value)

    def pop(self):
        """Remove and return the top element, or None (with a message) when empty."""
        if self.isempty():
            print("stack is empty")
            return None
        return self.list.pop()

    def peek(self):
        """Return the top element without removing it, or None when empty."""
        return self.list[-1] if self.list else None
# Demonstration of the fixed-size stack: the sixth push exceeds maxsize=5 and
# is rejected with a "stack is full" message.
stack1 = stack(5)
print(stack1.isempty())
stack1.push(2)
stack1.push(7)
stack1.push(9)
stack1.push(6)
stack1.push(11)
stack1.push(17)
print(stack1)
print(stack1.isempty())
print(f"peek: {stack1.peek()}")
stack1.pop()
print(f"peek after pop: {stack1.peek()}")
35b0eba9267b6373aa25aaecb78149840bf6dd2d | 8,647 | py | Python | python/pyspark/ml/wrapper.py | kandu009/Apache_Spark | 7092a1600a6a151a77d6bf9d64e00fcd43f979e4 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 1 | 2017-06-02T09:51:13.000Z | 2017-06-02T09:51:13.000Z | python/pyspark/ml/wrapper.py | kandu009/Apache_Spark | 7092a1600a6a151a77d6bf9d64e00fcd43f979e4 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | python/pyspark/ml/wrapper.py | kandu009/Apache_Spark | 7092a1600a6a151a77d6bf9d64e00fcd43f979e4 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from pyspark import SparkContext
from pyspark.sql import DataFrame
from pyspark.ml import Estimator, Transformer, Model
from pyspark.ml.param import Params
from pyspark.ml.util import _jvm
from pyspark.mllib.common import inherit_doc, _java2py, _py2java
@inherit_doc
class JavaWrapper(Params):
    """
    Utility class to help create wrapper classes from Java/Scala
    implementations of pipeline components.
    """

    __metaclass__ = ABCMeta

    def __init__(self):
        """
        Initialize the wrapped java object to None
        """
        super(JavaWrapper, self).__init__()
        #: The wrapped Java companion object. Subclasses should initialize
        #: it properly. The param values in the Java object should be
        #: synced with the Python wrapper in fit/transform/evaluate/copy.
        self._java_obj = None

    @staticmethod
    def _new_java_obj(java_class, *args):
        """
        Construct a new Java object.
        """
        sc = SparkContext._active_spark_context
        java_obj = _jvm()
        # Walk the dotted class path attribute-by-attribute on the JVM gateway
        # to reach the target class.
        for name in java_class.split("."):
            java_obj = getattr(java_obj, name)
        # Convert every Python argument to its Java equivalent before calling
        # the Java constructor.
        java_args = [_py2java(sc, arg) for arg in args]
        return java_obj(*java_args)

    def _make_java_param_pair(self, param, value):
        """
        Makes a Java parm pair.
        """
        sc = SparkContext._active_spark_context
        param = self._resolveParam(param)
        # Look up the Param object of the same name on the Java side and wrap
        # it together with the converted value.
        java_param = self._java_obj.getParam(param.name)
        java_value = _py2java(sc, value)
        return java_param.w(java_value)

    def _transfer_params_to_java(self):
        """
        Transforms the embedded params to the companion Java object.
        """
        paramMap = self.extractParamMap()
        # Only push params that actually have a value (set or default).
        for param in self.params:
            if param in paramMap:
                pair = self._make_java_param_pair(param, paramMap[param])
                self._java_obj.set(pair)

    def _transfer_params_from_java(self):
        """
        Transforms the embedded params from the companion Java object.
        """
        sc = SparkContext._active_spark_context
        for param in self.params:
            if self._java_obj.hasParam(param.name):
                java_param = self._java_obj.getParam(param.name)
                # Copy only values that are defined (set or defaulted) on the
                # Java side.
                if self._java_obj.isDefined(java_param):
                    value = _java2py(sc, self._java_obj.getOrDefault(java_param))
                    self._paramMap[param] = value

    @staticmethod
    def _empty_java_param_map():
        """
        Returns an empty Java ParamMap reference.
        """
        return _jvm().org.apache.spark.ml.param.ParamMap()

    def _to_java(self):
        """
        Transfer this instance's Params to the wrapped Java object, and return the Java object.
        Used for ML persistence.
        Meta-algorithms such as Pipeline should override this method.
        :return: Java object equivalent to this instance.
        """
        self._transfer_params_to_java()
        return self._java_obj

    @staticmethod
    def _from_java(java_stage):
        """
        Given a Java object, create and return a Python wrapper of it.
        Used for ML persistence.
        Meta-algorithms such as Pipeline should override this method as a classmethod.
        """
        def __get_class(clazz):
            """
            Loads Python class from its name.
            """
            parts = clazz.split('.')
            module = ".".join(parts[:-1])
            m = __import__(module)
            for comp in parts[1:]:
                m = getattr(m, comp)
            return m
        # Map e.g. org.apache.spark.ml.feature.Binarizer to
        # pyspark.ml.feature.Binarizer — relies on the mirrored package layout.
        stage_name = java_stage.getClass().getName().replace("org.apache.spark", "pyspark")
        # Generate a default new instance from the stage_name class.
        py_type = __get_class(stage_name)
        if issubclass(py_type, JavaWrapper):
            # Load information from java_stage to the instance.
            py_stage = py_type()
            py_stage._java_obj = java_stage
            py_stage._resetUid(java_stage.uid())
            py_stage._transfer_params_from_java()
        elif hasattr(py_type, "_from_java"):
            # Non-wrapper types (e.g. meta-algorithms) define their own loader.
            py_stage = py_type._from_java(java_stage)
        else:
            raise NotImplementedError("This Java stage cannot be loaded into Python currently: %r"
                                      % stage_name)
        return py_stage
@inherit_doc
class JavaEstimator(Estimator, JavaWrapper):
    """
    Base class for :py:class:`Estimator`s that wrap Java/Scala
    implementations.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def _create_model(self, java_model):
        """
        Creates a model from the input Java model reference.
        """
        raise NotImplementedError()

    def _fit_java(self, dataset):
        """
        Fits a Java model to the input dataset.
        :param dataset: input dataset, which is an instance of
                        :py:class:`pyspark.sql.DataFrame`
        :param params: additional params (overwriting embedded values)
        :return: fitted Java model
        """
        # Sync Python-side params onto the Java estimator before fitting.
        self._transfer_params_to_java()
        return self._java_obj.fit(dataset._jdf)

    def _fit(self, dataset):
        # Fit on the JVM side, then wrap the resulting Java model.
        return self._create_model(self._fit_java(dataset))
@inherit_doc
class JavaTransformer(Transformer, JavaWrapper):
    """
    Base class for :py:class:`Transformer`s that wrap Java/Scala
    implementations. Subclasses should ensure they have the transformer Java object
    available as _java_obj.
    """

    __metaclass__ = ABCMeta

    def _transform(self, dataset):
        # Sync Python-side params, run the Java transformer, and re-wrap the
        # resulting Java DataFrame in the caller's SQL context.
        self._transfer_params_to_java()
        transformed_jdf = self._java_obj.transform(dataset._jdf)
        return DataFrame(transformed_jdf, dataset.sql_ctx)
@inherit_doc
class JavaModel(Model, JavaTransformer):
    """
    Base class for :py:class:`Model`s that wrap Java/Scala
    implementations. Subclasses should inherit this class before
    param mix-ins, because this sets the UID from the Java model.
    """

    __metaclass__ = ABCMeta

    def __init__(self, java_model=None):
        """
        Initialize this instance with a Java model object.
        Subclasses should call this constructor, initialize params,
        and then call _transformer_params_from_java.
        This instance can be instantiated without specifying java_model,
        it will be assigned after that, but this scenario only used by
        :py:class:`JavaMLReader` to load models.  This is a bit of a
        hack, but it is easiest since a proper fix would require
        MLReader (in pyspark.ml.util) to depend on these wrappers, but
        these wrappers depend on pyspark.ml.util (both directly and via
        other ML classes).
        """
        super(JavaModel, self).__init__()
        if java_model is not None:
            # Adopt both the Java object and its uid so Python and Java sides
            # refer to the same stage identity.
            self._java_obj = java_model
            self.uid = java_model.uid()

    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. This implementation first calls Params.copy and
        then make a copy of the companion Java model with extra params.
        So both the Python wrapper and the Java model get copied.
        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = super(JavaModel, self).copy(extra)
        if self._java_obj is not None:
            # Copy the Java companion too, then push the (possibly updated)
            # Python params back onto the copy.
            that._java_obj = self._java_obj.copy(self._empty_java_param_map())
            that._transfer_params_to_java()
        return that

    def _call_java(self, name, *args):
        # Invoke an arbitrary method on the wrapped Java model, converting the
        # arguments to Java and the result back to Python.
        m = getattr(self._java_obj, name)
        sc = SparkContext._active_spark_context
        java_args = [_py2java(sc, arg) for arg in args]
        return _java2py(sc, m(*java_args))
| 35.293878 | 98 | 0.649474 |
83e51c71e5bdaa1e79b0f051b9929c261e25a3fd | 1,681 | py | Python | tests/st/ops/ascend/test_biasAddGrad.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | 1 | 2020-05-13T11:31:21.000Z | 2020-05-13T11:31:21.000Z | tests/st/ops/ascend/test_biasAddGrad.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | null | null | null | tests/st/ops/ascend/test_biasAddGrad.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
# This test targets Huawei Ascend hardware as the execution backend.
context.set_context(device_target="Ascend")
class Net(nn.Cell):
    """Cell wrapping the BiasAddGrad operator for standalone execution."""

    def __init__(self):
        super(Net, self).__init__()
        self.grad_op = G.BiasAddGrad()

    @ms_function
    def construct(self, dout):
        # Reduce the incoming gradient down to the bias gradient.
        return self.grad_op(dout)
# Gradient of BiasAdd w.r.t. the bias: with dout of ones shaped (2, 3, 4, 4)
# each of the 3 channels should accumulate 2 * 4 * 4 = 32.
dout = np.ones([2, 3, 4, 4]).astype(np.float32)
bias_add_grad = Net()
output = bias_add_grad(Tensor(dout))
expect_output = np.array([32., 32., 32.]).astype(np.float32)
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
print(output.asnumpy())
ee32ebc522ac8a42999da011ebc916a8df314599 | 1,517 | py | Python | samples/snippets/language_sentiment_analysis_predict_test.py | renovate-bot/python-automl | 5c1a9a383680bc5b89f514aa9a3aef47af9feac2 | [
"Apache-2.0"
] | 68 | 2020-01-31T18:13:23.000Z | 2022-03-28T14:57:12.000Z | samples/snippets/language_sentiment_analysis_predict_test.py | renovate-bot/python-automl | 5c1a9a383680bc5b89f514aa9a3aef47af9feac2 | [
"Apache-2.0"
] | 184 | 2020-01-31T17:34:00.000Z | 2022-03-30T22:42:11.000Z | samples/snippets/language_sentiment_analysis_predict_test.py | isabella232/python-automl | dbf1bf1bcc7575cd5ab85921311e18ecfed27dc7 | [
"Apache-2.0"
] | 29 | 2020-01-31T19:32:55.000Z | 2022-01-29T08:07:34.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from google.cloud import automl
import pytest
import language_sentiment_analysis_predict
# Resolved from the environment so the test can target any project/model;
# both variables must be set or the import fails with KeyError.
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = os.environ["SENTIMENT_ANALYSIS_MODEL_ID"]
@pytest.fixture(scope="function", autouse=True)
def setup():
    """Ensure the sentiment model is deployed before each prediction test."""
    client = automl.AutoMlClient()
    model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID)
    model = client.get_model(name=model_full_id)
    if model.deployment_state == automl.Model.DeploymentState.UNDEPLOYED:
        # Deploy and block until the long-running operation completes so the
        # prediction call cannot race the deployment.
        operation = client.deploy_model(name=model_full_id)
        operation.result()
def test_sentiment_analysis_predict(capsys):
    # Smoke test: the sample should print a predicted sentiment score for the
    # given text; we only check that the expected prefix appears in stdout.
    text = "Hopefully this Claritin kicks in soon"
    language_sentiment_analysis_predict.predict(PROJECT_ID, MODEL_ID, text)
    out, _ = capsys.readouterr()
    assert "Predicted sentiment score: " in out
6d1bd0bedda48972f98a2608dc4637dd8bffd3bf | 454 | py | Python | backend/music/migrations/0004_artist_scene_name.py | kiselevvn/django-music | 3c61319fb03d52f8642b21eefc80dccb912ddcd0 | [
"MIT"
] | null | null | null | backend/music/migrations/0004_artist_scene_name.py | kiselevvn/django-music | 3c61319fb03d52f8642b21eefc80dccb912ddcd0 | [
"MIT"
] | null | null | null | backend/music/migrations/0004_artist_scene_name.py | kiselevvn/django-music | 3c61319fb03d52f8642b21eefc80dccb912ddcd0 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-20 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the RxClaim table; applies on top of the previous rx_info
    # migration. Auto-generated by Django — do not edit field definitions.

    dependencies = [
        ('rx_info', '0003_auto_20190310_0353'),
    ]

    operations = [
        migrations.CreateModel(
            name='RxClaim',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('UnitCost', models.FloatField()),
                ('DrugLabelName', models.CharField(max_length=200)),
                ('PBMVendor', models.CharField(max_length=200)),
                ('PharmacyID', models.ForeignKey(on_delete='set null', to='rx_info.PharmacyInfo')),
            ],
        ),
    ]
be6683a5fbaac35658c38a5c9e751bf1dbdc4559 | 888 | py | Python | computer_network_real/6/singleProcessServer.py | mtjin/University_and_AndroidProjects | c0ac3394043fd10730e68b391866d55c3be2c23b | [
"MIT"
] | 1 | 2021-04-13T12:06:51.000Z | 2021-04-13T12:06:51.000Z | computer_network_real/6/singleProcessServer.py | mtjin/University | c0ac3394043fd10730e68b391866d55c3be2c23b | [
"MIT"
] | 2 | 2022-01-21T23:46:50.000Z | 2022-01-21T23:48:45.000Z | computer_network_real/6/singleProcessServer.py | mtjin/university | c0ac3394043fd10730e68b391866d55c3be2c23b | [
"MIT"
] | null | null | null | import time
import os
import socket
def send_recv(client_socket):
    """Read one request from *client_socket* and echo it back as an HTTP body.

    Best-effort demo handler: reads at most one 1024-byte chunk, logs it,
    replies with a 200 status line, echoes the raw bytes, then closes the
    connection.
    """
    data = client_socket.recv(1024)
    print("[client {}] {}".format(os.getpid(), data.decode()))
    # Bug fix: HTTP requires an empty line between the status/header section
    # and the body; without the second CRLF the response is malformed and
    # strict clients reject it.
    response = "HTTP/1.1 200 OK\r\n\r\n"
    client_socket.send(response.encode('utf-8'))
    client_socket.send(data)
    client_socket.close()
def main(FLAGS):
    """Accept TCP connections one at a time and handle each with send_recv.

    Runs forever; FLAGS.port selects the listening port on all interfaces.
    """
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts: without SO_REUSEADDR, rebinding the port while the
    # previous socket lingers in TIME_WAIT fails with "Address already in use".
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serversocket.bind(('', FLAGS.port))
    serversocket.listen()
    while True:
        (clientsocket, address) = serversocket.accept()
        print("accept client from", address)
        send_recv(clientsocket)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # Port to listen on; defaults to 8000.
    parser.add_argument('-p', '--port',
                        type=int,
                        default=8000,
                        help="input port number")
    # parse_known_args ignores any extra CLI arguments instead of erroring.
    FLAGS, _ = parser.parse_known_args()
    main(FLAGS)
3230369953fec889fbe51ac623fc79d0887a48af | 12,107 | py | Python | composer/airflow_1_samples/kubernetes_pod_operator.py | BaljitSingh919/Project360 | b8ec08f6598e6b4d6d190b63c6b64f268225bd2d | [
"Apache-2.0"
] | 5,938 | 2015-05-18T05:04:37.000Z | 2022-03-31T20:16:39.000Z | composer/airflow_1_samples/kubernetes_pod_operator.py | BaljitSingh919/Project360 | b8ec08f6598e6b4d6d190b63c6b64f268225bd2d | [
"Apache-2.0"
] | 4,730 | 2015-05-07T19:00:38.000Z | 2022-03-31T21:59:41.000Z | composer/airflow_1_samples/kubernetes_pod_operator.py | BaljitSingh919/Project360 | b8ec08f6598e6b4d6d190b63c6b64f268225bd2d | [
"Apache-2.0"
] | 6,734 | 2015-05-05T17:06:20.000Z | 2022-03-31T12:02:51.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example DAG demonstrating Kubernetes Pod Operator."""
# [START composer_kubernetespodoperator_airflow_1]
import datetime
from airflow import models
from airflow.contrib.kubernetes import secret
from airflow.contrib.operators import kubernetes_pod_operator
# A Secret is an object that contains a small amount of sensitive data such as
# a password, a token, or a key. Such information might otherwise be put in a
# Pod specification or in an image; putting it in a Secret object allows for
# more control over how it is used, and reduces the risk of accidental
# exposure.
# [START composer_kubernetespodoperator_secretobject_airflow_1]
# Expose the 'airflow-secrets' Secret's sql_alchemy_conn key as SQL_CONN.
secret_env = secret.Secret(
    # Expose the secret as environment variable.
    deploy_type='env',
    # The name of the environment variable, since deploy_type is `env` rather
    # than `volume`.
    deploy_target='SQL_CONN',
    # Name of the Kubernetes Secret
    secret='airflow-secrets',
    # Key of a secret stored in this Secret object
    key='sql_alchemy_conn')
# Mount the 'service-account' Secret's key file under /var/secrets/google.
secret_volume = secret.Secret(
    deploy_type='volume',
    # Path where we mount the secret as volume
    deploy_target='/var/secrets/google',
    # Name of Kubernetes Secret
    secret='service-account',
    # Key in the form of service account file name
    key='service-account.json')
# [END composer_kubernetespodoperator_secretobject_airflow_1]
YESTERDAY = datetime.datetime.now() - datetime.timedelta(days=1)

# If a Pod fails to launch, or has an error occur in the container, Airflow
# will show the task as failed, as well as contain all of the task logs
# required to debug.
with models.DAG(
        dag_id='composer_sample_kubernetes_pod',
        schedule_interval=datetime.timedelta(days=1),
        start_date=YESTERDAY) as dag:
    # Only name, namespace, image, and task_id are required to create a
    # KubernetesPodOperator. In Cloud Composer, currently the operator defaults
    # to using the config file found at `/home/airflow/composer_kube_config if
    # no `config_file` parameter is specified. By default it will contain the
    # credentials for Cloud Composer's Google Kubernetes Engine cluster that is
    # created upon environment creation.
    # [START composer_kubernetespodoperator_minconfig_airflow_1]
    kubernetes_min_pod = kubernetes_pod_operator.KubernetesPodOperator(
        # The ID specified for the task.
        task_id='pod-ex-minimum',
        # Name of task you want to run, used to generate Pod ID.
        name='pod-ex-minimum',
        # Entrypoint of the container, if not specified the Docker container's
        # entrypoint is used. The cmds parameter is templated.
        cmds=['echo'],
        # The namespace to run within Kubernetes, default namespace is
        # `default`. There is the potential for the resource starvation of
        # Airflow workers and scheduler within the Cloud Composer environment,
        # the recommended solution is to increase the amount of nodes in order
        # to satisfy the computing requirements. Alternatively, launching pods
        # into a custom namespace will stop fighting over resources.
        namespace='default',
        # Docker image specified. Defaults to hub.docker.com, but any fully
        # qualified URLs will point to a custom repository. Supports private
        # gcr.io images if the Composer Environment is under the same
        # project-id as the gcr.io images and the service account that Composer
        # uses has permission to access the Google Container Registry
        # (the default service account has permission)
        image='gcr.io/gcp-runtimes/ubuntu_18_0_4')
    # [END composer_kubernetespodoperator_minconfig_airflow_1]
    # [START composer_kubernetespodoperator_templateconfig_airflow_1]
    kubenetes_template_ex = kubernetes_pod_operator.KubernetesPodOperator(
        task_id='ex-kube-templates',
        name='ex-kube-templates',
        namespace='default',
        image='bash',
        # All parameters below are able to be templated with jinja -- cmds,
        # arguments, env_vars, and config_file. For more information visit:
        # https://airflow.apache.org/docs/apache-airflow/stable/macros-ref.html
        # Entrypoint of the container, if not specified the Docker container's
        # entrypoint is used. The cmds parameter is templated.
        cmds=['echo'],
        # DS in jinja is the execution date as YYYY-MM-DD, this docker image
        # will echo the execution date. Arguments to the entrypoint. The docker
        # image's CMD is used if this is not provided. The arguments parameter
        # is templated.
        arguments=['{{ ds }}'],
        # The var template variable allows you to access variables defined in
        # Airflow UI. In this case we are getting the value of my_value and
        # setting the environment variable `MY_VALUE`. The pod will fail if
        # `my_value` is not set in the Airflow UI.
        env_vars={'MY_VALUE': '{{ var.value.my_value }}'},
        # Sets the config file to a kubernetes config file specified in
        # airflow.cfg. If the configuration file does not exist or does
        # not provide validcredentials the pod will fail to launch. If not
        # specified, config_file defaults to ~/.kube/config
        config_file="{{ conf.get('core', 'kube_config') }}")
    # [END composer_kubernetespodoperator_templateconfig_airflow_1]
    # [START composer_kubernetespodoperator_secretconfig_airflow_1]
    kubernetes_secret_vars_ex = kubernetes_pod_operator.KubernetesPodOperator(
        task_id='ex-kube-secrets',
        name='ex-kube-secrets',
        namespace='default',
        image='ubuntu',
        startup_timeout_seconds=300,
        # The secrets to pass to Pod, the Pod will fail to create if the
        # secrets you specify in a Secret object do not exist in Kubernetes.
        secrets=[secret_env, secret_volume],
        # env_vars allows you to specify environment variables for your
        # container to use. env_vars is templated.
        # GOOGLE_APPLICATION_CREDENTIALS points at the key file mounted from
        # secret_volume. (Fixed: removed an accidental trailing space in the
        # path, which would have broken credential lookup.)
        env_vars={
            'EXAMPLE_VAR': '/example/value',
            'GOOGLE_APPLICATION_CREDENTIALS': '/var/secrets/google/service-account.json'})
    # [END composer_kubernetespodoperator_secretconfig_airflow_1]
    # [START composer_kubernetespodaffinity_airflow_1]
    kubernetes_affinity_ex = kubernetes_pod_operator.KubernetesPodOperator(
        task_id='ex-pod-affinity',
        name='ex-pod-affinity',
        namespace='default',
        image='perl',
        cmds=['perl'],
        arguments=['-Mbignum=bpi', '-wle', 'print bpi(2000)'],
        # affinity allows you to constrain which nodes your pod is eligible to
        # be scheduled on, based on labels on the node. In this case, if the
        # label 'cloud.google.com/gke-nodepool' with value
        # 'nodepool-label-value' or 'nodepool-label-value2' is not found on any
        # nodes, it will fail to schedule.
        affinity={
            'nodeAffinity': {
                # requiredDuringSchedulingIgnoredDuringExecution means in order
                # for a pod to be scheduled on a node, the node must have the
                # specified labels. However, if labels on a node change at
                # runtime such that the affinity rules on a pod are no longer
                # met, the pod will still continue to run on the node.
                'requiredDuringSchedulingIgnoredDuringExecution': {
                    'nodeSelectorTerms': [{
                        'matchExpressions': [{
                            # When nodepools are created in Google Kubernetes
                            # Engine, the nodes inside of that nodepool are
                            # automatically assigned the label
                            # 'cloud.google.com/gke-nodepool' with the value of
                            # the nodepool's name.
                            'key': 'cloud.google.com/gke-nodepool',
                            'operator': 'In',
                            # The label key's value that pods can be scheduled
                            # on.
                            'values': [
                                'pool-0',
                                'pool-1',
                            ]
                        }]
                    }]
                }
            }
        })
    # [END composer_kubernetespodaffinity_airflow_1]
    # [START composer_kubernetespodoperator_fullconfig_airflow_1]
    kubernetes_full_pod = kubernetes_pod_operator.KubernetesPodOperator(
        task_id='ex-all-configs',
        name='pi',
        namespace='default',
        image='perl',
        # Entrypoint of the container, if not specified the Docker container's
        # entrypoint is used. The cmds parameter is templated.
        cmds=['perl'],
        # Arguments to the entrypoint. The docker image's CMD is used if this
        # is not provided. The arguments parameter is templated.
        arguments=['-Mbignum=bpi', '-wle', 'print bpi(2000)'],
        # The secrets to pass to Pod, the Pod will fail to create if the
        # secrets you specify in a Secret object do not exist in Kubernetes.
        secrets=[],
        # Labels to apply to the Pod.
        labels={'pod-label': 'label-name'},
        # Timeout to start up the Pod, default is 120.
        startup_timeout_seconds=120,
        # The environment variables to be initialized in the container
        # env_vars are templated.
        env_vars={'EXAMPLE_VAR': '/example/value'},
        # If true, logs stdout output of container. Defaults to True.
        get_logs=True,
        # Determines when to pull a fresh image, if 'IfNotPresent' will cause
        # the Kubelet to skip pulling an image if it already exists. If you
        # want to always pull a new image, set it to 'Always'.
        image_pull_policy='Always',
        # Annotations are non-identifying metadata you can attach to the Pod.
        # Can be a large range of data, and can include characters that are not
        # permitted by labels.
        annotations={'key1': 'value1'},
        # Resource specifications for Pod, this will allow you to set both cpu
        # and memory limits and requirements.
        # Prior to Airflow 1.10.4, resource specifications were
        # passed as a Pod Resources Class object,
        # If using this example on a version of Airflow prior to 1.10.4,
        # import the "pod" package from airflow.contrib.kubernetes and use
        # resources = pod.Resources() instead passing a dict
        # For more info see:
        # https://github.com/apache/airflow/pull/4551
        resources={'limit_memory': "250M", 'limit_cpu': "100m"},
        # Specifies path to kubernetes config. If no config is specified will
        # default to '~/.kube/config'. The config_file is templated.
        config_file='/home/airflow/composer_kube_config',
        # If true, the content of /airflow/xcom/return.json from container will
        # also be pushed to an XCom when the container ends.
        do_xcom_push=False,
        # List of Volume objects to pass to the Pod.
        volumes=[],
        # List of VolumeMount objects to pass to the Pod.
        volume_mounts=[],
        # Affinity determines which nodes the Pod can run on based on the
        # config. For more information see:
        # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        affinity={})
    # [END composer_kubernetespodoperator_fullconfig_airflow_1]
# [END composer_kubernetespodoperator_airflow_1]
3375c9df434c5eb9f95398b6e36650226bb0dc6c | 740 | py | Python | local_deploy/rx_info/migrations/0005_rxclaim.py | jojordan3/prescription-search-django | 7c36766e7bac5d22e42d6bd2bae7ca3d5865f93b | [
"MIT"
] | 1 | 2019-05-21T11:33:32.000Z | 2019-05-21T11:33:32.000Z | local_deploy/rx_info/migrations/0005_rxclaim.py | jojordan3/prescription-search-django | 7c36766e7bac5d22e42d6bd2bae7ca3d5865f93b | [
"MIT"
] | 3 | 2020-02-11T23:48:18.000Z | 2021-06-10T21:15:44.000Z | local_deploy/rx_info/migrations/0005_rxclaim.py | jojordan3/prescription-search-django | 7c36766e7bac5d22e42d6bd2bae7ca3d5865f93b | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-10 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``RxClaim`` model (one row per prescription claim)."""
    # Must be applied after migration 0004 of the rx_info app.
    dependencies = [
        ('rx_info', '0004_auto_20190310_0353'),
    ]
    operations = [
        migrations.CreateModel(
            name='RxClaim',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('UnitCost', models.FloatField()),
                ('DrugLabelName', models.CharField(max_length=200)),
                ('PBMVendor', models.CharField(max_length=200)),
                # NOTE(review): on_delete is the string 'set null' rather than
                # models.SET_NULL; Django expects a callable here — presumably
                # this never ran a cascade delete. Confirm before reuse.
                ('PharmacyID', models.ForeignKey(on_delete='set null', to='rx_info.PharmacyInfo')),
            ],
        ),
    ]
2f8c75cc950bb0dcd0e35775f08911de99cc5e44 | 460 | py | Python | commands/cmd_stats.py | Serbirial/regiusBot | ac54e10f721ccbd6c1848130abecff2aa8552778 | [
"MIT"
] | 2 | 2018-01-07T06:12:23.000Z | 2018-03-29T12:52:54.000Z | commands/cmd_stats.py | zekroTJA/regiusBot | ac54e10f721ccbd6c1848130abecff2aa8552778 | [
"MIT"
] | null | null | null | commands/cmd_stats.py | zekroTJA/regiusBot | ac54e10f721ccbd6c1848130abecff2aa8552778 | [
"MIT"
] | 14 | 2017-06-28T11:46:04.000Z | 2020-09-06T17:46:42.000Z | from discord import Embed, Color
description = "Shows the link to the statistics page of the guild"
async def ex(message, client):
    """Send an embed linking to the guild statistics page, then delete the
    command message that triggered it."""
    stats_embed = Embed(color=Color.gold(),
                        title="Discord Member Stats",
                        description="[STATICSTICS](http://s.zekro.de/dcstats)")
    await client.send_message(message.channel, embed=stats_embed)
    await client.delete_message(message)
| 38.333333 | 115 | 0.563043 |
6ff96649c38d7c815726ba986224419811968f77 | 12,814 | py | Python | stingray/crosscorrelation.py | pierfra-ro/stingray | d255e3ced23e97d297be7d8d659ddf163b808064 | [
"MIT"
] | 133 | 2016-02-05T09:37:00.000Z | 2022-03-30T04:49:18.000Z | stingray/crosscorrelation.py | pierfra-ro/stingray | d255e3ced23e97d297be7d8d659ddf163b808064 | [
"MIT"
] | 578 | 2016-02-04T10:38:28.000Z | 2022-03-31T13:27:13.000Z | stingray/crosscorrelation.py | pierfra-ro/stingray | d255e3ced23e97d297be7d8d659ddf163b808064 | [
"MIT"
] | 125 | 2016-02-04T17:04:43.000Z | 2022-03-24T18:05:22.000Z | import warnings
import numpy as np
from scipy import signal
try:
from pyfftw.interfaces.scipy_fft import ifft, fftfreq
except ImportError:
warnings.warn("pyfftw not installed. Using standard scipy fft")
from scipy.fft import ifft, fftfreq
from stingray.lightcurve import Lightcurve
from stingray.crossspectrum import Crossspectrum, AveragedCrossspectrum
from stingray.exceptions import StingrayError
import stingray.utils as utils
__all__ = ['CrossCorrelation', 'AutoCorrelation']
class CrossCorrelation(object):
"""Make a cross-correlation from light curves or a cross spectrum.
You can also make an empty :class:`Crosscorrelation` object to populate
with your own cross-correlation data.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object, optional, default ``None``
The first light curve data for correlation calculations.
lc2: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data for the correlation calculations.
cross: :class: `stingray.Crossspectrum` object, default ``None``
The cross spectrum data for the correlation calculations.
mode: {``full``, ``valid``, ``same``}, optional, default ``same``
A string indicating the size of the correlation output.
See the relevant ``scipy`` documentation [scipy-docs]_
for more details.
Attributes
----------
lc1: :class:`stingray.Lightcurve`
The first light curve data for correlation calculations.
lc2: :class:`stingray.Lightcurve`
The light curve data for the correlation calculations.
cross: :class: `stingray.Crossspectrum`
The cross spectrum data for the correlation calculations.
corr: numpy.ndarray
An array of correlation data calculated from two light curves
time_lags: numpy.ndarray
An array of all possible time lags against which each point in corr is calculated
dt: float
The time resolution of each light curve (used in ``time_lag`` calculations)
time_shift: float
Time lag that gives maximum value of correlation between two light curves.
There will be maximum correlation between light curves if one of the light curve
is shifted by ``time_shift``.
n: int
Number of points in ``self.corr`` (length of cross-correlation data)
auto: bool
An internal flag to indicate whether this is a cross-correlation or an auto-correlation.
References
----------
.. [scipy-docs] https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.correlate.html
"""
def __init__(self, lc1=None, lc2=None, cross=None, mode='same'):
self.auto = False
if isinstance(mode, str) is False:
raise TypeError("mode must be a string")
if mode.lower() not in ["full", "valid", "same"]:
raise ValueError("mode must be 'full', 'valid' or 'same'!")
self.mode = mode.lower()
self.lc1 = None
self.lc2 = None
self.cross = None
# Populate all attributes by ``None` if user passes no lightcurve data
if lc1 is None or lc2 is None:
if lc1 is not None or lc2 is not None:
raise TypeError("You can't do a cross correlation with just one "
"light curve!")
else:
if cross is None:
# all object input params are ``None``
self.corr = None
self.time_shift = None
self.time_lags = None
self.dt = None
self.n = None
else:
self._make_cross_corr(cross)
return
else:
self._make_corr(lc1, lc2)
def _make_cross_corr(self, cross):
"""
Do some checks on the cross spectrum supplied to the method,
and then calculate the time shifts, time lags and cross correlation.
Parameters
----------
cross: :class:`stingray.Crossspectrum` object
The crossspectrum, averaged or not.
"""
if not isinstance(cross, Crossspectrum):
if not isinstance(cross, AveragedCrossspectrum):
raise TypeError("cross must be a crossspectrum.Crossspectrum \
or crossspectrum.AveragedCrossspectrum object")
if self.cross is None:
self.cross = cross
self.dt = 1/(cross.df * cross.n)
if self.dt is None:
self.dt = 1/(cross.df * cross.n)
prelim_corr = abs(ifft(cross.power).real) # keep only the real
self.n = len(prelim_corr)
# ifft spits out an array that looks like [0,1,...n,-n,...-1]
# where n is the last positive frequency
# correcting for this by putting them in order
times = fftfreq(self.n, cross.df)
time, corr = np.array(sorted(zip(times, prelim_corr))).T
self.corr = corr
self.time_shift, self.time_lags, self.n = self.cal_timeshift(dt=self.dt)
def _make_corr(self, lc1, lc2):
"""
Do some checks on the light curves supplied to the method, and then calculate the time
shifts, time lags and cross correlation.
Parameters
----------
lc1::class:`stingray.Lightcurve` object
The first light curve data.
lc2::class:`stingray.Lightcurve` object
The second light curve data.
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
if not np.isclose(lc1.dt, lc2.dt):
raise StingrayError("Light curves do not have "
"same time binning dt.")
else:
# ignore very small differences in dt neglected by np.isclose()
lc1.dt = lc2.dt
self.dt = lc1.dt
# self.lc1 and self.lc2 may get assigned values explicitly in which case there is no need to copy data
if self.lc1 is None:
self.lc1 = lc1
if self.lc2 is None:
self.lc2 = lc2
# Subtract means before passing scipy.signal.correlate into correlation
lc1_counts = self.lc1.counts - np.mean(self.lc1.counts)
lc2_counts = self.lc2.counts - np.mean(self.lc2.counts)
# Calculates cross-correlation of two lightcurves
self.corr = signal.correlate(lc1_counts, lc2_counts, self.mode)
self.n = len(self.corr)
self.time_shift, self.time_lags, self.n = self.cal_timeshift(dt=self.dt)
def cal_timeshift(self, dt=1.0):
"""
Calculate the cross correlation against all possible time lags, both positive and negative.
Parameters
----------
dt: float, optional, default ``1.0``
Time resolution of the light curve, should be passed when object is populated with
correlation data and no information about light curve can be extracted. Used to
calculate ``time_lags``.
Returns
-------
self.time_shift: float
Value of the time lag that gives maximum value of correlation between two light curves.
self.time_lags: numpy.ndarray
An array of ``time_lags`` calculated from correlation data
"""
if self.dt is None:
self.dt = dt
if self.corr is None:
if (self.lc1 is None or self.lc2 is None) and (self.cross is None):
raise StingrayError('Please provide either two lightcurve objects or \
a [average]crossspectrum object to calculate correlation and time_shift')
else:
# This will cover very rare case of assigning self.lc1 and lc2
# or self.cross and also self.corr = ``None``.
# In this case, correlation is calculated using self.lc1
# and self.lc2 and using that correlation data,
# time_shift is calculated.
if self.cross is not None:
self._make_cross_corr(self.cross)
else:
self._make_corr(self.lc1, self.lc2)
self.n = len(self.corr)
dur = int(self.n / 2)
# Correlation against all possible lags, positive as well as negative lags are stored
x_lags = np.linspace(-dur, dur, self.n)
self.time_lags = x_lags * self.dt
# time_shift is the time lag for max. correlation
self.time_shift = self.time_lags[np.argmax(self.corr)]
return self.time_shift, self.time_lags, self.n
def plot(self, labels=None, axis=None, title=None, marker='-', save=False, filename=None, ax=None):
"""
Plot the :class:`Crosscorrelation` as function using Matplotlib.
Plot the Crosscorrelation object on a graph ``self.time_lags`` on x-axis and
``self.corr`` on y-axis
Parameters
----------
labels : iterable, default ``None``
A list of tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
acceptable argument for ``matplotlib.pyplot.axis()`` function.
title : str, default ``None``
The title of the plot.
marker : str, default ``-``
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional (default=False)
If True, save the figure with specified filename.
filename : str
File name of the image to save. Depends on the boolean ``save``.
ax : ``matplotlib.Axes`` object
An axes object to fill with the cross correlation plot.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
ax.plot(self.time_lags, self.corr, marker)
if labels is not None:
try:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
except TypeError:
utils.simon("``labels`` must be either a list or tuple with "
"x and y labels.")
raise
except IndexError:
utils.simon("``labels`` must have two labels for x and y "
"axes.")
# Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
# axis is a tuple containing formatting information
if axis is not None:
ax.axis(axis)
if title is not None:
ax.set_title(title)
if save:
if filename is None:
plt.savefig('corr.pdf', format="pdf")
else:
plt.savefig(filename)
else:
plt.show(block=False)
return ax
class AutoCorrelation(CrossCorrelation):
"""
Make an auto-correlation from a light curve.
You can also make an empty Autocorrelation object to populate with your
own auto-correlation data.
Parameters
----------
lc: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data for correlation calculations.
mode: {``full``, ``valid``, ``same``}, optional, default ``same``
A string indicating the size of the correlation output.
See the relevant ``scipy`` documentation [scipy-docs]
for more details.
Attributes
----------
lc1, lc2::class:`stingray.Lightcurve`
The light curve data for correlation calculations.
corr: numpy.ndarray
An array of correlation data calculated from lightcurve data
time_lags: numpy.ndarray
An array of all possible time lags against which each point in corr is calculated
dt: float
The time resolution of each lightcurve (used in time_lag calculations)
time_shift: float, zero
Max. Value of AutoCorrelation is always at zero lag.
n: int
Number of points in self.corr(Length of auto-correlation data)
"""
def __init__(self, lc=None, mode='same'):
CrossCorrelation.__init__(self, lc1=lc, lc2=lc, mode=mode)
self.auto = True
| 35.994382 | 110 | 0.604729 |
9df3fbb38695bb52cbdc2f05bc725c14e12b94f3 | 5,061 | py | Python | app/user/tests/test_user_api.py | Kshitiz-Karki/recipe-app-api | 22526e58200235197e17b25cfd06f23de12160a1 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | Kshitiz-Karki/recipe-app-api | 22526e58200235197e17b25cfd06f23de12160a1 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | Kshitiz-Karki/recipe-app-api | 22526e58200235197e17b25cfd06f23de12160a1 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
"""Helper function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating using with a valid payload is successful"""
payload = {
'email': 'test@londonappdev.com',
'password': 'testpass',
'name': 'name',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(
user.check_password(payload['password'])
)
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {
'email': 'test@londonappdev.com',
'password': 'testpass',
'name': 'Test'
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {
'email': 'test@londonappdev.com',
'password': 'pw',
'name': 'Test'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@londonappdev.com', password='testpass')
payload = {'email': 'test@londonappdev.com', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doens't exist"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='scaleta.23@gmail.com',
password='testpass',
name='Test'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retreiving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that post is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 34.903448 | 77 | 0.647105 |
526845c85ca9e714514e62361698377e120e89b4 | 5,555 | py | Python | nova/tests/unit/fake_notifier.py | confi-surya/nova | adda77352cbe037f47c86bbd809c94fee269eaae | [
"Apache-2.0"
] | 1 | 2018-12-28T06:47:39.000Z | 2018-12-28T06:47:39.000Z | nova/tests/unit/fake_notifier.py | confi-surya/nova | adda77352cbe037f47c86bbd809c94fee269eaae | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/fake_notifier.py | confi-surya/nova | adda77352cbe037f47c86bbd809c94fee269eaae | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import pprint
import threading
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova import rpc
class _Sub(object):
"""Allow a subscriber to efficiently wait for an event to occur, and
retrieve events which have occured.
"""
def __init__(self):
self._cond = threading.Condition()
self._notifications = []
def received(self, notification):
with self._cond:
self._notifications.append(notification)
self._cond.notifyAll()
def wait_n(self, n, event, timeout):
"""Wait until at least n notifications have been received, and return
them. May return less than n notifications if timeout is reached.
"""
with timeutils.StopWatch(timeout) as timer:
with self._cond:
while len(self._notifications) < n:
if timer.expired():
notifications = pprint.pformat(
{event: sub._notifications
for event, sub in VERSIONED_SUBS.items()})
raise AssertionError(
"Notification %(event)s hasn't been "
"received. Received:\n%(notifications)s" % {
'event': event,
'notifications': notifications,
})
self._cond.wait(timer.leftover())
# Return a copy of the notifications list
return list(self._notifications)
VERSIONED_SUBS = collections.defaultdict(_Sub)
VERSIONED_NOTIFICATIONS = []
NOTIFICATIONS = []
def reset():
del NOTIFICATIONS[:]
del VERSIONED_NOTIFICATIONS[:]
VERSIONED_SUBS.clear()
FakeMessage = collections.namedtuple('Message',
['publisher_id', 'priority',
'event_type', 'payload', 'context'])
class FakeNotifier(object):
def __init__(self, transport, publisher_id, serializer=None):
self.transport = transport
self.publisher_id = publisher_id
self._serializer = serializer or messaging.serializer.NoOpSerializer()
for priority in ['debug', 'info', 'warn', 'error', 'critical']:
setattr(self, priority,
functools.partial(self._notify, priority.upper()))
def prepare(self, publisher_id=None):
if publisher_id is None:
publisher_id = self.publisher_id
return self.__class__(self.transport, publisher_id,
serializer=self._serializer)
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
# NOTE(sileht): simulate the kombu serializer
# this permit to raise an exception if something have not
# been serialized correctly
jsonutils.to_primitive(payload)
# NOTE(melwitt): Try to serialize the context, as the rpc would.
# An exception will be raised if something is wrong
# with the context.
self._serializer.serialize_context(ctxt)
msg = FakeMessage(self.publisher_id, priority, event_type,
payload, ctxt)
NOTIFICATIONS.append(msg)
def is_enabled(self):
return True
class FakeVersionedNotifier(FakeNotifier):
def _notify(self, priority, ctxt, event_type, payload):
payload = self._serializer.serialize_entity(ctxt, payload)
notification = {'publisher_id': self.publisher_id,
'priority': priority,
'event_type': event_type,
'payload': payload}
VERSIONED_NOTIFICATIONS.append(notification)
VERSIONED_SUBS[event_type].received(notification)
def stub_notifier(test):
test.stub_out('oslo_messaging.Notifier', FakeNotifier)
if rpc.LEGACY_NOTIFIER and rpc.NOTIFIER:
test.stub_out('nova.rpc.LEGACY_NOTIFIER',
FakeNotifier(rpc.LEGACY_NOTIFIER.transport,
rpc.LEGACY_NOTIFIER.publisher_id,
serializer=getattr(rpc.LEGACY_NOTIFIER,
'_serializer',
None)))
test.stub_out('nova.rpc.NOTIFIER',
FakeVersionedNotifier(rpc.NOTIFIER.transport,
rpc.NOTIFIER.publisher_id,
serializer=getattr(rpc.NOTIFIER,
'_serializer',
None)))
def wait_for_versioned_notifications(event_type, n_events=1, timeout=10.0):
return VERSIONED_SUBS[event_type].wait_n(n_events, event_type, timeout)
| 38.047945 | 78 | 0.59622 |
c40b0a433fa8e3a5c20daeeb9f134993911bada7 | 5,613 | py | Python | tests/glsl.py | Konsonanz/dnload | 52668c73339f6d11b54c8b41e6fe3ba6c4ef6b77 | [
"BSD-3-Clause"
] | 63 | 2016-11-03T09:13:58.000Z | 2022-03-29T12:54:58.000Z | tests/glsl.py | Konsonanz/dnload | 52668c73339f6d11b54c8b41e6fe3ba6c4ef6b77 | [
"BSD-3-Clause"
] | 6 | 2018-07-27T18:21:25.000Z | 2021-03-19T08:04:07.000Z | tests/glsl.py | Konsonanz/dnload | 52668c73339f6d11b54c8b41e6fe3ba6c4ef6b77 | [
"BSD-3-Clause"
] | 10 | 2016-07-27T17:03:00.000Z | 2021-03-13T19:34:36.000Z | #!/usr/bin/env python
import argparse
import os
import re
import sys
(pathname, basename) = os.path.split(__file__)
if pathname and (pathname != "."):
sys.path.append(pathname + "/..")
from dnload.common import executable_check
from dnload.common import executable_search
from dnload.common import is_verbose
from dnload.common import run_command
from dnload.common import set_verbose
from dnload.custom_help_formatter import CustomHelpFormatter
from dnload.preprocessor import Preprocessor
########################################
# Functions ############################
########################################
def compress_file(compression, src, dst):
"""Compress a file to be a self-extracting file-dumping executable."""
if "lzma" == compression:
command = ["xz", "--format=lzma", "--lzma1=preset=9,lc=1,lp=0,nice=273,pb=0", "--stdout"]
elif "xz" == compression:
command = ["xz", "--format=xz", "--lzma2=preset=9,lc=1,nice=273,pb=0", "--stdout"]
else:
raise RuntimeError("unknown compression format '%s'" % compression)
(compressed, se) = run_command(command + [src], False)
wfd = open(dst, "wb")
wfd.write(compressed)
wfd.close()
print("Wrote '%s': %i -> %i bytes" % (dst, os.path.getsize(src), os.path.getsize(dst)))
def extract_shader_payload(preprocessor, src, dst):
"""Extract only the quoted content and write a file."""
text = preprocessor.preprocess(src)
match = re.match(r'.*char[^"]+"(.*)"\s*;[^"]+', text, re.MULTILINE | re.DOTALL)
if not match:
raise RuntimeError("could not extract shader blob")
text = re.sub(r'"\s*\n\s*"', "", match.group(1))
fd = open(dst, "w")
fd.write(text.replace("\\n", "\n"))
fd.close()
if is_verbose():
print("Wrote shader payload: '%s'" % (dst))
def find_executable(basename, pathname, path="."):
"""Find executable with basename and pathname."""
if os.path.exists(path + "/" + basename):
return os.path.normpath(path + "/" + basename)
if os.path.exists(path + "/" + pathname):
return os.path.normpath(path + "/" + pathname + "/" + basename)
new_path = os.path.normpath(path + "/..")
if os.path.exists(new_path) and (os.path.realpath(new_path) != os.path.realpath(path)):
return find_executable(basename, pathname, new_path)
return None
########################################
# Main #################################
########################################
def main():
"""Main function."""
default_preprocessor_list = ["cpp", "clang-cpp"]
preprocessor = None
parser = argparse.ArgumentParser(usage="GLSL minifying test.", formatter_class=CustomHelpFormatter, add_help=False)
parser.add_argument("-h", "--help", action="store_true", help="Print this help string and exit.")
parser.add_argument("--preprocessor", default=None, help="Try to use given preprocessor executable as opposed to autodetect.")
parser.add_argument("-v", "--verbose", action="store_true", help="Print more info about what is being done.")
parser.add_argument("source", default=[], nargs="*", help="Source file(s) to process.")
args = parser.parse_args()
preprocessor = args.preprocessor
if args.help:
print(parser.format_help().strip())
return 0
# Verbosity.
if args.verbose:
set_verbose(True)
# Source files to process.
if not args.source:
raise RuntimeError("no source files to process")
source_files = []
for ii in args.source:
if re.match(r'.*\.(glsl|vert|geom|frag)$', ii, re.I):
source_files += [ii]
else:
raise RuntimeError("unknown source file: '%s'" % (ii))
dl = find_executable("dnload.py", "dnload")
if is_verbose():
print("found dnload: '%s'" % (dl))
sm = find_executable("shader_minifier.exe", "Shader_Minifier")
if is_verbose():
print("found shader_minifier: '%s'" % (sm))
# Find preprocessor.
if preprocessor:
if not executable_check(preprocessor):
raise RuntimeError("could not use supplied preprocessor '%s'" % (preprocessor))
else:
preprocessor_list = default_preprocessor_list
if os.name == "nt":
preprocessor_list = ["cl.exe"] + preprocessor_list
preprocessor = executable_search(preprocessor_list, "preprocessor")
if not preprocessor:
raise RuntimeError("suitable preprocessor not found")
preprocessor = Preprocessor(preprocessor)
for ii in source_files:
fname = "/tmp/" + os.path.basename(ii)
fname_dn = fname + ".dnload"
fname_dn_in = fname_dn + ".h"
fname_dn_out = fname_dn + ".payload"
fname_sm = fname + ".shaderminifier"
fname_sm_in = fname_sm + ".h"
fname_sm_out = fname_sm + ".payload"
run_command(["python", dl, ii, "-o", fname_dn_in])
if is_verbose():
print("Wrote dnload -minified shader: '%s'" % (fname_dn_in))
run_command(["mono", sm, ii, "-o", fname_sm_in])
if is_verbose():
print("Wrote shader_minifier -minified shader: '%s'" % (fname_sm_in))
extract_shader_payload(preprocessor, fname_dn_in, fname_dn_out)
extract_shader_payload(preprocessor, fname_sm_in, fname_sm_out)
compress_file("lzma", fname_dn_out, fname_dn + ".lzma")
compress_file("lzma", fname_sm_out, fname_sm + ".lzma")
return 0
########################################
# Entry point ##########################
########################################
if __name__ == "__main__":
sys.exit(main())
| 38.445205 | 130 | 0.605737 |
5e8b9eac9c03c795b43fc0064a660bdb33a27529 | 13,261 | py | Python | Code/CAML/zzz_ReduceDataPoints.py | NoldAndreas/FINDER | a3d947c5d59a7cd6e54400b0e9aeb9e111689976 | [
"MIT"
] | null | null | null | Code/CAML/zzz_ReduceDataPoints.py | NoldAndreas/FINDER | a3d947c5d59a7cd6e54400b0e9aeb9e111689976 | [
"MIT"
] | null | null | null | Code/CAML/zzz_ReduceDataPoints.py | NoldAndreas/FINDER | a3d947c5d59a7cd6e54400b0e9aeb9e111689976 | [
"MIT"
] | 1 | 2022-02-08T17:03:39.000Z | 2022-02-08T17:03:39.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Reduces the number of points in SMLM images to a given maximum count or fraction.
This script takes a folder of SMLM files and, for each file, saves a copy of it up
to the specified maximum number of points in the output folder.
The number of points is given either as an exact value or a percentage of the
initial total points.
Images with less than the specified maximum are simply copied to the output folder.
@author: Dave
"""
import os
import numpy as np
from natsort import natsorted
import gc
import datetime
import json
# Run from the script's own directory so that local imports (e.g. FuncEtc)
# and relative paths resolve regardless of where the script was launched.
proc_wd = os.path.dirname(os.path.abspath(__file__))
if os.getcwd() != proc_wd:
    os.chdir(proc_wd)
    print('Changed working directory to ' + proc_wd)
# Screen for identical xy coordinate pairs
# If found only the first pair will be retained and the other removed.
# Identical points give rise to zero-length neighbour distances which can break
# things further down the line.
doFilterDuplicates = True
# Begin processing at this file in the list of files. Should normally be zero to
# start with the first file but you can jump ahead if you wish to resume processing
# an earlier set or if you are batch-processing across machines.
# NB: remember Python's zero-based indexing. To process the first file in the list
# this needs to be set to zero.
starting_index = 0 # normally zero (begin with the first file)
# End processing at this file index in the list of files. Should normally be zero
# to process all files in the list but you can terminate the list early, e.g.
# to only process a subset of the files or if you are batch-processing across
# multiple machines
# NB: remember Python's zero-based indexing.
finishing_index = 0 # normally zero (end with the last file)
# NB starting_index can be greater than finishing_index; the script will just
# process the files in the reverse order. e.g.
# Files = [0,1,2,3,4,5,6,7,8,9]
# starting_index = 0 and finishing_index = 5 >>> Processes Files 0,1,2,3,4
# starting_index = 8 and finishing_index = 3 >>> Processes Files 8,7,6,5,4
if __name__ == '__main__':

    # ------------------------------------------------------------------
    # Stage 1: load and confirm processing settings (ProcSettings) from a
    # user-supplied JSON file.  Loops until the user accepts a file.
    # ------------------------------------------------------------------
    good_json = False
    default_json_file = ''
    while not good_json:
        if 'input_PrepJSON' in locals():
            default_json_file = input_PrepJSON # recycle the previous input
        input_PrepJSON = fn_etc.askforinput(
            message = 'Full path to JSON file describing the data',
            errormessage = 'The file you provided does not exist or you supplied a path only. Check that your path includes the file you want and try again.',
            defaultval = default_json_file,
            isvalid = lambda v : os.path.isfile(v))
        with open(input_PrepJSON, 'r') as file:
            ps = json.loads(file.read())
        # Echo the imported settings so the user can verify them.
        fn_etc.info_msg('Imported JSON variables:')
        print(' │')
        print(' ├─InputFileDelimiter:\t' + ps['InputFileDelimiter'])
        print(' ├─InputFileExt:\t' + ps['InputFileExt'])
        print(' │')
        print(' ├─xCol:\t\t' + str(ps['xCol']))
        print(' ├─yCol:\t\t' + str(ps['yCol']))
        print(' ├─ClusMembershipIDCol:\t' + str(ps['ClusMembershipIDCol']))
        print(' ├─ChanIDCol:\t\t' + str(ps['ChanIDCol']))
        print(' ├─UIDCol:\t\t' + str(ps['UIDCol']))
        print(' │')
        print(' ├─AutoAxes:\t\t' + str(ps['AutoAxes']))
        if ps['AutoAxes']:
            print(' ├─AutoAxesNearest:\t' + str(ps['AutoAxesNearest']))
            print(' ├─ImageSize:\t\tTo be determined')
            print(' ├─xMin:\t\tTo be determined')
            print(' ├─xMax:\t\tTo be determined')
            print(' ├─yMin:\t\tTo be determined')
            print(' ├─yMax:\t\tTo be determined')
        else:
            print(' ├─AutoAxesNearest:\tNot applicable')
            print(' ├─ImageSize:\t\t' + str(ps['ImageSize']))
            print(' ├─xMin:\t\t' + str(ps['xMin']))
            print(' ├─xMax:\t\t' + str(ps['xMax']))
            print(' ├─yMin:\t\t' + str(ps['yMin']))
            print(' ├─yMax:\t\t' + str(ps['yMax']))
        print(' │')
        print(' ├─ClosestFriend:\t' + str(ps['ClosestFriend']))
        print(' └─FurthestFriend:\t' + str(ps['FurthestFriend']))
        verify_good_json = fn_etc.askforinput(
            message = 'Are these settings correct? Enter \'Y\' to proceed or enter \'N\' to select another JSON file (or the same file, after you have edited it with the correct settings)',
            errormessage= 'Type Y or N',
            defaultval= 'y',
            isvalid = lambda v : v.lower() in ['y','n','yes','no'])
        if verify_good_json.lower() in ['y','yes']:
            print('JSON file accepted.')
            good_json = True
        else:
            print('JSON file rejected.')

    default_input_path = os.path.dirname(input_PrepJSON)

    # get the data from the folder
    inputpath = fn_etc.askforinput(
        message = 'Enter the path of the folder containing ' + ps['InputFileExt'] + ' data tables',
        errormessage= 'The folder you provided does not exist or you have provided the path to a file.',
        defaultval= default_input_path,
        isvalid = lambda v : os.path.isdir(v))

    # ------------------------------------------------------------------
    # Stage 2: choose the reduction mode — a fixed maximum point count or
    # a fraction of each file's original points.
    # ------------------------------------------------------------------
    reduction_method = fn_etc.askforinput(
        message = 'Do you want to reduce data to [1]-Maximum total points or [2]-Fraction of original points? (Enter 1 or 2)',
        errormessage= 'Type the number 1 or 2 and press enter',
        defaultval= '2',
        isvalid = lambda v : v in ['1','2'])

    if reduction_method in ['1']:
        max_points_per_set = fn_etc.askforinput(
            message = 'Enter the maximum number of points to be retained from each dataset',
            errormessage= 'Please enter a non-zero positive integer',
            defaultval= '100000',
            isvalid = lambda v: v.isdigit() and int(v) >= 1)
        max_points_per_set = int(max_points_per_set)
        # Zero marks the 'fraction' mode as inactive.
        fraction_points_per_set = 0
        default_outfolder = os.path.join(inputpath, 'Reduced_' + str(max_points_per_set) + '_Pts_Max')
    elif reduction_method in ['2']:
        fraction_points_per_set = fn_etc.askforinput(
            message = 'Enter the fraction of points to be retained from each dataset',
            errormessage= 'Please enter a number which is greater than zero and less than one',
            defaultval= '0.5',
            isvalid = lambda v: v.replace('.','').isdigit() and float(v) > 0 and float(v) < 1)
        fraction_points_per_set = float(fraction_points_per_set)
        # Recomputed per-file from the fraction inside the processing loop.
        max_points_per_set = 0
        default_outfolder = os.path.join(inputpath, 'Reduced_' + str(fraction_points_per_set) + 'x_Pts')

    s1_prep_outputpath = fn_etc.askforinput(
        message = 'Enter the name of the output folder',
        errormessage= 'The output folder must be named',
        defaultval= os.path.abspath(default_outfolder),
        isvalid = lambda v : len(v) > 0)

    # get a list of input files from the given inputfolder
    files = natsorted([i for i in os.listdir(inputpath) if os.path.isfile(os.path.join(inputpath, i)) and ps['InputFileExt'] in i])
    total_files = np.shape(files)[0]

    # check the starting_index value in case we are restarting a run
    if starting_index != 0:
        reset_starting_index = fn_etc.askforinput(
            message = 'Current Index is set to ' + str(starting_index) + ', i.e. begin with File ' + str(starting_index + 1) + '. Do you want to reset it to zero? (Y or N)',
            errormessage= 'Type Y or N',
            defaultval= 'y',
            isvalid = lambda v : v.lower() in ['y','n','yes','no'])
        if reset_starting_index.lower() in ['y','yes']:
            starting_index = 0
            print('Current index has been reset to zero. Processing will begin from the first file in the list.')
        else:
            print('Keeping the current index. Processing will begin with File ' + str(starting_index + 1) + ' in the list.')
    current_index = starting_index

    # check the finishing_index value in case we are restarting a run
    if finishing_index != 0:
        reset_finishing_index = fn_etc.askforinput(
            message = 'Current Index is set to ' + str(finishing_index) + ', i.e. end processing after File ' + str(finishing_index - 1) + ' is done. Do you want to reset it and process all files? (Y or N)',
            errormessage= 'Type Y or N',
            defaultval= 'y',
            isvalid = lambda v : v.lower() in ['y','n','yes','no'])
        if reset_finishing_index.lower() in ['y','yes']:
            finishing_index = total_files
            print('Finishing Index has been reset and all files in the folder will be processed.')
        else:
            print('Keeping the current index. Processing will end once File ' + str(finishing_index) + ' is done.')
    else:
        finishing_index = total_files

    proceed_with_processing = fn_etc.askforinput(
        message = 'When you are ready to proceed type P and Enter (or X to cancel everything and exit)',
        errormessage= 'Type P or X',
        defaultval= 'P',
        isvalid = lambda v : v.lower() in ['p','x'])

    if proceed_with_processing.lower() in ['p']:
        print('Rightyo, off we go...')
    else:
        print("That's ok. Maybe next time?")
        exit()

    #make the folder for the output data
    if not os.path.exists(s1_prep_outputpath):
        os.makedirs(s1_prep_outputpath)

    # ------------------------------------------------------------------
    # Stage 3: process all the files.
    # ------------------------------------------------------------------
    for fileIdx in range(starting_index, finishing_index):
        current_file = files[fileIdx]
        output_prefix = os.path.splitext(current_file)[0]
        fn_etc.info_msg(str(fileIdx + 1) + ' of ' + str(total_files) + '\t' + current_file)
        print('Loading data...', end='', flush=True)
        datatable = np.genfromtxt(os.path.join(inputpath, current_file),
                                  delimiter=ps['InputFileDelimiter'],
                                  skip_header=1) # names=True
        # will be exporting as tab-delimited from here, so swap out original delimiters in the header for tabs
        with open(os.path.join(inputpath, current_file), 'r') as f:
            ps['TableHeaders'] = f.readline().strip()
        TotalPointsThisImage = datatable.shape[0]
        print('Done (' + str(TotalPointsThisImage) + ' points)')

        # duplicate xy screening: keep only the first of each identical
        # (x, y) coordinate pair, preserving original row order.
        if doFilterDuplicates:
            data_xy = np.concatenate((datatable[:, ps['xCol'], None], datatable[:, ps['yCol'], None]), axis=1)
            _, uniq_idx = np.unique(data_xy, axis=0, return_index=True)
            if uniq_idx.shape[0] < datatable.shape[0]:
                uniq_idx = np.sort(uniq_idx)
                datatable = datatable[uniq_idx,:]
                oldTotalPoints = TotalPointsThisImage
                TotalPointsThisImage = datatable.shape[0]
                DuplicatePointsRemoved = oldTotalPoints - TotalPointsThisImage
            else:
                DuplicatePointsRemoved = 0
                print('Screened for duplicate xy points: none were found')
            del data_xy

        # In 'fraction' mode, compute this file's point budget.
        if fraction_points_per_set > 0:
            max_points_per_set = int(fraction_points_per_set * TotalPointsThisImage)

        if TotalPointsThisImage <= max_points_per_set:
            print('This file has the same or fewer points than the specified maximum (' + str(max_points_per_set) + ') points. It will be copied to the output folder as-is.')
            datatable_reduced = datatable
            reduced_output_fname = os.path.join(s1_prep_outputpath, output_prefix + '_copied' + ps['InputFileExt'])
        else:
            print('Choosing ' + str(max_points_per_set) + ' points for the reduced dataset (' + str(round((max_points_per_set / TotalPointsThisImage) * 100, 2)) + '% of original total points)')
            # keep a random subset of the points up to the maximum number of points specified
            keepers_idx = np.random.choice(datatable.shape[0], max_points_per_set, replace=False)
            datatable_reduced = datatable[keepers_idx]
            reduced_output_fname = os.path.join(s1_prep_outputpath, output_prefix + '_reduced' + ps['InputFileExt'])

        # save the reduced datatable to the output folder
        print('Saving reduced dataset...', end='', flush=True)
        np.savetxt(reduced_output_fname, datatable_reduced, delimiter=ps['InputFileDelimiter'], header=ps['TableHeaders'], fmt="%s")
        print('Done')

        # clean up
        _ = gc.collect()
        print('Finished file ' + str(fileIdx + 1) + ' - ' + output_prefix + ' - at ' + datetime.datetime.now().strftime('%H:%M:%S on %d %B') + '\n')
        ###
        #####
        # End of per-fileIdx processing
        #####
        ###

    fn_etc.ok_msg('Finished data preparation for all images.')
    print('The input folder was\t' + inputpath)
    print('The output folder was\t' + s1_prep_outputpath)
    print('-------------------------------------------------\n\tCompleted!\n-------------------------------------------------')
| 46.858657 | 207 | 0.618882 |
d9843954431b5b52ec43e3ae9e5373ec437b4722 | 106,536 | py | Python | scripts/pfg.py | sjkelly/openlane | 0fec8c8fb2382d3d487127face5109ec7d2baa51 | [
"Apache-2.0"
] | 2 | 2021-09-23T01:55:01.000Z | 2022-03-10T04:06:44.000Z | scripts/pfg.py | sjkelly/openlane | 0fec8c8fb2382d3d487127face5109ec7d2baa51 | [
"Apache-2.0"
] | null | null | null | scripts/pfg.py | sjkelly/openlane | 0fec8c8fb2382d3d487127face5109ec7d2baa51 | [
"Apache-2.0"
] | 2 | 2021-09-23T01:55:04.000Z | 2022-01-30T10:01:33.000Z | #!/usr/bin/env python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#--------------------------------------------------------
# Padframe Editor and Core Floorplanner
#
#--------------------------------------------------------
# Written by Tim Edwards
# efabless, inc.
# April 24, 2019
# Version 0.5
# Based on https://github.com/YosysHQ/padring (requirement)
# Update: May 9, 2019 to add console message window
# Update: May 10, 2019 to incorporate core floorplanning
# Update: Jan 31, 2020 to allow batch operation
#--------------------------------------------------------
import os
import re
import sys
import glob
import json
import math
import shutil
import signal
import select
import subprocess
import faulthandler
import tkinter
from tkinter import ttk
from tkinter import filedialog
import tksimpledialog
from consoletext import ConsoleText
# User preferences file (if it exists); init_gui() reads e.g. 'fontsize' from it.
prefsfile = '~/design/.profile/prefs.json'
#------------------------------------------------------
# Dialog for entering a pad
#------------------------------------------------------
class PadNameDialog(tksimpledialog.Dialog):
    """Modal dialog asking the user to enter a new (pad group) name."""

    def body(self, master, warning=None, seed=None):
        """Build the dialog widgets; return the entry widget for initial focus.

        warning -- optional message shown above the prompt
        seed    -- optional initial text placed in the entry field
        """
        if warning:
            warn_label = ttk.Label(master, text=warning)
            warn_label.grid(row = 0, columnspan = 2, sticky = 'wns')
        prompt = ttk.Label(master, text="Enter new group name:")
        prompt.grid(row = 1, column = 0, sticky = 'wns')
        entry = ttk.Entry(master)
        entry.grid(row = 1, column = 1, sticky = 'ewns')
        if seed:
            entry.insert(0, seed)
        self.nentry = entry
        return self.nentry # initial focus

    def apply(self):
        """Return the text the user typed."""
        return self.nentry.get()
#------------------------------------------------------
# Dialog for entering core dimensions
#------------------------------------------------------
class CoreSizeDialog(tksimpledialog.Dialog):
    """Modal dialog asking for the chip core dimensions (width x height)."""

    def body(self, master, warning="Chip core dimensions", seed=None):
        """Build the dialog widgets; return the entry widget for initial focus.

        warning -- message shown above the prompt
        seed    -- optional initial text placed in the entry field
        """
        if warning:
            warn_label = ttk.Label(master, text=warning)
            warn_label.grid(row = 0, columnspan = 2, sticky = 'wns')
        prompt = ttk.Label(master, text="Enter core width x height (microns):")
        prompt.grid(row = 1, column = 0, sticky = 'wns')
        entry = ttk.Entry(master)
        entry.grid(row = 1, column = 1, sticky = 'ewns')
        if seed:
            entry.insert(0, seed)
        self.nentry = entry
        return self.nentry # initial focus

    def apply(self):
        """Return the text the user typed."""
        return self.nentry.get()
#------------------------------------------------
# SoC Floorplanner and Padframe Generator GUI
#------------------------------------------------
class SoCFloorplanner(ttk.Frame):
"""Open Galaxy Pad Frame Generator."""
def __init__(self, parent = None, *args, **kwargs):
'''See the __init__ for Tkinter.Toplevel.'''
ttk.Frame.__init__(self, parent, *args[1:], **kwargs)
self.root = parent
self.init_data()
if args[0] == True:
self.do_gui = True
self.init_gui()
else:
self.do_gui = False
self.use_console = False
    def on_quit(self):
        """Exits program."""
        # quit() raises SystemExit, tearing down the Tk mainloop with it.
        quit()
    def init_gui(self):
        """Builds GUI: styles, scrolled layout canvas, console, and button bar."""
        global prefsfile

        message = []        # (unused; kept for compatibility)
        fontsize = 11

        # Read user preferences file, get default font size from it.
        prefspath = os.path.expanduser(prefsfile)
        if os.path.exists(prefspath):
            with open(prefspath, 'r') as f:
                self.prefs = json.load(f)
            if 'fontsize' in self.prefs:
                fontsize = self.prefs['fontsize']
        else:
            self.prefs = {}

        # Named ttk styles used throughout the application widgets.
        s = ttk.Style()
        available_themes = s.theme_names()
        s.theme_use(available_themes[0])
        s.configure('normal.TButton', font=('Helvetica', fontsize), border = 3, relief = 'raised')
        s.configure('title.TLabel', font=('Helvetica', fontsize, 'bold italic'),
                        foreground = 'brown', anchor = 'center')
        s.configure('blue.TLabel', font=('Helvetica', fontsize), foreground = 'blue')
        s.configure('normal.TLabel', font=('Helvetica', fontsize))
        s.configure('normal.TCheckbutton', font=('Helvetica', fontsize))
        s.configure('normal.TMenubutton', font=('Helvetica', fontsize))
        s.configure('normal.TEntry', font=('Helvetica', fontsize), background='white')
        s.configure('pad.TLabel', font=('Helvetica', fontsize), foreground = 'blue', relief = 'flat')
        s.configure('select.TLabel', font=('Helvetica', fontsize, 'bold'), foreground = 'white',
                        background = 'blue', relief = 'flat')

        # parent.withdraw()
        self.root.title('Padframe Generator and Core Floorplanner')
        self.root.option_add('*tearOff', 'FALSE')
        self.pack(side = 'top', fill = 'both', expand = 'true')
        self.root.protocol("WM_DELETE_WINDOW", self.on_quit)

        # Vertical paned window: floorplan view on top, console + buttons below.
        pane = tkinter.PanedWindow(self, orient = 'vertical', sashrelief = 'groove',
                        sashwidth = 6)
        pane.pack(side = 'top', fill = 'both', expand = 'true')
        self.toppane = ttk.Frame(pane)
        self.botpane = ttk.Frame(pane)
        self.toppane.columnconfigure(0, weight = 1)
        self.toppane.rowconfigure(0, weight = 1)
        self.botpane.columnconfigure(0, weight = 1)
        self.botpane.rowconfigure(0, weight = 1)

        # Scrolled frame using canvas widget
        self.pframe = tkinter.Frame(self.toppane)
        self.pframe.grid(row = 0, column = 0, sticky = 'news')
        self.pframe.rowconfigure(0, weight = 1)
        self.pframe.columnconfigure(0, weight = 1)

        # Add column on the left, listing all groups and the pads they belong to.
        # This starts as just a frame to be filled. Use a canvas to create a
        # scrolled frame.

        # The primary frame holds the canvas
        self.canvas = tkinter.Canvas(self.pframe, background = "white")
        self.canvas.grid(row = 0, column = 0, sticky = 'news')

        # Add Y scrollbar to pad list window
        xscrollbar = ttk.Scrollbar(self.pframe, orient = 'horizontal')
        xscrollbar.grid(row = 1, column = 0, sticky = 'news')
        yscrollbar = ttk.Scrollbar(self.pframe, orient = 'vertical')
        yscrollbar.grid(row = 0, column = 1, sticky = 'news')
        self.canvas.config(xscrollcommand = xscrollbar.set)
        xscrollbar.config(command = self.canvas.xview)
        self.canvas.config(yscrollcommand = yscrollbar.set)
        yscrollbar.config(command = self.canvas.yview)

        # Mouse wheel (X11 buttons 4/5) zooms the floorplan view.
        self.canvas.bind("<Button-4>", self.on_scrollwheel)
        self.canvas.bind("<Button-5>", self.on_scrollwheel)

        # Configure callback
        self.canvas.bind("<Configure>", self.frame_configure)

        # Add a text window to capture output. Redirect print statements to it.
        self.console = ttk.Frame(self.botpane)
        self.console.grid(column = 0, row = 0, sticky = "news")
        self.text_box = ConsoleText(self.console, wrap='word', height = 4)
        self.text_box.pack(side='left', fill='both', expand='true')
        console_scrollbar = ttk.Scrollbar(self.console)
        console_scrollbar.pack(side='right', fill='y')

        # Attach console to scrollbar
        self.text_box.config(yscrollcommand = console_scrollbar.set)
        console_scrollbar.config(command = self.text_box.yview)

        # Add the bottom bar with buttons
        self.bbar = ttk.Frame(self.botpane)
        self.bbar.grid(column = 0, row = 1, sticky = "news")
        self.bbar.import_button = ttk.Button(self.bbar, text='Import',
                command=self.vlogimport, style='normal.TButton')
        self.bbar.import_button.grid(column=0, row=0, padx = 5)
        self.bbar.generate_button = ttk.Button(self.bbar, text='Generate',
                command=self.generate, style='normal.TButton')
        self.bbar.generate_button.grid(column=1, row=0, padx = 5)
        self.bbar.save_button = ttk.Button(self.bbar, text='Save',
                command=self.save, style='normal.TButton')
        self.bbar.save_button.grid(column=2, row=0, padx = 5)
        self.bbar.cancel_button = ttk.Button(self.bbar, text='Quit',
                command=self.on_quit, style='normal.TButton')
        self.bbar.cancel_button.grid(column=3, row=0, padx = 5)

        pane.add(self.toppane)
        pane.add(self.botpane)
        pane.paneconfig(self.toppane, stretch='first')
    def init_data(self):
        """Reset all internal state to an empty floorplan."""
        self.vlogpads = []          # pad instances (filled by import step)
        self.corecells = []         # core instances (filled by import step)
        # Pad instances assigned to each side of the padframe
        self.Npads = []
        self.Spads = []
        self.Epads = []
        self.Wpads = []
        # Corner pad instances
        self.NEpad = []
        self.NWpad = []
        self.SEpad = []
        self.SWpad = []
        self.coregroup = []         # placed core cells
        self.celldefs = []          # cell definitions (name/width/height/class)
        self.coredefs = []
        self.selected = []
        self.ioleflibs = []         # I/O LEF library files
        # Die boundary corners (lower-left / upper-right)
        self.llx = 0
        self.lly = 0
        self.urx = 0
        self.ury = 0
        # State of the current drag-and-drop operation (canvas pixels);
        # (x0, y0) is the drag start, (x, y) the last-seen position.
        self.event_data = {}
        self.event_data['x0'] = 0
        self.event_data['y0'] = 0
        self.event_data['x'] = 0
        self.event_data['y'] = 0
        self.event_data['tag'] = None
        self.scale = 1.0            # canvas zoom factor (see on_scrollwheel)
        self.margin = 100
        self.pad_rotation = 0
        # Messages printed before output streams exist are queued here and
        # replayed by text_to_console().
        self.init_messages = []
        self.stdout = None
        self.stderr = None
        self.keep_cfg = False
        self.ef_format = False
        self.use_console = False
    def init_padframe(self):
        """Run the batch-mode padframe flow:  determine the project, import
        the netlist, read any existing placement, resolve, and generate.
        (vlogimport/readplacement/resolve/generate are defined elsewhere in
        this class.)"""
        self.set_project()
        self.vlogimport()
        self.readplacement(precheck=True)
        self.resolve()
        self.generate(0)
# Local routines for handling printing to the text console
def print(self, message, file=None, end='\n', flush=True):
if not file:
if not self.use_console:
file = sys.stdout
else:
file = ConsoleText.StdoutRedirector(self.text_box)
if self.stdout:
print(message, file=file, end=end)
if flush:
self.stdout.flush()
self.update_idletasks()
else:
self.init_messages.append(message)
def text_to_console(self):
# Redirect stdout and stderr to the console as the last thing to do. . .
# Otherwise errors in the GUI get sucked into the void.
self.stdout = sys.stdout
self.stderr = sys.stderr
if self.use_console:
sys.stdout = ConsoleText.StdoutRedirector(self.text_box)
sys.stderr = ConsoleText.StderrRedirector(self.text_box)
if len(self.init_messages) > 0:
for message in self.init_messages:
self.print(message)
self.init_messages = []
# Set the project name(s). This is the name of the top-level verilog.
# The standard protocol is that the project directory contains a file
# project.json that defines a name 'ip-name' that is the same as the
# layout name, the verilog module name, etc.
    def set_project(self):
        """Determine the project name and path.

        The project name is read from 'project.json' (key data-sheet/ip-name)
        when such a file exists in or above the working directory; otherwise
        the project directory name is used.  Sets self.project and
        self.projectpath.

        NOTE(review): reads self.projectpath before assigning it; it is not
        set by init_data(), so it is presumably initialized elsewhere before
        this is called — confirm.
        """
        # Check pwd
        pwdname = self.projectpath if self.projectpath else os.getcwd()
        subdir = os.path.split(pwdname)[1]
        # If started from a mag/ or verilog/ subdirectory, step up one level.
        if subdir == 'mag' or subdir == 'verilog':
            projectpath = os.path.split(pwdname)[0]
        else:
            projectpath = pwdname
        projectroot = os.path.split(projectpath)[0]
        projectdirname = os.path.split(projectpath)[1]

        # Check for project.json
        jsonname = None
        if os.path.isfile(projectpath + '/project.json'):
            jsonname = projectpath + '/project.json'
        elif os.path.isfile(projectroot + '/' + projectdirname + '.json'):
            jsonname = projectroot + '/' + projectdirname + '.json'
        if os.path.isfile(projectroot + '/project.json'):
            # Just in case this was started from some other subdirectory
            projectpath = projectroot
            jsonname = projectroot + '/project.json'

        if jsonname:
            self.print('Reading project JSON file ' + jsonname)
            with open(jsonname, 'r') as ifile:
                topdata = json.load(ifile)
            if 'data-sheet' in topdata:
                dsheet = topdata['data-sheet']
                if 'ip-name' in dsheet:
                    self.project = dsheet['ip-name']
                    self.projectpath = projectpath
        else:
            self.print('No project JSON file; using directory name as the project name.')
            self.project = os.path.split(projectpath)[1]
            self.projectpath = projectpath
        self.print('Project name is ' + self.project + ' (' + self.projectpath + ')')
# Functions for drag-and-drop capability
def add_draggable(self, tag):
self.canvas.tag_bind(tag, '<ButtonPress-1>', self.on_button_press)
self.canvas.tag_bind(tag, '<ButtonRelease-1>', self.on_button_release)
self.canvas.tag_bind(tag, '<B1-Motion>', self.on_button_motion)
self.canvas.tag_bind(tag, '<ButtonPress-2>', self.on_button2_press)
self.canvas.tag_bind(tag, '<ButtonPress-3>', self.on_button3_press)
def on_button_press(self, event):
'''Begining drag of an object'''
# Find the closest item, then record its tag.
locx = event.x + self.canvas.canvasx(0)
locy = event.y + self.canvas.canvasy(0)
item = self.canvas.find_closest(locx, locy)[0]
self.event_data['tag'] = self.canvas.gettags(item)[0]
self.event_data['x0'] = event.x
self.event_data['y0'] = event.y
self.event_data['x'] = event.x
self.event_data['y'] = event.y
def on_button2_press(self, event):
'''Flip an object (excluding corners)'''
locx = event.x + self.canvas.canvasx(0)
locy = event.y + self.canvas.canvasy(0)
item = self.canvas.find_closest(locx, locy)[0]
tag = self.canvas.gettags(item)[0]
try:
corecell = next(item for item in self.coregroup if item['name'] == tag)
except:
try:
pad = next(item for item in self.Npads if item['name'] == tag)
except:
pad = None
if not pad:
try:
pad = next(item for item in self.Epads if item['name'] == tag)
except:
pad = None
if not pad:
try:
pad = next(item for item in self.Spads if item['name'] == tag)
except:
pad = None
if not pad:
try:
pad = next(item for item in self.Wpads if item['name'] == tag)
except:
pad = None
if not pad:
self.print('Error: Object cannot be flipped.')
else:
# Flip the pad (in the only way meaningful for the pad).
orient = pad['o']
if orient == 'N':
pad['o'] = 'FN'
elif orient == 'E':
pad['o'] = 'FW'
elif orient == 'S':
pad['o'] = 'FS'
elif orient == 'W':
pad['o'] = 'FE'
elif orient == 'FN':
pad['o'] = 'N'
elif orient == 'FE':
pad['o'] = 'W'
elif orient == 'FS':
pad['o'] = 'S'
elif orient == 'FW':
pad['o'] = 'E'
else:
# Flip the cell. Use the DEF meaning of flip, which is to
# add or subtract 'F' from the orientation.
orient = corecell['o']
if not 'F' in orient:
corecell['o'] = 'F' + orient
else:
corecell['o'] = orient[1:]
# Redraw
self.populate(0)
def on_button3_press(self, event):
'''Rotate a core object (no pads) '''
locx = event.x + self.canvas.canvasx(0)
locy = event.y + self.canvas.canvasy(0)
item = self.canvas.find_closest(locx, locy)[0]
tag = self.canvas.gettags(item)[0]
try:
corecell = next(item for item in self.coregroup if item['name'] == tag)
except:
self.print('Error: Object cannot be rotated.')
else:
# Modify its orientation
orient = corecell['o']
if orient == 'N':
corecell['o'] = 'E'
elif orient == 'E':
corecell['o'] = 'S'
elif orient == 'S':
corecell['o'] = 'W'
elif orient == 'W':
corecell['o'] = 'N'
elif orient == 'FN':
corecell['o'] = 'FW'
elif orient == 'FW':
corecell['o'] = 'FS'
elif orient == 'FS':
corecell['o'] = 'FE'
elif orient == 'FE':
corecell['o'] = 'FN'
# rewrite the core DEF file
self.write_core_def()
# Redraw
self.populate(0)
def on_button_motion(self, event):
'''Handle dragging of an object'''
# compute how much the mouse has moved
delta_x = event.x - self.event_data['x']
delta_y = event.y - self.event_data['y']
# move the object the appropriate amount
self.canvas.move(self.event_data['tag'], delta_x, delta_y)
# record the new position
self.event_data['x'] = event.x
self.event_data['y'] = event.y
def on_button_release(self, event):
'''End drag of an object'''
# Find the pad associated with the tag and update its position information
tag = self.event_data['tag']
# Collect pads in clockwise order. Note that E and S rows are not clockwise
allpads = []
allpads.extend(self.Npads)
allpads.extend(self.NEpad)
allpads.extend(reversed(self.Epads))
allpads.extend(self.SEpad)
allpads.extend(reversed(self.Spads))
allpads.extend(self.SWpad)
allpads.extend(self.Wpads)
allpads.extend(self.NWpad)
# Create a list of row references (also in clockwise order, but no reversing)
padrows = [self.Npads, self.NEpad, self.Epads, self.SEpad, self.Spads, self.SWpad, self.Wpads, self.NWpad]
# Record the row or corner where this pad was located before the move
for row in padrows:
try:
pad = next(item for item in row if item['name'] == tag)
except:
pass
else:
padrow = row
break
# Currently there is no procedure to move a pad out of the corner
# position; corners are fixed by definition.
if padrow == self.NEpad or padrow == self.SEpad or padrow == self.SWpad or padrow == self.NWpad:
# Easier to run generate() than to put the pad back. . .
self.generate(0)
return
# Find the original center point of the pad being moved
padllx = pad['x']
padlly = pad['y']
if pad['o'] == 'N' or pad['o'] == 'S':
padurx = padllx + pad['width']
padury = padlly + pad['height']
else:
padurx = padllx + pad['height']
padury = padlly + pad['width']
padcx = (padllx + padurx) / 2
padcy = (padlly + padury) / 2
# Add distance from drag information (note that drag position in y
# is negative relative to the chip dimensions)
padcx += (self.event_data['x'] - self.event_data['x0']) / self.scale
padcy -= (self.event_data['y'] - self.event_data['y0']) / self.scale
# reset the drag information
self.event_data['tag'] = None
self.event_data['x'] = 0
self.event_data['y'] = 0
self.event_data['x0'] = 0
self.event_data['y0'] = 0
# Find the distance from the pad to all other pads, and get the two
# closest entries.
wwidth = self.urx - self.llx
dist0 = wwidth
dist1 = wwidth
pad0 = None
pad1 = None
for npad in allpads:
if npad == pad:
continue
npadllx = npad['x']
npadlly = npad['y']
if npad['o'] == 'N' or npad['o'] == 'S':
npadurx = npadllx + npad['width']
npadury = npadlly + npad['height']
else:
npadurx = npadllx + npad['height']
npadury = npadlly + npad['width']
npadcx = (npadllx + npadurx) / 2
npadcy = (npadlly + npadury) / 2
deltx = npadcx - padcx
delty = npadcy - padcy
pdist = math.sqrt(deltx * deltx + delty * delty)
if pdist < dist0:
dist1 = dist0
pad1 = pad0
dist0 = pdist
pad0 = npad
elif pdist < dist1:
dist1 = pdist
pad1 = npad
# Diagnostic
# self.print('Two closest pads to pad ' + pad['name'] + ' (' + pad['cell'] + '): ')
# self.print(pad0['name'] + ' (' + pad0['cell'] + ') dist = ' + str(dist0))
# self.print(pad1['name'] + ' (' + pad1['cell'] + ') dist = ' + str(dist1))
# Record the row or corner where these pads are
for row in padrows:
try:
testpad = next(item for item in row if item['name'] == pad0['name'])
except:
pass
else:
padrow0 = row
break
for row in padrows:
try:
testpad = next(item for item in row if item['name'] == pad1['name'])
except:
pass
else:
padrow1 = row
break
# Remove pad from its own row
padrow.remove(pad)
# Insert pad into new row. Watch for wraparound from the last entry to the first
padidx0 = allpads.index(pad0)
padidx1 = allpads.index(pad1)
if padidx0 == 0 and padidx1 > 2:
padidx1 = -1
if padidx1 > padidx0:
padafter = pad1
rowafter = padrow1
padbefore = pad0
rowbefore = padrow0
else:
padafter = pad0
rowafter = padrow0
padbefore = pad1
rowbefore = padrow1
# Do not replace corner positions (? may be necessary ?)
if rowafter == self.NWpad:
self.Wpads.append(pad)
elif rowafter == self.NWpad:
self.Npads.append(pad)
elif rowafter == self.SEpad:
self.Epads.insert(0, pad)
elif rowafter == self.SWpad:
self.Spads.insert(0, pad)
elif rowafter == self.Wpads or rowafter == self.Npads:
idx = rowafter.index(padafter)
rowafter.insert(idx, pad)
elif rowbefore == self.NEpad:
self.Epads.append(pad)
elif rowbefore == self.SEpad:
self.Spads.append(pad)
else:
# rows E and S are ordered counterclockwise
idx = rowbefore.index(padbefore)
rowbefore.insert(idx, pad)
# Re-run padring
self.generate(0)
def on_scrollwheel(self, event):
if event.num == 4:
zoomval = 1.1;
elif event.num == 5:
zoomval = 0.9;
else:
zoomval = 1.0;
self.scale *= zoomval
self.canvas.scale('all', -15 * zoomval, -15 * zoomval, zoomval, zoomval)
self.event_data['x'] *= zoomval
self.event_data['y'] *= zoomval
self.event_data['x0'] *= zoomval
self.event_data['y0'] *= zoomval
self.frame_configure(event)
# Callback functions similar to the pad event callbacks above, but for
# core cells. Unlike pad cells, core cells can be rotated and flipped
# arbitrarily, and they do not force a recomputation of the padframe
# unless their position forces the padframe to expand
def add_core_draggable(self, tag):
self.canvas.tag_bind(tag, '<ButtonPress-1>', self.on_button_press)
self.canvas.tag_bind(tag, '<ButtonRelease-1>', self.core_on_button_release)
self.canvas.tag_bind(tag, '<B1-Motion>', self.on_button_motion)
self.canvas.tag_bind(tag, '<ButtonPress-2>', self.on_button2_press)
self.canvas.tag_bind(tag, '<ButtonPress-3>', self.on_button3_press)
def core_on_button_release(self, event):
'''End drag of a core cell'''
# Find the pad associated with the tag and update its position information
tag = self.event_data['tag']
try:
corecell = next(item for item in self.coregroup if item['name'] == tag)
except:
self.print('Error: cell ' + item['name'] + ' is not in coregroup!')
else:
# Modify its position values
corex = corecell['x']
corey = corecell['y']
# Add distance from drag information (note that drag position in y
# is negative relative to the chip dimensions)
deltax = (self.event_data['x'] - self.event_data['x0']) / self.scale
deltay = (self.event_data['y'] - self.event_data['y0']) / self.scale
corecell['x'] = corex + deltax
corecell['y'] = corey - deltay
# rewrite the core DEF file
self.write_core_def()
# reset the drag information
self.event_data['tag'] = None
self.event_data['x'] = 0
self.event_data['y'] = 0
self.event_data['x0'] = 0
self.event_data['y0'] = 0
# Critically needed or else frame does not resize to scrollbars!
    def grid_configure(self, padx, pady):
        """Intentional no-op override so the frame resizes to its scrollbars."""
        pass
# Redraw the chip frame view in response to changes in the pad list
def redraw_frame(self):
self.canvas.coords('boundary', self.llx, self.urx, self.lly, self.ury)
# Update the canvas scrollregion to incorporate all the interior windows
def frame_configure(self, event):
if self.do_gui == False:
return
self.update_idletasks()
bbox = self.canvas.bbox("all")
try:
newbbox = (-15, -15, bbox[2] + 15, bbox[3] + 15)
except:
pass
else:
self.canvas.configure(scrollregion = newbbox)
# Fill the GUI entries with resident data
def populate(self, level):
    """Redraw the entire floorplan canvas from the resident pad/core lists.

    level guards against runaway recursion: populate() may end by calling
    self.generate(), which itself calls populate(level + 1); at level > 1
    the call aborts immediately.
    """
    if self.do_gui == False:
        return
    if level > 1:
        self.print('Recursion error: Returning now.')
        return
    self.print('Populating floorplan view.')
    # Remove all entries from the canvas
    self.canvas.delete('all')
    # Pass 1 over pads: compute and cache each placed pad's bounding box
    # (llx/lly/urx/ury) in chip coordinates.
    allpads = self.Npads + self.NEpad + self.Epads + self.SEpad + self.Spads + self.SWpad + self.Wpads + self.NWpad
    notfoundlist = []
    for pad in allpads:
        if 'x' not in pad:
            self.print('Error: Pad ' + pad['name'] + ' has no placement information.')
            continue
        llx = int(pad['x'])
        lly = int(pad['y'])
        pado = pad['o']
        try:
            padcell = next(item for item in self.celldefs if item['name'] == pad['cell'])
        except:
            # This should not happen (failsafe); warn once per missing cell.
            if pad['cell'] not in notfoundlist:
                self.print('Warning: there is no cell named ' + pad['cell'] + ' in the libraries.')
                notfoundlist.append(pad['cell'])
            continue
        padw = padcell['width']
        padh = padcell['height']
        # E/W orientations swap the cell's width and height.
        if 'N' in pado or 'S' in pado:
            urx = int(llx + padw)
            ury = int(lly + padh)
        else:
            urx = int(llx + padh)
            ury = int(lly + padw)
        pad['llx'] = llx
        pad['lly'] = lly
        pad['urx'] = urx
        pad['ury'] = ury
    # Note that the DEF coordinate system is reversed in Y from the canvas. . .
    height = self.ury - self.lly
    # Pass 2 over pads: draw each one as a class-colored rectangle with an
    # orientation marker line and a centered name label.
    for pad in allpads:
        llx = pad['llx']
        lly = height - pad['lly']
        urx = pad['urx']
        ury = height - pad['ury']
        tag_id = pad['name']
        # Color-code by LEF class/subclass.
        if 'subclass' in pad:
            if pad['subclass'] == 'POWER':
                pad_color = 'orange2'
            elif pad['subclass'] == 'INOUT':
                pad_color = 'yellow'
            elif pad['subclass'] == 'OUTPUT':
                pad_color = 'powder blue'
            elif pad['subclass'] == 'INPUT':
                pad_color = 'goldenrod1'
            elif pad['subclass'] == 'SPACER':
                pad_color = 'green yellow'
            elif pad['class'] == 'ENDCAP':
                pad_color = 'green yellow'
            elif pad['subclass'] == '' or pad['class'] == ';':
                pad_color = 'khaki1'
            else:
                self.print('Unhandled pad class ' + pad['class'])
                pad_color = 'gray'
        else:
            pad_color = 'gray'
        sllx = self.scale * llx
        slly = self.scale * lly
        surx = self.scale * urx
        sury = self.scale * ury
        self.canvas.create_rectangle((sllx, slly), (surx, sury), fill=pad_color, tags=[tag_id])
        cx = (sllx + surx) / 2
        cy = (slly + sury) / 2
        # Marker length: at most 10, clipped to the pad's smaller dimension.
        s = 10 if pad['width'] >= 10 else pad['width']
        if pad['height'] < s:
            s = pad['height']
        # Create an indicator line at the bottom left corner of the cell
        if pad['o'] == 'N':
            allx = sllx
            ally = slly - s
            aurx = sllx + s
            aury = slly
        elif pad['o'] == 'E':
            allx = sllx
            ally = sury + s
            aurx = sllx + s
            aury = sury
        elif pad['o'] == 'S':
            allx = surx
            ally = sury + s
            aurx = surx - s
            aury = sury
        elif pad['o'] == 'W':
            allx = surx
            ally = slly - s
            aurx = surx - s
            aury = slly
        elif pad['o'] == 'FN':
            allx = surx
            ally = slly - s
            aurx = surx - s
            aury = slly
        elif pad['o'] == 'FE':
            allx = surx
            ally = sury + s
            aurx = surx - s
            aury = sury
        elif pad['o'] == 'FS':
            allx = sllx
            ally = sury + s
            aurx = sllx + s
            aury = sury
        elif pad['o'] == 'FW':
            allx = sllx
            ally = slly - s
            aurx = sllx + s
            aury = slly
        self.canvas.create_line((allx, ally), (aurx, aury), tags=[tag_id])
        # Rotate text on top and bottom rows if the tkinter version allows it.
        if tkinter.TclVersion >= 8.6:
            if pad['o'] == 'N' or pad['o'] == 'S':
                angle = 90
            else:
                angle = 0
            self.canvas.create_text((cx, cy), text=pad['name'], angle=angle, tags=[tag_id])
        else:
            self.canvas.create_text((cx, cy), text=pad['name'], tags=[tag_id])
        # Make the pad draggable
        self.add_draggable(tag_id)
    # Now add the core cells.  Pass 1: cache bounding boxes (mirrors the
    # pad pass above but looks cells up in self.coredefs).
    for cell in self.coregroup:
        if 'x' not in cell:
            self.print('Error: Core cell ' + cell['name'] + ' has no placement information.')
            continue
        # else:
        #     self.print('Diagnostic: Creating object for core cell ' + cell['name'])
        llx = int(cell['x'])
        lly = int(cell['y'])
        cello = cell['o']
        try:
            corecell = next(item for item in self.coredefs if item['name'] == cell['cell'])
        except:
            # This should not happen (failsafe)
            if cell['cell'] not in notfoundlist:
                self.print('Warning: there is no cell named ' + cell['cell'] + ' in the libraries.')
                notfoundlist.append(cell['cell'])
            continue
        cellw = corecell['width']
        cellh = corecell['height']
        if 'N' in cello or 'S' in cello:
            urx = int(llx + cellw)
            ury = int(lly + cellh)
        else:
            urx = int(llx + cellh)
            ury = int(lly + cellw)
            print('NOTE: cell ' + corecell['name'] + ' is rotated, w = ' + str(urx - llx) + '; h = ' + str(ury - lly))
        cell['llx'] = llx
        cell['lly'] = lly
        cell['urx'] = urx
        cell['ury'] = ury
    # Watch for out-of-window position in core cells.
    corellx = self.llx
    corelly = self.lly
    coreurx = self.urx
    coreury = self.ury
    # Pass 2 over core cells: track the extent of the core (to detect cells
    # outside the frame) and draw each cell.
    for cell in self.coregroup:
        if 'llx' not in cell:
            # Error message for this was handled above
            continue
        llx = cell['llx']
        lly = height - cell['lly']
        urx = cell['urx']
        ury = height - cell['ury']
        # Check for out-of-window cell
        if llx < corellx:
            corellx = llx
        if lly < corelly:
            corelly = lly
        if urx > coreurx:
            coreurx = urx
        if ury > coreury:
            coreury = ury
        tag_id = cell['name']
        cell_color = 'gray40'
        sllx = self.scale * llx
        slly = self.scale * lly
        surx = self.scale * urx
        sury = self.scale * ury
        self.canvas.create_rectangle((sllx, slly), (surx, sury), fill=cell_color, tags=[tag_id])
        cx = (sllx + surx) / 2
        cy = (slly + sury) / 2
        s = 10 if cell['width'] >= 10 else cell['width']
        if cell['height'] < s:
            s = cell['height']
        # Create an indicator line at the bottom left corner of the cell
        if cell['o'] == 'N':
            allx = sllx
            ally = slly - s
            aurx = sllx + s
            aury = slly
        elif cell['o'] == 'E':
            allx = sllx
            ally = sury + s
            aurx = sllx + s
            aury = sury
        elif cell['o'] == 'S':
            allx = surx
            ally = sury + s
            aurx = surx - s
            aury = sury
        elif cell['o'] == 'W':
            allx = surx
            ally = slly - s
            aurx = surx - s
            aury = slly
        elif cell['o'] == 'FN':
            allx = surx
            ally = slly - s
            aurx = surx - s
            aury = slly
        elif cell['o'] == 'FE':
            allx = surx
            ally = sury + s
            aurx = surx - s
            aury = sury
        elif cell['o'] == 'FS':
            allx = sllx
            ally = sury + s
            aurx = sllx + s
            aury = sury
        elif cell['o'] == 'FW':
            allx = sllx
            ally = slly - s
            aurx = sllx + s
            aury = slly
        self.canvas.create_line((allx, ally), (aurx, aury), tags=[tag_id])
        # self.print('Created entry for cell ' + cell['name'] + ' at {0:g}, {1:g}'.format(cx, cy))
        # Rotate text on top and bottom rows if the tkinter version allows it.
        if tkinter.TclVersion >= 8.6:
            if 'N' in cell['o'] or 'S' in cell['o']:
                angle = 90
            else:
                angle = 0
            self.canvas.create_text((cx, cy), text=cell['name'], angle=angle, tags=[tag_id])
        else:
            self.canvas.create_text((cx, cy), text=cell['name'], tags=[tag_id])
        # Make the core cell draggable
        self.add_core_draggable(tag_id)
    # Is there a boundary size defined?
    if self.urx > self.llx and self.ury > self.lly:
        self.create_boundary()
    # Did the core extend into negative X or Y?  If so, adjust all canvas
    # coordinates to fit in the window, or else objects cannot be reached
    # even by zooming out (since zooming is pinned on the top corner).
    offsetx = 0
    offsety = 0
    # NOTE: Probably want to check if the core exceeds the inner
    # dimension of the pad ring, not the outer (to check and to do).
    if corellx < self.llx:
        offsetx = self.llx - corellx
    if corelly < self.lly:
        offsety = self.lly - corelly
    if offsetx > 0 or offsety > 0:
        self.canvas.move("all", offsetx, offsety)
        # An offset implies that the chip is core limited, and the
        # padframe requires additional space.  This can be accomplished
        # simply by running "Generate".  NOTE: Since generate() calls
        # populate(), be VERY SURE that this does not infinitely recurse!
        self.generate(level)
# Generate a DEF file of the core area
def write_core_def(self):
    """Write the core-cell placement to mag/core.def in DEF format.

    Before writing, the whole core group is shifted right/up, if needed,
    so that every cell clears the inner edge of the padframe on the left
    and bottom by at least self.margin.  DEF distances are written in
    nanometers (microns * 1000).
    """
    self.print('Writing core placement information in DEF file "core.def".')
    mag_path = self.projectpath + '/mag'
    # The core cells must always clear the I/O pads on the left and
    # bottom (with the ad-hoc margin of self.margin).
    # To be done: draw a boundary around the core, let the edges of that
    # boundary be draggable, and let the difference between the boundary
    # and the core area define the margin.
    if self.SWpad != []:
        corellx = self.SWpad[0]['x'] + self.SWpad[0]['width'] + self.margin
        corelly = self.SWpad[0]['y'] + self.SWpad[0]['height'] + self.margin
    else:
        corellx = self.Wpads[0]['x'] + self.Wpads[0]['height'] + self.margin
        # Bug fix: the lower bound in y comes from the first south pad's
        # 'y' value (the original erroneously read its 'x' value).
        corelly = self.Spads[0]['y'] + self.Spads[0]['height'] + self.margin
    # Largest shift needed to pull every stray cell inside the bound.
    offsetx = 0
    offsety = 0
    for corecell in self.coregroup:
        if corecell['x'] < corellx:
            if corellx - corecell['x'] > offsetx:
                offsetx = corellx - corecell['x']
        if corecell['y'] < corelly:
            if corelly - corecell['y'] > offsety:
                offsety = corelly - corecell['y']
    if offsetx > 0 or offsety > 0:
        for corecell in self.coregroup:
            corecell['x'] += offsetx
            corecell['y'] += offsety
    # Now write the core DEF file
    with open(mag_path + '/core.def', 'w') as ofile:
        print('DESIGN CORE ;', file=ofile)
        print('UNITS DISTANCE MICRONS 1000 ;', file=ofile)
        print('COMPONENTS {0:d} ;'.format(len(self.coregroup)), file=ofile)
        for corecell in self.coregroup:
            print(' - ' + corecell['name'] + ' ' + corecell['cell'], file=ofile, end='')
            print(' + PLACED ( {0:d} {1:d} ) {2:s} ;'.format(int(corecell['x'] * 1000), int(corecell['y'] * 1000), corecell['o']), file=ofile)
        print('END COMPONENTS', file=ofile)
        print('END DESIGN', file=ofile)
# Create the chip boundary area
def create_boundary(self):
    """Draw the chip outline rectangle ('boundary' tag) plus a centered
    text label ('chiparea' tag) giving the chip dimensions in microns.

    The outline is drawn at the scaled chip extent padded by 10 units
    on every side.
    """
    sc = self.scale
    x0 = (self.llx - 10) * sc
    y0 = (self.lly - 10) * sc
    x1 = (self.urx + 10) * sc
    y1 = (self.ury + 10) * sc
    self.canvas.create_rectangle((x0, y0), (x1, y1), outline='plum1',
            width=2, tags=['boundary'])
    # Center the dimensions label in the middle of the chip area.
    midx = ((self.llx + self.urx) / 2) * sc
    midy = ((self.lly + self.ury) / 2) * sc
    chipw = self.urx - self.llx
    chiph = self.ury - self.lly
    label = 'Chip dimensions (um): {0:g} x {1:g}'.format(chipw, chiph)
    self.canvas.create_text((midx, midy), text=label, tags=['chiparea'])
# Rotate orientation according to self.pad_rotation.
def rotate_orientation(self, orient_in):
    """Return orient_in ('N', 'E', 'S' or 'W') rotated clockwise by
    self.pad_rotation degrees (a multiple of 90)."""
    sequence = ('N', 'E', 'S', 'W')
    steps = int(self.pad_rotation / 90)
    position = sequence.index(orient_in)
    # Wrap around the compass instead of doubling the lookup table.
    return sequence[(position + steps) % 4]
# Read a list of cell macros (name, size, class) from a LEF library
def read_lef_macros(self, libpath, libname = None, libtype = 'iolib'):
    """Scan LEF file(s) for MACRO records and return them as a list of
    dicts with keys 'name', libtype (the source LEF path), and, when
    present in the file, 'class'/'subclass', 'width'/'height' (SIZE) and
    'x'/'y' (ORIGIN).

    With libname, only that file (extension defaulting to .lef) is read;
    otherwise every .lef file in libpath is read.  As a side effect, an
    ENDCAP macro's subclass sets self.pad_rotation (ad hoc,
    foundry-dependent corner-cell convention).
    """
    libtext = {'iolib': 'I/O ', 'celllib': 'core '}.get(libtype, '')
    macros = []
    if libname:
        if os.path.splitext(libname)[1] == '':
            libname += '.lef'
        leffiles = glob.glob(libpath + '/' + libname)
    else:
        leffiles = glob.glob(libpath + '/*.lef')
    if leffiles == []:
        if libname:
            self.print('WARNING: No file ' + libpath + '/' + libname + '.lef')
        else:
            self.print('WARNING: No files ' + libpath + '/*.lef')
    for leffile in leffiles:
        libpath = os.path.split(leffile)[0]
        libname = os.path.split(libpath)[1]
        self.print('Reading LEF ' + libtext + 'library ' + leffile)
        with open(leffile, 'r') as lef:
            leflines = lef.read().splitlines()
        current = None
        inside = False
        for lefline in leflines:
            tokens = lefline.split()
            if not tokens:
                continue
            keyword = tokens[0]
            if keyword == 'MACRO':
                inside = True
                current = {'name': tokens[1], libtype: leffile}
                macros.append(current)
            elif inside:
                if keyword == 'END':
                    if len(tokens) > 1 and tokens[1] == current['name']:
                        inside = False
                elif keyword == 'CLASS':
                    current['class'] = tokens[1]
                    if len(tokens) > 2:
                        current['subclass'] = tokens[2]
                        # Use the 'ENDCAP' class to identify pad rotations
                        # other than BOTTOMLEFT.  This is somewhat ad-hoc
                        # depending on the foundry; may not be generally
                        # applicable.
                        if current['class'] == 'ENDCAP':
                            if current['subclass'] == 'TOPLEFT':
                                self.pad_rotation = 90
                            elif current['subclass'] == 'TOPRIGHT':
                                self.pad_rotation = 180
                            elif current['subclass'] == 'BOTTOMRIGHT':
                                self.pad_rotation = 270
                    else:
                        current['subclass'] = None
                elif keyword == 'SIZE':
                    current['width'] = float(tokens[1])
                    current['height'] = float(tokens[3])
                elif keyword == 'ORIGIN':
                    current['x'] = float(tokens[1])
                    current['y'] = float(tokens[2])
    return macros
# Read a list of cell names from a verilog file
# If filename is relative, then check in the same directory as the verilog
# top-level netlist (vlogpath) and in the subdirectory 'source/' of the top-
# level directory. Also check in the ~/design/ip/ directory. These are
# common include paths for the simulation.
def read_verilog_lib(self, incpath, vlogpath):
    """Return the list of module names declared in a verilog include file.

    Relative paths are resolved by trying, in order: vlogpath/,
    vlogpath/source/, and <design>/ip/ (two levels above vlogpath) —
    the common include paths for simulation.  Absolute paths and paths
    that already exist are used as-is.  Returns [] (with a console
    message) when no file is found.
    """
    iocells = []
    # Resolve the include path to an actual file location.
    if os.path.isfile(incpath) or incpath[0] == '/':
        locincpath = incpath
    else:
        locincpath = vlogpath + '/' + incpath
        if not os.path.isfile(locincpath):
            locincpath = vlogpath + '/source/' + incpath
        if not os.path.isfile(locincpath):
            projectpath = os.path.split(vlogpath)[0]
            designpath = os.path.split(projectpath)[0]
            locincpath = designpath + '/ip/' + incpath
    if not os.path.isfile(locincpath):
        self.print('File ' + incpath + ' not found (at ' + locincpath + ')!')
        return iocells
    self.print('Reading verilog library ' + locincpath)
    with open(locincpath, 'r') as vfile:
        for vline in vfile.read().splitlines():
            # Tokenize on whitespace and parentheses, dropping empties.
            tokens = [t for t in re.split('[\t ()]', vline) if t != '']
            if tokens and tokens[0] == 'module':
                iocells.append(tokens[1])
    return iocells
# Generate a LEF abstract view from a magic layout. If "outpath" is not
# "None", then write output to outputpath (this is required if the input
# file is in a read-only directory).
def write_lef_file(self, magfile, outpath=None):
    """Generate a LEF abstract view from a magic layout file.

    Writes a temporary tcl script, runs magic in batch mode with it, and
    removes the script afterwards.  When outpath is given, the script
    runs (and the LEF is written) there — required when the layout lives
    in a read-only directory.
    """
    src_dir, magfullname = os.path.split(magfile)
    module = os.path.splitext(magfullname)[0]
    write_path = outpath if outpath else src_dir
    self.print('Generating LEF view from layout for module ' + module)
    scriptfile = write_path + '/pfg_write_lef.tcl'
    # When running outside the layout's own directory, magic must be
    # given the full path to the .mag file.
    loadtarget = magfile if outpath else module
    with open(scriptfile, 'w') as ofile:
        print('drc off', file=ofile)
        print('box 0 0 0 0', file=ofile)
        # NOTE: Using "-force" option in case an IP with a different but
        # compatible tech is used (e.g., EFHX035A IP inside EFXH035C).
        # This is not checked for legality!
        print('load ' + loadtarget + ' -force', file=ofile)
        print('lef write', file=ofile)
        print('quit', file=ofile)
    magicexec = self.magic_path if self.magic_path else 'magic'
    mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole',
            'pfg_write_lef.tcl'],
            stdin = subprocess.PIPE, stdout = subprocess.PIPE,
            stderr = subprocess.PIPE, cwd = write_path,
            universal_newlines = True)
    self.watch(mproc)
    os.remove(scriptfile)
# Watch a running process, polling for output and updating the GUI message
# window as output arrives. Return only when the process has exited.
# Note that this process cannot handle stdin(), so any input to the process
# must be passed from a file.
def watch(self, process):
    """Poll a running subprocess, forwarding its stdout/stderr lines to the
    message console as they arrive; return only after the process exits.

    Note that this cannot feed stdin, so any input to the process must be
    passed from a file.  A None process is ignored.
    """
    if process == None:
        return
    while True:
        status = process.poll()
        if status != None:
            # Process has exited: drain any remaining buffered output.
            try:
                outputpair = process.communicate(timeout=1)
            except ValueError:
                self.print("Process forced stop, status " + str(status))
            else:
                for line in outputpair[0].splitlines():
                    self.print(line)
                for line in outputpair[1].splitlines():
                    self.print(line, file=sys.stderr)
            break
        else:
            # Still running: non-blocking check (timeout 0) for pending output
            # on either pipe.
            sresult = select.select([process.stdout, process.stderr], [], [], 0)[0]
            if process.stdout in sresult:
                outputline = process.stdout.readline().strip()
                self.print(outputline)
            elif process.stderr in sresult:
                outputline = process.stderr.readline().strip()
                self.print(outputline, file=sys.stderr)
            else:
                # Nothing pending: keep the GUI responsive while waiting.
                self.update_idletasks()
# Reimport the pad list by reading the top-level verilog netlist. Determine
# what pads are listed in the file, and check against the existing pad list.
# The verilog/ directory should have a .v file containing a module of the
# same name as self.project (ip-name). The .v filename should have the
# same name as well (but not necessarily). To do: Handle import of
# projects having a top-level schematic instead of a verilog netlist.
def vlogimport(self):
    """Import the project's top-level verilog netlist.

    Discovers the PDK's I/O cell LEF libraries (via nodeinfo.json, with a
    glob fallback), reads their macros, then scans the top-level .v file:
    `include lines pull in cell libraries (resolving core-cell LEF views
    from lef/, mag/ or maglef/, generating LEF from .mag when needed), and
    instance lines of known cells become pad / core cell records.  Results
    are stored in self.celldefs, self.coredefs, self.vlogpads,
    self.corecells and self.ioleflibs.
    """
    self.print('Importing verilog sources.')
    # First find the process PDK name for this project.  Read the nodeinfo.json
    # file and find the list of I/O cell libraries.
    techpath = self.techpath if self.techpath else self.projectpath
    if os.path.exists(techpath + '/.config'):
        config_dir = '/.config'
    else:
        config_dir = '/.ef-config'
    if os.path.exists(techpath + config_dir):
        self.ef_format = True
    pdkpath = self.techpath if self.techpath else os.path.realpath(self.projectpath + config_dir + '/techdir')
    nodeinfopath = pdkpath + config_dir + '/nodeinfo.json'
    ioleflist = []
    if os.path.exists(nodeinfopath):
        self.print('Reading known I/O cell libraries from ' + nodeinfopath)
        with open(nodeinfopath, 'r') as ifile:
            itop = json.load(ifile)
            if 'iocells' in itop:
                ioleflist = []
                for iolib in itop['iocells']:
                    if '/' in iolib:
                        # Entries <lib>/<cell> refer to specific files
                        libcell = iolib.split('/')
                        if self.ef_format:
                            iolibpath = pdkpath + '/libs.ref/lef/' + libcell[0]
                        else:
                            iolibpath = pdkpath + '/libs.ref/' + libcell[0] + '/lef/'
                        ioleflist.extend(glob.glob(iolibpath + '/' + libcell[1] + '.lef'))
                    else:
                        # All other entries refer to everything in the directory.
                        if self.ef_format:
                            iolibpath = pdkpath + '/libs.ref/lef/' + iolib
                        else:
                            iolibpath = pdkpath + '/libs.ref/' + iolib + '/lef/'
                        print(iolibpath)
                        ioleflist.extend(glob.glob(iolibpath + '/*.lef'))
    else:
        # Diagnostic
        print('Cannot read PDK information file ' + nodeinfopath)
    # Fallback behavior: List everything in libs.ref/lef/ beginning with "IO"
    if len(ioleflist) == 0:
        if self.ef_format:
            ioleflist = glob.glob(pdkpath + '/libs.ref/lef/IO*/*.lef')
        else:
            ioleflist = glob.glob(pdkpath + '/libs.ref/IO*/lef/*.lef')
        if len(ioleflist) == 0:
            self.print('Cannot find any I/O cell libraries for this technology')
            return
    # Read the LEF libraries to get a list of all available cells.  Keep
    # this list of cells in "celldefs".
    celldefs = []
    ioliblist = []
    ioleflibs = []
    for iolib in ioleflist:
        iolibpath = os.path.split(iolib)[0]
        iolibfile = os.path.split(iolib)[1]
        ioliblist.append(os.path.split(iolibpath)[1])
        celldefs.extend(self.read_lef_macros(iolibpath, iolibfile, 'iolib'))
    verilogcells = []
    newpadlist = []
    coredefs = []
    corecells = []
    corecelllist = []
    lefprocessed = []
    # Matches a verilog bus range "[high:low]" on an instance line.
    busrex = re.compile('.*\[[ \t]*([0-9]+)[ \t]*:[ \t]*([0-9]+)[ \t]*\]')
    vlogpath = self.projectpath + '/verilog'
    vlogfile = vlogpath + '/' + self.project + '.v'
    if os.path.isfile(vlogfile):
        with open(vlogfile, 'r') as ifile:
            vloglines = ifile.read().splitlines()
            for vlogline in vloglines:
                # Tokenize on whitespace and parentheses, dropping empties.
                vlogparse = re.split('[\t ()]', vlogline)
                while '' in vlogparse:
                    vlogparse.remove('')
                if vlogparse == []:
                    continue
                elif vlogparse[0] == '//':
                    continue
                elif vlogparse[0] == '`include':
                    incpath = vlogparse[1].strip('"')
                    libpath = os.path.split(incpath)[0]
                    libname = os.path.split(libpath)[1]
                    libfile = os.path.split(incpath)[1]
                    # Read the verilog library for module names to match
                    # against macro names in celldefs.
                    modulelist = self.read_verilog_lib(incpath, vlogpath)
                    matching = list(item for item in celldefs if item['name'] in modulelist)
                    for imatch in matching:
                        verilogcells.append(imatch['name'])
                        leffile = imatch['iolib']
                        if leffile not in ioleflibs:
                            ioleflibs.append(leffile)
                    # Read a corresponding LEF file entry for non-I/O macros, if one
                    # can be found (this handles files in the PDK).
                    if len(matching) == 0:
                        if libname != '':
                            # (NOTE: Assumes full path starting with '/')
                            lefpath = libpath.replace('verilog', 'lef')
                            lefname = libfile.replace('.v', '.lef')
                            if not os.path.exists(lefpath + '/' + lefname):
                                leffiles = glob.glob(lefpath + '/*.lef')
                            else:
                                leffiles = [lefpath + '/' + lefname]
                            for leffile in leffiles:
                                if leffile in ioleflibs:
                                    continue
                                elif leffile in lefprocessed:
                                    continue
                                else:
                                    lefprocessed.append(leffile)
                                lefname = os.path.split(leffile)[1]
                                newcoredefs = self.read_lef_macros(lefpath, lefname, 'celllib')
                                coredefs.extend(newcoredefs)
                                corecells.extend(list(item['name'] for item in newcoredefs))
                            if leffiles == []:
                                maglefname = libfile.replace('.v', '.mag')
                                # Handle PDK files with a maglef/ view but no LEF file.
                                maglefpath = libpath.replace('verilog', 'maglef')
                                if not os.path.exists(maglefpath + '/' + maglefname):
                                    magleffiles = glob.glob(maglefpath + '/*.mag')
                                else:
                                    magleffiles = [maglefpath + '/' + maglefname]
                                if magleffiles == []:
                                    # Handle user ip/ files with a maglef/ view but
                                    # no LEF file.
                                    maglefpath = libpath.replace('verilog', 'maglef')
                                    designpath = os.path.split(self.projectpath)[0]
                                    maglefpath = designpath + '/ip/' + maglefpath
                                    if not os.path.exists(maglefpath + '/' + maglefname):
                                        magleffiles = glob.glob(maglefpath + '/*.mag')
                                    else:
                                        magleffiles = [maglefpath + '/' + maglefname]
                                for magleffile in magleffiles:
                                    # Generate LEF file.  Since PDK and ip/ entries
                                    # are not writeable, write into the project mag/
                                    # directory.
                                    magpath = self.projectpath + '/mag'
                                    magname = os.path.split(magleffile)[1]
                                    magroot = os.path.splitext(magname)[0]
                                    leffile = magpath + '/' + magroot + '.lef'
                                    if not os.path.isfile(leffile):
                                        self.write_lef_file(magleffile, magpath)
                                    if leffile in ioleflibs:
                                        continue
                                    elif leffile in lefprocessed:
                                        continue
                                    else:
                                        lefprocessed.append(leffile)
                                    lefname = os.path.split(leffile)[1]
                                    newcoredefs = self.read_lef_macros(magpath, lefname, 'celllib')
                                    coredefs.extend(newcoredefs)
                                    corecells.extend(list(item['name'] for item in newcoredefs))
                                    # LEF files generated on-the-fly are not needed
                                    # after they have been parsed.
                                    # os.remove(leffile)
                        # Check if all modules in modulelist are represented by
                        # corresponding LEF macros.  If not, then go looking for a LEF
                        # file in the mag/ or maglef/ directory.  Then, go looking for
                        # a .mag file in the mag/ or maglef/ directory, and build a
                        # LEF macro from it.
                        matching = list(item['name'] for item in coredefs if item['name'] in modulelist)
                        for module in modulelist:
                            if module not in matching:
                                lefpath = self.projectpath + '/lef'
                                magpath = self.projectpath + '/mag'
                                maglefpath = self.projectpath + '/mag'
                                lefname = libfile.replace('.v', '.lef')
                                # If the verilog file root name is not the same as
                                # the module name, then make a quick check for a
                                # LEF file with the same root name as the verilog.
                                # That indicates that the module does not exist in
                                # the LEF file, probably because it is a primary
                                # module that does not correspond to any layout.
                                leffile = lefpath + '/' + lefname
                                if os.path.exists(leffile):
                                    self.print('Diagnostic: module ' + module + ' is not in ' + leffile + ' (probably a primary module)')
                                    continue
                                leffile = magpath + '/' + lefname
                                istemp = False
                                if not os.path.exists(leffile):
                                    magname = libfile.replace('.v', '.mag')
                                    magfile = magpath + '/' + magname
                                    if os.path.exists(magfile):
                                        self.print('Diagnostic: Found a .mag file for ' + module + ' in ' + magfile)
                                        self.write_lef_file(magfile)
                                        istemp = True
                                    else:
                                        magleffile = maglefpath + '/' + lefname
                                        if not os.path.exists(magleffile):
                                            self.print('Diagnostic: (module ' + module + ') has no LEF file ' + leffile + ' or ' + magleffile)
                                            magleffile = maglefpath + '/' + magname
                                            if os.path.exists(magleffile):
                                                self.print('Diagnostic: Found a .mag file for ' + module + ' in ' + magleffile)
                                                if os.access(maglefpath, os.W_OK):
                                                    self.write_lef_file(magleffile)
                                                    leffile = magleffile
                                                    istemp = True
                                                else:
                                                    self.write_lef_file(magleffile, magpath)
                                            else:
                                                self.print('Did not find a file ' + magfile)
                                                # self.print('Warning: module ' + module + ' has no LEF or .mag views')
                                                pass
                                        else:
                                            self.print('Diagnostic: Found a LEF file for ' + module + ' in ' + magleffile)
                                            leffile = magleffile
                                else:
                                    self.print('Diagnostic: Found a LEF file for ' + module + ' in ' + leffile)
                                if os.path.exists(leffile):
                                    if leffile in lefprocessed:
                                        continue
                                    else:
                                        lefprocessed.append(leffile)
                                    newcoredefs = self.read_lef_macros(magpath, lefname, 'celllib')
                                    # The LEF file generated on-the-fly is not needed
                                    # any more after parsing the macro(s).
                                    # if istemp:
                                    #     os.remove(leffile)
                                    coredefs.extend(newcoredefs)
                                    corecells.extend(list(item['name'] for item in newcoredefs))
                                else:
                                    # self.print('Failed to find a LEF view for module ' + module)
                                    pass
                elif vlogparse[0] in verilogcells:
                    # Instance of a known I/O pad cell.  Check for array of pads.
                    bushigh = buslow = -1
                    if len(vlogparse) >= 3:
                        bmatch = busrex.match(vlogline)
                        if bmatch:
                            bushigh = int(bmatch.group(1))
                            buslow = int(bmatch.group(2))
                    # range(-1, 0) yields one unindexed pad for the scalar case.
                    for i in range(buslow, bushigh + 1):
                        newpad = {}
                        if i >= 0:
                            newpad['name'] = vlogparse[1] + '[' + str(i) + ']'
                        else:
                            newpad['name'] = vlogparse[1]
                        # hack: strip verilog escaped-identifier backslashes
                        newpad['name'] = newpad['name'].replace("\\", "")
                        newpad['cell'] = vlogparse[0]
                        padcell = next(item for item in celldefs if item['name'] == vlogparse[0])
                        newpad['iolib'] = padcell['iolib']
                        newpad['class'] = padcell['class']
                        newpad['subclass'] = padcell['subclass']
                        newpad['width'] = padcell['width']
                        newpad['height'] = padcell['height']
                        newpadlist.append(newpad)
                elif vlogparse[0] in corecells:
                    # Instance of a known core cell.  Check for array of cells.
                    bushigh = buslow = -1
                    if len(vlogparse) >= 3:
                        bmatch = busrex.match(vlogline)
                        if bmatch:
                            bushigh = int(bmatch.group(1))
                            buslow = int(bmatch.group(2))
                    for i in range(buslow, bushigh + 1):
                        newcorecell = {}
                        if i >= 0:
                            newcorecell['name'] = vlogparse[1] + '[' + str(i) + ']'
                        else:
                            newcorecell['name'] = vlogparse[1]
                        newcorecell['cell'] = vlogparse[0]
                        corecell = next(item for item in coredefs if item['name'] == vlogparse[0])
                        newcorecell['celllib'] = corecell['celllib']
                        newcorecell['class'] = corecell['class']
                        newcorecell['subclass'] = corecell['subclass']
                        newcorecell['width'] = corecell['width']
                        newcorecell['height'] = corecell['height']
                        corecelllist.append(newcorecell)
    # Summary report.
    self.print('')
    self.print('Source file information:')
    self.print('Source filename: ' + vlogfile)
    self.print('Number of I/O libraries is ' + str(len(ioleflibs)))
    self.print('Number of library cells in I/O libraries used: ' + str(len(verilogcells)))
    self.print('Number of core celldefs is ' + str(len(coredefs)))
    self.print('')
    self.print('Number of I/O cells in design: ' + str(len(newpadlist)))
    self.print('Number of core cells in design: ' + str(len(corecelllist)))
    self.print('')
    # Save the results
    self.celldefs = celldefs
    self.coredefs = coredefs
    self.vlogpads = newpadlist
    self.corecells = corecelllist
    self.ioleflibs = ioleflibs
# Check self.vlogpads, which was created during import (above) against
# self.(N,S,W,E)pads, which was read from the DEF file (if there was one)
# Also check self.corecells, which was created during import against
# self.coregroup, which was read from the DEF file.
def resolve(self):
    """Reconcile pad and core lists parsed from verilog with those read
    from the DEF file.

    Pads/cells present in verilog but not DEF are added (corner cells to
    the corner slots, the rest distributed clockwise around the ring;
    core cells placed just inside the W/S padframe edge); entries present
    only in DEF are removed.  Missing geometry/class fields are filled
    from self.celldefs / self.coredefs.
    """
    self.print('Resolve differences in verilog and LEF views.')
    samepads = []
    addedpads = []
    removedpads = []
    # (1) Entries in both self.vlogpads and the per-side pad lists
    # (2) Entries in self.vlogpads but not in the per-side pad lists
    allpads = self.Npads + self.NEpad + self.Epads + self.SEpad + self.Spads + self.SWpad + self.Wpads + self.NWpad
    for pad in self.vlogpads:
        newpadname = pad['name']
        try:
            lpad = next(item for item in allpads if item['name'] == newpadname)
        except StopIteration:
            addedpads.append(pad)
        else:
            samepads.append(lpad)
    # (3) Entries in allpads but not in self.vlogpads
    for pad in allpads:
        newpadname = pad['name']
        try:
            lpad = next(item for item in self.vlogpads if item['name'] == newpadname)
        except StopIteration:
            removedpads.append(pad)
    # Print results
    if len(addedpads) > 0:
        self.print('Added pads:')
        for pad in addedpads:
            self.print(pad['name'] + ' (' + pad['cell'] + ')')
    if len(removedpads) > 0:
        plist = []
        nspacers = 0
        for pad in removedpads:
            if 'subclass' in pad:
                if pad['subclass'] != 'SPACER':
                    plist.append(pad)
                else:
                    nspacers += 1
        if nspacers > 0:
            self.print(str(nspacers) + ' spacer cells ignored.')
        if len(plist) > 0:
            self.print('Removed pads:')
            # Bug fix: report only the non-spacer pads collected in plist
            # (the original iterated removedpads, re-listing the spacers it
            # had just announced were ignored).
            for pad in plist:
                self.print(pad['name'] + ' (' + pad['cell'] + ')')
    if len(addedpads) + len(removedpads) == 0:
        self.print('Pad list has not changed.')
    # Remove all cells from the "removed" list
    allpads = [self.Npads, self.NEpad, self.Epads, self.SEpad, self.Spads, self.SWpad, self.Wpads, self.NWpad]
    for pad in removedpads:
        rname = pad['name']
        for row in allpads:
            try:
                rpad = next(item for item in row if item['name'] == rname)
            except StopIteration:
                rpad = None
            else:
                row.remove(rpad)
    # The verilog file has no placement information, so existing padlist
    # entries are preferred; only new entries need positions assigned.
    # First pass for unassigned pads: Use of "CLASS ENDCAP" is preferred
    # for identifying corner pads.  Otherwise, if 'CORNER' or 'corner' is
    # in the pad name, then make sure there is one per row in the first
    # position.  This is not foolproof and depends on the cell library
    # using the text 'corner' in the name of the corner cell; if these
    # ad hoc methods fail, the user can still manually move the corner
    # cells to the right place.
    for pad in addedpads[:]:
        iscorner = False
        if 'class' in pad and pad['class'] == 'ENDCAP':
            iscorner = True
        elif 'CORNER' in pad['cell'].upper():
            iscorner = True
        if iscorner:
            if self.NWpad == []:
                self.NWpad.append(pad)
                pad['o'] = 'E'
                addedpads.remove(pad)
            elif self.NEpad == []:
                self.NEpad.append(pad)
                pad['o'] = 'S'
                addedpads.remove(pad)
            elif self.SEpad == []:
                self.SEpad.append(pad)
                pad['o'] = 'W'
                addedpads.remove(pad)
            elif self.SWpad == []:
                self.SWpad.append(pad)
                pad['o'] = 'N'
                addedpads.remove(pad)
    numN = len(self.Npads)
    numS = len(self.Spads)
    numE = len(self.Epads)
    numW = len(self.Wpads)
    minnum = min(numN, numS, numE, numW)
    minnum = max(minnum, int(len(addedpads) / 4))
    # Add pads in clockwise order.  Note that S and E pads are defined
    # counterclockwise (hence insert(0, ...)).
    for pad in addedpads:
        if numN < minnum:
            self.Npads.append(pad)
            numN += 1
            pad['o'] = 'S'
            self.print("Adding pad " + pad['name'] + " to Npads")
        elif numE < minnum:
            self.Epads.insert(0, pad)
            numE += 1
            pad['o'] = 'W'
            self.print("Adding pad " + pad['name'] + " to Epads")
        elif numS < minnum:
            self.Spads.insert(0, pad)
            numS += 1
            pad['o'] = 'N'
            self.print("Adding pad " + pad['name'] + " to Spads")
        else:
            self.Wpads.append(pad)
            numW += 1
            pad['o'] = 'E'
            self.print("Adding pad " + pad['name'] + " to Wpads")
        minnum = min(numN, numS, numE, numW)
        minnum = max(minnum, int(len(addedpads) / 4))
    # Make sure all pads have included information from the cell definition
    allpads = self.Npads + self.NEpad + self.Epads + self.SEpad + self.Spads + self.SWpad + self.Wpads + self.NWpad
    for pad in allpads:
        if 'width' not in pad:
            try:
                # Bug fix: look the cell up in self.celldefs ('celldefs'
                # was an unbound local name here).
                celldef = next(item for item in self.celldefs if item['name'] == pad['cell'])
            except StopIteration:
                self.print('Cell ' + pad['cell'] + ' not found!')
            else:
                pad['width'] = celldef['width']
                pad['height'] = celldef['height']
                pad['class'] = celldef['class']
                pad['subclass'] = celldef['subclass']
    # Now treat the core cells in the same way (resolve list parsed from
    # verilog against the list parsed from DEF)
    samecore = []
    addedcore = []
    removedcore = []
    # (1) Entries in both self.corecells and self.coregroup
    # (2) Entries in self.corecells but not in self.coregroup
    for cell in self.corecells:
        newcellname = cell['name']
        try:
            lcore = next(item for item in self.coregroup if item['name'] == newcellname)
        except StopIteration:
            addedcore.append(cell)
        else:
            samecore.append(lcore)
    # (3) Entries in self.coregroup but not in self.corecells
    for cell in self.coregroup:
        newcellname = cell['name']
        try:
            lcore = next(item for item in self.corecells if item['name'] == newcellname)
        except StopIteration:
            removedcore.append(cell)
    # Print results
    if len(addedcore) > 0:
        self.print('Added core cells:')
        for cell in addedcore:
            self.print(cell['name'] + ' (' + cell['cell'] + ')')
    if len(removedcore) > 0:
        self.print('Removed core cells:')
        for cell in removedcore:
            self.print(cell['name'] + ' (' + cell['cell'] + ')')
    if len(addedcore) + len(removedcore) == 0:
        self.print('Core cell list has not changed.')
    # Remove all cells from the "removed" list
    coregroup = self.coregroup
    for cell in removedcore:
        rname = cell['name']
        try:
            rcell = next(item for item in coregroup if item['name'] == rname)
        except StopIteration:
            rcell = None
        else:
            coregroup.remove(rcell)
    # Add all cells from the "added" list to coregroup, defaulting the
    # orientation to 'N' and the position to just inside the W/S padframe
    # edge plus the margin.
    for cell in addedcore:
        rname = cell['name']
        try:
            rcell = next(item for item in coregroup if item['name'] == rname)
        except StopIteration:
            coregroup.append(cell)
            if not 'o' in cell:
                cell['o'] = 'N'
            if not 'x' in cell:
                if len(self.Wpads) > 0:
                    pad = self.Wpads[0]
                    padx = pad['x'] if 'x' in pad else 0
                    cell['x'] = padx + pad['height'] + self.margin
                else:
                    cell['x'] = self.margin
            if not 'y' in cell:
                if len(self.Spads) > 0:
                    pad = self.Spads[0]
                    pady = pad['y'] if 'y' in pad else 0
                    cell['y'] = pady + pad['height'] + self.margin
                else:
                    cell['y'] = self.margin
        else:
            rcell = None
    # Make sure all core cells have included information from the cell definition
    for cell in coregroup:
        if 'width' not in cell:
            try:
                # Bug fix: look the cell up in self.coredefs ('coredefs'
                # was an unbound local name here).
                coredef = next(item for item in self.coredefs if item['name'] == cell['cell'])
            except StopIteration:
                self.print('Cell ' + cell['cell'] + ' not found!')
            else:
                cell['width'] = coredef['width']
                cell['height'] = coredef['height']
                cell['class'] = coredef['class']
                cell['subclass'] = coredef['subclass']
# Generate a new padframe by writing the configuration file, running
# padring, reading back the DEF file, and (re)poplulating the workspace
def generate(self, level):
"""Regenerate a legal padframe end-to-end.

Pipeline: write padring's configuration file, run the padring tool,
normalize pad orientations in its DEF output, read the placement
back, reconcile it against the current cell lists, then redraw the
canvas view.  'level' is incremented and forwarded to populate()
(presumably a recursion/redraw depth guard -- confirm at call site).
The step order is load-bearing: each stage consumes files or state
produced by the previous one.
"""
self.print('Generate legal padframe using padring')
# Write out the configuration file
self.writeconfig()
# Run the padring app
self.runpadring()
# Rotate pads in the output if pad orientations are different from
# padring's expectations
self.rotate_pads_in_def()
# Read the placement information back from the generated DEF file
self.readplacement()
# Resolve differences (e.g., remove spacers)
self.resolve()
# Recreate and draw the padframe view on the canvas
self.populate(level + 1)
self.frame_configure(None)
# Write a new configuration file
def writeconfig(self):
"""Write the padring configuration file (mag/padframe.cfg).

Derives the chip AREA from the summed pad widths along each edge
(plus corner-cell contributions), enlarges it if the placed core
cells plus self.margin would not fit inside the ring, then emits one
CORNER line per corner pad and one PAD line per edge pad in padring's
config syntax.  An existing padframe.cfg is backed up to
padframe.cfg.bak before being overwritten; when self.keep_cfg is set
and the file exists, nothing is rewritten.
"""
mag_path = self.projectpath + '/mag'
self.print('Writing padring configuration file.')
# Determine cell width and height from pad sizes.
# NOTE: This compresses the chip to the minimum dimensions
# allowed by the arrangement of pads. Use a "core" block to
# force the area larger than minimum (not yet implemented)
topwidth = 0
for pad in self.Npads:
if 'width' not in pad:
self.print('No width: pad = ' + str(pad))
topwidth += pad['width']
# Add in the corner cells
if self.NWpad != []:
topwidth += self.NWpad[0]['height']
if self.NEpad != []:
topwidth += self.NEpad[0]['width']
botwidth = 0
for pad in self.Spads:
botwidth += pad['width']
# Add in the corner cells
if self.SWpad != []:
botwidth += self.SWpad[0]['width']
if self.SEpad != []:
botwidth += self.SEpad[0]['height']
width = max(botwidth, topwidth)
# if width < self.urx - self.llx:
# width = self.urx - self.llx
leftheight = 0
for pad in self.Wpads:
leftheight += pad['width']
# Add in the corner cells
if self.NWpad != []:
leftheight += self.NWpad[0]['height']
if self.SWpad != []:
leftheight += self.SWpad[0]['width']
rightheight = 0
for pad in self.Epads:
rightheight += pad['width']
# Add in the corner cells
if self.NEpad != []:
rightheight += self.NEpad[0]['width']
if self.SEpad != []:
rightheight += self.SEpad[0]['height']
height = max(leftheight, rightheight)
# Check the dimensions of the core cells. If they exceed the available
# padframe area, then expand the padframe to accomodate the core.
# Start the core bounding box as a degenerate point at the frame center
# so that any placed core cell grows it outward.
corellx = coreurx = (self.llx + self.urx) / 2
corelly = coreury = (self.lly + self.ury) / 2
for corecell in self.coregroup:
corient = corecell['o']
if 'S' in corient or 'N' in corient:
cwidth = corecell['width']
cheight = corecell['height']
else:
cwidth = corecell['height']
cheight = corecell['width']
if corecell['x'] < corellx:
corellx = corecell['x']
if corecell['x'] + cwidth > coreurx:
coreurx = corecell['x'] + cwidth
if corecell['y'] < corelly:
corelly = corecell['y']
if corecell['y'] + cheight > coreury:
coreury = corecell['y'] + cheight
coreheight = coreury - corelly
corewidth = coreurx - corellx
# Ignoring the possibility of overlaps with nonstandard-sized pads,
# assuming that the user has visually separated them. Only check
# the core bounds against the standard padframe inside edge.
if self.SWpad != [] and self.SEpad != []:
if corewidth > width - self.SWpad[0]['width'] - self.SEpad[0]['width']:
width = corewidth + self.SWpad[0]['width'] + self.SEpad[0]['width']
if self.NWpad != [] and self.SWpad != []:
if coreheight > height - self.NWpad[0]['height'] - self.SWpad[0]['height']:
height = coreheight + self.NWpad[0]['height'] + self.SWpad[0]['height']
# Core cells are given a margin of self.margin from the pad inside edge, so the
# core area passed to the padring app is 2 * self.margin larger than the
# measured size of the core area.
width += 2 * self.margin
height += 2 * self.margin
# SCALE UP
# width *= 1.4
# height *= 1.4
if self.keep_cfg == False or not os.path.exists(mag_path + '/padframe.cfg'):
if os.path.exists(mag_path + '/padframe.cfg'):
# Copy the previous padframe.cfg file to a backup. In case something
# goes badly wrong, this should be the only file overwritten, and can
# be recovered from the backup.
shutil.copy(mag_path + '/padframe.cfg', mag_path + '/padframe.cfg.bak')
with open(mag_path + '/padframe.cfg', 'w') as ofile:
print('AREA ' + str(int(width)) + ' ' + str(int(height)) + ' ;',
file=ofile)
print('', file=ofile)
# Corner orientations below are padring's drawing conventions
# (it assumes corner cells are drawn in the SW position).
for pad in self.NEpad:
print('CORNER ' + pad['name'] + ' SW ' + pad['cell'] + ' ;',
file=ofile)
for pad in self.SEpad:
print('CORNER ' + pad['name'] + ' NW ' + pad['cell'] + ' ;',
file=ofile)
for pad in self.SWpad:
print('CORNER ' + pad['name'] + ' NE ' + pad['cell'] + ' ;',
file=ofile)
for pad in self.NWpad:
print('CORNER ' + pad['name'] + ' SE ' + pad['cell'] + ' ;',
file=ofile)
for pad in self.Npads:
flip = 'F ' if 'F' in pad['o'] else ''
print('PAD ' + pad['name'] + ' N ' + flip + pad['cell'] + ' ;',
file=ofile)
for pad in self.Epads:
flip = 'F ' if 'F' in pad['o'] else ''
print('PAD ' + pad['name'] + ' E ' + flip + pad['cell'] + ' ;',
file=ofile)
for pad in self.Spads:
flip = 'F ' if 'F' in pad['o'] else ''
print('PAD ' + pad['name'] + ' S ' + flip + pad['cell'] + ' ;',
file=ofile)
for pad in self.Wpads:
flip = 'F ' if 'F' in pad['o'] else ''
print('PAD ' + pad['name'] + ' W ' + flip + pad['cell'] + ' ;',
file=ofile)
# Invoke the external padring tool on the generated configuration.
def runpadring(self):
    """Run 'padring' in the project's mag/ directory.

    Uses self.padring_path when configured, otherwise relies on a
    'padring' executable being found on the search path.  One
    '-L <lef>' pair is passed per I/O LEF library, and the tool is
    asked to write its placement to padframe.def from padframe.cfg.
    Output is streamed through self.watch() so progress appears in
    the console window.
    """
    self.print('Running padring')
    mag_path = self.projectpath + '/mag'
    # Start with the executable, then accumulate its arguments.
    command = [self.padring_path if self.padring_path else 'padring']
    for iolib in self.ioleflibs:
        command.extend(['-L', iolib])
    command.extend(['--def', 'padframe.def', 'padframe.cfg'])
    self.print('Running ' + str(command))
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd=mag_path)
    self.watch(proc)
# Read placement information from the DEF file generated by padring.
def readplacement(self, precheck=False):
"""Parse mag/padframe.def (padring output) and rebuild the pad lists.

Populates self.Npads/Spads/Epads/Wpads plus the four corner-pad
lists, updating the frame bounding box (self.llx/lly/urx/ury) as
pads are read.  Afterwards the core is floorplanned: from the
project's top-level DEF if present (generating one from the .mag
layout via magic when possible), else by tiling self.corecells in
rows inside the padframe, else from a previously written core.def.
Returns False if padframe.def is missing (quietly when 'precheck'
is set), True otherwise.  This is deliberately NOT a rigorous DEF
parser -- it assumes padring's exact output formatting.
"""
self.print('Reading placement information from DEF file')
mag_path = self.projectpath + '/mag'
if not os.path.isfile(mag_path + '/padframe.def'):
if not precheck:
self.print('No file padframe.def: pad frame was not generated.')
return False
# Very simple DEF file parsing. The placement DEF only contains a
# COMPONENTS section. Certain assumptions are made about the syntax
# that depends on the way 'padring' writes its output. This is not
# a rigorous DEF parser!
units = 1000
in_components = False
Npadlist = []
Spadlist = []
Epadlist = []
Wpadlist = []
NEpad = []
NWpad = []
SEpad = []
SWpad = []
coregroup = []
# Reset bounds
self.llx = self.lly = self.urx = self.ury = 0
corners = 0
with open(mag_path + '/padframe.def', 'r') as ifile:
deflines = ifile.read().splitlines()
for line in deflines:
if 'UNITS DISTANCE MICRONS' in line:
units = line.split()[3]
elif in_components:
lparse = line.split()
if lparse[0] == '-':
instname = lparse[1]
cellname = lparse[2]
elif lparse[0] == '+':
if lparse[1] == 'FIXED':
placex = lparse[3]
placey = lparse[4]
placeo = lparse[6]
newpad = {}
newpad['name'] = instname
newpad['cell'] = cellname
try:
celldef = next(item for item in self.celldefs if item['name'] == cellname)
except:
celldef = None
else:
newpad['iolib'] = celldef['iolib']
newpad['width'] = celldef['width']
newpad['height'] = celldef['height']
newpad['class'] = celldef['class']
newpad['subclass'] = celldef['subclass']
newpad['x'] = float(placex) / float(units)
newpad['y'] = float(placey) / float(units)
newpad['o'] = placeo
# Adjust bounds
if celldef:
if newpad['x'] < self.llx:
self.llx = newpad['x']
if newpad['y'] < self.lly:
self.lly = newpad['y']
if newpad['o'] == 'N' or newpad['o'] == 'S':
padurx = newpad['x'] + celldef['width']
padury = newpad['y'] + celldef['height']
else:
padurx = newpad['x'] + celldef['height']
padury = newpad['y'] + celldef['width']
if padurx > self.urx:
self.urx = padurx
if padury > self.ury:
self.ury = padury
# First four entries in the DEF file are corners
# padring puts the lower left corner at zero, so
# use the zero coordinates to determine which pads
# are which. Note that padring assumes the corner
# pad is drawn in the SW corner position!
if corners < 4:
if newpad['x'] == 0 and newpad['y'] == 0:
SWpad.append(newpad)
elif newpad['x'] == 0:
NWpad.append(newpad)
elif newpad['y'] == 0:
SEpad.append(newpad)
else:
NEpad.append(newpad)
corners += 1
else:
# Place according to orientation. If orientation
# is not standard, be sure to make it standard!
placeo = self.rotate_orientation(placeo)
if placeo == 'N':
Spadlist.append(newpad)
elif placeo == 'E':
Wpadlist.append(newpad)
elif placeo == 'S':
Npadlist.append(newpad)
else:
Epadlist.append(newpad)
elif 'END COMPONENTS' in line:
in_components = False
elif 'COMPONENTS' in line:
in_components = True
self.Npads = Npadlist
self.Wpads = Wpadlist
self.Spads = Spadlist
self.Epads = Epadlist
self.NWpad = NWpad
self.NEpad = NEpad
self.SWpad = SWpad
self.SEpad = SEpad
# The padframe has its own DEF file from the padring app, but the core
# does not. The core needs to be floorplanned in a very similar manner.
# This will be done by searching for a DEF file of the project top-level
# layout. If none exists, it is created by generating it from the layout.
# If the top-level layout does not exist, then all core cells are placed
# at the origin, and the origin placed at the padframe inside corner.
mag_path = self.projectpath + '/mag'
if not os.path.isfile(mag_path + '/' + self.project + '.def'):
if os.path.isfile(mag_path + '/' + self.project + '.mag'):
# Create a DEF file from the layout
with open(mag_path + '/pfg_write_def.tcl', 'w') as ofile:
print('drc off', file=ofile)
print('box 0 0 0 0', file=ofile)
print('load ' + self.project, file=ofile)
print('def write', file=ofile)
print('quit', file=ofile)
magicexec = self.magic_path if self.magic_path else 'magic'
mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole',
'pfg_write_def.tcl'],
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, cwd = mag_path, universal_newlines = True)
self.watch(mproc)
os.remove(mag_path + '/pfg_write_def.tcl')
elif not os.path.isfile(mag_path + '/core.def'):
# With no other information available, copy the corecells
# (from the verilog file) into the coregroup list.
# Position all core cells starting at the padframe top left
# inside corner, and arranging in rows without overlapping.
# Note that no attempt is made to organize the cells or
# otherwise produce an efficient layout. Any dimension larger
# than the current padframe overruns to the right or bottom.
if self.SWpad != []:
corellx = SWpad[0]['x'] + SWpad[0]['width'] + self.margin
corelly = SWpad[0]['y'] + SWpad[0]['height'] + self.margin
else:
corellx = Wpadlist[0]['x'] + Wpadlist[0]['height'] + self.margin
# NOTE(review): the next line indexes ['x'] while computing a
# y coordinate -- looks like it should be Spadlist[0]['y'];
# confirm before relying on the no-corner-pad fallback.
corelly = Spadlist[0]['x'] + Spadlist[0]['height'] + self.margin
if self.NEpad != []:
coreurx = NEpad[0]['x'] - self.margin
coreury = NEpad[0]['y'] - self.margin
else:
coreurx = Epadlist[0]['x'] - self.margin
# NOTE(review): same suspicion here -- Npadlist[0]['x'] used in a
# y-coordinate computation; presumably should be ['y'].
coreury = Npadlist[0]['x'] - self.margin
locllx = corellx
testllx = corellx
loclly = corelly
testlly = corelly
nextlly = corelly
for cell in self.corecells:
testllx = locllx + cell['width']
if testllx > coreurx:
locllx = corellx
corelly = nextlly
loclly = nextlly
newcore = cell
newcore['x'] = locllx
newcore['y'] = loclly
newcore['o'] = 'N'
locllx += cell['width'] + self.margin
testlly = corelly + cell['height'] + self.margin
if testlly > nextlly:
nextlly = testlly
coregroup.append(newcore)
self.coregroup = coregroup
if os.path.isfile(mag_path + '/' + self.project + '.def'):
# Read the top-level DEF, and use it to position the core cells.
self.print('Reading the top-level cell DEF for core cell placement.')
units = 1000
in_components = False
with open(mag_path + '/' + self.project + '.def', 'r') as ifile:
deflines = ifile.read().splitlines()
for line in deflines:
if 'UNITS DISTANCE MICRONS' in line:
units = line.split()[3]
elif in_components:
lparse = line.split()
if lparse[0] == '-':
instname = lparse[1]
# NOTE: Magic should not drop the entire path to the
# cell for the cellname; this needs to be fixed! To
# work around it, remove any path components.
cellpath = lparse[2]
cellname = os.path.split(cellpath)[1]
elif lparse[0] == '+':
if lparse[1] == 'PLACED':
placex = lparse[3]
placey = lparse[4]
placeo = lparse[6]
newcore = {}
newcore['name'] = instname
newcore['cell'] = cellname
try:
celldef = next(item for item in self.coredefs if item['name'] == cellname)
except:
celldef = None
else:
newcore['celllib'] = celldef['celllib']
newcore['width'] = celldef['width']
newcore['height'] = celldef['height']
newcore['class'] = celldef['class']
newcore['subclass'] = celldef['subclass']
newcore['x'] = float(placex) / float(units)
newcore['y'] = float(placey) / float(units)
newcore['o'] = placeo
coregroup.append(newcore)
elif 'END COMPONENTS' in line:
in_components = False
elif 'COMPONENTS' in line:
in_components = True
self.coregroup = coregroup
elif os.path.isfile(mag_path + '/core.def'):
# No DEF or .mag file, so fallback position is the last core.def
# file generated by this script.
self.read_core_def(precheck=precheck)
return True
# Rewrite "padframe.def" so every placed pad is rotated by the
# foundry-specific default pad orientation and pinned down as FIXED.
# padring assumes the corner pad is drawn lower-left with the bond pad
# at the bottom; foundries that draw pads differently need this fixup.
def rotate_pads_in_def(self):
    """Apply self.pad_rotation to every component in padframe.def.

    No-op when the configured rotation is zero or the DEF file does
    not exist.  Each 'PLACED' component record is re-marked 'FIXED'
    with its orientation passed through self.rotate_orientation().
    The file is rewritten in place.
    """
    if self.pad_rotation == 0:
        return
    self.print('Rotating pads in padframe DEF file.')
    mag_path = self.projectpath + '/mag'
    def_file = mag_path + '/padframe.def'
    if not os.path.isfile(def_file):
        self.print('No file padframe.def: Cannot modify pad rotations.')
        return
    with open(def_file, 'r') as ifile:
        def_lines = ifile.read().splitlines()
    rewritten = []
    in_components = False
    for def_line in def_lines:
        if in_components:
            tokens = def_line.split()
            if tokens[0] == '+' and tokens[1] == 'PLACED':
                tokens[1] = 'FIXED'
                tokens[6] = self.rotate_orientation(tokens[6])
                def_line = ' '.join(tokens)
        elif 'END COMPONENTS' in def_line:
            in_components = False
        elif 'COMPONENTS' in def_line:
            in_components = True
        rewritten.append(def_line)
    with open(def_file, 'w') as ofile:
        for def_line in rewritten:
            print(def_line, file=ofile)
# Read placement information from the DEF file for the core (created by
# a previous run of this script)
def read_core_def(self, precheck=False):
"""Rebuild self.coregroup from mag/core.def.

core.def is written by a previous save of this script; parsing
mirrors readplacement() but collects 'PLACED' core cell records
instead of pads, cross-referencing self.coredefs for cell geometry.
Returns False when core.def is absent (silently if 'precheck'),
True otherwise.
"""
self.print('Reading placement information from core DEF file.')
mag_path = self.projectpath + '/mag'
if not os.path.isfile(mag_path + '/core.def'):
if not precheck:
self.print('No file core.def: core placement was not generated.')
return False
# Very simple DEF file parsing, similar to the padframe.def reading
# routine above.
units = 1000
in_components = False
coregroup = []
with open(mag_path + '/core.def', 'r') as ifile:
deflines = ifile.read().splitlines()
for line in deflines:
if 'UNITS DISTANCE MICRONS' in line:
units = line.split()[3]
elif in_components:
lparse = line.split()
if lparse[0] == '-':
instname = lparse[1]
cellname = lparse[2]
elif lparse[0] == '+':
if lparse[1] == 'PLACED':
placex = lparse[3]
placey = lparse[4]
placeo = lparse[6]
newcore = {}
newcore['name'] = instname
newcore['cell'] = cellname
try:
celldef = next(item for item in self.coredefs if item['name'] == cellname)
except:
celldef = None
else:
newcore['celllib'] = celldef['celllib']
newcore['width'] = celldef['width']
newcore['height'] = celldef['height']
newcore['class'] = celldef['class']
newcore['subclass'] = celldef['subclass']
# DEF coordinates are integers in database units; convert to microns.
newcore['x'] = float(placex) / float(units)
newcore['y'] = float(placey) / float(units)
newcore['o'] = placeo
coregroup.append(newcore)
elif 'END COMPONENTS' in line:
in_components = False
elif 'COMPONENTS' in line:
in_components = True
self.coregroup = coregroup
return True
# Save the layout to a Magic database file (to be completed)
def save(self):
"""Assemble padframe + core into a magic layout database (.mag).

Collects the unique LEF libraries used by all pad and core cells,
then drives magic in batch mode through a generated Tcl script that
reads the padframe DEF, saves it as a subcell, reads the core DEF,
instantiates the padframe over it, and writes the combined layout
under the project name.  The temporary Tcl script is removed
afterwards, and core.def is written out on first save so the core
placement can be re-read later.
"""
self.print('Saving results in a magic layout database.')
# Generate a list of (unique) LEF libraries for all padframe and core cells
leflist = []
for pad in self.celldefs:
if pad['iolib'] not in leflist:
leflist.append(pad['iolib'])
for core in self.coredefs:
if core['celllib'] not in leflist:
leflist.append(core['celllib'])
# Run magic, and generate the padframe with a series of commands
mag_path = self.projectpath + '/mag'
with open(mag_path + '/pfg_write_mag.tcl', 'w') as ofile:
print('drc off', file=ofile)
print('box 0 0 0 0', file=ofile)
for leffile in leflist:
print('lef read ' + leffile, file=ofile)
print('def read padframe', file=ofile)
print('select top cell', file=ofile)
print('select area', file=ofile)
print('select save padframe', file=ofile)
print('delete', file=ofile)
print('def read core', file=ofile)
print('getcell padframe', file=ofile)
print('save ' + self.project, file=ofile)
print('writeall force ' + self.project, file=ofile)
print('quit', file=ofile)
magicexec = self.magic_path if self.magic_path else 'magic'
mproc = subprocess.Popen([magicexec, '-dnull', '-noconsole',
'pfg_write_mag.tcl'],
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, cwd = mag_path, universal_newlines = True)
self.watch(mproc)
os.remove(mag_path + '/pfg_write_mag.tcl')
self.print('Done writing layout ' + self.project + '.mag')
# Write the core DEF file if it does not exist yet.
if not os.path.isfile(mag_path + '/core.def'):
self.write_core_def()
if __name__ == '__main__':
    # Dump a Python traceback on SIGUSR2 for post-mortem debugging of hangs.
    faulthandler.register(signal.SIGUSR2)

    # Split the command line into '-'-prefixed options and bare arguments.
    options = []
    arguments = []
    for item in sys.argv[1:]:
        if item.find('-', 0) == 0:
            options.append(item)
        else:
            arguments.append(item)

    if '-help' in options:
        print(sys.argv[0] + ' [options]')
        print('')
        print('options:')
        print(' -noc Print output to terminal, not the gui window')
        print(' -nog No graphics, run in batch mode')
        print(' -cfg Use existing padframe.cfg, do not regenerate')
        print(' -padring-path=<path> path to padring executable')
        print(' -magic-path=<path> path to magic executable')
        print(' -tech-path=<path> path to tech root folder')
        print(' -project-path=<path> path to project root folder')
        print(' -help Print this usage information')
        print('')
        sys.exit(0)

    root = tkinter.Tk()
    do_gui = False if ('-nog' in options or '-nogui' in options) else True
    app = SoCFloorplanner(root, do_gui)
    # Allow option -noc to bypass the text-to-console redirection, so crash
    # information doesn't disappear with the app.
    app.use_console = False if ('-noc' in options or '-noconsole' in options) else True
    if do_gui == False:
        app.use_console = False
    # efabless format can be specified on the command line, but note that it
    # is otherwise auto-detected by checking for .config vs. .ef-config in
    # the project space.
    app.ef_format = True if '-ef_format' in options else False
    app.keep_cfg = True if '-cfg' in options else False

    app.padring_path = None
    app.magic_path = None
    app.techpath = None
    app.projectpath = None
    for option in options:
        key = option.split('=')[0]
        if key == '-padring-path':
            app.padring_path = option.split('=')[1]
        elif key == '-magic-path':
            app.magic_path = option.split('=')[1]
        elif key == '-tech-path':
            app.techpath = option.split('=')[1]
        elif key == '-project-path':
            app.projectpath = option.split('=')[1]
    # Strip a single trailing slash from the project path.  Guard against
    # the default None:  the original code unconditionally evaluated
    # app.projectpath[-1], raising TypeError whenever -project-path was
    # not supplied on the command line.
    if app.projectpath and app.projectpath.endswith('/'):
        app.projectpath = app.projectpath[:-1]

    app.text_to_console()
    app.init_padframe()

    if app.do_gui:
        root.mainloop()
    else:
        # Run 'save' in non-GUI mode
        app.save()
        sys.exit(0)
| 41.101852 | 146 | 0.484315 |
117c6caed0886eec8c0aac5561ccec9deb809112 | 577 | py | Python | run.py | littlebai3618/bproxypool | 358cf5c14164dadbd0d9bdf7cc3932b46ec81812 | [
"MIT"
] | 2 | 2019-12-26T02:49:59.000Z | 2020-02-11T09:22:07.000Z | run.py | littlebai3618/bproxypool | 358cf5c14164dadbd0d9bdf7cc3932b46ec81812 | [
"MIT"
] | 3 | 2021-03-31T19:33:59.000Z | 2021-12-13T20:29:21.000Z | run.py | littlebai3618/bproxypool | 358cf5c14164dadbd0d9bdf7cc3932b46ec81812 | [
"MIT"
] | 2 | 2020-01-25T12:05:47.000Z | 2020-07-19T02:57:12.000Z | import sys
import traceback
from bproxypool.scheduler import run
from bproxypool.server import create_app
from bproxypool.utils.notify import ding
# WSGI application object; importable by external servers (gunicorn etc.).
app = create_app()

if __name__ == '__main__':
    # app.run(debug=True)
    if len(sys.argv) == 2:
        if sys.argv[1] == 'scheduler':
            try:
                run()
            except Exception:
                tp, msg, tb = sys.exc_info()
                e_msg = '>'.join(traceback.format_exception(tp, msg, tb))
                # Push the formatted traceback to the DingTalk notifier
                # before propagating.
                ding(f'> ProxyPoolError: \n{e_msg}', 'ProxyPoolError')
                # Bare 'raise' (was 'raise e'): re-raises the active
                # exception without tacking this frame onto its traceback.
                raise
| 26.227273 | 73 | 0.577123 |
7967904c21083809b412cf1d907dfc6346771e2c | 833 | py | Python | short_reads_pipeline/tools/deepargClass.py | antkak/deeparg-keras | 0f3b640249d22a0c5aea7e8ad9c9af0b667d2176 | [
"MIT"
] | null | null | null | short_reads_pipeline/tools/deepargClass.py | antkak/deeparg-keras | 0f3b640249d22a0c5aea7e8ad9c9af0b667d2176 | [
"MIT"
] | null | null | null | short_reads_pipeline/tools/deepargClass.py | antkak/deeparg-keras | 0f3b640249d22a0c5aea7e8ad9c9af0b667d2176 | [
"MIT"
] | null | null | null | import os
import sys
def run(R, data, path_to_deeparg='/deeparg/'):
    """Align reads file R with deepARG and write R + '.deeparg'.

    'data' must carry a 'deep_arg_parameters' dict with 'identity',
    'probability' and 'evalue' entries; 'path_to_deeparg' is the
    deepARG installation root.  Returns True when the command was
    launched and drained, False if anything raised along the way
    (missing parameters, popen failure, ...).
    """
    # print sys.path
    try:
        cmd = " ".join(
            ['python ' + path_to_deeparg + 'deepARG.py',
             '--align',
             '--type nucl',
             '--reads',
             '--input', R,
             '--output', R + '.deeparg',
             '--iden', str(data['deep_arg_parameters']['identity']),
             '--prob', str(data['deep_arg_parameters']['probability']),
             '--evalue', str(data['deep_arg_parameters']['evalue'])
             ])
        print(cmd)
        x = os.popen(cmd).read()
        return True
    except Exception as inst:
        # Was 'print str(inst)' -- Python 2 statement syntax, a
        # SyntaxError under Python 3.  Fixed to a function call.
        print(str(inst))
        return False
def dsize(path_to_deeparg='/deeparg/'):
    """Map each feature gene ID (upper-cased tail of the '|'-separated
    header) to its split record from database/v2/features.gene.length.

    The original body referenced an undefined module-level name
    'path_to_deeparg' and raised NameError on every call; it is now a
    parameter defaulting to the conventional install root, keeping the
    zero-argument call signature working.
    """
    return {i.split()[0].split("|")[-1].upper(): i.split()
            for i in open(path_to_deeparg + '/database/v2/features.gene.length')}
| 28.724138 | 126 | 0.510204 |
ced0a153679ae5cae7f1dfab12edba497d745736 | 851 | py | Python | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EXPERT/148_sum_of_digits.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EXPERT/148_sum_of_digits.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EXPERT/148_sum_of_digits.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 2 | 2022-02-09T15:41:33.000Z | 2022-02-11T07:47:40.000Z | """
https://edabit.com/challenge/K7NbqZBYD5xzZLro9 EXPERT----
Sum of Digits
Create a function that takes a range of numbers and returns the sum of each digit from start to stop.
Examples
digits_sum(1, 10) ➞ 46
# total numbers in the range are = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
# sum of each digits is = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 1 + 0 = 46
digits_sum(1, 20) ➞ 102
digits_sum(1, 100) ➞ 901
Notes
start and stop are inclusive in the range.
"""
import math
def sum_d(n):
    """Return the sum of the digits of every integer from 1 to n inclusive.

    Closed-form recursion, O(number of digits of n):  split n into its
    leading digit 'coff' and remainder n % p (p = 10**logn) and add up
      * the digit sums of the full blocks 0..p-1 repeated coff times
        (each block of p numbers contributes 45 * logn * 10**(logn-1)),
      * the leading digits 0..coff-1, each appearing p times, and
      * the partial top block coff*p .. n.
    """
    if n < 10:
        # Base case: 1 + 2 + ... + n, the triangular number.
        return n * (n + 1) // 2
    # Digit count minus one, taken from the decimal string so it is exact
    # for every n; the original int(math.log10(n)) relies on float log and
    # can misround near large powers of ten.
    logn = len(str(n)) - 1
    p = 10 ** logn
    coff = n // p
    return (coff * 45 * logn * 10 ** (logn - 1)) \
        + (coff * (coff - 1) // 2 * p) \
        + (coff * (1 + n % p) + sum_d(n % p))


def digits_sum(start, stop):
    """Sum of the digits of every integer in the inclusive range
    [start, stop]."""
    return sum_d(stop) - sum_d(start - 1)
# Demo invocations at import time; results are discarded (the expected
# values are noted in the trailing comments and in the module docstring).
#digits_sum(1, 20) #➞ 102
digits_sum(1, 100) #➞ 901
digits_sum(1, 100000000) #, 3600000001)
66d97823e69fccc90f89137feb031bf67263da85 | 6,453 | py | Python | lib/data/datasets/aicity20_ReOri.py | Johere/AICity2020-VOC-ReID | 21268535595c8c90b87cd1ee89ddbcb341a86d76 | [
"MIT"
] | 100 | 2020-04-25T03:58:01.000Z | 2022-03-30T18:24:17.000Z | lib/data/datasets/aicity20_ReOri.py | hanleiyu/prcv | df5ad9469b38b8176121357fe5de2b1cf30aae1c | [
"MIT"
] | 30 | 2020-04-27T07:15:00.000Z | 2022-01-03T19:49:49.000Z | lib/data/datasets/aicity20_ReOri.py | hanleiyu/prcv | df5ad9469b38b8176121357fe5de2b1cf30aae1c | [
"MIT"
] | 25 | 2020-04-25T22:53:30.000Z | 2022-03-28T00:46:51.000Z | # encoding: utf-8
import glob
import re
import os
import os.path as osp
import xml.etree.ElementTree as ET
import json
from .bases import BaseImageDataset
from .aicity20 import AICity20
class AICity20ReOri(AICity20):
"""
Simulation data: include attribute information
- orientation
- color
- cls type (truck, suv)
"""
dataset_dir = 'AIC20_ReID_Simulation'
def __init__(self, root='', verbose=True, **kwargs):
super(AICity20, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'image_train')
self.list_train_path = osp.join(self.dataset_dir, 'name_train.txt')
self.train_label_path = osp.join(self.dataset_dir, 'train_label.xml')
self._check_before_run()
train = self._process_dir(self.train_dir, self.list_train_path, self.train_label_path, relabel=False)
#train_num = 190000
train_num = 100000
#train_num = 50000
query_num = 500
gallery_num = 5000
query = train[train_num:train_num+query_num]
gallery = train[train_num+query_num: train_num+query_num+gallery_num]
train = train[:train_num]
if verbose:
print("=> AI CITY 2020 sim data loaded")
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _process_dir(self, img_dir, list_path, label_path, relabel=False):
dataset = []
if label_path:
tree = ET.parse(label_path, parser=ET.XMLParser(encoding='utf-8'))
objs = tree.find('Items')
for obj in objs:
image_name = obj.attrib['imageName']
img_path = osp.join(img_dir, image_name)
pid = int(float(obj.attrib['orientation']) / 10)
camid = int(obj.attrib['cameraID'][1:])
dataset.append((img_path, pid, camid))
if relabel: dataset = self.relabel(dataset)
else:
with open(list_path, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
img_path = osp.join(img_dir, line)
pid = 0
camid = 0
dataset.append((img_path, pid, camid))
return dataset
if __name__ == '__main__':
dataset = AICity20ReOri(root='/home/zxy/data/ReID/vehicle')
#
# # encoding: utf-8
#
# import glob
# import re
# import os
# import os.path as osp
# import xml.etree.ElementTree as ET
# import json
#
# from .bases import BaseImageDataset
#
#
# class AICity20ReOri(BaseImageDataset):
# """
# ----------------------------------------
# subset | # ids | # images | # cameras
# ----------------------------------------
# train | 333 | 36935 | 36
# query | 333 | 1052 | ?
# gallery | 333 | 18290 | ?
# ----------------------------------------
#
# """
# dataset_dir = 'AIC20_ReID/'
# dataset_aug_dir = 'AIC20_ReID_Cropped'
# def __init__(self, root='', verbose=True, **kwargs):
# super(AICity20ReOri, self).__init__()
# self.dataset_dir = osp.join(root, self.dataset_dir)
# self.dataset_aug_dir = osp.join(root, self.dataset_aug_dir)
#
# self.train_dir = osp.join(self.dataset_aug_dir, 'image_train')
# self.query_dir = osp.join(self.dataset_aug_dir, 'image_query')
# self.gallery_dir = osp.join(self.dataset_aug_dir, 'image_test')
# self.train_aug_dir = osp.join(self.dataset_aug_dir, 'image_train')
#
# self.orientation_train_path = osp.join(self.dataset_dir, 'orientation', 'orientation_train.json')
# self.orientation_query_path = osp.join(self.dataset_dir, 'orientation', 'orientation_query.json')
# self.orientation_gallery_path = osp.join(self.dataset_dir, 'orientation', 'orientation_test.json')
#
# self._check_before_run()
#
# train = self._process_dir(self.train_dir, self.orientation_train_path, relabel=False)
# query = self._process_dir(self.query_dir, self.orientation_query_path)
# gallery = self._process_dir(self.gallery_dir, self.orientation_gallery_path)
#
# #train = self.relabel(train)
# if verbose:
# print("=> AI CITY 2020 data loaded")
# #self.print_dataset_statistics(train, query, gallery)
#
# self.train = train
# self.query = query
# self.gallery = gallery
#
# self.train_tracks = self._read_tracks(os.path.join(self.dataset_dir, 'train_track_id.txt'))
# self.test_tracks = self._read_tracks(os.path.join(self.dataset_dir, 'test_track_id.txt'))
#
# self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
# self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
# self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
#
#
# def _check_before_run(self):
# """Check if all files are available before going deeper"""
# if not osp.exists(self.dataset_dir):
# raise RuntimeError("'{}' is not available".format(self.dataset_dir))
# if not osp.exists(self.train_dir):
# raise RuntimeError("'{}' is not available".format(self.train_dir))
#
# def _read_orientation_info(self, path):
# with open(path, 'r') as f:
# orientation = json.load(f)
# return orientation
#
# def _process_dir(self, img_dir, json_path, relabel=False):
# dataset = []
# orientation_dict = self._read_orientation_info(json_path)
# for k, v in orientation_dict.items():
# img_path = osp.join(img_dir, k)
# pid = int(float(v) * 360 / 10)
# camid = 0
# dataset.append([img_path, pid, camid])
# if relabel: self.relabel(dataset)
# return dataset
#
# if __name__ == '__main__':
# dataset = AICity20ReOri(root='/home/zxy/data/ReID/vehicle')
| 38.640719 | 117 | 0.619867 |
0d2dca954c15b19a9192f409d0a9ae287a44d785 | 2,325 | py | Python | algorithms/patterns/01. Sliding window/07. Longest Substring with Same Letters after Replacement.py | csanry/dsa | 3ba72e0a76a24f261b9c0974cdad862ed97cf3bc | [
"MIT"
] | 1 | 2022-01-06T11:35:28.000Z | 2022-01-06T11:35:28.000Z | algorithms/patterns/01. Sliding window/07. Longest Substring with Same Letters after Replacement.py | csanry/dsa | 3ba72e0a76a24f261b9c0974cdad862ed97cf3bc | [
"MIT"
] | null | null | null | algorithms/patterns/01. Sliding window/07. Longest Substring with Same Letters after Replacement.py | csanry/dsa | 3ba72e0a76a24f261b9c0974cdad862ed97cf3bc | [
"MIT"
] | null | null | null | '''
Problem Statement
Given a string with lowercase letters only, if you are allowed to replace no more than ‘k’ letters with any letter, find the length of the longest substring having the same letters after replacement.
Example 1: Input: String="aabccbb", k=2
Output: 5
Explanation: Replace the two 'c' with 'b' to have a longest repeating substring "bbbbb".
Example 2:
Input: String="abbcb", k=1
Output: 4
Explanation: Replace the 'c' with 'b' to have a longest repeating substring "bbbb".
Example 3:
Input: String="abccde", k=1
Output: 3
Explanation: Replace the 'b' or 'd' with 'c' to have the longest repeating substring "ccc".
'''
# implementation
def length_of_longest_substring(st, k):
window_start, max_length, max_repeat_letter_count = 0, 0, 0
frequency_map = {}
# try to extend the range [window_start, window_end]
for window_end in range(len(st)):
right_char = st[window_end]
if right_char not in frequency_map:
frequency_map[right_char] = 0
frequency_map[right_char] += 1
max_repeat_letter_count = max(max_repeat_letter_count, frequency_map[right_char])
# current window is from window_start to window_end, overall we have a letter which is
# repeating max_repeat_letter_count times, this means we should replace the remaining letters
# in the window. If the remaining letters are more than k, we need to shrink the window as
# we are not allowed to replace more than k times
if (window_end - window_start + 1 - max_repeat_letter_count) > k:
left_char = st[window_start]
frequency_map[left_char] -= 1
window_start += 1
max_length = max(max_length, window_end - window_start + 1)
return max_length
def main():
print(length_of_longest_substring("aabccbb", 2)) # 5
print(length_of_longest_substring("abbcb", 1)) # 4
print(length_of_longest_substring("abccde", 1)) # 3
main()
'''
Time Complexity
The time complexity of the above algorithm will be O(N) where ‘N’ is the number of letters in the input string.
Space Complexity
As we are expecting only the lower case letters in the input string, we can conclude that the space complexity will be O(26), to store each letter’s frequency in the HashMap, which is asymptotically equal to O(1).
'''
| 40.086207 | 213 | 0.715269 |
0c1c0ff68126806825202c312a1983297c28a3a3 | 1,030 | py | Python | server.py | BladeSides/ApertER-API | 538c3f2e8b602ad7c0296cf5b6621bd32e26ea55 | [
"MIT"
] | null | null | null | server.py | BladeSides/ApertER-API | 538c3f2e8b602ad7c0296cf5b6621bd32e26ea55 | [
"MIT"
] | null | null | null | server.py | BladeSides/ApertER-API | 538c3f2e8b602ad7c0296cf5b6621bd32e26ea55 | [
"MIT"
] | null | null | null | from bottle import get, route, post, run, request # or route
from aperter import er_, json_
@route('/')
def root():
    # Catch-all for the bare host; the real endpoints are /api (GET) and /er (POST).
    return '''
    <h1>Wrong Route</h1>
    '''
@route('/api')
def api():
    """GET /api?url=<image-url> -> JSON with the detected emotion.

    Returns json_(status="fail") when emotion recognition raises, otherwise
    json_(status="success", em=<emotion>, url=<url>).
    """
    url = request.query.url
    em = -1  # sentinel value logged when er_ fails before producing a result
    try:
        em = er_(url=url)
    except Exception:  # was a bare `except:` — don't swallow SystemExit/KeyboardInterrupt
        print("url: "+url+" ,emotion: "+str(em))
        return json_(status="fail")
    print("url: "+url+" ,emotion: "+str(em))
    return json_(status="success", em=em, url=url)
@post('/er')
def er():
    """POST /er with form field `url` -> JSON with the detected emotion."""
    em = -1  # sentinel value logged when er_ fails before producing a result
    # Fetch `url` before the try block: the original assigned it inside the
    # try, so a failure before the assignment made the except handler raise
    # NameError; also guard the log line against url being None.
    url = request.forms.get('url')
    try:
        em = er_(url=url)
    except Exception:  # was a bare `except:` — don't swallow SystemExit/KeyboardInterrupt
        print("url: "+str(url)+" ,emotion: "+str(em))
        return json_(status="fail")
    print("url: "+url+" ,emotion: "+str(em))
    return json_(status="success", em=em, url=url)
if __name__ == '__main__':
    # Development server; bottle's default WSGIRef backend is single-threaded.
    print("Do not close this window")
    run(host='localhost', port=8880)
fc2242b17a2df5b65645ac7bb1b898fee82e5a39 | 35,868 | py | Python | venv/Lib/site-packages/fontTools/merge.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | 7 | 2016-03-22T12:14:47.000Z | 2021-05-07T20:21:11.000Z | activate/Lib/site-packages/fontTools/merge.py | Tanushree28/Cyberbullying-Detection | 3a69ade5cf068b640a0d6d1f176ff0d0e2040501 | [
"MIT"
] | 3 | 2019-03-28T18:17:45.000Z | 2021-06-30T23:37:13.000Z | activate/Lib/site-packages/fontTools/merge.py | Tanushree28/Cyberbullying-Detection | 3a69ade5cf068b640a0d6d1f176ff0d0e2040501 | [
"MIT"
] | 3 | 2020-08-04T02:48:32.000Z | 2020-08-17T01:20:09.000Z | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools.misc.timeTools import timestampNow
from fontTools import ttLib, cffLib
from fontTools.ttLib.tables import otTables, _h_e_a_d
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.misc.loggingTools import Timer
from fontTools.pens.recordingPen import DecomposingRecordingPen
from functools import reduce
import sys
import time
import operator
import logging
import os
log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO)
def _add_method(*clazzes, **kwargs):
    """Returns a decorator function that adds a new method to one or
    more classes."""
    # allowDefaultTable=True must be passed explicitly when patching
    # DefaultTable itself; otherwise landing on DefaultTable means a table
    # class lookup silently failed.
    allowDefault = kwargs.get('allowDefaultTable', False)
    def wrapper(method):
        done = []
        for clazz in clazzes:
            if clazz in done: continue # Support multiple names of a clazz
            done.append(clazz)
            assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
            assert method.__name__ not in clazz.__dict__, \
                "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
            setattr(clazz, method.__name__, method)
        # Returning None replaces the decorated module-level name, so the
        # method lives only on the patched classes.
        return None
    return wrapper
# General utility functions for merging values from different fonts
def equal(lst):
    """Merge helper: all inputs must compare equal; return the shared value."""
    items = list(lst)
    it = iter(items)
    reference = next(it)
    assert all(value == reference for value in it), \
        "Expected all items to be equal: %s" % items
    return reference
def first(lst):
    # Merge helper: keep the first font's value.
    return next(iter(lst))

def recalculate(lst):
    # Merge helper: marker meaning "recompute this field at compile time".
    return NotImplemented

def current_time(lst):
    # Merge helper: reset timestamps (head.created/modified) to "now".
    return timestampNow()
def bitwise_and(lst):
    # Merge helper: a bit survives only if set in every font.
    return reduce(operator.and_, lst)

def bitwise_or(lst):
    # Merge helper: a bit survives if set in any font.
    return reduce(operator.or_, lst)

def avg_int(lst):
    # Merge helper: integer (floor) average of the values.
    lst = list(lst)
    return sum(lst) // len(lst)
def onlyExisting(func):
    """Returns a filter func that when called with a list,
    only calls func on the non-NotImplemented items of the list,
    and only so if there's at least one item remaining.
    Otherwise returns NotImplemented."""
    def wrapper(lst):
        present = [value for value in lst if value is not NotImplemented]
        if not present:
            return NotImplemented
        return func(present)
    return wrapper
def sumLists(lst):
    """Concatenate a sequence of lists into one flat list."""
    merged = []
    for seq in lst:
        merged.extend(seq)
    return merged
def sumDicts(lst):
    """Merge a sequence of dicts into one; later entries win on key collisions."""
    merged = {}
    for mapping in lst:
        merged.update(mapping)
    return merged
def mergeObjects(lst):
    """Merge a list of same-class objects attribute by attribute.

    Per-attribute merge rules come from the class's `mergeMap` dict, with '*'
    as a fallback.  NotImplemented inputs are filtered out first; returns
    NotImplemented if nothing is mergeable, None if all survivors are None.
    """
    lst = [item for item in lst if item is not NotImplemented]
    if not lst:
        return NotImplemented
    lst = [item for item in lst if item is not None]
    if not lst:
        return None
    clazz = lst[0].__class__
    assert all(type(item) == clazz for item in lst), lst
    logic = clazz.mergeMap
    returnTable = clazz()
    returnDict = {}
    # Union of attribute names present on any of the objects.
    allKeys = set.union(set(), *(vars(table).keys() for table in lst))
    for key in allKeys:
        try:
            mergeLogic = logic[key]
        except KeyError:
            try:
                mergeLogic = logic['*']
            except KeyError:
                raise Exception("Don't know how to merge key %s of class %s" %
                        (key, clazz.__name__))
        # A rule of NotImplemented means "drop this attribute entirely".
        if mergeLogic is NotImplemented:
            continue
        # Objects missing the attribute contribute NotImplemented, which
        # onlyExisting-style rules can filter out.
        value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
        if value is not NotImplemented:
            returnDict[key] = value
    returnTable.__dict__ = returnDict
    return returnTable
def mergeBits(bitmap):
    """Build a merge function that combines integer bit-fields bit by bit.

    `bitmap` maps bit numbers (with '*' as a fallback) to a function that
    merges a sequence of booleans; the 'size' entry gives the bit count.
    """
    def wrapper(lst):
        values = list(lst)
        merged = 0
        for bit in range(bitmap['size']):
            logic = bitmap.get(bit, bitmap.get('*'))
            if logic is None:
                raise Exception("Don't know how to merge bit %s" % bit)
            mask = 1 << bit
            merged |= logic(bool(value & mask) for value in values) << bit
        return merged
    return wrapper
@_add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
    """Generic table merge, dispatched through the class's `mergeMap`.

    A dict mergeMap merges attribute-by-attribute via Merger.mergeObjects;
    a callable mergeMap is applied to the table list directly.  Tables with
    no mergeMap at all are dropped from the output (NotImplemented).
    """
    if not hasattr(self, 'mergeMap'):
        log.info("Don't know how to merge '%s'.", self.tableTag)
        return NotImplemented
    logic = self.mergeMap
    if isinstance(logic, dict):
        return m.mergeObjects(self, self.mergeMap, tables)
    else:
        return logic(tables)
# maxp: most fields are structural maxima across fonts; numGlyphs is the sum.
ttLib.getTableClass('maxp').mergeMap = {
    '*': max,
    'tableTag': equal,
    'tableVersion': equal,
    'numGlyphs': sum,
    'maxStorage': first,
    'maxFunctionDefs': first,
    'maxInstructionDefs': first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

# head.flags is merged bit-by-bit: conservative properties AND, the rest OR.
headFlagsMergeBitMap = {
    'size': 16,
    '*': bitwise_or,
    1: bitwise_and, # Baseline at y = 0
    2: bitwise_and, # lsb at x = 0
    3: bitwise_and, # Force ppem to integer values. FIXME?
    5: bitwise_and, # Font is vertical
    6: lambda bit: 0, # Always set to zero
    11: bitwise_and, # Font data is 'lossless'
    13: bitwise_and, # Optimized for ClearType
    14: bitwise_and, # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0, # Always set to zero
}

# head: bounding box is the union of all fonts; timestamps reset to "now".
ttLib.getTableClass('head').mergeMap = {
    'tableTag': equal,
    'tableVersion': max,
    'fontRevision': max,
    'checkSumAdjustment': lambda lst: 0, # We need *something* here
    'magicNumber': equal,
    'flags': mergeBits(headFlagsMergeBitMap),
    'unitsPerEm': equal,
    'created': current_time,
    'modified': current_time,
    'xMin': min,
    'yMin': min,
    'xMax': max,
    'yMax': max,
    'macStyle': first,
    'lowestRecPPEM': max,
    'fontDirectionHint': lambda lst: 2,
    'indexToLocFormat': first,
    'glyphDataFormat': equal,
}
# hhea/vhea: vertical metrics widen to cover all fonts; caret settings keep
# the first font's values; metric counts are recalculated on compile.
ttLib.getTableClass('hhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceWidthMax': max,
    'minLeftSideBearing': min,
    'minRightSideBearing': min,
    'xMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfHMetrics': recalculate,
}

ttLib.getTableClass('vhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceHeightMax': max,
    'minTopSideBearing': min,
    'minBottomSideBearing': min,
    'yMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfVMetrics': recalculate,
}
# OS/2 fsType (embedding permissions): "no-*" restriction bits OR together,
# permission bits AND together, so the merge drifts toward more restrictive
# unless normalized first by mergeOs2FsType.
os2FsTypeMergeBitMap = {
    'size': 16,
    '*': lambda bit: 0,
    1: bitwise_or, # no embedding permitted
    2: bitwise_and, # allow previewing and printing documents
    3: bitwise_and, # allow editing documents
    8: bitwise_or, # no subsetting permitted
    9: bitwise_or, # no embedding of outlines permitted
}

def mergeOs2FsType(lst):
    """Merge fsType values toward the least restrictive embedding permission."""
    lst = list(lst)
    if all(item == 0 for item in lst):
        return 0
    # Compute least restrictive logic for each fsType value
    for i in range(len(lst)):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if lst[i] & 0x000C:
            lst[i] &= ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif lst[i] & 0x0008:
            lst[i] |= 0x0004
        # set bits 2 and 3 if everything is allowed
        elif lst[i] == 0:
            lst[i] = 0x000C
    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType

# OS/2: Unicode/code-page ranges union; typographic metrics widen; version-
# dependent fields only merge when present (onlyExisting).
ttLib.getTableClass('OS/2').mergeMap = {
    '*': first,
    'tableTag': equal,
    'version': max,
    'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this
    'fsType': mergeOs2FsType, # Will be overwritten
    'panose': first, # FIXME: should really be the first Latin font
    'ulUnicodeRange1': bitwise_or,
    'ulUnicodeRange2': bitwise_or,
    'ulUnicodeRange3': bitwise_or,
    'ulUnicodeRange4': bitwise_or,
    'fsFirstCharIndex': min,
    'fsLastCharIndex': max,
    'sTypoAscender': max,
    'sTypoDescender': min,
    'sTypoLineGap': max,
    'usWinAscent': max,
    'usWinDescent': max,
    # Version 1
    'ulCodePageRange1': onlyExisting(bitwise_or),
    'ulCodePageRange2': onlyExisting(bitwise_or),
    # Version 2, 3, 4
    'sxHeight': onlyExisting(max),
    'sCapHeight': onlyExisting(max),
    'usDefaultChar': onlyExisting(first),
    'usBreakChar': onlyExisting(first),
    'usMaxContext': onlyExisting(max),
    # version 5
    'usLowerOpticalPointSize': onlyExisting(min),
    'usUpperOpticalPointSize': onlyExisting(max),
}
@_add_method(ttLib.getTableClass('OS/2'))
def merge(self, m, tables):
    """Merge OS/2 via the generic map, then sanitize version-dependent fsType bits."""
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self
# post/metrics/name/loca/glyf merge rules; loca is fully recomputed on
# compile, glyf concatenates glyph dicts and glyph orders.
ttLib.getTableClass('post').mergeMap = {
    '*': first,
    'tableTag': equal,
    'formatType': max,
    'isFixedPitch': min,
    'minMemType42': max,
    'maxMemType42': lambda lst: 0,
    'minMemType1': max,
    'maxMemType1': lambda lst: 0,
    'mapping': onlyExisting(sumDicts),
    'extraNames': lambda lst: [],
}

ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
    'tableTag': equal,
    'metrics': sumDicts,
}

ttLib.getTableClass('name').mergeMap = {
    'tableTag': equal,
    'names': first, # FIXME? Does mixing name records make sense?
}

ttLib.getTableClass('loca').mergeMap = {
    '*': recalculate,
    'tableTag': equal,
}

ttLib.getTableClass('glyf').mergeMap = {
    'tableTag': equal,
    'glyphs': sumDicts,
    'glyphOrder': sumLists,
}
@_add_method(ttLib.getTableClass('glyf'))
def merge(self, m, tables):
    """Merge glyf tables; TrueType hints are stripped from all but the first font."""
    for i,table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)

# Hinting support tables can't be meaningfully combined across fonts;
# keep the first font's copy.
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
@_add_method(ttLib.getTableClass('CFF '))
def merge(self, m, tables):
    """Merge CFF tables by appending later fonts' charstrings/charsets onto
    the first font.  All fonts are forced to share the first font's Private
    dict; CID-keyed (FDSelect) fonts are rejected.
    """
    if any(hasattr(table, "FDSelect") for table in tables):
        raise NotImplementedError(
            "Merging CID-keyed CFF tables is not supported yet"
        )
    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    storedNamesStrings = []
    glyphOrderStrings = []
    glyphOrder = set(newfont.getGlyphOrder())
    # Partition the first font's string index: glyph-name strings vs. others.
    for name in newfont.strings.strings:
        if name not in glyphOrder:
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)
    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    log.debug("FONT 0 CharStrings: %d.", len(newcs))
    for i, table in enumerate(tables[1:], start=1):
        font = table.cff[0]
        font.Private = private
        fontGlyphOrder = set(font.getGlyphOrder())
        for name in font.strings.strings:
            if name in fontGlyphOrder:
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        gs = table.cff.GlobalSubrs
        log.debug("Font %d CharStrings: %d.", i, len(cs))
        chrset.extend(font.charset)
        if newcs.charStringsAreIndexed:
            # NOTE(review): the inner `i` shadows the outer font index `i`;
            # harmless only because the outer value isn't used afterwards.
            for i, name in enumerate(cs.charStrings, start=len(newcs)):
                newcs.charStrings[name] = i
                newcs.charStringsIndex.items.append(None)
        for name in cs.charStrings:
            newcs[name] = cs[name]
    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings
    return newcff
def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2):
    """True if the two glyphs decompose to identical outlines and metrics."""
    pen1 = DecomposingRecordingPen(glyphSet1)
    pen2 = DecomposingRecordingPen(glyphSet2)
    g1 = glyphSet1[glyph1]
    g2 = glyphSet2[glyph2]
    g1.draw(pen1)
    g2.draw(pen2)
    # height only exists for vertical-metrics glyph sets; skip it otherwise.
    return (pen1.value == pen2.value and
            g1.width == g2.width and
            (not hasattr(g1, 'height') or g1.height == g2.height))
# Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings":
# https://docs.microsoft.com/en-us/typography/opentype/spec/name
class CmapUnicodePlatEncodings:
    BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
    FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}

@_add_method(ttLib.getTableClass('cmap'))
def merge(self, m, tables):
    """Merge Unicode cmap subtables into one format-4 subtable (plus a
    format-12 one when supplementary-plane codepoints exist).

    Codepoints that map to visually different glyphs in different fonts are
    recorded in m.duplicateGlyphsPerFont for later 'locl' fix-up in the GSUB
    merge.
    """
    # TODO Handle format=14.
    # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
    # If there is a format 12 table for the same font, ignore the format 4 table
    cmapTables = []
    for fontIdx,table in enumerate(tables):
        format4 = None
        format12 = None
        for subtable in table.tables:
            properties = (subtable.format, subtable.platformID, subtable.platEncID)
            if properties in CmapUnicodePlatEncodings.BMP:
                format4 = subtable
            elif properties in CmapUnicodePlatEncodings.FullRepertoire:
                format12 = subtable
            else:
                log.warning(
                    "Dropped cmap subtable from font [%s]:\t"
                    "format %2s, platformID %2s, platEncID %2s",
                    fontIdx, subtable.format, subtable.platformID, subtable.platEncID
                )
        if format12 is not None:
            cmapTables.append((format12, fontIdx))
        elif format4 is not None:
            cmapTables.append((format4, fontIdx))
    # Build a unicode mapping, then decide which format is needed to store it.
    cmap = {}
    fontIndexForGlyph = {}
    # glyphSets are loaded lazily, only when a duplicate needs comparing.
    glyphSets = [None for f in m.fonts] if hasattr(m, 'fonts') else None
    for table,fontIdx in cmapTables:
        # handle duplicates
        for uni,gid in table.cmap.items():
            oldgid = cmap.get(uni, None)
            if oldgid is None:
                cmap[uni] = gid
                fontIndexForGlyph[gid] = fontIdx
            elif oldgid != gid:
                # Char previously mapped to oldgid, now to gid.
                # Record, to fix up in GSUB 'locl' later.
                if m.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
                    if glyphSets is not None:
                        oldFontIdx = fontIndexForGlyph[oldgid]
                        for idx in (fontIdx, oldFontIdx):
                            if glyphSets[idx] is None:
                                glyphSets[idx] = m.fonts[idx].getGlyphSet()
                        # Identical outlines need no 'locl' substitution.
                        if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
                            continue
                    m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
                elif m.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
                    # Char previously mapped to oldgid but oldgid is already remapped to a different
                    # gid, because of another Unicode character.
                    # TODO: Try harder to do something about these.
                    log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid)
    cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule('cmap')
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
        cmapTable.platformID = 3
        cmapTable.platEncID = 10
        cmapTable.language = 0
        cmapTable.cmap = cmap
        self.tables.append(cmapTable)
    # always create format-4
    cmapTable = module.cmap_classes[4](4)
    cmapTable.platformID = 3
    cmapTable.platEncID = 1
    cmapTable.language = 0
    cmapTable.cmap = cmapBmpOnly
    # ordered by platform then encoding
    self.tables.insert(0, cmapTable)
    self.tableVersion = 0
    self.numSubTables = len(self.tables)
    return self
def mergeLookupLists(lst):
    """Concatenate per-font lookup-index lists into one flat list."""
    # TODO Do smarter merge.
    flattened = []
    for indices in lst:
        flattened.extend(indices)
    return flattened
def mergeFeatures(lst):
    """Combine several Feature tables into one; lookup lists are concatenated."""
    assert lst
    self = otTables.Feature()
    self.FeatureParams = None
    self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
    self.LookupCount = len(self.LookupListIndex)
    return self

def mergeFeatureLists(lst):
    """Merge FeatureRecord lists, combining records that share a FeatureTag."""
    d = {}
    for l in lst:
        for f in l:
            tag = f.FeatureTag
            if tag not in d:
                d[tag] = []
            d[tag].append(f.Feature)
    ret = []
    for tag in sorted(d.keys()):
        rec = otTables.FeatureRecord()
        rec.FeatureTag = tag
        rec.Feature = mergeFeatures(d[tag])
        ret.append(rec)
    return ret

def mergeLangSyses(lst):
    """Merge LangSys tables.  Required features are not supported: every
    input must have ReqFeatureIndex == 0xFFFF (none)."""
    assert lst
    # TODO Support merging ReqFeatureIndex
    assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
    self = otTables.LangSys()
    self.LookupOrder = None
    self.ReqFeatureIndex = 0xFFFF
    self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
    self.FeatureCount = len(self.FeatureIndex)
    return self

def mergeScripts(lst):
    """Merge Script tables, unifying LangSysRecords (and DefaultLangSys) by tag."""
    assert lst
    if len(lst) == 1:
        return lst[0]
    langSyses = {}
    for sr in lst:
        for lsr in sr.LangSysRecord:
            if lsr.LangSysTag not in langSyses:
                langSyses[lsr.LangSysTag] = []
            langSyses[lsr.LangSysTag].append(lsr.LangSys)
    lsrecords = []
    for tag, langSys_list in sorted(langSyses.items()):
        lsr = otTables.LangSysRecord()
        lsr.LangSys = mergeLangSyses(langSys_list)
        lsr.LangSysTag = tag
        lsrecords.append(lsr)
    self = otTables.Script()
    self.LangSysRecord = lsrecords
    self.LangSysCount = len(lsrecords)
    dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
    if dfltLangSyses:
        self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
    else:
        self.DefaultLangSys = None
    return self

def mergeScriptRecords(lst):
    """Merge ScriptRecord lists, combining records that share a ScriptTag."""
    d = {}
    for l in lst:
        for s in l:
            tag = s.ScriptTag
            if tag not in d:
                d[tag] = []
            d[tag].append(s.Script)
    ret = []
    for tag in sorted(d.keys()):
        rec = otTables.ScriptRecord()
        rec.ScriptTag = tag
        rec.Script = mergeScripts(d[tag])
        ret.append(rec)
    return ret
# Merge rules for OpenType layout subtables; counts are either summed,
# recalculated (lambda: None → fixed up later), or implied by mergeObjects.
otTables.ScriptList.mergeMap = {
    'ScriptCount': lambda lst: None, # TODO
    'ScriptRecord': mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    'BaseScriptCount': lambda lst: None, # TODO
    # TODO: Merge duplicate entries
    'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
}

otTables.FeatureList.mergeMap = {
    'FeatureCount': sum,
    'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    'LookupCount': sum,
    'Lookup': sumLists,
}

otTables.Coverage.mergeMap = {
    'Format': min,
    'glyphs': sumLists,
}

otTables.ClassDef.mergeMap = {
    'Format': min,
    'classDefs': sumDicts,
}

otTables.LigCaretList.mergeMap = {
    'Coverage': mergeObjects,
    'LigGlyphCount': sum,
    'LigGlyph': sumLists,
}

otTables.AttachList.mergeMap = {
    'Coverage': mergeObjects,
    'GlyphCount': sum,
    'AttachPoint': sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    'MarkSetTableFormat': equal,
    'MarkSetCount': sum,
    'Coverage': sumLists,
}

otTables.Axis.mergeMap = {
    '*': mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    'BaseTagCount': sum,
    'BaselineTag': sumLists,
}

# Top-level layout tables all merge attribute-by-attribute.
otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
    '*': mergeObjects,
    'Version': max,
}

ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
    'tableTag': onlyExisting(equal), # XXX clean me up
    'table': mergeObjects,
}
@_add_method(ttLib.getTableClass('GSUB'))
def merge(self, m, tables):
    """Merge GSUB tables; for each duplicate glyph found during the cmap
    merge, synthesize a 'locl' single-substitution lookup so each script
    keeps its own glyph shape."""
    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups: continue
        assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB: %s" % (i + 1, dups)
        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == 'DFLT': continue # XXX
            for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
                if langsys is None: continue # XXX Create!
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    # Create (once per font) a synthetic 'locl' feature record
                    # and register it with this langsys and the feature list.
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = 'locl'
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                    langsys.FeatureIndex.append(synthFeature)
                    langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
                    table.table.FeatureList.FeatureRecord.append(synthFeature)
                    table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature
                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1
                # Prepend the lookup object itself; _postMerge maps objects
                # back to numeric indices.
                feature.Feature.LookupListIndex[:0] = [synthLookup]
                feature.Feature.LookupCount += 1
    DefaultTable.merge(self, m, tables)
    return self
@_add_method(otTables.SingleSubst,
        otTables.MultipleSubst,
        otTables.AlternateSubst,
        otTables.LigatureSubst,
        otTables.ReverseChainSingleSubst,
        otTables.SinglePos,
        otTables.PairPos,
        otTables.CursivePos,
        otTables.MarkBasePos,
        otTables.MarkLigPos,
        otTables.MarkMarkPos)
def mapLookups(self, lookupMap):
    # These subtables hold no nested lookup indices; nothing to remap.
    pass
# Copied and trimmed down from subset.py
@_add_method(otTables.ContextSubst,
        otTables.ChainContextSubst,
        otTables.ContextPos,
        otTables.ChainContextPos)
def __merge_classify_context(self):
    """Build (and cache per class+format) the attribute names used by the
    contextual lookup formats, so mapLookups can stay generic."""

    class ContextHelper(object):
        def __init__(self, klass, Format):
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
            else:
                Chain = ''
            ChainTyp = Chain+Typ
            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp
            self.LookupRecord = Type+'LookupRecord'
            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleSet = ChainTyp+'RuleSet'
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleSet = ChainTyp+'ClassSet'
            # Format 3 has no Rule/RuleSet; only LookupRecord is used.

    if self.Format not in [1, 2, 3]:
        return None # Don't shoot the messenger; let it go
    # Cache one helper per (class, format) on the class itself.
    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]
@_add_method(otTables.ContextSubst,
        otTables.ChainContextSubst,
        otTables.ContextPos,
        otTables.ChainContextPos)
def mapLookups(self, lookupMap):
    """Remap LookupListIndex values inside (chain) contextual rules."""
    c = self.__merge_classify_context()
    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format
@_add_method(otTables.ExtensionSubst,
        otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    # Extension lookups just wrap a real subtable; delegate to it.
    if self.Format == 1:
        self.ExtSubTable.mapLookups(lookupMap)
    else:
        assert 0, "unknown format: %s" % self.Format
# Recursive walkers used by _preMerge/_postMerge to swap lookup and feature
# indices for object references (and back).
@_add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    for st in self.SubTable:
        if not st: continue
        st.mapLookups(lookupMap)

@_add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    for l in self.Lookup:
        if not l: continue
        l.mapLookups(lookupMap)

@_add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]

@_add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    for f in self.FeatureRecord:
        if not f or not f.Feature: continue
        f.Feature.mapLookups(lookupMap)

@_add_method(otTables.DefaultLangSys,
        otTables.LangSys)
def mapFeatures(self, featureMap):
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    # 65535 (0xFFFF) means "no required feature"; leave it untouched.
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]

@_add_method(otTables.Script)
def mapFeatures(self, featureMap):
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for l in self.LangSysRecord:
        if not l or not l.LangSys: continue
        l.LangSys.mapFeatures(featureMap)

@_add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    for s in self.ScriptRecord:
        if not s or not s.Script: continue
        s.Script.mapFeatures(featureMap)
class Options(object):
    """Command-line option container for the merger.

    Attributes hold option values; `parse_opts` consumes `--foo[=value]`
    style arguments and returns the remaining positional arguments.
    """

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):
        self.verbose = False
        self.timing = False
        self.set(**kwargs)

    def set(self, **kwargs):
        """Set known options from keyword arguments; raise on unknown names."""
        for k, v in kwargs.items():
            if not hasattr(self, k):
                raise self.UnknownOptionError("Unknown option '%s'" % k)
            setattr(self, k, v)

    def parse_opts(self, argv, ignore_unknown=()):
        """Parse `--option[=value]` arguments out of `argv`.

        Supported forms: `--opt` (True), `--no-opt` (False), `--opt=value`,
        and list operations `--opt+=a,b` / `--opt-=a,b`.  Unknown options
        raise UnknownOptionError unless `ignore_unknown` is True or contains
        the option name, in which case they are passed through.  Returns the
        non-option arguments (plus ignored options) in their original order.

        Fix: the default for `ignore_unknown` was a shared mutable list
        (`[]`); it is now an immutable tuple — membership checks behave
        identically.
        """
        ret = []
        opts = {}
        for a in argv:
            orig_a = a
            if not a.startswith('--'):
                ret.append(a)
                continue
            a = a[2:]
            i = a.find('=')
            op = '='
            if i == -1:
                if a.startswith("no-"):
                    k = a[3:]
                    v = False
                else:
                    k = a
                    v = True
            else:
                k = a[:i]
                if k[-1] in "-+":
                    op = k[-1]+'='  # Op is '-=' or '+=' now.
                    k = k[:-1]
                v = a[i+1:]
            ok = k
            k = k.replace('-', '_')
            if not hasattr(self, k):
                if ignore_unknown is True or ok in ignore_unknown:
                    ret.append(orig_a)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % a)

            ov = getattr(self, k)
            if isinstance(ov, bool):
                # NOTE: bool('0') is True — numeric strings are not parsed here.
                v = bool(v)
            elif isinstance(ov, int):
                v = int(v)
            elif isinstance(ov, list):
                vv = v.split(',')
                if vv == ['']:
                    vv = []
                # Numeric-looking entries become ints (base auto-detected).
                vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
                if op == '=':
                    v = vv
                elif op == '+=':
                    v = ov
                    v.extend(vv)
                elif op == '-=':
                    v = ov
                    for x in vv:
                        if x in v:
                            v.remove(x)
                else:
                    assert 0

            opts[k] = v
        self.set(**opts)
        return ret
class _AttendanceRecordingIdentityDict(object):
"""A dictionary-like object that records indices of items actually accessed
from a list."""
def __init__(self, lst):
self.l = lst
self.d = {id(v):i for i,v in enumerate(lst)}
self.s = set()
def __getitem__(self, v):
self.s.add(self.d[id(v)])
return v
class _GregariousIdentityDict(object):
"""A dictionary-like object that welcomes guests without reservations and
adds them to the end of the guest list."""
def __init__(self, lst):
self.l = lst
self.s = set(id(v) for v in lst)
def __getitem__(self, v):
if id(v) not in self.s:
self.s.add(id(v))
self.l.append(v)
return v
class _NonhashableDict(object):
"""A dictionary-like object mapping objects to values."""
def __init__(self, keys, values=None):
if values is None:
self.d = {id(v):i for i,v in enumerate(keys)}
else:
self.d = {id(k):v for k,v in zip(keys, values)}
def __getitem__(self, k):
return self.d[id(k)]
def __setitem__(self, k, v):
self.d[id(k)] = v
def __delitem__(self, k):
del self.d[id(k)]
class Merger(object):
"""Font merger.
This class merges multiple files into a single OpenType font, taking into
account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
all the fonts).
If multiple glyphs map to the same Unicode value, and the glyphs are considered
sufficiently different (that is, they differ in any of paths, widths, or
height), then subsequent glyphs are renamed and a lookup in the ``locl``
feature will be created to disambiguate them. For example, if the arguments
are an Arabic font and a Latin font and both contain a set of parentheses,
the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
and a lookup will be inserted into the to ``locl`` feature (creating it if
necessary) under the ``latn`` script to substitute ``parenleft`` with
``parenleft#1`` etc.
Restrictions:
- All fonts must currently have TrueType outlines (``glyf`` table).
Merging fonts with CFF outlines is not supported.
- All fonts must have the same units per em.
- If duplicate glyph disambiguation takes place as described above then the
fonts must have a ``GSUB`` table.
Attributes:
options: Currently unused.
"""
    def __init__(self, options=None):
        # Options are currently stored but otherwise unused.
        if not options:
            options = Options()
        self.options = options
    def merge(self, fontfiles):
        """Merges fonts together.

        Args:
            fontfiles: A list of file names to be merged

        Returns:
            A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
            this to write it out to an OTF file.
        """
        #
        # Settle on a mega glyph order.
        #
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        glyphOrders = [font.getGlyphOrder() for font in fonts]
        megaGlyphOrder = self._mergeGlyphOrders(glyphOrders)
        # Take first input file sfntVersion
        sfntVersion = fonts[0].sfntVersion
        # CFF outlines are desubroutinized up front so charstrings can simply
        # be concatenated later.
        cffTables = [None] * len(fonts)
        if sfntVersion == "OTTO":
            for i, font in enumerate(fonts):
                font['CFF '].cff.desubroutinize()
                cffTables[i] = font['CFF ']
        # Reload fonts and set new glyph names on them.
        # TODO Is it necessary to reload font? I think it is. At least
        # it's safer, in case tables were loaded to provide glyph names.
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        for font, glyphOrder, cffTable in zip(fonts, glyphOrders, cffTables):
            font.setGlyphOrder(glyphOrder)
            if cffTable:
                # Rename CFF CharStrings to match the new glyphOrder.
                # Using cffTable from before reloading the fonts, because reasons.
                self._renameCFFCharStrings(glyphOrder, cffTable)
                font['CFF '] = cffTable
        mega = ttLib.TTFont(sfntVersion=sfntVersion)
        mega.setGlyphOrder(megaGlyphOrder)
        for font in fonts:
            self._preMerge(font)
        # Transient state read by the per-table merge methods.
        self.fonts = fonts
        self.duplicateGlyphsPerFont = [{} for _ in fonts]
        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove('GlyphOrder')
        # Make sure we process cmap before GSUB as we have a dependency there.
        if 'GSUB' in allTags:
            allTags.remove('GSUB')
            allTags = ['GSUB'] + list(allTags)
        if 'cmap' in allTags:
            allTags.remove('cmap')
            allTags = ['cmap'] + list(allTags)
        for tag in allTags:
            with timer("merge '%s'" % tag):
                tables = [font.get(tag, NotImplemented) for font in fonts]
                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use:  table = mergeObjects(tables)
                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)
        del self.duplicateGlyphsPerFont
        del self.fonts
        self._postMerge(mega)
        return mega
def _mergeGlyphOrders(self, glyphOrders):
"""Modifies passed-in glyphOrders to reflect new glyph names.
Returns glyphOrder for the merged font."""
mega = {}
for glyphOrder in glyphOrders:
for i,glyphName in enumerate(glyphOrder):
if glyphName in mega:
n = mega[glyphName]
while (glyphName + "#" + repr(n)) in mega:
n += 1
mega[glyphName] = n
glyphName += "#" + repr(n)
glyphOrder[i] = glyphName
mega[glyphName] = 1
return list(mega.keys())
    def _renameCFFCharStrings(self, glyphOrder, cffTable):
        """Rename topDictIndex charStrings based on glyphOrder."""
        # Relies on dict insertion order matching the original glyph order.
        td = cffTable.cff.topDictIndex[0]
        charStrings = {}
        for i, v in enumerate(td.CharStrings.charStrings.values()):
            glyphName = glyphOrder[i]
            charStrings[glyphName] = v
        cffTable.cff.topDictIndex[0].CharStrings.charStrings = charStrings
    def mergeObjects(self, returnTable, logic, tables):
        """Merge `tables` attribute-by-attribute into `returnTable` using the
        per-key rules in `logic` ('*' is the fallback rule)."""
        # Right now we don't use self at all.  Will use in the future
        # for options and logging.
        allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
        for key in allKeys:
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    mergeLogic = logic['*']
                except KeyError:
                    raise Exception("Don't know how to merge key %s of class %s" %
                            (key, returnTable.__class__.__name__))
            # NotImplemented rule means "drop the attribute".
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)
        return returnTable
def _preMerge(self, font):
    """Prepare one input font for merging.

    Rewrites GSUB/GPOS lookup and feature references from table indices to
    direct object references, so the per-table merge logic can move objects
    around without invalidating indices.  ``_postMerge`` performs the
    inverse mapping on the merged font.
    """
    # Map indices to references
    GDEF = font.get('GDEF')  # currently unused; see the TODOs below
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')
    for t in [GSUB, GPOS]:
        if not t: continue
        if t.table.LookupList:
            # Replace lookup indices with the Lookup objects themselves,
            # both in the lookup list and in the feature records.
            lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)
        if t.table.FeatureList and t.table.ScriptList:
            # Same for feature indices referenced from the script list.
            featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)
    # TODO GDEF/Lookup MarkFilteringSets
    # TODO FeatureParams nameIDs
def _postMerge(self, font):
    """Invert ``_preMerge`` on the merged font.

    Turns object references in GSUB/GPOS back into indices, first adding
    any features/lookups created during the merge, then pruning those that
    are no longer referenced, and finally renumbering.
    """
    # Map references back to indices
    GDEF = font.get('GDEF')  # currently unused; see the TODOs below
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')
    for t in [GSUB, GPOS]:
        if not t: continue
        if t.table.FeatureList and t.table.ScriptList:
            # Collect unregistered (new) features.
            featureMap = _GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)
            # Record used features.
            featureMap = _AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s
            # Remove unused features
            t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]
            # Map back to indices.
            featureMap = _NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)
            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)
        if t.table.LookupList:
            # Same three passes for lookups: append new ones, record which
            # are referenced, prune the rest, then renumber.
            # Collect unregistered (new) lookups.
            lookupMap = _GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            # Record used lookups.
            lookupMap = _AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s
            # Remove unused lookups
            t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]
            # Map back to indices.
            lookupMap = _NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)
    # TODO GDEF/Lookup MarkFilteringSets
    # TODO FeatureParams nameIDs
# Public API of this module.
__all__ = [
    'Options',
    'Merger',
    'main'
]
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    """Merge multiple fonts into one.

    Command-line entry point: recognizes ``--output-file=PATH`` (default
    ``merged.ttf``), treats every other argument as an input font path,
    merges the fonts and saves the result.  Returns 1 on usage error.
    """
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    options = Options()
    args = options.parse_opts(args, ignore_unknown=['output-file'])
    outfile = 'merged.ttf'
    # Derive the slice length from the option prefix instead of the magic
    # constant 14, so the two can't drift apart.
    prefix = '--output-file='
    fontfiles = []
    for g in args:
        if g.startswith(prefix):
            outfile = g[len(prefix):]
            continue
        fontfiles.append(g)

    # Check the actual font list, not the raw argument list: an invocation
    # consisting only of --output-file=... has nothing to merge.
    if len(fontfiles) < 1:
        print("usage: pyftmerge font...", file=sys.stderr)
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    merger = Merger(options=options)
    font = merger.merge(fontfiles)
    with timer("compile and save font"):
        font.save(outfile)
if __name__ == "__main__":
    # Allow running this module directly as the pyftmerge tool.
    sys.exit(main())
| 27.956352 | 134 | 0.704974 |
c26534002f66ede36e3690eb0558960e0bfc378e | 2,906 | py | Python | manage.py | MahmoudYounes/BeOS | e832950a9103f154903eb600b0496d95c9826efb | [
"MIT"
] | null | null | null | manage.py | MahmoudYounes/BeOS | e832950a9103f154903eb600b0496d95c9826efb | [
"MIT"
] | null | null | null | manage.py | MahmoudYounes/BeOS | e832950a9103f154903eb600b0496d95c9826efb | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
script that manages BeOS development operations
TODO: move to make (done)
this build script is obsolete please use make instead (this build script won't be supported)
"""
import os, sys, subprocess, shutil
# All paths are derived from the current working directory, so the script
# must be run from the repository root.
CURRENT_DIR = os.getcwd()
BOOTLOADER_DIR = "{}/bootLoader/".format(CURRENT_DIR)  # bootloader sources
BUILD_DIR = "{}/build/".format(CURRENT_DIR)            # intermediate build artifacts
BIN_DIR = "{}/bin/".format(CURRENT_DIR)                # final ISO output
ISOROOT_DIR = "{}/iso_root/".format(CURRENT_DIR)       # staging dir for mkisofs
# Files produced by bochs that clean() should also remove.
BOCHS_PATHS = ['bochsout.txt']
# Command names accepted on the command line; each maps to a same-named
# function in this module (dispatched via globals() in main()).
SUPPORTED_COMMANDS = ['build', 'run', 'help', 'clean', 'debug']
def executeCommand(command):
    """
    execute a shell command in linux/unix environment

    Streams the command's stderr to our stdout one byte at a time and,
    once the child has exited and stderr is drained, terminates this
    script with the child's exit code.  Note: the only way out of the
    loop is exit(), so this function never returns normally.
    """
    print "this build script is obsolete please move to make!"
    process = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)
    while True:
        out = process.stderr.read(1)
        exit_code = process.poll()
        # An empty read combined with a non-None poll() means the child
        # finished and there is nothing left to forward.
        if out == '' and exit_code != None:
            exit(exit_code)
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
def help():
"""
"""
print "to use this script: python manage.py <command> or ./manage.py"
print "where command can be:"
print "\thelp : to print this help message."
print "\tbuild : to build the ISO file of the OS."
print "\trun : to run os in bochs."
print "\tclean : to remove build files."
def clean():
    """
    Remove every generated artifact: the build, iso staging and bin
    directories, plus any bochs log files.
    """
    for directory in (BUILD_DIR, ISOROOT_DIR, BIN_DIR):
        if os.path.isdir(directory):
            shutil.rmtree(directory)
    for path in BOCHS_PATHS:
        if os.path.exists(path):
            os.remove(path)
def build():
    """
    for info about build commands check this answer: https://stackoverflow.com/a/33619597

    Rebuilds from scratch: cleans previous artifacts, recreates the work
    directories, then assembles bootloader and kernel and packs them into
    bin/BeOS.iso.  executeCommand() terminates the script with the shell
    pipeline's exit code.
    """
    clean()
    if not os.path.exists(BUILD_DIR):
        os.mkdir(BUILD_DIR)
    if not os.path.exists(ISOROOT_DIR):
        os.mkdir(ISOROOT_DIR)
    if not os.path.exists(BIN_DIR):
        os.mkdir(BIN_DIR)
    # One shell invocation: assemble the bootloader (flat binary) and the
    # kernel (ELF, linked, then converted to flat binary), stage both into
    # iso_root/, and build the bootable ISO.
    executeCommand("""
    nasm -g -f bin -o build/bootloader.bin bootLoader/bootloader.asm;
    nasm -g -f elf32 -F dwarf -o build/kernel.o src/kernel.asm;
    ld -melf_i386 -Tlinker.ld -nostdlib --nmagic -o build/kernel.elf build/kernel.o;
    objcopy -O binary build/kernel.elf build/kernel.bin;
    cp build/bootloader.bin iso_root/
    cp build/kernel.bin iso_root/
    mkisofs -c bootcat -b bootloader.bin -no-emul-boot -boot-load-size 4 -o ./bin/BeOS.iso ./iso_root
    """)
def run():
if os.path.isdir(BUILD_DIR):
executeCommand("bochs -f bochsrc.txt")
else:
print "Build directory was not found. please build the os first."
def debug():
    """
    Launch gdb with the qemu_dbg.gdb command file (presumably attaching to
    a qemu session -- see that file for the actual connection details).
    """
    # Needs the build artifacts and the gdb command file to be present.
    if os.path.isdir(BUILD_DIR) and os.path.isdir(BIN_DIR) and os.path.exists("qemu_dbg.gdb"):
        executeCommand("gdb -x qemu_dbg.gdb")
    else:
        print "required directories not found. please build the os first."
def main():
"""
"""
if len(sys.argv) == 1:
printUsage()
exit(0)
command = sys.argv[1]
if command in SUPPORTED_COMMANDS:
globals()[command]()
else:
print "command not supported."
help()
if __name__ == "__main__":
    # Run only when executed as a script, not when imported.
    main()
| 25.051724 | 98 | 0.698899 |
d3786434e8d88119ce25a92b0d44565708206b7c | 1,717 | py | Python | custom_components/dwains_dashboard/load_plugins.py | pavolholes/dwains-lovelace-dashboard | bab816a165832ac0c36e4faf5ae3a7fd67c31004 | [
"MIT"
] | null | null | null | custom_components/dwains_dashboard/load_plugins.py | pavolholes/dwains-lovelace-dashboard | bab816a165832ac0c36e4faf5ae3a7fd67c31004 | [
"MIT"
] | null | null | null | custom_components/dwains_dashboard/load_plugins.py | pavolholes/dwains-lovelace-dashboard | bab816a165832ac0c36e4faf5ae3a7fd67c31004 | [
"MIT"
] | null | null | null | import logging
# Key under which Home Assistant's frontend collects extra JS module URLs.
DATA_EXTRA_MODULE_URL = 'frontend_extra_module_url'

_LOGGER = logging.getLogger(__name__)


def load_plugins(hass, name):
    """Expose the dashboard's bundled frontend resources to Home Assistant.

    Adds every bundled JS module URL to ``hass.data[DATA_EXTRA_MODULE_URL]``
    and registers the ``js`` and ``cards`` directories of the
    ``custom_components/<name>`` package as static HTTP paths.
    """
    hass.data.setdefault(DATA_EXTRA_MODULE_URL, set())

    resource_urls = (
        "/dwains_dashboard/js/dwains-dashboard.js",
        # Cards by others
        "/dwains_dashboard/cards/button-card/button-card.js",
        "/dwains_dashboard/cards/light-entity-card/light-entity-card.js",
        # Cards by dwains
        "/dwains_dashboard/cards/dwains-header-card/dwains-header-card.js",
        "/dwains_dashboard/cards/dwains-heading-card/dwains-heading-card.js",
        "/dwains_dashboard/cards/dwains-wrapper-card/dwains-wrapper-card.js",
        "/dwains_dashboard/cards/dwains-flexbox-card/dwains-flexbox-card.js",
        "/dwains_dashboard/cards/dwains-hash-switch-card/dwains-hash-switch-card.js",
        "/dwains_dashboard/cards/dwains-weather-card/dwains-weather-card.js",
        "/dwains_dashboard/cards/dwains-notification-card/dwains-notification-card.js",
        "/dwains_dashboard/cards/dwains-collapse-card/dwains-collapse-card.js",
        "/dwains_dashboard/cards/dwains-cover-card/dwains-cover-card.js",
        "/dwains_dashboard/cards/dwains-auto-entities-card/dwains-auto-entities-card.js",
    )
    hass.data[DATA_EXTRA_MODULE_URL].update(resource_urls)

    hass.http.register_static_path("/dwains_dashboard/js", hass.config.path(f"custom_components/{name}/js"), True)
    hass.http.register_static_path("/dwains_dashboard/cards", hass.config.path(f"custom_components/{name}/cards"), True)
9028f820032f80750b0d5881028659e2c6f86ba6 | 3,427 | py | Python | kubernetes/client/models/v1_load_balancer_status.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_load_balancer_status.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_load_balancer_status.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LoadBalancerStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """


    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'ingress': 'list[V1LoadBalancerIngress]'
    }

    attribute_map = {
        'ingress': 'ingress'
    }

    def __init__(self, ingress=None):
        """
        V1LoadBalancerStatus - a model defined in Swagger
        """

        self._ingress = None
        self.discriminator = None

        if ingress is not None:
            self.ingress = ingress

    @property
    def ingress(self):
        """
        Gets the ingress of this V1LoadBalancerStatus.
        Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.

        :return: The ingress of this V1LoadBalancerStatus.
        :rtype: list[V1LoadBalancerIngress]
        """
        return self._ingress

    @ingress.setter
    def ingress(self, ingress):
        """
        Sets the ingress of this V1LoadBalancerStatus.
        Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.

        :param ingress: The ingress of this V1LoadBalancerStatus.
        :type: list[V1LoadBalancerIngress]
        """

        self._ingress = ingress

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() behaves identically here and removes the class's
        # only use of the third-party six compatibility shim (iteritems).
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1LoadBalancerStatus):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 26.984252 | 147 | 0.570178 |
ed95a22194a487caea28d40f2ae6c4e04bb31ea0 | 938 | py | Python | SparkState/src/SparkState/urls.py | lumanjiao/XLS_BigData | 2c4c37872b8636df1c8b0e005bc12a635a753c7a | [
"Apache-2.0"
] | 11 | 2019-03-20T07:38:35.000Z | 2021-06-18T09:42:46.000Z | SparkState/src/SparkState/urls.py | lumanjiao/XLS_BigData | 2c4c37872b8636df1c8b0e005bc12a635a753c7a | [
"Apache-2.0"
] | null | null | null | SparkState/src/SparkState/urls.py | lumanjiao/XLS_BigData | 2c4c37872b8636df1c8b0e005bc12a635a753c7a | [
"Apache-2.0"
] | 5 | 2019-06-29T03:13:02.000Z | 2020-04-23T04:47:11.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
from SparkState.views import index
# URL routing table.  Uses the legacy ``patterns()`` helper with string
# view names, so this module targets an old Django release (patterns() was
# deprecated and later removed in modern Django).
urlpatterns = patterns('SparkState',
    url(r'^$', 'views.index'),
)
| 40.782609 | 74 | 0.763326 |
96d8e8fa18f8aeed4c6c83986c6ad970a29dda05 | 251 | py | Python | calender/calender/doctype/wisests/wisests.py | bahaaabed/AumAlqura | 9d12d7917225d9e82b4a480c3bc8f7acf8edab77 | [
"MIT"
] | null | null | null | calender/calender/doctype/wisests/wisests.py | bahaaabed/AumAlqura | 9d12d7917225d9e82b4a480c3bc8f7acf8edab77 | [
"MIT"
] | null | null | null | calender/calender/doctype/wisests/wisests.py | bahaaabed/AumAlqura | 9d12d7917225d9e82b4a480c3bc8f7acf8edab77 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2022, bahaa and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Wisests(Document):
    """Frappe DocType controller for Wisests.

    No custom server-side behavior yet; everything is inherited from
    ``frappe.model.document.Document``.
    """
    pass
| 22.818182 | 49 | 0.768924 |
3838da9ae3173fb4e8eab12556058dc965d4dcab | 97 | py | Python | domain/example.py | kirberich/django-heroku-template | 425443f6b7d12f0db99a218f6809d9bba5a57443 | [
"MIT"
] | null | null | null | domain/example.py | kirberich/django-heroku-template | 425443f6b7d12f0db99a218f6809d9bba5a57443 | [
"MIT"
] | null | null | null | domain/example.py | kirberich/django-heroku-template | 425443f6b7d12f0db99a218f6809d9bba5a57443 | [
"MIT"
] | null | null | null | from data.models import TestModel
def get(obj_id):
    """Fetch the TestModel row whose ``id`` equals ``obj_id``.

    Standard Django ``objects.get`` semantics apply -- presumably raising
    ``TestModel.DoesNotExist`` when no row matches (TODO confirm callers
    handle that).
    """
    return TestModel.objects.get(id=obj_id)
| 16.166667 | 43 | 0.762887 |
536e69b7f446e2d85d7b4c89a09698724694f79e | 965 | py | Python | logger.py | andyts93/plex-coming-soon | f7d83f2bf6d41675af4e772dcb2cecf545d9e3ba | [
"MIT"
] | 1 | 2020-03-17T22:54:43.000Z | 2020-03-17T22:54:43.000Z | logger.py | andyts93/plex-coming-soon | f7d83f2bf6d41675af4e772dcb2cecf545d9e3ba | [
"MIT"
] | 1 | 2020-05-05T21:38:38.000Z | 2020-05-18T10:43:27.000Z | logger.py | andyts93/plex-coming-soon | f7d83f2bf6d41675af4e772dcb2cecf545d9e3ba | [
"MIT"
] | null | null | null | import logging
import sys
import os
from logging.handlers import TimedRotatingFileHandler
# Log file lives under logs/ next to this module; the directory must
# already exist -- TODO confirm (handler creation fails otherwise).
LOG_FILE_NAME = os.path.dirname(__file__)+'/logs/log.log'

# set up formatting
formatter = logging.Formatter('[%(asctime)s] %(levelname)s (%(process)d) %(module)s:%(lineno)d %(message)s')

# set up logging to STDOUT for all levels WARNING and higher
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.WARNING)
sh.setFormatter(formatter)

# set up logging to a daily-rotated file (7 backups kept) for all levels
# INFO and higher (the old comment said DEBUG; setLevel(INFO) is what counts)
#fh = logging.FileHandler(LOG_FILE_NAME)
fh = TimedRotatingFileHandler(LOG_FILE_NAME, when="d", interval=1, backupCount=7)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)

# create logger object
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
logger.addHandler(fh)

# shortcuts so callers can do `import logger; logger.info(...)`
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical
8116be124837867eb1f298247bc96380e91be18f | 35 | py | Python | python/testData/codeInsight/controlflow/MatchStatementSingleClauseBindingSequencePattern.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/codeInsight/controlflow/MatchStatementSingleClauseBindingSequencePattern.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/codeInsight/controlflow/MatchStatementSingleClauseBindingSequencePattern.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | match 42:
case [x]:
y
z | 8.75 | 13 | 0.4 |
9cad242776a41b0bec1bc250b4b244b12b4279ba | 1,275 | py | Python | testsuites/test_douban.py | BranRoyal/automation_framework | acb9434b58e18a61605935f7e66c9c68224e8de1 | [
"MIT"
] | null | null | null | testsuites/test_douban.py | BranRoyal/automation_framework | acb9434b58e18a61605935f7e66c9c68224e8de1 | [
"MIT"
] | null | null | null | testsuites/test_douban.py | BranRoyal/automation_framework | acb9434b58e18a61605935f7e66c9c68224e8de1 | [
"MIT"
] | null | null | null | # _*_ coding=utf-8 _*_
import time
import unittest
from framework.browser_engine import BrowserEngine
from pageobjects.home_page import HomePage
class TestDouBan(unittest.TestCase):
    """Selenium UI test driving the douban-comment demo page."""

    @classmethod
    def setUp(cls):
        """
        Fixture run before each test: open the browser.

        NOTE(review): unittest invokes setUp/tearDown on the instance, so
        the @classmethod decorator is unusual here -- the driver ends up
        stored on the class rather than the instance.  It works, but plain
        instance methods would be the conventional form.
        """
        browser = BrowserEngine(cls)
        cls.driver = browser.open_browser(cls)

    @classmethod
    def tearDown(cls):
        """
        Fixture run after each test: quit the browser.
        """
        cls.driver.quit()

    def test_douban(self):
        """
        The method name must start with "test" so unittest discovers it;
        the test logic is wrapped inside this method.
        """
        homepage = HomePage(self.driver)
        self.driver.find_element_by_xpath("//div[contains(text(), '基于词袋模型')]").click()
        self.driver.find_element_by_xpath("//li[contains(text(), '豆瓣评论')]").click()
        homepage.send_textarea('这是一部好电影。')  # type the review via the page object
        homepage.send_begin_btn()  # click the start button via the page object
        time.sleep(2)
        homepage.get_windows_img()  # base-class screenshot helper
        result = self.driver.find_element_by_class_name("animated")
        print(result)
        try:
            assert result.text == '4'
            print('Test Pass.')
        except Exception as e:
            # NOTE(review): catching the AssertionError means this test
            # never actually fails from unittest's point of view -- it
            # only prints 'Test Fail.'.
            print('Test Fail.', format(e))
| 26.5625 | 86 | 0.600784 |
edae3254d91c148a979e5f2b3f83b9f3a776950f | 8,825 | py | Python | doc/source/conf.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,874 | 2015-01-04T05:18:34.000Z | 2022-03-31T03:30:28.000Z | doc/source/conf.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 40 | 2015-04-13T02:32:42.000Z | 2022-02-16T02:28:06.000Z | doc/source/conf.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,996 | 2015-01-04T15:11:51.000Z | 2022-03-31T11:03:13.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# nova documentation build configuration file
#
# Refer to the Sphinx documentation for advice on configuring this file:
#
# http://www.sphinx-doc.org/en/stable/config.html
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.graphviz',
    'openstackdocstheme',
    'sphinx_feature_classification.support_matrix',
    'oslo_config.sphinxconfiggen',
    'oslo_config.sphinxext',
    'oslo_policy.sphinxpolicygen',
    'oslo_policy.sphinxext',
    'ext.versioned_notifications',
    'ext.feature_matrix',
    'ext.extra_specs',
    'sphinxcontrib.actdiag',
    'sphinxcontrib.seqdiag',
    'sphinxcontrib.rsvgconverter',
]

# Inputs/outputs for the oslo.config and oslo.policy sample generators.
config_generator_config_file = '../../etc/nova/nova-config-generator.conf'
sample_config_basename = '_static/nova'

policy_generator_config_file = [
    ('../../etc/nova/nova-policy-generator.conf', '_static/nova'),
]

# Render activity/sequence diagrams as antialiased SVG.
actdiag_html_image_format = 'SVG'
actdiag_antialias = True

seqdiag_html_image_format = 'SVG'
seqdiag_antialias = True

todo_include_todos = True

# The master toctree document.
master_doc = 'index'

# General information about the project.
copyright = u'2010-present, OpenStack Foundation'

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for man page output ----------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
_man_pages = [
    ('nova-api', 'Server for the OpenStack Compute API service.'),
    (
        'nova-api-metadata',
        'Server for the OpenStack Compute metadata API service.',
    ),
    (
        'nova-api-os-compute',
        'Server for the OpenStack Compute API service.',
    ),
    ('nova-compute', 'Server for the OpenStack Compute compute service.'),
    ('nova-conductor', 'Server for the OpenStack Compute conductor service.'),
    ('nova-manage', 'Management tool for the OpenStack Compute services.'),
    (
        'nova-novncproxy',
        'Server for the OpenStack Compute VNC console proxy service.'
    ),
    (
        'nova-rootwrap',
        'Root wrapper daemon for the OpenStack Compute service.',
    ),
    (
        'nova-policy',
        'Inspect policy configuration for the OpenStack Compute services.',
    ),
    (
        'nova-scheduler',
        'Server for the OpenStack Compute scheduler service.',
    ),
    (
        'nova-serialproxy',
        'Server for the OpenStack Compute serial console proxy service.',
    ),
    (
        'nova-spicehtml5proxy',
        'Server for the OpenStack Compute SPICE console proxy service.',
    ),
    (
        'nova-status',
        'Inspect configuration status for the OpenStack Compute services.',
    ),
]

# Expand the (name, description) pairs into full Sphinx man_pages tuples.
man_pages = [
    ('cli/%s' % name, name, description, ['openstack@lists.openstack.org'], 1)
    for name, description in _man_pages]

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'openstackdocs'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']

# -- Options for LaTeX output -------------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'doc-nova.tex', u'Nova Documentation',
     u'OpenStack Foundation', 'manual'),
]

# Allow deeper levels of nesting for \begin...\end stanzas
latex_elements = {
    'maxlistdepth': 10,
    'extraclassoptions': 'openany,oneside',
    'preamble': r'''
\setcounter{tocdepth}{3}
\setcounter{secnumdepth}{3}
''',
}

# Disable use of xindy since that's another binary dependency that's not
# available on all platforms
latex_use_xindy = False

# -- Options for openstackdocstheme -------------------------------------------

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/nova'
openstackdocs_bug_project = 'nova'
openstackdocs_bug_tag = 'doc'
openstackdocs_pdf_link = True

# keep this ordered to keep mriedem happy
#
# NOTE(stephenfin): Projects that don't have a release branch, like TripleO and
# reno, should not be included here
openstackdocs_projects = [
    'ceilometer',
    'cinder',
    'cyborg',
    'glance',
    'horizon',
    'ironic',
    'keystone',
    'neutron',
    'nova',
    'oslo.log',
    'oslo.messaging',
    'oslo.i18n',
    'oslo.versionedobjects',
    'placement',
    'python-novaclient',
    'python-openstackclient',
    'watcher',
]

# -- Custom extensions --------------------------------------------------------

# NOTE(mdbooth): (2019-03-20) Sphinx loads policies defined in setup.cfg, which
# includes the placement policy at nova/api/openstack/placement/policies.py.
# Loading this imports nova/api/openstack/__init__.py, which imports
# nova.monkey_patch, which will do eventlet monkey patching to the sphinx
# process. As well as being unnecessary and a bad idea, this breaks on
# python3.6 (but not python3.7), so don't do that.
os.environ['OS_NOVA_DISABLE_EVENTLET_PATCHING'] = '1'
def monkey_patch_blockdiag():
    """Monkey patch the blockdiag library.

    The default word wrapping in blockdiag is poor, and breaks on a fixed
    text width rather than on word boundaries. There's a patch submitted to
    resolve this [1]_ but it's unlikely to merge anytime soon.

    In addition, blockdiag monkey patches a core library function,
    ``codecs.getreader`` [2]_, to work around some Python 3 issues. Because
    this operates in the same environment as other code that uses this library,
    it ends up causing issues elsewhere. We undo these destructive changes
    pending a fix.

    TODO: Remove this once blockdiag is bumped to 1.6, which will hopefully
    include the fix.

    .. [1] https://bitbucket.org/blockdiag/blockdiag/pull-requests/16/
    .. [2] https://bitbucket.org/blockdiag/blockdiag/src/1.5.3/src/blockdiag/utils/compat.py  # noqa
    """
    import codecs
    from codecs import getreader

    from blockdiag.imagedraw import textfolder

    # Importing this module overwrites codecs.getreader as a side effect;
    # we restore the stdlib function right after.
    from blockdiag.utils import compat  # noqa

    # oh, blockdiag. Let's undo the mess you made.
    codecs.getreader = getreader

    def splitlabel(text):
        """Split text to lines as generator.

        Every line will be stripped. If text includes characters "\n\n", treat
        as line separator. Ignore '\n' to allow line wrapping.
        """
        lines = [x.strip() for x in text.splitlines()]
        out = []
        for line in lines:
            if line:
                out.append(line)
            else:
                yield ' '.join(out)
                out = []
        yield ' '.join(out)

    def splittext(metrics, text, bound, measure='width'):
        # Greedy word wrap: pack words into the last line until the
        # rendered size would exceed `bound`.
        folded = [' ']
        for word in text.split():
            # Try appending the word to the last line
            tryline = ' '.join([folded[-1], word]).strip()
            textsize = metrics.textsize(tryline)
            if getattr(textsize, measure) > bound:
                # Start a new line. Appends `word` even if > bound.
                folded.append(word)
            else:
                folded[-1] = tryline
        return folded

    # monkey patch those babies
    textfolder.splitlabel = splitlabel
    textfolder.splittext = splittext


# Apply the patch at import time (Sphinx executes conf.py on load).
monkey_patch_blockdiag()
| 32.208029 | 99 | 0.66051 |
046ec54238ecec204769f55470dbe32cf28c7219 | 518 | py | Python | api/migrations/0004_auto_20200906_1752.py | sh2MAN/yamdb_final | 17f84bacd832237d88d3389605cf2acdf2a590f5 | [
"BSD-3-Clause"
] | null | null | null | api/migrations/0004_auto_20200906_1752.py | sh2MAN/yamdb_final | 17f84bacd832237d88d3389605cf2acdf2a590f5 | [
"BSD-3-Clause"
] | null | null | null | api/migrations/0004_auto_20200906_1752.py | sh2MAN/yamdb_final | 17f84bacd832237d88d3389605cf2acdf2a590f5 | [
"BSD-3-Clause"
] | 12 | 2021-02-11T16:39:00.000Z | 2022-03-30T19:18:24.000Z | # Generated by Django 3.0.5 on 2020-09-06 14:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop and re-create the ``unique_review`` constraint on Review so it
    covers the fields ('text', 'score')."""

    dependencies = [
        ('api', '0003_auto_20200906_1745'),
    ]

    operations = [
        # Remove the old constraint first: the name is reused by the
        # AddConstraint below, so order matters.
        migrations.RemoveConstraint(
            model_name='review',
            name='unique_review',
        ),
        migrations.AddConstraint(
            model_name='review',
            constraint=models.UniqueConstraint(fields=('text', 'score'), name='unique_review'),
        ),
    ]
| 23.545455 | 95 | 0.600386 |
80237a621c7ba7b199d8f1c02c8f009484e6a9fd | 596 | py | Python | model/deeplabv3/backbone/__init__.py | steermomo/pytorch-template | bddd62ee292c9f356b4b0a305fb0832176912c80 | [
"MIT"
] | 1 | 2021-08-21T09:37:07.000Z | 2021-08-21T09:37:07.000Z | model/deeplabv3/backbone/__init__.py | steermomo/pytorch-template | bddd62ee292c9f356b4b0a305fb0832176912c80 | [
"MIT"
] | null | null | null | model/deeplabv3/backbone/__init__.py | steermomo/pytorch-template | bddd62ee292c9f356b4b0a305fb0832176912c80 | [
"MIT"
] | null | null | null | from model.deeplabv3.backbone import resnet, mobilenet # xception, drn,
def build_backbone(backbone, output_stride, BatchNorm):
    """Instantiate the requested backbone network.

    :param backbone: one of 'resnet' or 'mobilenet'; 'xception' and 'drn'
        are declared in the imports but not ported yet.
    :param output_stride: output stride passed through to the backbone.
    :param BatchNorm: normalization layer class passed through.
    :raises NotImplementedError: for 'xception', 'drn', or any unknown name.
    """
    if backbone == 'resnet':
        return resnet.ResNet101(output_stride, BatchNorm)
    if backbone == 'mobilenet':
        return mobilenet.MobileNetV2(output_stride, BatchNorm)
    # return xception.AlignedXception(output_stride, BatchNorm)
    # return drn.drn_d_54(BatchNorm)
    raise NotImplementedError
fe34ea49356ebcb17761ed5f70cf15a890065b3a | 2,187 | py | Python | helper.py | zhiji95/cDCGAN-face-completion-classification | 1eaad325eea5e0e8e233b805f4dbec23e1052fb3 | [
"MIT"
] | 11 | 2019-01-11T15:00:27.000Z | 2020-11-11T10:05:39.000Z | helper.py | zhiji95/cDCGAN-face-completion-classification | 1eaad325eea5e0e8e233b805f4dbec23e1052fb3 | [
"MIT"
] | 12 | 2020-01-28T22:06:44.000Z | 2022-03-11T23:39:26.000Z | helper.py | zhiji95/cDCGAN-face-completion-classification | 1eaad325eea5e0e8e233b805f4dbec23e1052fb3 | [
"MIT"
] | 3 | 2019-05-07T13:41:14.000Z | 2019-07-21T18:42:18.000Z | import os
import hashlib
from urllib.request import urlretrieve
import shutil
from tqdm import tqdm
def download_extract(data_path):
    """
    Download and extract the CelebA database into ``data_path``.

    Skips all work when the extracted folder already exists, downloads the
    zip only when missing, verifies the archive's MD5, and deletes the
    archive after a successful extraction.  On extraction failure the
    partial output directory is removed and the error re-raised.

    :param data_path: Directory to download into and extract under.
    """
    database_name = 'CelebA'
    url = 'https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip'
    hash_code = '00d2c5bc6d35e252742224ab0c1e8fcb'
    extract_path = os.path.join(data_path, 'img_align_celeba')
    save_path = os.path.join(data_path, 'celeba.zip')
    extract_fn = _unzip

    if os.path.exists(extract_path):
        print('Found {} Data'.format(database_name))
        return

    if not os.path.exists(data_path):
        os.makedirs(data_path)

    if not os.path.exists(save_path):
        with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:
            urlretrieve(
                url,
                save_path,
                pbar.hook)

    # Verify the archive even when it already existed on disk.  Use a
    # context manager so the handle is always closed -- the previous
    # open(save_path, 'rb').read() leaked the file object.
    with open(save_path, 'rb') as archive:
        digest = hashlib.md5(archive.read()).hexdigest()
    assert digest == hash_code, \
        '{} file is corrupted.  Remove the file and try again.'.format(save_path)

    os.makedirs(extract_path)
    try:
        extract_fn(save_path, extract_path, database_name, data_path)
    except Exception as err:
        shutil.rmtree(extract_path)  # Remove extraction folder if there is an error
        raise err

    # Remove compressed data
    os.remove(save_path)
class DLProgress(tqdm):
    """
    Handle Progress Bar while Downloading
    """
    # Block count reported by the previous urlretrieve callback; used to
    # compute the delta for tqdm.update().
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """
        A hook function that will be called once on establishment of the network connection and
        once after each block read thereafter.

        :param block_num: A count of blocks transferred so far
        :param block_size: Block size in bytes
        :param total_size: The total size of the file. This may be -1 on older FTP servers which do not return
                            a file size in response to a retrieval request.
        """
        self.total = total_size
        # Advance the bar by the bytes received since the last callback.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
| 31.695652 | 116 | 0.657979 |
858876c47b7ccdcc172524c1522bf318be23ab48 | 320 | py | Python | lib/models/linear.py | qipengwang/TransPose | 2ca260768f3b0afdb92c7a0425c3c28e9cdd379d | [
"MIT"
] | null | null | null | lib/models/linear.py | qipengwang/TransPose | 2ca260768f3b0afdb92c7a0425c3c28e9cdd379d | [
"MIT"
] | null | null | null | lib/models/linear.py | qipengwang/TransPose | 2ca260768f3b0afdb92c7a0425c3c28e9cdd379d | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class LinearProjection(nn.Module):
    """Non-overlapping 8x8 patch projection implemented as a strided conv.

    Maps (N, 3, H, W) inputs to (N, 3, H//8, W//8); ``features_out``
    records the channel count of the single feature stage.
    """

    def __init__(self) -> None:
        super().__init__()
        patchify = nn.Conv2d(3, 3, 8, 8)  # kernel 8, stride 8 => one tap per patch
        self.features = [patchify]
        self.model = nn.Sequential(*self.features)
        self.features_out = [3]

    def forward(self, x):
        """Apply the projection to ``x``."""
        return self.model(x)
bb4877ba9e24d38f6654520d945348a3b6ac7c01 | 3,723 | py | Python | tests/test_electrochem.py | donnyyy777/pyteomics | dc0cafb16823767457fa52342574e3fa1c61b970 | [
"Apache-2.0"
] | 47 | 2020-02-29T21:47:01.000Z | 2022-03-17T13:27:30.000Z | tests/test_electrochem.py | donnyyy777/pyteomics | dc0cafb16823767457fa52342574e3fa1c61b970 | [
"Apache-2.0"
] | 53 | 2020-04-07T01:40:31.000Z | 2022-03-17T12:15:44.000Z | tests/test_electrochem.py | donnyyy777/pyteomics | dc0cafb16823767457fa52342574e3fa1c61b970 | [
"Apache-2.0"
] | 23 | 2020-02-29T21:47:13.000Z | 2021-11-26T04:32:07.000Z | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics.electrochem import charge, pI
from pyteomics.auxiliary import PyteomicsError
class ElectrochemTest(unittest.TestCase):
def setUp(self):
pass
def test_charge_calculations_str(self):
self.assertTrue(
abs(charge('AAA', 5.0,
pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)]}})) < 0.01)
self.assertTrue(
abs(charge('H-AAA-OH', 0.0) - 1.0) < 0.01)
self.assertTrue(
abs(charge('H-AAA-OH', 14.0) + 1.0) < 0.01)
self.assertTrue(
abs(charge('H-AAA-OH', (2.34 + 9.69) / 2.0)) < 0.01)
def test_charge_calculations_list(self):
self.assertRaises(PyteomicsError,
charge, ['A','A','A'], 5.0,
pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)]}})
self.assertTrue(
abs(charge(['H-','A','A','A','-OH'], 0.0) - 1.0) < 0.01)
self.assertTrue(
abs(charge(['H-','A','A','A','-OH'], 14.0) + 1.0) < 0.01)
self.assertTrue(
abs(charge(['H-','A','A','A','-OH'], (2.34 + 9.69) / 2.0)) < 0.01)
def test_charge_calculations_dict(self):
self.assertRaises(PyteomicsError, charge, {'H-': 1, '-OH': 1, 'E': 1},
7, pK_nterm={'H-': {'A': [(9., 1)]}})
self.assertTrue(
abs(charge({'A': 3, 'H-': 1, '-OH': 1}, 14.0) + 1.0) < 0.01)
self.assertTrue(
abs(charge({'A': 1, 'H-': 1, '-OH': 1, 'ntermB': 1, 'ctermA': 1},
14.0, pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)], 'B': [(3., 1)]}}) + 1.0)
< 0.01)
self.assertRaises(PyteomicsError, charge,
{'A': 1, 'H-': 1, '-OH': 1, 'ctermA': 1}, 14.0,
pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)]}})
self.assertRaises(PyteomicsError, charge,
{'A': 1, 'H-': 1, '-OH': 1, 'ntermA': 1}, 14.0,
pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)]}})
self.assertRaises(PyteomicsError, charge,
{'A': 1, 'H-': 1, '-OH': 1, 'ntermA': 2, 'ctermA': 1}, 14.0,
pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)]}})
self.assertRaises(PyteomicsError, charge,
{'A': 1, 'H-': 1, 'ntermA': 1, 'ctermA': 1}, 14.0,
pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
pK_nterm={'H-': {'A': [(3., 1)]}})
def test_pI_calculations(self):
self.assertTrue(
abs(pI('H-AAA-OH') - (2.34 + 9.69) / 2.0) < 0.01)
def test_pI_precision(self):
pI_best = pI('PEPTIDE', precision_pI=1e-15)
for i in range(16):
precision = 10 ** (-i)
self.assertTrue(
abs(pI('PEPTIDE', precision_pI=precision) - pI_best) < precision)
def test_charge_input(self):
for i in range(0, 14):
self.assertAlmostEqual(
charge('H-ACDEFGH-OH', i),
charge(['H-', 'A', 'C', 'D', 'E', 'F', 'G', 'H', '-OH'], i))
for i in range(0, 14):
self.assertAlmostEqual(
charge('H-ACDEFGH-OH', i),
charge({'H-': 1, 'A': 1, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1, '-OH': 1}, i))
if __name__ == '__main__':
unittest.main()
| 41.831461 | 96 | 0.425195 |
296b1521278318f8e12397bdc41f811ab8c7df58 | 8,019 | py | Python | src/lib/detectors/car_pose.py | ngoductuanlhp/GAC3D | 5eb8dbb025546f7d800559c685bb96969991fa87 | [
"MIT"
] | 5 | 2021-07-31T13:20:38.000Z | 2022-02-22T16:50:19.000Z | src/lib/detectors/car_pose.py | ngoductuanlhp/GAC3D | 5eb8dbb025546f7d800559c685bb96969991fa87 | [
"MIT"
] | null | null | null | src/lib/detectors/car_pose.py | ngoductuanlhp/GAC3D | 5eb8dbb025546f7d800559c685bb96969991fa87 | [
"MIT"
] | 2 | 2021-08-07T10:13:13.000Z | 2021-10-12T02:05:51.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
try:
from external.nms import soft_nms_39
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import multi_pose_decode, _topk
from models.decode import car_pose_decode
from models.utils import flip_tensor, flip_lr_off, flip_lr
from utils.image import get_affine_transform
from utils.post_process import multi_pose_post_process
from utils.post_process import car_pose_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
from torch.onnx import OperatorExportTypes
# import onnxruntime
# import onnx
class CarPoseDetector(BaseDetector):
def __init__(self, opt, onnx=False):
super(CarPoseDetector, self).__init__(opt)
self.flip_idx = opt.flip_idx
self.not_depth_guide = opt.not_depth_guide
self.backbonea_arch = opt.arch.split('_')[0]
self.export_onnx = onnx
def process(self, images, depths, meta, return_time=False):
# NOTE export ONNX
if self.export_onnx:
with torch.no_grad():
onnx_path = self.opt.load_model[:-4] + ".onnx"
# hm, features = self.model(images) # remember the order of outputs
hm, hps, rot, dim, prob = self.model(images)
torch.onnx.export(self.model, images,
onnx_path,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
verbose=True,
input_names= ['input'],
output_names=["hm", "hps", "rot", "dim", "prob"])
print("Export ONNX successful. Model is saved at", onnx_path)
quit()
with torch.no_grad():
# if self.not_depth_guide or self.backbonea_arch == 'dla':
# output = self.model(images)[-1]
# else:
# output = self.model(images, depths)[-1]
# output = self.model(images)[-1]
# output = self.model(images)[-1]
outputs = self.model(images)
# hm, hps, rot, dim, prob = self.model(images)
hm, hps, rot, dim, prob = outputs['hm'], outputs['hps'], outputs['rot'], outputs['dim'], outputs['prob']
hm = hm.sigmoid_()
dets = car_pose_decode(
hm, hps, dim, rot, prob,
reg=outputs['reg'], wh=outputs['wh'], K=self.opt.K, meta=meta, const=self.const,
dynamic_dim=self.opt.dynamic_dim, axis_head_angle=self.opt.axis_head_angle, not_joint_task=self.opt.not_joint_task)
# dets = car_pose_decode(
# output['hm'], output['hps'], output['dim'], output['rot'], output['prob'],
# reg=output['reg'], wh=output['wh'], K=self.opt.K, meta=meta, const=self.const,
# dynamic_dim=self.opt.dynamic_dim, axis_head_angle=self.opt.axis_head_angle, not_joint_task=self.opt.not_joint_task)
if return_time:
return None, dets, 0
else:
return None, dets
def preprocess_depth(self, depth):
n = 40
delta = 2 * 80 / (n * (n + 1))
depth = 1 + 8 * (depth) / delta
depth = -0.5 + 0.5 * np.sqrt(depth) # 0 -> 40
depth = depth / 40 # 0 -> 1
return depth
def pre_process(self, image, depth, meta=None):
height, width = image.shape[0:2]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = np.array([width, height], dtype=np.float32)
trans_input = get_affine_transform(
c, s, 0, [self.opt.input_w, self.opt.input_h])
inp_image = cv2.warpAffine(
image, trans_input, (self.opt.input_w, self.opt.input_h), flags=cv2.INTER_LINEAR)
inp_image = (inp_image / 255).astype(np.float32)
inp_image = (inp_image - self.mean) / self.std
images = inp_image.transpose(2, 0, 1).reshape(
1, 3, self.opt.input_h, self.opt.input_w)
images = torch.from_numpy(images)
# FIXME test depth
# print(resized_depth.shape)
# dummy_depth = np.random.randint(0, 10000, size = (new_height, new_width)).astype(np.uint16)
# resized_depth = dummy_depth
# print(resized_depth)
# dummy_depth = np.ones_like(resized_depth) * 10 * 256
# s = resized_depth.shape
# resized_depth = np.random.randn(new_width, new_height, 1)
# resized_depth = dummy_depth
# resized_depth = np.arange(new_width * new_height).reshape(new_height,new_width)
# resized_depth = np.clip(resized_depth, 0, 255 * 100)
# print(resized_depth.shape)
# resized_depth = cv2.resize(depth, (new_width, new_height))
inp_depth = cv2.warpAffine(
depth, trans_input, (self.opt.input_w, self.opt.input_h),
flags=cv2.INTER_LINEAR)
inp_depth = inp_depth.astype(np.float32) / 256.0
# NOTE test new depth preproc
# inp_depth = self.preprocess_depth(inp_depth)
inp_depth = inp_depth[:, :, np.newaxis]
inp_depth = (inp_depth - self.depth_mean) / self.depth_std
# print(np.max(inp_depth), np.min(inp_depth))
# inp_depth = inp_depth * 10000
depths = inp_depth.transpose(2, 0, 1).reshape(
1, 1, self.opt.input_h, self.opt.input_w)
depths = torch.from_numpy(depths)
meta = {'c': c, 's': s,
'out_height': self.opt.input_h // self.opt.down_ratio,
'out_width': self.opt.input_w // self.opt.down_ratio}
trans_output_inv = get_affine_transform(
c, s, 0, [meta['out_width'], meta['out_height']], inv=1)
trans_output_inv = torch.from_numpy(
trans_output_inv).unsqueeze(0).to(self.opt.device)
meta['trans_output_inv'] = trans_output_inv
return images, depths, meta
def post_process(self, dets, meta):
dets = dets.squeeze(0).detach().cpu().numpy() # for batch size 1
return dets
def merge_outputs(self, detections):
results = {}
results[1] = np.concatenate(
[detection[1] for detection in detections], axis=0).astype(np.float32)
if self.opt.nms or len(self.opt.test_scales) > 1:
soft_nms_39(results[1], Nt=0.5, method=2)
results[1] = results[1].tolist()
return results
def debug(self, debugger, images, dets, output, scale=1):
dets = dets.detach().cpu().numpy().copy()
dets[:, :, :4] *= self.opt.down_ratio
dets[:, :, 5:39] *= self.opt.down_ratio
img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip(((
img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
if self.opt.hm_hp:
pred = debugger.gen_colormap_hp(
output['hm_hp'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hmhp')
def show_results(self, debugger, image, results, calib):
debugger.add_img(image, img_id='car_pose')
for bbox in results:
if bbox[4] > self.opt.vis_thresh:
# debugger.add_coco_bbox(bbox[:4], bbox[40], bbox[4], img_id='car_pose')
# debugger.add_kitti_hp(bbox[5:23], img_id='car_pose')
# debugger.add_bev(bbox, img_id='car_pose',is_faster=self.opt.faster)
# debugger.add_3d_detection(bbox, calib, img_id='car_pose')
debugger.save_kitti_format(
bbox, self.image_path, self.opt, img_id='car_pose')
if self.opt.vis:
debugger.show_all_imgs(pause=self.pause)
| 42.882353 | 133 | 0.59995 |
3fc85871f2011bfda3cc238aad277770e381962b | 4,364 | py | Python | src/logs/__init__.py | michelle-holmusk/biobank | 8d170373b4e7bc81188cda56b13816be16085ba6 | [
"MIT"
] | null | null | null | src/logs/__init__.py | michelle-holmusk/biobank | 8d170373b4e7bc81188cda56b13816be16085ba6 | [
"MIT"
] | 1 | 2021-08-23T20:43:55.000Z | 2021-08-23T20:43:55.000Z | src/logs/__init__.py | michelle-holmusk/biobank | 8d170373b4e7bc81188cda56b13816be16085ba6 | [
"MIT"
] | null | null | null | '''Module containing helper classes for logging
This module contains two classes. The first one will be used for
generating a new logger object, and another one for uisng that logging
object for new tasks. Each class is modeled as a decorator, that will
inject a ``logging.getLogger`` instance as a first parameter of the
function. This function furthermore logs the starting and ending times
of the logs, as well as the time taken for the function, using the
``time.time`` module.
Configuration Information
=========================
Configuring the logger is done with the help of the configuration file
``config/config.json``. Specifically, the ``logging`` key identifies all
configuration associated with logging information within this file. An
example if the ``logging`` section is shown below. Details of the different
sections will be described in the documentation that follows.
.. code-block:: python
:emphasize-lines: 5,10,14
"logging":{
"logBase" : "biobank",
"level" : "INFO",
"specs" : {
"file":{
"todo" : true,
"logFolder": "logs"
},
"stdout":{
"todo" : false
},
"logstash":{
"todo" : false,
"version" : 1,
"port" : 5959,
"host" : "localhost"
}
}
}
The ``"level"`` Segment
-----------------------
The logging module comes preconfigured to log at the ``"INFO"`` level.
However this can be set to one of the following levels, and is mapped
to their respective logging levels.
- ``'CRITICAL'`` mapped to ``logging.CRITICAL``
- ``'ERROR'`` mapped to ``logging.ERROR``
- ``'WARNING'`` mapped to ``logging.WARNING``
- ``'INFO'`` mapped to ``logging.INFO``
- ``'DEBUG'`` mapped to ``logging.DEBUG``
The ``"specs"`` Segment
-----------------------
This module comes preconfigured for a number of logging sinks. The logs can go
either to a logging file, to the stdout, or to logstash. Each section has a
parameter ``"todo"`` that will determine whether a particular sink shall be
added to the logging handler. The other parameters for each section is described
below.
The ``"specs.file"`` Segment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This segment is used for sending the logger output directly to a file. A base folder
should be soecified within which the logging file should be generated. Each time the
program is run, a new file is generated in the form ``YYYY-MM-DD_hh-mm-ss.log``. The
default formatting string used is:
``"%(asctime)s - %(name)s - %(levelname)s - %(message)s"``.
The ``"specs.stdout"`` Segment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The output can potentially also be sent to the standard output if this section is turned
on using the ``doto`` key. By default, this section is turned off. The default formatting
string used is:
``"%(asctime)s - %(name)s - %(levelname)s - %(message)s"``.
The ``"specs.logstash"`` Segment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is also possible to use logstash as a sink. This is entirely JSON based. This uses TCP
rather than the standard UDP. For configuring the logstash server, make sure to add the
input:
.. code-block:: python
tcp {
'port' => '5959'
'codec' => 'json'
}
The input port should match the port specified in the ``config/config.json`` the config file.
If your logstash is running on a different machine, make sure that you specify the host IP
along with the port. An example output is shown:
.. code-block:: python
{
"@timestamp" => 2018-08-12T03:49:25.212Z,
"level" => "ERROR",
"type" => "logstash",
"port" => 55195,
"@version" => "1",
"host" => "Sankha-desktop.local",
"path" => "/Users/user/Documents/programming/python/test/mytests/mnop/src/lib/testLib/simpleLib.py",
"message" => "Unable to add the two values [3] and [a]:\\nunsupported operand type(s) for +: 'int' and 'str'",
"tags" => [],
"logger_name" => "mnop.lib.simpleLib.simpleTestFunction",
"stack_info" => nil
}
This can then be sent to elasticsearch. If you need specific things filtered, you can
directly use the filtering capabilities of logstash to generate this information.
''' | 34.912 | 118 | 0.624427 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.