import logging import re import time from datetime import datetime from django.conf import settings from django.contrib.auth.models import User from django.db import models from django.utils.translation import ugettext as _ from django.utils.translation import ugettext_lazy as _lazy from timezone_field import TimeZoneField from kitsune.lib.countries import COUNTRIES from kitsune.products.models import Product from kitsune.search.es_utils import UnindexMeBro from kitsune.search.models import ( SearchMappingType, SearchMixin, register_for_indexing, register_mapping_type, ) from kitsune.sumo.models import LocaleField, ModelBase from kitsune.sumo.urlresolvers import reverse from kitsune.sumo.utils import auto_delete_files from kitsune.users.validators import TwitterValidator log = logging.getLogger("k.users") SHA1_RE = re.compile("^[a-f0-9]{40}$") CONTRIBUTOR_GROUP = "Registered as contributor" SET_ID_PREFIX = "https://schemas.accounts.firefox.com/event/" @auto_delete_files class Profile(ModelBase, SearchMixin): """Profile model for django users.""" user = models.OneToOneField( User, on_delete=models.CASCADE, primary_key=True, verbose_name=_lazy("User") ) name = models.CharField( max_length=255, null=True, blank=True, verbose_name=_lazy("Display name") ) public_email = models.BooleanField( # show/hide email default=False, verbose_name=_lazy("Make my email address visible to logged in users") ) avatar = models.ImageField( upload_to=settings.USER_AVATAR_PATH, null=True, blank=True, verbose_name=_lazy("Avatar"), max_length=settings.MAX_FILEPATH_LENGTH, ) bio = models.TextField( null=True, blank=True, verbose_name=_lazy("Biography"), help_text=_lazy( "Some HTML supported: <abbr title> " + "<acronym title> <b> " + "<blockquote> <code> " + "<em> <i> <li> " + "<ol> <strong> <ul>. " + "Links are forbidden." 
), ) website = models.URLField(max_length=255, null=True, blank=True, verbose_name=_lazy("Website")) twitter = models.CharField( max_length=15, null=True, blank=True, validators=[TwitterValidator], verbose_name=_lazy("Twitter Username"), ) community_mozilla_org = models.CharField( max_length=255, default="", blank=True, verbose_name=_lazy("Community Portal Username") ) people_mozilla_org = models.CharField( max_length=255, blank=True, default="", verbose_name=_lazy("People Directory Username") ) matrix_handle = models.CharField( max_length=255, default="", blank=True, verbose_name=_lazy("Matrix Nickname") ) timezone = TimeZoneField( null=True, blank=True, default="US/Pacific", verbose_name=_lazy("Timezone") ) country = models.CharField( max_length=2, choices=COUNTRIES, null=True, blank=True, verbose_name=_lazy("Country") ) # No city validation city = models.CharField(max_length=255, null=True, blank=True, verbose_name=_lazy("City")) locale = LocaleField(default=settings.LANGUAGE_CODE, verbose_name=_lazy("Preferred language")) first_answer_email_sent = models.BooleanField( default=False, help_text=_lazy("Has been sent a first answer contribution email.") ) first_l10n_email_sent = models.BooleanField( default=False, help_text=_lazy("Has been sent a first revision contribution email.") ) involved_from = models.DateField( null=True, blank=True, verbose_name=_lazy("Involved with Mozilla from") ) csat_email_sent = models.DateField( null=True, blank=True, verbose_name=_lazy("When the user was sent a community " "health survey"), ) is_fxa_migrated = models.BooleanField(default=False) fxa_uid = models.CharField(blank=True, null=True, unique=True, max_length=128) fxa_avatar = models.URLField(max_length=512, blank=True, default="") products = models.ManyToManyField(Product, related_name="subscribed_users") fxa_password_change = models.DateTimeField(blank=True, null=True) class Meta(object): permissions = ( ("view_karma_points", "Can view karma points"), ("deactivate_users", "Can deactivate users"), ("screen_share", "Can screen share"), ) def __str__(self): try: return str(self.user) except Exception as exc: return str("%d (%r)" % (self.pk, exc)) def get_absolute_url(self): return reverse("users.profile", args=[self.user_id]) def clear(self): """Clears out the users profile""" self.name = "" self.public_email = False self.avatar = None self.bio = "" self.website = "" self.twitter = "" self.community_mozilla_org = "" self.people_mozilla_org = "" self.matrix_handle = "" self.city = "" self.is_fxa_migrated = False self.fxa_uid = "" @property def display_name(self): return self.name if self.name else self.user.username @property def twitter_usernames(self): from kitsune.customercare.models import Reply return list( Reply.objects.filter(user=self.user) .values_list("twitter_username", flat=True) .distinct() ) @classmethod def get_mapping_type(cls): return UserMappingType @classmethod def get_serializer(cls, serializer_type="full"): # Avoid circular import from kitsune.users import api if serializer_type == "full": return api.ProfileSerializer elif serializer_type == "fk": return api.ProfileFKSerializer else: raise ValueError('Unknown serializer type "{}".'.format(serializer_type)) @property def last_contribution_date(self): """Get the date of the user's last contribution.""" from kitsune.customercare.models import Reply from kitsune.questions.models import Answer from kitsune.wiki.models import Revision dates = [] # Latest Army of Awesome reply: try: aoa_reply = 
Reply.objects.filter(user=self.user).latest("created") dates.append(aoa_reply.created) except Reply.DoesNotExist: pass # Latest Support Forum answer: try: answer = Answer.objects.filter(creator=self.user).latest("created") dates.append(answer.created) except Answer.DoesNotExist: pass # Latest KB Revision edited: try: revision = Revision.objects.filter(creator=self.user).latest("created") dates.append(revision.created) except Revision.DoesNotExist: pass # Latest KB Revision reviewed: try: revision = Revision.objects.filter(reviewer=self.user).latest("reviewed") # Old revisions don't have the reviewed date. dates.append(revision.reviewed or revision.created) except Revision.DoesNotExist: pass if len(dates) == 0: return None return max(dates) @property def settings(self): return self.user.settings @property def answer_helpfulness(self): # Avoid circular import from kitsune.questions.models import AnswerVote return AnswerVote.objects.filter(answer__creator=self.user, helpful=True).count() @register_mapping_type class UserMappingType(SearchMappingType): list_keys = [ "twitter_usernames", "itwitter_usernames", ] @classmethod def get_model(cls): return Profile @classmethod def get_index_group(cls): return "non-critical" @classmethod def get_mapping(cls): return { "properties": { "id": {"type": "long"}, "model": {"type": "string", "index": "not_analyzed"}, "url": {"type": "string", "index": "not_analyzed"}, "indexed_on": {"type": "integer"}, "username": {"type": "string", "index": "not_analyzed"}, "display_name": {"type": "string", "index": "not_analyzed"}, "twitter_usernames": {"type": "string", "index": "not_analyzed"}, "last_contribution_date": {"type": "date"}, # lower-cased versions for querying: "iusername": {"type": "string", "index": "not_analyzed"}, "idisplay_name": {"type": "string", "analyzer": "whitespace"}, "itwitter_usernames": {"type": "string", "index": "not_analyzed"}, "avatar": {"type": "string", "index": "not_analyzed"}, "suggest": {"type": "completion", "analyzer": "whitespace", "payloads": True}, } } @classmethod def extract_document(cls, obj_id, obj=None): """Extracts interesting thing from a Thread and its Posts""" if obj is None: model = cls.get_model() obj = model.objects.select_related("user").get(pk=obj_id) if not obj.user.is_active: raise UnindexMeBro() d = {} d["id"] = obj.pk d["model"] = cls.get_mapping_type_name() d["url"] = obj.get_absolute_url() d["indexed_on"] = int(time.time()) d["username"] = obj.user.username d["display_name"] = obj.display_name d["twitter_usernames"] = obj.twitter_usernames d["last_contribution_date"] = obj.last_contribution_date d["iusername"] = obj.user.username.lower() d["idisplay_name"] = obj.display_name.lower() d["itwitter_usernames"] = [u.lower() for u in obj.twitter_usernames] from kitsune.users.templatetags.jinja_helpers import profile_avatar d["avatar"] = profile_avatar(obj.user, size=120) d["suggest"] = { "input": [d["iusername"], d["idisplay_name"]], "output": _("{displayname} ({username})").format( displayname=d["display_name"], username=d["username"] ), "payload": {"user_id": d["id"]}, } return d @classmethod def suggest_completions(cls, text): """Suggest completions for the text provided.""" USER_SUGGEST = "user-suggest" es = UserMappingType.search().get_es() results = es.suggest( index=cls.get_index(), body={USER_SUGGEST: {"text": text.lower(), "completion": {"field": "suggest"}}}, ) if results[USER_SUGGEST][0]["length"] > 0: return results[USER_SUGGEST][0]["options"] return [] register_for_indexing("users", Profile) def 
get_profile(u): try: return Profile.objects.get(user=u) except Profile.DoesNotExist: return None register_for_indexing("users", User, instance_to_indexee=get_profile) class Setting(ModelBase): """User specific value per setting""" user = models.ForeignKey( User, on_delete=models.CASCADE, verbose_name=_lazy("User"), related_name="settings" ) name = models.CharField(max_length=100) value = models.CharField(blank=True, max_length=60, verbose_name=_lazy("Value")) class Meta(object): unique_together = (("user", "name"),) def __str__(self): return "%s %s:%s" % (self.user, self.name, self.value or "[none]") @classmethod def get_for_user(cls, user, name): from kitsune.users.forms import SettingsForm form = SettingsForm() if name not in list(form.fields.keys()): raise KeyError( ("'{name}' is not a field in user.forms.SettingsFrom()").format(name=name) ) try: setting = Setting.objects.get(user=user, name=name) except Setting.DoesNotExist: value = form.fields[name].initial or "" setting = Setting.objects.create(user=user, name=name, value=value) # Cast to the field's Python type. return form.fields[name].to_python(setting.value) class RegistrationProfile(models.Model): """ A simple profile which stores an activation key used for user account registration. Generally, you will not want to interact directly with instances of this model; the provided manager includes methods for creating and activating new accounts. """ user = models.ForeignKey( User, on_delete=models.CASCADE, unique=True, verbose_name=_lazy("user") ) activation_key = models.CharField(verbose_name=_lazy("activation key"), max_length=40) class Meta: verbose_name = _lazy("registration profile") verbose_name_plural = _lazy("registration profiles") def __str__(self): return "Registration information for %s" % self.user def activation_key_expired(self): """ Determine whether this ``RegistrationProfile``'s activation key has expired, returning a boolean -- ``True`` if the key has expired. Key expiration is determined by: 1. The date the user signed up is incremented by the number of days specified in the setting ``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of days after signup during which a user is allowed to activate their account); if the result is less than or equal to the current date, the key has expired and this method returns ``True``. """ return True activation_key_expired.boolean = True class EmailChange(models.Model): """Stores email with activation key when user requests a change.""" ACTIVATED = "ALREADY_ACTIVATED" user = models.ForeignKey( User, on_delete=models.CASCADE, unique=True, verbose_name=_lazy("user") ) activation_key = models.CharField(verbose_name=_lazy("activation key"), max_length=40) email = models.EmailField(db_index=True, null=True) def __str__(self): return "Change email request to %s for %s" % (self.email, self.user) class Deactivation(models.Model): """Stores user deactivation logs.""" user = models.ForeignKey( User, on_delete=models.CASCADE, verbose_name=_lazy("user"), related_name="+" ) moderator = models.ForeignKey( User, on_delete=models.CASCADE, verbose_name=_lazy("moderator"), related_name="deactivations", ) date = models.DateTimeField(default=datetime.now) def __str__(self): return "%s was deactivated by %s on %s" % (self.user, self.moderator, self.date) class AccountEvent(models.Model): """Stores the events received from Firefox Accounts. These events are processed by celery and the correct status is assigned in each entry. """ # Status of an event entry. 
UNPROCESSED = 1 PROCESSED = 2 IGNORED = 3 NOT_IMPLEMENTED = 4 EVENT_STATUS = ( (UNPROCESSED, "unprocessed"), (PROCESSED, "processed"), (IGNORED, "ignored"), (NOT_IMPLEMENTED, "not-implemented"), ) PASSWORD_CHANGE = "password-change" PROFILE_CHANGE = "profile-change" SUBSCRIPTION_STATE_CHANGE = "subscription-state-change" DELETE_USER = "delete-user" status = models.PositiveSmallIntegerField( choices=EVENT_STATUS, default=UNPROCESSED, blank=True ) created_at = models.DateTimeField(auto_now_add=True) last_modified = models.DateTimeField(auto_now=True) body = models.TextField(max_length=4096, blank=False) event_type = models.CharField(max_length=256, default="", blank=True) fxa_uid = models.CharField(max_length=128, default="", blank=True) jwt_id = models.CharField(max_length=256) issued_at = models.CharField(max_length=32) profile = models.ForeignKey( Profile, on_delete=models.CASCADE, related_name="account_events", null=True ) class Meta(object): ordering = ["-last_modified"]
# DEFAULT ROLES
class ROLE:
    ADMIN = "admin"
    HOST = "host"


# docker
class DOCKER:
    DEFAULT_REMOTE_PORT = 4243
def main():
    f = open('../../oldgit/covid_19_articles.sentences', 'r')
    while True:
        # Each article block starts with the number of sentences it contains.
        sentence_num = f.readline()
        sentences = []
        for rows in range(int(sentence_num)):
            sentence = f.readline()
            br = f.readline()  # blank separator line between sentences
            #print(sentence)
            #print(br)
            sentences.append(sentence)
        print(sentences)
        br = f.readline()
        print('\n\n')
        # A count of -1 marks the end of the file.
        if sentence_num == '-1\n':
            break
    f.close()


main()
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers


class Pool2DOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsPool2DOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Pool2DOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Pool2DOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Pool2DOptions
    def Padding(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def StrideW(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def StrideH(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FilterWidth(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FilterHeight(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0


def Pool2DOptionsStart(builder): builder.StartObject(6)
def Pool2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
def Pool2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
def Pool2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
def Pool2DOptionsAddFilterWidth(builder, filterWidth): builder.PrependInt32Slot(3, filterWidth, 0)
def Pool2DOptionsAddFilterHeight(builder, filterHeight): builder.PrependInt32Slot(4, filterHeight, 0)
def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(5, fusedActivationFunction, 0)
def Pool2DOptionsEnd(builder): return builder.EndObject()
#!/usr/bin/env python3 r""" Define the tally_sheet class. """ import sys import collections import copy import re try: from robot.utils import DotDict except ImportError: pass import gen_print as gp class tally_sheet: r""" This class is the implementation of a tally sheet. The sheet can be viewed as rows and columns. Each row has a unique key field. This class provides methods to tally the results (totals, etc.). Example code: # Create an ordered dict to represent your field names/initial values. try: boot_results_fields = collections.OrderedDict([('total', 0), ('pass', 0), ('fail', 0)]) except AttributeError: boot_results_fields = DotDict([('total', 0), ('pass', 0), ('fail', 0)]) # Create the tally sheet. boot_test_results = tally_sheet('boot type', boot_results_fields, 'boot_test_results') # Set your sum fields (fields which are to be totalled). boot_test_results.set_sum_fields(['total', 'pass', 'fail']) # Set calc fields (within a row, a certain field can be derived from other fields in the row. boot_test_results.set_calc_fields(['total=pass+fail']) # Create some records. boot_test_results.add_row('BMC Power On') boot_test_results.add_row('BMC Power Off') # Increment field values. boot_test_results.inc_row_field('BMC Power On', 'pass') boot_test_results.inc_row_field('BMC Power Off', 'pass') boot_test_results.inc_row_field('BMC Power On', 'fail') # Have the results tallied... boot_test_results.calc() # And printed... boot_test_results.print_report() Example result: Boot Type Total Pass Fail ----------------------------------- ----- ---- ---- BMC Power On 2 1 1 BMC Power Off 1 1 0 =================================================== Totals 3 2 1 """ def __init__(self, row_key_field_name='Description', init_fields_dict=dict(), obj_name='tally_sheet'): r""" Create a tally sheet object. Description of arguments: row_key_field_name The name of the row key field (e.g. boot_type, team_name, etc.) init_fields_dict A dictionary which contains field names/initial values. obj_name The name of the tally sheet. """ self.__obj_name = obj_name # The row key field uniquely identifies the row. self.__row_key_field_name = row_key_field_name # Create a "table" which is an ordered dictionary. # If we're running python 2.7 or later, collections has an OrderedDict we can use. Otherwise, we'll # try to use the DotDict (a robot library). If neither of those are available, we fail. try: self.__table = collections.OrderedDict() except AttributeError: self.__table = DotDict() # Save the initial fields dictionary. self.__init_fields_dict = init_fields_dict self.__totals_line = init_fields_dict self.__sum_fields = [] self.__calc_fields = [] def init(self, row_key_field_name, init_fields_dict, obj_name='tally_sheet'): self.__init__(row_key_field_name, init_fields_dict, obj_name='tally_sheet') def set_sum_fields(self, sum_fields): r""" Set the sum fields, i.e. create a list of field names which are to be summed and included on the totals line of reports. Description of arguments: sum_fields A list of field names. """ self.__sum_fields = sum_fields def set_calc_fields(self, calc_fields): r""" Set the calc fields, i.e. create a list of field names within a given row which are to be calculated for the user. Description of arguments: calc_fields A string expression such as 'total=pass+fail' which shows which field on a given row is derived from other fields in the same row. """ self.__calc_fields = calc_fields def add_row(self, row_key, init_fields_dict=None): r""" Add a row to the tally sheet. 
Description of arguments: row_key A unique key value. init_fields_dict A dictionary of field names/initial values. The number of fields in this dictionary must be the same as what was specified when the tally sheet was created. If no value is passed, the value used to create the tally sheet will be used. """ if row_key in self.__table: # If we allow this, the row values get re-initialized. message = "An entry for \"" + row_key + "\" already exists in" message += " tally sheet." raise ValueError(message) if init_fields_dict is None: init_fields_dict = self.__init_fields_dict try: self.__table[row_key] = collections.OrderedDict(init_fields_dict) except AttributeError: self.__table[row_key] = DotDict(init_fields_dict) def update_row_field(self, row_key, field_key, value): r""" Update a field in a row with the specified value. Description of arguments: row_key A unique key value that identifies the row to be updated. field_key The key that identifies which field in the row that is to be updated. value The value to set into the specified row/field. """ self.__table[row_key][field_key] = value def inc_row_field(self, row_key, field_key): r""" Increment the value of the specified field in the specified row. The value of the field must be numeric. Description of arguments: row_key A unique key value that identifies the row to be updated. field_key The key that identifies which field in the row that is to be updated. """ self.__table[row_key][field_key] += 1 def dec_row_field(self, row_key, field_key): r""" Decrement the value of the specified field in the specified row. The value of the field must be numeric. Description of arguments: row_key A unique key value that identifies the row to be updated. field_key The key that identifies which field in the row that is to be updated. """ self.__table[row_key][field_key] -= 1 def calc(self): r""" Calculate totals and row calc fields. Also, return totals_line dictionary. """ self.__totals_line = copy.deepcopy(self.__init_fields_dict) # Walk through the rows of the table. for row_key, value in self.__table.items(): # Walk through the calc fields and process them. for calc_field in self.__calc_fields: tokens = [i for i in re.split(r'(\d+|\W+)', calc_field) if i] cmd_buf = "" for token in tokens: if token in ("=", "+", "-", "*", "/"): cmd_buf += token + " " else: # Note: Using "mangled" name for the sake of the exec # statement (below). cmd_buf += "self._" + self.__class__.__name__ +\ "__table['" + row_key + "']['" +\ token + "'] " exec(cmd_buf) for field_key, sub_value in value.items(): if field_key in self.__sum_fields: self.__totals_line[field_key] += sub_value return self.__totals_line def sprint_obj(self): r""" sprint the fields of this object. This would normally be for debug purposes. """ buffer = "" buffer += "class name: " + self.__class__.__name__ + "\n" buffer += gp.sprint_var(self.__obj_name) buffer += gp.sprint_var(self.__row_key_field_name) buffer += gp.sprint_var(self.__table) buffer += gp.sprint_var(self.__init_fields_dict) buffer += gp.sprint_var(self.__sum_fields) buffer += gp.sprint_var(self.__totals_line) buffer += gp.sprint_var(self.__calc_fields) buffer += gp.sprint_var(self.__table) return buffer def print_obj(self): r""" print the fields of this object to stdout. This would normally be for debug purposes. """ sys.stdout.write(self.sprint_obj()) def sprint_report(self): r""" sprint the tally sheet in a formatted way. """ buffer = "" # Build format strings. 
col_names = [self.__row_key_field_name.title()] report_width = 40 key_width = 40 format_string = '{0:<' + str(key_width) + '}' dash_format_string = '{0:-<' + str(key_width) + '}' field_num = 0 try: first_rec = next(iter(self.__table.items())) for row_key, value in first_rec[1].items(): field_num += 1 if isinstance(value, int): align = ':>' else: align = ':<' format_string += ' {' + str(field_num) + align +\ str(len(row_key)) + '}' dash_format_string += ' {' + str(field_num) + ':->' +\ str(len(row_key)) + '}' report_width += 1 + len(row_key) col_names.append(row_key.title()) except StopIteration: pass num_fields = field_num + 1 totals_line_fmt = '{0:=<' + str(report_width) + '}' buffer += format_string.format(*col_names) + "\n" buffer += dash_format_string.format(*([''] * num_fields)) + "\n" for row_key, value in self.__table.items(): buffer += format_string.format(row_key, *value.values()) + "\n" buffer += totals_line_fmt.format('') + "\n" buffer += format_string.format('Totals', *self.__totals_line.values()) + "\n" return buffer def print_report(self): r""" print the tally sheet in a formatted way. """ sys.stdout.write(self.sprint_report())
import sys
sys.path.insert(1, "../../../")
import h2o
import os
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm


# checking pr_plot when we have cross-validation enabled.
def glm_pr_plot_test():
    print("Testing glm cross-validation with alpha array, default lambda values for binomial models.")
    h2o_data = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/binomial_20_cols_10KRows.csv"))
    enum_columns = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10"]
    for cname in enum_columns:
        h2o_data[cname] = h2o_data[cname].asfactor()  # convert categorical columns to factors
    myY = "C21"
    h2o_data["C21"] = h2o_data["C21"].asfactor()
    # list.remove() returns None, so build the predictor list explicitly instead
    myX = [name for name in h2o_data.names if name != myY]
    data_frames = h2o_data.split_frame(ratios=[0.8])
    training_data = data_frames[0]
    test_data = data_frames[1]

    # build model with CV but no validation dataset
    cv_model = glm(family='binomial', alpha=[0.1, 0.5, 0.9], nfolds=3, fold_assignment="modulo")
    cv_model.train(training_frame=training_data, x=myX, y=myY, validation_frame=test_data)
    fn = "pr_plot_train_valid_cx"
    perf = cv_model.model_performance(xval=True)
    perf.plot(type="pr", server=True, save_to_file=fn)
    if os.path.isfile(fn):
        os.remove(fn)
    (recall, precision) = perf.plot(type="pr", server=True, plot=False)
    assert len(precision) == len(recall), "Expected precision and recall to have the same shape but they are not."


if __name__ == "__main__":
    pyunit_utils.standalone_test(glm_pr_plot_test)
else:
    glm_pr_plot_test()
# Author: Kay Hartmann <kg.hartma@gmail.com>

import numpy as np


def normalize_data(x: np.ndarray) -> np.ndarray:
    x = x - x.mean()
    x = x / x.std()
    return x
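# Illustrative usage sketch (not part of the original module): z-score a small
# sample array. Assumes a non-constant input, since a zero standard deviation
# would cause a division by zero.
if __name__ == "__main__":
    sample = np.array([1.0, 2.0, 3.0, 4.0])
    normalized = normalize_data(sample)
    print(normalized.mean(), normalized.std())  # approximately 0.0 and 1.0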
# Copyright (c) 2020 Julian Bernhard, Klemens Esterle, Patrick Hart and # Tobias Kessler # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. import numpy as np import logging from bark.viewer import Viewer from bark.geometry import * from bark.models.dynamic import * from bark.world.opendrive import * from bark.world.goal_definition import * from modules.runtime.commons.parameters import ParameterServer import math logger = logging.getLogger() class BaseViewer(Viewer): def __init__(self, params=None, **kwargs): if(params is None): params = ParameterServer() Viewer.__init__(self) # color parameters # agents self.color_other_agents_line = params["Visualization"]["Agents"]["Color"]["Other"]["Lines", "Color of other agents", (0.7,0.7,0.7)] self.color_other_agents_face = params["Visualization"]["Agents"]["Color"]["Other"]["Face", "Color of other agents", (0.7,0.7,0.7)] self.color_eval_agents_line = params["Visualization"]["Agents"]["Color"]["Controlled"]["Lines", "Color of controlled, evaluated agents", (0.,.27,.58)] self.color_eval_agents_face = params["Visualization"]["Agents"]["Color"]["Controlled"]["Face", "Color of controlled, evaluated agents", (.49, .63, .83)] self.use_colormap_for_other_agents = params["Visualization"]["Agents"]["Color"]["UseColormapForOtherAgents", "Flag to enable color map for other agents", False] self.alpha_eval_agent = params["Visualization"]["Agents"]["Alpha"]["Controlled", "Alpha of evalagents", 1.] self.alpha_other_agents = params["Visualization"]["Agents"]["Alpha"]["Other", "Alpha of other agents", 1] self.route_color = params["Visualization"]["Agents"]["ColorRoute", "Color of agents routes", (0.2,0.2,0.2)] self.draw_route = params["Visualization"]["Agents"]["DrawRoute", "Draw Route of each agent", False] self.draw_agent_id = params["Visualization"]["Agents"]["DrawAgentId", "Draw id of each agent", True] self.draw_eval_goals = params["Visualization"]["Agents"]["DrawEvalGoals", "Draw Route of eval agent goals", True] self.eval_goal_color = params["Visualization"]["Agents"]["EvalGoalColor", "Color of eval agent goals", (.49, .63, .83)] self.draw_history = params["Visualization"]["Agents"]["DrawHistory", "Draw history with alpha trace for each agent", False] # map self.color_lane_boundaries = params["Visualization"]["Map"]["XodrLanes"]["Boundaries"]["Color", "Color of agents except ego vehicle", (0.7,0.7,0.7)] self.alpha_lane_boundaries = params["Visualization"]["Map"]["XodrLanes"]["Boundaries"]["Alpha", "Color of agents except ego vehicle", 1.0] self.plane_color = params["Visualization"]["Map"]["Plane"]["Color", "Color of the background plane", (1, 1, 1, 1)] self.plane_alpha = params["Visualization"]["Map"]["Plane"]["Alpha", "Alpha of the background plane", 1.0] self.map_linewidth = params["Visualization"]["Map"]["XodrLanes"]["Boundaries"]["Linewidth", "Linewidth of linestrings", 1.0] self.parameters = params self.use_world_bounds = kwargs.pop("use_world_bounds", False) self.follow_agent_id = kwargs.pop("follow_agent_id", None) self.center = kwargs.pop("center", np.array([0, 0])) self.world_x_range = kwargs.pop("x_range", np.array([-40, 40])) self.world_y_range = kwargs.pop("y_range", np.array([-40, 40])) self.enforce_x_length = kwargs.pop("enforce_x_length", True) self.enforce_y_length = kwargs.pop("enforce_y_length", False) self.x_length = kwargs.pop("x_length", np.sum(np.absolute(self.world_x_range))) self.y_length = kwargs.pop("y_length", np.sum(np.absolute(self.world_y_range))) 
self.dynamic_world_x_range = self.world_x_range.copy() self.dynamic_world_y_range = self.world_y_range.copy() def reset(): pass def get_aspect_ratio(self): pass def _get_draw_eval_agent_ids(self, world, eval_agent_ids=None, ): if self.follow_agent_id is not None: if isinstance(self.follow_agent_id, bool) and \ eval_agent_ids is not None and \ len(eval_agent_ids) == 1: draw_eval_agent_id = eval_agent_ids[0] else: draw_eval_agent_id = self.follow_agent_id if draw_eval_agent_id in world.agents: return draw_eval_agent_id return None def _update_world_view_range(self, world, eval_agent_ids=None): draw_eval_agent_id = self._get_draw_eval_agent_ids(world, eval_agent_ids) if draw_eval_agent_id != None: follow_agent = world.agents[draw_eval_agent_id] state = follow_agent.state pose = np.zeros(3) pose[0] = state[int(StateDefinition.X_POSITION)] pose[1] = state[int(StateDefinition.Y_POSITION)] pose[2] = state[int(StateDefinition.THETA_POSITION)] center = [pose[0], pose[1]] self._update_world_dynamic_range(center) else: if self.use_world_bounds: bb = world.bounding_box self.dynamic_world_x_range = [bb[0].x(), bb[1].x()] self.dynamic_world_y_range = [bb[0].y(), bb[1].y()] diffx = abs(self.dynamic_world_x_range[1] - self.dynamic_world_x_range[0]) diffy = abs(self.dynamic_world_y_range[1] - self.dynamic_world_y_range[0]) # enforce that in both dimensions the same range is covered if diffx > diffy: self.dynamic_world_y_range[0] -= (diffx - diffy)/2 self.dynamic_world_y_range[1] += (diffx - diffy)/2 else: self.dynamic_world_x_range[0] -= (diffy - diffx)/2 self.dynamic_world_x_range[1] += (diffy - diffx)/2 else: center = self.center # self._update_world_dynamic_range(center) def _update_world_dynamic_range(self, center): aspect_ratio = self.get_aspect_ratio() if self.enforce_x_length: self.dynamic_world_x_range = [-self.x_length/2 + center[0], self.x_length/2 + center[0]] self.dynamic_world_y_range = [-self.x_length/2/aspect_ratio + center[1], self.x_length/2/aspect_ratio + center[1]] if self.enforce_y_length: self.dynamic_world_x_range = [-self.y_length/2*aspect_ratio + center[0], self.y_length/2*aspect_ratio + center[0]] self.dynamic_world_y_range = [-self.y_length/2 + center[1], self.y_length/2 + center[1]] def drawPoint2d(self, point2d, color, alpha): pass def drawLine2d(self, line2d, color, alpha, line_style=None, zorder=10): pass def drawPolygon2d(self, polygon, color, alpha, facecolor=None): pass def drawTrajectory(self, trajectory, color): pass def drawObstacle(self, obstacle): pass def drawText(self, position, text, **kwargs): pass def getColor(self, color): pass def show(self,block=False): pass def clear(self): pass def drawAgents(self, world): for _, agent in world.agents.items(): self.drawAgent(agent) def drawHistory(self, agent, color, alpha, facecolor): shape = agent.shape if isinstance(shape, Polygon2d): history = agent.history lh = len(history) for idx, state_action in enumerate(history): state = state_action[0] pose = np.zeros(3) # pybind creates column based vectors, initialization maybe row-based -> we consider both pose[0] = state[int(StateDefinition.X_POSITION)] pose[1] = state[int(StateDefinition.Y_POSITION)] pose[2] = state[int(StateDefinition.THETA_POSITION)] transformed_polygon = shape.Transform(pose) alpha=1-0.8*(lh-idx)/4 alpha = 0 if alpha<0 else alpha self.drawPolygon2d(transformed_polygon, color, alpha, facecolor) # fade to 0.2 after 10 steps def drawGoalDefinition(self, goal_definition, color, alpha, facecolor): if isinstance(goal_definition, GoalDefinitionPolygon): 
self.drawPolygon2d(goal_definition.goal_shape, color, alpha, facecolor) elif isinstance(goal_definition, GoalDefinitionStateLimits): self.drawPolygon2d(goal_definition.xy_limits, color, alpha, facecolor) elif isinstance(goal_definition, GoalDefinitionStateLimitsFrenet): self.drawPolygon2d(goal_definition.goal_shape, color, alpha, facecolor) elif isinstance(goal_definition, GoalDefinitionSequential): prev_center = np.array([]) for idx, goal_def in enumerate(goal_definition.sequential_goals): self.drawGoalDefinition(goal_def, color, alpha, facecolor) goal_pos = None if isinstance(goal_def, GoalDefinitionPolygon): goal_pos = goal_def.goal_shape.center elif isinstance(goal_def, GoalDefinitionStateLimits): goal_pos = goal_def.xy_limits.center # self.drawText(position=goal_pos, text="Goal{}".format(idx), coordinate="world") if prev_center.any(): line = Line2d() line.AddPoint(Point2d(prev_center[0], prev_center[1])) line.AddPoint(Point2d(goal_pos[0], goal_pos[1])) self.drawLine2d(line, color, alpha=0.9) prev_center = goal_pos def drawWorld(self, world, eval_agent_ids=None, filename=None, scenario_idx=None, debug_text=True): # self.clear() self._update_world_view_range(world, eval_agent_ids) if world.map: self.drawMap(world.map.GetOpenDriveMap()) # draw agent goals for agent_id, agent in world.agents.items(): if eval_agent_ids and self.draw_eval_goals and agent.goal_definition and \ agent_id in eval_agent_ids: color_line = self.eval_goal_color color_face = self.eval_goal_color alpha = .5 self.drawGoalDefinition(agent.goal_definition, color_line, alpha, color_face) num_agents = len(world.agents.items()) for i, (agent_id, agent) in enumerate(world.agents.items()): color = "blue" alpha = 1.0 if eval_agent_ids and agent.id in eval_agent_ids: color_line = self.color_eval_agents_line color_face = self.color_eval_agents_face alpha = self.alpha_eval_agent else: alpha = self.alpha_other_agents if self.use_colormap_for_other_agents: color_line = self.getColorFromMap(float(i) / float(num_agents)) color_face = self.getColorFromMap(float(i) / float(num_agents)) else: color_line = self.color_other_agents_line color_face = self.color_other_agents_face self.drawAgent(agent, color_line, alpha, color_face) if self.draw_history: self.drawHistory(agent, color_line, alpha, color_face) if debug_text: self.drawText(position=(0.1, 0.9), text="Scenario: {}".format(scenario_idx), fontsize=14) self.drawText(position=(0.1, 0.95), text="Time: {:.2f}".format(world.time), fontsize=14) def drawMap(self, map): # draw the boundary of each lane for _, road in map.GetRoads().items(): self.drawXodrRoad(road, self.color_lane_boundaries) def drawXodrRoad(self, road, color=None): for lane_section in road.lane_sections: self.drawXodrLaneSection(lane_section, color) def drawXodrLaneSection(self, lane_section, color=None): for _, lane in lane_section.GetLanes().items(): self.drawXodrLane(lane, color) def drawXodrLane(self, lane, color=None): if color is None: self.color_lane_boundaries dashed = False # center line is type none and is drawn as broken if lane.road_mark.type == XodrRoadMarkType.broken or lane.road_mark.type == XodrRoadMarkType.none: dashed = True self.drawLine2d(lane.line, color, self.alpha_lane_boundaries, dashed, zorder=2, linewidth=self.map_linewidth) def drawAgent(self, agent, color, alpha, facecolor): shape = agent.shape if isinstance(shape, Polygon2d): pose = np.zeros(3) # pybind creates column based vectors, initialization maybe row-based -> we consider both state = agent.state pose[0] = 
state[int(StateDefinition.X_POSITION)] pose[1] = state[int(StateDefinition.Y_POSITION)] pose[2] = state[int(StateDefinition.THETA_POSITION)] transformed_polygon = shape.Transform(pose) centerx = (shape.front_dist - 0.5*(shape.front_dist+shape.rear_dist)) * math.cos(pose[2]) + pose[0] centery = (shape.front_dist - 0.5*(shape.front_dist+shape.rear_dist))* math.sin(pose[2]) + pose[1] if self.draw_agent_id: self.drawText(position=(centerx, centery), rotation=180.0*(1.0+pose[2]/math.pi), text="{}".format(agent.id),\ coordinate="not axes", ha='center', va="center", multialignment="center", size="smaller") self.drawPolygon2d(transformed_polygon, color, alpha, facecolor) else: raise NotImplementedError("Shape drawing not implemented.") def drawLaneCorridor(self, lane_corridor, color="blue"): self.drawPolygon2d(lane_corridor.polygon, color=color, alpha=.5) def drawRoadCorridor(self, road_corridor, color="blue"): # TODO(@hart): use agent specific coloring self.drawPolygon2d(road_corridor.polygon, color=color, alpha=.2) for lane_corridor in road_corridor.lane_corridors: self.drawLaneCorridor(lane_corridor) def Reset(self): pass
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------

from config import IM_SCALE
import numpy as np

# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
#    >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
#    >> anchors
#
#    anchors =
#
#       -83   -39   100    56
#      -175   -87   192   104
#      -359  -183   376   200
#       -55   -55    72    72
#      -119  -119   136   136
#      -247  -247   264   264
#       -35   -79    52    96
#       -79  -167    96   184
#      -167  -343   184   360

# array([[ -83.,  -39.,  100.,   56.],
#        [-175.,  -87.,  192.,  104.],
#        [-359., -183.,  376.,  200.],
#        [ -55.,  -55.,   72.,   72.],
#        [-119., -119.,  136.,  136.],
#        [-247., -247.,  264.,  264.],
#        [ -35.,  -79.,   52.,   96.],
#        [ -79., -167.,   96.,  184.],
#        [-167., -343.,  184.,  360.]])


def generate_anchors(base_size=16, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """
    A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
    """
    anchors = generate_base_anchors(base_size=base_size,
                                    ratios=np.array(anchor_ratios),
                                    scales=np.array(anchor_scales))
    A = anchors.shape[0]
    shift_x = np.arange(0, IM_SCALE // feat_stride) * feat_stride
    # Same as shift_x
    shift_x, shift_y = np.meshgrid(shift_x, shift_x)
    shifts = np.stack([shift_x, shift_y, shift_x, shift_y], -1)  # h, w, 4
    all_anchors = shifts[:, :, None] + anchors[None, None]  # h, w, A, 4
    return all_anchors
    # shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    # K = shifts.shape[0]
    # # width changes faster, so here it is H, W, C
    # anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    # anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
    # length = np.int32(anchors.shape[0])


def generate_base_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    """
    base_anchor = np.array([1, 1, base_size, base_size]) - 1
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                         for i in range(ratio_anchors.shape[0])])
    return anchors


def _whctrs(anchor):
    """
    Return width, height, x center, and y center for an anchor (window).
    """
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    return w, h, x_ctr, y_ctr


def _mkanchors(ws, hs, x_ctr, y_ctr):
    """
    Given a vector of widths (ws) and heights (hs) around a center
    (x_ctr, y_ctr), output a set of anchors (windows).
    """
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
                         y_ctr - 0.5 * (hs - 1),
                         x_ctr + 0.5 * (ws - 1),
                         y_ctr + 0.5 * (hs - 1)))
    return anchors


def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    size = w * h
    size_ratios = size / ratios

    # NOTE: CHANGED TO NOT HAVE ROUNDING
    ws = np.sqrt(size_ratios)
    hs = ws * ratios
    anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
    return anchors


def _scale_enum(anchor, scales):
    """
    Enumerate a set of anchors for each scale wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    ws = w * scales
    hs = h * scales
    anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
    return anchors
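# Illustrative sanity check (not part of the original file): with the default three
# ratios and three scales, generate_base_anchors() yields nine reference anchors
# around the (0, 0, 15, 15) window. Values differ slightly from the MATLAB listing
# above because rounding was removed, as noted in _ratio_enum.
if __name__ == "__main__":
    base = generate_base_anchors()
    print(base.shape)  # expected: (9, 4)
    print(base)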
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for information.

import sys

if sys.version >= "3":
    basestring = str

from synapse.ml.core.schema.Utils import *
from synapse.ml.recommendation._SARModel import _SARModel


@inherit_doc
class SARModel(_SARModel):
    def recommendForAllUsers(self, numItems):
        return self._call_java("recommendForAllUsers", numItems)
""" tkcode.app module contains the main application class """ import os import tkinter as tk # observable model import tkcode.model # application settings import tkcode.settings # core components from tkcode.commander import Commander # register commands by importing decorated functions import tkcode.commands # ui theming import tkcode.theme # Visual components from tkcode.sidebar import SideBar from tkcode.editor import EditorFrame from tkcode.statusbar import StatusBar from tkcode.palette import PaletteFrame class App: """ Tk Code application : builds the ui and exposes an api for business logic like a controller """ def __init__(self): """ constructor """ self.model = tkcode.model.TkCodeModel() # observable data model self.model.add_observer(self) self.settings = tkcode.settings.Settings(self.model) self.root = None # tkinter Tk instance # The components of the interface self.sidebar = None self.notebook = None self.statusbar = None self.palette = None self.commander = None # later: # self.console = None def build_ui(self): """ builds the user interface """ self.root = root = tk.Tk() root.title(self.settings.name) root.minsize(300, 300) root.geometry("1000x700") style = tkcode.theme.build_style(self.settings.colors) style.theme_use("tkcode") self.commander = Commander(self) root.bind("<Control-p>", self.show_palette) # horizontal layout for the sidebar to expand / collapse panels self.paned = paned = tk.ttk.PanedWindow(root, orient=tk.HORIZONTAL) paned.pack(fill=tk.BOTH, expand=1) self.sidebar = SideBar(paned, self) paned.add(self.sidebar) self.editor_frame = EditorFrame(paned, self) paned.add(self.editor_frame) self.statusbar = StatusBar(root, self) self.statusbar.pack(fill=tk.X, side=tk.BOTTOM) self.palette = PaletteFrame(self.editor_frame, self.commander) def run(self): """ launch the application """ if not self.root: self.build_ui() self.root.mainloop() def after(self, delay, command): """ proxy method to Tk.after() """ self.root.after(delay, command) def on_file_selected(self, file_obj): """ callback on file selection : set the window title """ self.root.title("%s - %s" % (file_obj.basename, self.settings.name)) # methods below are the controller methods def command_callable(self, name): """create a callable of a command """ def _callback(*args, **kwargs): self.commander.run(name, *args, **kwargs) return _callback def run_command(self, name, *args, **kwargs): self.commander.run(name, *args, **kwargs) def preview_file(self, file_obj): self.model.set_preview(file_obj) def select_file(self, file_obj, originator): """ set a file as selected """ self.model.set_current_file(file_obj, originator) def show_palette(self, event): """ show tool palette """ self.palette.toggle()
# Generated by Django 2.0.5 on 2018-08-10 12:21

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('team', '0116_auto_20180803_1211'),
        ('team', '0116_auto_20180726_1655'),
    ]

    operations = [
    ]
# -*- coding: utf-8 -*-

from charguana.cjk import *
from charguana.chinese import *
from charguana.perluniprops import *
from charguana.thai import *
from charguana.viet import *

cjk_charsets = {'chinese': han_utf8, 'zh': han_utf8, 'cn': han_utf8,
                'japanese': jap_utf8, 'ja': jap_utf8, 'jp': jap_utf8,
                'hiragana': [hiragana], 'katakana': [katakana],
                'korean': kor_utf8, 'ko': kor_utf8, 'kr': kor_utf8,
                'hangul_syllables': [hangul_syllables],
                'hangul_jamo': [hangul_jamo],
                'romanji': [romanji],
                'cjk_punctuation': [cjk_symbols_punctuations],
                'bopomofo': [bopomofo],
                'cjk': sorted(set(han_utf8 + jap_utf8 + kor_utf8 + [romanji])),
                }

perluniprops_charsets = {'Close_Punctuation': close_punctuation,
                         'Open_Punctuation': open_punctuation,
                         'Currency_Symbol': currency_symbol,
                         'IsSc': is_sc,
                         'IsAlnum': is_alnum,
                         'IsAlpha': is_alpha,
                         'IsLower': is_lower,
                         'IsUpper': is_upper,
                         'IsN': is_n,
                         'IsSo': is_so}


def get_chars_between(start, end):
    for i in range(ord(start), ord(end) + 1):
        yield chr(i)


def get_cjk_charset(charset_name):
    for start, end in cjk_charsets[charset_name]:
        for char in get_chars_between(start, end):
            yield char


def get_charset_ranges(charset_ranges):
    for start, end in charset_ranges:
        for char in get_chars_between(start, end):
            yield char


def islang(string, charset):
    return any(set(string).intersection(charset))


other_charsets = {'thai': get_charset_ranges(thai_utf8),
                  'viet': viet_utf8,
                  'traditional_chinese': big5,
                  'simplified_chinese': gbk}


def get_charset(charset_name):
    if charset_name in cjk_charsets:
        return get_cjk_charset(charset_name)
    elif charset_name in perluniprops_charsets:
        return iter(perluniprops_charsets[charset_name])
    elif charset_name in other_charsets:
        return iter(other_charsets[charset_name])
    else:
        return iter([])
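# Illustrative usage sketch (not part of the original module, and it assumes the
# charguana data modules imported above are available): enumerate a named charset
# and test whether a string contains characters from it.
if __name__ == "__main__":
    hiragana_chars = set(get_charset('hiragana'))
    print(len(hiragana_chars))
    print(islang('こんにちは world', hiragana_chars))  # True: the string contains hiragana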
import logging
import os
import tempfile
import uuid
from datetime import datetime
from pathlib import Path
from typing import Union

logger = logging.getLogger(__name__)


def initialize_logging(
    log_dir: str = None,
    log_name: str = "meerkat.log",
    format: str = "[%(asctime)s][%(levelname)s][%(name)s:%(lineno)s] :: %(message)s",
    level: int = logging.WARNING,
) -> None:
    """Initialize logging for Meerkat."""

    # Generate a new directory using the log_dir, if it doesn't exist
    date = datetime.now().strftime("%Y_%m_%d")
    time = datetime.now().strftime("%H_%M_%S")
    uid = str(uuid.uuid4())[:8]

    if log_dir is None:
        log_dir = os.environ.get("MEERKAT_LOG_DIR")

    if log_dir is None:
        success = False
        # try potential logging directories until we find one with adequate permissions
        for log_dir in [
            tempfile.gettempdir(),
            os.path.join(Path.home(), ".meerkat"),
        ]:
            try:
                log_path = os.path.join(log_dir, "log", date, time, uid)
                os.makedirs(log_path, exist_ok=True)
                success = True
            except PermissionError:
                pass
        if not success:
            raise PermissionError(
                "Permission denied in all of Meerkat's default logging directories. "
                "Set environment variable `MEERKAT_LOG_DIR` to specify a directory for "
                "Meerkat logging."
            )
    else:
        log_path = os.path.join(log_dir, "log", date, time, uid)

        # Make the logdir
        os.makedirs(log_path, exist_ok=True)

    # Initialize logging
    logging.basicConfig(
        format=format,
        level=level,
        handlers=[
            logging.FileHandler(os.path.join(log_path, log_name)),
            logging.StreamHandler(),
        ],
    )

    # Set logging levels for dependencies
    set_logging_level_for_imports()

    logger.info("Logging initialized.")


def set_logging_level_for_imports(level: int = logging.WARNING) -> None:
    """Set logging levels for dependencies."""
    # Set levels for imports
    logging.getLogger("tensorflow").setLevel(level)
    logging.getLogger("matplotlib").setLevel(level)
    logging.getLogger("textattack").setLevel(level)
    logging.getLogger("filelock").setLevel(level)


def set_logging_level(level: Union[int, str] = logging.INFO):
    """Set logging level for Meerkat."""
    # Set the top-level logger
    if isinstance(level, int):
        logging.getLogger("meerkat").setLevel(level)
    elif isinstance(level, str):
        logging.getLogger("meerkat").setLevel(
            {
                "debug": logging.DEBUG,
                "info": logging.INFO,
                "warning": logging.WARNING,
                "critical": logging.CRITICAL,
                "fatal": logging.FATAL,
            }[level]
        )
    else:
        raise NotImplementedError(f"Level `{level}` not recognized.")
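# Illustrative usage sketch (not part of the original module): write Meerkat logs
# under the system temp directory at DEBUG level, then raise the package logger to
# INFO. The chosen directory and levels are assumptions made only for this example.
if __name__ == "__main__":
    initialize_logging(log_dir=tempfile.gettempdir(), level=logging.DEBUG)
    set_logging_level("info")
    logger.info("logging example message")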
#------------------------------------------#
#  Maths with numpy:                       #
#    Simple functions like +, -, /, *      #
#    Linear algebra                        #
#    Statistics                            #
#------------------------------------------#

import numpy as np

a = np.array([1, 2, 3, 4])

# +, -, *, / are applied element-wise
a += 2
print(a)

b = np.array([1, 0, 1, 2])
c = a + b
print(c)

a = a**2
print(a)

# sin, cos, tan
d = np.cos(a)
print(d)

# Linear Algebra
x_1 = np.ones((2, 3))
x_2 = np.zeros((3, 2))
print(x_1)
print(x_2)

print(np.matmul(x_1, x_2))

# Find the determinant
y = np.identity(3)
y = np.linalg.det(y)
print(y)
# And there is a lot more you could do with np.linalg.

# Statistics
stats = np.array([[1, 2, 3], [4, 5, 6]])
print(np.min(stats))  # Minimum Value
print(np.max(stats))  # Maximum Value
print(np.sum(stats))  # Sum of all values
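# Illustrative extension (not part of the original script): the same reductions
# can also be taken per row or per column via the axis argument.
print(np.min(stats, axis=0))  # column-wise minimum -> [1 2 3]
print(np.sum(stats, axis=1))  # row-wise sums -> [ 6 15]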
import numpy as np
from setuptools import setup
from setuptools import find_packages

# VERSION = '0.22.0'

# AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
# AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'

# DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'

# REQUIRES = [
#     'asdf>=2.3.3',
#     'astropy>=3.2.1',
#     'astroquery>=0.3.9',
#     'authlib',
#     'bokeh>=1.0',
#     'codecov',
#     'django>=2.0',
#     'flake8',
#     'inflection',
#     'ipython',
#     'jinja2',
#     'jsonschema==2.6.0',
#     'jwedb>=0.0.3',
#     'jwst==0.13.0',
#     'matplotlib',
#     'numpy',
#     'numpydoc',
#     'pandas',
#     'psycopg2',
#     'pysiaf',
#     'pytest',
#     'pytest-cov',
#     'scipy',
#     'sphinx',
#     'sqlalchemy',
#     'stsci_rtd_theme',
#     'twine'
# ]

setup(
    name='sci_act_2019',
    # version=VERSION,
    # description=DESCRIPTION,
    url='https://github.com/laurenmarietta/sci_act_scheduler_2019',
    # author=AUTHORS,
    # author_email='jwql@stsci.edu',
    license='BSD',
    # keywords=['astronomy', 'python'],
    # classifiers=['Programming Language :: Python'],
    packages=find_packages(),
    # install_requires=REQUIRES,
    include_package_data=True,
    include_dirs=[np.get_include()],
)
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools

from ..core.tensor import amp


class autocast:
    r"""
    A class to control autocast mode for amp as a context manager or a decorator.

    :param enabled: Whether autocast mode is enabled.
    :param low_prec_dtype: Set amp autocast mode's lower precision dtype. It will
        change the target dtype in tensor casting for better speed and memory.
        Default: float16.
    :param high_prec_dtype: Set amp autocast mode's higher precision dtype. It will
        change the target dtype in tensor casting for better precision.
        Default: float32.

    Examples:

    .. code-block::

        # used as decorator
        @autocast()
        def train_step(image, label):
            with gm:
                logits = model(image)
                loss = F.nn.cross_entropy(logits, label)
                gm.backward(loss)
            opt.step().clear_grad()
            return loss

        # used as context manager
        def train_step(image, label):
            with autocast():
                with gm:
                    logits = model(image)
                    loss = F.nn.cross_entropy(logits, label)
                    gm.backward(loss)
                opt.step().clear_grad()
            return loss
    """

    def __init__(
        self,
        enabled: bool = True,
        low_prec_dtype: str = "float16",
        high_prec_dtype: str = "float32",
    ):
        self.enabled = enabled
        self.high_prec_dtype = high_prec_dtype
        self.low_prec_dtype = low_prec_dtype
        self._origin_enabled = None
        self._origin_high = None
        self._origin_low = None

    def __enter__(self):
        self._origin_enabled, amp._enabled = amp._enabled, self.enabled
        self._origin_high = amp._high_prec_dtype
        amp._high_prec_dtype = self.high_prec_dtype
        self._origin_low = amp._low_prec_dtype
        amp._low_prec_dtype = self.low_prec_dtype

    def __exit__(self, *args):
        amp._enabled = self._origin_enabled
        amp._high_prec_dtype = self._origin_high
        amp._low_prec_dtype = self._origin_low

    def __call__(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)

        return wrapper
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from config import config_options
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_wtf import CsrfProtect
from flask_mail import Mail

db = SQLAlchemy()
bootstrap = Bootstrap()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
photos = UploadSet('photos', IMAGES)
csrf = CsrfProtect()
mail = Mail()


def create_app(config_name):

    # initializing app
    app = Flask(__name__)

    # creating app configurations
    app.config.from_object(config_options[config_name])

    # Initializing flask extensions
    bootstrap.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    csrf.init_app(app)
    mail.init_app(app)

    # Registering main Blueprint
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')

    # configuring photo uploads
    configure_uploads(app, photos)

    return app
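# Illustrative usage sketch (not part of the original package): a minimal run
# script built on the factory above. The package import path `app` and the config
# key 'development' are assumptions for this example; use whichever keys your
# config.config_options actually defines.
#
#     from app import create_app
#
#     flask_app = create_app('development')
#
#     if __name__ == '__main__':
#         flask_app.run(debug=True)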
__all__ = ['app']

from flask import request, Flask, jsonify, redirect

from src.BusinessCentralLayer.setting import ROUTE_API
from src.BusinessViewLayer.myapp.apis import *

app = Flask(__name__)


# ===========================================================
# Public Interface
# ===========================================================
@app.route(ROUTE_API['capture_subscribe'], methods=['POST'])
def capture_subscribe():
    """
    > Return links from the Redis buffer as fallback subscription links
    @return: AnyType subscribe
    """
    return jsonify(apis_capture_subscribe(dict(request.form)))


@app.route(ROUTE_API['version_manager'], methods=['GET', 'POST'])
def version_manager():
    """
    > Version management and update checking
        1. Manage the document tree and record the server path of every versioned file
        2. Check for updates and record the latest version number and the location of the latest version file
    @return:
        --GET:str --response = 'latest version'
        --POST:dict --response.keys=[msg:str, version-server:str, version-usr:str, url:str, need_update:bool]
    """
    if request.method == 'GET':
        return jsonify(apis_version_manager())
    elif request.method == 'POST':
        return jsonify(apis_version_manager(usr_version=dict(request.form).get('local_version', False)))


@app.route("/", methods=['GET'])
def redirect_to_my_blog():
    return redirect("https://github.com/QIN2DIM/V2RayCloudSpider")


@app.route(ROUTE_API['get_subs_num'], methods=['GET'])
def get_subs_num():
    return jsonify(apis_get_subs_num())


# ===========================================================
# Admin Interface
# ===========================================================
from uuid import uuid4

_private_interface = "Fill in your own private interface"


# ----------------------------------
# Randomly fetch a subscription of a given type (v1)
# ----------------------------------
@app.route(f"/super_admin/{uuid4()}/<command_>", methods=['GET'])
def admin_get_subs(command_):
    return jsonify(apis_admin_get_subs(command_))


# ----------------------------------
# Fetch subscriptions for a given netloc/domain (v1)
# ----------------------------------
@app.route(f"/super_admin/{uuid4()}/debug/<_entropy_name>", methods=['GET'])
def admin_get_subs_v2_debug(_entropy_name):
    """fetch / debug"""
    return jsonify(apis_admin_get_subs_v2_debug(entropy_name=_entropy_name, _debug=True))


@app.route(f"/super_admin/{uuid4()}/<_entropy_name>", methods=['GET'])
def admin_get_subs_v2(_entropy_name):
    """fetch / general"""
    return jsonify(apis_admin_get_subs_v2(entropy_name=_entropy_name))


@app.route(f"/super_admin/{uuid4()}", methods=['GET'])
def admin_select_subs():
    """query / general"""
    return jsonify(apis_admin_get_subs_v2_debug(entropy_name=None))


# ----------------------------------
# Get the currently active task queue
# ----------------------------------
@app.route(f"/super_admin/{uuid4()}", methods=['GET'])
def admin_get_entropy():
    return jsonify(apis_admin_get_entropy())

# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=6500, debug=True)
import ast from django.contrib.auth import authenticate, login, logout from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm from django.contrib.auth.mixins import LoginRequiredMixin from django.shortcuts import render, redirect from django.views.generic import TemplateView from django.views import View import re from urllib.request import urlopen import json from feat.converter.converter import CookingConverter from feat.forms import RegisterAsConsumerForm, RegisterAsProviderForm, ConsumerProfileForm, ProviderProfileForm, \ CreateRecipeForm, CreateMenuForm, CommentForm from feat.models import Recipe, Menu, Comment, RecipeLike, MenuLike, ConsumerProfile, ProviderProfile, \ DailyIntakeFromRecipe class HomeView(TemplateView): #template_name = "index.html" def get(self, request): most_viewed_recipes = Recipe.objects.filter(view_number__gt=0).order_by('view_number').reverse()[:3] most_liked_recipes = sorted(Recipe.objects.filter(), key=lambda a: a.get_like_count(), reverse=True)[:3] most_viewed_menus = Menu.objects.filter(view_number__gt=0).order_by('view_number').reverse()[:3] most_liked_menus = sorted(Menu.objects.filter(), key=lambda a: a.get_like_count(), reverse=True)[:3] return render(request, "index.html", {'most_viewed_recipes': most_viewed_recipes, 'most_liked_recipes': most_liked_recipes, 'most_viewed_menus': most_viewed_menus, 'most_liked_menus': most_liked_menus}) class UserHomeView(LoginRequiredMixin, TemplateView): def get(self, request): username = request.user.username recipes = Recipe.objects.filter(created_by__username=username).order_by('created_at').reverse() menus = Menu.objects.filter(created_by__username=username).order_by('created_at').reverse() rlikes = Recipe.objects.filter(recipelike__cprofiles__user_id=request.user.id).order_by('created_at').reverse()[:10] mlikes = Menu.objects.filter(menulike__cprofiles__user_id=request.user.id).order_by('created_at').reverse()[:10] return render(request, "user_home.html", {'recipes': recipes, 'menus': menus, 'rlikes': rlikes, 'mlikes': mlikes}) class ProfileView(LoginRequiredMixin, TemplateView): #template_name = "profile.html" def get(self, request): consumer_count = ConsumerProfile.objects.all().count() provider_count = ProviderProfile.objects.all().count() comments = Comment.objects.filter(created_by=request.user).order_by('created_at').reverse() daily_intake_recipes = DailyIntakeFromRecipe.objects.all().order_by('intake_at').reverse() return render(request, "profile.html", {'consumer_count': consumer_count, 'provider_count': provider_count, 'comments': comments, 'daily_intake_recipes': daily_intake_recipes}) class Logout(LoginRequiredMixin, View): def get(self, request): logout(request) return redirect('home') class ChangePasswordView(View): def get(self, request): form = PasswordResetForm() return render(request, "custom/change_password.html", {'form': form}) def post(self, request): form = PasswordResetForm(data=request.POST) if form.is_valid(): return redirect('login') else: return render(request, "custom/change_password.html", {'form': form}) class LoginView(View): def get(self, request): form = AuthenticationForm() return render(request, "pages/login.html", {'form': form}) def post(self, request): form = AuthenticationForm(data=request.POST) if form.is_valid(): username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('password') user = authenticate(username=username, password=raw_password) login(request, user) return redirect('user_home') else: return render(request, 
"pages/login.html", {'form': form}) class RegisterAsConsumerView(View): def get(self, request): form = RegisterAsConsumerForm() profile_form = ConsumerProfileForm() return render(request, "pages/sign-up.html", {'form': form, 'profile_form': profile_form}) def post(self, request): form = RegisterAsConsumerForm(request.POST) profile_form = ConsumerProfileForm(request.POST) if form.is_valid() and profile_form.is_valid(): user = form.save(commit=False) user.is_consumer = True user.save() user.consumer_profile.date_of_birth = profile_form.cleaned_data.get('date_of_birth') user.consumer_profile.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('password1') user = authenticate(username=username, password=raw_password) login(request, user) return redirect('user_home') else: #return redirect('pages/sign-up.html') return redirect('user_home') class RegisterAsProviderView(View): def get(self, request): form = RegisterAsProviderForm() profile_form = ProviderProfileForm() return render(request, "pages/sign-up.html", {'form': form, 'profile_form': profile_form}) def post(self, request): form = RegisterAsProviderForm(request.POST) profile_form = ProviderProfileForm(request.POST) if form.is_valid() and profile_form.is_valid(): user = form.save(commit=False) user.is_provider = True user.save() user.provider_profile.location = profile_form.cleaned_data.get('location') user.provider_profile.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('password1') user = authenticate(username=username, password=raw_password) login(request, user) return redirect('user_home') else: return redirect('pages/sign-up.html') class RecipeCreateView(LoginRequiredMixin, View): def get(self, request): form = CreateRecipeForm() return render(request, "create-recipe.html", {'form': form}) def post(self, request): form = CreateRecipeForm(request.POST, instance=request.user) if form.is_valid(): created_by = request.user title = form.cleaned_data.get('title') # adapter: string -> ingredient model ingredients_input = form.cleaned_data.get('ingredients') #Ex: Tomatoes, grape, raw(321360):2; pattern = r'([\w*(,\s)?]+)\(([0-9]*)\)\:([\w*(,\s)?]+)\:([0-9]*)\;' r = re.compile(pattern) ingredients_list = r.findall(ingredients_input) #for i in ingredients_list: # ingredients.append(Ingredient()) # Make a GET request to retrieve details about the food item. api_key = "uHpZNeQkPBjLCcnybhVSTcdzjXt6wgNohqA7gRQu" nutritional_value = { "Energy": 0, "Protein": 0, "Carbohydrates": 0, "Total lipid (fat)": 0, "Minerals": 0, "Vitamins and Other Components": 0, "Water": 0 } for i in ingredients_list: food_id = i[1] unit = i[2] quantity = int(i[3]) url = "https://api.nal.usda.gov/fdc/v1/food/{}?api_key={}".format(food_id, api_key) serialized_data = urlopen(url).read() # check the response status data = json.loads(serialized_data) # check whether the fields accessed here exist in the first place portions = data["foodPortions"] for portion in portions: if portion["measureUnit"]["name"] == unit: weight = portion["gramWeight"] nutrients = data["foodNutrients"] for n in nutrients: nutrient_name = n["nutrient"]["name"] #nutrient_value = n["nutrient"]["rank"] if nutrient_name in nutritional_value: # try: # nutrient_value = n["amount"] # nutrient_unit = n["nutrient"]["unitName"] # # should kJ always be ignored? or is there a case in which only this is given instead of kcal? 
# if nutrient_unit.lower() == "kj": # continue # nutrient_value = CookingConverter().to_standard(nutrient_value, nutrient_unit, nutrient_name) # except KeyError: # print("Nutrient amount/unit: '", nutrient_name, "' could not be found for food item: '", food_id, "'.") # nutrient_value = 0 try: # nutritional value per 100g nutrient_value = n["amount"] nutrient_unit = n["nutrient"]["unitName"] # should kJ always be ignored? or is there a case in which only this is given instead of kcal? if nutrient_unit.lower() == "kj": continue except KeyError: print("Nutrient amount/unit: '", nutrient_name, "' could not be found for food item: '", food_id, "'.") nutrient_value = 0 nutritional_value[nutrient_name] += quantity * weight * nutrient_value / 100 # for n in nutrients: # nutrient_name = n["nutrient"]["name"] # nutrient_value = n["nutrient"]["rank"] # if nutrient_name == "Energy": # # check whether cal or kcal or some other unit is used # kcal = nutrient_value # elif nutrient_name == "Protein": # # check whether g or kg or some other unit is used # protein = nutrient_value # elif nutrient_name == "Carbohydrates": # ch = nutrient_value # elif nutrient_name == "Minerals": # min = nutrient_value # elif nutrient_name == "Vitamins and Other Components": # vit = nutrient_value #nutritional_value_str = json.dumps(nutritional_value) description = form.cleaned_data.get('description') instructions = form.cleaned_data.get('instructions') difficulty = form.cleaned_data.get('difficulty') prepared_in = form.cleaned_data.get('prepared_in') image_link = form.cleaned_data.get('image_link') if image_link == "": image_link = "https://raw.githubusercontent.com/heobu/swe573/feature/posts-like-dislike-follow/feat/static/assets/images/eco-slider-img-1.jpg" #form.save(commit=True) Recipe.objects.create(created_by=created_by, title=title, ingredients=ingredients_input, nutritional_value=nutritional_value, description=description, instructions=instructions, difficulty=difficulty, prepared_in=prepared_in, image_link=image_link) return redirect('user_home') else: return render('create-recipe') class RecipeView(LoginRequiredMixin, View): def get(self, request, id=None): #recipe = Recipe.objects.all()[0]#filter(id=id) recipe = Recipe.objects.get(id=id) recipe.increase_view_number() form = CommentForm() return render(request, "recipe-detail.html", {'recipe': recipe, 'form': form}) # , recipe=None def post(self, request, id=None): form = CommentForm(request.POST, instance=request.user) if form.is_valid(): created_by = request.user recipe = Recipe.objects.get(id=id) content = form.cleaned_data.get('content') #recipe = form.cleaned_data.get('recipe') Comment.objects.create(created_by=created_by, content=content, recipe=recipe) #return redirect('user_home') return redirect('/recipe/detail/{}'.format(id)) else: return redirect('/recipe/detail/{}'.format(id)) class MenuCreateView(LoginRequiredMixin, View): def get(self, request): form = CreateMenuForm() recipes = Recipe.objects.filter().order_by('title') return render(request, "create-menu.html", {'form': form, 'recipes': recipes}) def post(self, request): form = CreateMenuForm(request.POST, instance=request.user) if form.is_valid(): created_by = request.user title = form.cleaned_data.get('title') description = form.cleaned_data.get('description') image_link = form.cleaned_data.get('image_link') # adapter: string -> ingredient model food_items_input = form.cleaned_data.get('food_items') #Ex: Recipe Title([recipe_id]]):[quantity]; pattern = r'([\w*(,\s)?]+)\(([0-9]*)\)\:([0-9]*)\;' r = 
re.compile(pattern) food_items = r.findall(food_items_input) menu_nutritional_value = { "Energy": 0, "Protein": 0, "Carbohydrates": 0, "Total lipid (fat)": 0, "Minerals": 0, "Vitamins and Other Components": 0, "Water": 0 } for i in food_items: food_id = i[1] quantity = int(i[2]) # food item is not a recipe # Retrieve information from USDA for food items. # food item is a recipe recipe_nutritional_value_str = Recipe.objects.get(id=food_id).nutritional_value recipe_nutritional_value = ast.literal_eval(recipe_nutritional_value_str) for nutrient_name, nutrient_value in recipe_nutritional_value.items(): if nutrient_name in menu_nutritional_value: menu_nutritional_value[nutrient_name] += quantity * nutrient_value #form.save(commit=True) if image_link == "": image_link = "https://raw.githubusercontent.com/heobu/swe573/feature/posts-like-dislike-follow/feat/static/assets/images/eco-slider-img-1.jpg" Menu.objects.create(created_by=created_by, title=title, description=description, food_items=food_items, nutritional_value=menu_nutritional_value, image_link=image_link) return redirect('user_home') else: return render('create-menu') class MenuView(LoginRequiredMixin, View): def get(self, request, id=None): #recipe = Recipe.objects.all()[0]#filter(id=id) menu = Menu.objects.get(id=id) menu.increase_view_number() return render(request, "menu-detail.html", {'menu': menu}) class SearchRecipeView(LoginRequiredMixin, View): def get(self, request, contains=None): keyword = request.GET.get('contains', '') #criteria = request.GET.get('filter', '') if keyword != '': recipes = Recipe.objects.filter(title__contains=keyword) |\ Recipe.objects.filter(description__contains=keyword) |\ Recipe.objects.filter(ingredients__contains=keyword) |\ Recipe.objects.filter(instructions__contains=keyword) else: recipes = Recipe.objects.all() #creators = ConsumerProfile.objects.filter(consumer_profile) #if criteria == '': # pass #elif criteria == '': # recipes.order_by() return render(request, "recipe-search-results.html", {'recipes': recipes, 'keyword': keyword}) class SearchMenuView(LoginRequiredMixin, View): def get(self, request, contains=None): keyword = request.GET.get('contains', '') if keyword != '': menus = Menu.objects.filter(title__contains=keyword) |\ Menu.objects.filter(description__contains=keyword) else: menus = Menu.objects.all() return render(request, "menu-search-results.html", {'menus': menus, 'keyword': keyword})
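# --- Hedged illustration (added; not part of the original views) ---
# What the ingredient pattern used in RecipeCreateView.post extracts, assuming
# input of the form "Name(fdc_id):unit:quantity;" as that method's comment hints:
#
#   pattern = r'([\w*(,\s)?]+)\(([0-9]*)\)\:([\w*(,\s)?]+)\:([0-9]*)\;'
#   re.findall(pattern, "Tomatoes, grape, raw(321360):cup:2;")
#   # -> [('Tomatoes, grape, raw', '321360', 'cup', '2')]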
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Xclip(AutotoolsPackage): """xclip is a command line utility that is designed to run on any system with an X11 implementation. It provides an interface to X selections ("the clipboard") from the command line. It can read data from standard in or a file and place it in an X selection for pasting into other X applications. xclip can also print an X selection to standard out, which can then be redirected to a file or another program.""" homepage = "https://github.com/astrand/xclip" git = "https://github.com/astrand/xclip.git" version('0.13', commit='9aa7090c3b8b437c6489edca32ae43d82e0c1281') depends_on('libxmu') depends_on('libx11') depends_on('autoconf', type='build') depends_on('automake', type='build') depends_on('libtool', type='build') depends_on('m4', type='build')
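# Hedged usage note (added): with this package recipe on a Spack repository
# path, xclip can typically be installed with `spack install xclip`.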
import unittest from cloudwanderer import URN from ..helpers import CloudWandererCalls, ExpectedCall, MultipleResourceScenario, NoMotoMock, SingleResourceScenario class TestVpnGateways(NoMotoMock, unittest.TestCase): vpn_gateway_payload = { "State": "available", "Type": "ipsec.1", "VpcAttachments": [], "VpnGatewayId": "vgw-11111111111111111", "AmazonSideAsn": 64512, "Tags": [{"Key": "Name", "Value": "test-vpn-gateway"}], } mock = { "ec2": { "describe_vpn_gateways.return_value": {"VpnGateways": [vpn_gateway_payload]}, } } single_resource_scenarios = [ SingleResourceScenario( urn=URN.from_string("urn:aws:123456789012:eu-west-2:ec2:vpn_gateway:vpn-11111111111111111"), expected_results=[vpn_gateway_payload], expected_call=ExpectedCall( "ec2", "describe_vpn_gateways", [], {"VpnGatewayIds": ["vpn-11111111111111111"]} ), ) ] multiple_resource_scenarios = [ MultipleResourceScenario( arguments=CloudWandererCalls(regions=["eu-west-2"], service_names=["ec2"], resource_types=["vpn_gateway"]), expected_results=[vpn_gateway_payload], ) ]
# Copyright (c) BioniDL@SUSTECH. All Rights Reserved
"""
This is a demo that runs EfficientNet trained on a waste sorting dataset on a test image paper.png
Please download the pretrained weights and put them under the ./weights folder before running the code
"""
from efficientnet_predictor import efficientnet
import cv2
import numpy as np

img_size = 300
model = efficientnet(0, 'weights/Recyclable-bs32-weights.08-1.000-DenseNet169.hdf5')
image = cv2.resize(cv2.imread('paper.png'), (img_size, img_size))

# Feed the image in RGB order to the model.
# The input can be of shape [height, width, channels] or [number of images, height, width, channels]
preds = model.run(image[:, :, ::-1])[0]

# The pretrained model classifies four recyclable waste types: ['glass', 'metal', 'paper', 'plastic']
obj = ['glass', 'metal', 'paper', 'plastic'][np.argmax(preds)]
print("Recognize %s" % obj)
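# --- Hedged extension of the demo (added) ---
# As the comment above notes, model.run can also take a batch of images of
# shape [number of images, height, width, channels]; a minimal batched call:
batch = np.stack([image[:, :, ::-1], image[:, :, ::-1]])  # shape (2, 300, 300, 3)
for p in model.run(batch):
    print("Recognize %s" % ['glass', 'metal', 'paper', 'plastic'][int(np.argmax(p))])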
from google.appengine.ext import ndb from protorpc import messages from google.appengine.ext.ndb import msgprop from csvmodel import CsvModel class Stop(CsvModel): class LocationType(messages.Enum): STOP = 0 STATION = 1 class WheelchairBoarding(messages.Enum): UNKNOWN = 0 POSSIBLE = 1 IMPOSSIBLE = 2 _csv_file = 'stops.txt' _csv_id = 'stop_id' stop_code = ndb.StringProperty() stop_name = ndb.StringProperty(required=True) stop_desc = ndb.TextProperty() stop_latlon = ndb.GeoPtProperty(required=True) zone_id = ndb.KeyProperty(kind='Zone') stop_url = ndb.StringProperty() location_type = msgprop.EnumProperty(LocationType) parent_station = ndb.KeyProperty(kind='Stop') stop_timezone = ndb.StringProperty() wheelchair_boarding = msgprop.EnumProperty(WheelchairBoarding)
# Copyright (c) 2010 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { }, 'target_defaults': { 'conditions': [ ['OS!="win"', { 'defines': [ # For talloc 'HAVE_VA_COPY', ], }], ['OS!="mac"', { 'defines': [ # For talloc 'HAVE_STRNLEN', ], }], ['os_posix == 1 and OS != "mac"', { 'cflags': [ '-fPIC', ], }], ], 'defines': [ # For Mesa 'MAPI_GLAPI_CURRENT', ], }, 'targets': [ { 'target_name': 'mesa', 'type': 'static_library', 'include_dirs': [ '../talloc', 'MesaLib/include', 'MesaLib/src/glsl', 'MesaLib/src/mapi', 'MesaLib/src/mesa', 'MesaLib/src/mesa/main', ], 'sources': [ '../talloc/talloc.c', 'MesaLib/src/glsl/ast.h', 'MesaLib/src/glsl/ast_expr.cpp', 'MesaLib/src/glsl/ast_function.cpp', 'MesaLib/src/glsl/ast_to_hir.cpp', 'MesaLib/src/glsl/ast_type.cpp', 'MesaLib/src/glsl/builtin_function.cpp', 'MesaLib/src/glsl/builtin_types.h', 'MesaLib/src/glsl/builtin_variables.h', 'MesaLib/src/glsl/glsl_lexer.cpp', 'MesaLib/src/glsl/glsl_parser.cpp', 'MesaLib/src/glsl/glsl_parser.h', 'MesaLib/src/glsl/glsl_parser_extras.cpp', 'MesaLib/src/glsl/glsl_parser_extras.h', 'MesaLib/src/glsl/glsl_symbol_table.cpp', 'MesaLib/src/glsl/glsl_symbol_table.h', 'MesaLib/src/glsl/glsl_types.cpp', 'MesaLib/src/glsl/glsl_types.h', 'MesaLib/src/glsl/hir_field_selection.cpp', 'MesaLib/src/glsl/ir.cpp', 'MesaLib/src/glsl/ir.h', 'MesaLib/src/glsl/ir_algebraic.cpp', 'MesaLib/src/glsl/ir_basic_block.cpp', 'MesaLib/src/glsl/ir_basic_block.h', 'MesaLib/src/glsl/ir_clone.cpp', 'MesaLib/src/glsl/ir_constant_expression.cpp', 'MesaLib/src/glsl/ir_constant_folding.cpp', 'MesaLib/src/glsl/ir_constant_propagation.cpp', 'MesaLib/src/glsl/ir_constant_variable.cpp', 'MesaLib/src/glsl/ir_copy_propagation.cpp', 'MesaLib/src/glsl/ir_dead_code.cpp', 'MesaLib/src/glsl/ir_dead_code_local.cpp', 'MesaLib/src/glsl/ir_dead_functions.cpp', 'MesaLib/src/glsl/ir_div_to_mul_rcp.cpp', 'MesaLib/src/glsl/ir_explog_to_explog2.cpp', 'MesaLib/src/glsl/ir_expression_flattening.cpp', 'MesaLib/src/glsl/ir_expression_flattening.h', 'MesaLib/src/glsl/ir_function.cpp', 'MesaLib/src/glsl/ir_function_can_inline.cpp', 'MesaLib/src/glsl/ir_function_inlining.cpp', 'MesaLib/src/glsl/ir_function_inlining.h', 'MesaLib/src/glsl/ir_hierarchical_visitor.cpp', 'MesaLib/src/glsl/ir_hierarchical_visitor.h', 'MesaLib/src/glsl/ir_hv_accept.cpp', 'MesaLib/src/glsl/ir_if_simplification.cpp', 'MesaLib/src/glsl/ir_if_to_cond_assign.cpp', 'MesaLib/src/glsl/ir_import_prototypes.cpp', 'MesaLib/src/glsl/ir_lower_jumps.cpp', 'MesaLib/src/glsl/ir_mat_op_to_vec.cpp', 'MesaLib/src/glsl/ir_mod_to_fract.cpp', 'MesaLib/src/glsl/ir_noop_swizzle.cpp', 'MesaLib/src/glsl/ir_optimization.h', 'MesaLib/src/glsl/ir_print_visitor.cpp', 'MesaLib/src/glsl/ir_print_visitor.h', 'MesaLib/src/glsl/ir_reader.cpp', 'MesaLib/src/glsl/ir_reader.h', 'MesaLib/src/glsl/ir_rvalue_visitor.cpp', 'MesaLib/src/glsl/ir_rvalue_visitor.h', 'MesaLib/src/glsl/ir_set_program_inouts.cpp', 'MesaLib/src/glsl/ir_structure_splitting.cpp', 'MesaLib/src/glsl/ir_sub_to_add_neg.cpp', 'MesaLib/src/glsl/ir_swizzle_swizzle.cpp', 'MesaLib/src/glsl/ir_tree_grafting.cpp', 'MesaLib/src/glsl/ir_validate.cpp', 'MesaLib/src/glsl/ir_variable.cpp', 'MesaLib/src/glsl/ir_variable_refcount.cpp', 'MesaLib/src/glsl/ir_variable_refcount.h', 'MesaLib/src/glsl/ir_vec_index_to_cond_assign.cpp', 'MesaLib/src/glsl/ir_vec_index_to_swizzle.cpp', 'MesaLib/src/glsl/ir_visitor.h', 'MesaLib/src/glsl/link_functions.cpp', 'MesaLib/src/glsl/linker.cpp', 
'MesaLib/src/glsl/linker.h', 'MesaLib/src/glsl/list.h', 'MesaLib/src/glsl/loop_analysis.cpp', 'MesaLib/src/glsl/loop_analysis.h', 'MesaLib/src/glsl/loop_controls.cpp', 'MesaLib/src/glsl/loop_unroll.cpp', 'MesaLib/src/glsl/lower_noise.cpp', 'MesaLib/src/glsl/lower_variable_index_to_cond_assign.cpp', 'MesaLib/src/glsl/opt_redundant_jumps.cpp', 'MesaLib/src/glsl/program.h', 'MesaLib/src/glsl/s_expression.cpp', 'MesaLib/src/glsl/s_expression.h', 'MesaLib/src/glsl/safe_strcmp.c', 'MesaLib/src/glsl/safe_strcmp.h', 'MesaLib/src/glsl/glcpp/glcpp-lex.c', 'MesaLib/src/glsl/glcpp/glcpp-parse.c', 'MesaLib/src/glsl/glcpp/glcpp-parse.h', 'MesaLib/src/glsl/glcpp/pp.c', 'MesaLib/src/mapi/glapi/glapi.h', 'MesaLib/src/mapi/glapi/glapi_dispatch.c', 'MesaLib/src/mapi/glapi/glapi_entrypoint.c', 'MesaLib/src/mapi/glapi/glapi_getproc.c', 'MesaLib/src/mapi/glapi/glapi_nop.c', 'MesaLib/src/mapi/glapi/glapi_priv.h', 'MesaLib/src/mapi/glapi/glapidispatch.h', 'MesaLib/src/mapi/glapi/glapioffsets.h', 'MesaLib/src/mapi/glapi/glapitable.h', 'MesaLib/src/mapi/glapi/glapitemp.h', 'MesaLib/src/mapi/glapi/glprocs.h', 'MesaLib/src/mapi/mapi/u_compiler.h', 'MesaLib/src/mapi/mapi/u_current.c', 'MesaLib/src/mapi/mapi/u_current.h', 'MesaLib/src/mapi/mapi/u_execmem.c', 'MesaLib/src/mapi/mapi/u_execmem.h', 'MesaLib/src/mapi/mapi/u_macros.h', 'MesaLib/src/mapi/mapi/u_thread.c', 'MesaLib/src/mapi/mapi/u_thread.h', 'MesaLib/src/mesa/main/accum.c', 'MesaLib/src/mesa/main/accum.h', 'MesaLib/src/mesa/main/api_arrayelt.c', 'MesaLib/src/mesa/main/api_arrayelt.h', 'MesaLib/src/mesa/main/api_exec.c', 'MesaLib/src/mesa/main/api_exec.h', 'MesaLib/src/mesa/main/api_loopback.c', 'MesaLib/src/mesa/main/api_loopback.h', 'MesaLib/src/mesa/main/api_noop.c', 'MesaLib/src/mesa/main/api_noop.h', 'MesaLib/src/mesa/main/api_validate.c', 'MesaLib/src/mesa/main/api_validate.h', 'MesaLib/src/mesa/main/arbprogram.c', 'MesaLib/src/mesa/main/arbprogram.h', 'MesaLib/src/mesa/main/arrayobj.c', 'MesaLib/src/mesa/main/arrayobj.h', 'MesaLib/src/mesa/main/atifragshader.c', 'MesaLib/src/mesa/main/atifragshader.h', 'MesaLib/src/mesa/main/attrib.c', 'MesaLib/src/mesa/main/attrib.h', 'MesaLib/src/mesa/main/bitset.h', 'MesaLib/src/mesa/main/blend.c', 'MesaLib/src/mesa/main/blend.h', 'MesaLib/src/mesa/main/bufferobj.c', 'MesaLib/src/mesa/main/bufferobj.h', 'MesaLib/src/mesa/main/buffers.c', 'MesaLib/src/mesa/main/buffers.h', 'MesaLib/src/mesa/main/clear.c', 'MesaLib/src/mesa/main/clear.h', 'MesaLib/src/mesa/main/clip.c', 'MesaLib/src/mesa/main/clip.h', 'MesaLib/src/mesa/main/colormac.h', 'MesaLib/src/mesa/main/colortab.c', 'MesaLib/src/mesa/main/colortab.h', 'MesaLib/src/mesa/main/compiler.h', 'MesaLib/src/mesa/main/condrender.c', 'MesaLib/src/mesa/main/condrender.h', 'MesaLib/src/mesa/main/config.h', 'MesaLib/src/mesa/main/context.c', 'MesaLib/src/mesa/main/context.h', 'MesaLib/src/mesa/main/convolve.c', 'MesaLib/src/mesa/main/convolve.h', 'MesaLib/src/mesa/main/core.h', 'MesaLib/src/mesa/main/cpuinfo.c', 'MesaLib/src/mesa/main/cpuinfo.h', 'MesaLib/src/mesa/main/dd.h', 'MesaLib/src/mesa/main/debug.c', 'MesaLib/src/mesa/main/debug.h', 'MesaLib/src/mesa/main/depth.c', 'MesaLib/src/mesa/main/depth.h', 'MesaLib/src/mesa/main/depthstencil.c', 'MesaLib/src/mesa/main/depthstencil.h', 'MesaLib/src/mesa/main/dispatch.h', 'MesaLib/src/mesa/main/dlist.c', 'MesaLib/src/mesa/main/dlist.h', 'MesaLib/src/mesa/main/dlopen.c', 'MesaLib/src/mesa/main/dlopen.h', 'MesaLib/src/mesa/main/drawpix.c', 'MesaLib/src/mesa/main/drawpix.h', 'MesaLib/src/mesa/main/drawtex.c', 
'MesaLib/src/mesa/main/drawtex.h', 'MesaLib/src/mesa/main/enable.c', 'MesaLib/src/mesa/main/enable.h', 'MesaLib/src/mesa/main/enums.c', 'MesaLib/src/mesa/main/enums.h', 'MesaLib/src/mesa/main/eval.c', 'MesaLib/src/mesa/main/eval.h', 'MesaLib/src/mesa/main/execmem.c', 'MesaLib/src/mesa/main/extensions.c', 'MesaLib/src/mesa/main/extensions.h', 'MesaLib/src/mesa/main/fbobject.c', 'MesaLib/src/mesa/main/fbobject.h', 'MesaLib/src/mesa/main/feedback.c', 'MesaLib/src/mesa/main/feedback.h', 'MesaLib/src/mesa/main/ffvertex_prog.c', 'MesaLib/src/mesa/main/ffvertex_prog.h', 'MesaLib/src/mesa/main/fog.c', 'MesaLib/src/mesa/main/fog.h', 'MesaLib/src/mesa/main/formats.c', 'MesaLib/src/mesa/main/formats.h', 'MesaLib/src/mesa/main/framebuffer.c', 'MesaLib/src/mesa/main/framebuffer.h', 'MesaLib/src/mesa/main/get.c', 'MesaLib/src/mesa/main/get.h', 'MesaLib/src/mesa/main/getstring.c', 'MesaLib/src/mesa/main/glheader.h', 'MesaLib/src/mesa/main/hash.c', 'MesaLib/src/mesa/main/hash.h', 'MesaLib/src/mesa/main/hint.c', 'MesaLib/src/mesa/main/hint.h', 'MesaLib/src/mesa/main/histogram.c', 'MesaLib/src/mesa/main/histogram.h', 'MesaLib/src/mesa/main/image.c', 'MesaLib/src/mesa/main/image.h', 'MesaLib/src/mesa/main/imports.c', 'MesaLib/src/mesa/main/imports.h', 'MesaLib/src/mesa/main/light.c', 'MesaLib/src/mesa/main/light.h', 'MesaLib/src/mesa/main/lines.c', 'MesaLib/src/mesa/main/lines.h', 'MesaLib/src/mesa/main/macros.h', 'MesaLib/src/mesa/main/matrix.c', 'MesaLib/src/mesa/main/matrix.h', 'MesaLib/src/mesa/main/mfeatures.h', 'MesaLib/src/mesa/main/mipmap.c', 'MesaLib/src/mesa/main/mipmap.h', 'MesaLib/src/mesa/main/mm.c', 'MesaLib/src/mesa/main/mm.h', 'MesaLib/src/mesa/main/mtypes.h', 'MesaLib/src/mesa/main/multisample.c', 'MesaLib/src/mesa/main/multisample.h', 'MesaLib/src/mesa/main/nvprogram.c', 'MesaLib/src/mesa/main/nvprogram.h', 'MesaLib/src/mesa/main/pixel.c', 'MesaLib/src/mesa/main/pixel.h', 'MesaLib/src/mesa/main/pixelstore.c', 'MesaLib/src/mesa/main/pixelstore.h', 'MesaLib/src/mesa/main/points.c', 'MesaLib/src/mesa/main/points.h', 'MesaLib/src/mesa/main/polygon.c', 'MesaLib/src/mesa/main/polygon.h', 'MesaLib/src/mesa/main/queryobj.c', 'MesaLib/src/mesa/main/queryobj.h', 'MesaLib/src/mesa/main/rastpos.c', 'MesaLib/src/mesa/main/rastpos.h', 'MesaLib/src/mesa/main/readpix.c', 'MesaLib/src/mesa/main/readpix.h', 'MesaLib/src/mesa/main/remap.c', 'MesaLib/src/mesa/main/remap.h', 'MesaLib/src/mesa/main/remap_helper.h', 'MesaLib/src/mesa/main/renderbuffer.c', 'MesaLib/src/mesa/main/renderbuffer.h', 'MesaLib/src/mesa/main/scissor.c', 'MesaLib/src/mesa/main/scissor.h', 'MesaLib/src/mesa/main/shaderapi.c', 'MesaLib/src/mesa/main/shaderapi.h', 'MesaLib/src/mesa/main/shaderobj.c', 'MesaLib/src/mesa/main/shaderobj.h', 'MesaLib/src/mesa/main/shared.c', 'MesaLib/src/mesa/main/shared.h', 'MesaLib/src/mesa/main/simple_list.h', 'MesaLib/src/mesa/main/state.c', 'MesaLib/src/mesa/main/state.h', 'MesaLib/src/mesa/main/stencil.c', 'MesaLib/src/mesa/main/stencil.h', 'MesaLib/src/mesa/main/syncobj.c', 'MesaLib/src/mesa/main/syncobj.h', 'MesaLib/src/mesa/main/texcompress.c', 'MesaLib/src/mesa/main/texcompress.h', 'MesaLib/src/mesa/main/texcompress_fxt1.c', 'MesaLib/src/mesa/main/texcompress_fxt1.h', 'MesaLib/src/mesa/main/texcompress_s3tc.c', 'MesaLib/src/mesa/main/texcompress_s3tc.h', 'MesaLib/src/mesa/main/texenv.c', 'MesaLib/src/mesa/main/texenv.h', 'MesaLib/src/mesa/main/texenvprogram.c', 'MesaLib/src/mesa/main/texenvprogram.h', 'MesaLib/src/mesa/main/texfetch.c', 'MesaLib/src/mesa/main/texfetch.h', 
'MesaLib/src/mesa/main/texfetch_tmp.h', 'MesaLib/src/mesa/main/texformat.c', 'MesaLib/src/mesa/main/texformat.h', 'MesaLib/src/mesa/main/texgen.c', 'MesaLib/src/mesa/main/texgen.h', 'MesaLib/src/mesa/main/texgetimage.c', 'MesaLib/src/mesa/main/texgetimage.h', 'MesaLib/src/mesa/main/teximage.c', 'MesaLib/src/mesa/main/teximage.h', 'MesaLib/src/mesa/main/texobj.c', 'MesaLib/src/mesa/main/texobj.h', 'MesaLib/src/mesa/main/texpal.c', 'MesaLib/src/mesa/main/texpal.h', 'MesaLib/src/mesa/main/texparam.c', 'MesaLib/src/mesa/main/texparam.h', 'MesaLib/src/mesa/main/texrender.c', 'MesaLib/src/mesa/main/texrender.h', 'MesaLib/src/mesa/main/texstate.c', 'MesaLib/src/mesa/main/texstate.h', 'MesaLib/src/mesa/main/texstore.c', 'MesaLib/src/mesa/main/texstore.h', 'MesaLib/src/mesa/main/transformfeedback.c', 'MesaLib/src/mesa/main/transformfeedback.h', 'MesaLib/src/mesa/main/uniforms.c', 'MesaLib/src/mesa/main/uniforms.h', 'MesaLib/src/mesa/main/varray.c', 'MesaLib/src/mesa/main/varray.h', 'MesaLib/src/mesa/main/version.c', 'MesaLib/src/mesa/main/version.h', 'MesaLib/src/mesa/main/viewport.c', 'MesaLib/src/mesa/main/viewport.h', 'MesaLib/src/mesa/main/vtxfmt.c', 'MesaLib/src/mesa/main/vtxfmt.h', 'MesaLib/src/mesa/main/vtxfmt_tmp.h', 'MesaLib/src/mesa/math/m_clip_tmp.h', 'MesaLib/src/mesa/math/m_copy_tmp.h', 'MesaLib/src/mesa/math/m_debug.h', 'MesaLib/src/mesa/math/m_debug_clip.c', 'MesaLib/src/mesa/math/m_debug_norm.c', 'MesaLib/src/mesa/math/m_debug_util.h', 'MesaLib/src/mesa/math/m_debug_xform.c', 'MesaLib/src/mesa/math/m_dotprod_tmp.h', 'MesaLib/src/mesa/math/m_eval.c', 'MesaLib/src/mesa/math/m_eval.h', 'MesaLib/src/mesa/math/m_matrix.c', 'MesaLib/src/mesa/math/m_matrix.h', 'MesaLib/src/mesa/math/m_norm_tmp.h', 'MesaLib/src/mesa/math/m_trans_tmp.h', 'MesaLib/src/mesa/math/m_translate.c', 'MesaLib/src/mesa/math/m_translate.h', 'MesaLib/src/mesa/math/m_vector.c', 'MesaLib/src/mesa/math/m_vector.h', 'MesaLib/src/mesa/math/m_xform.c', 'MesaLib/src/mesa/math/m_xform.h', 'MesaLib/src/mesa/math/m_xform_tmp.h', 'MesaLib/src/mesa/program/arbprogparse.c', 'MesaLib/src/mesa/program/arbprogparse.h', 'MesaLib/src/mesa/program/hash_table.c', 'MesaLib/src/mesa/program/hash_table.h', 'MesaLib/src/mesa/program/ir_to_mesa.cpp', 'MesaLib/src/mesa/program/ir_to_mesa.h', 'MesaLib/src/mesa/program/lex.yy.c', 'MesaLib/src/mesa/program/nvfragparse.c', 'MesaLib/src/mesa/program/nvfragparse.h', 'MesaLib/src/mesa/program/nvvertparse.c', 'MesaLib/src/mesa/program/nvvertparse.h', 'MesaLib/src/mesa/program/prog_cache.c', 'MesaLib/src/mesa/program/prog_cache.h', 'MesaLib/src/mesa/program/prog_execute.c', 'MesaLib/src/mesa/program/prog_execute.h', 'MesaLib/src/mesa/program/prog_instruction.c', 'MesaLib/src/mesa/program/prog_instruction.h', 'MesaLib/src/mesa/program/prog_noise.c', 'MesaLib/src/mesa/program/prog_noise.h', 'MesaLib/src/mesa/program/prog_optimize.c', 'MesaLib/src/mesa/program/prog_optimize.h', 'MesaLib/src/mesa/program/prog_parameter.c', 'MesaLib/src/mesa/program/prog_parameter.h', 'MesaLib/src/mesa/program/prog_parameter_layout.c', 'MesaLib/src/mesa/program/prog_parameter_layout.h', 'MesaLib/src/mesa/program/prog_print.c', 'MesaLib/src/mesa/program/prog_print.h', 'MesaLib/src/mesa/program/prog_statevars.c', 'MesaLib/src/mesa/program/prog_statevars.h', 'MesaLib/src/mesa/program/prog_uniform.c', 'MesaLib/src/mesa/program/prog_uniform.h', 'MesaLib/src/mesa/program/program.c', 'MesaLib/src/mesa/program/program.h', 'MesaLib/src/mesa/program/program_parse.tab.c', 'MesaLib/src/mesa/program/program_parse.tab.h', 
'MesaLib/src/mesa/program/program_parse_extra.c', 'MesaLib/src/mesa/program/program_parser.h', 'MesaLib/src/mesa/program/programopt.c', 'MesaLib/src/mesa/program/programopt.h', 'MesaLib/src/mesa/program/symbol_table.c', 'MesaLib/src/mesa/program/symbol_table.h', 'MesaLib/src/mesa/swrast/s_aaline.c', 'MesaLib/src/mesa/swrast/s_aaline.h', 'MesaLib/src/mesa/swrast/s_aalinetemp.h', 'MesaLib/src/mesa/swrast/s_aatriangle.c', 'MesaLib/src/mesa/swrast/s_aatriangle.h', 'MesaLib/src/mesa/swrast/s_aatritemp.h', 'MesaLib/src/mesa/swrast/s_accum.c', 'MesaLib/src/mesa/swrast/s_accum.h', 'MesaLib/src/mesa/swrast/s_alpha.c', 'MesaLib/src/mesa/swrast/s_alpha.h', 'MesaLib/src/mesa/swrast/s_atifragshader.c', 'MesaLib/src/mesa/swrast/s_atifragshader.h', 'MesaLib/src/mesa/swrast/s_bitmap.c', 'MesaLib/src/mesa/swrast/s_blend.c', 'MesaLib/src/mesa/swrast/s_blend.h', 'MesaLib/src/mesa/swrast/s_blit.c', 'MesaLib/src/mesa/swrast/s_clear.c', 'MesaLib/src/mesa/swrast/s_context.c', 'MesaLib/src/mesa/swrast/s_context.h', 'MesaLib/src/mesa/swrast/s_copypix.c', 'MesaLib/src/mesa/swrast/s_depth.c', 'MesaLib/src/mesa/swrast/s_depth.h', 'MesaLib/src/mesa/swrast/s_drawpix.c', 'MesaLib/src/mesa/swrast/s_feedback.c', 'MesaLib/src/mesa/swrast/s_feedback.h', 'MesaLib/src/mesa/swrast/s_fog.c', 'MesaLib/src/mesa/swrast/s_fog.h', 'MesaLib/src/mesa/swrast/s_fragprog.c', 'MesaLib/src/mesa/swrast/s_fragprog.h', 'MesaLib/src/mesa/swrast/s_lines.c', 'MesaLib/src/mesa/swrast/s_lines.h', 'MesaLib/src/mesa/swrast/s_linetemp.h', 'MesaLib/src/mesa/swrast/s_logic.c', 'MesaLib/src/mesa/swrast/s_logic.h', 'MesaLib/src/mesa/swrast/s_masking.c', 'MesaLib/src/mesa/swrast/s_masking.h', 'MesaLib/src/mesa/swrast/s_points.c', 'MesaLib/src/mesa/swrast/s_points.h', 'MesaLib/src/mesa/swrast/s_readpix.c', 'MesaLib/src/mesa/swrast/s_span.c', 'MesaLib/src/mesa/swrast/s_span.h', 'MesaLib/src/mesa/swrast/s_spantemp.h', 'MesaLib/src/mesa/swrast/s_stencil.c', 'MesaLib/src/mesa/swrast/s_stencil.h', 'MesaLib/src/mesa/swrast/s_texcombine.c', 'MesaLib/src/mesa/swrast/s_texcombine.h', 'MesaLib/src/mesa/swrast/s_texfilter.c', 'MesaLib/src/mesa/swrast/s_texfilter.h', 'MesaLib/src/mesa/swrast/s_triangle.c', 'MesaLib/src/mesa/swrast/s_triangle.h', 'MesaLib/src/mesa/swrast/s_trispan.h', 'MesaLib/src/mesa/swrast/s_tritemp.h', 'MesaLib/src/mesa/swrast/s_zoom.c', 'MesaLib/src/mesa/swrast/s_zoom.h', 'MesaLib/src/mesa/swrast/swrast.h', 'MesaLib/src/mesa/swrast_setup/ss_context.c', 'MesaLib/src/mesa/swrast_setup/ss_context.h', 'MesaLib/src/mesa/swrast_setup/ss_triangle.c', 'MesaLib/src/mesa/swrast_setup/ss_triangle.h', 'MesaLib/src/mesa/swrast_setup/ss_tritmp.h', 'MesaLib/src/mesa/swrast_setup/ss_vb.h', 'MesaLib/src/mesa/swrast_setup/swrast_setup.h', 'MesaLib/src/mesa/tnl/t_context.c', 'MesaLib/src/mesa/tnl/t_context.h', 'MesaLib/src/mesa/tnl/t_draw.c', 'MesaLib/src/mesa/tnl/t_pipeline.c', 'MesaLib/src/mesa/tnl/t_pipeline.h', 'MesaLib/src/mesa/tnl/t_rasterpos.c', 'MesaLib/src/mesa/tnl/t_vb_cliptmp.h', 'MesaLib/src/mesa/tnl/t_vb_cull.c', 'MesaLib/src/mesa/tnl/t_vb_fog.c', 'MesaLib/src/mesa/tnl/t_vb_light.c', 'MesaLib/src/mesa/tnl/t_vb_lighttmp.h', 'MesaLib/src/mesa/tnl/t_vb_normals.c', 'MesaLib/src/mesa/tnl/t_vb_points.c', 'MesaLib/src/mesa/tnl/t_vb_program.c', 'MesaLib/src/mesa/tnl/t_vb_render.c', 'MesaLib/src/mesa/tnl/t_vb_rendertmp.h', 'MesaLib/src/mesa/tnl/t_vb_texgen.c', 'MesaLib/src/mesa/tnl/t_vb_texmat.c', 'MesaLib/src/mesa/tnl/t_vb_vertex.c', 'MesaLib/src/mesa/tnl/t_vertex.c', 'MesaLib/src/mesa/tnl/t_vertex.h', 'MesaLib/src/mesa/tnl/t_vertex_generic.c', 
'MesaLib/src/mesa/tnl/t_vertex_sse.c', 'MesaLib/src/mesa/tnl/t_vp_build.c', 'MesaLib/src/mesa/tnl/t_vp_build.h', 'MesaLib/src/mesa/tnl/tnl.h', 'MesaLib/src/mesa/vbo/vbo.h', 'MesaLib/src/mesa/vbo/vbo_attrib.h', 'MesaLib/src/mesa/vbo/vbo_attrib_tmp.h', 'MesaLib/src/mesa/vbo/vbo_context.c', 'MesaLib/src/mesa/vbo/vbo_context.h', 'MesaLib/src/mesa/vbo/vbo_exec.c', 'MesaLib/src/mesa/vbo/vbo_exec.h', 'MesaLib/src/mesa/vbo/vbo_exec_api.c', 'MesaLib/src/mesa/vbo/vbo_exec_array.c', 'MesaLib/src/mesa/vbo/vbo_exec_draw.c', 'MesaLib/src/mesa/vbo/vbo_exec_eval.c', 'MesaLib/src/mesa/vbo/vbo_rebase.c', 'MesaLib/src/mesa/vbo/vbo_save.c', 'MesaLib/src/mesa/vbo/vbo_save.h', 'MesaLib/src/mesa/vbo/vbo_save_api.c', 'MesaLib/src/mesa/vbo/vbo_save_draw.c', 'MesaLib/src/mesa/vbo/vbo_save_loopback.c', 'MesaLib/src/mesa/vbo/vbo_split.c', 'MesaLib/src/mesa/vbo/vbo_split.h', 'MesaLib/src/mesa/vbo/vbo_split_copy.c', 'MesaLib/src/mesa/vbo/vbo_split_inplace.c', ], }, # Building this target will hide the native OpenGL shared library and # replace it with a slow software renderer. { 'target_name': 'osmesa', 'type': 'loadable_module', 'mac_bundle': 0, 'dependencies': [ 'mesa', ], # Fixes link problems on Mac OS X with missing __cxa_pure_virtual. 'conditions': [ ['OS=="mac"', { 'sources': [ 'MesaLib/src/mesa/drivers/osmesa/empty.cpp', ], }], ], 'include_dirs': [ 'MesaLib/include', 'MesaLib/src/mapi', 'MesaLib/src/mesa', 'MesaLib/src/mesa/drivers', ], 'sources': [ 'MesaLib/src/mesa/drivers/common/driverfuncs.c', 'MesaLib/src/mesa/drivers/common/driverfuncs.h', 'MesaLib/src/mesa/drivers/common/meta.c', 'MesaLib/src/mesa/drivers/common/meta.h', 'MesaLib/src/mesa/drivers/osmesa/osmesa.c', 'MesaLib/src/mesa/drivers/osmesa/osmesa.def', ], }, ], }
"""Implement unmasked linear attention as a recurrent cross attention module to speed up autoregressive decoding.""" import torch from torch.nn import Module from ....attention_registry import RecurrentCrossAttentionRegistry, Optional, Int, \ Callable, EventDispatcherInstance from ....events import EventDispatcher from ....feature_maps import elu_feature_map class RecurrentCrossLinearAttention(Module): """Implement autoregressive linear cross attention as a recurrent module. See fast_transformers.attention.linear_attention.LinearAttention . Arguments --------- feature_map: callable, a callable that applies the feature map to the last dimension of a tensor (default: elu(x)+1) eps: float, a small number to ensure the numerical stability of the denominator (default: 1e-6) event_dispatcher: str or EventDispatcher instance to be used by this module for dispatching events (default: the default global dispatcher) """ def __init__(self, query_dimensions, feature_map=None, eps=1e-6, event_dispatcher=""): super(RecurrentCrossLinearAttention, self).__init__() self.feature_map = ( feature_map(query_dimensions) if feature_map else elu_feature_map(query_dimensions) ) self.eps = eps self.event_dispatcher = EventDispatcher.get(event_dispatcher) def forward(self, query, keys, values, key_lengths, state=None): # If this is a new sequence re initialize the feature map if state is None: self.feature_map.new_feature_map(query.device) # Compute the feature representation of the query Q = self.feature_map.forward_queries(query) # If the state is not given compute the key-value matrix and the # normalizers, namely compute whatever is needed in order to attend to # keys and values with a given query. if state is None: K = self.feature_map.forward_keys(keys) K = K * key_lengths.float_matrix[:, :, None, None] S = torch.einsum("nshd,nshm->nhmd", K, values) Z = K.sum(dim=1) else: S, Z = state # Given S and Z now we can efficiently compute the new value QZ = 1/(torch.einsum("nhd,nhd->nh", Q, Z)+self.eps) V = torch.einsum("nhd,nhmd,nh->nhm", Q, S, QZ) return V.contiguous(), [S, Z] # Register the attention implementation so that it becomes available in our # builders RecurrentCrossAttentionRegistry.register( "linear", RecurrentCrossLinearAttention, [ ("query_dimensions", Int), ("feature_map", Optional(Callable)), ("event_dispatcher", Optional(EventDispatcherInstance, "")) ] )
import json import os from collections import OrderedDict from utils import codelist, loadJsonFile def pickone(nlsjson, sKey, tKey): sKeyList = sKey.split(">") tKeyList = tKey.split(">") sValue = getSValue(nlsjson,sKeyList) setTValue(nlsjson, tKeyList, sValue) def getSValue(nlsjson, sKeyList): for sk in sKeyList: if isinstance(nlsjson[sk], dict): nlsjson = nlsjson[sk] else: return nlsjson[sk] def setTValue(nlsjson, tKeyList, value): for tk in tKeyList: if tk in nlsjson and isinstance(nlsjson[tk],dict): nlsjson = nlsjson[tk] else: nlsjson[tk] = value def main(): dirName = input("NLS directory:") sourceKey = input("source key:") targetkey = input("target key:") dir = os.listdir(dirName) for fileName in dir: if fileName.replace(".json","") in codelist: j = loadJsonFile(dirName+"//"+fileName) pickone(j,sourceKey,targetkey) save = open(dirName+"//"+fileName, "w", encoding="UTF-8") save.write(json.dumps(j, indent=4, ensure_ascii=False))
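# --- Hedged illustration (added; not part of the original script) ---
# pickone copies the value found under the ">"-separated source key path
# into the target key path, for example:
if __name__ == "__main__":
    _nls = {"a": {"b": "hello"}, "c": "old"}
    pickone(_nls, "a>b", "c")
    print(_nls)  # -> {'a': {'b': 'hello'}, 'c': 'hello'}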
from banco import Banco
import random


class Respostas(object):
    """Simple CRUD helper for the 'respostas' (answers) table, using the Banco connection."""

    def inserirNovaResposta(self, respostas):
        banco = Banco()
        conn = banco.conexao.cursor()
        conn.execute(f""" INSERT INTO respostas (resposta) VALUES ('{respostas}') """)
        banco.conexao.commit()
        conn.close()
        print("New answer registered successfully!")

    def editarResposta(self, idresposta, respostas):
        banco = Banco()
        conn = banco.conexao.cursor()
        conn.execute(f""" UPDATE respostas SET resposta = '{respostas}' WHERE idresposta = '{idresposta}' """)
        banco.conexao.commit()
        conn.close()
        print('Answer updated successfully!')

    def excluirResposta(self, idresposta):
        banco = Banco()
        conn = banco.conexao.cursor()
        conn.execute(f""" DELETE FROM respostas WHERE idresposta = '{idresposta}' """)
        banco.conexao.commit()
        conn.close()
        print('Answer deleted successfully')

    def mostrarRespostasCadastradas(self):
        banco = Banco()
        conn = banco.conexao.cursor()
        conn.execute(""" SELECT idresposta, resposta FROM respostas """)
        for i in conn.fetchall():
            print(i)
        conn.close()

    def respostaAleatoria(self):
        banco = Banco()
        conn = banco.conexao.cursor()
        conn.execute(""" SELECT resposta FROM respostas ORDER BY RANDOM() limit 1; """)
        self.respostas_aleatorias = [list(i) for i in conn.fetchall()]
        return random.choice(self.respostas_aleatorias)
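# --- Hedged usage sketch (added; not part of the original module) ---
# Assumes Banco() from banco.py provides the DB-API style 'conexao' connection
# used above and that the 'respostas' table already exists.
#
#   r = Respostas()
#   r.inserirNovaResposta("Hello, world!")
#   r.mostrarRespostasCadastradas()
#   print(r.respostaAleatoria())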
# Generated by Django 3.1 on 2020-08-10 09:09 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Employee', fields=[ ('id', models.IntegerField(primary_key=True, serialize=False)), ('name', models.CharField(max_length=20)), ('sal', models.DecimalField(decimal_places=3, max_digits=10)), ], ), ]
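# Hedged usage note (added): with this file inside an app's migrations/ package,
# the Employee table above is created by Django's standard command:
#   python manage.py migrate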
# import pytest # import pandas as pd # import numpy as np # import pkg_resources, os # from io import StringIO # from epic.scripts.overlaps.overlaps import (_compute_region_overlap, # _create_overlap_matrix_regions) # from epic.config.genomes import (create_genome_size_dict, # get_effective_genome_length) # __author__ = "Endre Bakken Stovner https://github.com/endrebak/" # __license__ = "MIT" # @pytest.fixture # def region_matrixes(epic_overlap_intermediate_region_matrixes): # return [pd.read_table(f, sep=" ", index_col=0) for f in epic_overlap_intermediate_region_matrixes] # @pytest.fixture # def expected_result_region_overlap(): # df = pd.read_table("examples/epic-overlaps/region_overlap_result.csv", sep=" ", index_col=0) # return df # @pytest.mark.current # def test__compute_region_overlap(region_matrixes, expected_result_region_overlap): # region_matrix = region_matrixes[0] # df = _compute_region_overlap(region_matrix) # print(df) # # df.to_csv("examples/epic-overlaps/region_overlap_result.csv", sep=" ") # # print(df) # assert df.equals(expected_result_region_overlap) # @pytest.fixture # def expected_result_intermediate_region_matrix(epic_overlap_intermediate_region_matrixes): # df = pd.read_table(epic_overlap_intermediate_region_matrixes[0], sep=" ", index_col=0) # return df
#!/usr/bin/env python # This file is part of the pycalver project # https://gitlab.com/mbarkhau/pycalver # # Copyright (c) 2019 Manuel Barkhau (mbarkhau@gmail.com) - MIT License # SPDX-License-Identifier: MIT """ CLI module for PyCalVer. Provided subcommands: show, test, init, bump """ import sys import typing as typ import logging import subprocess as sp import click from . import vcs from . import config from . import rewrite from . import version _VERBOSE = 0 try: import pretty_traceback pretty_traceback.install() except ImportError: pass # no need to fail because of missing dev dependency click.disable_unicode_literals_warning = True VALID_RELEASE_VALUES = ("alpha", "beta", "dev", "rc", "post", "final") logger = logging.getLogger("pycalver.cli") def _configure_logging(verbose: int = 0) -> None: if verbose >= 2: log_format = "%(asctime)s.%(msecs)03d %(levelname)-7s %(name)-17s - %(message)s" log_level = logging.DEBUG elif verbose == 1: log_format = "%(levelname)-7s - %(message)s" log_level = logging.INFO else: log_format = "%(levelname)-7s - %(message)s" log_level = logging.INFO logging.basicConfig(level=log_level, format=log_format, datefmt="%Y-%m-%dT%H:%M:%S") logger.debug("Logging configured.") def _validate_release_tag(release: str) -> None: if release in VALID_RELEASE_VALUES: return logger.error(f"Invalid argument --release={release}") logger.error(f"Valid arguments are: {', '.join(VALID_RELEASE_VALUES)}") sys.exit(1) @click.group() @click.version_option(version="v202007.0036") @click.help_option() @click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.") def cli(verbose: int = 0) -> None: """Automatically update PyCalVer version strings on python projects.""" global _VERBOSE _VERBOSE = verbose @cli.command() @click.argument("old_version") @click.argument("pattern", default="{pycalver}") @click.option('-v', '--verbose', count=True, help="Control log level. 
-vv for debug level.") @click.option( "--release", default=None, metavar="<name>", help="Override release name of current_version" ) @click.option("--major", is_flag=True, default=False, help="Increment major component.") @click.option("--minor", is_flag=True, default=False, help="Increment minor component.") @click.option("--patch", is_flag=True, default=False, help="Increment patch component.") def test( old_version: str, pattern : str = "{pycalver}", verbose : int = 0, release : str = None, major : bool = False, minor : bool = False, patch : bool = False, ) -> None: """Increment a version number for demo purposes.""" _configure_logging(verbose=max(_VERBOSE, verbose)) if release: _validate_release_tag(release) new_version = version.incr( old_version, pattern=pattern, release=release, major=major, minor=minor, patch=patch ) if new_version is None: logger.error(f"Invalid version '{old_version}' and/or pattern '{pattern}'.") sys.exit(1) pep440_version = version.to_pep440(new_version) click.echo(f"New Version: {new_version}") click.echo(f"PEP440 : {pep440_version}") def _update_cfg_from_vcs(cfg: config.Config, fetch: bool) -> config.Config: try: vcs_api = vcs.get_vcs_api() logger.debug(f"vcs found: {vcs_api.name}") if fetch: logger.info("fetching tags from remote (to turn off use: -n / --no-fetch)") vcs_api.fetch() version_tags = [ tag for tag in vcs_api.ls_tags() if version.is_valid(tag, cfg.version_pattern) ] if version_tags: version_tags.sort(reverse=True) logger.debug(f"found {len(version_tags)} tags: {version_tags[:2]}") latest_version_tag = version_tags[0] latest_version_pep440 = version.to_pep440(latest_version_tag) if latest_version_tag > cfg.current_version: logger.info(f"Working dir version : {cfg.current_version}") logger.info(f"Latest version from {vcs_api.name:>3} tag: {latest_version_tag}") cfg = cfg._replace( current_version=latest_version_tag, pep440_version=latest_version_pep440 ) else: logger.debug("no vcs tags found") except OSError: logger.debug("No vcs found") return cfg @cli.command() @click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.") @click.option( "-f/-n", "--fetch/--no-fetch", is_flag=True, default=True, help="Sync tags from remote origin." ) def show(verbose: int = 0, fetch: bool = True) -> None: """Show current version.""" _configure_logging(verbose=max(_VERBOSE, verbose)) ctx: config.ProjectContext = config.init_project_ctx(project_path=".") cfg: config.MaybeConfig = config.parse(ctx) if cfg is None: logger.error("Could not parse configuration. Perhaps try 'pycalver init'.") sys.exit(1) cfg = _update_cfg_from_vcs(cfg, fetch=fetch) click.echo(f"Current Version: {cfg.current_version}") click.echo(f"PEP440 : {cfg.pep440_version}") @cli.command() @click.option('-v', '--verbose', count=True, help="Control log level. -vv for debug level.") @click.option( "--dry", default=False, is_flag=True, help="Display diff of changes, don't rewrite files." ) def init(verbose: int = 0, dry: bool = False) -> None: """Initialize [pycalver] configuration.""" _configure_logging(verbose=max(_VERBOSE, verbose)) ctx: config.ProjectContext = config.init_project_ctx(project_path=".") cfg: config.MaybeConfig = config.parse(ctx) if cfg: logger.error(f"Configuration already initialized in {ctx.config_filepath}") sys.exit(1) if dry: click.echo(f"Exiting because of '--dry'. 
Would have written to {ctx.config_filepath}:") cfg_text: str = config.default_config(ctx) click.echo("\n " + "\n ".join(cfg_text.splitlines())) sys.exit(0) config.write_content(ctx) def _assert_not_dirty(vcs_api: vcs.VCSAPI, filepaths: typ.Set[str], allow_dirty: bool) -> None: dirty_files = vcs_api.status(required_files=filepaths) if dirty_files: logger.warning(f"{vcs_api.name} working directory is not clean. Uncomitted file(s):") for dirty_file in dirty_files: logger.warning(" " + dirty_file) if not allow_dirty and dirty_files: sys.exit(1) dirty_pattern_files = set(dirty_files) & filepaths if dirty_pattern_files: logger.error("Not commiting when pattern files are dirty:") for dirty_file in dirty_pattern_files: logger.warning(" " + dirty_file) sys.exit(1) def _commit( cfg: config.Config, new_version: str, vcs_api: vcs.VCSAPI, filepaths: typ.Set[str] ) -> None: for filepath in filepaths: vcs_api.add(filepath) vcs_api.commit(f"bump version to {new_version}") if cfg.commit and cfg.tag: vcs_api.tag(new_version) if cfg.commit and cfg.tag and cfg.push: vcs_api.push(new_version) def _bump(cfg: config.Config, new_version: str, allow_dirty: bool = False) -> None: vcs_api: typ.Optional[vcs.VCSAPI] = None if cfg.commit: try: vcs_api = vcs.get_vcs_api() except OSError: logger.warning("Version Control System not found, aborting commit.") filepaths = set(cfg.file_patterns.keys()) if vcs_api: _assert_not_dirty(vcs_api, filepaths, allow_dirty) try: new_vinfo = version.parse_version_info(new_version, cfg.version_pattern) rewrite.rewrite(cfg.file_patterns, new_vinfo) except Exception as ex: logger.error(str(ex)) sys.exit(1) if vcs_api: _commit(cfg, new_version, vcs_api, filepaths) def _try_bump(cfg: config.Config, new_version: str, allow_dirty: bool = False) -> None: try: _bump(cfg, new_version, allow_dirty) except sp.CalledProcessError as ex: logger.error(f"Error running subcommand: {ex.cmd}") if ex.stdout: sys.stdout.write(ex.stdout.decode('utf-8')) if ex.stderr: sys.stderr.write(ex.stderr.decode('utf-8')) sys.exit(1) def _print_diff(cfg: config.Config, new_version: str) -> None: new_vinfo = version.parse_version_info(new_version, cfg.version_pattern) diff: str = rewrite.diff(new_vinfo, cfg.file_patterns) if sys.stdout.isatty(): for line in diff.splitlines(): if line.startswith("+++") or line.startswith("---"): click.echo(line) elif line.startswith("+"): click.echo("\u001b[32m" + line + "\u001b[0m") elif line.startswith("-"): click.echo("\u001b[31m" + line + "\u001b[0m") elif line.startswith("@"): click.echo("\u001b[36m" + line + "\u001b[0m") else: click.echo(line) else: click.echo(diff) def _try_print_diff(cfg: config.Config, new_version: str) -> None: try: _print_diff(cfg, new_version) except Exception as ex: logger.error(str(ex)) sys.exit(1) @cli.command() @click.option("-v", "--verbose", count=True, help="Control log level. -vv for debug level.") @click.option( "-f/-n", "--fetch/--no-fetch", is_flag=True, default=True, help="Sync tags from remote origin." ) @click.option( "--dry", default=False, is_flag=True, help="Display diff of changes, don't rewrite files." ) @click.option( "--release", default=None, metavar="<name>", help=( f"Override release name of current_version. Valid options are: " f"{', '.join(VALID_RELEASE_VALUES)}." ), ) @click.option( "--allow-dirty", default=False, is_flag=True, help=( "Commit even when working directory is has uncomitted changes. " "(WARNING: The commit will still be aborted if there are uncomitted " "to files with version strings." 
), ) @click.option("--major", is_flag=True, default=False, help="Increment major component.") @click.option("--minor", is_flag=True, default=False, help="Increment minor component.") @click.option("--patch", is_flag=True, default=False, help="Increment patch component.") def bump( release : typ.Optional[str] = None, verbose : int = 0, dry : bool = False, allow_dirty: bool = False, fetch : bool = True, major : bool = False, minor : bool = False, patch : bool = False, ) -> None: """Increment the current version string and update project files.""" verbose = max(_VERBOSE, verbose) _configure_logging(verbose) if release: _validate_release_tag(release) ctx: config.ProjectContext = config.init_project_ctx(project_path=".") cfg: config.MaybeConfig = config.parse(ctx) if cfg is None: logger.error("Could not parse configuration. Perhaps try 'pycalver init'.") sys.exit(1) cfg = _update_cfg_from_vcs(cfg, fetch=fetch) old_version = cfg.current_version new_version = version.incr( old_version, pattern=cfg.version_pattern, release=release, major=major, minor=minor, patch=patch, ) if new_version is None: is_semver = "{semver}" in cfg.version_pattern has_semver_inc = major or minor or patch if is_semver and not has_semver_inc: logger.warning("bump --major/--minor/--patch required when using semver.") else: logger.error(f"Invalid version '{old_version}' and/or pattern '{cfg.version_pattern}'.") sys.exit(1) logger.info(f"Old Version: {old_version}") logger.info(f"New Version: {new_version}") if dry or verbose >= 2: _try_print_diff(cfg, new_version) if dry: return _try_bump(cfg, new_version, allow_dirty) if __name__ == '__main__': cli()
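# --- Hedged usage examples (added; not part of the original module) ---
# Typical invocations of the commands defined above, assuming the package's
# console entry point is installed as `pycalver`:
#   pycalver test 'v201812.0005' '{pycalver}' --release beta
#   pycalver show --no-fetch
#   pycalver bump --dry --patch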
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # flatlib documentation build configuration file, created by # sphinx-quickstart on Mon Apr 13 18:08:41 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. #templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'flatlib' copyright = '2015, João Ventura' author = 'João Ventura' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2' # The full version, including alpha/beta/rc tags. release = '0.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'flatlibdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
#'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'flatlib.tex', 'flatlib Documentation', 'João Ventura', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'flatlib', 'flatlib Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'flatlib', 'flatlib Documentation', author, 'flatlib', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
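# Hedged usage note (added): with this conf.py in the documentation source
# directory, the HTML docs are typically built with:
#   sphinx-build -b html . _build/html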
import random random.seed(1) companies = [] combinations = {'Web Developer' : ['Computer Science', 'Software Engineering',], 'Marketing Intern' : ['Business', 'Finance', 'Economics'], 'Software Developer' : ['Computer Science', 'Software Engineering'], 'Assistant Nurse' : ['Nursing', 'Health Sciences', 'Biological Sciences'], 'Database Manager' : ['Computer Science', 'Data Science', 'Software Engineering', 'Information Systems'], 'Chemical Engineer' : ['Chemical Engineering'], 'Mechanical Engineer' : ['Mechanical Engineering'], 'Financial Analyst' : ['Finance', 'Mathematics', 'Economics', 'Business Analytics', 'Business'], 'Engineering Intern' : ['Engineering', 'Mechanical Engineering', 'Electrical Engineering', 'Computer Engineering'], 'Commercial Building Plan Reviewer' : ['Civil Engineering', 'Architectural Engineering', 'Mechanical Engineering'], 'Junior Interior Designer' : ['Interior Design', 'Architecture'], 'Social Media Manager' : ['English', 'Communications', 'Computer Science', 'Digital Media'], 'Public Relations Specialist' : ['Communications', 'English'], 'Games Designer' : ['Game Design and Production', 'Computer Science'], 'Graphic Designer' : ['Graphic Design', 'Digital Media', 'Product Design', 'Design and Merchandising'], 'Research Assistant' : ['Biology', 'Health Sciences', 'Nutrition and Foods'], 'Secruity Technician' : ['Computer Science', 'Computing and Security Technology', 'Software Engineering', 'Computer Engineering'], 'Field Service Engineer' : ['Electrical Engineering', 'Mechanical Engineering', 'Biomedical Engineering', 'Civil Engineering'], 'Digital Media Analyst' : ['Digital Media', 'Business', 'Business Analytics', 'Marketing'], 'Business Analyst' : ['Business', 'Economics']} locations = ['Center City Philadelphia', 'University City Philadelphia', 'Cherry Hill', 'Baltimore', 'Haddonfield', 'New York', 'Pittsburgh', 'Springfield', 'Moorestown', 'Trenton', 'Wilmington'] paid = ['True', 'True', 'True', 'True', 'False'] majors = ['Accounting', 'Air Force ROTC', 'Animation & Visual Effects', 'Anthropology', 'Architectural Engineering', 'Architecture', 'Army ROTC', 'Art History', 'Behavioral Health Counseling', 'Biological Sciences', 'Biomedical Engineering', 'Business', 'Business and Engineering', 'Business Analytics', 'Business Administration', 'Business Economics', 'Chemical Engineering', 'Chemistry', 'Chemistry-Biochemistry Concentration', 'Civil Engineering', 'Communication', 'Computer Engineering', 'Computer Science', 'Computing and Security Technology', 'Construction Management', 'Criminology and Justice Studies', 'Culinary Arts & Science', 'Custom-Designed Major', 'Dance', 'Data Science', 'Design and Merchandising', 'Design of Learning Technologies', 'Digital Media', 'Economics', 'Electrical Engineering', 'Elementary Education', 'Engineering', 'Engineering Management', 'Engineering Technology', 'English', 'English / Publishing', 'Entertainment and Arts Management', 'Entrepreneurship and Innovation', 'Environmental Engineering', 'Environmental Science', 'Environmental Studies and Sustainability', 'Fashion Design', 'Film and TV Production', 'Finance', 'Game Design and Production', 'General Business', 'General Studies', 'Geoscience', 'Global Studies', 'Global Studies / Public Health', 'Graphic Design', 'Health Sciences', 'Health Services Administration', 'History', 'Hospitality Management', 'Information Systems', 'Interactive Digital Media', 'International Business', 'Interior Design', 'Legal Studies', 'Management Information Systems', 'Marketing', 'Materials 
Science and Engineering', 'Mathematics', 'Mechanical Engineering', 'Music Industry', 'Navy ROTC', 'Nursing', 'Nursing: RN-MSN Bridge Program', 'Nursing: Accelerated', 'Nutrition and Foods', 'Nutrition Sciences: Accelerated', 'Operations and Supply Chain Management', 'Organizational Management', 'Philosophy', 'Photography', 'Physics', 'Political Science', 'Product Design', 'Psychology', 'Public Health', 'Public Health: Accelerated', 'Real Estate Management and Development', 'ROTC', 'Screenwriting & Playwriting', 'Sociology', 'Software Engineering', 'Sport Management', 'Sport Management / Business Administration', 'Systems Engineering', 'Teacher Education', 'Technology Innovation Management', 'Virtual Reality & Immersive Media', 'Westphal Studies Program'] com = open('new_companies.txt', 'r') for i in com.readlines(): temp = i.replace(' \n', '') companies.append(temp) c = 0 jobs = open('jobs.txt', 'a+') jobs.truncate(0) while c <= 49: l = [] f = open('template.txt', 'r') for i in f.readlines(): l.append(i) l[1] = l[1].replace('i.d.', str(c)) company_remove = random.choice(companies) l[2] = l[2].replace('company', company_remove) companies.remove(company_remove) title = random.choice(list(combinations.keys())) l[3] = l[3].replace('title', title) l[4] = l[4].replace('bool', random.choice(paid)) k = 0 store = '' while k < len(combinations[title]): major_id = 0 for i, j in enumerate(majors): if combinations[title][k] == j: if k == len(combinations[title])-1: store += str(i) else: store += str(i)+', ' k += 1 l[5] = l[5].replace('i.d.', store) l[5] = l[5].replace('Major ', 'Major') l[6] = l[6].replace('location', random.choice(locations)) if c == 49: l[7] = l[7].replace(',', '') c += 1 for i in l: jobs.write(i) f.close() jobs.close()
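# A hedged side note on the index lookup in the generator script above: the nested scan
# over `majors` for every major name can be replaced by a dictionary built once. This
# sketch assumes the same `majors` list and `combinations` dict defined in the script;
# `major_index` and `major_ids_for` are illustrative names, not part of the original code.
major_index = {name: i for i, name in enumerate(majors)}

def major_ids_for(title):
    # Comma-separated indices, matching the `store` string the while-loop builds.
    return ', '.join(str(major_index[m]) for m in combinations[title])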
""" Copyright (c) 2021 Heureka Group a.s. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import time import socket import os from typing import Union from aioredis.connection import RedisConnection from aioredis.pool import ConnectionsPool from aioredis.commands import Redis from aiopyrq import helpers from aiopyrq.script import Script CHUNK_SIZE = 10 SET_QUEUE_SUFFIX = '-unique' PROCESSING_SUFFIX = '-processing' PROCESSING_TIMEOUT_SUFFIX = '-timeouts' PROCESSING_TIMEOUT = 7200 # seconds DEFAULT_SYNC_SLAVES_COUNT = 0 DEFAULT_SYNC_SLAVES_TIMEOUT = 100 class UniqueQueue(object): """ UniqueQueue is a queue implemented using List as the queue, multiple Lists as the processing queues and a Hash as a storage for processing queue timeouts (so you can tell which processing queue is expired). UniqueQueue items are always unique, adding the same item more than once will be ignored. There is no priority whatsoever - items are processed as they were inserted into the queue. Queue needs a garbage collector process because the queue creates a processing queues every time you request items from it. This process is implemented by the methods reEnqueue* and drop* of this class and they should be called before getting the items or periodically (if you don't care about the order of the items). author: Heureka.cz <vyvoj@heureka.cz> """ def __init__(self, queue_name: str, redis: Union[ConnectionsPool, RedisConnection, Redis], **kwargs): """ :param queue_name: Name of the queue :param redis: Redis client :param **kwargs: [ synced_slaves_enabled: bool Enables slave synchronous syncing synced_slaves_count: int Number of slaves that need to be synced in order to continue synced_slaves_timeout: int Timeout for syncing slaves. 
If reached, exception is raised ] :return: """ self.client_id = '{0}[{1}][{2}]'.format(socket.gethostname(), os.getpid(), int(time.time())) if isinstance(redis, (ConnectionsPool, RedisConnection)): redis = Redis(redis) self.redis = redis self.queue_name = queue_name self.options = kwargs self._register_commands() def _register_commands(self) -> None: self.add_command = self._register_script(self.QueueCommand.add()) self.ack_command = self._register_script(self.QueueCommand.ack()) self.get_command = self._register_script(self.QueueCommand.get()) self.reject_command = self._register_script(self.QueueCommand.reject()) self.re_enqueue_command = self._register_script(self.QueueCommand.re_enqueue()) def _register_script(self, script: str) -> Script: return Script(self.redis, script) async def get_count(self) -> int: """ :return: Number of items in the queue """ return await self.redis.llen(self.queue_name) async def add_item(self, item) -> None: """ :param item: Anything that is convertible to str """ await self.add_command(keys=[self.queue_name, self.set_name], args=[str(item)]) await self._wait_for_synced_slaves() async def add_items(self, items: list) -> None: """ :param items: List of items to be added via pipeline """ for chunk in helpers.create_chunks(items, CHUNK_SIZE): pipeline = self.redis.pipeline() for item in chunk: await self.add_command(keys=[self.queue_name, self.set_name], args=[str(item)], client=pipeline) await pipeline.execute() await self._wait_for_synced_slaves() async def get_items(self, count: int) -> list: """ :param count: Number of items to be returned :return: List of items """ return await self.get_command(keys=[self.queue_name, self.set_name, self.processing_queue_name, self.timeouts_hash_name], args=[count, int(time.time())]) async def ack_item(self, item) -> None: """ :param item: Anything that is convertible to str """ await self.ack_command(keys=[self.processing_queue_name, self.timeouts_hash_name], args=[str(item)]) await self._wait_for_synced_slaves() async def ack_items(self, items: list) -> None: """ :param items: List of items that are convertible to str """ pipeline = self.redis.pipeline() for item in items: await self.ack_command(keys=[self.processing_queue_name, self.timeouts_hash_name], args=[str(item)], client=pipeline) await pipeline.execute() await self._wait_for_synced_slaves() async def reject_item(self, item) -> None: """ :param item: Anything that is convertible to str """ await self.reject_command(keys=[self.queue_name, self.set_name, self.processing_queue_name, self.timeouts_hash_name], args=[str(item)]) await self._wait_for_synced_slaves() async def reject_items(self, items: list) -> None: """ :param items: List of items that are convertible to str """ pipeline = self.redis.pipeline() for item in reversed(items): await self.reject_command(keys=[self.queue_name, self.set_name, self.processing_queue_name, self.timeouts_hash_name], args=[str(item)], client=pipeline) await pipeline.execute() await self._wait_for_synced_slaves() async def re_enqueue_timeout_items(self, timeout: int=PROCESSING_TIMEOUT) -> None: """ :param timeout: int seconds """ sorted_processing_queues = await self._get_sorted_processing_queues() for queue, value_time in sorted_processing_queues: if int(float(value_time)) + timeout < int(time.time()): await self.re_enqueue_command(keys=[self.queue_name, self.set_name, queue, self.timeouts_hash_name]) await self._wait_for_synced_slaves() async def re_enqueue_all_items(self) -> None: sorted_processing_queues = await 
self._get_sorted_processing_queues() for queue, value_time in sorted_processing_queues: await self.re_enqueue_command(keys=[self.queue_name, self.set_name, queue, self.timeouts_hash_name]) await self._wait_for_synced_slaves() async def drop_timeout_items(self, timeout: int=PROCESSING_TIMEOUT) -> None: """ :param timeout: int seconds """ sorted_processing_queues = await self._get_sorted_processing_queues() for queue, value_time in sorted_processing_queues: if int(float(value_time)) + timeout < int(time.time()): await self.redis.delete(queue) await self.redis.hdel(self.timeouts_hash_name, queue) await self._wait_for_synced_slaves() async def drop_all_items(self) -> None: sorted_processing_queues = await self._get_sorted_processing_queues() for queue, value_time in sorted_processing_queues: await self.redis.delete(queue) await self.redis.hdel(self.timeouts_hash_name, queue) await self._wait_for_synced_slaves() async def _get_sorted_processing_queues(self) -> list: return sorted(await helpers.async_iterate_to_list(self.redis.ihscan(self.timeouts_hash_name)), reverse=True) @property def set_name(self) -> str: """ :return: Name of the set queue """ return self.queue_name + SET_QUEUE_SUFFIX @property def processing_queue_name(self) -> str: """ :return: Name of the processing queue """ return self.queue_name + PROCESSING_SUFFIX + '-' + self.client_id @property def timeouts_hash_name(self) -> str: """ :return: Name of the timeouts hash """ return self.queue_name + PROCESSING_TIMEOUT_SUFFIX async def _wait_for_synced_slaves(self) -> None: if self.options.get('synced_slaves_enabled'): count = self.options['synced_slaves_count'] if self.options['synced_slaves_count'] \ else DEFAULT_SYNC_SLAVES_COUNT timeout = self.options['synced_slaves_timeout'] if self.options['synced_slaves_timeout'] \ else DEFAULT_SYNC_SLAVES_TIMEOUT await helpers.wait_for_synced_slaves(self.redis, count, timeout) class QueueCommand(object): @staticmethod def add(): """ :return: LUA Script for ACK command """ return """ local queue = KEYS[1] local set = KEYS[2] local item = ARGV[1] local inQueue = redis.call('sismember', set, item) if inQueue == 0 then redis.call('lpush', queue, item) redis.call('sadd', set, item) end """ @staticmethod def ack(): """ :return: LUA Script for ACK command """ return """ local processing = KEYS[1] local timeouts = KEYS[2] local item = ARGV[1] local result = redis.call('lrem', processing, -1, item) local count = redis.call('llen', processing) if count == 0 then redis.call('hdel', timeouts, processing) end """ @staticmethod def get(): """ :return: LUA Script for GET command """ return """ local queue = KEYS[1] local set = KEYS[2] local processing = KEYS[3] local timeouts = KEYS[4] local size = ARGV[1] local time = ARGV[2] redis.call('hset', timeouts, processing, time) local item local items = {} for i = 1, size, 1 do item = redis.call('rpoplpush', queue, processing) if not item then break end redis.call('srem', set, item) table.insert(items, item) end return items """ @staticmethod def reject(): """ :return: LUA Script for REJECT command """ return """ local queue = KEYS[1] local set = KEYS[2] local processing = KEYS[3] local timeouts = KEYS[4] local item = ARGV[1] local removed = redis.call('lrem', processing, -1, item) if removed == 1 then local inQueue = redis.call('sismember', set, item) if inQueue == 0 then redis.call('rpush', queue, item) redis.call('sadd', set, item) end end local count = redis.call('llen', processing) if count == 0 then redis.call('hdel', timeouts, processing) end """ 
@staticmethod def re_enqueue(): """ :return: LUA Script for reject queue """ return """ local queue = KEYS[1] local set = KEYS[2] local processing = KEYS[3] local timeouts = KEYS[4] local item local inQueue while true do item = redis.call('lpop', processing); if not item then break end inQueue = redis.call('sismember', set, item) if inQueue == 0 then redis.call('rpush', queue, item) redis.call('sadd', set, item) else redis.call('lrem', queue, -1, item) redis.call('rpush', queue, item) end end redis.call('hdel', timeouts, processing) """
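# A minimal, hedged usage sketch for the UniqueQueue class defined above. It assumes a
# Redis server reachable at redis://localhost and aioredis 1.x; the queue name and the
# item payloads are illustrative only.
import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis_pool('redis://localhost')
    queue = UniqueQueue('example-queue', redis)

    await queue.add_items(['job-1', 'job-2', 'job-1'])  # the duplicate 'job-1' is ignored
    await queue.re_enqueue_timeout_items()              # recover items from expired processing queues
    for item in await queue.get_items(10):
        await queue.ack_item(item)                      # or reject_item() to push it back

    redis.close()
    await redis.wait_closed()

asyncio.run(main())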
import uuid import py from rply import ParserGenerator, Token from rply.errors import ParserGeneratorError from .base import BaseTests class TestParserGenerator(BaseTests): def test_production_syntax_error(self): pg = ParserGenerator([]) with py.test.raises(ParserGeneratorError): pg.production("main VALUE") def test_production_terminal_overlap(self): pg = ParserGenerator(["VALUE"]) @pg.production("VALUE :") def x(p): pass with py.test.raises(ParserGeneratorError): pg.build() def test_duplicate_precedence(self): pg = ParserGenerator([], precedence=[ ("left", ["term", "term"]) ]) with py.test.raises(ParserGeneratorError): pg.build() def test_invalid_associativity(self): pg = ParserGenerator([], precedence=[ ("to-the-left", ["term"]), ]) with py.test.raises(ParserGeneratorError): pg.build() def test_nonexistent_precedence(self): pg = ParserGenerator(["VALUE"]) @pg.production("main : VALUE", precedence="abc") def main(p): pass with py.test.raises(ParserGeneratorError): pg.build() def test_error_symbol(self): pg = ParserGenerator(["VALUE"]) @pg.production("main : VALUE") def main(p): pass @pg.production("main : error") def main_error(p): pass pg.build() def test_pipe_production(self): pg = ParserGenerator(["VALUE1", "VALUE2"]) @pg.production("main : VALUE1 | VALUE2") def main(p): return p[0] parser = pg.build() assert len(pg.productions) == 2 assert parser.parse(iter([ Token("VALUE1", "3") ])) == Token("VALUE1", "3") assert parser.parse(iter([ Token("VALUE2", "3") ])) == Token("VALUE2", "3") class TestParserCaching(object): def test_simple_caching(self): # Generate a random cache_id so that every test run does both the cache # write and read paths. pg = ParserGenerator(["VALUE"], cache_id=str(uuid.uuid4())) @pg.production("main : VALUE") def main(p): return p[0] pg.build() parser = pg.build() assert parser.parse(iter([ Token("VALUE", "3") ])) == Token("VALUE", "3")
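# A short, hedged sketch of the ParserGenerator API exercised by the tests above: a
# one-rule grammar over a single NUMBER token. The lexer is bypassed by feeding Token
# objects directly, as the tests do; the token and rule names here are illustrative.
from rply import ParserGenerator, Token

pg = ParserGenerator(["NUMBER"])

@pg.production("expr : NUMBER")
def expr_number(p):
    # p is the list of matched symbols; p[0] is the NUMBER token
    return int(p[0].getstr())

parser = pg.build()
assert parser.parse(iter([Token("NUMBER", "42")])) == 42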
# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- # pylint: disable=line-too-long # pylint: disable=too-many-lines # pylint: disable=unused-argument # pylint: disable=too-many-locals from azure.cli.core.util import sdk_no_wait def providerhub_custom_rollout_list(client, provider_namespace): return client.list_by_provider_registration(provider_namespace=provider_namespace) def providerhub_custom_rollout_show(client, provider_namespace, rollout_name): return client.get(provider_namespace=provider_namespace, rollout_name=rollout_name) def providerhub_custom_rollout_create(client, provider_namespace, rollout_name, canary): properties = {"specification": {"Canary": canary}} return client.create_or_update( provider_namespace=provider_namespace, rollout_name=rollout_name, properties=properties, ) def providerhub_custom_rollout_update(client, provider_namespace, rollout_name, canary): properties = {"specification": {"Canary": canary}} return client.create_or_update( provider_namespace=provider_namespace, rollout_name=rollout_name, properties=properties, ) def providerhub_default_rollout_list(client, provider_namespace): return client.list_by_provider_registration(provider_namespace=provider_namespace) def providerhub_default_rollout_show(client, provider_namespace, rollout_name): return client.get(provider_namespace=provider_namespace, rollout_name=rollout_name) def providerhub_default_rollout_create( client, provider_namespace, rollout_name, row2_wait_duration, skip_regions=None, no_wait=False, ): return sdk_no_wait( no_wait, client.begin_create_or_update, provider_namespace=provider_namespace, rollout_name=rollout_name, row2_wait_duration=row2_wait_duration, skip_regions=skip_regions, ) def providerhub_default_rollout_update( client, provider_namespace, rollout_name, row2_wait_duration, skip_regions=None, no_wait=False, ): return sdk_no_wait( no_wait, client.begin_create_or_update, provider_namespace=provider_namespace, rollout_name=rollout_name, row2_wait_duration=row2_wait_duration, skip_regions=skip_regions, ) def providerhub_default_rollout_delete(client, provider_namespace, rollout_name): return client.delete( provider_namespace=provider_namespace, rollout_name=rollout_name ) def providerhub_default_rollout_stop(client, provider_namespace, rollout_name): return client.stop(provider_namespace=provider_namespace, rollout_name=rollout_name) def providerhub_manifest_checkin( client, provider_namespace, environment, arm_manifest_location ): checkin_manifest_params = {} checkin_manifest_params["environment"] = environment checkin_manifest_params[ "baseline_arm_manifest_location" ] = arm_manifest_location return client.checkin_manifest( provider_namespace=provider_namespace, checkin_manifest_params=checkin_manifest_params, ) def providerhub_manifest_generate(client, provider_namespace): return client.generate_manifest(provider_namespace=provider_namespace) def providerhub_operation_list(client, provider_namespace): return client.list_by_provider_registration(provider_namespace=provider_namespace) def providerhub_operation_create(client, provider_namespace): return 
client.create_or_update(provider_namespace=provider_namespace) def providerhub_operation_update(client, provider_namespace): return client.create_or_update(provider_namespace=provider_namespace) def providerhub_operation_delete(client, provider_namespace): return client.delete(provider_namespace=provider_namespace) def providerhub_provider_registration_list(client, resource_group_name=None): if resource_group_name: return client.list_by_resource_group(resource_group_name=resource_group_name) return client.list() def providerhub_provider_registration_show(client, provider_namespace): return client.get(provider_namespace=provider_namespace) def providerhub_provider_registration_create( client, provider_namespace, provider_version="2.0", namespace=None, provider_type=None, provider_authentication=None, provider_authorizations=None, capabilities=None, metadata=None, template_deployment_options=None, schema_owners=None, manifest_owners=None, incident_routing_service=None, incident_routing_team=None, incident_contact_email=None, service_tree_infos=None, resource_access_policy=None, opt_in_headers=None, required_features_policy=None, managed_by_tenant_id=None, providerhub_metadata_provider_authorizations=None, providerhub_metadata_rp_authentication=None, lighthouse_authorizations=None, no_wait=False, ): return sdk_no_wait( no_wait, client.begin_create_or_update, provider_namespace=provider_namespace, provider_authentication=provider_authentication, provider_authorizations=provider_authorizations, namespace=namespace, provider_version=provider_version, provider_type=provider_type, capabilities=capabilities, metadata=metadata, template_deployment_options=template_deployment_options, schema_owners=schema_owners, manifest_owners=manifest_owners, incident_routing_service=incident_routing_service, incident_routing_team=incident_routing_team, incident_contact_email=incident_contact_email, service_tree_infos=service_tree_infos, resource_access_policy=resource_access_policy, opt_in_headers=opt_in_headers, required_features_policy=required_features_policy, managed_by_tenant_id=managed_by_tenant_id, providerhub_metadata_provider_authorizations=providerhub_metadata_provider_authorizations, providerhub_metadata_rp_authentication=providerhub_metadata_rp_authentication, lighthouse_authorizations=lighthouse_authorizations ) def providerhub_provider_registration_update( client, provider_namespace, provider_version="2.0", provider_type=None, provider_authentication=None, provider_authorizations=None, capabilities=None, metadata=None, template_deployment_options=None, schema_owners=None, manifest_owners=None, incident_routing_service=None, incident_routing_team=None, incident_contact_email=None, service_tree_infos=None, resource_access_policy=None, opt_in_headers=None, required_features_policy=None, managed_by_tenant_id=None, providerhub_metadata_provider_authorizations=None, providerhub_metadata_rp_authentication=None, lighthouse_authorizations=None, no_wait=False, ): return sdk_no_wait( no_wait, client.begin_create_or_update, provider_namespace=provider_namespace, provider_authentication=provider_authentication, provider_authorizations=provider_authorizations, provider_version=provider_version, provider_type=provider_type, capabilities=capabilities, metadata=metadata, template_deployment_options=template_deployment_options, schema_owners=schema_owners, manifest_owners=manifest_owners, incident_routing_service=incident_routing_service, incident_routing_team=incident_routing_team, 
incident_contact_email=incident_contact_email, service_tree_infos=service_tree_infos, resource_access_policy=resource_access_policy, opt_in_headers=opt_in_headers, required_features_policy=required_features_policy, managed_by_tenant_id=managed_by_tenant_id, providerhub_metadata_provider_authorizations=providerhub_metadata_provider_authorizations, providerhub_metadata_rp_authentication=providerhub_metadata_rp_authentication, lighthouse_authorizations=lighthouse_authorizations ) def providerhub_provider_registration_delete(client, provider_namespace): return client.delete(provider_namespace=provider_namespace) def providerhub_provider_registration_generate_operation(client, provider_namespace): return client.generate_operations(provider_namespace=provider_namespace) def providerhub_resource_type_registration_list(client, provider_namespace): return client.list_by_provider_registration(provider_namespace=provider_namespace) def providerhub_resource_type_registration_show( client, provider_namespace, resource_type ): return client.get( provider_namespace=provider_namespace, resource_type=resource_type ) def providerhub_resource_type_registration_create( # pylint: disable=too-many-locals client, provider_namespace, resource_type, routing_type=None, regionality=None, endpoints=None, marketplace_type=None, resource_creation_begin=None, resource_patch_begin=None, swagger_specifications=None, allowed_unauthorized_actions=None, authorization_action_mappings=None, linked_access_checks=None, default_api_version=None, logging_rules=None, throttling_rules=None, required_features=None, enable_async_operation=None, enable_third_party_s2s=None, is_pure_proxy=None, identity_management=None, check_name_availability_specifications=None, disallowed_action_verbs=None, service_tree_infos=None, subscription_state_rules=None, template_deployment_options=None, extended_locations=None, resource_move_policy=None, resource_deletion_policy=None, opt_in_headers=None, required_features_policy=None, ): return client.begin_create_or_update( provider_namespace=provider_namespace, resource_type=resource_type, routing_type=routing_type, regionality=regionality, endpoints=endpoints, resource_creation_begin=resource_creation_begin, resource_patch_begin=resource_patch_begin, marketplace_type=marketplace_type, swagger_specifications=swagger_specifications, allowed_unauthorized_actions=allowed_unauthorized_actions, authorization_action_mappings=authorization_action_mappings, linked_access_checks=linked_access_checks, default_api_version=default_api_version, logging_rules=logging_rules, throttling_rules=throttling_rules, required_features=required_features, enable_async_operation=enable_async_operation, enable_third_party_s2s=enable_third_party_s2s, is_pure_proxy=is_pure_proxy, identity_management=identity_management, check_name_availability_specifications=check_name_availability_specifications, disallowed_action_verbs=disallowed_action_verbs, service_tree_infos=service_tree_infos, subscription_state_rules=subscription_state_rules, template_deployment_options=template_deployment_options, extended_locations=extended_locations, resource_move_policy=resource_move_policy, resource_deletion_policy=resource_deletion_policy, opt_in_headers=opt_in_headers, required_features_policy=required_features_policy, ) def providerhub_resource_type_registration_update( # pylint: disable=too-many-locals client, provider_namespace, resource_type, routing_type=None, regionality=None, endpoints=None, marketplace_type=None, resource_creation_begin=None, 
resource_patch_begin=None, swagger_specifications=None, allowed_unauthorized_actions=None, authorization_action_mappings=None, linked_access_checks=None, default_api_version=None, logging_rules=None, throttling_rules=None, required_features=None, enable_async_operation=None, enable_third_party_s2s=None, is_pure_proxy=None, identity_management=None, check_name_availability_specifications=None, disallowed_action_verbs=None, service_tree_infos=None, subscription_state_rules=None, template_deployment_options=None, extended_locations=None, resource_move_policy=None, resource_deletion_policy=None, opt_in_headers=None, required_features_policy=None, ): return client.begin_create_or_update( provider_namespace=provider_namespace, resource_type=resource_type, routing_type=routing_type, regionality=regionality, endpoints=endpoints, resource_creation_begin=resource_creation_begin, resource_patch_begin=resource_patch_begin, marketplace_type=marketplace_type, swagger_specifications=swagger_specifications, allowed_unauthorized_actions=allowed_unauthorized_actions, authorization_action_mappings=authorization_action_mappings, linked_access_checks=linked_access_checks, default_api_version=default_api_version, logging_rules=logging_rules, throttling_rules=throttling_rules, required_features=required_features, enable_async_operation=enable_async_operation, enable_third_party_s2s=enable_third_party_s2s, is_pure_proxy=is_pure_proxy, identity_management=identity_management, check_name_availability_specifications=check_name_availability_specifications, disallowed_action_verbs=disallowed_action_verbs, service_tree_infos=service_tree_infos, subscription_state_rules=subscription_state_rules, template_deployment_options=template_deployment_options, extended_locations=extended_locations, resource_move_policy=resource_move_policy, resource_deletion_policy=resource_deletion_policy, opt_in_headers=opt_in_headers, required_features_policy=required_features_policy, ) def providerhub_resource_type_registration_delete( client, provider_namespace, resource_type ): return client.delete( provider_namespace=provider_namespace, resource_type=resource_type )
# coding: utf-8 """ GMO Aozora Net Bank Open API <p>オープンAPI仕様書(PDF版)は下記リンクをご参照ください</p> <div> <div style='display:inline-block;'><a style='text-decoration:none; font-weight:bold; color:#00b8d4;' href='https://gmo-aozora.com/business/service/api-specification.html' target='_blank'>オープンAPI仕様書</a></div><div style='display:inline-block; margin-left:2px; left:2px; width:10px; height:10px; border-top:2px solid #00b8d4; border-right:2px solid #00b8d4; transparent;-webkit-transform:rotate(45deg); transform: rotate(45deg);'></div> </div> <h4 style='margin-top:30px; border-left: solid 4px #1B2F48; padding: 0.1em 0.5em; color:#1B2F48;'>共通仕様</h4> <div style='width:100%; margin:10px;'> <p style='font-weight:bold; color:#616161;'><HTTPリクエストヘッダ></p> <div style='display:table; margin-left:10px; background-color:#29659b;'> <div style='display:table-cell; min-width:130px; padding:9px; border:1px solid #fff; color:#fff;'>項目</div> <div style='display:table-cell; width:85%; padding:9px; border:1px solid #fff; color:#fff;'>仕様</div> </div> <div style='display:table; margin-left:10px;'> <div style='display:table-cell; min-width:130px; padding:9px; border:1px solid #fff; color:#fff; background-color:#29659b;'>プロトコル</div> <div style='display:table-cell; width:85%; padding:9px; border:1px solid #fff; background-color:#f8f8f8;'>HTTP1.1/HTTPS</div> </div> <div style='display:table; margin-left:10px;'> <div style='display:table-cell; min-width:130px; padding:9px; border:1px solid #fff; color:#fff; background-color:#29659b;'>charset</div> <div style='display:table-cell; width:85%; padding:9px; border:1px solid #fff; background-color:#f8f8f8;'>UTF-8</div> </div> <div style='display:table; margin-left:10px;'> <div style='display:table-cell; min-width:130px; padding:9px; border:1px solid #fff; color:#fff; background-color:#29659b;'>content-type</div> <div style='display:table-cell; width:85%; padding:9px; border:1px solid #fff; background-color:#f8f8f8;'>application/json</div> </div> <div style='display:table; margin-left:10px;'> <div style='display:table-cell; min-width:130px; padding:9px; border:1px solid #fff; color:#fff; background-color:#29659b;'>domain_name</div> <div style='display:table-cell; width:85%; padding:9px; border:1px solid #fff; background-color:#f8f8f8;'> 本番環境:api.gmo-aozora.com</br> 開発環境:stg-api.gmo-aozora.com </div> </div> <div style='display:table; margin-left:10px;'> <div style='display:table-cell; min-width:130px; padding:9px; border:1px solid #fff; color:#fff; background-color:#29659b;'>メインURL</div> <div style='display:table-cell; width:85%; padding:9px; border:1px solid #fff; background-color:#f8f8f8;'> https://{domain_name}/ganb/api/corporation/{version}</br> <span style='border-bottom:solid 1px;'>Version:1.x.x</span> の場合</br>  https://api.gmo-aozora.com/ganb/api/corporation/<span style='border-bottom:solid 1px;'>v1</span> </div> </div> </div> <div style='margin:20px 10px;'> <p style='font-weight:bold; color:#616161;'><リクエスト共通仕様></p> <p style='padding-left:20px; font-weight:bold; color:#616161;'>NULLデータの扱い</p> <p style='padding-left:40px;'>パラメータの値が空の場合、またはパラメータ自体が設定されていない場合、どちらもNULLとして扱います</p> </div> <div style='margin:20px 10px;'> <p style='font-weight:bold; color:#616161;'><レスポンス共通仕様></p> <p style='padding-left:20px; font-weight:bold; color:#616161;'>NULLデータの扱い</p> <ul> <li>レスポンスデータ</li> <ul> <li style='list-style-type:none;'>レスポンスデータの値が空の場合または、レスポンスデータ自体が設定されない場合は「項目自体を設定しません」と記載</li> </ul> <li>配列</li> <ul> <li style='list-style-type:none;'>配列の要素の値が空の場合は「空のリスト」と記載</li> <li 
style='list-style-type:none;'>配列自体が設定されない場合は「項目自体を設定しません」と記載</li> </ul> </ul> </div> <div style='margin:20px 10px;'> <p style='font-weight:bold; color:#616161;'><更新系APIに関する注意事項></p> <ul> <li style='list-style-type:none;'>更新系処理がタイムアウトとなった場合、処理自体は実行されている可能性がありますので、</li> <li style='list-style-type:none;'>再実行を行う必要がある場合は必ず照会系の処理で実行状況を確認してから再実行を行ってください</li> </ul> </div> # noqa: E501 OpenAPI spec version: 1.1.12 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from ganb_corporate_client.models.bulk_transfer_info import BulkTransferInfo # noqa: F401,E501 class BulkTransferResponse(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account_id': 'str', 'remitter_name': 'str', 'transfer_designated_date': 'str', 'transfer_data_name': 'str', 'total_count': 'str', 'total_amount': 'str', 'bulk_transfer_infos': 'list[BulkTransferInfo]' } attribute_map = { 'account_id': 'accountId', 'remitter_name': 'remitterName', 'transfer_designated_date': 'transferDesignatedDate', 'transfer_data_name': 'transferDataName', 'total_count': 'totalCount', 'total_amount': 'totalAmount', 'bulk_transfer_infos': 'bulkTransferInfos' } def __init__(self, account_id=None, remitter_name=None, transfer_designated_date=None, transfer_data_name=None, total_count=None, total_amount=None, bulk_transfer_infos=None): # noqa: E501 """BulkTransferResponse - a model defined in Swagger""" # noqa: E501 self._account_id = None self._remitter_name = None self._transfer_designated_date = None self._transfer_data_name = None self._total_count = None self._total_amount = None self._bulk_transfer_infos = None self.discriminator = None if account_id is not None: self.account_id = account_id if remitter_name is not None: self.remitter_name = remitter_name if transfer_designated_date is not None: self.transfer_designated_date = transfer_designated_date if transfer_data_name is not None: self.transfer_data_name = transfer_data_name if total_count is not None: self.total_count = total_count if total_amount is not None: self.total_amount = total_amount if bulk_transfer_infos is not None: self.bulk_transfer_infos = bulk_transfer_infos @property def account_id(self): """Gets the account_id of this BulkTransferResponse. # noqa: E501 口座ID 半角英数字 口座を識別するID # noqa: E501 :return: The account_id of this BulkTransferResponse. # noqa: E501 :rtype: str """ return self._account_id @account_id.setter def account_id(self, account_id): """Sets the account_id of this BulkTransferResponse. 口座ID 半角英数字 口座を識別するID # noqa: E501 :param account_id: The account_id of this BulkTransferResponse. # noqa: E501 :type: str """ if account_id is not None and len(account_id) > 29: raise ValueError("Invalid value for `account_id`, length must be less than or equal to `29`") # noqa: E501 if account_id is not None and len(account_id) < 12: raise ValueError("Invalid value for `account_id`, length must be greater than or equal to `12`") # noqa: E501 self._account_id = account_id @property def remitter_name(self): """Gets the remitter_name of this BulkTransferResponse. # noqa: E501 振込依頼人名 半角文字 # noqa: E501 :return: The remitter_name of this BulkTransferResponse. 
# noqa: E501 :rtype: str """ return self._remitter_name @remitter_name.setter def remitter_name(self, remitter_name): """Sets the remitter_name of this BulkTransferResponse. 振込依頼人名 半角文字 # noqa: E501 :param remitter_name: The remitter_name of this BulkTransferResponse. # noqa: E501 :type: str """ if remitter_name is not None and len(remitter_name) > 48: raise ValueError("Invalid value for `remitter_name`, length must be less than or equal to `48`") # noqa: E501 if remitter_name is not None and len(remitter_name) < 1: raise ValueError("Invalid value for `remitter_name`, length must be greater than or equal to `1`") # noqa: E501 self._remitter_name = remitter_name @property def transfer_designated_date(self): """Gets the transfer_designated_date of this BulkTransferResponse. # noqa: E501 振込指定日 半角文字 YYYY-MM-DD形式 # noqa: E501 :return: The transfer_designated_date of this BulkTransferResponse. # noqa: E501 :rtype: str """ return self._transfer_designated_date @transfer_designated_date.setter def transfer_designated_date(self, transfer_designated_date): """Sets the transfer_designated_date of this BulkTransferResponse. 振込指定日 半角文字 YYYY-MM-DD形式 # noqa: E501 :param transfer_designated_date: The transfer_designated_date of this BulkTransferResponse. # noqa: E501 :type: str """ if transfer_designated_date is not None and len(transfer_designated_date) > 10: raise ValueError("Invalid value for `transfer_designated_date`, length must be less than or equal to `10`") # noqa: E501 if transfer_designated_date is not None and len(transfer_designated_date) < 10: raise ValueError("Invalid value for `transfer_designated_date`, length must be greater than or equal to `10`") # noqa: E501 self._transfer_designated_date = transfer_designated_date @property def transfer_data_name(self): """Gets the transfer_data_name of this BulkTransferResponse. # noqa: E501 振込データ名 全半角文字 作成した総合振込のデータを区別するためのメモ # noqa: E501 :return: The transfer_data_name of this BulkTransferResponse. # noqa: E501 :rtype: str """ return self._transfer_data_name @transfer_data_name.setter def transfer_data_name(self, transfer_data_name): """Sets the transfer_data_name of this BulkTransferResponse. 振込データ名 全半角文字 作成した総合振込のデータを区別するためのメモ # noqa: E501 :param transfer_data_name: The transfer_data_name of this BulkTransferResponse. # noqa: E501 :type: str """ if transfer_data_name is not None and len(transfer_data_name) > 10: raise ValueError("Invalid value for `transfer_data_name`, length must be less than or equal to `10`") # noqa: E501 if transfer_data_name is not None and len(transfer_data_name) < 1: raise ValueError("Invalid value for `transfer_data_name`, length must be greater than or equal to `1`") # noqa: E501 self._transfer_data_name = transfer_data_name @property def total_count(self): """Gets the total_count of this BulkTransferResponse. # noqa: E501 合計件数 半角数字 # noqa: E501 :return: The total_count of this BulkTransferResponse. # noqa: E501 :rtype: str """ return self._total_count @total_count.setter def total_count(self, total_count): """Sets the total_count of this BulkTransferResponse. 合計件数 半角数字 # noqa: E501 :param total_count: The total_count of this BulkTransferResponse. 
# noqa: E501 :type: str """ if total_count is not None and len(total_count) > 6: raise ValueError("Invalid value for `total_count`, length must be less than or equal to `6`") # noqa: E501 if total_count is not None and len(total_count) < 1: raise ValueError("Invalid value for `total_count`, length must be greater than or equal to `1`") # noqa: E501 self._total_count = total_count @property def total_amount(self): """Gets the total_amount of this BulkTransferResponse. # noqa: E501 合計金額 半角数字 # noqa: E501 :return: The total_amount of this BulkTransferResponse. # noqa: E501 :rtype: str """ return self._total_amount @total_amount.setter def total_amount(self, total_amount): """Sets the total_amount of this BulkTransferResponse. 合計金額 半角数字 # noqa: E501 :param total_amount: The total_amount of this BulkTransferResponse. # noqa: E501 :type: str """ if total_amount is not None and len(total_amount) > 20: raise ValueError("Invalid value for `total_amount`, length must be less than or equal to `20`") # noqa: E501 if total_amount is not None and len(total_amount) < 1: raise ValueError("Invalid value for `total_amount`, length must be greater than or equal to `1`") # noqa: E501 self._total_amount = total_amount @property def bulk_transfer_infos(self): """Gets the bulk_transfer_infos of this BulkTransferResponse. # noqa: E501 総合振込明細情報 総合振込明細のリスト 明細情報取得フラグが「True:取得する」、かつ明細情報取得結果フラグが「True:取得可」のときのみ設定します それ以外は項目自体を設定しません # noqa: E501 :return: The bulk_transfer_infos of this BulkTransferResponse. # noqa: E501 :rtype: list[BulkTransferInfo] """ return self._bulk_transfer_infos @bulk_transfer_infos.setter def bulk_transfer_infos(self, bulk_transfer_infos): """Sets the bulk_transfer_infos of this BulkTransferResponse. 総合振込明細情報 総合振込明細のリスト 明細情報取得フラグが「True:取得する」、かつ明細情報取得結果フラグが「True:取得可」のときのみ設定します それ以外は項目自体を設定しません # noqa: E501 :param bulk_transfer_infos: The bulk_transfer_infos of this BulkTransferResponse. # noqa: E501 :type: list[BulkTransferInfo] """ self._bulk_transfer_infos = bulk_transfer_infos def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BulkTransferResponse, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BulkTransferResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
import datetime import collections from djangomockingbird import queryset_utils # queryset that returns mock class objects class MockBaseQueryset(object): CHAINABLE_METHODS = [ "filter", "exclude", "prefetch_related", "order_by", "reverse", "distinct", "all", "union", "intersection", "difference", "select_related", "extra", "defer", "only", "using", "select_for_update", "raw", ] def __init__(self, mock_class, model_dict): self.mock_class = mock_class self.model_dict = model_dict def __getattr__(self, name): if name in self.CHAINABLE_METHODS: return lambda *args, **kwargs: self else: raise AttributeError() def annotate(self, *args, **kwargs): model_class = queryset_utils.annotate_mock_class(kwargs, self.mock_class) return self def values(self, *args, **kwargs): return MockDerivedQueryset(self.model_dict) def values_list(self, *args, **kwargs): mock_values_list = queryset_utils.make_mock_list_from_args(args) mock_values_tuple = tuple(mock_values_list) mock_fields_list = queryset_utils.get_keys_from_dict(self.model_dict) mock_named_tuple = collections.namedtuple("Mock_named_tuple", mock_fields_list) if "flat" in kwargs and kwargs["flat"] == True: return MockDerivedQueryset(mock_values_list) elif "named" in kwargs and kwargs["named"] == True: return MockDerivedQueryset(mock_named_tuple) else: return MockDerivedQueryset(mock_values_tuple) def dates(self, *args, **kwargs): return MockDerivedQueryset(datetime.datetime(2000, 1, 1)) def datetimes(self, *args, **kwargs): return MockDerivedQueryset(datetime.datetime(2000, 1, 1)) def none(self, *args, **kwargs): return None # methods that do not return querysets def get(self, *args, **kwargs): return self.mock_class() def create(self, *args, **kwargs): return None def get_or_create(self, *args, **kwargs): return (self.mock_class(), True) def update_or_create(self, *args, **kwargs): return (self.mock_class(), True) def bulk_create(self, *args, **kwargs): return None def bulk_update(self, *args, **kwargs): return None def count(self, *args, **kwargs): return 1 def in_bulk(self, *args, **kwargs): mock_in_bulk_dict = queryset_utils.make_mock_in_bulk_dict(args) return mock_in_bulk_dict def iterator(self, *args, **kwargs): return [self.mock_class()] def latest(self, *args, **kwargs): return self.mock_class() def earliest(self, *args, **kwargs): return self.mock_class() def first(self, *args, **kwargs): return self.mock_class() def last(self, *args, **kwargs): return self.mock_class() def aggregate(self, *args, **kwargs): mock_aggregate_dict = queryset_utils.make_mock_aggregate_dict(kwargs) return mock_aggregate_dict def exists(self, *args, **kwargs): return True def update(self, *args, **kwargs): return 1 def delete(self, *args, **kwargs): return 1 def as_manager(self, *args, **kwargs): return self # TODO def explain(self, *args, **kwargs): return "mock explain" # extra methods/protocols def __len__(self): return 1 def __iter__(self): return iter([self.mock_class()]) def __next__(self): return self.mock_class() def __getitem__(self, key): return self.mock_class() # methods for evaluating querysets def repr(self, *args, **kwargs): return self.mock_class() def list(self, *args, **kwargs): return [self.mock_class()] def bool(self, *args, **kwargs): return True # queryset that returns something other than mock class objects: dicts, tuples, datetime objects etc. 
class MockDerivedQueryset(MockBaseQueryset, dict): def __init__(self, return_value): self.return_value = return_value if isinstance(self.return_value, dict): dict.__init__(self, return_value) # methods that do not return querysets def get(self, *args, **kwargs): return self.return_value def get_or_create(self, *args, **kwargs): return (self.return_value, True) def update_or_create(self, *args, **kwargs): return (self.return_value, True) def iterator(self, *args, **kwargs): return [self.return_value] def latest(self, *args, **kwargs): return self.return_value def earliest(self, *args, **kwargs): return self.return_value def first(self, *args, **kwargs): return self.return_value def last(self, *args, **kwargs): return self.return_value # extra methods/protocols def __iter__(self): return iter([self.return_value]) def __next__(self): return self.return_value def __getitem__(self, key): return self.return_value # methods for evaluating querysets def repr(self, *args, **kwargs): return self.return_value def list(self, *args, **kwargs): return [self.return_value] # other methods def annotate(self, *args, **kwargs): annotated_return_value = queryset_utils.annotate_return_value( self.return_value, kwargs ) return MockDerivedQueryset(annotated_return_value) class MockRelatedManager(MockBaseQueryset): def __init__(self, mock_class, model_dict): self.mock_class = mock_class self.model_dict = model_dict def add(self, *args, **kwargs): MockBaseQueryset(self.mock_class, self.model_dict) def create(self, *args, **kwargs): MockBaseQueryset(self.mock_class, self.model_dict) def set(self, *args, **kwargs): MockBaseQueryset(self.mock_class, self.model_dict) def remove(self, *args, **kwargs): MockBaseQueryset(self.mock_class, self.model_dict) def clear(self, *args, **kwargs): MockBaseQueryset(self.mock_class, self.model_dict)
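# A short, hedged sketch of how the mock querysets above behave, using a stand-in mock
# class and field dict; FakeUser and user_fields are illustrative names only.
class FakeUser:
    pass

user_fields = {'id': 1, 'name': ''}
qs = MockBaseQueryset(FakeUser, user_fields)

# Chainable methods fall through __getattr__ and return the queryset itself, so
# arbitrary filter/exclude/order_by chains resolve without touching a database.
assert qs.filter(name='x').exclude(id=2).order_by('name') is qs

assert isinstance(qs.get(pk=1), FakeUser)   # terminal methods hand back mock instances
assert qs.count() == 1
assert qs.values().first() == user_fields   # derived querysets return the stored value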
""" Test settings for ORM Blog project. - Used to run tests fast on the continuous integration server and locally """ from .base import * # noqa # DEBUG # ------------------------------------------------------------------------------ # Turn debug off so tests run faster DEBUG = False # This needs to be enabled if we want to use the coverage plugin for templates TEMPLATES[0]["OPTIONS"]["debug"] = True # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. SECRET_KEY = env("DJANGO_SECRET_KEY", default="CHANGEME!!!") # EMAIL # ------------------------------------------------------------------------------ EMAIL_HOST = "localhost" EMAIL_PORT = 1025 # In-memory email backend stores messages in django.core.mail.outbox # for unit testing purposes EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend" # CACHING # ------------------------------------------------------------------------------ # Speed advantages of in-memory caching without having to run Memcached CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "", } } # TESTING # ------------------------------------------------------------------------------ # TEST_RUNNER = 'django.test.runner.DiscoverRunner' TEST_RUNNER = "config.runner.PytestTestRunner" AUTHENTICATION_BACKENDS += ["django_webtest.backends.WebtestUserBackend"] MIDDLEWARE += ["django_webtest.middleware.WebtestUserMiddleware"] # CELERY # ------------------------------------------------------------------------------ # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-always-eager CELERY_TASK_ALWAYS_EAGER = True # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates CELERY_TASK_EAGER_PROPAGATES = True # PASSWORD HASHING # ------------------------------------------------------------------------------ # Use fast password hasher so tests run faster PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"] # TEMPLATES # ------------------------------------------------------------------------------ # Keep templates in memory so tests run faster. # See: # https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader TEMPLATES[0]["OPTIONS"]["loaders"] = [ [ "django.template.loaders.cached.Loader", [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ], ] ]
from django.shortcuts import render , redirect from django.core.paginator import Paginator from django import forms from .models import Video from django.http import HttpResponse, HttpResponseRedirect class videoForm(forms.Form): uploader = forms.CharField() imgpath = forms.ImageField() uppath = forms.FileField() describe = forms.CharField() uploaddate = forms.DateField() kind = forms.CharField() score = forms.IntegerField() haveseen = forms.CharField() #form表单 def form(request): if request.method == 'POST': vf = videoForm(request.POST ,request.FILES) if vf.is_valid(): uploader = vf.cleaned_data['uploader'] imgpath = vf.cleaned_data['imgpath'] print(uploader) uppath = vf.cleaned_data['uppath'] print(uppath ,"+++++++++++++++++++++++++++++++++++++++++++++++++") describe = vf.cleaned_data['describe'] print(describe) uploaddate = vf.cleaned_data['uploaddate'] print(uploaddate) kind = vf.cleaned_data['kind'] print(kind) score = vf.cleaned_data['score'] print(score,'5555555555555555555555555555555555555555555555') haveseen = vf.cleaned_data['haveseen'] vid = Video() vid.uploader = uploader vid.imgpath = imgpath vid.uppath = uppath vid.describe = describe vid.uploaddate = uploaddate vid.kind = kind vid.score = score vid.haveseen = haveseen print(type(vid.uppath), '---' ,vid.uppath ,"11111111111111111111111111111111111111111111111111111111111111111111111111111111") vid.save() print("2222222222222222") print("来到这里会重定向-----------------") return redirect('/media/upload') else: vf = videoForm haveseen = "已看过" print("怎么没到这历来。。。。。。。。。。。。。。。。。。。。。。。。。") return render(request, 'avi/uploading.html', locals() ,{"title":"上传视频","haveseen":haveseen}) from project.settings import MEDIA_URL #蓝快课堂 def media(request ,pageid): username = request.session['username'] print(username ,'======当前登录用户名username========================') vidList = Video.objects.all() paginator = Paginator(vidList, 3) page = paginator.page(pageid) print(page ,'页数') #搜索 search = request.POST.get("search") error_msg = "查无此视频" #我的总分 #我看过的视频ID,所有ID的分数和 iseeList = Isee.objects.filter(iname=username) count = 0 score = 0 for isee in iseeList: v = Video.objects.get(id=isee.igid)#取出我看过的视频 print(v.score ,"单条视频的分数") count += 1 score += v.score print(score ,"所有视频的分数和") if not search: error_msg="请输入关键字" return render(request, 'avi/media.html', {'title': '蓝快课堂', 'username': username, 'video': page, 'vidList': vidList, 'error_msg': error_msg , "count":count , "score":score}) else: vidList1 = Video.objects.filter(describe__icontains = search) return render(request, 'avi/media.html', {'title': '蓝快课堂', 'username': username, 'video': page, 'vidList': vidList1, 'error_msg': error_msg , "count":count , "score":score}) def avi(request ,id): video = Video.objects.get(id = id) return render(request ,'avi/avi.html' ,{"video":video}) import time from .models import Isee def addIsee(request ,pid): #当前用户 username = request.session['username'] print(username) #视频ID newpath = request.path[10:]#media/add1 video = Video.objects.get(id=newpath) print(request.path ,"----------------------url----------------------") print(newpath ,"新dizhi --------------------------------------------") seetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) print(seetime ,"") #如果该用户看过该视频,则直接重定向 if Isee.objects.filter(iname=username ,igid=newpath): return redirect('/media/avi' + newpath) else: #否则先存进去再重定向 isee = Isee.createisee(username, newpath, seetime) isee.save() return HttpResponseRedirect('http://127.0.0.1:8000/media/avi' + newpath) def addok(request): return 
redirect('/addok')
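# As a hedged alternative to the hand-written videoForm above, Django's ModelForm can
# derive the fields directly from the Video model; the field list below is assumed to
# match the model and is illustrative only.
from django import forms
from .models import Video

class VideoModelForm(forms.ModelForm):
    class Meta:
        model = Video
        fields = ['uploader', 'imgpath', 'uppath', 'describe',
                  'uploaddate', 'kind', 'score', 'haveseen']

# In the view, vf.save() would then replace the manual field-by-field copy:
#     vf = VideoModelForm(request.POST, request.FILES)
#     if vf.is_valid():
#         vf.save()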
# -*- coding: utf-8 -*-
""" Notification definition from a YAML config. """

# pylint: disable=too-few-public-methods


class NotificationDef:
    """ Notification definition from a YAML config. """

    def __init__(self, name, action):
        """ Initialisation.

        :param name: the name of the notification.
        :param action: the code to execute.
        """
        self.name = name
        self.action = action
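# A minimal sketch of building NotificationDef objects from a YAML config, assuming
# PyYAML is available; the config layout and the notification shown below are
# illustrative, not taken from the original project.
import yaml

CONFIG = """
notifications:
  - name: on_timer_end
    action: "say('Timer finished')"
"""

defs = [
    NotificationDef(item["name"], item["action"])
    for item in yaml.safe_load(CONFIG)["notifications"]
]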
from __future__ import unicode_literals from django.db import models from django.utils import timezone from django.db.models import Q class Room(models.Model): name = models.TextField() label = models.SlugField(unique=True) locked = models.BooleanField(default=False) owner = models.CharField(max_length=100, null=True, default=None) def __unicode__(self): return self.label def as_dict(self): return {'name': self.name, 'type': 'room', 'locked': self.locked, 'label': self.label} class Player(models.Model): room = models.ForeignKey(Room, related_name='players') score = models.IntegerField(default=0) handle = models.TextField() timestamp = models.DateTimeField(default=timezone.now, db_index=True) def __unicode__(self): return self.room.name + '-' + self.handle def as_dict(self): return {'handle': self.handle, 'type': 'player'} class Meta: ordering = ["timestamp"] class Game(models.Model): room = models.ForeignKey(Room, related_name='games') hakkam = models.IntegerField(default=5) current_bid = models.IntegerField(default=150) winning_bid = models.IntegerField(null=True, default=None) bid_winner = models.ForeignKey(Player, null=True, default=None) partner1 = models.ForeignKey(Player, null=True, default=None, related_name='games_partner1') partner2 = models.ForeignKey(Player, null=True, default=None, related_name='games_partner2') partner1card = models.IntegerField(null=True, default=None) partner2card = models.IntegerField(null=True, default=None) active = models.BooleanField(default=True) timestamp = models.DateTimeField(default=timezone.now, db_index=True) next_to_bid = models.ForeignKey(Player, null=True, default=None, related_name='games_next_to_bid') minimum = models.ForeignKey(Player, null=True, default=None, related_name='games_minimum') cards = models.TextField(null=True, default=None) def evaluate_bid(self): if self.bids.count() > self.room.players.count(): bid_dict = {} for bid in self.bids.all(): if bid.value == 0: bid_dict.pop(bid.player.handle, None) else: bid_dict[bid.player.handle] = bid.value, bid.player if len(bid_dict)==1: return True, bid_dict.values()[0][0], bid_dict.values()[0][1] return False, None, None class Meta: ordering = ["timestamp"] class Bid(models.Model): game = models.ForeignKey(Game, related_name='bids') player = models.ForeignKey(Player) value = models.IntegerField() # 0 is pass timestamp = models.DateTimeField(default=timezone.now, db_index=True) class Meta: ordering = ["timestamp"] class Hand(models.Model): game = models.ForeignKey(Game, related_name='hands') hand_winner = models.ForeignKey(Player, null=True, default=None) first_suit = models.IntegerField(null=True, default=None) active = models.BooleanField(default=True) points = models.IntegerField(null=True, default=None) timestamp = models.DateTimeField(default=timezone.now, db_index=True) def a_better_than_b(self, a, b): hakkam = self.game.hakkam first_suit = self.first_suit if b == -1: return 1 suit_a = a%4 val_a = a//4 suit_b = b%4 val_b = b//4 if suit_a != first_suit and suit_a != hakkam: return -1 if suit_a == hakkam and suit_b == hakkam: if val_a < val_b: return 1 if val_a > val_b: return -1 return 1 elif suit_a == hakkam: return 1 elif suit_b == hakkam: return -1 else: if val_a < val_b: return 1 if val_a > val_b: return -1 return 1 def get_points(self, a): val_a = a//4 suit_a = a%4 if val_a == 2 and suit_a == 0: return 30 if val_a == 0: return 15 if val_a == 4: return 10 if val_a == 9: return 5 return 0 def compute_winner(self): if self.entries.count() == self.game.room.players.count(): 
current_best = -1 # ordering is AS, AD, AC, AH, KS, KD, ... hakkam = self.game.hakkam winner = None points = 0 points_cards = [] for entry in self.entries.all(): ab = self.a_better_than_b(entry.card_played, current_best) if self.get_points(entry.card_played) > 0: points_cards.append(entry.card_played) points += self.get_points(entry.card_played) if ab == 1: current_best = entry.card_played winner = entry.player return winner, points, points_cards class Meta: ordering = ["timestamp"] class HandEntry(models.Model): hand = models.ForeignKey(Hand, related_name='entries') player = models.ForeignKey(Player, null=True, default=None) card_played = models.IntegerField() timestamp = models.DateTimeField(default=timezone.now, db_index=True) def get_points(self): a = self.card_played val_a = a//4 suit_a = a%4 if val_a == 2 and suit_a == 0: return 30 if val_a == 0: return 15 if val_a == 4: return 10 if val_a == 9: return 5 def is_partner(self): if self.card_played == self.hand.game.partner1card and self.hand.game.partner1 == None: self.hand.game.partner1 = self.player self.hand.game.save() return True elif self.card_played == self.hand.game.partner2card and self.hand.game.partner2 == None: self.hand.game.partner2 = self.player self.hand.game.save() return True return False class Meta: ordering = ["timestamp"] class Message(models.Model): room = models.ForeignKey(Room, related_name='messages') handle = models.TextField() message = models.TextField() timestamp = models.DateTimeField(default=timezone.now, db_index=True) def __unicode__(self): return '[{timestamp}] {handle}: {message}'.format(**self.as_dict()) @property def formatted_timestamp(self): return self.timestamp.strftime('%b %-d %-I:%M %p') def as_dict(self): return {'handle': self.handle, 'message': self.message, 'timestamp': self.formatted_timestamp, 'type': 'dm'}
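A small decoding sketch for the integer card encoding relied on above (suit = card % 4, rank = card // 4, ordering AS, AD, AC, AH, KS, KD, ...). Only the Ace/King positions and the suit order come from the comment in compute_winner; the remaining rank order is an assumption.

SUITS = ["Spades", "Diamonds", "Clubs", "Hearts"]   # card % 4
RANKS = ["A", "K", "Q", "J", "10", "9", "8", "7",
         "6", "5", "4", "3", "2"]                   # card // 4 (A, K from the comment; rest assumed)


def describe_card(card):
    """Return a readable name for an encoded card index, e.g. 0 -> 'A of Spades'."""
    return "{} of {}".format(RANKS[card // 4], SUITS[card % 4])


# describe_card(0) == 'A of Spades', describe_card(5) == 'K of Diamonds'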
# Create a branch following the template name-surname-01-hello
# Add code to the program that prints the phrase "I'm done!"
# Push the branch
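The single line of code the exercise above asks for:

print("I'm done!")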
"""Function for recording and reporting deprecations. Notes ----- this file is copied (with minor modifications) from the Nibabel. https://github.com/nipy/nibabel. See COPYING file distributed along with the Nibabel package for the copyright and license terms. """ import functools import warnings import re from inspect import signature from fury import __version__ from fury.optpkg import optional_package # packaging.version.parse is a third-party utility but is used by setuptools # (so probably already installed) and is conformant to the current PEP 440. # But just if it is not the case, we use distutils packaging, have_pkg, _ = optional_package('setuptools.extern.packaging') _LEADING_WHITE = re.compile(r'^(\s*)') class ExpiredDeprecationError(RuntimeError): """Error for expired deprecation. Error raised when a called function or method has passed out of its deprecation period. """ pass class ArgsDeprecationWarning(DeprecationWarning): """Warning for args deprecation. Warning raised when a function or method argument has changed or removed. """ pass def _ensure_cr(text): """Remove trailing whitespace and add carriage return. Ensures that `text` always ends with a carriage return """ return text.rstrip() + '\n' def _add_dep_doc(old_doc, dep_doc): """Add deprecation message `dep_doc` to docstring in `old_doc`. Parameters ---------- old_doc : str Docstring from some object. dep_doc : str Deprecation warning to add to top of docstring, after initial line. Returns ------- new_doc : str `old_doc` with `dep_doc` inserted after any first lines of docstring. """ dep_doc = _ensure_cr(dep_doc) if not old_doc: return dep_doc old_doc = _ensure_cr(old_doc) old_lines = old_doc.splitlines() new_lines = [] for line_no, line in enumerate(old_lines): if line.strip(): new_lines.append(line) else: break next_line = line_no + 1 if next_line >= len(old_lines): # nothing following first paragraph, just append message return old_doc + '\n' + dep_doc indent = _LEADING_WHITE.match(old_lines[next_line]).group() dep_lines = [indent + L for L in [''] + dep_doc.splitlines() + ['']] return '\n'.join(new_lines + dep_lines + old_lines[next_line:]) + '\n' def cmp_pkg_version(version_str, pkg_version_str=__version__): """Compare `version_str` to current package version. Parameters ---------- version_str : str Version string to compare to current package version pkg_version_str : str, optional Version of our package. Optional, set fom ``__version__`` by default. Returns ------- version_cmp : int 1 if `version_str` is a later version than `pkg_version_str`, 0 if same, -1 if earlier. Examples -------- >>> cmp_pkg_version('1.2.1', '1.2.0') 1 >>> cmp_pkg_version('1.2.0dev', '1.2.0') -1 """ version_cmp = packaging.version.parse if have_pkg else None if any([re.match(r'^[a-z, A-Z]', v)for v in [version_str, pkg_version_str]]): msg = 'Invalid version {0} or {1}'.format(version_str, pkg_version_str) raise ValueError(msg) elif version_cmp(version_str) > version_cmp(pkg_version_str): return 1 elif version_cmp(version_str) == version_cmp(pkg_version_str): return 0 else: return -1 def is_bad_version(version_str, version_comparator=cmp_pkg_version): """Return True if `version_str` is too high.""" return version_comparator(version_str) == -1 def deprecate_with_version(message, since='', until='', version_comparator=cmp_pkg_version, warn_class=DeprecationWarning, error_class=ExpiredDeprecationError): """Return decorator function for deprecation warning / error. 
The decorated function / method will: * Raise the given `warning_class` warning when the function / method gets called, up to (and including) version `until` (if specified); * Raise the given `error_class` error when the function / method gets called, when the package version is greater than version `until` (if specified). Parameters ---------- message : str Message explaining deprecation, giving possible alternatives. since : str, optional Released version at which object was first deprecated. until : str, optional Last released version at which this function will still raise a deprecation warning. Versions higher than this will raise an error. version_comparator : callable Callable accepting string as argument, and return 1 if string represents a higher version than encoded in the `version_comparator`, 0 if the version is equal, and -1 if the version is lower. For example, the `version_comparator` may compare the input version string to the current package version string. warn_class : class, optional Class of warning to generate for deprecation. error_class : class, optional Class of error to generate when `version_comparator` returns 1 for a given argument of ``until``. Returns ------- deprecator : func Function returning a decorator. """ messages = [message] if (since, until) != ('', ''): messages.append('') if since: messages.append('* deprecated from version: ' + since) if until: messages.append('* {0} {1} as of version: {2}'.format( "Raises" if is_bad_version(until) else "Will raise", error_class, until)) message = '\n'.join(messages) def deprecator(func): @functools.wraps(func) def deprecated_func(*args, **kwargs): if until and is_bad_version(until, version_comparator): raise error_class(message) warnings.warn(message, warn_class, stacklevel=2) return func(*args, **kwargs) deprecated_func.__doc__ = _add_dep_doc(deprecated_func.__doc__, message) return deprecated_func return deprecator def deprecated_params(old_name, new_name=None, since='', until='', version_comparator=cmp_pkg_version, arg_in_kwargs=False, warn_class=ArgsDeprecationWarning, error_class=ExpiredDeprecationError, alternative=''): """Deprecate a _renamed_ or _removed_ function argument. The decorator assumes that the argument with the ``old_name`` was removed from the function signature and the ``new_name`` replaced it at the **same position** in the signature. If the ``old_name`` argument is given when calling the decorated function the decorator will catch it and issue a deprecation warning and pass it on as ``new_name`` argument. Parameters ---------- old_name : str or list/tuple thereof The old name of the argument. new_name : str or list/tuple thereof or ``None``, optional The new name of the argument. Set this to `None` to remove the argument ``old_name`` instead of renaming it. since : str or number or list/tuple thereof, optional The release at which the old argument became deprecated. until : str or number or list/tuple thereof, optional Last released version at which this function will still raise a deprecation warning. Versions higher than this will raise an error. version_comparator : callable Callable accepting string as argument, and return 1 if string represents a higher version than encoded in the ``version_comparator``, 0 if the version is equal, and -1 if the version is lower. For example, the ``version_comparator`` may compare the input version string to the current package version string. 
arg_in_kwargs : bool or list/tuple thereof, optional If the argument is not a named argument (for example it was meant to be consumed by ``**kwargs``) set this to ``True``. Otherwise the decorator will throw an Exception if the ``new_name`` cannot be found in the signature of the decorated function. Default is ``False``. warn_class : warning, optional Warning to be issued. error_class : Exception, optional Error to be issued alternative : str, optional An alternative function or class name that the user may use in place of the deprecated object if ``new_name`` is None. The deprecation warning will tell the user about this alternative if provided. Raises ------ TypeError If the new argument name cannot be found in the function signature and arg_in_kwargs was False or if it is used to deprecate the name of the ``*args``-, ``**kwargs``-like arguments. At runtime such an Error is raised if both the new_name and old_name were specified when calling the function and "relax=False". Notes ----- This function is based on the Astropy (major modification). https://github.com/astropy/astropy. See COPYING file distributed along with the astropy package for the copyright and license terms. Examples -------- The deprecation warnings are not shown in the following examples. To deprecate a positional or keyword argument:: >>> from fury.deprecator import deprecated_params >>> @deprecated_params('sig', 'sigma', '0.3') ... def test(sigma): ... return sigma >>> test(2) 2 >>> test(sigma=2) 2 >>> test(sig=2) # doctest: +SKIP 2 To deprecate an argument caught inside the ``**kwargs`` the ``arg_in_kwargs`` has to be set:: >>> @deprecated_params('sig', 'sigma', '1.0', ... arg_in_kwargs=True) ... def test(**kwargs): ... return kwargs['sigma'] >>> test(sigma=2) 2 >>> test(sig=2) # doctest: +SKIP 2 It is also possible to replace multiple arguments. The ``old_name``, ``new_name`` and ``since`` have to be `tuple` or `list` and contain the same number of entries:: >>> @deprecated_params(['a', 'b'], ['alpha', 'beta'], ... ['0.2', 0.4]) ... def test(alpha, beta): ... return alpha, beta >>> test(a=2, b=3) # doctest: +SKIP (2, 3) """ if isinstance(old_name, (list, tuple)): # Normalize input parameters if not isinstance(arg_in_kwargs, (list, tuple)): arg_in_kwargs = [arg_in_kwargs] * len(old_name) if not isinstance(since, (list, tuple)): since = [since] * len(old_name) if not isinstance(until, (list, tuple)): until = [until] * len(old_name) if not isinstance(new_name, (list, tuple)): new_name = [new_name] * len(old_name) if len(set([len(old_name), len(new_name), len(since), len(until), len(arg_in_kwargs)])) != 1: raise ValueError("All parameters should have the same length") else: # To allow a uniform approach later on, wrap all arguments in lists. old_name = [old_name] new_name = [new_name] since = [since] until = [until] arg_in_kwargs = [arg_in_kwargs] def deprecator(function): # The named arguments of the function. arguments = signature(function).parameters positions = [None] * len(old_name) for i, (o_name, n_name, in_keywords) in enumerate(zip(old_name, new_name, arg_in_kwargs)): # Determine the position of the argument. if in_keywords: continue if n_name is not None and n_name not in arguments: # In case the argument is not found in the list of arguments # the only remaining possibility is that it should be caught # by some kind of **kwargs argument. msg = '"{}" was not specified in the function '.format(n_name) msg += 'signature. 
If it was meant to be part of ' msg += '"**kwargs" then set "arg_in_kwargs" to "True"' raise TypeError(msg) key = o_name if n_name is None else n_name param = arguments[key] if param.kind == param.POSITIONAL_OR_KEYWORD: key = o_name if n_name is None else n_name positions[i] = list(arguments.keys()).index(key) elif param.kind == param.KEYWORD_ONLY: # These cannot be specified by position. positions[i] = None else: # positional-only argument, varargs, varkwargs or some # unknown type: msg = 'cannot replace argument "{}" '.format(n_name) msg += 'of kind {}.'.format(repr(param.kind)) raise TypeError(msg) @functools.wraps(function) def wrapper(*args, **kwargs): for i, (o_name, n_name) in enumerate(zip(old_name, new_name)): messages = ['"{}" was deprecated'.format(o_name), ] if (since[i], until[i]) != ('', ''): messages.append('') if since[i]: messages.append('* deprecated from version: ' + str(since[i])) if until[i]: messages.append('* {0} {1} as of version: {2}'.format( "Raises" if is_bad_version(until[i]) else "Will raise", error_class, until[i])) messages.append('') message = '\n'.join(messages) # The only way to have oldkeyword inside the function is # that it is passed as kwarg because the oldkeyword # parameter was renamed to newkeyword. if o_name in kwargs: value = kwargs.pop(o_name) # Check if the newkeyword was given as well. newarg_in_args = (positions[i] is not None and len(args) > positions[i]) newarg_in_kwargs = n_name in kwargs if newarg_in_args or newarg_in_kwargs: msg = 'cannot specify both "{}"'.format(o_name) msg += ' (deprecated parameter) and ' msg += '"{}" (new parameter name).'.format(n_name) raise TypeError(msg) # Pass the value of the old argument with the # name of the new argument to the function key = n_name or o_name kwargs[key] = value if n_name is not None: message += '* Use argument "{}" instead.' \ .format(n_name) elif alternative: message += '* Use {} instead.'.format(alternative) if until[i] and is_bad_version(until[i], version_comparator): raise error_class(message) warnings.warn(message, warn_class, stacklevel=2) # Deprecated keyword without replacement is given as # positional argument. elif (not n_name and positions[i] and len(args) > positions[i]): if alternative: message += '* Use {} instead.'.format(alternative) if until[i] and is_bad_version(until[i], version_comparator): raise error_class(message) warnings.warn(message, warn_class, stacklevel=2) return function(*args, **kwargs) return wrapper return deprecator
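A short usage sketch for the two decorators defined above; the function names `bundle`/`smooth` and the argument names `scale`/`sigma` are placeholders invented for this example, and the version strings assume the usual `major.minor` scheme.

@deprecate_with_version("Use 'new_bundle' instead.", since='0.1', until='9.9')
def bundle():
    # Placeholder body; warns with the message above until version 9.9 is reached,
    # then raises ExpiredDeprecationError.
    return 42


@deprecated_params('scale', 'sigma', since='0.1', until='9.9')
def smooth(sigma=1.0):
    # Placeholder body; calling smooth(scale=2.0) warns and forwards 2.0 to 'sigma'.
    return sigma


bundle()
smooth(scale=2.0)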
# Aula 21 - 12-12-2019
# Client.....
# Create a Cliente (client) class.
# Use the following attributes: client code (int), name, age (int), phone, email, address.
# Use the following state attributes: credit in R$, balance in R$,
# cliente_devedor (True/False).
# The cliente_devedor attribute must become True whenever the negative balance is less
# than or equal to the credit limit.
# For cliente_devedor to become False again the client must pay off the debt,
# ending with a balance equal to 0 or positive.
# Use the following methods:


class Cliente:

    def __init__(self, codigo, nome, idade, telefone, email, endereco):
        self.codigo = codigo
        self.nome = nome
        self.idade = idade
        self.telefone = telefone
        self.email = email
        self.endereco = endereco

    def atualizar(self):
        '''
        This method updates the client's registration data.
        The fields that can be updated are:
        nome, idade (int), telefone, email, endereco.
        '''
        pass

    def limite_credito(self, valor):
        '''
        The credit limit is the maximum negative balance the client may have.
        This method changes that value, whether to increase, reduce or remove the credit.
        The value must be passed as a negative number (e.g. -50.00) to the credito attribute.
        If the credit is reduced (e.g. from -50 to -10) and the client's balance ends up
        below the credit limit (e.g. saldo = -20 and credito = -10), cliente_devedor becomes True.
        If cliente_devedor is True, the credit may be reduced but not increased.
        '''
        pass

    def dinheiro(self, valor):
        '''
        This method adds/removes an amount in R$ to/from the client's saldo attribute.
        It receives the amount as a parameter. If the amount is positive the balance
        increases; if it is negative the balance decreases.
        The client's balance may never go below the credit limit. If the amount would
        exceed it, the method must return False and the operation is cancelled
        (think of a credit card limit).
        If the amount does not exceed the credit limit, the operation is performed,
        the balance is updated and the method returns True.
        If cliente_devedor is True, dinheiro may only receive positive amounts.
        If cliente_devedor is True and the client deposits enough money for the balance
        to become greater than or equal to 0, cliente_devedor must be set back to False.
        '''
        pass

    def __eq__(self, valor):
        '''
        This method compares the client's codigo with valor.
        It returns True if they are equal, otherwise False.
        '''
        pass

    def __str__(self):
        '''
        This method returns a string with all of the client's data.
        Use an f-string to interpolate the text with the variables.
        '''
        pass


if __name__ == "__main__":
    '''
    Use this if-block to test the class.
    It checks whether this file is being executed directly;
    if so, the code below will run.
    '''
    pass
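A possible implementation sketch for the `dinheiro` rules described in the docstrings above. The `ClienteSketch` name is invented here, and this is only one reading of the exercise spec, not an official solution.

class ClienteSketch:
    """Minimal state holder used only to illustrate the dinheiro() rules."""

    def __init__(self, credito=0.0, saldo=0.0):
        self.credito = credito              # credit limit, a negative number (e.g. -50.0)
        self.saldo = saldo
        self.cliente_devedor = False

    def dinheiro(self, valor):
        # A debtor may only deposit (receive positive amounts).
        if self.cliente_devedor and valor < 0:
            return False
        # The balance may never drop below the credit limit.
        if self.saldo + valor < self.credito:
            return False
        self.saldo += valor
        # Paying the debt back to zero or above clears the debtor flag;
        # a negative balance at or below the credit limit sets it.
        if self.saldo >= 0:
            self.cliente_devedor = False
        elif self.saldo <= self.credito:
            self.cliente_devedor = True
        return True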
# coding=utf-8 # *** WARNING: this file was generated by crd2pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'ReplicaSchedulingPreferenceSpec', ] @pulumi.output_type class ReplicaSchedulingPreferenceSpec(dict): def __init__(__self__, *, target_kind: str, total_replicas: int, clusters: Optional[Mapping[str, Any]] = None, rebalance: Optional[bool] = None): """ :param str target_kind: TODO (@irfanurrehman); upgrade this to label selector only if need be. The idea of this API is to have a a set of preferences which can be used for a target FederatedDeployment or FederatedReplicaset. Although the set of preferences in question can be applied to multiple target objects using label selectors, but there are no clear advantages of doing that as of now. To keep the implementation and usage simple, matching ns/name of RSP resource to the target resource is sufficient and only additional information needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset). :param int total_replicas: Total number of pods desired across federated clusters. Replicas specified in the spec for target deployment template or replicaset template will be discarded/overridden when scheduling preferences are specified. :param Mapping[str, Any] clusters: A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in these clusters. "*" (if provided) applies to all clusters if an explicit mapping is not provided. If omitted, clusters without explicit preferences should not have any replicas scheduled. :param bool rebalance: If set to true then already scheduled and running replicas may be moved to other clusters in order to match current state to the specified preferences. Otherwise, if set to false, up and running replicas will not be moved. """ pulumi.set(__self__, "target_kind", target_kind) pulumi.set(__self__, "total_replicas", total_replicas) if clusters is not None: pulumi.set(__self__, "clusters", clusters) if rebalance is not None: pulumi.set(__self__, "rebalance", rebalance) @property @pulumi.getter(name="targetKind") def target_kind(self) -> str: """ TODO (@irfanurrehman); upgrade this to label selector only if need be. The idea of this API is to have a a set of preferences which can be used for a target FederatedDeployment or FederatedReplicaset. Although the set of preferences in question can be applied to multiple target objects using label selectors, but there are no clear advantages of doing that as of now. To keep the implementation and usage simple, matching ns/name of RSP resource to the target resource is sufficient and only additional information needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset). """ return pulumi.get(self, "target_kind") @property @pulumi.getter(name="totalReplicas") def total_replicas(self) -> int: """ Total number of pods desired across federated clusters. Replicas specified in the spec for target deployment template or replicaset template will be discarded/overridden when scheduling preferences are specified. """ return pulumi.get(self, "total_replicas") @property @pulumi.getter def clusters(self) -> Optional[Mapping[str, Any]]: """ A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in these clusters. 
"*" (if provided) applies to all clusters if an explicit mapping is not provided. If omitted, clusters without explicit preferences should not have any replicas scheduled. """ return pulumi.get(self, "clusters") @property @pulumi.getter def rebalance(self) -> Optional[bool]: """ If set to true then already scheduled and running replicas may be moved to other clusters in order to match current state to the specified preferences. Otherwise, if set to false, up and running replicas will not be moved. """ return pulumi.get(self, "rebalance") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# ===-- utils.py ---------------------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===#

from __future__ import absolute_import, print_function, unicode_literals

import sys


def fatal_error(message, stream=sys.stderr):
    """Writes a message to the given stream and exits.

    By default this function outputs to stderr.
    """
    stream.write('[{}] ERROR: {}\n'.format(sys.argv[0], message))
    stream.flush()
    sys.exit(1)


def exit_rejecting_arguments(message, parser=None):
    print(message, file=sys.stderr)
    if parser:
        parser.print_usage(sys.stderr)
    sys.exit(2)  # 2 is the same as `argparse` error exit code.
import torch import random import numpy as np from PIL import Image import math import torch.nn.functional as F def crop(vid, i, j, h, w): return vid[..., i:(i + h), j:(j + w)] def center_crop(vid, output_size): h, w = vid.shape[-2:] th, tw = output_size i = int(round((h - th) / 2.)) j = int(round((w - tw) / 2.)) return crop(vid, i, j, th, tw) def hflip(vid): return vid.flip(dims=(-1,)) # NOTE: for those functions, which generally expect mini-batches, we keep them # as non-minibatch so that they are applied as if they were 4d (thus image). # this way, we only apply the transformation in the spatial domain def resize(vid, size, interpolation='bilinear'): # NOTE: using bilinear interpolation because we don't work on minibatches # at this level scale = None if isinstance(size, int): scale = float(size) / min(vid.shape[-2:]) size = None return torch.nn.functional.interpolate( vid, size=size, scale_factor=scale, mode=interpolation, align_corners=False) def pad(vid, padding, fill=0, padding_mode="constant"): # NOTE: don't want to pad on temporal dimension, so let as non-batch # (4d) before padding. This works as expected return torch.nn.functional.pad(vid, padding, value=fill, mode=padding_mode) def to_normalized_float_tensor(vid): # import pdb; pdb.set_trace() if not isinstance(vid, torch.Tensor): vid = torch.from_numpy(vid) return vid.permute(3, 0, 1, 2).to(torch.float32) / 255 def normalize(vid, mean, std): shape = (-1,) + (1,) * (vid.dim() - 1) mean = torch.as_tensor(mean).reshape(shape) std = torch.as_tensor(std).reshape(shape) return (vid - mean) / std def gaussian_kernel(size, sigma=2., dim=2, channels=3): # The gaussian kernel is the product of the gaussian function of each dimension. # kernel_size should be an odd number. kernel_size = 2*size + 1 kernel_size = [kernel_size] * dim sigma = [sigma] * dim kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) for size, std, mgrid in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2) # Make sure sum of values in gaussian kernel equals 1. kernel = kernel / torch.sum(kernel) # Reshape to depthwise convolutional weight kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) return kernel from PIL import ImageFilter class BlurTransform(object): """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709""" def __init__(self, sigma=[.1, 2.]): self.sigma = sigma def __call__(self, x): sigma = random.uniform(self.sigma[0], self.sigma[1]) x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) return x import itertools class GaussianBlurTransform(object): def __init__(self, sizes=[2*n + 1 for n in range(10)], sigmas=[i+1 for i in range(7)]): self._sizes = sizes self._sigma = sigmas self._kernels = {'%s_%s' % (size, sigma): gaussian_kernel(size=size, sigma=sigma) for size, sigma in itertools.product(self._sizes, self._sigma)} def __call__(self, x, size=None, sigma=None): size = size if size is not None else self._sizes[np.random.randint(len(self._sizes))] sigma = sigma if sigma is not None else self._sigma[np.random.randint(len(self._sigma))] kernel = self._kernels['%s_%s' % (size, sigma)] kernel_size = 2*size + 1 is_img = len(x.shape) == 3 if is_img: x = x[None,...] 
else: x = x.transpose(0, 1) padding = int((kernel_size - 1) / 2) x = F.pad(x, (padding, padding, padding, padding), mode='reflect') x = F.conv2d(x, kernel, groups=3) if is_img: x = torch.squeeze(x) else: x = x.transpose(0, 1) return x class PerTimestepTransform(object): def __init__(self, transforms, pil_convert=True): self.transforms = transforms self.pil_convert = pil_convert def __call__(self, vid): # import pdb; pdb.set_trace() if isinstance(vid, Image.Image): return np.stack([self.transforms(vid)]) if isinstance(vid, torch.Tensor): vid = vid.numpy() # for idx in range(vid.shape[0]): # vid[idx] = np.asarray(self.transforms(Image.fromarray(vid[idx]))) if self.pil_convert else self.transforms(vid[dx]) # # return vid if self.pil_convert: x = np.stack([np.asarray(self.transforms(Image.fromarray(v))) for v in vid]) return x else: return np.stack([self.transforms(v) for v in vid]) # Class interface class RandomCrop(object): def __init__(self, size): self.size = size @staticmethod def get_params(vid, output_size): """Get parameters for ``crop`` for a random crop. """ h, w = vid.shape[-2:] th, tw = output_size if w == tw and h == th: return 0, 0, h, w i = random.randint(0, h - th) j = random.randint(0, w - tw) return i, j, th, tw def __call__(self, vid): i, j, h, w = self.get_params(vid, self.size) return crop(vid, i, j, h, w) class CenterCrop(object): def __init__(self, size): self.size = size def __call__(self, vid): return center_crop(vid, self.size) class Resize(object): def __init__(self, size): self.size = size def __call__(self, vid): return resize(vid, self.size) class ToFloatTensorInZeroOne(object): def __call__(self, vid): return to_normalized_float_tensor(vid) class Normalize(object): def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, vid): return normalize(vid, self.mean, self.std) class RandomHorizontalFlip(object): def __init__(self, p=0.5): self.p = p def __call__(self, vid): if random.random() < self.p: return hflip(vid) return vid class Pad(object): def __init__(self, padding, fill=0): self.padding = padding self.fill = fill def __call__(self, vid): return pad(vid, self.padding, self.fill)
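A short usage sketch for the transform classes above; the clip shape (T, H, W, C) and the mean/std values are assumptions made for illustration, not values taken from the original project.

if __name__ == "__main__":
    # Fake uint8 clip: 8 RGB frames of 128x171.
    clip = torch.randint(0, 256, (8, 128, 171, 3), dtype=torch.uint8)

    vid = ToFloatTensorInZeroOne()(clip)        # -> (C, T, H, W), floats in [0, 1]
    vid = Resize((112, 112))(vid)               # spatial resize only
    vid = RandomHorizontalFlip(p=0.5)(vid)
    vid = Normalize(mean=[0.43, 0.40, 0.37],
                    std=[0.22, 0.22, 0.22])(vid)
    print(vid.shape)                            # torch.Size([3, 8, 112, 112])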
import tkinter from tkinter import * from tkinter import messagebox from TextModified import TextModified from DialogueData import Page from Content import Content # TODO Text box right click # TODO Text box copy paste _WRAP_WIDTH = 37 # From the game _WRAP_HEIGHT = 7 # From the game _BUTTON_WIDTH = 5 # Width of buttons class TextPage(tkinter.Frame): def __init__(self, page, index, rebuildcommand, master=None, cnf={}, **kw): tkinter.Frame.__init__(self, master, cnf, **kw) self.config(padx=5,pady=5) self.master = master self.page = page self.index = index self.rebuildcommand = rebuildcommand content = 'Lorem Ipsum Dolor Est' if page: content = page.content self.detaillabel = Label(self) self.detaillabel.grid(row=0, column=0, sticky=W) self.pageEditPane = TextModified(self, wrap=WORD, width=_WRAP_WIDTH, height=_WRAP_HEIGHT) self.pageEditPane.insert(END, content) self.pageEditPane.grid(row=1, column=0, sticky=NW) self.pageEditPane.bind('<<TextModified>>', self._pageModified) buttonframe = Frame(self, pady=5) buttonframe.grid(row=2, column=0, sticky=NSEW) buttonframe.columnconfigure(1, weight=1) self.addButtonleft = Button(buttonframe, text='<+', width=_BUTTON_WIDTH, command=self._addPageLeft) self.addButtonleft.grid(row=0, column=0, sticky=W) self.buttonDelete = Button(buttonframe, text='X', width=_BUTTON_WIDTH, command=self._deletePage) self.buttonDelete.grid(row=0, column=1) self.addbuttonright = Button(buttonframe, text='+>', width=_BUTTON_WIDTH, command=self._addPageRight) self.addbuttonright.grid(row=0, column=2, sticky=E) self._updateDetails() def _pageModified(self, event): pane = self.pageEditPane.get('1.0',END).rstrip() self.page.content = pane self.page.parent.parent.modified = True # Content.markRestorePoint() # TODO this was too slow Content.contentMutated() self._updateDetails() def _updateDetails(self): if self.page: self.detaillabel.config(text='Page ' + str(self.index + 1) + '\tChars: ' + str(len(self.page.content.rstrip()))) def _deletePage(self): if len(self.page.parent.pages) < 2: # We can't delete the last page return text = 'Text: [Empty]' if len(self.page.content) > 0: text = 'Text: \"' + self.page.content + '\"' if messagebox.askyesno( 'Delete Page?', 'Are you sure you want to delete page ' + str(self.index + 1) + ' from entry \"' + self.page.parent.parent.getPath() + '\"?\n\n' + text, default=messagebox.NO ): self.page.parent.pages.pop(self.index) self.page.parent.parent.modified = True Content.markRestorePoint() self.rebuildcommand() def _addPageLeft(self): self.page.parent.addPage(self.index) self.page.parent.parent.modified = True Content.markRestorePoint() self.rebuildcommand() def _addPageRight(self): self.page.parent.addPage(self.index + 1) self.page.parent.parent.modified = True Content.markRestorePoint() self.rebuildcommand() def width(self): return self.winfo_reqwidth() def height(self): return self.winfo_reqheight()
import os
import time
from random import random
import datetime

import tensorflow as tf

from utils.input_helpers import InputHelper
from siamese_network import SiameseNet
from utils.modules import AdamWeightDecayOptimizer

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

tf.flags.DEFINE_integer("embedding_dim", 64, "Dimensionality of character embedding (default: 64)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.8)")
tf.flags.DEFINE_float("learning_rate", 1e-3, "Learning rate (default: 1e-3)")
tf.flags.DEFINE_string("training_files", "./data/train.txt", "training file (default: None)")
# Data source: https://github.com/ziweipolaris/atec2018-nlp.git
tf.flags.DEFINE_integer("hidden_units", 50, "Number of hidden units (default: 50)")
tf.flags.DEFINE_integer("max_document_length", 50, "max length of sentence (default: 50)")
tf.flags.DEFINE_integer("percent_dev", 10, "percent_dev (default: 10)")
tf.flags.DEFINE_integer("n_layers", 3, "rnn layers (default: 3)")
tf.flags.DEFINE_string("initializer", "xavier", "initializer (default: xavier)")
tf.flags.DEFINE_string("cell", "sru", "cell type (default: sru)")
tf.flags.DEFINE_integer("num_blocks", 6, " number of encoder/decoder blocks (default: 6)")
tf.flags.DEFINE_integer("num_heads", 8, " num_heads (default: 8)")
tf.flags.DEFINE_integer("num_units", 64, " alias = C (default: 64)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 300, "Number of training epochs (default: 300)")
tf.flags.DEFINE_integer("evaluate_every", 1000, "Evaluate model on dev set after this many steps (default: 1000)")
tf.flags.DEFINE_integer("checkpoint_every", 1000, "Save model after this many steps (default: 1000)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
FLAGS.flag_values_dict()

if FLAGS.training_files is None:
    print("Input Files List is empty.
use --training_files argument.") exit() inpH = InputHelper() train_set, dev_set, vocab_processor, sum_no_of_batches = inpH.getDataSets(FLAGS.training_files, FLAGS.max_document_length, FLAGS.percent_dev, FLAGS.batch_size) # Training # ================================================== print("starting graph def") with tf.Graph().as_default(): session_conf = tf.ConfigProto( allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement) sess = tf.Session(config=session_conf) print("started session") with sess.as_default(): siameseModel = SiameseNet( config=FLAGS, vocab_size=len(vocab_processor.vocabulary_) ) # Define Training procedure global_step = tf.Variable(0, name="global_step", trainable=False) # optimizer = AdamWeightDecayOptimizer(learning_rate=FLAGS.learning_rate, weight_decay_rate=0.01) optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) print("initialized siameseModel object") grads_and_vars = optimizer.compute_gradients(siameseModel.loss) capped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in grads_and_vars] tr_op_set = optimizer.apply_gradients(capped_gvs, global_step=global_step) print("defined training_ops") # Keep track of gradient values and sparsity (optional) grad_summaries = [] for g, v in grads_and_vars: if g is not None: grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g) sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g)) grad_summaries.append(grad_hist_summary) grad_summaries.append(sparsity_summary) grad_summaries_merged = tf.summary.merge(grad_summaries) print("defined gradient summaries") # Output directory for models and summaries timestamp = str(int(time.time())) out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp)) print("Writing to {}\n".format(out_dir)) # Summaries for loss and accuracy loss_summary = tf.summary.scalar("loss", siameseModel.loss) acc_summary = tf.summary.scalar("accuracy", siameseModel.accuracy) # Train Summaries train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged]) train_summary_dir = os.path.join(out_dir, "summaries", "train") train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph) # Dev summaries dev_summary_op = tf.summary.merge([loss_summary, acc_summary]) dev_summary_dir = os.path.join(out_dir, "summaries", "dev") dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph) # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints")) checkpoint_prefix = os.path.join(checkpoint_dir, "model") if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) saver = tf.train.Saver(tf.global_variables(), max_to_keep=100) # Write vocabulary vocab_processor.save(os.path.join(checkpoint_dir, "vocab_")) # Initialize all variables sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) print("init all variables") graph_def = tf.get_default_graph().as_graph_def() graphpb_txt = str(graph_def) with open(os.path.join(checkpoint_dir, "graphpb.txt"), 'w') as f: f.write(graphpb_txt) def train_step(x1_batch, x2_batch, y_batch): """ A single training step """ if random() > 0.5: feed_dict = { siameseModel.input_x1: x1_batch, siameseModel.input_x2: x2_batch, siameseModel.input_y: y_batch, siameseModel.dropout_keep_prob: FLAGS.dropout_keep_prob, } else: feed_dict = { siameseModel.input_x1: x2_batch, siameseModel.input_x2: x1_batch, siameseModel.input_y: y_batch, siameseModel.dropout_keep_prob: FLAGS.dropout_keep_prob, } _, step, loss, accuracy, dist, sim, summaries = sess.run( [tr_op_set, global_step, siameseModel.loss, siameseModel.accuracy, siameseModel.distance, siameseModel.temp_sim, train_summary_op], feed_dict) time_str = datetime.datetime.now().isoformat() print( "TRAIN {}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)) train_summary_writer.add_summary(summaries, step) def dev_step(x1_batch, x2_batch, y_batch): """ A single training step """ if random() > 0.5: feed_dict = { siameseModel.input_x1: x1_batch, siameseModel.input_x2: x2_batch, siameseModel.input_y: y_batch, siameseModel.dropout_keep_prob: 1.0, } else: feed_dict = { siameseModel.input_x1: x2_batch, siameseModel.input_x2: x1_batch, siameseModel.input_y: y_batch, siameseModel.dropout_keep_prob: 1.0, } step, loss, accuracy, sim, summaries = sess.run( [global_step, siameseModel.loss, siameseModel.accuracy, siameseModel.temp_sim, dev_summary_op], feed_dict) time_str = datetime.datetime.now().isoformat() print( "DEV {}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)) dev_summary_writer.add_summary(summaries, step) return accuracy # Generate batches batches = inpH.batch_iter( list(zip(train_set[0], train_set[1], train_set[2])), FLAGS.batch_size, FLAGS.num_epochs) ptr = 0 max_validation_acc = 0.0 count = 0 for nn in range(sum_no_of_batches * FLAGS.num_epochs): batch = batches.__next__() if len(batch) < 1: continue x1_batch, x2_batch, y_batch = zip(*batch) if len(y_batch) < 1: continue train_step(x1_batch, x2_batch, y_batch) current_step = tf.train.global_step(sess, global_step) sum_acc = 0.0 if current_step % FLAGS.evaluate_every == 0: print("\nEvaluation:") dev_batches = inpH.batch_iter(list(zip(dev_set[0], dev_set[1], dev_set[2])), FLAGS.batch_size, 1) for db in dev_batches: if len(db) < 1: continue x1_dev_b, x2_dev_b, y_dev_b = zip(*db) if len(y_dev_b) < 1: continue acc = dev_step(x1_dev_b, x2_dev_b, y_dev_b) sum_acc = sum_acc + acc if current_step % FLAGS.checkpoint_every == 0: if sum_acc >= max_validation_acc: count += 1 max_validation_acc = sum_acc saver.save(sess, checkpoint_prefix, global_step=current_step) print("Saved model {} with sum_accuracy={} checkpoint to {}\n".format(nn, max_validation_acc, checkpoint_prefix)) if count > 100: break
# https://www.hackerrank.com/challenges/one-week-preparation-kit-tree-huffman-decoding/problem import queue as Queue cntr = 0 class Node: def __init__(self, freq, data): self.freq = freq self.data = data self.left = None self.right = None global cntr self._count = cntr cntr = cntr + 1 def __lt__(self, other): if self.freq != other.freq: return self.freq < other.freq return self._count < other._count def huffman_hidden():#builds the tree and returns root q = Queue.PriorityQueue() for key in freq: q.put((freq[key], key, Node(freq[key], key) )) while q.qsize() != 1: a = q.get() b = q.get() obj = Node(a[0] + b[0], '\0' ) obj.left = a[2] obj.right = b[2] q.put((obj.freq, obj.data, obj )) root = q.get() root = root[2]#contains root object return root def dfs_hidden(obj, already): if(obj == None): return elif(obj.data != '\0'): code_hidden[obj.data] = already dfs_hidden(obj.right, already + "1") dfs_hidden(obj.left, already + "0") """class Node: def __init__(self, freq,data): self.freq= freq self.data=data self.left = None self.right = None """ def decodeHuff(root, s): c = root for z in s: c = c.left if(z=='0') else c.right if(c.left==c.right): print(c.data, end='') c = root ip = input() freq = {} #maps each character to its frequency cntr = 0 for ch in ip: if(freq.get(ch) == None): freq[ch] = 1 else: freq[ch]+=1 root = huffman_hidden() #contains root of huffman tree code_hidden = {} #contains code for each object dfs_hidden(root, "") if len(code_hidden) == 1: #if there is only one character in the i/p for key in code_hidden: code_hidden[key] = "0" toBeDecoded = "" for ch in ip: toBeDecoded += code_hidden[ch] decodeHuff(root, toBeDecoded)
# Generated by Django 3.2 on 2021-05-21 10:46

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('tweetes_app', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='tweet',
            name='unique_id',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='tweet',
            name='sentiment',
            field=models.CharField(blank=True, choices=[('POS', 'Positive'), ('NUE', 'Neutral'), ('NEG', 'Negative')], max_length=3),
        ),
    ]
# first run /usr/lib/openoffice.org/program/soffice -silent -invisible -accept="socket,port=8100;urp;" from OOoLib import * import os cSourceFile = os.path.abspath('test.doc') cSourceURL = pathnameToUrl( cSourceFile ) cTargetFile = os.path.abspath('test.pdf') cTargetURL = pathnameToUrl( cTargetFile ) oDoc = openURL( cSourceURL ) oDoc.storeToURL( cTargetURL, (createPropertyValue("FilterName","writer_pdf_Export"),) ) oDoc.dispose()
from .metrics import RETRY_HANDLER_DROP from .metrics import RETRY_HANDLER_FORWARD from .metrics import RETRY_HANDLER_RAISE from .metrics import RETRY_POLICY from .metrics import RETRY_POLICY_TIME from abc import ABC from aiokafka.structs import ConsumerRecord from datetime import datetime from pydantic import BaseModel from typing import Dict from typing import List from typing import Optional from typing import Tuple from typing import Type from typing import TYPE_CHECKING import asyncio import logging if TYPE_CHECKING: # pragma: no cover from .app import Application from .app import Subscription RETRY_HISTORY_FAILURES_MAX_SIZE = 10 class Record(BaseModel): topic: str partition: int offset: int timestamp: int timestamp_type: int key: Optional[str] = None value: str checksum: Optional[int] = None serialized_key_size: int serialized_value_size: int headers: tuple @classmethod def from_consumer_record(cls, record: ConsumerRecord) -> "Record": headers = [(k, v.decode()) for k, v in record.headers or []] return cls( topic=record.topic, partition=record.partition, offset=record.offset, timestamp=record.timestamp, # type: ignore timestamp_type=record.timestamp_type, # type: ignore key=record.key, # type: ignore value=record.value.decode("utf-8"), # type: ignore checksum=record.checksum, # type: ignore serialized_key_size=record.serialized_key_size, # type: ignore serialized_value_size=record.serialized_value_size, # type: ignore headers=headers, ) def to_consumer_record(self) -> ConsumerRecord: # We need to convert the value back into bytes before giving this back to the consumer data = self.dict() data["value"] = data["value"].encode("utf-8") data["headers"] = [(k, v.encode()) for k, v in data["headers"]] return ConsumerRecord(**data) # type: ignore class FailureInfo(BaseModel): exception: str handler_key: str timestamp: datetime class RetryHistory(BaseModel): failures: List[FailureInfo] = [] class RetryMessage(BaseModel): original_record: Record retry_history: RetryHistory class RetryPolicy: logger = logging.getLogger("kafkaesk.retry.retry_policy") def __init__( self, app: "Application", subscription: "Subscription", ): self.app = app self.subscription = subscription self._default_handler: "RetryHandler" = Raise() self._handlers = self.subscription.retry_handlers or {} self._handler_cache: Dict[Type[Exception], Tuple[str, RetryHandler]] = {} self._ready = False if "RetryMessage:1" not in self.app.schemas: self.app.schema("RetryMessage", version=1)(RetryMessage) async def initialize(self) -> None: self.logger.debug("Retry policy initializing retry handlers...") await asyncio.gather(*[handler.initialize(self) for handler in self._handlers.values()]) self.logger.debug("Retry policy initialized") self._ready = True async def finalize(self) -> None: self._ready = False self.logger.debug("Retry policy finalizing retry handlers...") await asyncio.gather(*[handler.finalize() for handler in self._handlers.values()]) self.logger.debug("Retry policy finalized") async def __call__( self, record: ConsumerRecord, exception: Exception, retry_history: Optional[RetryHistory] = None, ) -> None: if self._ready is not True: raise RuntimeError("RetryPolicy is not initalized") self.logger.debug(f"Handling msg retry: {record} {exception}") with RETRY_POLICY_TIME.labels( stream_id=record.topic, partition=record.partition, group_id=self.subscription.group, ).time(): handler_key, handler = self._get_handler(exception) if retry_history is None: retry_history = RetryHistory() # Add information about this failure to the 
history retry_history.failures.append( FailureInfo( exception=exception.__class__.__name__, handler_key=handler_key, timestamp=datetime.now(), ) ) # Enforce a maximum number of failures kept if len(retry_history.failures) > RETRY_HISTORY_FAILURES_MAX_SIZE: retry_history.failures = retry_history.failures[-RETRY_HISTORY_FAILURES_MAX_SIZE:] try: await handler(self, handler_key, retry_history, record, exception) finally: RETRY_POLICY.labels( stream_id=record.topic, partition=record.partition, group_id=self.subscription.group, handler=handler.__class__.__name__, exception=exception.__class__.__name__, ).inc() def _get_handler(self, exception: Exception) -> Tuple[str, "RetryHandler"]: exception_type = exception.__class__ handler_key, handler = self._handler_cache.get(exception_type, (None, None)) if handler is None: handler = self._handlers.get(exception_type) if handler is not None: handler_key = exception_type.__name__ self._handler_cache[exception_type] = (handler_key, handler) if handler is None: for handler_exception_type in self._handlers.keys(): if isinstance(exception, handler_exception_type): handler = self._handlers[handler_exception_type] handler_key = handler_exception_type.__name__ self._handler_cache[exception_type] = (handler_key, handler) break # Return the default handler if handler is None: handler = self._default_handler if handler_key is None: handler_key = "Exception" return (handler_key, handler) def format_record(record: ConsumerRecord) -> str: val = repr(record) if len(val) > 512: return val[:512] + "..." return val class RetryHandler(ABC): """Base class implementing common logic for RetryHandlers All RetryHandler's should implement the following metrics: * RETRY_HANDLER_FORWARD - Note: This is implemented by RetryHandler._forward_message * RETRY_HANDLER_DROP - Note: this is implemented by RetryHandler._drop_message * RETRY_CONSUMER_TOPIC_OFFSET * RETRY_CONSUMER_MESSAGE_LEAD_TIME - Note: If a RetryHandler's consumer expects a delay, this delay should be subtracted from the lead time * RETRY_CONSUMER_CONSUMED_MESSAGE_TIME * RETRY_CONSUMER_CONSUMED_MESSAGES See `kafkaesk.metrics` for more information on each of these metrics """ logger = logging.getLogger("kafkaesk.retry.retry_handler") def __init__(self) -> None: self._ready = False async def initialize(self, policy: RetryPolicy) -> None: self.logger.debug(f"{self.__class__.__name__} retry handler initialized") self._ready = True async def finalize(self) -> None: self.logger.debug(f"{self.__class__.__name__} retry handler finalized") self._ready = False async def __call__( self, policy: RetryPolicy, handler_key: str, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, ) -> None: # pragma: no cover raise NotImplementedError async def _raise_message( self, policy: RetryPolicy, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, ) -> None: self.logger.critical( f"{self.__class__.__name__} handler recieved exception, " f"re-raising exception {format_record(record)}" ) RETRY_HANDLER_RAISE.labels( stream_id=record.topic, partition=record.partition, group_id=policy.subscription.group, handler=self.__class__.__name__, exception=exception.__class__.__name__, ).inc() raise exception from exception async def _drop_message( self, policy: RetryPolicy, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, ) -> None: self.logger.warn( f"{self.__class__.__name__} handler recieved exception, dropping message {record}", exc_info=exception, ) RETRY_HANDLER_DROP.labels( 
stream_id=record.topic, partition=record.partition, group_id=policy.subscription.group, handler=self.__class__.__name__, exception=exception.__class__.__name__, ).inc() async def _forward_message( self, policy: RetryPolicy, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, forward_stream_id: str, ) -> None: self.logger.info( f"{self.__class__.__name__} handler forwarding message " f"to {forward_stream_id}: {format_record(record)}" ) await policy.app.publish( forward_stream_id, RetryMessage( original_record=Record.from_consumer_record(record), retry_history=retry_history ), ) RETRY_HANDLER_FORWARD.labels( stream_id=record.topic, partition=record.partition, group_id=policy.subscription.group, handler=self.__class__.__name__, exception=exception.__class__.__name__, forward_stream_id=forward_stream_id, ).inc() class Raise(RetryHandler): async def __call__( self, policy: RetryPolicy, handler_key: str, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, ) -> None: await self._raise_message(policy, retry_history, record, exception) class Drop(RetryHandler): async def __call__( self, policy: RetryPolicy, handler_key: str, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, ) -> None: self.logger.error( f"{self.__class__.__name__}: Dropping message due to error: {format_record(record)}", exc_info=exception, ) await self._drop_message(policy, retry_history, record, exception) class Forward(RetryHandler): def __init__(self, stream_id: str): self.stream_id = stream_id super().__init__() async def __call__( self, policy: RetryPolicy, handler_key: str, retry_history: RetryHistory, record: ConsumerRecord, exception: Exception, ) -> None: self.logger.error( f"{self.__class__.__name__}: Forwarding message due to error: {format_record(record)}", exc_info=exception, ) await self._forward_message(policy, retry_history, record, exception, self.stream_id)
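A sketch of a custom handler built on the RetryHandler base class above; the idea of forwarding to a dead-letter stream after a fixed number of failures, and the class name, are illustrative assumptions rather than part of the library.

class ForwardAfterRetries(RetryHandler):
    """Illustrative handler: re-raise until a message has failed
    `max_failures` times, then forward it to a dead-letter stream."""

    def __init__(self, stream_id: str, max_failures: int = 3):
        self.stream_id = stream_id
        self.max_failures = max_failures
        super().__init__()

    async def __call__(
        self,
        policy: RetryPolicy,
        handler_key: str,
        retry_history: RetryHistory,
        record: ConsumerRecord,
        exception: Exception,
    ) -> None:
        # retry_history already includes the current failure when the policy calls us.
        if len(retry_history.failures) < self.max_failures:
            await self._raise_message(policy, retry_history, record, exception)
        else:
            await self._forward_message(
                policy, retry_history, record, exception, self.stream_id
            )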
# Copyright (c) 2011 Intel Corporation # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The FilterScheduler is for scheduling of share and share group creation. You can customize this scheduler by specifying your own share/share group filters and weighing functions. """ from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.scheduler.drivers import base from manila.scheduler import scheduler_options from manila.share import share_types CONF = cfg.CONF LOG = log.getLogger(__name__) class FilterScheduler(base.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.cost_function_cache = None self.options = scheduler_options.SchedulerOptions() self.max_attempts = self._max_attempts() def _get_configuration_options(self): """Fetch options dictionary. Broken out for testing.""" return self.options.get_configuration() def get_pools(self, context, filters): return self.host_manager.get_pools(context, filters) def _post_select_populate_filter_properties(self, filter_properties, host_state): """Add additional information to filter properties. Add additional information to the filter properties after a host has been selected by the scheduling process. """ # Add a retry entry for the selected volume backend: self._add_retry_host(filter_properties, host_state.host) def _add_retry_host(self, filter_properties, host): """Add retry entry for the selected volume backend. In the event that the request gets re-scheduled, this entry will signal that the given backend has already been tried. 
""" retry = filter_properties.get('retry') if not retry: return hosts = retry['hosts'] hosts.append(host) def _max_attempts(self): max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: msg = _("Invalid value for 'scheduler_max_attempts', " "must be >=1") raise exception.InvalidParameterValue(err=msg) return max_attempts def schedule_create_share(self, context, request_spec, filter_properties): weighed_host = self._schedule_share(context, request_spec, filter_properties) host = weighed_host.obj.host share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = base.share_update_db(context, share_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share_instance( context, updated_share.instance, host, request_spec=request_spec, filter_properties=filter_properties, snapshot_id=snapshot_id ) def schedule_create_replica(self, context, request_spec, filter_properties): share_replica_id = request_spec['share_instance_properties'].get('id') weighed_host = self._schedule_share( context, request_spec, filter_properties) host = weighed_host.obj.host updated_share_replica = base.share_replica_update_db( context, share_replica_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share_replica( context, updated_share_replica, host, request_spec=request_spec, filter_properties=filter_properties) def _format_filter_properties(self, context, filter_properties, request_spec): elevated = context.elevated() share_properties = request_spec['share_properties'] share_instance_properties = (request_spec.get( 'share_instance_properties', {})) # Since Manila is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. resource_properties = share_properties.copy() resource_properties.update(share_instance_properties.copy()) share_type = request_spec.get("share_type", {}) if not share_type: msg = _("You must create a share type in advance," " and specify in request body or" " set default_share_type in manila.conf.") LOG.error(msg) raise exception.InvalidParameterValue(err=msg) extra_specs = share_type.get('extra_specs', {}) if extra_specs: for extra_spec_name in share_types.get_boolean_extra_specs(): extra_spec = extra_specs.get(extra_spec_name) if extra_spec is not None: if not extra_spec.startswith("<is>"): extra_spec = "<is> %s" % extra_spec share_type['extra_specs'][extra_spec_name] = extra_spec resource_type = request_spec.get("share_type") or {} request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() share_group = request_spec.get('share_group') # NOTE(gouthamr): If 'active_replica_host' is present in the request # spec, pass that host's 'replication_domain' to the # ShareReplication filter. 
active_replica_host = request_spec.get('active_replica_host') replication_domain = None if active_replica_host: temp_hosts = self.host_manager.get_all_host_states_share(elevated) ar_host = next((host for host in temp_hosts if host.host == active_replica_host), None) if ar_host: replication_domain = ar_host.replication_domain # NOTE(zengyingzhe): remove the 'share_backend_name' extra spec, # let scheduler choose the available host for this replica # creation request. share_type.get('extra_specs', {}).pop('share_backend_name', None) if filter_properties is None: filter_properties = {} self._populate_retry_share(filter_properties, resource_properties) filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': resource_type, 'share_group': share_group, 'replication_domain': replication_domain, }) self.populate_filter_properties_share(request_spec, filter_properties) return filter_properties, share_properties def _schedule_share(self, context, request_spec, filter_properties=None): """Returns a list of hosts that meet the required specs. The list is ordered by their fitness. """ elevated = context.elevated() filter_properties, share_properties = self._format_filter_properties( context, filter_properties, request_spec) # Find our local list of acceptable hosts by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. hosts = self.host_manager.get_all_host_states_share(elevated) # Filter local hosts based on requirements ... hosts, last_filter = self.host_manager.get_filtered_hosts( hosts, filter_properties) if not hosts: msg = _('Failed to find a weighted host, the last executed filter' ' was %s.') raise exception.NoValidHost( reason=msg % last_filter, detail_data={'last_filter': last_filter}) LOG.debug("Filtered share %(hosts)s", {"hosts": hosts}) # weighted_host = WeightedHost() ... the best # host for the job. weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) best_host = weighed_hosts[0] LOG.debug("Choosing for share: %(best_host)s", {"best_host": best_host}) # NOTE(rushiagr): updating the available space parameters at same place best_host.obj.consume_from_share(share_properties) return best_host def _populate_retry_share(self, filter_properties, properties): """Populate filter properties with retry history. Populate filter properties with history of retries for this request. If maximum retries is exceeded, raise NoValidHost. """ max_attempts = self.max_attempts retry = filter_properties.pop('retry', {}) if max_attempts == 1: # re-scheduling is disabled. return # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'hosts': [] # list of share service hosts tried } filter_properties['retry'] = retry share_id = properties.get('share_id') self._log_share_error(share_id, retry) if retry['num_attempts'] > max_attempts: msg = _("Exceeded max scheduling attempts %(max_attempts)d for " "share %(share_id)s") % { "max_attempts": max_attempts, "share_id": share_id } raise exception.NoValidHost(reason=msg) def _log_share_error(self, share_id, retry): """Log any exceptions from a previous share create operation. If the request contained an exception from a previous share create operation, log it to aid debugging. 
""" exc = retry.pop('exc', None) # string-ified exception from share if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts') if not hosts: return # no previously attempted hosts, skip last_host = hosts[-1] LOG.error("Error scheduling %(share_id)s from last share-service: " "%(last_host)s : %(exc)s", { "share_id": share_id, "last_host": last_host, "exc": "exc" }) def populate_filter_properties_share(self, request_spec, filter_properties): """Stuff things into filter_properties. Can be overridden in a subclass to add more data. """ shr = request_spec['share_properties'] inst = request_spec['share_instance_properties'] filter_properties['size'] = shr['size'] filter_properties['availability_zone_id'] = ( inst.get('availability_zone_id') ) filter_properties['user_id'] = shr.get('user_id') filter_properties['metadata'] = shr.get('metadata') def schedule_create_share_group(self, context, share_group_id, request_spec, filter_properties): LOG.info("Scheduling share group %s.", share_group_id) host = self._get_best_host_for_share_group(context, request_spec) if not host: msg = _("No hosts available for share group %s.") % share_group_id raise exception.NoValidHost(reason=msg) msg = "Chose host %(host)s for create_share_group %(group)s." LOG.info(msg, {'host': host, 'group': share_group_id}) updated_share_group = base.share_group_update_db( context, share_group_id, host) self.share_rpcapi.create_share_group( context, updated_share_group, host) def _get_weighted_hosts_for_share_type(self, context, request_spec, share_type): config_options = self._get_configuration_options() # NOTE(ameade): Find our local list of acceptable hosts by # filtering and weighing our options. We virtually consume # resources on it so subsequent selections can adjust accordingly. # NOTE(ameade): Remember, we are using an iterator here. So only # traverse this list once. all_hosts = self.host_manager.get_all_host_states_share(context) if not all_hosts: return [] share_type['extra_specs'] = share_type.get('extra_specs', {}) if share_type['extra_specs']: for spec_name in share_types.get_required_extra_specs(): extra_spec = share_type['extra_specs'].get(spec_name) if extra_spec is not None: share_type['extra_specs'][spec_name] = ( "<is> %s" % extra_spec) filter_properties = { 'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': share_type, 'size': 0, } # Filter local hosts based on requirements ... hosts, last_filter = self.host_manager.get_filtered_hosts( all_hosts, filter_properties) if not hosts: return [] LOG.debug("Filtered %s", hosts) # weighted_host = WeightedHost() ... the best host for the job. 
weighed_hosts = self.host_manager.get_weighed_hosts( hosts, filter_properties) if not weighed_hosts: return [] return weighed_hosts def _get_weighted_hosts_for_share_group_type(self, context, request_spec, share_group_type): config_options = self._get_configuration_options() all_hosts = self.host_manager.get_all_host_states_share(context) if not all_hosts: return [] filter_properties = { 'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_group_type': share_group_type, 'resource_type': share_group_type, } hosts, last_filter = self.host_manager.get_filtered_hosts( all_hosts, filter_properties, CONF.scheduler_default_share_group_filters) if not hosts: return [] LOG.debug("Filtered %s", hosts) weighed_hosts = self.host_manager.get_weighed_hosts( hosts, filter_properties) if not weighed_hosts: return [] return weighed_hosts def _get_weighted_candidates_share_group(self, context, request_spec): """Finds hosts that support the share group. Returns a list of hosts that meet the required specs, ordered by their fitness. """ elevated = context.elevated() shr_types = request_spec.get("share_types") weighed_hosts = [] for iteration_count, share_type in enumerate(shr_types): temp_weighed_hosts = self._get_weighted_hosts_for_share_type( elevated, request_spec, share_type) # NOTE(ameade): Take the intersection of hosts so we have one that # can support all share types of the share group if iteration_count == 0: weighed_hosts = temp_weighed_hosts else: new_weighed_hosts = [] for host1 in weighed_hosts: for host2 in temp_weighed_hosts: if host1.obj.host == host2.obj.host: new_weighed_hosts.append(host1) weighed_hosts = new_weighed_hosts if not weighed_hosts: return [] # NOTE(ameade): Ensure the hosts support the share group type share_group_type = request_spec.get("resource_type", {}) temp_weighed_group_hosts = ( self._get_weighted_hosts_for_share_group_type( elevated, request_spec, share_group_type)) new_weighed_hosts = [] for host1 in weighed_hosts: for host2 in temp_weighed_group_hosts: if host1.obj.host == host2.obj.host: new_weighed_hosts.append(host1) weighed_hosts = new_weighed_hosts return weighed_hosts def _get_best_host_for_share_group(self, context, request_spec): weighed_hosts = self._get_weighted_candidates_share_group( context, request_spec) if not weighed_hosts: return None return weighed_hosts[0].obj.host def host_passes_filters(self, context, host, request_spec, filter_properties): elevated = context.elevated() filter_properties, share_properties = self._format_filter_properties( context, filter_properties, request_spec) hosts = self.host_manager.get_all_host_states_share(elevated) hosts, last_filter = self.host_manager.get_filtered_hosts( hosts, filter_properties) hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) for tgt_host in hosts: if tgt_host.obj.host == host: return tgt_host.obj msg = (_('Cannot place share %(id)s on %(host)s, the last executed' ' filter was %(last_filter)s.') % {'id': request_spec['share_id'], 'host': host, 'last_filter': last_filter}) raise exception.NoValidHost(reason=msg)
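

# --- Hedged illustration (not part of the Manila source above) ---
# A minimal, standalone sketch of the retry bookkeeping that
# _populate_retry_share() and the retry-host helper above maintain inside
# filter_properties. The function name, the max_attempts value, and the
# host string below are assumptions used purely for illustration; the
# real method additionally logs previous errors and raises NoValidHost
# once max_attempts is exceeded.

def _populate_retry_sketch(filter_properties, max_attempts=3):
    retry = filter_properties.pop('retry', {})
    if max_attempts == 1:
        return  # re-scheduling is disabled, keep no history
    if retry:
        retry['num_attempts'] += 1
    else:
        retry = {'num_attempts': 1, 'hosts': []}
    filter_properties['retry'] = retry


if __name__ == "__main__":
    props = {}
    _populate_retry_sketch(props)                           # first attempt
    props['retry']['hosts'].append('hostA@backend#pool')    # host chosen for attempt 1
    _populate_retry_sketch(props)                           # request re-scheduled
    assert props['retry'] == {'num_attempts': 2, 'hosts': ['hostA@backend#pool']}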
# Generated by Django 3.2.8 on 2021-11-29 14:33 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('student', '0020_thesorting'), ] operations = [ migrations.RemoveField( model_name='classroom', name='class_The_sorting', ), ]
#!/usr/bin/env python # encoding: utf-8 from werobot.session import SessionStorage from werobot.utils import json_loads, json_dumps from DjangoBlog.utils import cache class MemcacheStorage(SessionStorage): def __init__(self, prefix='ws_'): self.prefix = prefix self.cache = cache @property def is_available(self): value = "1" self.set('checkavaliable', value=value) return value == self.get('checkavaliable') def key_name(self, s): return '{prefix}{s}'.format(prefix=self.prefix, s=s) def get(self, id): id = self.key_name(id) session_json = self.cache.get(id) or '{}' return json_loads(session_json) def set(self, id, value): id = self.key_name(id) self.cache.set(id, json_dumps(value)) def delete(self, id): id = self.key_name(id) self.cache.delete(id)
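

# --- Hedged usage sketch (requires the project's Django cache to be configured) ---
# Round trip through MemcacheStorage above; the session id and payload are
# arbitrary example values.
#
#   storage = MemcacheStorage(prefix='ws_')
#   if storage.is_available:
#       storage.set('openid-123', {'step': 1})
#       assert storage.get('openid-123') == {'step': 1}
#       storage.delete('openid-123')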
# (C) Datadog, Inc. 2020-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from codecs import open # To use a consistent encoding from os import path from setuptools import setup HERE = path.dirname(path.abspath(__file__)) # Get version info with open(path.join(HERE, 'datadog_checks', 'scylla', '__about__.py'), 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line.startswith('__version__'): VERSION = line.split('=')[1].strip(' \'"') break else: VERSION = '0.0.1' # Get the long description from the README file with open(path.join(HERE, 'README.md'), encoding='utf-8') as f: long_description = f.read() def get_dependencies(): dep_file = path.join(HERE, 'requirements.in') if not path.isfile(dep_file): return [] with open(dep_file, encoding='utf-8') as f: return f.readlines() CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0' setup( name='datadog-scylla', version=VERSION, description='The Scylla check', long_description=long_description, long_description_content_type='text/markdown', keywords='datadog agent scylla check', # The project's main homepage. url='https://github.com/DataDog/integrations-core', # Author details author='Datadog', author_email='packages@datadoghq.com', # License license='BSD-3-Clause', # See https://pypi.org/classifiers classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Topic :: System :: Monitoring', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.8', ], # The package we're going to ship packages=['datadog_checks.scylla'], # Run-time dependencies install_requires=[CHECKS_BASE_REQ], extras_require={'deps': get_dependencies()}, # Extra files to ship with the wheel package include_package_data=True, )
# Advent of Code 2020 - Day 9 - Encoding Error - Part 2
# https://adventofcode.com/2020/day/9

import sys

from day09_part1 import parse_input, find_invalid_number


def find_contiguous_set(numbers, target):
    # Look for a contiguous run of at least two numbers that sums to target.
    # The slice end must be able to reach len(numbers) so runs ending at the
    # last element are considered too.
    for start in range(0, len(numbers) - 1):
        for stop in range(start + 2, len(numbers) + 1):
            candidates = numbers[start:stop]
            total = sum(candidates)
            if total > target:
                break
            if total == target:
                return candidates


def part2(numbers):
    invalid_number = find_invalid_number(numbers)
    contiguous_set = find_contiguous_set(numbers, invalid_number)
    return min(contiguous_set) + max(contiguous_set)


if __name__ == "__main__":
    print(part2(parse_input(sys.stdin.readlines())))
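

# --- Hedged worked example, based on the sample from the puzzle description ---
# In the example sequence the first invalid number is 127; the contiguous run
# 15 + 25 + 47 + 40 == 127, so the encryption weakness is 15 + 47 == 62.
#
#   sample = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95,
#             102, 117, 150, 182, 127, 219, 299, 277, 309, 576]
#   run = find_contiguous_set(sample, 127)
#   assert run == [15, 25, 47, 40]
#   assert min(run) + max(run) == 62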
from django.urls import path, include from rest_framework.routers import DefaultRouter from .viewsets import ( CountryViewSet, CategoryViewSet, ItemVariantViewSet, ReviewViewSet, ItemViewSet, ) router = DefaultRouter() router.register("country", CountryViewSet) router.register("itemvariant", ItemVariantViewSet) router.register("item", ItemViewSet) router.register("review", ReviewViewSet) router.register("category", CategoryViewSet) urlpatterns = [ path("", include(router.urls)), ]
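

# --- Hedged note on the generated routes ---
# DefaultRouter registers list and detail routes for each viewset plus an API
# root view at "", so the include above exposes URL patterns such as:
#   country/        country/{pk}/
#   itemvariant/    itemvariant/{pk}/
#   item/           item/{pk}/
#   review/         review/{pk}/
#   category/       category/{pk}/
# Any extra @action routes defined on the viewsets are added as well.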
from contextlib import ContextDecorator from gcloudc.db.backends.datastore import caching class DisableCache(ContextDecorator): def __enter__(self): self.context = caching.get_context() self.context.context_enabled = False return self def __exit__(self, *args, **kwargs): self.context.context_enabled = True return False disable_cache = DisableCache
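

# --- Hedged usage sketch ---
# DisableCache is a ContextDecorator, so it works as a context manager or as a
# decorator. Note that ``disable_cache`` above is bound to the class itself, so
# it has to be called to obtain an instance; MyModel below is an assumption
# used only for illustration.
#
#   with disable_cache():
#       obj = MyModel.objects.get(pk=some_pk)  # bypasses the datastore context cache
#
#   @disable_cache()
#   def fetch_fresh(pk):
#       return MyModel.objects.get(pk=pk)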
import pickle import pytest from pydantic import BaseModel class Model(BaseModel): a: float b: int = 10 def test_simple_construct(): m = Model.construct(dict(a=40, b=10), {'a', 'b'}) assert m.a == 40 assert m.b == 10 def test_construct_missing(): m = Model.construct(dict(a='not a float'), {'a'}) assert m.a == 'not a float' with pytest.raises(AttributeError) as exc_info: print(m.b) assert "'Model' object has no attribute 'b'" in str(exc_info) def test_large_any_str(): class Model(BaseModel): a: bytes b: str content_bytes = b"x" * (2 ** 16 + 1) content_str = "x" * (2 ** 16 + 1) m = Model(a=content_bytes, b=content_str) assert m.a == content_bytes assert m.b == content_str def test_simple_copy(): m = Model(a=24) m2 = m.copy() assert m.a == m2.a == 24 assert m.b == m2.b == 10 assert m == m2 assert m.__fields__ == m2.__fields__ class ModelTwo(BaseModel): a: float b: int = 10 c: str = 'foobar' d: Model def test_deep_copy(): m = ModelTwo(a=24, d=Model(a='12')) m2 = m.copy(deep=True) assert m.a == m2.a == 24 assert m.b == m2.b == 10 assert m.c == m2.c == 'foobar' assert m.d is not m2.d assert m == m2 assert m.__fields__ == m2.__fields__ def test_copy_exclude(): m = ModelTwo(a=24, d=Model(a='12')) m2 = m.copy(exclude={'b'}) assert m.a == m2.a == 24 assert isinstance(m2.d, Model) assert m2.d.a == 12 assert hasattr(m2, 'c') assert not hasattr(m2, 'b') assert set(m.dict().keys()) == {'a', 'b', 'c', 'd'} assert set(m2.dict().keys()) == {'a', 'c', 'd'} assert m != m2 def test_copy_include(): m = ModelTwo(a=24, d=Model(a='12')) m2 = m.copy(include={'a'}) assert m.a == m2.a == 24 assert set(m.dict().keys()) == {'a', 'b', 'c', 'd'} assert set(m2.dict().keys()) == {'a'} assert m != m2 def test_copy_include_exclude(): m = ModelTwo(a=24, d=Model(a='12')) m2 = m.copy(include={'a', 'b', 'c'}, exclude={'c'}) assert set(m.dict().keys()) == {'a', 'b', 'c', 'd'} assert set(m2.dict().keys()) == {'a', 'b'} def test_copy_update(): m = ModelTwo(a=24, d=Model(a='12')) m2 = m.copy(update={'a': 'different'}) assert m.a == 24 assert m2.a == 'different' assert set(m.dict().keys()) == set(m2.dict().keys()) == {'a', 'b', 'c', 'd'} assert m != m2 def test_copy_set_fields(): m = ModelTwo(a=24, d=Model(a='12')) m2 = m.copy() assert m.dict(skip_defaults=True) == {'a': 24.0, 'd': {'a': 12}} assert m.dict(skip_defaults=True) == m2.dict(skip_defaults=True) def test_simple_pickle(): m = Model(a='24') b = pickle.dumps(m) m2 = pickle.loads(b) assert m.a == m2.a == 24 assert m.b == m2.b == 10 assert m == m2 assert m is not m2 assert tuple(m) == (('a', 24.0), ('b', 10)) assert tuple(m2) == (('a', 24.0), ('b', 10)) assert m.__fields__ == m2.__fields__ def test_recursive_pickle(): m = ModelTwo(a=24, d=Model(a='123.45')) m2 = pickle.loads(pickle.dumps(m)) assert m == m2 assert m.d.a == 123.45 assert m2.d.a == 123.45 assert m.__fields__ == m2.__fields__ def test_immutable_copy(): class Model(BaseModel): a: int b: int class Config: allow_mutation = False m = Model(a=40, b=10) assert m == m.copy() m2 = m.copy(update={'b': 12}) assert str(m2) == 'Model a=40 b=12' with pytest.raises(TypeError): m2.b = 13 def test_pickle_fields_set(): m = Model(a=24) assert m.dict(skip_defaults=True) == {'a': 24} m2 = pickle.loads(pickle.dumps(m)) assert m2.dict(skip_defaults=True) == {'a': 24}
import json import logging import re import time from json import JSONDecodeError from typing import Optional, Tuple, Dict, Any from requests import HTTPError, Response from importer import JSON from importer.functions import requests_get from importer.models import CachedObject logger = logging.getLogger(__name__) class BaseLoader: """Provides a json and file download function. This class can be overwritten for vendor specific fixups """ def __init__(self, system: JSON) -> None: self.system = system def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON: logger.debug(f"Loader is loading {url}") if query is None: query = dict() response = requests_get(url, params=query) data = response.json() if data is None: # json() can actually return None data = dict() if "id" in data and data["id"] != url: logger.warning(f"Mismatch between url and id. url: {url} id: {data['id']}") return data def load_file(self, url: str) -> Tuple[bytes, Optional[str]]: """Returns the content and the content type""" response = requests_get(url) content = response.content content_type = response.headers.get("Content-Type") return content, content_type class SternbergLoader(BaseLoader): empty_list_error = { "error": "Die angeforderte Ressource wurde nicht gefunden.", "code": 802, "type": "SD.NET RIM Webservice", } empty_page = {"data": [], "links": {}, "pagination": {}} def visit_object(self, response: JSON): if response.get("type") == "https://schema.oparl.org/1.0/File": if "accessUrl" in response: response["accessUrl"] = response["accessUrl"].replace( r"files//rim", r"files/rim" ) if "downloadUrl" in response: response["downloadUrl"] = response["downloadUrl"].replace( r"files//rim", r"files/rim" ) if response.get("type") == "https://schema.oparl.org/1.0/Body": # Check for a missing leading zero ags = response.get("ags") if ags and len(ags) == 7: # noinspection PyTypeChecker response["ags"] = "0" + ags def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON: if query is None: query = dict() try: response = super().load(url, query) # type: Dict[str, Any] except HTTPError as error: # Sometimes, an error is returned when the list would have been empty if ( error.response.status_code == 404 and "modified_since" in query and error.response.json() == self.empty_list_error ): response = self.empty_page else: raise error # Sometime, an empty list is returned instead of an object with an empty list if "modified_since" in query and response == []: response = self.empty_page if response.get("deleted", False) and "type" not in response: response["type"] = ( "https://schema.oparl.org/1.0/" + url.split("/")[-2].title() ) # Instead of the body list, there's only a body # https://ris.krefeld.de/webservice/oparl/v1.0/body if url.endswith("/body") and "id" in response: response = {"data": [response], "pagination": {}, "links": {}} if "/body" in url: # Add missing "type"-attributes in body-lists if "data" in response: for data in response["data"]: if "location" in data.keys() and isinstance(data["location"], dict): data["location"][ "type" ] = "https://schema.oparl.org/1.0/Location" # There are deleted entries in unfiltered external lists (which they shouldn't) and then # they don't even have type attributes (which are mandatory) for entry in response["data"][:]: if entry.get("deleted") and "type" not in entry: response["data"].remove(entry) # Add missing "type"-attributes in single bodies if "location" in response.keys() and isinstance(response["location"], dict): response["location"]["type"] = 
"https://schema.oparl.org/1.0/Location" # Location in Person must be a url, not an object if "/person" in url and "data" in response: for data in response["data"]: if "location" in data and isinstance(data["location"], dict): data["location"] = data["location"]["id"] if "/organization" in url and "data" in response: for data in response["data"]: if "id" in data and "type" not in data: data["type"] = "https://schema.oparl.org/1.0/Organization" if "/membership" in url: # If an array is returned instead of an object, we just skip all list entries except for the last one if isinstance(response, list): response = response[0] if "/person" in url: if "location" in response and not isinstance(response["location"], str): response["location"] = response["location"]["id"] if "/meeting" in url: if "location" in response and not isinstance(response["location"], str): response["location"]["type"] = "https://schema.oparl.org/1.0/Location" if "data" in response: for data in response["data"]: self.visit_object(data) else: self.visit_object(response) return response def load_file(self, url: str) -> Tuple[bytes, Optional[str]]: try: content, content_type = super().load_file(url) except HTTPError as error: # Sometimes (if there's a dot in the filename(?)), the extension gets overriden # by repeating the part after the dot in the extension-less filename splitted = error.response.url.split(".") if ( error.response.status_code == 404 and len(splitted) > 2 and splitted[-2] == splitted[-1] ): new_url = ".".join(splitted[:-1]) + ".pdf" content, content_type = super().load_file(new_url) else: raise error if content_type == "application/octetstream; charset=UTF-8": content_type = None return content, content_type class CCEgovLoader(BaseLoader): def visit(self, data: JSON): """Removes quirks like `"streetAddress": " "` in Location""" # `"auxiliaryFile": { ... }` -> `"auxiliaryFile": [{ ... }]` if "auxiliaryFile" in data and isinstance(data["auxiliaryFile"], dict): logger.warning( f"auxiliaryFile is supposed to be an array of objects, " f"but is an object (in {data.get('id')})" ) data["auxiliaryFile"] = [data["auxiliaryFile"]] for key, value in data.copy().items(): if isinstance(value, dict): self.visit(value) if isinstance(value, list): for i in value: if isinstance(i, dict): self.visit(i) elif isinstance(value, str): if value == "N/A" or not value.strip(): del data[key] def load(self, url: str, query: Optional[dict] = None) -> JSON: logger.debug(f"Loader is loading {url}") if query is None: query = dict() try: response = requests_get(url, params=query) except HTTPError as e: if e.response.status_code == 500: logger.error(f"Got an 500 for a CC e-gov request, retrying: {e}") response = requests_get(url, params=query) else: raise text = response.text try: data = json.loads(text) except JSONDecodeError: logger.error( f"The server returned invalid json. This is a bug in the OParl implementation: {url}" ) # Hack with based on std json code to load broken json where the control characters (U+0000 through # U+001F except \n) weren't properly escaped ESCAPE = re.compile(r"[\x00-\x09\x0B-\x1f]") ESCAPE_DCT = {} for i in range(0x20): ESCAPE_DCT.setdefault(chr(i), "\\u{0:04x}".format(i)) def replace(match): return ESCAPE_DCT[match.group(0)] text = ESCAPE.sub(replace, text) data = json.loads(text) if data is None: # json() can actually return None data = dict() if "id" in data and data["id"] != url: logger.warning(f"Mismatch between url and id. 
url: {url} id: {data['id']}") self.visit(data) return data def load_file(self, url: str) -> Tuple[bytes, Optional[str]]: """Returns the content and the content type""" response = requests_get(url) content = response.content content_type = response.headers.get("Content-Type") return content, content_type class SomacosLoader(BaseLoader): max_retries: int = 3 error_sleep_seconds: int = 5 def get_with_retry_on_500(self, url: str) -> Response: """Custom retry logic with logging and backoff""" current_try = 1 while True: try: return requests_get(url) except HTTPError as e: if e.response.status_code == 500: if current_try == self.max_retries: logger.error( f"Request failed {self.max_retries} times with an Error 500, aborting: {e}" ) raise else: logger.error( f"Got an 500 for a Somacos request, " f"retrying after sleeping {self.error_sleep_seconds}s: {e}" ) time.sleep(self.error_sleep_seconds) current_try += 1 continue else: raise def load(self, url: str, query: Optional[Dict[str, str]] = None) -> JSON: if query: # Somacos doesn't like encoded urls url = ( url + "?" + "&".join([key + "=" + value for key, value in query.items()]) ) logger.debug(f"Loader is loading {url}") response = self.get_with_retry_on_500(url) data = response.json() if "id" in data and data["id"] != url: logger.warning(f"Mismatch between url and id. url: {url} id: {data['id']}") return data def get_loader_from_system(entrypoint: str) -> BaseLoader: response = requests_get(entrypoint) system = response.json() if system.get("contactName") == "STERNBERG Software GmbH & Co. KG": logger.info("Using Sternberg patches") return SternbergLoader(system) elif ( system.get("vendor") == "http://cc-egov.de/" or system.get("vendor") == "https://www.cc-egov.de" ): logger.info("Using CC e-gov patches") return CCEgovLoader(system) elif ( system.get("vendor") == "http://www.somacos.de" or system.get("product") == "Sitzungsmanagementsystem Session Copyright SOMACOS GmbH & Co. KG" ): logger.info("Using Somacos patches ") return SomacosLoader(system) else: logger.info("Using no vendor specific patches") return BaseLoader(system) def get_loader_from_body(body_id: str) -> BaseLoader: """ Assumptions: * The body->system link hasn't changed * The system might have, e.g. to a newer version where we don't workarounds anymore """ cached_body = CachedObject.objects.filter(url=body_id).first() if cached_body: logger.info(f"The body {body_id} is cached") system_id = cached_body.data["system"] else: logger.info(f"Fetching the body {body_id}") response = requests_get(body_id) data = response.json() CachedObject.objects.create( url=data["id"], oparl_type=data["type"], data=data, to_import=False ) system_id = data["system"] return get_loader_from_system(system_id)
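

# --- Hedged illustration of CCEgovLoader.visit() above ---
# visit() strips "N/A" and blank string fields and wraps a single
# auxiliaryFile object into a list. The OParl ids below are made up for the
# example.
#
#   loader = CCEgovLoader(system={})
#   record = {
#       "id": "https://example.com/oparl/paper/1",
#       "streetAddress": "  ",
#       "name": "N/A",
#       "auxiliaryFile": {"id": "https://example.com/oparl/file/1"},
#   }
#   loader.visit(record)
#   # record == {"id": "https://example.com/oparl/paper/1",
#   #            "auxiliaryFile": [{"id": "https://example.com/oparl/file/1"}]}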
# Generated by Django 3.0.7 on 2020-07-03 18:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('websites', '0026_auto_20200630_2307'), ] operations = [ migrations.AlterField( model_name='websites', name='is_active', field=models.BooleanField(default=True), ), ]
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 1.8.2. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '9)n!s!*u+f8b2@$&wz64a7bn((_%!hrisk6rq_s)y2wq*bhg_e' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
import FWCore.ParameterSet.Config as cms from L1Trigger.TrackTrigger.TrackTrigger_cff import * from SimTracker.TrackTriggerAssociation.TrackTriggerAssociator_cff import * from L1Trigger.TrackerDTC.ProducerED_cff import * from L1Trigger.TrackFindingTracklet.L1HybridEmulationTracks_cff import * L1TrackTrigger=cms.Sequence(TrackTriggerClustersStubs*TrackTriggerAssociatorClustersStubs*TrackerDTCProducer) # Customisation to enable TTTracks in geometry D41 and later (corresponding to phase2_trackerV14 or later). Includes the HGCAL L1 trigger _tttracks_l1tracktrigger = L1TrackTrigger.copy() _tttracks_l1tracktrigger = cms.Sequence(_tttracks_l1tracktrigger + L1PromptExtendedHybridTracksWithAssociators) from Configuration.Eras.Modifier_phase2_trigger_cff import phase2_trigger phase2_trigger.toReplaceWith( L1TrackTrigger, _tttracks_l1tracktrigger ) TTStubAlgorithm_official_Phase2TrackerDigi_.zMatchingPS = True
import os from setuptools import setup with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme: README = readme.read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) PROJECT_NAME = 'payu' data_files = [] for dirpath, dirnames, filenames in os.walk(PROJECT_NAME): for i, dirname in enumerate(dirnames): if dirname.startswith('.'): del dirnames[i] if '__init__.py' in filenames: continue elif filenames: for f in filenames: data_files.append(os.path.join( dirpath[len(PROJECT_NAME) + 1:], f)) setup( name='django-payu', version='0.5', packages=['payu', 'payu.migrations'], include_package_data=True, description='A simple PayU app for Django.', long_description=README, url='https://github.com/MicroPyramid/django-payu', author='Ashwin Kumar', author_email='ashwin@micropyramid.com', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', ], install_requires=[ 'django>=1.7,<=2.0.8', ], )
# -*- coding: utf-8 -*- __version__ = "0.9.9" from .orm import Model, SoftDeletes, Collection, accessor, mutator, scope from .database_manager import DatabaseManager from .query.expression import QueryExpression from .schema import Schema from .pagination import Paginator, LengthAwarePaginator
from dataclasses import dataclass from bxcommon.models.serializeable_enum import SerializeableEnum class NodeEventType(SerializeableEnum): PEER_CONN_ERR = "PEER_CONN_ERR" PEER_CONN_ESTABLISHED = "PEER_CONN_ESTABLISHED" PEER_CONN_CLOSED = "PEER_CONN_CLOSED" ONLINE = "ONLINE" OFFLINE = "OFFLINE" NOTIFY_OFFLINE = "NOTIFY_OFFLINE" SID_SPACE_EAGER_FETCH = "SID_SPACE_EAGER_FETCH" SID_SPACE_FULL = "SID_SPACE_FULL" SID_SPACE_SWITCH = "SID_SPACE_SWITCH" BLOCKCHAIN_NODE_CONN_ERR = "BLOCKCHAIN_NODE_CONN_ERR" BLOCKCHAIN_NODE_CONN_ESTABLISHED = "BLOCKCHAIN_NODE_CONN_ESTABLISHED" BLOCKCHAIN_NODE_CONN_ADDED = "BLOCKCHAIN_NODE_CONN_ADDED" BLOCKCHAIN_NODE_CONN_REMOVED = "BLOCKCHAIN_NODE_CONN_REMOVED" REMOTE_BLOCKCHAIN_CONN_ERR = "REMOTE_BLOCKCHAIN_CONN_ERR" REMOTE_BLOCKCHAIN_CONN_ESTABLISHED = "REMOTE_BLOCKCHAIN_CONN_ESTABLISHED" TX_SERVICE_FULLY_SYNCED = "TX_SERVICE_FULLY_SYNCED" TX_SERVICE_NOT_SYNCED = "TX_SERVICE_NOT_SYNCED" SWITCHING_RELAYS = "SWITCHING_RELAYS" PEER_CONN_DISABLED = "PEER_CONN_DISABLED" @dataclass class NodeEventModel: # What event happened in a node. node_id: str # pyre-fixme[8]: Attribute has type `NodeEventType`; used as `None`. event_type: NodeEventType = None # pyre-fixme[8]: Attribute has type `str`; used as `None`. peer_ip: str = None # pyre-fixme[8]: Attribute has type `int`; used as `None`. peer_port: int = None # pyre-fixme[8]: Attribute has type `str`; used as `None`. timestamp: str = None # pyre-fixme[8]: Attribute has type `str`; used as `None`. event_id: str = None # pyre-fixme[8]: Attribute has type `str`; used as `None`. payload: str = None # 'type' has been deprecated but remains here for backwards compatibility with versions pre 1.54.0. # TODO: Remove "type" attribute and "__post_init__" once all gateway versions in the BDN are > v1.54.0. Also, remove # the default value of "None" for "event_type" # pyre-fixme[8]: Attribute has type `NodeEventType`; used as `None`. type: NodeEventType = None def __post_init__(self): if self.event_type is None: self.event_type = self.type
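

# --- Hedged illustration of the deprecated "type" fallback ---
# Older gateways may still send "type" instead of "event_type"; __post_init__
# above copies it across. The node id and events below are example values only.
if __name__ == "__main__":
    legacy = NodeEventModel(node_id="node-1", type=NodeEventType.ONLINE)
    assert legacy.event_type is NodeEventType.ONLINE

    current = NodeEventModel(node_id="node-1", event_type=NodeEventType.OFFLINE)
    assert current.event_type is NodeEventType.OFFLINE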
import sys from pathlib import Path import argparse import os import re import json from typing import Dict, Optional, Union, Tuple, cast import xml from xml.dom.minidom import parseString import pkg_resources from pyreball.constants import ( PATH_TO_CONFIG_LOCATION, DEFAULT_PATH_TO_CONFIG, STYLES_TEMPLATE_FILENAME, CONFIG_INI_FILENAME, ) from pyreball.utils.logger import get_logger from pyreball.utils.utils import ( get_file_config, check_and_fix_parameters, merge_parameter_dictionaries, ChoiceParameter, IntegerParameter, carefully_remove_directory_if_exists, Substitutor, ) from pyreball.utils.template_utils import get_css, get_html_begin, get_html_end logger = get_logger() # keep the indentation in the following snippets!!! JAVASCRIPT_CHANGE_EXPAND = """ function change_expand(button, table_id){ var table = document.getElementById(table_id); if (table.classList.contains("expanded")) { // collapse the table table.style.maxHeight = "390px"; button.innerHTML = "⟱"; } else { // expand the table table.style.maxHeight = "none"; button.innerHTML = "⟰"; } table.classList.toggle("expanded"); } """ JAVASCRIPT_ON_LOAD = """ window.onload = function() { //dom not only ready, but everything is loaded scrollers = document.getElementsByClassName("table-scroller"); for (i = 0; i < scrollers.length; i++) { if (scrollers[i].scrollHeight == scrollers[i].clientHeight) { // hide the expand button expander_id = scrollers[i].id.replace('scroller', 'expander'); expander = document.getElementById(expander_id); expander.style.display = "none"; } } }; """ JAVASCRIPT_ROLLING_PLOTS = """ function next(div_id, button_next_id, button_prev_id) { var qElems = document.querySelectorAll(div_id + '>div'); for (var i = 0; i < qElems.length; i++) { if (qElems[i].style.display != 'none') { qElems[i].style.display = 'none'; qElems[i + 1].style.display = 'block'; if (i == qElems.length - 2) { document.getElementById(button_next_id).disabled = true; } document.getElementById(button_prev_id).disabled = false; break; } } } function previous(div_id, button_next_id, button_prev_id) { var qElems = document.querySelectorAll(div_id + '>div'); for (var i = 0; i < qElems.length; i++) { if (qElems[i].style.display != 'none') { qElems[i].style.display = 'none'; qElems[i - 1].style.display = 'block'; if (i == 1) { document.getElementById(button_prev_id).disabled = true; } document.getElementById(button_next_id).disabled = false; break; } } } """ JAVASCRIPT_SORTABLE_TABLE = """ $(document).ready(function () { $('.sortable_table').DataTable({ "paging": false, "searching": false, "info": false, }); }); """ def replace_ids(filename: Path) -> None: # collect all ids in form of table-N-M all_table_and_img_ids = set() with open(filename, "r") as f: for line in f: # note that we don't need to replace only "table" ids by also "img" etc. 
results = re.findall(r'table-id[\d]+-[\d]+', line) if results: all_table_and_img_ids.update(results) results = re.findall(r'img-id[\d]+-[\d]+', line) if results: all_table_and_img_ids.update(results) replacements = [] for element_id in all_table_and_img_ids: re_results = re.search(r'(.+)-(id\d+)-(\d+)', element_id) if re_results: # this must be first replacements.append(("ref-" + re_results.group(2), re_results.group(1) + "-" + re_results.group(3))) # this must be second (because it would catch the first case as well) replacements.append((re_results.group(2) + '(-' + re_results.group(3) + ')?', re_results.group(3))) # replace all table-N-M with table-M and Table N with Table M substitutor = Substitutor(replacements=replacements) modified_lines = [] with open(filename, "r") as f: for line in f: modified_lines.append(substitutor.sub(line)) with open(filename, "w") as f: f.writelines(modified_lines) def _get_node_text(node: xml.dom.minidom.Element) -> str: result = [] for child in node.childNodes: if child.nodeType in (xml.dom.Node.TEXT_NODE, xml.dom.Node.CDATA_SECTION_NODE): result.append(child.data) else: result.extend(_get_node_text(child)) return ''.join(result) def _parse_heading_info(line: str) -> Optional[Tuple[int, str, str]]: heading_pattern = r'<h(\d).+</h(\d)>' m = re.search(heading_pattern, line) if m: heading_level = m.group(1) doc = parseString(m.group(0)) heading = doc.getElementsByTagName("h" + heading_level)[0] heading_id = heading.getAttribute('id') content = _get_node_text(heading).replace('¶', '') return int(heading_level), heading_id, content else: return None def insert_heading_title_and_toc(filename: Path, include_toc: bool = True): # fetch all lines with open(filename, "r") as f: lines = f.readlines() # try to extract the title from <title> element: report_title = None for line in lines: m = re.match(r'^<title class="custom">([^<]*)</title>$', line) if m: report_title = m.group(1) break # get all headings in the report container_start_index = 0 headings = [] for i, line in enumerate(lines): if '<div class="main_container">' in line: container_start_index = i if include_toc: heading_info = _parse_heading_info(line) if heading_info: headings.append(heading_info) if len(headings) > 0 and report_title is None: # only when headings were collected (only when include_toc=True) and there was not title set manually report_title = "Table of Contents" lines_index = container_start_index + 1 # prepare new HTML lines with TOC if report_title is not None: lines.insert( lines_index, f'<h1 id="toc_generated_0">{report_title}<a class="anchor-link" href="#toc_generated_0">¶</a></h1>\n' ) lines_index += 1 current_level = 1 for h in headings: # do we need to add also <ul> ? while h[0] > current_level: lines.insert(lines_index, '<ul style="list-style-type:none; margin:0px">\n') lines_index += 1 current_level += 1 # do we need to add also </ul> ? 
while h[0] < current_level: lines.insert(lines_index, '</ul>\n') lines_index += 1 current_level -= 1 # prepare the line: if h[0] == 1: current_line = '<a href="#' + h[1] + '">' + h[2] + '</a><br/>\n' else: current_line = '<li><a href="#' + h[1] + '">' + h[2] + '</a></li>\n' lines.insert(lines_index, current_line) lines_index += 1 # at the end, get back to level 1 if necessary while 1 < current_level: lines.insert(lines_index, '</ul>\n') lines_index += 1 current_level -= 1 with open(filename, "w") as f: f.writelines(lines) parameter_specifications = [ ChoiceParameter('--toc', choices=['yes', 'no'], default='no', help='Include table of contents.'), ChoiceParameter('--align-tables', choices=['left', 'center', 'right'], default='center', help='Alignment of tables.'), ChoiceParameter('--numbered-tables', choices=['yes', 'no'], default='no', help='Number the tables.'), ChoiceParameter('--sortable-tables', choices=['yes', 'no'], default='no', help='Make the tables sortable.'), ChoiceParameter('--full-tables', choices=['yes', 'no'], default='no', help='Force all tables to be expanded.'), ChoiceParameter('--align-plots', choices=['left', 'center', 'right'], default='center', help='Alignment of plots.'), ChoiceParameter('--numbered-plots', choices=['yes', 'no'], default='no', help='Number the plots.'), ChoiceParameter('--matplotlib-format', choices=['png', 'svg'], default='svg', help='Format of matplotlib plots.'), ChoiceParameter('--matplotlib-embedded', choices=['yes', 'no'], default='no', help='Whether to embedded matplotlib images directly into HTML. Only for svg format.'), ChoiceParameter('--numbered-headings', choices=['yes', 'no'], default='no', help='Number the headings.'), IntegerParameter('--page-width', boundaries=(40, 100), default=80, help='Width of the page in percentage. An integer in the range 40..100.'), ChoiceParameter('--keep-stdout', choices=['yes', 'no'], default='no', help='Print the output to stdout too.'), ] def parse_arguments() -> Dict[str, Optional[Union[str, int]]]: parser = argparse.ArgumentParser(description='Generate Python report.') for input_param in parameter_specifications: input_param.add_argument_to_parser(parser) parser.add_argument('--output-path', help='Output path. It must contain also the name of the output file with ' 'suffix .html. If not provided, the output file name is derived from ' 'the name of the input script and the same directory is used.') parser.add_argument('filename', help='Input file path.') parser.add_argument('script_args', nargs=argparse.REMAINDER) args = parser.parse_args() return vars(args) def get_config_directory() -> Path: """Get the location of the config files. If the configs were generated by pyreball-generate-config command, they should be found. If they were not generated or some of them no longer exist, the default package config will be used. """ config_location_file_path = Path(PATH_TO_CONFIG_LOCATION) if config_location_file_path.exists(): # the config was generated, let's find out its directory config_directory = Path(Path(PATH_TO_CONFIG_LOCATION).read_text()) if not (config_directory / CONFIG_INI_FILENAME).exists() \ or not (config_directory / STYLES_TEMPLATE_FILENAME).exists(): logger.warning(f'{CONFIG_INI_FILENAME} or {STYLES_TEMPLATE_FILENAME} was not found in {config_directory}. ' f'Try re-generating the configs by pyreball-generate-config command. 
For now, we will ' f'use the default package configs.') config_directory = DEFAULT_PATH_TO_CONFIG else: config_directory = DEFAULT_PATH_TO_CONFIG return config_directory def main() -> None: args_dict = parse_arguments() script_args_string = ' '.join(args_dict.pop('script_args')) input_filename = Path(args_dict.pop('filename')) # type: ignore output_path = cast(Optional[str], args_dict.pop('output_path')) if output_path and not output_path.endswith(".html"): raise ValueError("Value of output path parameter must end with .html suffix.") cli_parameters = check_and_fix_parameters(parameters=args_dict, parameter_specifications=parameter_specifications, none_allowed=True) config_directory = get_config_directory() file_config_parameters = get_file_config(filename=CONFIG_INI_FILENAME, directory=config_directory, parameter_specifications=parameter_specifications) parameters = merge_parameter_dictionaries(primary_parameters=cli_parameters, secondary_parameters=file_config_parameters, parameter_specifications=parameter_specifications) if not input_filename.is_file(): raise ValueError(f"File {input_filename} does not exist.") if not output_path: # use the directory of the input file output_dir_path = input_filename.resolve().parents[0] title = input_filename.stem else: output_path = Path(output_path).resolve() title = output_path.stem output_dir_path = output_path.parents[0] output_dir_path.mkdir(parents=True, exist_ok=True) path_str = str(output_dir_path / title) os.environ["_TMP_PYREBALL_GENERATOR_PARAMETERS"] = json.dumps({**parameters, 'html_dir_path': path_str}) html_path = Path(path_str + ".html") # remove the directory with images if it exists: carefully_remove_directory_if_exists(directory=Path(path_str)) script_definitions = (JAVASCRIPT_CHANGE_EXPAND + JAVASCRIPT_ON_LOAD + JAVASCRIPT_SORTABLE_TABLE + JAVASCRIPT_ROLLING_PLOTS) css_definitions = get_css(filename=STYLES_TEMPLATE_FILENAME, directory=config_directory, page_width=cast(int, parameters['page_width'])) html_begin = get_html_begin(template_path=Path(pkg_resources.resource_filename('pyreball', 'cfg/html_begin.template')), title=title, script_definitions=script_definitions, css_definitions=css_definitions) html_end = get_html_end(template_path=Path(pkg_resources.resource_filename('pyreball', 'cfg/html_end.template'))) with open(html_path, 'w') as f: f.write(html_begin) try: # Use {sys.executable} instead of just "python" command as it may not work correctly as a PyCharm external tool os.system(f"{sys.executable} {input_filename} {script_args_string}") finally: with open(html_path, 'a') as f: f.write(html_end) replace_ids(html_path) insert_heading_title_and_toc(filename=html_path, include_toc=parameters['toc'] == 'yes') if __name__ == '__main__': main()
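

# --- Hedged usage note ---
# Assuming main() above is exposed as the ``pyreball`` console script, a report
# could be generated roughly like this (paths and values are examples):
#
#   pyreball --toc yes --numbered-headings yes --page-width 70 \
#       --output-path out/report.html my_script.py
#
# CLI options take precedence over config.ini values, which in turn fall back
# to the parameter defaults (see merge_parameter_dictionaries above).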
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torchvision.models as models import torchwordemb from args import get_parser # ============================================================================= parser = get_parser() opts = parser.parse_args() # # ============================================================================= class TableModule(nn.Module): def __init__(self): super(TableModule, self).__init__() def forward(self, x, dim): y = torch.cat(x, dim) return y def norm(input, p=2, dim=1, eps=1e-12): return input / input.norm(p,dim,keepdim=True).clamp(min=eps).expand_as(input) # Skip-thoughts LSTM class stRNN(nn.Module): def __init__(self): super(stRNN, self).__init__() self.lstm = nn.LSTM(input_size=opts.stDim, hidden_size=opts.srnnDim, bidirectional=False, batch_first=True) def forward(self, x, sq_lengths): # here we use a previous LSTM to get the representation of each instruction # sort sequence according to the length sorted_len, sorted_idx = sq_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx\ .view(-1,1,1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # pack sequence packed_seq = torch.nn.utils.rnn.pack_padded_sequence( sorted_inputs, sorted_len.cpu().data.numpy(), batch_first=True) # pass it to the lstm out, hidden = self.lstm(packed_seq) # unsort the output _, original_idx = sorted_idx.sort(0, descending=False) unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True) unsorted_idx = original_idx.view(-1,1,1).expand_as(unpacked) # we get the last index of each sequence in the batch idx = (sq_lengths-1).view(-1,1).expand(unpacked.size(0), unpacked.size(2)).unsqueeze(1) # we sort and get the last element of each sequence output = unpacked.gather(0, unsorted_idx.long()).gather(1,idx.long()) output = output.view(output.size(0),output.size(1)*output.size(2)) return output class ingRNN(nn.Module): def __init__(self): super(ingRNN, self).__init__() self.irnn = nn.LSTM(input_size=opts.ingrW2VDim, hidden_size=opts.irnnDim, bidirectional=True, batch_first=True) _, vec = torchwordemb.load_word2vec_bin(opts.ingrW2V) self.embs = nn.Embedding(vec.size(0), opts.ingrW2VDim, padding_idx=0) # not sure about the padding idx self.embs.weight.data.copy_(vec) def forward(self, x, sq_lengths): # we get the w2v for each element of the ingredient sequence x = self.embs(x) # sort sequence according to the length sorted_len, sorted_idx = sq_lengths.sort(0, descending=True) index_sorted_idx = sorted_idx\ .view(-1,1,1).expand_as(x) sorted_inputs = x.gather(0, index_sorted_idx.long()) # pack sequence packed_seq = torch.nn.utils.rnn.pack_padded_sequence( sorted_inputs, sorted_len.cpu().data.numpy(), batch_first=True) # pass it to the rnn out, hidden = self.irnn(packed_seq) # unsort the output _, original_idx = sorted_idx.sort(0, descending=False) # LSTM # bi-directional unsorted_idx = original_idx.view(1,-1,1).expand_as(hidden[0]) # 2 directions x batch_size x num features, we transpose 1st and 2nd dimension output = hidden[0].gather(1,unsorted_idx).transpose(0,1).contiguous() output = output.view(output.size(0),output.size(1)*output.size(2)) return output # Im2recipe model class im2recipe(nn.Module): def __init__(self): super(im2recipe, self).__init__() if opts.preModel=='resNet50': resnet = models.resnet50(pretrained=True) modules = list(resnet.children())[:-1] # we do not use the last fc layer. 
            self.visionMLP = nn.Sequential(*modules)

            self.visual_embedding = nn.Sequential(
                nn.Linear(opts.imfeatDim, opts.embDim),
                nn.Tanh(),
            )

            self.recipe_embedding = nn.Sequential(
                nn.Linear(opts.irnnDim*2 + opts.srnnDim, opts.embDim),
                nn.Tanh(),
            )
        else:
            raise Exception('Only resNet50 model is implemented.')

        self.stRNN_ = stRNN()
        self.ingRNN_ = ingRNN()
        self.table = TableModule()

        if opts.semantic_reg:
            self.semantic_branch = nn.Linear(opts.embDim, opts.numClasses)

    def forward(self, x, y1, y2, z1, z2):
        # recipe embedding: concatenate the instruction (skip-thought LSTM) and
        # ingredient (bi-LSTM) representations, then project and L2-normalize
        recipe_emb = self.table([self.stRNN_(y1, y2), self.ingRNN_(z1, z2)], 1)
        recipe_emb = self.recipe_embedding(recipe_emb)
        recipe_emb = norm(recipe_emb)

        # visual embedding: ResNet features, projected and L2-normalized
        visual_emb = self.visionMLP(x)
        visual_emb = visual_emb.view(visual_emb.size(0), -1)
        visual_emb = self.visual_embedding(visual_emb)
        visual_emb = norm(visual_emb)

        if opts.semantic_reg:
            visual_sem = self.semantic_branch(visual_emb)
            recipe_sem = self.semantic_branch(recipe_emb)
            output = [visual_emb, recipe_emb, visual_sem, recipe_sem]
        else:
            output = [visual_emb, recipe_emb]
        return output
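

# --- Hedged illustration of norm() above ---
# norm() L2-normalizes each row of a batch of embeddings; with the toy tensor
# below (values chosen arbitrarily) every row ends up with unit L2 norm.
#
#   x = torch.tensor([[3.0, 4.0], [0.0, 2.0]])
#   norm(x)                   # tensor([[0.6000, 0.8000], [0.0000, 1.0000]])
#   norm(x).norm(p=2, dim=1)  # tensor([1., 1.])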
#!/usr/bin/env python3 import os import sys import time import torch import logging import argparse import numpy as np import pandas as pd import seaborn as sns import os.path as osp import torch.nn as nn import torch.utils.data as data import torch.optim as optim import matplotlib.pyplot as plt import torch.backends.cudnn as cudnn import torch.optim.lr_scheduler as lr_scheduler import torchvision.transforms as transforms import torchvision.datasets as datasets sys.path.append(osp.dirname(os.getcwd())) from models.cifar import Network from utils import Config from utils import get_mean_std from utils import get_subset_dataset from run_models import model_choices from hessian import LayerHessian from hessian import FullHessian def parse_args(): file_purpose = 'train a network with computing full hessian and layer hessian eigenspectrums' parser = argparse.ArgumentParser(description=file_purpose, epilog=file_purpose) dataset_choices = ['CIFAR10', 'CIFAR100', 'MNIST', 'FashionMNIST', 'STL10', 'SVHN'] optimizer_choices = ['sgd', 'adam'] default_learning_rate = 1e-4 default_l2 = 0.0 default_num_epochs = 100 default_dataset = dataset_choices[0] default_batch_size = 256 default_workers = 4 default_model = model_choices[0] default_milestone = [10000] default_step_gamma = 0.1 default_dataset_root = osp.join(osp.dirname(os.getcwd()) ,'datasets') default_log_dir = 'log' default_ckpt_dir = 'ckpt' default_images_dir = 'images' default_seed = 0 default_examples_per_class = 10 parser.add_argument('-lr', '--learning_rate', type=float, default=default_learning_rate, help='learning rate, default={}'.format(default_learning_rate) ) parser.add_argument('-l2', '--weight_decay', type=float, default=default_l2, help='l2 penalty, default={}'.format(default_l2) ) parser.add_argument('-n', '--num_epochs', type=int, default=default_num_epochs, help='number of training epochs, default={}'.format(default_num_epochs) ) parser.add_argument('-o', '--optimizer', type=str, required=True, choices=['sgd', 'adam'], help='optimizer' ) parser.add_argument('-d', '--dataset', type=str, choices=dataset_choices, default=default_dataset, help='type of dataset, default={}'.format(default_dataset) ) parser.add_argument('-pdb', '--with_pdb', action='store_true', help='run with python debugger' ) parser.add_argument('-b', '--batch_size', type=int, default=default_batch_size, help='batch size for training, default={}'.format(default_batch_size) ) parser.add_argument('-j', '--workers', type=int, default=default_workers, help='number of wrokers for dataloader, default={}'.format(default_workers) ) parser.add_argument('--dataset_root', type=str, default=default_dataset_root, help='directory for dataset, default={}'.format(default_dataset_root) ) parser.add_argument('--log_dir', type=str, default=default_log_dir, help='directory for logs, default={}'.format(default_log_dir) ) parser.add_argument('--ckpt_dir', type=str, default=default_ckpt_dir, help='directory to store checkpoints, ' 'default={}'.format(default_ckpt_dir) ) parser.add_argument('--images_dir', type=str, default=default_images_dir, help='directory to store images' ', default={}'.format(default_images_dir) ) parser.add_argument('-m', '--model', type=str, default=default_model, choices=model_choices, help='model type, default={}'.format(default_model) ) parser.add_argument('--cuda', type=int, help='use cuda, if use, then give gpu number' ) parser.add_argument('--loss', type=str, default='ce', choices=['ce'], help='loss name, default=ce' ) parser.add_argument('-r', '--run', 
type=str, help='run directory prefix' ) parser.add_argument('--save_freq', type=int, help='save epoch weights with these freq' ) parser.add_argument('--milestones', type=int, nargs='+', default=default_milestone, help='milestones for multistep-lr scheduler, ' 'default={}'.format(default_milestone) ) parser.add_argument('--step_gamma', type=float, default=default_step_gamma, help='gamma for step-lr scheduler' ', default={}'.format(default_step_gamma) ) parser.add_argument('--augment', action='store_true', help='augment data with random-flip and random crop' ) parser.add_argument('--resume', type=str, help='path to *.pth to resume training' ) parser.add_argument('--seed', type=int, default=default_seed, help='seed for randomness' ) parser.add_argument('--examples_per_class', type=int, default=default_examples_per_class, help='examples per class for hessian computation' ) return parser.parse_args() def get_valid_layers(model): layer_names = list() for name, params in model.named_modules(): if isinstance(params, nn.Conv2d): layer_names.append(name + '.weight') elif isinstance(params, nn.Linear): layer_names.append(name + '.weight') return layer_names def evaluate_model(model, criterion, dataloader, device, dataset_size): model.eval() running_loss = 0.0 running_corrects = 0 with torch.no_grad(): for batch, truth in dataloader: batch = batch.to(device) truth = truth.to(device) output = model(batch) _, preds = torch.max(output, 1) running_corrects += torch.sum(preds == truth) loss = criterion(output, truth) running_loss += loss.item() * batch.size(0) return {'loss': running_loss / dataset_size, 'acc': running_corrects.double() / dataset_size} def train(model, optimizer, scheduler, dataloaders, criterion, device, num_epochs=100, args=None, dataset_sizes={'train': 5e4, 'test': 1e4}, images_dir=None, ckpt_dir=None ): logger = logging.getLogger('train') loss_list = {'train': list(), 'test': list()} acc_list = {'train': list(), 'test': list()} assert images_dir is not None assert ckpt_dir is not None loss_image_path = osp.join(images_dir, 'loss.png') acc_image_path = osp.join(images_dir, 'acc.png') model.train() full_eigenspectrums = list() epoch_eigenspectrums = list() full_eigenspectrums_path = osp.join(ckpt_dir, 'training_eigenspectrum_full.npy') C = config.num_classes valid_layers = get_valid_layers(model) for epoch in range(num_epochs): logger.info('epoch: %d' % epoch) with torch.enable_grad(): for batch, truth in dataloaders['train']: batch = batch.to(device) truth = truth.to(device) optimizer.zero_grad() output = model(batch) loss = criterion(output, truth) loss.backward() optimizer.step() scheduler.step() # updates finished for epochs mean, std = get_mean_std(args.dataset) pad = int((config.padded_im_size-config.im_size)/2) transform = transforms.Compose([transforms.Pad(pad), transforms.ToTensor(), transforms.Normalize(mean,std)]) if args.dataset in ['MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100']: full_dataset = getattr(datasets, args.dataset) subset_dataset = get_subset_dataset(full_dataset=full_dataset, examples_per_class=args.examples_per_class, epc_seed=config.epc_seed, root=osp.join(args.dataset_root, args.dataset), train=True, transform=transform, download=True ) elif args.dataset in ['STL10', 'SVHN']: full_dataset = getattr(datasets, args.dataset) subset_dataset = get_subset_dataset(full_dataset=full_dataset, examples_per_class=args.examples_per_class, epc_seed=config.epc_seed, root=osp.join(args.dataset_root, args.dataset), split='train', transform=transform, download=True ) else: 
raise Exception('Unknown dataset: {}'.format(args.dataset)) loader = data.DataLoader(dataset=subset_dataset, drop_last=False, batch_size=args.batch_size) Hess = FullHessian(crit='CrossEntropyLoss', loader=loader, device=device, model=model, num_classes=C, hessian_type='Hessian', init_poly_deg=64, poly_deg=128, spectrum_margin=0.05, poly_points=1024, SSI_iters=128 ) Hess_eigval, \ Hess_eigval_density = Hess.LanczosLoop(denormalize=True) full_eigenspectrums.append(Hess_eigval) full_eigenspectrums.append(Hess_eigval_density) for layer_name, _ in model.named_parameters(): if layer_name not in valid_layers: continue Hess = LayerHessian(crit='CrossEntropyLoss', loader=loader, device=device, model=model, num_classes=C, layer_name=layer_name, hessian_type='Hessian', init_poly_deg=64, poly_deg=128, spectrum_margin=0.05, poly_points=1024, SSI_iters=128 ) Hess_eigval, \ Hess_eigval_density = Hess.LanczosLoop(denormalize=True) layerwise_eigenspectrums_path = osp.join(ckpt_dir, 'training_eigenspectrums_epoch_{}_layer_{}.npz'.format(epoch, layer_name)) np.savez(layerwise_eigenspectrums_path, eigval=Hess_eigval, eigval_density=Hess_eigval_density) for phase in ['train', 'test']: stats = evaluate_model(model, criterion, dataloaders[phase], device, dataset_sizes[phase]) loss_list[phase].append(stats['loss']) acc_list[phase].append(stats['acc']) logger.info('{}:'.format(phase)) logger.info('\tloss:{}'.format(stats['loss'])) logger.info('\tacc :{}'.format(stats['acc'])) if phase == 'test': plt.clf() plt.plot(loss_list['test'], label='test_loss') plt.plot(loss_list['train'], label='train_loss') plt.legend() plt.savefig(loss_image_path) plt.clf() plt.plot(acc_list['test'], label='test_acc') plt.plot(acc_list['train'], label='train_acc') plt.legend() plt.savefig(acc_image_path) plt.clf() full_eigenspectrums = np.array(full_eigenspectrums) assert full_eigenspectrums.shape[0] % 2 == 0 assert full_eigenspectrums.shape[0] // 2 == num_epochs np.save(full_eigenspectrums_path, full_eigenspectrums) return full_eigenspectrums if __name__ == '__main__': args = parse_args() if args.with_pdb: import pdb pdb.set_trace() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False sns.set_style('darkgrid') if args.cuda is None: device = torch.device('cpu') else: device = torch.device('cuda:%d' % args.cuda) log_dir = osp.join(args.run, 'logs') ckpt_dir = osp.join(args.run, 'ckpt') images_dir = osp.join(args.run, 'images') if not osp.exists(args.run): os.makedirs(args.run) if not osp.exists(log_dir): os.makedirs(log_dir) if not osp.exists(ckpt_dir): os.makedirs(ckpt_dir) if not osp.exists(images_dir): os.makedirs(images_dir) logging.basicConfig(level=logging.INFO, format='%(message)s') logging_file = osp.join(log_dir, 'training_eigenspectrum.log') logger = logging.getLogger('train') with open(logging_file, 'w+') as f: pass logger_file_handler = logging.FileHandler(logging_file) logger.addHandler(logger_file_handler) logger.info('Arguments: {}'.format(args)) mean, std = get_mean_std(args.dataset) if args.dataset in ['MNIST', 'FashionMNIST']: input_ch = 1 padded_im_size = 32 num_classes = 10 im_size = 28 epc_seed = 0 config = Config(input_ch=input_ch, padded_im_size=padded_im_size, num_classes=num_classes, im_size=im_size, epc_seed=epc_seed ) dataset_sizes = {'train': 6e4, 'test': 1e4} elif args.dataset in ['CIFAR10', 'CIFAR100']: input_ch = 3 padded_im_size = 32 if args.dataset == 'CIFAR10': num_classes = 10 elif args.dataset == 'CIFAR100': 
num_classes = 100 else: raise Exception('Should not have reached here') im_size = 32 epc_seed = 0 config = Config(input_ch=input_ch, padded_im_size=padded_im_size, num_classes=num_classes, im_size=im_size, epc_seed=epc_seed ) dataset_sizes = {'train': 5e4, 'test': 1e4} elif args.dataset in ['STL10']: input_ch = 3 padded_im_size = 102 num_classes = 10 im_size = 96 epc_seed = 0 config = Config(input_ch=input_ch, padded_im_size=padded_im_size, num_classes=num_classes, im_size=im_size, epc_seed=epc_seed ) dataset_sizes = {'train': 5000, 'test': 8000} elif args.dataset in ['SVHN']: input_ch = 3 padded_im_size = 32 num_classes = 11 im_size = 32 epc_seed = 0 config = Config(input_ch=input_ch, padded_im_size=padded_im_size, num_classes=num_classes, im_size=im_size, epc_seed=epc_seed ) dataset_sizes = {'train': 73257, 'test': 26032} else: raise Exception('Should not have reached here') pad = (config.padded_im_size - config.im_size) // 2 if args.augment: train_transform = transforms.Compose([ transforms.Pad(pad), transforms.ColorJitter(), transforms.RandomCrop(config.padded_im_size, padding=pad), transforms.RandomRotation(30), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean, std), ]) else: train_transform = transforms.Compose([ transforms.Pad(pad), transforms.ToTensor(), transforms.Normalize(mean, std)]) test_transform = transforms.Compose([ transforms.Pad(pad), transforms.ToTensor(), transforms.Normalize(mean, std)] ) if args.dataset == 'MNIST': train_data = datasets.MNIST(osp.join(args.dataset_root, 'MNIST'), train=True, transform=train_transform, download=True) test_data = datasets.MNIST(osp.join(args.dataset_root, 'MNIST'), train=False, transform=test_transform, download=True) elif args.dataset == 'FashionMNIST': train_data = datasets.FashionMNIST(osp.join(args.dataset_root, 'FashionMNIST'), train=True, transform=train_transform, download=False) test_data = datasets.FashionMNIST(osp.join(args.dataset_root, 'FashionMNIST'), train=False, transform=test_transform, download=False) elif args.dataset == 'CIFAR10': train_data = datasets.CIFAR10(osp.join(args.dataset_root, 'CIFAR10'), train=True, transform=train_transform, download=False) test_data = datasets.CIFAR10(osp.join(args.dataset_root, 'CIFAR10'), train=False, transform=test_transform, download=False) elif args.dataset == 'CIFAR100': train_data = datasets.CIFAR100(osp.join(args.dataset_root, 'CIFAR100'), train=True, transform=train_transform, download=False) test_data = datasets.CIFAR100(osp.join(args.dataset_root, 'CIFAR100'), train=False, transform=test_transform, download=False) elif args.dataset == 'STL10': train_data = datasets.STL10(osp.join(args.dataset_root, 'STL10'), split='train', transform=train_transform, download=False) test_data = datasets.STL10(osp.join(args.dataset_root, 'STL10'), split='test', transform=train_transform, download=False) elif args.dataset == 'SVHN': train_data = datasets.SVHN(osp.join(args.dataset_root, 'SVHN'), split='train', transform=train_transform, download=False) test_data = datasets.SVHN(osp.join(args.dataset_root, 'SVHN'), split='test', transform=test_transform, download=False) else: raise Exception('Unknown dataset: {}'.format(args.dataset)) dataloaders = dict() dataloaders['train'] = data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers ) dataloaders['test'] = data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers ) if args.model in model_choices: model = Network().construct(args.model, 
config) else: raise Exception('Unknown model: {}'.format(args.model)) model = model.to(device) if args.loss == 'ce': criterion = nn.CrossEntropyLoss() else: raise Exception('Only cross entropy is allowed: {}'.format(args.loss)) if args.optimizer == 'sgd': optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, momentum=0.9) elif args.optimizer == 'adam': optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) else: raise Exception('Optimizer not allowed: {}'.format(args.optimizer)) scheduler = lr_scheduler.MultiStepLR(optimizer, args.milestones, gamma=args.step_gamma) train(model, optimizer, scheduler, dataloaders, criterion, device, num_epochs=args.num_epochs, args=args, ckpt_dir=ckpt_dir, dataset_sizes=dataset_sizes, images_dir=images_dir )
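# A minimal sketch (not part of the training script above) of how the saved
# spectrum array could be read back, assuming the layout built during training:
# row 2*i holds the eigenvalue grid for epoch i and row 2*i + 1 holds the
# matching spectral density. The file name 'full_eigenspectrums.npy' is a
# placeholder for whatever full_eigenspectrums_path pointed to.
import numpy as np
import matplotlib.pyplot as plt

spectra = np.load('full_eigenspectrums.npy')
assert spectra.shape[0] % 2 == 0
num_epochs = spectra.shape[0] // 2

for epoch in range(num_epochs):
    eigval = spectra[2 * epoch]        # eigenvalue support for this epoch
    density = spectra[2 * epoch + 1]   # estimated spectral density on that support
    plt.semilogy(eigval, density, label='epoch {}'.format(epoch))

plt.xlabel('eigenvalue')
plt.ylabel('density')
plt.legend()
plt.savefig('full_eigenspectrums_overview.png')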
import unittest import os from inspect import cleandoc from typing import Any, List from hstest.check_result import CheckResult from hstest.stage_test import StageTest from hstest.test_case import TestCase from hstest.test_run import TestRun class TestCurrTestCase(StageTest): tc_1 = None tc_2 = None def generate(self) -> List[TestCase]: self.tc_1 = TestCase(stdin='1', attach=1) self.tc_2 = TestCase(stdin='2', attach=2) return [self.tc_1, self.tc_2] def check(self, reply: str, attach: Any) -> CheckResult: tc = TestRun.curr_test_run.test_case if (tc.input == '1' and tc.attach == 1 and tc is self.tc_1 or tc.input == '2' and tc.attach == 2 and tc is self.tc_2): return CheckResult.correct() return CheckResult.wrong('') class Test(unittest.TestCase): def test(self): file = __file__.replace(os.sep, '.')[:-3] file = file[file.find('.tests.') + 1: file.rfind('.') + 1] + 'main' status, feedback = TestCurrTestCase(file).run_tests() self.assertEqual('test OK', feedback) self.assertEqual(status, 0)
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. import binascii import os import struct import pytest from cryptography.hazmat.backends.interfaces import CipherBackend from cryptography.hazmat.primitives.ciphers import Cipher, algorithms from .utils import _load_all_params from ...utils import load_nist_vectors @pytest.mark.supported( only_if=lambda backend: backend.cipher_supported( algorithms.ChaCha20(b"\x00" * 32, b"0" * 16), None ), skip_message="Does not support ChaCha20", ) @pytest.mark.requires_backend_interface(interface=CipherBackend) class TestChaCha20(object): @pytest.mark.parametrize( "vector", _load_all_params( os.path.join("ciphers", "ChaCha20"), ["rfc7539.txt"], load_nist_vectors, ), ) def test_vectors(self, vector, backend): key = binascii.unhexlify(vector["key"]) nonce = binascii.unhexlify(vector["nonce"]) ibc = struct.pack("<i", int(vector["initial_block_counter"])) pt = binascii.unhexlify(vector["plaintext"]) encryptor = Cipher( algorithms.ChaCha20(key, ibc + nonce), None, backend ).encryptor() computed_ct = encryptor.update(pt) + encryptor.finalize() assert binascii.hexlify(computed_ct) == vector["ciphertext"] def test_buffer_protocol(self, backend): key = bytearray(os.urandom(32)) nonce = bytearray(os.urandom(16)) cipher = Cipher(algorithms.ChaCha20(key, nonce), None, backend) enc = cipher.encryptor() ct = enc.update(bytearray(b"hello")) + enc.finalize() dec = cipher.decryptor() pt = dec.update(ct) + dec.finalize() assert pt == b"hello" def test_key_size(self): chacha = algorithms.ChaCha20(b"0" * 32, b"0" * 16) assert chacha.key_size == 256 def test_invalid_key_size(self): with pytest.raises(ValueError): algorithms.ChaCha20(b"wrongsize", b"0" * 16) def test_invalid_nonce(self): with pytest.raises(ValueError): algorithms.ChaCha20(b"0" * 32, b"0") with pytest.raises(TypeError): algorithms.ChaCha20(b"0" * 32, object()) # type:ignore[arg-type] def test_invalid_key_type(self): with pytest.raises(TypeError, match="key must be bytes"): algorithms.ChaCha20("0" * 32, b"0" * 16) # type:ignore[arg-type]
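# A small usage sketch, separate from the test suite above, showing how the
# 16-byte value passed to algorithms.ChaCha20 is assembled in these vectors:
# a 4-byte little-endian initial block counter followed by the 12-byte
# RFC 7539 nonce. Key and nonce values here are arbitrary placeholders.
import os
import struct

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms

key = os.urandom(32)
rfc7539_nonce = os.urandom(12)
counter = struct.pack("<i", 0)        # initial block counter, little-endian
full_nonce = counter + rfc7539_nonce  # 16 bytes expected by the library

encryptor = Cipher(algorithms.ChaCha20(key, full_nonce), None, default_backend()).encryptor()
ct = encryptor.update(b"attack at dawn") + encryptor.finalize()

decryptor = Cipher(algorithms.ChaCha20(key, full_nonce), None, default_backend()).decryptor()
assert decryptor.update(ct) + decryptor.finalize() == b"attack at dawn"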
class DataPreparator: """A class used to wrap functions necessary for preparing data for transmission along the TCP sockets. The TCP server will call the 'convert()' method that must return a bytes object of the correct format for transmission""" def __init__(self, data=None, encoding: str = 'utf-8'): self.data = data self.encoding = encoding def convert(self, data=None) -> bytes: """Converts the data into a usable object for tcp server""" if self.data is None and data is None: raise AssertionError("Data to transfer cannot be None") payload = data if data is not None else self.data return str(payload).encode(self.encoding)
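# A minimal usage sketch under the assumption that the TCP server simply
# sends whatever bytes convert() returns. The host/port values and the socket
# handling below are illustrative placeholders, not part of DataPreparator.
import socket

preparator = DataPreparator(data="hello", encoding="utf-8")
payload = preparator.convert()  # bytes ready for transmission

with socket.create_connection(("127.0.0.1", 9000)) as sock:
    sock.sendall(payload)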
"""Download""" import subprocess from src.helpers import logger LOG = logger.getLogger(__name__) def run(settings: dict): """Download""" sequences_file = settings['downloads']['sequences'] LOG.info(f"Downloading {sequences_file}") subprocess.run([ 'wget', '-q', sequences_file, '-O', f"{settings['data_dir']}/raw/{settings['pipeline']}_sequences.fasta", ])
"""Realtime rate limiting tests.""" import sched import threading import time import pytest from pytest import approx from redbucket import (InMemoryRateLimiter, RedisScriptRateLimiter, RedisTransactionalRateLimiter, RateLimit, Zone) @pytest.fixture def in_memory_rate_limiter(): return InMemoryRateLimiter() @pytest.fixture def redis_tx_rate_limiter(redis, key_format): return RedisTransactionalRateLimiter(redis, key_format=key_format) @pytest.fixture def redis_script_rate_limiter(redis, redis_version_check, key_format): redis_version_check(RedisScriptRateLimiter.MIN_REDIS_VERSION) return RedisScriptRateLimiter(redis, key_format=key_format) @pytest.fixture(params=('in_memory', 'redis_tx', 'redis_script')) def rate_limiter(request): return request.getfixturevalue(f'{request.param}_rate_limiter') def test_basic(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 5))) results = [None] * 12 def req(i): results[i] = rate_limiter.request(k1='foo') sch = sched.scheduler() for i in range(12): sch.enter(0.1 + i/12, 0, req, (i,)) sch.run() accepted = [int(s) for s, d in results] assert accepted == [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0] def test_burst(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 5), burst=2)) results = [None] * 12 def req(i): results[i] = rate_limiter.request(k1='foo') sch = sched.scheduler() for i in range(12): sch.enter(0.1 + i/12, 0, req, (i,)) sch.run() accepted = [int(s) for s, d in results] assert accepted == [1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0] def test_delay(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 5), delay=2)) results = [None] * 12 def req(i): results[i] = rate_limiter.request(k1='foo') sch = sched.scheduler() for i in range(12): sch.enter(0.1 + i/12, 0, req, (i,)) sch.run() accepted = [int(s) for s, d in results] assert accepted == [1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0] dreq = [i/12 + d for i, (s, d) in enumerate(results) if s] assert dreq == approx([0, 1/5, 2/5, 3/5, 4/5, 1, 6/5], abs=0.05) def test_burst_delay(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 5), burst=1, delay=1)) results = [None] * 12 def req(i): results[i] = rate_limiter.request(k1='foo') sch = sched.scheduler() for i in range(12): sch.enter(0.1 + i/12, 0, req, (i,)) sch.run() accepted = [int(s) for s, d in results] assert accepted == [1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0] dreq = [i/12 + d for i, (s, d) in enumerate(results) if s] assert dreq == approx([0, 1/12, 1/5, 2/5, 3/5, 4/5, 1], abs=0.05) def test_multi_zone(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 1), burst=4), k2=RateLimit(Zone('z2', 5), delay=2)) results = [None] * 12 def req(i): results[i] = rate_limiter.request(k1='foo', k2='bar') sch = sched.scheduler() for i in range(12): sch.enter(0.1 + i/12, 0, req, (i,)) sch.run() accepted = [int(s) for s, d in results] assert accepted == [1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0] dreq = [i/12 + d for i, (s, d) in enumerate(results) if s] assert dreq == approx([0, 1/5, 2/5, 3/5, 4/5], abs=0.05) def test_multithreaded(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 7))) tstamps = [[], [], []] start = time.monotonic() + 0.1 end = start + .95 def thread_fn(i): time.sleep(max(start - time.monotonic(), 0)) while time.monotonic() < end: s, d = rate_limiter.request(k1='foo') if s: tstamps[i].append(time.monotonic()) time.sleep(0) threads = [threading.Thread(target=thread_fn, args=(i,)) for i in range(3)] for thread in threads: thread.start() for thread in threads: thread.join() all_ts = sorted(tstamps[0] + tstamps[1] + 
tstamps[2]) stime = all_ts[0] for i in range(len(all_ts)): all_ts[i] -= stime assert all_ts == approx([0, 1/7, 2/7, 3/7, 4/7, 5/7, 6/7], abs=0.05) def test_request_invalid_key(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 1))) with pytest.raises(KeyError) as ei: rate_limiter.request(k2='bar') assert str(ei.value) == repr('k2') def test_request_no_keys(rate_limiter): rate_limiter.configure(k1=RateLimit(Zone('z1', 1))) assert rate_limiter.request() == (True, 0)
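# A short usage sketch distilled from the tests above: configure() maps a key
# name to a RateLimit over a Zone, and request() returns an (accepted, delay)
# pair. The key/zone names are placeholders; the rate of 5 appears to be
# requests per second, judging by the timings asserted in these tests.
from redbucket import InMemoryRateLimiter, RateLimit, Zone

limiter = InMemoryRateLimiter()
limiter.configure(api=RateLimit(Zone('api_zone', 5), burst=2, delay=1))

accepted, delay = limiter.request(api='client-42')
if accepted:
    print('handle the request after waiting %.2fs' % delay)
else:
    print('reject the request')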
# Copyright (c) 2014 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import os import tempfile import mock import six from cinder import exception from cinder import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hnas_nfs as nfs from cinder.volume.drivers import nfs as drivernfs from cinder.volume.drivers import remotefs from cinder.volume import volume_types SHARESCONF = """172.17.39.132:/cinder 172.17.39.133:/cinder""" HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?> <config> <hnas_cmd>ssc</hnas_cmd> <mgmt_ip0>172.17.44.15</mgmt_ip0> <username>supervisor</username> <password>supervisor</password> <svc_0> <volume_type>default</volume_type> <hdp>172.17.39.132:/cinder</hdp> </svc_0> <svc_1> <volume_type>silver</volume_type> <hdp>172.17.39.133:/cinder</hdp> </svc_1> </config> """ HNAS_WRONG_CONF1 = """<?xml version="1.0" encoding="UTF-8" ?> <config> <hnas_cmd>ssc</hnas_cmd> <mgmt_ip0>172.17.44.15</mgmt_ip0> <username>supervisor</username> <password>supervisor</password> <volume_type>default</volume_type> <hdp>172.17.39.132:/cinder</hdp> </svc_0> </config> """ HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?> <config> <hnas_cmd>ssc</hnas_cmd> <mgmt_ip0>172.17.44.15</mgmt_ip0> <username>supervisor</username> <password>supervisor</password> <svc_0> <volume_type>default</volume_type> </svc_0> <svc_1> <volume_type>silver</volume_type> </svc_1> </config> """ HNAS_WRONG_CONF3 = """<?xml version="1.0" encoding="UTF-8" ?> <config> <hnas_cmd>ssc</hnas_cmd> <mgmt_ip0>172.17.44.15</mgmt_ip0> <username> </username> <password>supervisor</password> <svc_0> <volume_type>default</volume_type> <hdp>172.17.39.132:/cinder</hdp> </svc_0> <svc_1> <volume_type>silver</volume_type> <hdp>172.17.39.133:/cinder</hdp> </svc_1> </config> """ HNAS_WRONG_CONF4 = """<?xml version="1.0" encoding="UTF-8" ?> <config> <hnas_cmd>ssc</hnas_cmd> <mgmt_ip0>172.17.44.15</mgmt_ip0> <username>super</username> <password>supervisor</password> <svc_0> <volume_type>default</volume_type> <hdp>172.17.39.132:/cinder</hdp> </svc_0> <svc_4> <volume_type>silver</volume_type> <hdp>172.17.39.133:/cinder</hdp> </svc_4> </config> """ HNAS_FULL_CONF = """<?xml version="1.0" encoding="UTF-8" ?> <config> <hnas_cmd>ssc</hnas_cmd> <mgmt_ip0>172.17.44.15</mgmt_ip0> <username>super</username> <password>supervisor</password> <ssh_enabled>True</ssh_enabled> <ssh_port>2222</ssh_port> <chap_enabled>True</chap_enabled> <ssh_private_key>/etc/cinder/ssh_priv</ssh_private_key> <cluster_admin_ip0>10.0.0.1</cluster_admin_ip0> <svc_0> <volume_type>default</volume_type> <hdp>172.17.39.132:/cinder</hdp> </svc_0> <svc_1> <volume_type>silver</volume_type> <hdp>172.17.39.133:/cinder/silver </hdp> </svc_1> <svc_2> <volume_type>gold</volume_type> <hdp>172.17.39.133:/cinder/gold</hdp> </svc_2> <svc_3> <volume_type>platinum</volume_type> <hdp>172.17.39.133:/cinder/platinum</hdp> </svc_3> </config> """ # The following information is passed on to 
tests, when creating a volume _SERVICE = ('Test_hdp', 'Test_path', 'Test_label') _SHARE = '172.17.39.132:/cinder' _SHARE2 = '172.17.39.133:/cinder' _EXPORT = '/cinder' _VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', 'size': 128, 'volume_type': 'silver', 'volume_type_id': 'test', 'metadata': [{'key': 'type', 'service_label': 'silver'}], 'provider_location': None, 'id': 'bcc48c61-9691-4e5f-897c-793686093190', 'status': 'available', 'host': 'host1@hnas-iscsi-backend#silver'} _SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', 'size': 128, 'volume_type': None, 'provider_location': None, 'volume_size': 128, 'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191', 'host': 'host1@hnas-iscsi-backend#silver'} _VOLUME_NFS = {'name': 'volume-61da3-8d23-4bb9-3136-ca819d89e7fc', 'id': '61da3-8d23-4bb9-3136-ca819d89e7fc', 'size': 4, 'metadata': [{'key': 'type', 'service_label': 'silver'}], 'volume_type': 'silver', 'volume_type_id': 'silver', 'provider_location': '172.24.44.34:/silver/', 'volume_size': 128, 'host': 'host1@hnas-nfs#silver'} GET_ID_VOL = { ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME], ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME] } def id_to_vol(arg): return GET_ID_VOL.get(arg) class SimulatedHnasBackend(object): """Simulation Back end. Talks to HNAS.""" # these attributes are shared across object instances start_lun = 0 def __init__(self): self.type = 'HNAS' self.out = '' def file_clone(self, cmd, ip0, user, pw, fslabel, source_path, target_path): return "" def get_version(self, ver, cmd, ip0, user, pw): self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \ "version: 11.2.3319.09 LU: 256 " \ "RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01" return self.out def get_hdp_info(self, ip0, user, pw): self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 " \ "Normal fs1\n" \ "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 " \ "Normal fs2" return self.out def get_nfs_info(self, cmd, ip0, user, pw): self.out = "Export: /cinder Path: /volumes HDP: fs1 FSID: 1024 " \ "EVS: 1 IPS: 172.17.39.132\n" \ "Export: /cinder Path: /volumes HDP: fs2 FSID: 1025 " \ "EVS: 1 IPS: 172.17.39.133" return self.out class HDSNFSDriverTest(test.TestCase): """Test HNAS NFS volume driver.""" def __init__(self, *args, **kwargs): super(HDSNFSDriverTest, self).__init__(*args, **kwargs) @mock.patch.object(nfs, 'factory_bend') def setUp(self, m_factory_bend): super(HDSNFSDriverTest, self).setUp() self.backend = SimulatedHnasBackend() m_factory_bend.return_value = self.backend self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(HNASCONF) self.config_file.flush() self.shares_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.shares_file.close) self.shares_file.write(SHARESCONF) self.shares_file.flush() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.max_over_subscription_ratio = 20.0 self.configuration.reserved_percentage = 0 self.configuration.hds_hnas_nfs_config_file = self.config_file.name self.configuration.nfs_shares_config = self.shares_file.name self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt' self.configuration.nfs_mount_options = None self.configuration.nas_ip = None self.configuration.nas_share_path = None self.configuration.nas_mount_options = None self.driver = 
nfs.HDSNFSDriver(configuration=self.configuration) self.driver.do_setup("") @mock.patch('six.moves.builtins.open') @mock.patch.object(os, 'access') def test_read_config(self, m_access, m_open): # Test exception when file is not found m_access.return_value = False m_open.return_value = six.StringIO(HNASCONF) self.assertRaises(exception.NotFound, nfs._read_config, '') # Test exception when config file has parsing errors # due to missing <svc> tag m_access.return_value = True m_open.return_value = six.StringIO(HNAS_WRONG_CONF1) self.assertRaises(exception.ConfigNotFound, nfs._read_config, '') # Test exception when config file has parsing errors # due to missing <hdp> tag m_open.return_value = six.StringIO(HNAS_WRONG_CONF2) self.configuration.hds_hnas_iscsi_config_file = '' self.assertRaises(exception.ParameterNotFound, nfs._read_config, '') # Test exception when config file has parsing errors # due to blank tag m_open.return_value = six.StringIO(HNAS_WRONG_CONF3) self.configuration.hds_hnas_iscsi_config_file = '' self.assertRaises(exception.ParameterNotFound, nfs._read_config, '') # Test when config file has parsing errors due invalid svc_number m_open.return_value = six.StringIO(HNAS_WRONG_CONF4) self.configuration.hds_hnas_iscsi_config_file = '' config = nfs._read_config('') self.assertEqual(1, len(config['services'])) # Test config with full options # due invalid svc_number m_open.return_value = six.StringIO(HNAS_FULL_CONF) self.configuration.hds_hnas_iscsi_config_file = '' config = nfs._read_config('') self.assertEqual(4, len(config['services'])) @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path') @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') def test_create_snapshot(self, m_get_volume_location, m_get_export_path, m_get_provider_location, m_id_to_vol): svol = _SNAPVOLUME.copy() m_id_to_vol.return_value = svol m_get_provider_location.return_value = _SHARE m_get_volume_location.return_value = _SHARE m_get_export_path.return_value = _EXPORT loc = self.driver.create_snapshot(svol) out = "{'provider_location': \'" + _SHARE + "'}" self.assertEqual(out, str(loc)) @mock.patch.object(nfs.HDSNFSDriver, '_get_service') @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') def test_create_cloned_volume(self, m_get_volume_location, m_get_provider_location, m_id_to_vol, m_get_service): vol = _VOLUME.copy() svol = _SNAPVOLUME.copy() m_get_service.return_value = _SERVICE m_get_provider_location.return_value = _SHARE m_get_volume_location.return_value = _SHARE loc = self.driver.create_cloned_volume(vol, svol) out = "{'provider_location': \'" + _SHARE + "'}" self.assertEqual(out, str(loc)) @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted') @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume') @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') def test_create_volume(self, m_get_volume_location, m_get_provider_location, m_id_to_vol, m_do_create_volume, m_ensure_shares_mounted): vol = _VOLUME.copy() m_get_provider_location.return_value = _SHARE2 m_get_volume_location.return_value = _SHARE2 loc = self.driver.create_volume(vol) out = "{'provider_location': \'" + _SHARE2 + "'}" 
self.assertEqual(str(loc), out) @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol') @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') @mock.patch.object(nfs.HDSNFSDriver, '_volume_not_present') def test_delete_snapshot(self, m_volume_not_present, m_get_provider_location, m_id_to_vol): svol = _SNAPVOLUME.copy() m_id_to_vol.return_value = svol m_get_provider_location.return_value = _SHARE m_volume_not_present.return_value = True self.driver.delete_snapshot(svol) self.assertIsNone(svol['provider_location']) @mock.patch.object(nfs.HDSNFSDriver, '_get_service') @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol) @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location') @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path') @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location') def test_create_volume_from_snapshot(self, m_get_volume_location, m_get_export_path, m_get_provider_location, m_id_to_vol, m_get_service): vol = _VOLUME.copy() svol = _SNAPVOLUME.copy() m_get_service.return_value = _SERVICE m_get_provider_location.return_value = _SHARE m_get_export_path.return_value = _EXPORT m_get_volume_location.return_value = _SHARE loc = self.driver.create_volume_from_snapshot(vol, svol) out = "{'provider_location': \'" + _SHARE + "'}" self.assertEqual(out, str(loc)) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', return_value={'key': 'type', 'service_label': 'silver'}) def test_get_pool(self, m_ext_spec): vol = _VOLUME.copy() self.assertEqual('silver', self.driver.get_pool(vol)) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(os.path, 'isfile', return_value=True) @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', return_value='/mnt/gold') @mock.patch.object(utils, 'resolve_hostname', return_value='172.24.44.34') @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') def test_manage_existing(self, m_ensure_shares, m_resolve, m_mount_point, m_isfile, m_get_extra_specs): vol = _VOLUME_NFS.copy() m_get_extra_specs.return_value = {'key': 'type', 'service_label': 'silver'} self.driver._mounted_shares = ['172.17.39.133:/cinder'] existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} with mock.patch.object(self.driver, '_execute'): out = self.driver.manage_existing(vol, existing_vol_ref) loc = {'provider_location': '172.17.39.133:/cinder'} self.assertEqual(loc, out) m_get_extra_specs.assert_called_once_with('silver') m_isfile.assert_called_once_with('/mnt/gold/volume-test') m_mount_point.assert_called_once_with('172.17.39.133:/cinder') m_resolve.assert_called_with('172.17.39.133') m_ensure_shares.assert_called_once_with() @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(os.path, 'isfile', return_value=True) @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', return_value='/mnt/gold') @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') def test_manage_existing_move_fails(self, m_ensure_shares, m_resolve, m_mount_point, m_isfile, m_get_extra_specs): vol = _VOLUME_NFS.copy() m_get_extra_specs.return_value = {'key': 'type', 'service_label': 'silver'} self.driver._mounted_shares = ['172.17.39.133:/cinder'] existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} self.driver._execute = mock.Mock(side_effect=OSError) self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing, vol, 
existing_vol_ref) m_get_extra_specs.assert_called_once_with('silver') m_isfile.assert_called_once_with('/mnt/gold/volume-test') m_mount_point.assert_called_once_with('172.17.39.133:/cinder') m_resolve.assert_called_with('172.17.39.133') m_ensure_shares.assert_called_once_with() @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(os.path, 'isfile', return_value=True) @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', return_value='/mnt/gold') @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') def test_manage_existing_invalid_pool(self, m_ensure_shares, m_resolve, m_mount_point, m_isfile, m_get_extra_specs): vol = _VOLUME_NFS.copy() m_get_extra_specs.return_value = {'key': 'type', 'service_label': 'gold'} self.driver._mounted_shares = ['172.17.39.133:/cinder'] existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} self.driver._execute = mock.Mock(side_effect=OSError) self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, vol, existing_vol_ref) m_get_extra_specs.assert_called_once_with('silver') m_isfile.assert_called_once_with('/mnt/gold/volume-test') m_mount_point.assert_called_once_with('172.17.39.133:/cinder') m_resolve.assert_called_with('172.17.39.133') m_ensure_shares.assert_called_once_with() @mock.patch.object(utils, 'get_file_size', return_value=4000000000) @mock.patch.object(os.path, 'isfile', return_value=True) @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', return_value='/mnt/gold') @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') def test_manage_existing_get_size(self, m_ensure_shares, m_resolve, m_mount_point, m_isfile, m_file_size): vol = _VOLUME_NFS.copy() self.driver._mounted_shares = ['172.17.39.133:/cinder'] existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} out = self.driver.manage_existing_get_size(vol, existing_vol_ref) self.assertEqual(vol['size'], out) m_file_size.assert_called_once_with('/mnt/gold/volume-test') m_isfile.assert_called_once_with('/mnt/gold/volume-test') m_mount_point.assert_called_once_with('172.17.39.133:/cinder') m_resolve.assert_called_with('172.17.39.133') m_ensure_shares.assert_called_once_with() @mock.patch.object(utils, 'get_file_size', return_value='badfloat') @mock.patch.object(os.path, 'isfile', return_value=True) @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', return_value='/mnt/gold') @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133') @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted') def test_manage_existing_get_size_error(self, m_ensure_shares, m_resolve, m_mount_point, m_isfile, m_file_size): vol = _VOLUME_NFS.copy() self.driver._mounted_shares = ['172.17.39.133:/cinder'] existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, vol, existing_vol_ref) m_file_size.assert_called_once_with('/mnt/gold/volume-test') m_isfile.assert_called_once_with('/mnt/gold/volume-test') m_mount_point.assert_called_once_with('172.17.39.133:/cinder') m_resolve.assert_called_with('172.17.39.133') m_ensure_shares.assert_called_once_with() def test_manage_existing_get_size_without_source_name(self): vol = _VOLUME.copy() existing_vol_ref = { 'source-id': 
'bcc48c61-9691-4e5f-897c-793686093190'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol, existing_vol_ref) @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share', return_value='/mnt/gold') def test_unmanage(self, m_mount_point): with mock.patch.object(self.driver, '_execute'): vol = _VOLUME_NFS.copy() self.driver.unmanage(vol) m_mount_point.assert_called_once_with('172.24.44.34:/silver/')
"""Tests for batch_encoder.py.""" import os import unittest from bfv.batch_encoder import BatchEncoder from bfv.bfv_parameters import BFVParameters from util.plaintext import Plaintext from util.random_sample import sample_uniform TEST_DIRECTORY = os.path.dirname(__file__) class TestBatchEncoder(unittest.TestCase): def setUp(self): self.degree = 8 self.plain_modulus = 17 self.ciph_modulus = 0x3fffffff000001 params = BFVParameters(poly_degree=self.degree, plain_modulus=self.plain_modulus, ciph_modulus=self.ciph_modulus) self.encoder = BatchEncoder(params) def run_test_encode_decode(self, inp): plain = self.encoder.encode(inp) value = self.encoder.decode(plain) self.assertEqual(len(value), len(inp)) self.assertEqual(value, inp) def run_test_encode_multiply(self, inp1, inp2): prod = [(inp1[i] * inp2[i]) % self.plain_modulus for i in range(len(inp1))] plain1 = self.encoder.encode(inp1) plain2 = self.encoder.encode(inp2) plain_prod = Plaintext(plain1.poly.multiply(plain2.poly, self.plain_modulus)) decoded_prod = self.encoder.decode(plain_prod) self.assertEqual(prod, decoded_prod) def test_encode_decode_01(self): vec = sample_uniform(0, self.plain_modulus, self.degree) self.run_test_encode_decode(vec) def test_encode_decode_02(self): self.run_test_encode_decode([0] * self.degree) def test_encode_multiply_01(self): vec1 = sample_uniform(0, self.plain_modulus, self.degree) vec2 = sample_uniform(0, self.plain_modulus, self.degree) self.run_test_encode_multiply(vec1, vec2) if __name__ == '__main__': res = unittest.main(verbosity=3, exit=False)
import argparse import math import os import random import shutil import time from collections import OrderedDict from copy import deepcopy import numpy as np import torch.backends.cudnn as cudnn import torch.optim as optim from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from fixmatch.dataset.cifar import get_cifar10, get_cifar100 from fixmatch.models.confucius_model import * from fixmatch.utils import AverageMeter, accuracy from logger import Logger DATASET_GETTERS = {'cifar10': get_cifar10, 'cifar100': get_cifar100} best_acc = 0 def save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar'): filepath = os.path.join(checkpoint, filename) torch.save(state, filepath) if is_best: shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar')) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=7. / 16., last_epoch=-1): def _lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) no_progress = float(current_step - num_warmup_steps) / \ float(max(1, num_training_steps - num_warmup_steps)) return max(0., math.cos(math.pi * num_cycles * no_progress)) return LambdaLR(optimizer, _lr_lambda, last_epoch) def main(set_name, version): logger = Logger(set_name, version) parser = argparse.ArgumentParser(description='PyTorch FixMatch Training') parser.add_argument('--set', default='default', type=str) parser.add_argument('--version', default='0', type=str) parser.add_argument('--gpu-id', default='0', type=int, help='id(s) for CUDA_VISIBLE_DEVICES') parser.add_argument('--num-workers', type=int, default=64, help='number of workers') parser.add_argument('--dataset', default='cifar10', type=str, choices=['cifar10', 'cifar100'], help='dataset name') parser.add_argument('--num-labeled', type=int, default=4000, help='number of labeled data') parser.add_argument('--arch', default='wideresnet', type=str, choices=['wideresnet', 'resnext'], help='dataset name') parser.add_argument('--epochs', default=256, type=int, help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)') parser.add_argument('--batch-size', default=128, type=int, help='train batchsize') parser.add_argument('--lr', '--learning-rate', default=0.03, type=float, help='initial learning rate') parser.add_argument('--warmup', default=0, type=float, help='warmup epochs (unlabeled data based)') parser.add_argument('--wdecay', default=5e-4, type=float, help='weight decay') parser.add_argument('--nesterov', action='store_true', default=True, help='use nesterov momentum') parser.add_argument('--use-ema', action='store_true', default=True, help='use EMA model') parser.add_argument('--ema-decay', default=0.999, type=float, help='EMA decay rate') parser.add_argument('--mu', default=7, type=int, help='coefficient of unlabeled batch size') parser.add_argument('--lambda-u', default=1, type=float, help='coefficient of unlabeled loss') parser.add_argument('--threshold', default=0.98, type=float, help='pseudo label threshold') parser.add_argument('--k-img', 
default=65536, type=int, help='number of labeled examples') parser.add_argument('--out', default=str(logger.exp_dir / 'result'), help='directory to output the result') parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)') parser.add_argument('--seed', type=int, default=-1, help="random seed (-1: don't use random seed)") parser.add_argument("--amp", action="store_true", help="use 16-bit (mixed) precision through NVIDIA apex AMP") parser.add_argument("--opt_level", type=str, default="O1", help="apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--no-progress', action='store_true', default=True, help="don't use progress bar") parser.add_argument('--no-semi-confucius', action='store_true', help="train confucius with unlabelled data") args = parser.parse_args() global best_acc def create_model(args): if args.arch == 'wideresnet': import models.wideresnet as models model = models.build_wideresnet(depth=args.model_depth, widen_factor=args.model_width, dropout=0, num_classes=args.num_classes, expose=True) elif args.arch == 'resnext': import models.resnext as models model = models.build_resnext(cardinality=args.model_cardinality, depth=args.model_depth, width=args.model_width, num_classes=args.num_classes) logger.log_print("Total params: {:.2f}M".format( sum(p.numel() for p in model.parameters()) / 1e6)) return model if args.local_rank == -1: device = torch.device('cuda', args.gpu_id) args.world_size = 1 args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device('cuda', args.local_rank) torch.distributed.init_process_group(backend='nccl') args.world_size = torch.distributed.get_world_size() args.n_gpu = 1 args.device = device if args.dataset == 'cifar10': args.num_classes = 10 if args.arch == 'wideresnet': args.model_depth = 28 args.model_width = 2 if args.arch == 'resnext': args.model_cardinality = 4 args.model_depth = 28 args.model_width = 4 elif args.dataset == 'cifar100': args.num_classes = 100 if args.arch == 'wideresnet': args.model_depth = 28 args.model_width = 10 if args.arch == 'resnext': args.model_cardinality = 8 args.model_depth = 29 args.model_width = 64 logger.log_print( f"Process rank: {args.local_rank}, " f"device: {args.device}, " f"n_gpu: {args.n_gpu}, " f"distributed training: {bool(args.local_rank != -1)}, " f"16-bits training: {args.amp}", ) logger.log_print(dict(args._get_kwargs())) if args.seed != -1: set_seed(args) if args.local_rank in [-1, 0]: os.makedirs(args.out, exist_ok=True) writer = SummaryWriter(logger.path / "summary") if args.local_rank not in [-1, 0]: torch.distributed.barrier() labeled_dataset, unlabeled_dataset, test_dataset = DATASET_GETTERS[args.dataset]( '../../data/cifarfm', args.num_labeled, args.k_img, args.k_img * args.mu) model = create_model(args) if args.local_rank == 0: torch.distributed.barrier() model.to(args.device) confucius = Confucius(10, 128, 32) confucius.to(args.device) train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler labeled_trainloader = DataLoader( labeled_dataset, sampler=train_sampler(labeled_dataset), batch_size=args.batch_size, num_workers=args.num_workers, drop_last=True) unlabeled_trainloader = DataLoader( unlabeled_dataset, sampler=train_sampler(unlabeled_dataset), batch_size=args.batch_size * args.mu, 
num_workers=args.num_workers, drop_last=True) test_loader = DataLoader( test_dataset, sampler=SequentialSampler(test_dataset), batch_size=args.batch_size, num_workers=args.num_workers) optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, nesterov=args.nesterov) args.iteration = args.k_img // args.batch_size // args.world_size args.total_steps = args.epochs * args.iteration scheduler = get_cosine_schedule_with_warmup( optimizer, args.warmup * args.iteration, args.total_steps) confucius_optim = optim.Adam(confucius.parameters()) # no scheduler. not sure if SGD is the matter, Adam needs no scheduler # TODO ablation if args.use_ema: ema_model = ModelEMA(args, model, args.ema_decay, device) start_epoch = 0 if args.resume: logger.log_print("==> Resuming from checkpoint..") assert os.path.isfile( args.resume), "Error: no checkpoint directory found!" args.out = os.path.dirname(args.resume) checkpoint = torch.load(args.resume) best_acc = checkpoint['best_acc'] start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) if args.use_ema: ema_model.ema.load_state_dict(checkpoint['ema_state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) # # if args.amp: # from apex import amp # model, optimizer = amp.initialize( # model, optimizer, opt_level=args.opt_level) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) logger.log_print("***** Running training *****") logger.log_print(f" Task = {args.dataset}@{args.num_labeled}") logger.log_print(f" Num Epochs = {args.epochs}") logger.log_print(f" Batch size per GPU = {args.batch_size}") logger.log_print( f" Total train batch size = {args.batch_size * args.world_size}") logger.log_print(f" Total optimization steps = {args.total_steps}") test_accs = [] model.zero_grad() model = nn.DataParallel(model) confucius = nn.DataParallel(confucius) if args.use_ema: test_model = ema_model.ema else: test_model = model # logger.log_print("Dry run") # test_loss, test_acc = ttest(args, test_loader, test_model, epoch, logger) for epoch in range(start_epoch, args.epochs): train_loss, train_loss_x, train_loss_u, mask_prob, epoch_time = train_one_epoch( args, labeled_trainloader, unlabeled_trainloader, model, optimizer, confucius, confucius_optim, ema_model, scheduler, epoch, logger) if args.use_ema: test_model = ema_model.ema else: test_model = model test_loss, test_acc = ttest(args, test_loader, test_model, 0, logger) if args.local_rank in [-1, 0]: logger.auto_log("train loss", epoch=epoch, iter=len(labeled_trainloader), loss=train_loss, loss_x=train_loss_x, loss_u=train_loss_u, mask_prob=mask_prob, epoch_time=epoch_time) logger.auto_log("test loss", epoch=epoch, accu=test_acc, loss=test_loss) writer.add_scalar('train/1.train_loss', train_loss, epoch) writer.add_scalar('train/2.train_loss_x', train_loss_x, epoch) writer.add_scalar('train/3.train_loss_u', train_loss_u, epoch) writer.add_scalar('train/4.mask', mask_prob, epoch) writer.add_scalar('test/1.test_acc', test_acc, epoch) writer.add_scalar('test/2.test_loss', test_loss, epoch) is_best = test_acc > best_acc best_acc = max(test_acc, best_acc) if args.local_rank in [-1, 0]: model_to_save = model.module if hasattr(model, "module") else model if args.use_ema: ema_to_save = ema_model.ema.module if hasattr( ema_model.ema, "module") else ema_model.ema save_checkpoint({ 'epoch': epoch + 1, 'state_dict': 
model_to_save.state_dict(), 'ema_state_dict': ema_to_save.state_dict() if args.use_ema else None, 'acc': test_acc, 'best_acc': best_acc, 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), }, is_best, args.out) test_accs.append(test_acc) logger.auto_log("test accuracy", epoch=epoch, best_top_1=best_acc, mean_top_1=np.mean(test_accs[-20:])) if args.local_rank in [-1, 0]: writer.close() def train_one_epoch(args, labeled_trainloader, unlabeled_trainloader, model, optimizer, confucius, confucius_optim, ema_model, scheduler, epoch, logger): if args.amp: from apex import amp batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() losses_x = AverageMeter() losses_u = AverageMeter() # gpu_util = AverageMeter() # gpu_mem = AverageMeter() end = time.time() epoch_start = time.time() if not args.no_progress: p_bar = tqdm(range(args.iteration), disable=args.local_rank not in [-1, 0]) train_loader = zip(labeled_trainloader, unlabeled_trainloader) model.train() for batch_idx, (data_x, data_u) in enumerate(train_loader): inputs_x, targets_x = data_x (inputs_u_w, inputs_u_s), _ = data_u data_time.update(time.time() - end) batch_size = inputs_x.shape[0] inputs = torch.cat((inputs_x, inputs_u_w, inputs_u_s)).to(args.device) targets_x = targets_x.to(args.device) logits, exposed = model(inputs) confidence_logits = confucius(logits.detach(), exposed.detach()) conf_logits_x = confidence_logits[:batch_size] uconf = confidence_logits[batch_size:] conf_logits_u_w, conf_logits_u_s = uconf.chunk(2) conf_x, conf_u_w, conf_u_s = torch.sigmoid(conf_logits_x), torch.sigmoid(conf_logits_u_w), \ torch.sigmoid(conf_logits_u_s) logits_x = logits[:batch_size] logits_u_w, logits_u_s = logits[batch_size:].chunk(2) # compute the labelled loss Lx = F.cross_entropy(logits_x, targets_x, reduction='mean') # generate the pseudo labels pseudo_label = torch.softmax(logits_u_w.detach_(), dim=-1) max_probs, pseudo_label = torch.max(pseudo_label, dim=-1) # use confidence instead to mask the pseudo-label below threshold # I'm guessing that the mask does not have gradient, the masked has mask = conf_u_w.ge(args.threshold).float() # mask = max_probs.ge(args.threshold).float() Lu = (F.cross_entropy(logits_u_s, pseudo_label, reduction='none') * mask).mean() loss = Lx + args.lambda_u * Lu if args.amp: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() losses.update(loss.item()) losses_x.update(Lx.item()) losses_u.update(Lu.item()) # mem, util=report_mem_and_util() # gpu_mem.update(mem) # gpu_util.update(util) optimizer.step() scheduler.step() if args.use_ema: ema_model.update(model) model.zero_grad() # I'm guessing that the confucius.zero_grad() # main model finishes, start confidence model # generate label for confucius, we use precision as usual # labelled prediction # TODO debug this # TODO question, should only labelled data be used to train confidence or should strongly augmented as well? # TODO the main model is trained with augmented, so should confidence do the same? 
I'm not sure pred = torch.softmax(logits_x.detach(), dim=-1) pred = pred.argmax(dim=1, keepdim=True) precision = pred.eq(targets_x.view_as(pred)).float() x_conf_loss = F.binary_cross_entropy_with_logits(conf_logits_x, precision) if args.no_semi_confucius: u_s_conf_loss = 0 else: u_s_pred = torch.softmax(logits_u_s.detach(), dim=-1) u_s_pred = u_s_pred.argmax(dim=1, keepdim=True) u_s_accu = u_s_pred.eq(pseudo_label.view_as(u_s_pred)).float() u_s_conf_loss = F.binary_cross_entropy_with_logits( conf_u_s, u_s_accu) conf_loss = x_conf_loss + args.lambda_u * u_s_conf_loss conf_loss.backward() confucius_optim.step() model.zero_grad() confucius.zero_grad() batch_time.update(time.time() - end) end = time.time() mask_prob = mask.mean().item() if not args.no_progress: p_bar.set_description( "Train Epoch: {epoch}/{epochs:4}. Iter: {batch:4}/{iter:4}. LR: {lr:.6f}. Data: {data:.3f}s. " "Batch: {bt:.3f}s. Loss: {loss:.4f}. Loss_x: {loss_x:.4f}. " "Loss_u: {loss_u:.4f}. Mask: {mask:.4f}. ".format( epoch=epoch + 1, epochs=args.epochs, batch=batch_idx + 1, iter=args.iteration, lr=scheduler.get_last_lr()[0], data=data_time.avg, bt=batch_time.avg, loss=losses.avg, loss_x=losses_x.avg, loss_u=losses_u.avg, mask=mask_prob)) p_bar.update() else: print_intvl = int(len(labeled_trainloader) / 20) if batch_idx % print_intvl == 0: logger.auto_log("train loss", epoch=epoch, iter=batch_idx, loss=losses.avg, loss_x=losses_x.avg, loss_u=losses_u.avg, mask_prob=mask_prob, epoch_time=time.time() - epoch_start) if not args.no_progress: p_bar.close() return losses.avg, losses_x.avg, losses_u.avg, mask_prob, time.time() - epoch_start def ttest(args, test_loader, model, epoch, logger): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() end = time.time() if not args.no_progress: test_loader = tqdm(test_loader, disable=args.local_rank not in [-1, 0]) with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(test_loader): data_time.update(time.time() - end) model.eval() inputs = inputs.to(args.device) targets = targets.to(args.device) outputs, _ = model(inputs) loss = F.cross_entropy(outputs, targets) prec1, prec5 = accuracy(outputs, targets, topk=(1, 5)) losses.update(loss.item(), inputs.shape[0]) top1.update(prec1.item(), inputs.shape[0]) top5.update(prec5.item(), inputs.shape[0]) batch_time.update(time.time() - end) end = time.time() if not args.no_progress: test_loader.set_description( "Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. " "top1: {top1:.2f}. top5: {top5:.2f}. 
".format( batch=batch_idx + 1, iter=len(test_loader), data=data_time.avg, bt=batch_time.avg, loss=losses.avg, top1=top1.avg, top5=top5.avg, )) if not args.no_progress: test_loader.close() logger.auto_log("top accuracy", epoch=epoch, top_1_accu=top1.avg, top_5_acc=top5.avg) # logger.log_print("top-1 acc: {:.2f}".format(top1.avg)) # logger.log_print("top-5 acc: {:.2f}".format(top5.avg)) return losses.avg, top1.avg class ModelEMA(object): def __init__(self, args, model, decay, device='', resume=''): self.ema = deepcopy(model) self.ema.eval() self.decay = decay self.device = device self.wd = args.lr * args.wdecay if device: self.ema.to(device=device) self.ema_has_module = hasattr(self.ema, 'module') if resume: self._load_checkpoint(resume) for p in self.ema.parameters(): p.requires_grad_(False) def _load_checkpoint(self, checkpoint_path): checkpoint = torch.load(checkpoint_path) assert isinstance(checkpoint, dict) if 'ema_state_dict' in checkpoint: new_state_dict = OrderedDict() for k, v in checkpoint['ema_state_dict'].items(): if self.ema_has_module: name = 'module.' + k if not k.startswith('module') else k else: name = k new_state_dict[name] = v self.ema.load_state_dict(new_state_dict) def update(self, model): needs_module = hasattr(model, 'module') and not self.ema_has_module with torch.no_grad(): msd = model.state_dict() for k, ema_v in self.ema.state_dict().items(): if needs_module: k = 'module.' + k model_v = msd[k].detach() if self.device: model_v = model_v.to(device=self.device) ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) # weight decay if 'bn' not in k: msd[k] = msd[k] * (1. - self.wd) if __name__ == '__main__': cudnn.benchmark = True main("digging", "98semifix")
import asyncio import concurrent import logging from concurrent.futures.thread import ThreadPoolExecutor from pathlib import Path from typing import Callable, Dict, List, Optional, Set, Tuple from blspy import G1Element import flora.server.ws_connection as ws # lgtm [py/import-and-import-from] from flora.consensus.constants import ConsensusConstants from flora.plotting.plot_tools import PlotInfo from flora.plotting.plot_tools import add_plot_directory as add_plot_directory_pt from flora.plotting.plot_tools import get_plot_directories as get_plot_directories_pt from flora.plotting.plot_tools import load_plots from flora.plotting.plot_tools import remove_plot_directory as remove_plot_directory_pt log = logging.getLogger(__name__) class Harvester: provers: Dict[Path, PlotInfo] failed_to_open_filenames: Dict[Path, int] no_key_filenames: Set[Path] farmer_public_keys: List[G1Element] pool_public_keys: List[G1Element] root_path: Path _is_shutdown: bool executor: ThreadPoolExecutor state_changed_callback: Optional[Callable] cached_challenges: List constants: ConsensusConstants _refresh_lock: asyncio.Lock def __init__(self, root_path: Path, config: Dict, constants: ConsensusConstants): self.root_path = root_path # From filename to prover self.provers = {} self.failed_to_open_filenames = {} self.no_key_filenames = set() self._is_shutdown = False self.farmer_public_keys = [] self.pool_public_keys = [] self.match_str = None self.show_memo: bool = False self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"]) self.state_changed_callback = None self.server = None self.constants = constants self.cached_challenges = [] self.log = log self.state_changed_callback: Optional[Callable] = None self.last_load_time: float = 0 self.plot_load_frequency = config.get("plot_loading_frequency_seconds", 120) async def _start(self): self._refresh_lock = asyncio.Lock() def _close(self): self._is_shutdown = True self.executor.shutdown(wait=True) async def _await_closed(self): pass def _set_state_changed_callback(self, callback: Callable): self.state_changed_callback = callback def _state_changed(self, change: str): if self.state_changed_callback is not None: self.state_changed_callback(change) def on_disconnect(self, connection: ws.WSFloraConnection): self.log.info(f"peer disconnected {connection.get_peer_info()}") self._state_changed("close_connection") def get_plots(self) -> Tuple[List[Dict], List[str], List[str]]: self.log.debug(f"get_plots prover items: {len(self.provers)}") response_plots: List[Dict] = [] for path, plot_info in self.provers.items(): prover = plot_info.prover response_plots.append( { "filename": str(path), "size": prover.get_size(), "plot-seed": prover.get_id(), # Deprecated "plot_id": prover.get_id(), "pool_public_key": plot_info.pool_public_key, "pool_contract_puzzle_hash": plot_info.pool_contract_puzzle_hash, "plot_public_key": plot_info.plot_public_key, "file_size": plot_info.file_size, "time_modified": plot_info.time_modified, } ) self.log.debug( f"get_plots response: plots: {len(response_plots)}, " f"failed_to_open_filenames: {len(self.failed_to_open_filenames)}, " f"no_key_filenames: {len(self.no_key_filenames)}" ) return ( response_plots, [str(s) for s, _ in self.failed_to_open_filenames.items()], [str(s) for s in self.no_key_filenames], ) async def refresh_plots(self): locked: bool = self._refresh_lock.locked() changed: bool = False if not locked: async with self._refresh_lock: # Avoid double refreshing of plots (changed, self.provers, 
self.failed_to_open_filenames, self.no_key_filenames,) = load_plots( self.provers, self.failed_to_open_filenames, self.farmer_public_keys, self.pool_public_keys, self.match_str, self.show_memo, self.root_path, ) if changed: self._state_changed("plots") def delete_plot(self, str_path: str): path = Path(str_path).resolve() if path in self.provers: del self.provers[path] # Remove absolute and relative paths if path.exists(): path.unlink() self._state_changed("plots") return True async def add_plot_directory(self, str_path: str) -> bool: add_plot_directory_pt(str_path, self.root_path) await self.refresh_plots() return True async def get_plot_directories(self) -> List[str]: return get_plot_directories_pt(self.root_path) async def remove_plot_directory(self, str_path: str) -> bool: remove_plot_directory_pt(str_path, self.root_path) return True def set_server(self, server): self.server = server
import pytest import skil def test_experiment_serde(): exp = skil.Experiment(name='foo') exp.save('exp.json') recov = skil.Experiment.load('exp.json') assert recov.get_config() == exp.get_config() def test_experiment_serde_yaml(): exp = skil.Experiment(name='foo') exp.save('exp.yml', file_format='yaml') recov = skil.Experiment.load('exp.yml') assert recov.get_config() == exp.get_config() def test_model_serde(): model = skil.Model('keras_mnist.h5', name='bar') model.save('model.json') recov = skil.Model.load('model.json') assert recov.get_config() == model.get_config() def test_transform_serde(): transform = skil.Transform('iris_tp.json', 'baz') transform.save('transform.json') recov = skil.Transform.load('transform.json') assert recov.get_config() == transform.get_config() def test_deployment_serde(): dep = skil.Deployment() dep.save('dep.json') recov = skil.Deployment.load('dep.json') assert recov.get_config() == dep.get_config() def test_service_serde(): dep = skil.Deployment() model = skil.Model('keras_mnist.h5', name='bar') service = model.deploy(dep) service.save('service.json') recov = skil.Service.load('service.json') assert recov.get_config() == service.get_config() if __name__ == '__main__': pytest.main([__file__])
from django.conf.urls import url from . import views app_name = 'polls' urlpatterns = [ url(r'^$', views.IndexView.as_view(), name='index'), url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'), url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'), url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'), ]
# -*- coding: utf-8 -*- ################################################ # # URL: # ===== # https://leetcode.com/problems/unique-binary-search-trees-ii/ # # DESC: # ===== # Given an integer n, generate all structurally unique BST's (binary search trees) that store values 1 ... n. # # Example: # Input: 3 # Output: # [ # [1,null,3,2], # [3,2,null,1], # [3,1,null,null,2], # [2,1,3], # [1,null,2,null,3] # ] # Explanation: # The above output corresponds to the 5 unique BST's shown below: # # 1 3 3 2 1 # \ / / / \ \ # 3 2 1 1 3 2 # / / \ \ # 2 1 2 3 # ################################################ from typing import List from utils.tree.TreeNode import TreeNode class Solution: def generateTrees(self, n: int) -> List[TreeNode]: return self.generate(1, n) if n >= 1 else [] def generate(self, start, end): if start > end: return [None] if start == end: return [TreeNode(start)] result = [] for num in range(start, end + 1): lefts = self.generate(start, num - 1) rights = self.generate(num + 1, end) for left in lefts: for right in rights: root = TreeNode(num) root.left = left root.right = right result.append(root) return result
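# A quick sanity check, assuming the Solution class above and the TreeNode
# import resolve: for n = 3 there should be exactly the 5 structurally unique
# BSTs listed in the header, and n = 0 should yield an empty list.
trees = Solution().generateTrees(3)
assert len(trees) == 5
assert Solution().generateTrees(0) == []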
""" WSGI config for cplugbackend project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cplugbackend.settings") application = get_wsgi_application()
# Copyright (c) 2016 by Kaminario Technologies, Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Kaminario K2 all-flash arrays.""" from oslo_log import log as logging from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder.volume.drivers.kaminario import kaminario_common as common ISCSI_TCP_PORT = "3260" K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) kaminario_logger = common.kaminario_logger @interface.volumedriver class KaminarioISCSIDriver(common.KaminarioCinderDriver): """Kaminario K2 iSCSI Volume Driver. Version history: 1.0 - Initial driver 1.1 - Added manage/unmanage and extra-specs support for nodedup 1.2 - Added replication support 1.3 - Added retype support 1.4 - Added replication failback support """ VERSION = '1.4' # ThirdPartySystems wiki page name CI_WIKI_NAME = "Kaminario_K2_CI" @kaminario_logger def __init__(self, *args, **kwargs): super(KaminarioISCSIDriver, self).__init__(*args, **kwargs) self._protocol = 'iSCSI' @kaminario_logger @coordination.synchronized('{self.k2_lock_name}') def initialize_connection(self, volume, connector): """Attach K2 volume to host.""" # To support replication failback temp_client = None if (hasattr(volume, 'replication_status') and volume.replication_status == K2_REP_FAILED_OVER): temp_client = self.client self.client = self.target # Get target_portal and target iqn. iscsi_portal, target_iqn = self.get_target_info(volume) # Map volume. lun = self.k2_initialize_connection(volume, connector) # To support replication failback if temp_client: self.client = temp_client # Return target volume information. 
return {"driver_volume_type": "iscsi", "data": {"target_iqn": target_iqn, "target_portal": iscsi_portal, "target_lun": lun, "target_discovered": True}} @kaminario_logger @coordination.synchronized('{self.k2_lock_name}') def terminate_connection(self, volume, connector, **kwargs): # To support replication failback temp_client = None if (hasattr(volume, 'replication_status') and volume.replication_status == K2_REP_FAILED_OVER): temp_client = self.client self.client = self.target super(KaminarioISCSIDriver, self).terminate_connection(volume, connector) # To support replication failback if temp_client: self.client = temp_client def get_target_info(self, volume): LOG.debug("Searching first iscsi port ip without wan in K2.") iscsi_ip_rs = self.client.search("system/net_ips") iscsi_ip = target_iqn = None if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0: for ip in iscsi_ip_rs.hits: if not ip.wan_port: iscsi_ip = ip.ip_address break if not iscsi_ip: msg = _("Unable to get ISCSI IP address from K2.") LOG.error(msg) raise exception.KaminarioCinderDriverException(reason=msg) iscsi_portal = "{0}:{1}".format(iscsi_ip, ISCSI_TCP_PORT) LOG.debug("Searching system state for target iqn in K2.") sys_state_rs = self.client.search("system/state") if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0: target_iqn = sys_state_rs.hits[0].iscsi_qualified_target_name if not target_iqn: msg = _("Unable to get target iqn from K2.") LOG.error(msg) raise exception.KaminarioCinderDriverException(reason=msg) return iscsi_portal, target_iqn @kaminario_logger def _get_host_object(self, connector): host_name = self.get_initiator_host_name(connector) LOG.debug("Searching initiator hostname: %s in K2.", host_name) host_rs = self.client.search("hosts", name=host_name) """Create a host if not exists.""" if host_rs.total == 0: try: LOG.debug("Creating initiator hostname: %s in K2.", host_name) host = self.client.new("hosts", name=host_name, type="Linux").save() LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.", {'iqn': connector['initiator'], 'host': host_name}) iqn = self.client.new("host_iqns", iqn=connector['initiator'], host=host) iqn.save() except Exception as ex: self._delete_host_by_name(host_name) LOG.exception("Unable to create host: %s in K2.", host_name) raise exception.KaminarioCinderDriverException(reason=ex) else: LOG.debug("Use existing initiator hostname: %s in K2.", host_name) host = host_rs.hits[0] return host, host_rs, host_name
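
# Not part of the driver: a minimal sketch of the "swap to the replication target
# client when the volume has failed over" pattern that initialize_connection and
# terminate_connection repeat above, written as a context manager. All names are
# illustrative; the real driver keeps the inline if/temp_client form.
from contextlib import contextmanager


@contextmanager
def _failed_over_client(driver, volume):
    temp_client = None
    if getattr(volume, 'replication_status', None) == K2_REP_FAILED_OVER:
        temp_client = driver.client
        driver.client = driver.target
    try:
        yield driver.client
    finally:
        # Restore the original client even if the mapping call raises.
        if temp_client is not None:
            driver.client = temp_client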
import asyncio import dataclasses import logging import multiprocessing from concurrent.futures.process import ProcessPoolExecutor from enum import Enum from typing import Any, Callable, Dict, List, Optional, Set, Tuple from stor.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block from stor.consensus.block_record import BlockRecord from stor.consensus.blockchain_interface import BlockchainInterface from stor.consensus.constants import ConsensusConstants from stor.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty from stor.consensus.find_fork_point import find_fork_point_in_chain from stor.consensus.full_block_to_block_record import block_to_block_record from stor.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing from stor.types.blockchain_format.sized_bytes import bytes32 from stor.types.blockchain_format.sub_epoch_summary import SubEpochSummary from stor.types.coin_spend import CoinSpend from stor.types.header_block import HeaderBlock from stor.types.unfinished_header_block import UnfinishedHeaderBlock from stor.util.errors import Err, ValidationError from stor.util.ints import uint32, uint64 from stor.util.streamable import recurse_jsonify from stor.wallet.block_record import HeaderBlockRecord from stor.wallet.wallet_block_store import WalletBlockStore from stor.wallet.wallet_coin_store import WalletCoinStore from stor.wallet.wallet_pool_store import WalletPoolStore from stor.wallet.wallet_transaction_store import WalletTransactionStore log = logging.getLogger(__name__) class ReceiveBlockResult(Enum): """ When Blockchain.receive_block(b) is called, one of these results is returned, showing whether the block was added to the chain (extending the peak), and if not, why it was not added. """ NEW_PEAK = 1 # Added to the peak of the blockchain ADDED_AS_ORPHAN = 2 # Added as an orphan/stale block (not a new peak of the chain) INVALID_BLOCK = 3 # Block was not added because it was invalid ALREADY_HAVE_BLOCK = 4 # Block is already present in this blockchain DISCONNECTED_BLOCK = 5 # Block's parent (previous pointer) is not in this blockchain class WalletBlockchain(BlockchainInterface): constants: ConsensusConstants constants_json: Dict # peak of the blockchain _peak_height: Optional[uint32] # All blocks in peak path are guaranteed to be included, can include orphan blocks __block_records: Dict[bytes32, BlockRecord] # Defines the path from genesis to the peak, no orphan blocks __height_to_hash: Dict[uint32, bytes32] # all hashes of blocks in block_record by height, used for garbage collection __heights_in_cache: Dict[uint32, Set[bytes32]] # All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak # (height_included, SubEpochSummary). 
Note: ONLY for the blocks in the path to the peak __sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {} # Stores coin_store: WalletCoinStore tx_store: WalletTransactionStore pool_store: WalletPoolStore block_store: WalletBlockStore # Used to verify blocks in parallel pool: ProcessPoolExecutor new_transaction_block_callback: Any reorg_rollback: Any wallet_state_manager_lock: asyncio.Lock # Whether blockchain is shut down or not _shut_down: bool # Lock to prevent simultaneous reads and writes lock: asyncio.Lock log: logging.Logger @staticmethod async def create( block_store: WalletBlockStore, coin_store: WalletCoinStore, tx_store: WalletTransactionStore, pool_store: WalletPoolStore, consensus_constants: ConsensusConstants, new_transaction_block_callback: Callable, # f(removals: List[Coin], additions: List[Coin], height: uint32) reorg_rollback: Callable, lock: asyncio.Lock, ): """ Initializes a blockchain with the BlockRecords from disk, assuming they have all been validated. Uses the genesis block given in override_constants, or as a fallback, in the consensus constants config. """ self = WalletBlockchain() self.lock = asyncio.Lock() self.coin_store = coin_store self.tx_store = tx_store self.pool_store = pool_store cpu_count = multiprocessing.cpu_count() if cpu_count > 61: cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903 num_workers = max(cpu_count - 2, 1) self.pool = ProcessPoolExecutor(max_workers=num_workers) log.info(f"Started {num_workers} processes for block validation") self.constants = consensus_constants self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants)) self.block_store = block_store self._shut_down = False self.new_transaction_block_callback = new_transaction_block_callback self.reorg_rollback = reorg_rollback self.log = logging.getLogger(__name__) self.wallet_state_manager_lock = lock await self._load_chain_from_store() return self def shut_down(self): self._shut_down = True self.pool.shutdown(wait=True) async def _load_chain_from_store(self) -> None: """ Initializes the state of the Blockchain class from the database. """ height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_heights_dicts() self.__height_to_hash = height_to_hash self.__sub_epoch_summaries = sub_epoch_summaries self.__block_records = {} self.__heights_in_cache = {} blocks, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE) for block_record in blocks.values(): self.add_block_record(block_record) if len(blocks) == 0: assert peak is None self._peak_height = None return None assert peak is not None self._peak_height = self.block_record(peak).height assert len(self.__height_to_hash) == self._peak_height + 1 def get_peak(self) -> Optional[BlockRecord]: """ Return the peak of the blockchain """ if self._peak_height is None: return None return self.height_to_block_record(self._peak_height) async def receive_block( self, header_block_record: HeaderBlockRecord, pre_validation_result: Optional[PreValidationResult] = None, trusted: bool = False, fork_point_with_peak: Optional[uint32] = None, additional_coin_spends: List[CoinSpend] = None, ) -> Tuple[ReceiveBlockResult, Optional[Err], Optional[uint32]]: """ Adds a new block into the blockchain, if it's valid and connected to the current blockchain, regardless of whether it is the child of a head, or another block. Returns a header if block is added to head. Returns an error if the block is invalid. Also returns the fork height, in the case of a new peak. 
""" if additional_coin_spends is None: additional_coin_spends = [] block = header_block_record.header genesis: bool = block.height == 0 if self.contains_block(block.header_hash): return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None if not self.contains_block(block.prev_header_hash) and not genesis: return ( ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ) if block.height == 0: prev_b: Optional[BlockRecord] = None else: prev_b = self.block_record(block.prev_header_hash) sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty( self.constants, len(block.finished_sub_slots) > 0, prev_b, self ) if trusted is False and pre_validation_result is None: required_iters, error = validate_finished_header_block( self.constants, self, block, False, difficulty, sub_slot_iters ) elif trusted: unfinished_header_block = UnfinishedHeaderBlock( block.finished_sub_slots, block.reward_chain_block.get_unfinished(), block.challenge_chain_sp_proof, block.reward_chain_sp_proof, block.foliage, block.foliage_transaction_block, block.transactions_filter, ) required_iters, val_error = validate_unfinished_header_block( self.constants, self, unfinished_header_block, False, difficulty, sub_slot_iters, False, True ) error = val_error if val_error is not None else None else: assert pre_validation_result is not None required_iters = pre_validation_result.required_iters error = ( ValidationError(Err(pre_validation_result.error)) if pre_validation_result.error is not None else None ) if error is not None: return ReceiveBlockResult.INVALID_BLOCK, error.code, None assert required_iters is not None block_record = block_to_block_record( self.constants, self, required_iters, None, block, ) heights_changed: Set[Tuple[uint32, Optional[bytes32]]] = set() # Always add the block to the database async with self.wallet_state_manager_lock: async with self.block_store.db_wrapper.lock: try: await self.block_store.db_wrapper.begin_transaction() await self.block_store.add_block_record(header_block_record, block_record, additional_coin_spends) self.add_block_record(block_record) self.clean_block_record(block_record.height - self.constants.BLOCKS_CACHE_SIZE) fork_height, records_to_add = await self._reconsider_peak( block_record, genesis, fork_point_with_peak, additional_coin_spends, heights_changed ) for record in records_to_add: if record.sub_epoch_summary_included is not None: self.__sub_epoch_summaries[record.height] = record.sub_epoch_summary_included await self.block_store.db_wrapper.commit_transaction() except BaseException as e: self.log.error(f"Error during db transaction: {e}") if self.block_store.db_wrapper.db._connection is not None: await self.block_store.db_wrapper.rollback_transaction() self.remove_block_record(block_record.header_hash) self.block_store.rollback_cache_block(block_record.header_hash) await self.coin_store.rebuild_wallet_cache() await self.tx_store.rebuild_tx_cache() await self.pool_store.rebuild_cache() for height, replaced in heights_changed: # If it was replaced change back to the previous value otherwise pop the change if replaced is not None: self.__height_to_hash[height] = replaced else: self.__height_to_hash.pop(height) raise if fork_height is not None: self.log.info(f"💰 Updated wallet peak to height {block_record.height}, weight {block_record.weight}, ") return ReceiveBlockResult.NEW_PEAK, None, fork_height else: return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None async def _reconsider_peak( self, block_record: BlockRecord, genesis: bool, fork_point_with_peak: 
Optional[uint32], additional_coin_spends_from_wallet: Optional[List[CoinSpend]], heights_changed: Set[Tuple[uint32, Optional[bytes32]]], ) -> Tuple[Optional[uint32], List[BlockRecord]]: """ When a new block is added, this is called, to check if the new block is the new peak of the chain. This also handles reorgs by reverting blocks which are not in the heaviest chain. It returns the height of the fork between the previous chain and the new chain, or returns None if there was no update to the heaviest chain. """ peak = self.get_peak() if genesis: if peak is None: block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record( block_record.header_hash ) assert block is not None replaced = None if uint32(0) in self.__height_to_hash: replaced = self.__height_to_hash[uint32(0)] self.__height_to_hash[uint32(0)] = block.header_hash heights_changed.add((uint32(0), replaced)) assert len(block.additions) == 0 and len(block.removals) == 0 await self.new_transaction_block_callback(block.removals, block.additions, block_record, []) self._peak_height = uint32(0) return uint32(0), [block_record] return None, [] assert peak is not None if block_record.weight > peak.weight: # Find the fork. if the block is just being appended, it will return the peak # If no blocks in common, returns -1, and reverts all blocks if fork_point_with_peak is not None: fork_h: int = fork_point_with_peak else: fork_h = find_fork_point_in_chain(self, block_record, peak) # Rollback to fork self.log.debug(f"fork_h: {fork_h}, SB: {block_record.height}, peak: {peak.height}") if block_record.prev_hash != peak.header_hash: await self.reorg_rollback(fork_h) # Rollback sub_epoch_summaries heights_to_delete = [] for ses_included_height in self.__sub_epoch_summaries.keys(): if ses_included_height > fork_h: heights_to_delete.append(ses_included_height) for height in heights_to_delete: del self.__sub_epoch_summaries[height] # Collect all blocks from fork point to new peak blocks_to_add: List[Tuple[HeaderBlockRecord, BlockRecord, List[CoinSpend]]] = [] curr = block_record.header_hash while fork_h < 0 or curr != self.height_to_hash(uint32(fork_h)): fetched_header_block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record(curr) fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr) if curr == block_record.header_hash: additional_coin_spends = additional_coin_spends_from_wallet else: additional_coin_spends = await self.block_store.get_additional_coin_spends(curr) if additional_coin_spends is None: additional_coin_spends = [] assert fetched_header_block is not None assert fetched_block_record is not None blocks_to_add.append((fetched_header_block, fetched_block_record, additional_coin_spends)) if fetched_header_block.height == 0: # Doing a full reorg, starting at height 0 break curr = fetched_block_record.prev_hash records_to_add: List[BlockRecord] = [] for fetched_header_block, fetched_block_record, additional_coin_spends in reversed(blocks_to_add): replaced = None if fetched_block_record.height in self.__height_to_hash: replaced = self.__height_to_hash[fetched_block_record.height] self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash heights_changed.add((fetched_block_record.height, replaced)) records_to_add.append(fetched_block_record) if fetched_block_record.is_transaction_block: await self.new_transaction_block_callback( fetched_header_block.removals, fetched_header_block.additions, fetched_block_record, additional_coin_spends, ) 
# Changes the peak to be the new peak await self.block_store.set_peak(block_record.header_hash) self._peak_height = block_record.height if fork_h < 0: return None, records_to_add return uint32(fork_h), records_to_add # This is not a heavier block than the heaviest we have seen, so we don't change the coin set return None, [] def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64: assert self.contains_block(header_hash) curr = self.block_record(header_hash) if curr.height <= 2: return self.constants.DIFFICULTY_STARTING return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1] def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64: assert self.contains_block(header_hash) curr = self.block_record(header_hash) if curr.height <= 2: return self.constants.SUB_SLOT_ITERS_STARTING return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0] async def pre_validate_blocks_multiprocessing( self, blocks: List[HeaderBlock], batch_size: int = 4 ) -> Optional[List[PreValidationResult]]: return await pre_validate_blocks_multiprocessing( self.constants, self.constants_json, self, blocks, self.pool, True, {}, None, batch_size ) def contains_block(self, header_hash: bytes32) -> bool: """ True if we have already added this block to the chain. This may return false for orphan blocks that we have added but no longer keep in memory. """ return header_hash in self.__block_records def block_record(self, header_hash: bytes32) -> BlockRecord: return self.__block_records[header_hash] def height_to_block_record(self, height: uint32, check_db=False) -> BlockRecord: header_hash = self.height_to_hash(height) return self.block_record(header_hash) def get_ses_heights(self) -> List[uint32]: return sorted(self.__sub_epoch_summaries.keys()) def get_ses(self, height: uint32) -> SubEpochSummary: return self.__sub_epoch_summaries[height] def height_to_hash(self, height: uint32) -> Optional[bytes32]: return self.__height_to_hash[height] def contains_height(self, height: uint32) -> bool: return height in self.__height_to_hash def get_peak_height(self) -> Optional[uint32]: return self._peak_height async def warmup(self, fork_point: uint32): """ Loads blocks into the cache. The blocks loaded include all blocks from fork point - BLOCKS_CACHE_SIZE up to and including the fork_point. Args: fork_point: the last block height to load in the cache """ if self._peak_height is None: return None blocks = await self.block_store.get_block_records_in_range( fork_point - self.constants.BLOCKS_CACHE_SIZE, self._peak_height ) for block_record in blocks.values(): self.add_block_record(block_record) def clean_block_record(self, height: int): """ Clears all block records in the cache which have block_record < height. Args: height: Minimum height that we need to keep in the cache """ if height < 0: return None blocks_to_remove = self.__heights_in_cache.get(uint32(height), None) while blocks_to_remove is not None and height >= 0: for header_hash in blocks_to_remove: del self.__block_records[header_hash] del self.__heights_in_cache[uint32(height)] # remove height from heights in cache if height == 0: break height -= 1 blocks_to_remove = self.__heights_in_cache.get(uint32(height), None) def clean_block_records(self): """ Cleans the cache so that we only maintain relevant blocks. This removes block records that have height < peak - BLOCKS_CACHE_SIZE. These blocks are necessary for calculating future difficulty adjustments. 
""" if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE: return None peak = self.get_peak() assert peak is not None if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0: return None self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE) async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]: return await self.block_store.get_block_records_in_range(start, stop) async def get_header_blocks_in_range( self, start: int, stop: int, tx_filter: bool = True ) -> Dict[bytes32, HeaderBlock]: return await self.block_store.get_header_blocks_in_range(start, stop) async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]: if header_hash in self.__block_records: return self.__block_records[header_hash] return await self.block_store.get_block_record(header_hash) def remove_block_record(self, header_hash: bytes32): sbr = self.block_record(header_hash) del self.__block_records[header_hash] self.__heights_in_cache[sbr.height].remove(header_hash) def add_block_record(self, block_record: BlockRecord): self.__block_records[block_record.header_hash] = block_record if block_record.height not in self.__heights_in_cache.keys(): self.__heights_in_cache[block_record.height] = set() self.__heights_in_cache[block_record.height].add(block_record.header_hash)
version = "0.117.0" import atexit import datetime import logging import os import random import signal import time from instabot import utils # from instabot.api.api import API from ..api import API from .state.bot_state import BotState from .state.bot_cache import BotCache from .bot_archive import archive, archive_medias, unarchive_medias from .bot_block import block, block_bots, block_users, unblock, unblock_users from .bot_checkpoint import load_checkpoint, save_checkpoint from .bot_comment import ( comment, comment_geotag, comment_hashtag, comment_medias, comment_user, comment_users, is_commented, reply_to_comment, ) from .bot_delete import delete_comment, delete_media, delete_medias from .bot_direct import ( approve_pending_thread_requests, send_hashtag, send_like, send_media, send_medias, send_message, send_messages, send_photo, send_profile, ) from .bot_filter import check_media, check_not_bot, check_user, filter_medias from .bot_follow import ( approve_pending_follow_requests, follow, follow_followers, follow_following, follow_users, reject_pending_follow_requests, ) from .bot_get import ( convert_to_user_id, get_archived_medias, get_comment, get_comment_likers, get_geotag_medias, get_geotag_users, get_hashtag_medias, get_hashtag_users, get_last_user_medias, get_link_from_media_id, get_locations_from_coordinates, get_media_commenters, get_media_comments, get_media_comments_all, get_media_id_from_link, get_media_info, get_media_likers, get_media_owner, get_messages, get_pending_follow_requests, get_pending_thread_requests, get_popular_medias, get_self_story_viewers, get_timeline_medias, get_timeline_users, get_total_hashtag_medias, get_total_user_medias, get_user_followers, get_user_following, get_user_id_from_username, get_user_info, get_user_likers, get_user_medias, get_user_reel, get_user_stories, get_user_tags_medias, get_username_from_user_id, get_your_medias, search_users, get_muted_friends, ) from .bot_like import ( like, like_comment, like_followers, like_following, like_geotag, like_hashtag, like_location_feed, like_media_comments, like_medias, like_timeline, like_user, like_users, ) from .bot_photo import download_photo, download_photos, upload_photo, upload_album from .bot_stats import save_user_stats from .bot_story import download_stories, upload_story_photo, watch_users_reels from .bot_support import ( check_if_file_exists, console_print, extract_urls, read_list_from_file, ) from .bot_unfollow import ( unfollow, unfollow_everyone, unfollow_non_followers, unfollow_users, ) from .bot_unlike import ( unlike, unlike_comment, unlike_media_comments, unlike_medias, unlike_user, ) from .bot_video import download_video, upload_video current_path = os.path.abspath(os.getcwd()) class Bot(object): def __init__( self, base_path=current_path + "/config/", whitelist_file="whitelist.txt", blacklist_file="blacklist.txt", comments_file="comments.txt", followed_file="followed.txt", unfollowed_file="unfollowed.txt", skipped_file="skipped.txt", friends_file="friends.txt", proxy=None, max_likes_per_day=random.randint(50, 100), max_unlikes_per_day=random.randint(50, 100), max_follows_per_day=random.randint(50, 100), max_unfollows_per_day=random.randint(50, 100), max_comments_per_day=random.randint(50, 100), max_blocks_per_day=random.randint(50, 100), max_unblocks_per_day=random.randint(50, 100), max_likes_to_like=random.randint(50, 100), min_likes_to_like=random.randint(50, 100), max_messages_per_day=random.randint(50, 100), filter_users=False, filter_private_users=False, 
filter_users_without_profile_photo=False, filter_previously_followed=False, filter_business_accounts=False, filter_verified_accounts=False, max_followers_to_follow=5000, min_followers_to_follow=10, max_following_to_follow=2000, min_following_to_follow=10, max_followers_to_following_ratio=15, max_following_to_followers_ratio=15, min_media_count_to_follow=3, max_following_to_block=2000, like_delay=random.randint(300, 600), unlike_delay=random.randint(300, 600), follow_delay=random.randint(300, 600), unfollow_delay=random.randint(300, 600), comment_delay=random.randint(300, 600), block_delay=random.randint(300, 600), unblock_delay=random.randint(300, 600), message_delay=random.randint(300, 600), stop_words=("shop", "store", "free"), blacklist_hashtags=["#shop", "#store", "#free"], blocked_actions_protection=True, blocked_actions_sleep=True, blocked_actions_sleep_delay=random.randint(600, 1200), verbosity=True, device=None, save_logfile=True, log_filename=None, loglevel_file=logging.DEBUG, loglevel_stream=logging.INFO, log_follow_unfollow=True, cli=True, ): self.cli = cli self.api = API( device=device, base_path=base_path, save_logfile=save_logfile, log_filename=log_filename, loglevel_file=loglevel_file, loglevel_stream=loglevel_stream, cli=cli ) self.log_follow_unfollow = log_follow_unfollow self.base_path = base_path self.state = BotState() self.delays = { "like": like_delay, "unlike": unlike_delay, "follow": follow_delay, "unfollow": unfollow_delay, "comment": comment_delay, "block": block_delay, "unblock": unblock_delay, "message": message_delay, } # limits - follow self.filter_users = filter_users self.filter_private_users = filter_private_users self.filter_users_without_profile_photo = filter_users_without_profile_photo self.filter_business_accounts = filter_business_accounts self.filter_verified_accounts = filter_verified_accounts self.filter_previously_followed = filter_previously_followed self.max_per_day = { "likes": max_likes_per_day, "unlikes": max_unlikes_per_day, "follows": max_follows_per_day, "unfollows": max_unfollows_per_day, "comments": max_comments_per_day, "blocks": max_blocks_per_day, "unblocks": max_unblocks_per_day, "messages": max_messages_per_day, } self.blocked_actions_protection = blocked_actions_protection self.blocked_actions_sleep = blocked_actions_sleep self.blocked_actions_sleep_delay = blocked_actions_sleep_delay self.max_likes_to_like = max_likes_to_like self.min_likes_to_like = min_likes_to_like self.max_followers_to_follow = max_followers_to_follow self.min_followers_to_follow = min_followers_to_follow self.max_following_to_follow = max_following_to_follow self.min_following_to_follow = min_following_to_follow self.max_followers_to_following_ratio = max_followers_to_following_ratio self.max_following_to_followers_ratio = max_following_to_followers_ratio self.min_media_count_to_follow = min_media_count_to_follow self.stop_words = stop_words self.blacklist_hashtags = blacklist_hashtags # limits - block self.max_following_to_block = max_following_to_block # current following and followers self.cache = BotCache() # Adjust file paths followed_file = os.path.join(base_path, followed_file) unfollowed_file = os.path.join(base_path, unfollowed_file) skipped_file = os.path.join(base_path, skipped_file) friends_file = os.path.join(base_path, friends_file) comments_file = os.path.join(base_path, comments_file) blacklist_file = os.path.join(base_path, blacklist_file) whitelist_file = os.path.join(base_path, whitelist_file) # Database files self.followed_file = 
utils.file(followed_file) self.unfollowed_file = utils.file(unfollowed_file) self.skipped_file = utils.file(skipped_file) self.friends_file = utils.file(friends_file) self.comments_file = utils.file(comments_file) self.blacklist_file = utils.file(blacklist_file) self.whitelist_file = utils.file(whitelist_file) self.proxy = proxy self.verbosity = verbosity self.logger = self.api.logger self.logger.info("Instabot version: " + version + " Started") self.logger.debug("Bot imported from {}".format(__file__)) @property def user_id(self): # For compatibility return self.api.user_id @property def username(self): # For compatibility return self.api.username @property def password(self): # For compatibility return self.api.password @property def last_json(self): # For compatibility return self.api.last_json @property def blacklist(self): # This is a fast operation because # `get_user_id_from_username` is cached. return [ self.convert_to_user_id(i) for i in self.blacklist_file.list if i is not None ] @property def whitelist(self): # This is a fast operation because # `get_user_id_from_username` is cached. return [ self.convert_to_user_id(i) for i in self.whitelist_file.list if i is not None ] @property def following(self): now = time.time() last = self.last.get("updated_following", now) if self._following is None or (now - last) > 7200: self.console_print("`bot.following` is empty, will download.", "green") self._following = self.get_user_following(self.user_id) self.last["updated_following"] = now return self._following @property def followers(self): now = time.time() last = self.last.get("updated_followers", now) if self._followers is None or (now - last) > 7200: self.console_print("`bot.followers` is empty, will download.", "green") self._followers = self.get_user_followers(self.user_id) self.last["updated_followers"] = now return self._followers @property def start_time(self): return self.state.start_time @start_time.setter def start_time(self, value): self.state.start_time = value @property def total(self): return self.state.total @total.setter def total(self, value): self.state.total = value @property def sleeping_actions(self): return self.state.sleeping_actions @sleeping_actions.setter def sleeping_actions(self, value): self.state.sleeping_actions = value @property def blocked_actions(self): return self.state.blocked_actions @blocked_actions.setter def blocked_actions(self, value): self.state.blocked_actions = value @property def last(self): return self.state.last @last.setter def last(self, value): self.state.last = value @property def _following(self): return self.cache.following @_following.setter def _following(self, value): self.cache.following = value @property def _followers(self): return self.cache.followers @_followers.setter def _followers(self, value): self.cache.followers = value @property def _user_infos(self): return self.cache.user_infos @_user_infos.setter def _user_infos(self, value): self.cache.user_infos = value @property def _usernames(self): return self.cache.usernames @_usernames.setter def _usernames(self, value): self.cache.usernames = value @staticmethod def version(): try: from pip._vendor import pkg_resources except ImportError: import pkg_resources return next( ( p.version for p in pkg_resources.working_set if p.project_name.lower() == "instabot" ), "No match", ) def logout(self, *args, **kwargs): self.api.logout() self.logger.info( "Bot stopped. 
" "Worked: %s", datetime.datetime.now() - self.start_time ) self.print_counters() def login(self, **kwargs): """if login function is run threaded, for example in scheduled job, signal will fail because it 'only works in main thread'. In this case, you may want to call login(is_threaded=True). Provide 2FA code with two_factor_code key for the first time when it is needed for the first login. """ if self.proxy: kwargs["proxy"] = self.proxy if "two_factor_code" in kwargs: self.api.set_two_factor_code(kwargs.pop("two_factor_code")) if self.api.login(**kwargs) is False: return False self.prepare() atexit.register(self.print_counters) if "is_threaded" in kwargs: if kwargs["is_threaded"]: return True signal.signal(signal.SIGTERM, self.print_counters) return True def prepare(self): storage = load_checkpoint(self) if storage is not None: ( total, self.blocked_actions, self.api.total_requests, self.start_time, ) = storage for k, v in total.items(): self.total[k] = v def print_counters(self, *args, **kwargs): save_checkpoint(self) for key, val in self.total.items(): if val > 0: self.logger.info( "Total {}: {}{}".format( key, val, "/" + str(self.max_per_day[key]) if self.max_per_day.get(key) else "", ) ) for key, val in self.blocked_actions.items(): if val: self.logger.info("Blocked {}".format(key)) self.logger.info("Total requests: {}".format(self.api.total_requests)) def delay(self, key): """ Sleep only if elapsed time since `self.last[key]` < `self.delay[key]`. """ last_action, target_delay = self.last[key], self.delays[key] elapsed_time = time.time() - last_action if elapsed_time < target_delay: t_remaining = target_delay - elapsed_time time.sleep(t_remaining * random.uniform(0.25, 1.25)) self.last[key] = time.time() def error_delay(self): time.sleep(10) def small_delay(self): time.sleep(random.uniform(0.75, 3.75)) def very_small_delay(self): time.sleep(random.uniform(0.175, 0.875)) def reached_limit(self, key): current_date = datetime.datetime.now() passed_days = (current_date.date() - self.start_time.date()).days if passed_days > 0: self.reset_counters() return self.max_per_day[key] - self.total[key] <= 0 def reset_counters(self): for k in self.total: self.total[k] = 0 for k in self.blocked_actions: self.blocked_actions[k] = False self.start_time = datetime.datetime.now() def reset_cache(self): self._following = None self._followers = None self._user_infos = {} self._usernames = {} # getters def get_user_stories(self, user_id): """ Returns array of stories links """ return get_user_stories(self, user_id) def get_user_reel(self, user_id): return get_user_reel(self, user_id) def get_self_story_viewers(self, story_id): return get_self_story_viewers(self, story_id) def get_pending_follow_requests(self): return get_pending_follow_requests(self) def get_your_medias(self, as_dict=False): """ Returns your media ids. With parameter as_dict=True returns media as dict. :type as_dict: bool """ return get_your_medias(self, as_dict) def get_archived_medias(self, as_dict=False): """ Returns your archived media ids. With parameter as_dict=True returns media as dict. 
:type as_dict: bool """ return get_archived_medias(self, as_dict) def get_timeline_medias(self): return get_timeline_medias(self) def get_popular_medias(self): return get_popular_medias(self) def get_user_medias(self, user_id, filtration=True, is_comment=False): return get_user_medias(self, user_id, filtration, is_comment) def get_total_user_medias(self, user_id): return get_total_user_medias(self, user_id) def get_last_user_medias(self, user_id, count): """ Returns the last number of posts specified in count in media ids array. :type count: int :param count: Count of posts :return: array """ return get_last_user_medias(self, user_id, count) def get_hashtag_medias(self, hashtag, filtration=True): return get_hashtag_medias(self, hashtag, filtration) def get_total_hashtag_medias(self, hashtag, amount=100, filtration=False): return get_total_hashtag_medias(self, hashtag, amount, filtration) def get_geotag_medias(self, geotag, filtration=True): return get_geotag_medias(self, geotag, filtration) def get_locations_from_coordinates(self, latitude, longitude): return get_locations_from_coordinates(self, latitude, longitude) def get_media_info(self, media_id): return get_media_info(self, media_id) def get_timeline_users(self): return get_timeline_users(self) def get_hashtag_users(self, hashtag): return get_hashtag_users(self, hashtag) def get_geotag_users(self, geotag): return get_geotag_users(self, geotag) def get_user_id_from_username(self, username): return get_user_id_from_username(self, username) def get_user_tags_medias(self, user_id): return get_user_tags_medias(self, user_id) def get_username_from_user_id(self, user_id): return get_username_from_user_id(self, user_id) def get_user_info(self, user_id, use_cache=True): return get_user_info(self, user_id, use_cache) def get_user_followers(self, user_id, nfollows=None): return get_user_followers(self, user_id, nfollows) def get_user_following(self, user_id, nfollows=None): return get_user_following(self, user_id, nfollows) def get_comment_likers(self, comment_id): return get_comment_likers(self, comment_id) def get_media_likers(self, media_id): return get_media_likers(self, media_id) def get_media_comments(self, media_id, only_text=False): return get_media_comments(self, media_id, only_text) def get_media_comments_all(self, media_id, only_text=False, count=False): return get_media_comments_all(self, media_id, only_text, count) def get_comment(self): return get_comment(self) def get_media_commenters(self, media_id): return get_media_commenters(self, media_id) def get_media_owner(self, media): return get_media_owner(self, media) def get_user_likers(self, user_id, media_count=10): return get_user_likers(self, user_id, media_count) def get_media_id_from_link(self, link): return get_media_id_from_link(self, link) def get_link_from_media_id(self, link): return get_link_from_media_id(self, link) def get_messages(self): return get_messages(self) def search_users(self, query): return search_users(self, query) def get_muted_friends(self, muted_content="stories"): return get_muted_friends(self, muted_content) def convert_to_user_id(self, usernames): return convert_to_user_id(self, usernames) def get_pending_thread_requests(self): return get_pending_thread_requests(self) # like def like( self, media_id, check_media=True, container_module="feed_short_url", feed_position=0, username=None, user_id=None, hashtag_name=None, hashtag_id=None, entity_page_name=None, entity_page_id=None, ): return like( self, media_id, check_media, 
container_module=container_module, feed_position=feed_position, username=username, user_id=user_id, hashtag_name=hashtag_name, hashtag_id=hashtag_id, entity_page_name=entity_page_name, entity_page_id=entity_page_id, ) def like_comment(self, comment_id): return like_comment(self, comment_id) def like_medias( self, media_ids, check_media=True, container_module="feed_timeline", username=None, user_id=None, hashtag_name=None, hashtag_id=None, entity_page_name=None, entity_page_id=None, ): return like_medias( self, media_ids, check_media, container_module=container_module, username=username, user_id=user_id, hashtag_name=hashtag_name, hashtag_id=hashtag_id, entity_page_name=entity_page_name, entity_page_id=entity_page_id, ) def like_timeline(self, amount=None): return like_timeline(self, amount) def like_media_comments(self, media_id): return like_media_comments(self, media_id) def like_user(self, user_id, amount=None, filtration=True): return like_user(self, user_id, amount, filtration) def like_hashtag(self, hashtag, amount=None): return like_hashtag(self, hashtag, amount) def like_geotag(self, geotag, amount=None): return like_geotag(self, geotag, amount) def like_users(self, user_ids, nlikes=None, filtration=True): return like_users(self, user_ids, nlikes, filtration) def like_location_feed(self, place, amount): return like_location_feed(self, place, amount) def like_followers(self, user_id, nlikes=None, nfollows=None): return like_followers(self, user_id, nlikes, nfollows) def like_following(self, user_id, nlikes=None, nfollows=None): return like_following(self, user_id, nlikes, nfollows) # unlike def unlike(self, media_id): return unlike(self, media_id) def unlike_comment(self, comment_id): return unlike_comment(self, comment_id) def unlike_media_comments(self, media_id): return unlike_media_comments(self, media_id) def unlike_medias(self, media_ids): return unlike_medias(self, media_ids) def unlike_user(self, user): return unlike_user(self, user) # story def download_stories(self, username): return download_stories(self, username) def upload_story_photo(self, photo, upload_id=None): return upload_story_photo(self, photo, upload_id) def watch_users_reels(self, user_ids, max_users=100): return watch_users_reels(self, user_ids, max_users=max_users) # photo def download_photo( self, media_id, folder="photos", filename=None, save_description=False ): return download_photo(self, media_id, folder, filename, save_description) def download_photos(self, medias, folder="photos", save_description=False): return download_photos(self, medias, folder, save_description) def upload_photo( self, photo, caption=None, upload_id=None, from_video=False, options={}, user_tags=None, is_sidecar=False): """Upload photo to Instagram @param photo Path to photo file (String) @param caption Media description (String) @param upload_id Unique upload_id (String). When None, then generate automatically @param from_video A flag that signals whether the photo is loaded from the video or by itself (Boolean, DEPRECATED: not used) @param options Object with difference options, e.g. configure_timeout, rename (Dict) Designed to reduce the number of function arguments! This is the simplest request object. 
@param user_tags Tag other users (List) usertags = [ {"user_id": user_id, "position": [x, y]} ] @param is_sidecar An album element (Boolean) @return Object with state of uploading to Instagram (or False) """ return upload_photo(self, photo, caption, upload_id, from_video, options, user_tags, is_sidecar) def upload_album( self, photos, caption=None, upload_id=None, from_video=False, options={}, user_tags=None ): """Upload album to Instagram @param photos List of paths to photo files (List of strings) @param caption Media description (String) @param upload_id Unique upload_id (String). When None, then generate automatically @param from_video A flag that signals whether the photo is loaded from the video or by itself (Boolean, DEPRECATED: not used) @param options Object with difference options, e.g. configure_timeout, rename (Dict) Designed to reduce the number of function arguments! This is the simplest request object. @param user_tags @return Object with state of uploading to Instagram (or False) """ return upload_album(self, photos, caption, upload_id, from_video, options, user_tags) # video def upload_video(self, video, caption="", thumbnail=None, options={}): """Upload video to Instagram @param video Path to video file (String) @param caption Media description (String) @param thumbnail Path to thumbnail for video (String). When None, then thumbnail is generated automatically @param options Object with difference options, e.g. configure_timeout, rename_thumbnail, rename (Dict) Designed to reduce the number of function arguments! @return Object with Instagram upload state (or False) """ return upload_video(self, video, caption, thumbnail, options) def download_video( self, media_id, folder="videos", filename=None, save_description=False ): return download_video(self, media_id, folder, filename, save_description) # follow def follow(self, user_id, check_user=True): return follow(self, user_id, check_user) def follow_users(self, user_ids, nfollows=None): return follow_users(self, user_ids, nfollows) def follow_followers(self, user_id, nfollows=None): return follow_followers(self, user_id, nfollows) def follow_following(self, user_id, nfollows=None): return follow_following(self, user_id, nfollows) # unfollow def unfollow(self, user_id): return unfollow(self, user_id) def unfollow_users(self, user_ids): return unfollow_users(self, user_ids) def unfollow_non_followers(self, n_to_unfollows=None): return unfollow_non_followers(self, n_to_unfollows) def unfollow_everyone(self): return unfollow_everyone(self) def approve_pending_follow_requests(self): return approve_pending_follow_requests(self) def reject_pending_follow_requests(self): return reject_pending_follow_requests(self) # direct def send_message(self, text, user_ids, thread_id=None): return send_message(self, text, user_ids, thread_id) def send_messages(self, text, user_ids): return send_messages(self, text, user_ids) def send_media(self, media_id, user_ids, text=None, thread_id=None): return send_media(self, media_id, user_ids, text, thread_id) def send_medias(self, media_id, user_ids, text=None): return send_medias(self, media_id, user_ids, text) def send_hashtag(self, hashtag, user_ids, text="", thread_id=None): return send_hashtag(self, hashtag, user_ids, text, thread_id) def send_profile(self, profile_user_id, user_ids, text="", thread_id=None): return send_profile(self, profile_user_id, user_ids, text, thread_id) def send_like(self, user_ids, thread_id=None): return send_like(self, user_ids, thread_id) def send_photo(self, 
user_ids, filepath, thread_id=None): return send_photo(self, user_ids, filepath, thread_id) def approve_pending_thread_requests(self): return approve_pending_thread_requests(self) # delete def delete_media(self, media_id): return delete_media(self, media_id) def delete_medias(self, medias): return delete_medias(self, medias) def delete_comment(self, media_id, comment_id): return delete_comment(self, media_id, comment_id) # archive def archive(self, media_id, undo=False): return archive(self, media_id, undo) def unarchive(self, media_id): return archive(self, media_id, True) def archive_medias(self, medias): return archive_medias(self, medias) def unarchive_medias(self, medias): return unarchive_medias(self, medias) # comment def comment(self, media_id, comment_text): return comment(self, media_id, comment_text) def reply_to_comment(self, media_id, comment_text, parent_comment_id): return reply_to_comment(self, media_id, comment_text, parent_comment_id) def comment_hashtag(self, hashtag, amount=None): return comment_hashtag(self, hashtag, amount) def comment_medias(self, medias): return comment_medias(self, medias) def comment_user(self, user_id, amount=None): return comment_user(self, user_id, amount) def comment_users(self, user_ids, ncomments=None): return comment_users(self, user_ids, ncomments) def comment_geotag(self, geotag): return comment_geotag(self, geotag) def is_commented(self, media_id): return is_commented(self, media_id) # block def block(self, user_id): return block(self, user_id) def unblock(self, user_id): return unblock(self, user_id) def block_users(self, user_ids): return block_users(self, user_ids) def unblock_users(self, user_ids): return unblock_users(self, user_ids) def block_bots(self): return block_bots(self) # filter def filter_medias( self, media_items, filtration=True, quiet=False, is_comment=False ): return filter_medias(self, media_items, filtration, quiet, is_comment) def check_media(self, media): return check_media(self, media) def check_user(self, user, unfollowing=False): return check_user(self, user, unfollowing) def check_not_bot(self, user): return check_not_bot(self, user) # support def check_if_file_exists(self, file_path, quiet=False): return check_if_file_exists(file_path, quiet) def extract_urls(self, text): return extract_urls(text) def read_list_from_file(self, file_path): return read_list_from_file(file_path) def console_print(self, text, color=None): return console_print(self, text, color) # stats def save_user_stats(self, username, path=""): return save_user_stats(self, username, path=path)
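
# Usage sketch only, appended for illustration; it is not part of the original
# module. Credentials and limits are placeholders, and the username/password
# keyword arguments are assumed to be forwarded to api.login by Bot.login(**kwargs).
def _example_session():
    bot = Bot(max_likes_per_day=60, like_delay=120)
    if bot.login(username="example_user", password="example_password"):
        medias = bot.get_timeline_medias()
        bot.like_medias(medias[:3])
        bot.print_counters()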
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class OperatorControl(object): """ Operator Access Control enables you to grant, audit, or revoke the access Oracle has to your Exadata Cloud@Customer infrastructure, and obtain audit reports of all actions taken by a human operator, in a near real-time manner. """ #: A constant which can be used with the resource_type property of a OperatorControl. #: This constant has a value of "EXACC" RESOURCE_TYPE_EXACC = "EXACC" #: A constant which can be used with the resource_type property of a OperatorControl. #: This constant has a value of "EXADATAINFRASTRUCTURE" RESOURCE_TYPE_EXADATAINFRASTRUCTURE = "EXADATAINFRASTRUCTURE" #: A constant which can be used with the resource_type property of a OperatorControl. #: This constant has a value of "AUTONOMOUSVMCLUSTER" RESOURCE_TYPE_AUTONOMOUSVMCLUSTER = "AUTONOMOUSVMCLUSTER" #: A constant which can be used with the lifecycle_state property of a OperatorControl. #: This constant has a value of "CREATED" LIFECYCLE_STATE_CREATED = "CREATED" #: A constant which can be used with the lifecycle_state property of a OperatorControl. #: This constant has a value of "ASSIGNED" LIFECYCLE_STATE_ASSIGNED = "ASSIGNED" #: A constant which can be used with the lifecycle_state property of a OperatorControl. #: This constant has a value of "UNASSIGNED" LIFECYCLE_STATE_UNASSIGNED = "UNASSIGNED" #: A constant which can be used with the lifecycle_state property of a OperatorControl. #: This constant has a value of "DELETED" LIFECYCLE_STATE_DELETED = "DELETED" def __init__(self, **kwargs): """ Initializes a new OperatorControl object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param id: The value to assign to the id property of this OperatorControl. :type id: str :param operator_control_name: The value to assign to the operator_control_name property of this OperatorControl. :type operator_control_name: str :param description: The value to assign to the description property of this OperatorControl. :type description: str :param approvers_list: The value to assign to the approvers_list property of this OperatorControl. :type approvers_list: list[str] :param approver_groups_list: The value to assign to the approver_groups_list property of this OperatorControl. :type approver_groups_list: list[str] :param pre_approved_op_action_list: The value to assign to the pre_approved_op_action_list property of this OperatorControl. :type pre_approved_op_action_list: list[str] :param approval_required_op_action_list: The value to assign to the approval_required_op_action_list property of this OperatorControl. :type approval_required_op_action_list: list[str] :param is_fully_pre_approved: The value to assign to the is_fully_pre_approved property of this OperatorControl. :type is_fully_pre_approved: bool :param email_id_list: The value to assign to the email_id_list property of this OperatorControl. 
:type email_id_list: list[str] :param resource_type: The value to assign to the resource_type property of this OperatorControl. Allowed values for this property are: "EXACC", "EXADATAINFRASTRUCTURE", "AUTONOMOUSVMCLUSTER", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type resource_type: str :param system_message: The value to assign to the system_message property of this OperatorControl. :type system_message: str :param compartment_id: The value to assign to the compartment_id property of this OperatorControl. :type compartment_id: str :param lifecycle_state: The value to assign to the lifecycle_state property of this OperatorControl. Allowed values for this property are: "CREATED", "ASSIGNED", "UNASSIGNED", "DELETED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type lifecycle_state: str :param time_of_creation: The value to assign to the time_of_creation property of this OperatorControl. :type time_of_creation: datetime :param time_of_modification: The value to assign to the time_of_modification property of this OperatorControl. :type time_of_modification: datetime :param time_of_deletion: The value to assign to the time_of_deletion property of this OperatorControl. :type time_of_deletion: datetime :param last_modified_info: The value to assign to the last_modified_info property of this OperatorControl. :type last_modified_info: str :param freeform_tags: The value to assign to the freeform_tags property of this OperatorControl. :type freeform_tags: dict(str, str) :param defined_tags: The value to assign to the defined_tags property of this OperatorControl. :type defined_tags: dict(str, dict(str, object)) """ self.swagger_types = { 'id': 'str', 'operator_control_name': 'str', 'description': 'str', 'approvers_list': 'list[str]', 'approver_groups_list': 'list[str]', 'pre_approved_op_action_list': 'list[str]', 'approval_required_op_action_list': 'list[str]', 'is_fully_pre_approved': 'bool', 'email_id_list': 'list[str]', 'resource_type': 'str', 'system_message': 'str', 'compartment_id': 'str', 'lifecycle_state': 'str', 'time_of_creation': 'datetime', 'time_of_modification': 'datetime', 'time_of_deletion': 'datetime', 'last_modified_info': 'str', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))' } self.attribute_map = { 'id': 'id', 'operator_control_name': 'operatorControlName', 'description': 'description', 'approvers_list': 'approversList', 'approver_groups_list': 'approverGroupsList', 'pre_approved_op_action_list': 'preApprovedOpActionList', 'approval_required_op_action_list': 'approvalRequiredOpActionList', 'is_fully_pre_approved': 'isFullyPreApproved', 'email_id_list': 'emailIdList', 'resource_type': 'resourceType', 'system_message': 'systemMessage', 'compartment_id': 'compartmentId', 'lifecycle_state': 'lifecycleState', 'time_of_creation': 'timeOfCreation', 'time_of_modification': 'timeOfModification', 'time_of_deletion': 'timeOfDeletion', 'last_modified_info': 'lastModifiedInfo', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags' } self._id = None self._operator_control_name = None self._description = None self._approvers_list = None self._approver_groups_list = None self._pre_approved_op_action_list = None self._approval_required_op_action_list = None self._is_fully_pre_approved = None self._email_id_list = None self._resource_type = None self._system_message = None self._compartment_id = None self._lifecycle_state = 
None self._time_of_creation = None self._time_of_modification = None self._time_of_deletion = None self._last_modified_info = None self._freeform_tags = None self._defined_tags = None @property def id(self): """ **[Required]** Gets the id of this OperatorControl. The OCID of the operator control. :return: The id of this OperatorControl. :rtype: str """ return self._id @id.setter def id(self, id): """ Sets the id of this OperatorControl. The OCID of the operator control. :param id: The id of this OperatorControl. :type: str """ self._id = id @property def operator_control_name(self): """ **[Required]** Gets the operator_control_name of this OperatorControl. Name of the operator control. The name must be unique. :return: The operator_control_name of this OperatorControl. :rtype: str """ return self._operator_control_name @operator_control_name.setter def operator_control_name(self, operator_control_name): """ Sets the operator_control_name of this OperatorControl. Name of the operator control. The name must be unique. :param operator_control_name: The operator_control_name of this OperatorControl. :type: str """ self._operator_control_name = operator_control_name @property def description(self): """ Gets the description of this OperatorControl. Description of operator control. :return: The description of this OperatorControl. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this OperatorControl. Description of operator control. :param description: The description of this OperatorControl. :type: str """ self._description = description @property def approvers_list(self): """ Gets the approvers_list of this OperatorControl. List of users who can approve an access request associated with a target resource under the governance of this operator control. :return: The approvers_list of this OperatorControl. :rtype: list[str] """ return self._approvers_list @approvers_list.setter def approvers_list(self, approvers_list): """ Sets the approvers_list of this OperatorControl. List of users who can approve an access request associated with a target resource under the governance of this operator control. :param approvers_list: The approvers_list of this OperatorControl. :type: list[str] """ self._approvers_list = approvers_list @property def approver_groups_list(self): """ Gets the approver_groups_list of this OperatorControl. List of user groups who can approve an access request associated with a target resource under the governance of this operator control. :return: The approver_groups_list of this OperatorControl. :rtype: list[str] """ return self._approver_groups_list @approver_groups_list.setter def approver_groups_list(self, approver_groups_list): """ Sets the approver_groups_list of this OperatorControl. List of user groups who can approve an access request associated with a target resource under the governance of this operator control. :param approver_groups_list: The approver_groups_list of this OperatorControl. :type: list[str] """ self._approver_groups_list = approver_groups_list @property def pre_approved_op_action_list(self): """ Gets the pre_approved_op_action_list of this OperatorControl. List of pre-approved operator actions. Access requests associated with a resource governed by this operator control will be automatically approved if the access request only contain operator actions in the pre-approved list. :return: The pre_approved_op_action_list of this OperatorControl. 
:rtype: list[str] """ return self._pre_approved_op_action_list @pre_approved_op_action_list.setter def pre_approved_op_action_list(self, pre_approved_op_action_list): """ Sets the pre_approved_op_action_list of this OperatorControl. List of pre-approved operator actions. Access requests associated with a resource governed by this operator control will be automatically approved if the access request only contain operator actions in the pre-approved list. :param pre_approved_op_action_list: The pre_approved_op_action_list of this OperatorControl. :type: list[str] """ self._pre_approved_op_action_list = pre_approved_op_action_list @property def approval_required_op_action_list(self): """ Gets the approval_required_op_action_list of this OperatorControl. List of operator actions that need explicit approval. Any operator action not in the pre-approved list will require explicit approval. Access requests associated with a resource governed by this operator control will be require explicit approval if the access request contains any operator action in this list. :return: The approval_required_op_action_list of this OperatorControl. :rtype: list[str] """ return self._approval_required_op_action_list @approval_required_op_action_list.setter def approval_required_op_action_list(self, approval_required_op_action_list): """ Sets the approval_required_op_action_list of this OperatorControl. List of operator actions that need explicit approval. Any operator action not in the pre-approved list will require explicit approval. Access requests associated with a resource governed by this operator control will be require explicit approval if the access request contains any operator action in this list. :param approval_required_op_action_list: The approval_required_op_action_list of this OperatorControl. :type: list[str] """ self._approval_required_op_action_list = approval_required_op_action_list @property def is_fully_pre_approved(self): """ Gets the is_fully_pre_approved of this OperatorControl. Whether all the operator actions have been pre-approved. If yes, all access requests associated with a resource governed by this operator control will be auto-approved. :return: The is_fully_pre_approved of this OperatorControl. :rtype: bool """ return self._is_fully_pre_approved @is_fully_pre_approved.setter def is_fully_pre_approved(self, is_fully_pre_approved): """ Sets the is_fully_pre_approved of this OperatorControl. Whether all the operator actions have been pre-approved. If yes, all access requests associated with a resource governed by this operator control will be auto-approved. :param is_fully_pre_approved: The is_fully_pre_approved of this OperatorControl. :type: bool """ self._is_fully_pre_approved = is_fully_pre_approved @property def email_id_list(self): """ Gets the email_id_list of this OperatorControl. List of emailId. :return: The email_id_list of this OperatorControl. :rtype: list[str] """ return self._email_id_list @email_id_list.setter def email_id_list(self, email_id_list): """ Sets the email_id_list of this OperatorControl. List of emailId. :param email_id_list: The email_id_list of this OperatorControl. :type: list[str] """ self._email_id_list = email_id_list @property def resource_type(self): """ Gets the resource_type of this OperatorControl. resourceType for which the OperatorControl is applicable Allowed values for this property are: "EXACC", "EXADATAINFRASTRUCTURE", "AUTONOMOUSVMCLUSTER", 'UNKNOWN_ENUM_VALUE'. 
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The resource_type of this OperatorControl. :rtype: str """ return self._resource_type @resource_type.setter def resource_type(self, resource_type): """ Sets the resource_type of this OperatorControl. resourceType for which the OperatorControl is applicable :param resource_type: The resource_type of this OperatorControl. :type: str """ allowed_values = ["EXACC", "EXADATAINFRASTRUCTURE", "AUTONOMOUSVMCLUSTER"] if not value_allowed_none_or_none_sentinel(resource_type, allowed_values): resource_type = 'UNKNOWN_ENUM_VALUE' self._resource_type = resource_type @property def system_message(self): """ Gets the system_message of this OperatorControl. System message that would be displayed to the operator users on accessing the target resource under the governance of this operator control. :return: The system_message of this OperatorControl. :rtype: str """ return self._system_message @system_message.setter def system_message(self, system_message): """ Sets the system_message of this OperatorControl. System message that would be displayed to the operator users on accessing the target resource under the governance of this operator control. :param system_message: The system_message of this OperatorControl. :type: str """ self._system_message = system_message @property def compartment_id(self): """ **[Required]** Gets the compartment_id of this OperatorControl. The OCID of the compartment that contains the operator control. :return: The compartment_id of this OperatorControl. :rtype: str """ return self._compartment_id @compartment_id.setter def compartment_id(self, compartment_id): """ Sets the compartment_id of this OperatorControl. The OCID of the compartment that contains the operator control. :param compartment_id: The compartment_id of this OperatorControl. :type: str """ self._compartment_id = compartment_id @property def lifecycle_state(self): """ Gets the lifecycle_state of this OperatorControl. The current lifecycle state of the operator control. Allowed values for this property are: "CREATED", "ASSIGNED", "UNASSIGNED", "DELETED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The lifecycle_state of this OperatorControl. :rtype: str """ return self._lifecycle_state @lifecycle_state.setter def lifecycle_state(self, lifecycle_state): """ Sets the lifecycle_state of this OperatorControl. The current lifecycle state of the operator control. :param lifecycle_state: The lifecycle_state of this OperatorControl. :type: str """ allowed_values = ["CREATED", "ASSIGNED", "UNASSIGNED", "DELETED"] if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values): lifecycle_state = 'UNKNOWN_ENUM_VALUE' self._lifecycle_state = lifecycle_state @property def time_of_creation(self): """ Gets the time_of_creation of this OperatorControl. Time when the operator control was created expressed in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z' __ https://tools.ietf.org/html/rfc3339 :return: The time_of_creation of this OperatorControl. :rtype: datetime """ return self._time_of_creation @time_of_creation.setter def time_of_creation(self, time_of_creation): """ Sets the time_of_creation of this OperatorControl. Time when the operator control was created expressed in `RFC 3339`__ timestamp format. 
Example: '2020-05-22T21:10:29.600Z' __ https://tools.ietf.org/html/rfc3339 :param time_of_creation: The time_of_creation of this OperatorControl. :type: datetime """ self._time_of_creation = time_of_creation @property def time_of_modification(self): """ Gets the time_of_modification of this OperatorControl. Time when the operator control was last modified expressed in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z' __ https://tools.ietf.org/html/rfc3339 :return: The time_of_modification of this OperatorControl. :rtype: datetime """ return self._time_of_modification @time_of_modification.setter def time_of_modification(self, time_of_modification): """ Sets the time_of_modification of this OperatorControl. Time when the operator control was last modified expressed in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z' __ https://tools.ietf.org/html/rfc3339 :param time_of_modification: The time_of_modification of this OperatorControl. :type: datetime """ self._time_of_modification = time_of_modification @property def time_of_deletion(self): """ Gets the time_of_deletion of this OperatorControl. Time when deleted expressed in `RFC 3339`__timestamp format. Example: '2020-05-22T21:10:29.600Z'. Note a deleted operator control still stays in the system, so that you can still audit operator actions associated with access requests raised on target resources governed by the deleted operator control. __ https://tools.ietf.org/html/rfc3339 :return: The time_of_deletion of this OperatorControl. :rtype: datetime """ return self._time_of_deletion @time_of_deletion.setter def time_of_deletion(self, time_of_deletion): """ Sets the time_of_deletion of this OperatorControl. Time when deleted expressed in `RFC 3339`__timestamp format. Example: '2020-05-22T21:10:29.600Z'. Note a deleted operator control still stays in the system, so that you can still audit operator actions associated with access requests raised on target resources governed by the deleted operator control. __ https://tools.ietf.org/html/rfc3339 :param time_of_deletion: The time_of_deletion of this OperatorControl. :type: datetime """ self._time_of_deletion = time_of_deletion @property def last_modified_info(self): """ Gets the last_modified_info of this OperatorControl. Description associated with the latest modification of the operator control. :return: The last_modified_info of this OperatorControl. :rtype: str """ return self._last_modified_info @last_modified_info.setter def last_modified_info(self, last_modified_info): """ Sets the last_modified_info of this OperatorControl. Description associated with the latest modification of the operator control. :param last_modified_info: The last_modified_info of this OperatorControl. :type: str """ self._last_modified_info = last_modified_info @property def freeform_tags(self): """ Gets the freeform_tags of this OperatorControl. Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. :return: The freeform_tags of this OperatorControl. :rtype: dict(str, str) """ return self._freeform_tags @freeform_tags.setter def freeform_tags(self, freeform_tags): """ Sets the freeform_tags of this OperatorControl. Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. :param freeform_tags: The freeform_tags of this OperatorControl. 
:type: dict(str, str) """ self._freeform_tags = freeform_tags @property def defined_tags(self): """ Gets the defined_tags of this OperatorControl. Defined tags for this resource. Each key is predefined and scoped to a namespace. :return: The defined_tags of this OperatorControl. :rtype: dict(str, dict(str, object)) """ return self._defined_tags @defined_tags.setter def defined_tags(self, defined_tags): """ Sets the defined_tags of this OperatorControl. Defined tags for this resource. Each key is predefined and scoped to a namespace. :param defined_tags: The defined_tags of this OperatorControl. :type: dict(str, dict(str, object)) """ self._defined_tags = defined_tags def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
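# Minimal usage sketch (not from the source): the generated OCI model is normally
# populated through the property setters defined above; the field values below are
# hypothetical examples, and only the setters shown in this class are assumed to exist.
ctrl = OperatorControl()
ctrl.operator_control_name = "example-control"          # hypothetical name
ctrl.resource_type = "EXACC"                            # one of the allowed enum values
ctrl.is_fully_pre_approved = False
ctrl.approvers_list = ["ocid1.user.oc1..exampleuser"]   # hypothetical OCID
print(repr(ctrl))  # __repr__ renders the model via formatted_flat_dict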
from .tqdm import stdout_to_tqdm
from .image import crop_image, not_crop_but_resize
from .image import color_jittering_, lighting_, normalize_
from .transforms import get_affine_transform, affine_transform, fliplr_joints
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.cluster import Birch
from iDetection import *

## Original image
PATH = "../data/JPEG/"
image = cv2.imread(PATH + 'IMG_2465.jpg')
print('Original size:', image.shape)

scale_percent = 50
width = int(image.shape[1] * scale_percent / 100)
height = int(image.shape[0] * scale_percent / 100)
dim = (width, height)
resized_image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
print('Reduced size:', resized_image.shape)

plt.axis("off")
plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB))
plt.show()

# Selection of the object of interest
cropped = rotate_image(resized_image, 0)[
    int(770 * scale_percent / 100):int(950 * scale_percent / 100),
    int(630 * scale_percent / 100):int(760 * scale_percent / 100)]
plt.axis("off")
plt.imshow(cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB))
plt.show()

# Detection and counting of the objects of interest
# Inceptionv3-Siamese
x_detect, y_detect, score, counter = explore_image_inceptionv3(resized_image, cropped, 0.75)

## Clustering
input_image, centroids = clustering(x_detect, y_detect, cropped, resized_image, 0.4)

## Results
print('Number of objects detected before clustering:', str(counter))
print('Number of objects detected:', str(len(centroids)))

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(input_image, 'Objects detected: ' + str(len(centroids)), (10, 100), font, 2, (0, 0, 255), 6)
plt.imshow(input_image)
plt.show()
cv2.imwrite('Inceptionv3_DETECTION.jpg', input_image)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

# Export this package's modules as members:
from .default_kms_key import *
from .encryption_by_default import *
from .get_default_kms_key import *
from .get_ebs_volumes import *
from .get_encryption_by_default import *
from .get_snapshot import *
from .get_snapshot_ids import *
from .get_volume import *
from .snapshot import *
from .snapshot_copy import *
from .volume import *
from ._inputs import *
from . import outputs
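# Usage sketch (assumption: this is the aws.ebs submodule of pulumi_aws; the argument
# names follow the upstream provider schema and the zone/size values are examples only).
#
#   import pulumi_aws as aws
#
#   example = aws.ebs.Volume("example",
#       availability_zone="us-west-2a",
#       size=8,
#       tags={"Name": "example-volume"})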
##***
##class Base:
##    def methodBase(self):
##        print("In base class")
##class child(Base):
##    def methodchild(self):
##        print("In child class")
##c1 = child()
##c1.methodBase()
##c1.methodchild()
##***
##class Base:
##    def __init__(self):
##        print('base')
##class child(Base):
##    pass
##
##
##c1 = Base()


class stud:
    def __init__(self, r, n):
        self.rollno = r
        self.name = n

    def Displaystud(self):
        print("rollno is:", self.rollno)
        print("name is:", self.name)


class ArtsStud(stud):
    def __init__(self, t):
        super().__init__(101, 'abc')
        self.typeOfArt = t

    def DisplayArtsStud(self):
        print("type of art is:", self.typeOfArt)


s1 = ArtsStud("dance")
s1.Displaystud()
s1.DisplayArtsStud()


class Animal():
    def __init__(self, n, c, a):
        self.name = n
        self.color = c
        self.age = a

    def DisplayA(self):
        print("name of the animal:", self.name)
        print("color:", self.color)
        print("age:", self.age)


class breed(Animal):
    def __init__(self, t):
        super().__init__("dog", "black", "4yrs")
        self.breedname = t

    def DisplayB(self):
        print("breed is:", self.breedname)


c1 = breed("doberman")
c1.DisplayA()
c1.DisplayB()
""" A Cython plugin for coverage.py Requires the coverage package at least in version 4.0 (which added the plugin API). """ from __future__ import absolute_import import re import os.path import sys from collections import defaultdict from coverage.plugin import CoveragePlugin, FileTracer, FileReporter # requires coverage.py 4.0+ from coverage.files import canonical_filename from .Utils import find_root_package_dir, is_package_dir, is_cython_generated_file, open_source_file from . import __version__ C_FILE_EXTENSIONS = ['.c', '.cpp', '.cc', '.cxx'] MODULE_FILE_EXTENSIONS = set(['.py', '.pyx', '.pxd'] + C_FILE_EXTENSIONS) def _find_c_source(base_path): file_exists = os.path.exists for ext in C_FILE_EXTENSIONS: file_name = base_path + ext if file_exists(file_name): return file_name return None def _find_dep_file_path(main_file, file_path, relative_path_search=False): abs_path = os.path.abspath(file_path) if not os.path.exists(abs_path) and (file_path.endswith('.pxi') or relative_path_search): # files are looked up relative to the main source file rel_file_path = os.path.join(os.path.dirname(main_file), file_path) if os.path.exists(rel_file_path): abs_path = os.path.abspath(rel_file_path) # search sys.path for external locations if a valid file hasn't been found if not os.path.exists(abs_path): for sys_path in sys.path: test_path = os.path.realpath(os.path.join(sys_path, file_path)) if os.path.exists(test_path): return canonical_filename(test_path) return canonical_filename(abs_path) class Plugin(CoveragePlugin): # map from traced file paths to absolute file paths _file_path_map = None # map from traced file paths to corresponding C files _c_files_map = None # map from parsed C files to their content _parsed_c_files = None # map from traced files to lines that are excluded from coverage _excluded_lines_map = None # list of regex patterns for lines to exclude _excluded_line_patterns = () def sys_info(self): return [('Cython version', __version__)] def configure(self, config): # Entry point for coverage "configurer". # Read the regular expressions from the coverage config that match lines to be excluded from coverage. self._excluded_line_patterns = config.get_option("report:exclude_lines") def file_tracer(self, filename): """ Try to find a C source file for a file path found by the tracer. 
""" if filename.startswith('<') or filename.startswith('memory:'): return None c_file = py_file = None filename = canonical_filename(os.path.abspath(filename)) if self._c_files_map and filename in self._c_files_map: c_file = self._c_files_map[filename][0] if c_file is None: c_file, py_file = self._find_source_files(filename) if not c_file: return None # unknown file # parse all source file paths and lines from C file # to learn about all relevant source files right away (pyx/pxi/pxd) # FIXME: this might already be too late if the first executed line # is not from the main .pyx file but a file with a different # name than the .c file (which prevents us from finding the # .c file) _, code = self._read_source_lines(c_file, filename) if code is None: return None # no source found if self._file_path_map is None: self._file_path_map = {} return CythonModuleTracer(filename, py_file, c_file, self._c_files_map, self._file_path_map) def file_reporter(self, filename): # TODO: let coverage.py handle .py files itself #ext = os.path.splitext(filename)[1].lower() #if ext == '.py': # from coverage.python import PythonFileReporter # return PythonFileReporter(filename) filename = canonical_filename(os.path.abspath(filename)) if self._c_files_map and filename in self._c_files_map: c_file, rel_file_path, code = self._c_files_map[filename] else: c_file, _ = self._find_source_files(filename) if not c_file: return None # unknown file rel_file_path, code = self._read_source_lines(c_file, filename) if code is None: return None # no source found return CythonModuleReporter( c_file, filename, rel_file_path, code, self._excluded_lines_map.get(rel_file_path, frozenset()) ) def _find_source_files(self, filename): basename, ext = os.path.splitext(filename) ext = ext.lower() if ext in MODULE_FILE_EXTENSIONS: pass elif ext == '.pyd': # Windows extension module platform_suffix = re.search(r'[.]cp[0-9]+-win[_a-z0-9]*$', basename, re.I) if platform_suffix: basename = basename[:platform_suffix.start()] elif ext == '.so': # Linux/Unix/Mac extension module platform_suffix = re.search(r'[.](?:cpython|pypy)-[0-9]+[-_a-z0-9]*$', basename, re.I) if platform_suffix: basename = basename[:platform_suffix.start()] elif ext == '.pxi': # if we get here, it means that the first traced line of a Cython module was # not in the main module but in an include file, so try a little harder to # find the main source file self._find_c_source_files(os.path.dirname(filename), filename) if filename in self._c_files_map: return self._c_files_map[filename][0], None else: # none of our business return None, None c_file = filename if ext in C_FILE_EXTENSIONS else _find_c_source(basename) if c_file is None: # a module "pkg/mod.so" can have a source file "pkg/pkg.mod.c" package_root = find_root_package_dir.uncached(filename) package_path = os.path.relpath(basename, package_root).split(os.path.sep) if len(package_path) > 1: test_basepath = os.path.join(os.path.dirname(filename), '.'.join(package_path)) c_file = _find_c_source(test_basepath) py_source_file = None if c_file: py_source_file = os.path.splitext(c_file)[0] + '.py' if not os.path.exists(py_source_file): py_source_file = None if not is_cython_generated_file(c_file, if_not_found=False): if py_source_file and os.path.exists(c_file): # if we did not generate the C file, # then we probably also shouldn't care about the .py file. 
py_source_file = None c_file = None return c_file, py_source_file def _find_c_source_files(self, dir_path, source_file): """ Desperately parse all C files in the directory or its package parents (not re-descending) to find the (included) source file in one of them. """ if not os.path.isdir(dir_path): return splitext = os.path.splitext for filename in os.listdir(dir_path): ext = splitext(filename)[1].lower() if ext in C_FILE_EXTENSIONS: self._read_source_lines(os.path.join(dir_path, filename), source_file) if source_file in self._c_files_map: return # not found? then try one package up if is_package_dir(dir_path): self._find_c_source_files(os.path.dirname(dir_path), source_file) def _read_source_lines(self, c_file, sourcefile): """ Parse a Cython generated C/C++ source file and find the executable lines. Each executable line starts with a comment header that states source file and line number, as well as the surrounding range of source code lines. """ if self._parsed_c_files is None: self._parsed_c_files = {} if c_file in self._parsed_c_files: code_lines = self._parsed_c_files[c_file] else: code_lines = self._parse_cfile_lines(c_file) self._parsed_c_files[c_file] = code_lines if self._c_files_map is None: self._c_files_map = {} for filename, code in code_lines.items(): abs_path = _find_dep_file_path(c_file, filename, relative_path_search=True) self._c_files_map[abs_path] = (c_file, filename, code) if sourcefile not in self._c_files_map: return (None,) * 2 # e.g. shared library file return self._c_files_map[sourcefile][1:] def _parse_cfile_lines(self, c_file): """ Parse a C file and extract all source file lines that generated executable code. """ match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match match_comment_end = re.compile(r' *[*]/$').match match_trace_line = re.compile(r' *__Pyx_TraceLine\(([0-9]+),').match not_executable = re.compile( r'\s*c(?:type)?def\s+' r'(?:(?:public|external)\s+)?' r'(?:struct|union|enum|class)' r'(\s+[^:]+|)\s*:' ).match if self._excluded_line_patterns: line_is_excluded = re.compile("|".join(["(?:%s)" % regex for regex in self._excluded_line_patterns])).search else: line_is_excluded = lambda line: False code_lines = defaultdict(dict) executable_lines = defaultdict(set) current_filename = None if self._excluded_lines_map is None: self._excluded_lines_map = defaultdict(set) with open(c_file) as lines: lines = iter(lines) for line in lines: match = match_source_path_line(line) if not match: if '__Pyx_TraceLine(' in line and current_filename is not None: trace_line = match_trace_line(line) if trace_line: executable_lines[current_filename].add(int(trace_line.group(1))) continue filename, lineno = match.groups() current_filename = filename lineno = int(lineno) for comment_line in lines: match = match_current_code_line(comment_line) if match: code_line = match.group(1).rstrip() if not_executable(code_line): break if line_is_excluded(code_line): self._excluded_lines_map[filename].add(lineno) break code_lines[filename][lineno] = code_line break elif match_comment_end(comment_line): # unexpected comment format - false positive? break # Remove lines that generated code but are not traceable. for filename, lines in code_lines.items(): dead_lines = set(lines).difference(executable_lines.get(filename, ())) for lineno in dead_lines: del lines[lineno] return code_lines class CythonModuleTracer(FileTracer): """ Find the Python/Cython source file for a Cython module. 
""" def __init__(self, module_file, py_file, c_file, c_files_map, file_path_map): super(CythonModuleTracer, self).__init__() self.module_file = module_file self.py_file = py_file self.c_file = c_file self._c_files_map = c_files_map self._file_path_map = file_path_map def has_dynamic_source_filename(self): return True def dynamic_source_filename(self, filename, frame): """ Determine source file path. Called by the function call tracer. """ source_file = frame.f_code.co_filename try: return self._file_path_map[source_file] except KeyError: pass abs_path = _find_dep_file_path(filename, source_file) if self.py_file and source_file[-3:].lower() == '.py': # always let coverage.py handle this case itself self._file_path_map[source_file] = self.py_file return self.py_file assert self._c_files_map is not None if abs_path not in self._c_files_map: self._c_files_map[abs_path] = (self.c_file, source_file, None) self._file_path_map[source_file] = abs_path return abs_path class CythonModuleReporter(FileReporter): """ Provide detailed trace information for one source file to coverage.py. """ def __init__(self, c_file, source_file, rel_file_path, code, excluded_lines): super(CythonModuleReporter, self).__init__(source_file) self.name = rel_file_path self.c_file = c_file self._code = code self._excluded_lines = excluded_lines def lines(self): """ Return set of line numbers that are possibly executable. """ return set(self._code) def excluded_lines(self): """ Return set of line numbers that are excluded from coverage. """ return self._excluded_lines def _iter_source_tokens(self): current_line = 1 for line_no, code_line in sorted(self._code.items()): while line_no > current_line: yield [] current_line += 1 yield [('txt', code_line)] current_line += 1 def source(self): """ Return the source code of the file as a string. """ if os.path.exists(self.filename): with open_source_file(self.filename) as f: return f.read() else: return '\n'.join( (tokens[0][1] if tokens else '') for tokens in self._iter_source_tokens()) def source_token_lines(self): """ Iterate over the source code tokens. """ if os.path.exists(self.filename): with open_source_file(self.filename) as f: for line in f: yield [('txt', line.rstrip('\n'))] else: for line in self._iter_source_tokens(): yield [('txt', line)] def coverage_init(reg, options): plugin = Plugin() reg.add_configurer(plugin) reg.add_file_tracer(plugin)
#TODO: Fill out with same behavior as run-model.js
"""Context parser that returns a dictionary from a key-value pair string. Takes list of key=value pair string and returns a dictionary where each pair becomes a dictionary element. Don't have spaces in your values unless your really mean it. "k1=v1 ' k2'=v2" will result in a context key name of ' k2' not 'k2'. So cli input like this "pig=ham hen=eggs yummypig=bacon", will yield: {'pig': 'ham', 'hen': ''eggs', 'yummypig': 'bacon'} """ import logging # use pypyr logger to ensure loglevel is set correctly logger = logging.getLogger(__name__) def get_parsed_context(args): """Parse input context args and returns context as dictionary.""" if not args: logger.debug("pipeline invoked without context arg set. For " "this keyvaluepairs parser you're looking for " "something like:\n" "pypyr pipelinename key1=value1 key2=value2") return None logger.debug("starting") # for each arg, project key=value return dict(element.split('=') for element in args)
# coding: utf-8
from flask import Flask, request, session, g, redirect, url_for, Blueprint
from flask import abort, render_template, flash
from helpers import getAvatar
#from .base import BaseHandler
import os
import re
import time
import cPickle
import datetime
import logging
import werkzeug
import optparse
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
from tools.ML import *
#import exifutil
import config

config = config.rec()
ml = Blueprint('ml', __name__)
import pika

REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../static/uploads')
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])


@ml.route('/')
def index():
    imag = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../../tensorflow/use/cat.jpg')
    return render_template('ml_index.html', has_result=True, result=predict(imag))


@ml.route('/classify_url', methods=['GET'])
def classify_url():
    imageurl = request.args.get('imageurl', UPLOAD_FOLDER)
    # str.replace() does not take a regex; use re.sub to strip spaces and colons
    filename_ = re.sub(r'[ |:]', '_', str(time.time()))
    filename = os.path.join(UPLOAD_FOLDER, filename_) + '.jpg'
    urllib.urlretrieve(imageurl, filename)
    return render_template(
        'ml_index.html', has_result=True,
        result=predict(filename), imagesrc=imageurl)


@ml.route('/classify_upload', methods=['POST'])
def classify_upload():
    try:
        # We will save the file to disk for possible data collection.
        imagefile = request.files['imagefile']
        filename_ = re.sub(r'[ |:]', '_', str(time.time())) + \
            werkzeug.secure_filename(imagefile.filename)
        filename = os.path.join(UPLOAD_FOLDER, filename_)
        imagefile.save(filename)
        logging.info('Saving to %s.', filename)
        #image = exifutil.open_oriented_im(filename)
        #image = Image.open(filename)
    except Exception as err:
        print 'Uploaded image open error: %s' % err
        return render_template(
            'ml_index.html', has_result=False,
            result=(False, 'Cannot open uploaded image.')
        )
    #result = app.clf.classify_image(image)
    return render_template(
        'ml_index.html', has_result=True,
        result=predict(filename), imagesrc=u'/static/uploads/' + filename_
    )
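# Registration sketch (assumption): this blueprint is meant to be attached to the
# project's Flask application elsewhere, roughly like the following; the URL prefix
# is a guess, not taken from the source.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(ml, url_prefix='/ml')
#   app.run(host='0.0.0.0', port=5000)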
from load_anchors import AnchorList


def find_primary_ds(al_filtered, num_top, alltop=False, run_until=10, shift=0, get_size=False):
    ht = {}
    max_keys = [0, ]
    max_cnts = [1, ]
    for x, y in al_filtered.anchors():
        d = x - y
        key = str(((d >> shift) << shift))
        if key in ht:
            ht[key] += 1
            cnt = ht[key]
            for i, mcnt in enumerate(max_cnts):
                if cnt > mcnt:
                    max_cnts[i] = cnt
                    max_keys[i] = int(key)
                    #if mcnt >= run_until:
                    #    print("HT Size: {}".format(len(ht.keys())))
                    #    if get_size:
                    #        return len(ht.keys())
                    #    else:
                    #        return max_keys
                    break
            else:
                if len(max_cnts) < num_top or (alltop and cnt > 1):
                    max_cnts.append(cnt)
                    max_keys.append(int(key))
        else:
            ht[key] = 1
    print("HT Size: {}".format(len(ht.keys())))
    if get_size:
        return len(ht.keys())
    else:
        return max_keys


def k_bounds(al, al_filtered, k_dist, num_top, alltop=False, run_until=10, shift=0):
    primary_ds = find_primary_ds(al_filtered, num_top, alltop, run_until=run_until, shift=shift)
    priority0 = AnchorList()
    priority1 = AnchorList()
    for x, y in al.anchors():
        d = x - y
        for pd in primary_ds:
            if abs(d - pd) < k_dist:
                priority1.p.append((x, y))
                break
        else:
            priority0.p.append((x, y))
    return (priority1, priority0)
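# Minimal usage sketch. Assumptions: AnchorList exposes .anchors() yielding (x, y)
# integer pairs and stores pairs in its list attribute .p, as used above; how anchors
# are loaded and filtered is not shown in this file, so both lists are placeholders.
if __name__ == '__main__':
    al = AnchorList()          # full anchor set (assumed to be loaded elsewhere)
    al_filtered = al           # assumption: filtering step omitted for brevity
    near, far = k_bounds(al, al_filtered, k_dist=64, num_top=3)
    print(len(near.p), "anchors near a primary diagonal;", len(far.p), "elsewhere")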
# Copyright 2019, A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from taskflow.patterns import linear_flow from octavia.common import constants from octavia.controller.worker.tasks import database_tasks from octavia.controller.worker.tasks import lifecycle_tasks from octavia.controller.worker.tasks import model_tasks from a10_octavia.common import a10constants from a10_octavia.controller.worker.tasks import a10_database_tasks from a10_octavia.controller.worker.tasks import a10_network_tasks from a10_octavia.controller.worker.tasks import server_tasks from a10_octavia.controller.worker.tasks import vthunder_tasks CONF = cfg.CONF class MemberFlows(object): def get_create_member_flow(self, topology): """Create a flow to create a member :returns: The flow for creating a member """ create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( requires=constants.MEMBER)) create_member_flow.add(a10_network_tasks.CalculateDelta( requires=constants.LOADBALANCER, provides=constants.DELTAS)) create_member_flow.add(a10_network_tasks.HandleNetworkDeltas( requires=constants.DELTAS, provides=constants.ADDED_PORTS)) create_member_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( requires=constants.LOADBALANCER, provides=constants.AMPHORA)) create_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer( requires=constants.LOADBALANCER, provides=a10constants.VTHUNDER)) # managing interface additions here create_member_flow.add( vthunder_tasks.AmphoraePostMemberNetworkPlug( requires=( constants.LOADBALANCER, constants.ADDED_PORTS, a10constants.VTHUNDER))) create_member_flow.add(vthunder_tasks.VThunderComputeConnectivityWait( requires=(a10constants.VTHUNDER, constants.AMPHORA))) create_member_flow.add( vthunder_tasks.EnableInterfaceForMembers( requires=[ constants.ADDED_PORTS, constants.LOADBALANCER, a10constants.VTHUNDER])) # configure member flow for HA if topology == constants.TOPOLOGY_ACTIVE_STANDBY: create_member_flow.add( a10_database_tasks.GetBackupVThunderByLoadBalancer( name="get_backup_vThunder", requires=constants.LOADBALANCER, provides=a10constants.BACKUP_VTHUNDER)) create_member_flow.add( vthunder_tasks.AmphoraePostMemberNetworkPlug( name="backup_amphora_network_plug", requires=[ constants.ADDED_PORTS, constants.LOADBALANCER], rebind={ a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER})) create_member_flow.add( vthunder_tasks.VThunderComputeConnectivityWait( name="backup_compute_conn_wait", requires=constants.AMPHORA, rebind={ a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER})) create_member_flow.add( vthunder_tasks.EnableInterfaceForMembers( name="backup_enable_interface", requires=[ constants.ADDED_PORTS, constants.LOADBALANCER], rebind={ a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER})) 
create_member_flow.add(a10_database_tasks.CountMembersWithIP( requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP )) create_member_flow.add(server_tasks.MemberCreate( requires=(constants.MEMBER, a10constants.VTHUNDER, constants.POOL, a10constants.MEMBER_COUNT_IP))) create_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) create_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) create_member_flow.add(database_tasks. MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER, constants.LISTENERS))) create_member_flow.add(vthunder_tasks.WriteMemory( requires=a10constants.VTHUNDER)) create_member_flow.add(a10_database_tasks.SetThunderUpdatedAt( requires=a10constants.VTHUNDER)) return create_member_flow def get_delete_member_flow(self): """Flow to delete a member on VThunder :returns: The flow for deleting a member """ delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( requires=constants.MEMBER)) delete_member_flow.add(model_tasks. DeleteModelObject(rebind={constants.OBJECT: constants.MEMBER})) delete_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer( requires=constants.LOADBALANCER, provides=a10constants.VTHUNDER)) delete_member_flow.add(a10_database_tasks.CountMembersWithIP( requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP)) delete_member_flow.add( a10_database_tasks.CountMembersWithIPPortProtocol( requires=( constants.MEMBER, constants.POOL), provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL)) delete_member_flow.add(a10_database_tasks.GetFlavorData( rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER}, provides=constants.FLAVOR)) delete_member_flow.add( server_tasks.MemberDelete( requires=( constants.MEMBER, a10constants.VTHUNDER, constants.POOL, a10constants.MEMBER_COUNT_IP, a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL))) delete_member_flow.add(database_tasks.DeleteMemberInDB( requires=constants.MEMBER)) delete_member_flow.add(database_tasks.DecrementMemberQuota( requires=constants.MEMBER)) delete_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) delete_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) delete_member_flow.add(vthunder_tasks.WriteMemory( requires=a10constants.VTHUNDER)) delete_member_flow.add(a10_database_tasks.SetThunderUpdatedAt( requires=a10constants.VTHUNDER)) return delete_member_flow def get_rack_vthunder_delete_member_flow(self): """Flow to delete a member in Thunder devices :returns: The flow for deleting a member """ delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( requires=constants.MEMBER)) delete_member_flow.add(model_tasks. 
DeleteModelObject(rebind={constants.OBJECT: constants.MEMBER})) delete_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer( requires=constants.LOADBALANCER, provides=a10constants.VTHUNDER)) delete_member_flow.add(vthunder_tasks.SetupDeviceNetworkMap( requires=a10constants.VTHUNDER, provides=a10constants.VTHUNDER)) delete_member_flow.add(a10_database_tasks.CountMembersWithIP( requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP)) delete_member_flow.add( a10_database_tasks.CountMembersWithIPPortProtocol( requires=( constants.MEMBER, constants.POOL), provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL)) delete_member_flow.add(a10_database_tasks.GetFlavorData( rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER}, provides=constants.FLAVOR)) delete_member_flow.add(server_tasks.MemberFindNatPool( requires=[constants.MEMBER, a10constants.VTHUNDER, constants.POOL, constants.FLAVOR], provides=a10constants.NAT_FLAVOR)) delete_member_flow.add(a10_database_tasks.GetNatPoolEntry( requires=[constants.MEMBER, a10constants.NAT_FLAVOR], provides=a10constants.NAT_POOL)) delete_member_flow.add(a10_network_tasks.ReleaseSubnetAddressForMember( requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL])) delete_member_flow.add(a10_database_tasks.DeleteNatPoolEntry( requires=a10constants.NAT_POOL)) delete_member_flow.add( server_tasks.MemberDelete( requires=( constants.MEMBER, a10constants.VTHUNDER, constants.POOL, a10constants.MEMBER_COUNT_IP, a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL))) if CONF.a10_global.network_type == 'vlan': delete_member_flow.add( vthunder_tasks.DeleteInterfaceTagIfNotInUseForMember( requires=[ constants.MEMBER, a10constants.VTHUNDER])) # Handle VRID setting delete_member_flow.add(self.get_delete_member_vrid_subflow()) delete_member_flow.add(database_tasks.DeleteMemberInDB( requires=constants.MEMBER)) delete_member_flow.add(database_tasks.DecrementMemberQuota( requires=constants.MEMBER)) delete_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) delete_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) delete_member_flow.add(vthunder_tasks.WriteMemory( requires=a10constants.VTHUNDER)) delete_member_flow.add(a10_database_tasks.SetThunderUpdatedAt( requires=a10constants.VTHUNDER)) return delete_member_flow def get_delete_member_vthunder_internal_subflow(self, member_id): delete_member_thunder_subflow = linear_flow.Flow( a10constants.DELETE_MEMBER_VTHUNDER_INTERNAL_SUBFLOW) delete_member_thunder_subflow.add(vthunder_tasks.SetupDeviceNetworkMap( name='setup_device_network_map_' + member_id, requires=a10constants.VTHUNDER, provides=a10constants.VTHUNDER)) delete_member_thunder_subflow.add( a10_database_tasks.CountMembersWithIPPortProtocol( name='count_members_ip_port_' + member_id, requires=( constants.MEMBER, constants.POOL), provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL, rebind={ constants.MEMBER: member_id})) delete_member_thunder_subflow.add(a10_database_tasks.PoolCountforIP( name='pool_count_for_ip_' + member_id, requires=constants.MEMBER, provides=a10constants.POOL_COUNT_IP, rebind={constants.MEMBER: member_id})) # NAT pools database and pools clean up for flavor delete_member_thunder_subflow.add(a10_database_tasks.GetFlavorData( name='get_flavor_data_' + member_id, rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER}, provides=constants.FLAVOR)) delete_member_thunder_subflow.add(server_tasks.MemberFindNatPool( name='member_find_nat_pool_' + 
member_id, requires=[constants.MEMBER, a10constants.VTHUNDER, constants.POOL, constants.FLAVOR], provides=a10constants.NAT_FLAVOR, rebind={constants.MEMBER: member_id})) delete_member_thunder_subflow.add(a10_database_tasks.GetNatPoolEntry( name='get_nat_pool_db_entry_' + member_id, requires=[constants.MEMBER, a10constants.NAT_FLAVOR], provides=a10constants.NAT_POOL, rebind={constants.MEMBER: member_id})) delete_member_thunder_subflow.add(a10_network_tasks.ReleaseSubnetAddressForMember( name='release_subnet_address_for_member_' + member_id, requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL], rebind={constants.MEMBER: member_id})) delete_member_thunder_subflow.add(a10_database_tasks.DeleteNatPoolEntry( name='delete_nat_pool_entry_' + member_id, requires=a10constants.NAT_POOL)) delete_member_thunder_subflow.add( server_tasks.MemberDeletePool( name='delete_thunder_member_pool_' + member_id, requires=( constants.MEMBER, a10constants.VTHUNDER, constants.POOL, a10constants.POOL_COUNT_IP, a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL), rebind={ constants.MEMBER: member_id})) if CONF.a10_global.network_type == 'vlan': delete_member_thunder_subflow.add( vthunder_tasks.DeleteInterfaceTagIfNotInUseForMember( name='delete_unused_interface_tag_in_member_' + member_id, requires=[ constants.MEMBER, a10constants.VTHUNDER], rebind={ constants.MEMBER: member_id})) return delete_member_thunder_subflow def get_delete_member_vrid_subflow(self): delete_member_vrid_subflow = linear_flow.Flow( a10constants.DELETE_MEMBER_VRID_SUBFLOW) delete_member_vrid_subflow.add(a10_network_tasks.GetLBResourceSubnet( rebind={a10constants.LB_RESOURCE: constants.MEMBER}, provides=constants.SUBNET)) delete_member_vrid_subflow.add( a10_database_tasks.GetChildProjectsOfParentPartition( rebind={a10constants.LB_RESOURCE: constants.MEMBER}, provides=a10constants.PARTITION_PROJECT_LIST )) delete_member_vrid_subflow.add( a10_database_tasks.CountLoadbalancersInProjectBySubnet( requires=[constants.SUBNET, a10constants.PARTITION_PROJECT_LIST], provides=a10constants.LB_COUNT)) delete_member_vrid_subflow.add( a10_database_tasks.CountMembersInProjectBySubnet( requires=[constants.SUBNET, a10constants.PARTITION_PROJECT_LIST], provides=a10constants.MEMBER_COUNT)) delete_member_vrid_subflow.add( a10_database_tasks.GetVRIDForLoadbalancerResource( requires=a10constants.PARTITION_PROJECT_LIST, provides=a10constants.VRID_LIST)) delete_member_vrid_subflow.add( a10_network_tasks.DeleteVRIDPort( requires=[ a10constants.VTHUNDER, a10constants.VRID_LIST, constants.SUBNET, a10constants.LB_COUNT, a10constants.MEMBER_COUNT], provides=( a10constants.VRID, a10constants.DELETE_VRID))) delete_member_vrid_subflow.add(a10_database_tasks.DeleteVRIDEntry( requires=[a10constants.VRID, a10constants.DELETE_VRID])) return delete_member_vrid_subflow def get_delete_member_vrid_internal_subflow(self): delete_member_vrid_subflow = linear_flow.Flow( a10constants.DELETE_MEMBER_VRID_INTERNAL_SUBFLOW) delete_member_vrid_subflow.add( a10_database_tasks.GetChildProjectsOfParentPartition( rebind={a10constants.LB_RESOURCE: constants.POOL}, provides=a10constants.PARTITION_PROJECT_LIST )) delete_member_vrid_subflow.add( a10_database_tasks.GetSubnetForDeletionInPool( requires=[a10constants.MEMBER_LIST, a10constants.PARTITION_PROJECT_LIST], provides=a10constants.SUBNET_LIST)) delete_member_vrid_subflow.add( a10_database_tasks.GetVRIDForLoadbalancerResource( requires=a10constants.PARTITION_PROJECT_LIST, provides=a10constants.VRID_LIST)) 
delete_member_vrid_subflow.add( a10_network_tasks.DeleteMultipleVRIDPort( requires=[ a10constants.VTHUNDER, a10constants.VRID_LIST, a10constants.SUBNET_LIST], provides=a10constants.VRID_LIST)) delete_member_vrid_subflow.add(a10_database_tasks.DeleteMultiVRIDEntry( requires=a10constants.VRID_LIST)) return delete_member_vrid_subflow def handle_vrid_for_member_subflow(self): handle_vrid_for_member_subflow = linear_flow.Flow( a10constants.HANDLE_VRID_MEMBER_SUBFLOW) handle_vrid_for_member_subflow.add( a10_network_tasks.GetLBResourceSubnet( rebind={ a10constants.LB_RESOURCE: constants.MEMBER}, provides=constants.SUBNET)) handle_vrid_for_member_subflow.add( a10_database_tasks.GetChildProjectsOfParentPartition( rebind={a10constants.LB_RESOURCE: constants.MEMBER}, provides=a10constants.PARTITION_PROJECT_LIST )) handle_vrid_for_member_subflow.add( a10_database_tasks.GetVRIDForLoadbalancerResource( requires=a10constants.PARTITION_PROJECT_LIST, provides=a10constants.VRID_LIST)) handle_vrid_for_member_subflow.add( a10_network_tasks.HandleVRIDFloatingIP( requires=[ a10constants.VTHUNDER, a10constants.VRID_LIST, constants.SUBNET], rebind={ a10constants.LB_RESOURCE: constants.MEMBER}, provides=a10constants.VRID_LIST)) handle_vrid_for_member_subflow.add( a10_database_tasks.UpdateVRIDForLoadbalancerResource( requires=a10constants.VRID_LIST, rebind={ a10constants.LB_RESOURCE: constants.MEMBER})) return handle_vrid_for_member_subflow def get_update_member_flow(self): """Flow to update a member :returns: The flow for updating a member """ update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( requires=constants.MEMBER)) update_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer( requires=constants.LOADBALANCER, provides=a10constants.VTHUNDER)) update_member_flow.add(server_tasks.MemberUpdate( requires=(constants.MEMBER, a10constants.VTHUNDER, constants.POOL))) update_member_flow.add(database_tasks.UpdateMemberInDB( requires=[constants.MEMBER, constants.UPDATE_DICT])) update_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) update_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) update_member_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) update_member_flow.add(vthunder_tasks.WriteMemory( requires=a10constants.VTHUNDER)) update_member_flow.add(a10_database_tasks.SetThunderUpdatedAt( requires=a10constants.VTHUNDER)) return update_member_flow def get_rack_vthunder_update_member_flow(self): """Flow to update a member in Thunder devices :returns: The flow for updating a member """ update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( requires=constants.MEMBER)) update_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer( requires=constants.LOADBALANCER, provides=a10constants.VTHUNDER)) update_member_flow.add(vthunder_tasks.SetupDeviceNetworkMap( requires=a10constants.VTHUNDER, provides=a10constants.VTHUNDER)) # Handle VRID settings update_member_flow.add(self.handle_vrid_for_member_subflow()) update_member_flow.add(a10_database_tasks.GetFlavorData( rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER}, provides=constants.FLAVOR)) update_member_flow.add(server_tasks.MemberUpdate( requires=(constants.MEMBER, a10constants.VTHUNDER, constants.POOL, constants.FLAVOR))) update_member_flow.add(database_tasks.UpdateMemberInDB( requires=[constants.MEMBER, constants.UPDATE_DICT])) if CONF.a10_global.network_type == 'vlan': update_member_flow.add(vthunder_tasks.TagInterfaceForMember( requires=[constants.MEMBER, a10constants.VTHUNDER])) update_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) update_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) update_member_flow.add(database_tasks. 
MarkLBAndListenersActiveInDB( requires=[constants.LOADBALANCER, constants.LISTENERS])) update_member_flow.add(vthunder_tasks.WriteMemory( requires=a10constants.VTHUNDER)) update_member_flow.add(a10_database_tasks.SetThunderUpdatedAt( requires=a10constants.VTHUNDER)) return update_member_flow def get_rack_vthunder_create_member_flow(self): """Create a flow to create a rack vthunder member :returns: The flow for creating a rack vthunder member """ create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( requires=[constants.MEMBER, constants.LISTENERS, constants.LOADBALANCER, constants.POOL])) create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( requires=constants.MEMBER)) create_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer( requires=constants.LOADBALANCER, provides=a10constants.VTHUNDER)) create_member_flow.add(vthunder_tasks.SetupDeviceNetworkMap( requires=a10constants.VTHUNDER, provides=a10constants.VTHUNDER)) create_member_flow.add(self.handle_vrid_for_member_subflow()) if CONF.a10_global.network_type == 'vlan': create_member_flow.add(vthunder_tasks.TagInterfaceForMember( requires=[constants.MEMBER, a10constants.VTHUNDER])) create_member_flow.add(a10_database_tasks.CountMembersWithIP( requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP)) create_member_flow.add(a10_database_tasks.GetFlavorData( rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER}, provides=constants.FLAVOR)) create_member_flow.add(server_tasks.MemberFindNatPool( requires=[constants.MEMBER, a10constants.VTHUNDER, constants.POOL, constants.FLAVOR], provides=a10constants.NAT_FLAVOR)) create_member_flow.add(a10_database_tasks.GetNatPoolEntry( requires=[constants.MEMBER, a10constants.NAT_FLAVOR], provides=a10constants.NAT_POOL)) create_member_flow.add(a10_network_tasks.ReserveSubnetAddressForMember( requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL], provides=a10constants.SUBNET_PORT)) create_member_flow.add(a10_database_tasks.UpdateNatPoolDB( requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL, a10constants.SUBNET_PORT])) create_member_flow.add(server_tasks.MemberCreate( requires=(constants.MEMBER, a10constants.VTHUNDER, constants.POOL, a10constants.MEMBER_COUNT_IP, constants.FLAVOR))) create_member_flow.add(database_tasks.MarkMemberActiveInDB( requires=constants.MEMBER)) create_member_flow.add(database_tasks.MarkPoolActiveInDB( requires=constants.POOL)) create_member_flow.add(database_tasks. MarkLBAndListenersActiveInDB( requires=(constants.LOADBALANCER, constants.LISTENERS))) create_member_flow.add(vthunder_tasks.WriteMemory( requires=a10constants.VTHUNDER)) create_member_flow.add(a10_database_tasks.SetThunderUpdatedAt( requires=a10constants.VTHUNDER)) return create_member_flow
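# Usage sketch (assumptions marked inline): Octavia normally builds and runs these flows
# through a taskflow engine; the store keys below mirror the "requires" names used above,
# and the member/listener/loadbalancer/pool objects are placeholders, not real models.
#
#   from taskflow import engines
#
#   flows = MemberFlows()
#   flow = flows.get_create_member_flow(topology=constants.TOPOLOGY_SINGLE)
#   engine = engines.load(flow, store={
#       constants.MEMBER: member,
#       constants.LISTENERS: listeners,
#       constants.LOADBALANCER: loadbalancer,
#       constants.POOL: pool,
#   })
#   engine.run()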
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""Blob helper functions."""

import numpy as np
import cv2


def im_list_to_blob(ims):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    """
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    blob = np.zeros((num_images, max_shape[0], max_shape[1], 3), dtype=np.float32)
    for i in xrange(num_images):
        im = ims[i]
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im  # n,h,w,c
    # Move channels (axis 3) to axis 1
    # Axis order will become: (batch elem, channel, height, width)
    channel_swap = (0, 3, 1, 2)
    blob = blob.transpose(channel_swap)  # n,c,h,w
    return blob


def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
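# Usage sketch: build a single-image blob. The pixel means below are the BGR means
# commonly used with Fast R-CNN style models (an assumption, not taken from this file),
# and the input path is hypothetical.
if __name__ == '__main__':
    img = cv2.imread('example.jpg')  # BGR uint8 image
    pixel_means = np.array([[[102.9801, 115.9465, 122.7717]]])
    img, scale = prep_im_for_blob(img, pixel_means, target_size=600, max_size=1000)
    blob = im_list_to_blob([img])
    print(blob.shape, scale)  # (1, 3, H, W) after the channel transpose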