code
stringlengths 4
4.48k
| docstring
stringlengths 1
6.45k
| _id
stringlengths 24
24
|
|---|---|---|
class SearchListProxy(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.search_list_handle = SearchListHandle() <NEW_LINE> <DEDENT> def go_to_goods_detail(self, kw): <NEW_LINE> <INDENT> self.search_list_handle.click_goods(kw)
|
搜索列表-业务层
|
62599023d99f1b3c44d0659d
|
class MemcacheStore(Store): <NEW_LINE> <INDENT> def __init__(self, connection_string: str) -> None: <NEW_LINE> <INDENT> self.log: logging.Logger = logging.getLogger(self.__class__.__name__) <NEW_LINE> self.client: pylibmc.Client = pylibmc.Client( [connection_string], binary=True, ) <NEW_LINE> <DEDENT> def get_multi(self, keys: List[str]) -> Dict[str, int]: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self.client.get_multi(keys) <NEW_LINE> <DEDENT> except pylibmc.Error: <NEW_LINE> <INDENT> self.log.exception("Error getting keys, falling back to empty set") <NEW_LINE> return {} <NEW_LINE> <DEDENT> <DEDENT> def incr_and_get(self, key: str, ttl_s: int = 0) -> int: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.client.add(key, 0, time=ttl_s) <NEW_LINE> return self.client.incr(key) <NEW_LINE> <DEDENT> except pylibmc.Error: <NEW_LINE> <INDENT> self.log.exception(f"Error incrementing key: '{key}', assuming null") <NEW_LINE> return 0 <NEW_LINE> <DEDENT> <DEDENT> def decr(self, key: str) -> None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.client.decr(key) <NEW_LINE> <DEDENT> except pylibmc.Error: <NEW_LINE> <INDENT> self.log.exception(f"Error decrementing key: '{key}'")
|
A memcache-backed implementation of a counter store.
Each operation in memcache is atomic, this ensures that the view of the
state that any client sees is the current view. Counters in previous
buckets will not change.
If two clients try to acquire a permit for the same key, they will each get
a different value, and different rate limits can apply. No race conditions.
|
6259902421bff66bcd723b5d
|
class ContactInfo(models.Model): <NEW_LINE> <INDENT> address = models.CharField(max_length=200, blank=True, null=True) <NEW_LINE> address_line2 = models.CharField(max_length=200, blank=True, null=True) <NEW_LINE> city = models.CharField(max_length=80, blank=True, null=True) <NEW_LINE> state = models.CharField(max_length=30, blank=True, null=True) <NEW_LINE> postal_code = models.IntegerField(blank=True, null=True) <NEW_LINE> websites = models.ManyToManyField(Link, blank=True, null=True, related_name='operators') <NEW_LINE> image = models.ImageField(blank=True, null=True, upload_to="public/images/profile") <NEW_LINE> def __unicode__(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return str(self.userprofile.user.get_full_name()) <NEW_LINE> <DEDENT> except UserProfile.DoesNotExist: <NEW_LINE> <INDENT> return self.list_phone_numbers_as_string() <NEW_LINE> <DEDENT> <DEDENT> def list_phone_numbers_as_string(self): <NEW_LINE> <INDENT> phone_numbers = "" <NEW_LINE> for phone in self.phone_numbers.all(): <NEW_LINE> <INDENT> phone_numbers += phone.number + "," <NEW_LINE> <DEDENT> return phone_numbers <NEW_LINE> <DEDENT> def get_absolute_url(self): <NEW_LINE> <INDENT> return '/contact/contact_profile/%s/' % str(self.id)
|
This is contact information that may go with an individual or entity.
Notice that 'email' is missing as this is part of auth.User or any model describing an entity.
This model basically requires incoming relationships to have any kind of normal life.
For example, people.userprofile has a 1to1 to this.
|
6259902491af0d3eaad3ad21
|
class Acl(object): <NEW_LINE> <INDENT> allow_default = True <NEW_LINE> _permissions = None <NEW_LINE> def __init__(self, user): <NEW_LINE> <INDENT> self.user = user <NEW_LINE> <DEDENT> @property <NEW_LINE> def permissions(self): <NEW_LINE> <INDENT> if not self._permissions: <NEW_LINE> <INDENT> self._generate_user_permissions() <NEW_LINE> <DEDENT> return self._permissions <NEW_LINE> <DEDENT> def has_role(self, role_key): <NEW_LINE> <INDENT> for role in self.user.roles: <NEW_LINE> <INDENT> if isinstance(role_key, (list, tuple)) and role.key in role_key: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif role.key == role_key: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False <NEW_LINE> <DEDENT> def has_permission(self, permission): <NEW_LINE> <INDENT> if permission not in self.permissions.keys(): <NEW_LINE> <INDENT> return self.allow_default <NEW_LINE> <DEDENT> if not self.permissions[permission].value: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> def _generate_user_permissions(self): <NEW_LINE> <INDENT> permissions = {} <NEW_LINE> for role in self.user.roles: <NEW_LINE> <INDENT> permissions.update( {permission.permission.key: Permission( id=permission.permission_id, name=permission.permission.name, inherited=1, value=permission.value) for permission in role.permissions}) <NEW_LINE> <DEDENT> permissions.update( {permission.permission.key: Permission( id=permission.permission_id, name=permission.permission.name, inherited=0, value=permission.value) for permission in self.user.permissions}) <NEW_LINE> self._permissions = permissions
|
Access Control List functionality for managing users' roles and
permissions.
By default, the user model contains an `acl` attribute, which allows
access to the Acl object.
Attributes:
allow_default (boolean): Whether or not to allow/deny access if the
permission has not been set on that role.
|
62599024be8e80087fbbff72
|
class GatewayEvent(six.with_metaclass(GatewayEventMeta, Model)): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def from_dispatch(client, data): <NEW_LINE> <INDENT> cls = EVENTS_MAP.get(data['t']) <NEW_LINE> if not cls: <NEW_LINE> <INDENT> raise Exception('Could not find cls for {} ({})'.format(data['t'], data)) <NEW_LINE> <DEDENT> return cls.create(data['d'], client) <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def create(cls, obj, client): <NEW_LINE> <INDENT> if hasattr(cls, '_wraps_model'): <NEW_LINE> <INDENT> alias, model = cls._wraps_model <NEW_LINE> data = { k: obj.pop(k) for k in six.iterkeys(model._fields) if k in obj } <NEW_LINE> obj[alias] = data <NEW_LINE> <DEDENT> return cls(obj, client) <NEW_LINE> <DEDENT> def __getattr__(self, name): <NEW_LINE> <INDENT> if hasattr(self, '_proxy'): <NEW_LINE> <INDENT> return getattr(getattr(self, self._proxy), name) <NEW_LINE> <DEDENT> return object.__getattribute__(self, name)
|
The GatewayEvent class wraps various functionality for events passed to us
over the gateway websocket, and serves as a simple proxy to inner values for
some wrapped event-types (e.g. MessageCreate only contains a message, so we
proxy all attributes to the inner message object).
|
6259902463f4b57ef00864f0
|
class CappedBuffer(CharacterBuffer): <NEW_LINE> <INDENT> def __init__(self, buffer, width, ignoreWhitespace=False): <NEW_LINE> <INDENT> self.buffer = buffer <NEW_LINE> self.bytesRead = 0 <NEW_LINE> self.width = width <NEW_LINE> self.ignoreWhitespace = ignoreWhitespace <NEW_LINE> <DEDENT> def getch(self): <NEW_LINE> <INDENT> if self.bytesRead < self.width: <NEW_LINE> <INDENT> nextChar = self.buffer.getch() <NEW_LINE> if not self.isIgnoredChar(nextChar): <NEW_LINE> <INDENT> self.bytesRead += len(nextChar) <NEW_LINE> <DEDENT> return nextChar <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> <DEDENT> def isIgnoredChar(self, ch): <NEW_LINE> <INDENT> return self.ignoreWhitespace and isWhitespaceChar(ch) <NEW_LINE> <DEDENT> def ungetch(self, ch): <NEW_LINE> <INDENT> self.buffer.ungetch(ch) <NEW_LINE> if not self.isIgnoredChar(ch): <NEW_LINE> <INDENT> self.bytesRead -= len(ch) <NEW_LINE> <DEDENT> assert self.bytesRead >= 0
|
Implementation of a buffer that caps the number of bytes we can
getch(). The cap may or may not include whitespace characters.
|
62599024bf627c535bcb23b2
|
class Price(AbstractModel): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.OriginalPrice = None <NEW_LINE> self.DiscountPrice = None <NEW_LINE> self.UnitPrice = None <NEW_LINE> self.ChargeUnit = None <NEW_LINE> self.UnitPriceDiscount = None <NEW_LINE> <DEDENT> def _deserialize(self, params): <NEW_LINE> <INDENT> self.OriginalPrice = params.get("OriginalPrice") <NEW_LINE> self.DiscountPrice = params.get("DiscountPrice") <NEW_LINE> self.UnitPrice = params.get("UnitPrice") <NEW_LINE> self.ChargeUnit = params.get("ChargeUnit") <NEW_LINE> self.UnitPriceDiscount = params.get("UnitPriceDiscount")
|
描述预付费或后付费云盘的价格。
|
62599024d99f1b3c44d065a1
|
class MovingThing(object): <NEW_LINE> <INDENT> def __init__(self, image, drawer, x=-1, y=-1, capacity=8): <NEW_LINE> <INDENT> self.drawer = drawer <NEW_LINE> self.image = image <NEW_LINE> self.images = { "default" : image} <NEW_LINE> dark = pygame.Surface(self.image.get_size()).convert_alpha() <NEW_LINE> dark.fill((0, 0, 0, .8 * 255)) <NEW_LINE> self.images["frozen"] = dark <NEW_LINE> self.frozen = False <NEW_LINE> self.capacity = capacity <NEW_LINE> self.carrying = 0 <NEW_LINE> self.score = 0 <NEW_LINE> (self.x, self.y) = self.drawer.random_square(default_x=x, default_y=y) <NEW_LINE> self.draw() <NEW_LINE> <DEDENT> def pos(self): <NEW_LINE> <INDENT> return (self.x, self.y) <NEW_LINE> <DEDENT> def is_at(self, pos): <NEW_LINE> <INDENT> if pos == (self.x, self.y): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False <NEW_LINE> <DEDENT> def add_replacement_image(self, image): <NEW_LINE> <INDENT> self.images["replacement"] = image <NEW_LINE> <DEDENT> def set_replacement_image(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.image = self.images["replacement"] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> print("No replacement image.") <NEW_LINE> <DEDENT> <DEDENT> def set_default_image(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.image = self.images["default"] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> print("No default image.") <NEW_LINE> <DEDENT> <DEDENT> def freeze(self): <NEW_LINE> <INDENT> self.frozen = True <NEW_LINE> self.image = self.images["frozen"] <NEW_LINE> <DEDENT> def unfreeze(self): <NEW_LINE> <INDENT> self.frozen = False <NEW_LINE> self.image = self.images["default"] <NEW_LINE> <DEDENT> def draw(self): <NEW_LINE> <INDENT> self.drawer.draw(self.image, self.x, self.y) <NEW_LINE> <DEDENT> def move_up(self, avoid_obstacles=False): <NEW_LINE> <INDENT> if self.frozen: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.drawer.in_bounds((self.x, self.y - 1), avoid_obstacles): <NEW_LINE> <INDENT> self.y 
-= 1 <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> def move_down(self, avoid_obstacles=False): <NEW_LINE> <INDENT> if self.frozen: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.drawer.in_bounds((self.x, self.y + 1), avoid_obstacles): <NEW_LINE> <INDENT> self.y += 1 <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> def move_left(self, avoid_obstacles=False): <NEW_LINE> <INDENT> if self.frozen: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.drawer.in_bounds((self.x - 1, self.y), avoid_obstacles): <NEW_LINE> <INDENT> self.x -= 1 <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> def move_right(self, avoid_obstacles=False): <NEW_LINE> <INDENT> if self.frozen: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.drawer.in_bounds((self.x + 1, self.y), avoid_obstacles): <NEW_LINE> <INDENT> self.x += 1 <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
|
An icon that moves around.
|
6259902463f4b57ef00864f1
|
class MapGetByIndexRangeToEnd(_BaseExpr): <NEW_LINE> <INDENT> _op = aerospike.OP_MAP_GET_BY_INDEX_RANGE_TO_END <NEW_LINE> def __init__(self, ctx: 'TypeCTX', return_type: int, index: 'TypeIndex', bin: 'TypeBinName'): <NEW_LINE> <INDENT> self._children = ( index, bin if isinstance(bin, _BaseExpr) else MapBin(bin) ) <NEW_LINE> self._fixed = {_Keys.RETURN_TYPE_KEY: return_type} <NEW_LINE> if ctx is not None: <NEW_LINE> <INDENT> self._fixed[_Keys.CTX_KEY] = ctx
|
Create an expression that selects map items starting at specified index to the end of map
and returns selected data specified by return_type.
|
625990248c3a8732951f7455
|
class WSGIRequestHandlerLogging(WSGIRequestHandler): <NEW_LINE> <INDENT> def log_message(self, format, *args): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> logger.debug("restapi - %s %s %s" % args) <NEW_LINE> <DEDENT> except BaseException: <NEW_LINE> <INDENT> print(args)
|
wsgi request handler logging
|
625990241d351010ab8f4a14
|
class Bind(xso.XSO): <NEW_LINE> <INDENT> TAG = (namespaces.rfc6120_bind, "bind") <NEW_LINE> jid = xso.ChildText( (namespaces.rfc6120_bind, "jid"), type_=xso.JID(), default=None ) <NEW_LINE> resource = xso.ChildText( (namespaces.rfc6120_bind, "resource"), default=None ) <NEW_LINE> def __init__(self, jid=None, resource=None): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.jid = jid <NEW_LINE> self.resource = resource
|
The :class:`.IQ` payload for binding to a resource.
.. attribute:: jid
The server-supplied :class:`aioxmpp.JID`. This must not be set by
client code.
.. attribute:: resource
The client-supplied, optional resource. If a client wishes to bind to a
specific resource, it must tell the server that using this attribute.
|
6259902456b00c62f0fb37bd
|
class DCTLayer(nn.Module): <NEW_LINE> <INDENT> def __init__(self, input_shape=IMAGE_SIZE): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.input_shape = input_shape <NEW_LINE> I = np.eye(self.input_shape) <NEW_LINE> I = dct(I, type=2, norm="ortho", axis=-1) <NEW_LINE> self.register_buffer("dct", torch.as_tensor(I, dtype=torch.float32)) <NEW_LINE> <DEDENT> def forward(self, x): <NEW_LINE> <INDENT> x = x.matmul(self.dct) <NEW_LINE> x = x.transpose(-1, -2) <NEW_LINE> x = x.matmul(self.dct) <NEW_LINE> x = x.transpose(-1, -2) <NEW_LINE> return x
|
DCT-2 preprocessing layer.
Only supports square input.
|
6259902426238365f5fada50
|
class WinUnicodeOutputBase(object): <NEW_LINE> <INDENT> def __init__(self, fileno, name, encoding): <NEW_LINE> <INDENT> self._fileno = fileno <NEW_LINE> self.encoding = encoding <NEW_LINE> self.name = name <NEW_LINE> self.closed = False <NEW_LINE> self.softspace = False <NEW_LINE> self.mode = 'w' <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def isatty(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> def close(self): <NEW_LINE> <INDENT> self.closed = True <NEW_LINE> <DEDENT> def fileno(self): <NEW_LINE> <INDENT> return self._fileno <NEW_LINE> <DEDENT> def flush(self): <NEW_LINE> <INDENT> raise NotImplementedError() <NEW_LINE> <DEDENT> def write(self, text): <NEW_LINE> <INDENT> raise NotImplementedError() <NEW_LINE> <DEDENT> def writelines(self, lines): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> for line in lines: <NEW_LINE> <INDENT> self.write(line) <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> complain('%s.writelines: %r' % (self.name, e)) <NEW_LINE> raise
|
Base class to adapt sys.stdout or sys.stderr to behave correctly on
Windows.
Setting encoding to utf-8 is recommended.
|
62599024d164cc6175821e76
|
class MachinePanel(wx.Panel): <NEW_LINE> <INDENT> def __init__(self, parent): <NEW_LINE> <INDENT> super(MachinePanel, self).__init__(parent) <NEW_LINE> self.frame = parent.GetTopLevelParent() <NEW_LINE> self.machine_nb = wx.Notebook(self) <NEW_LINE> <DEDENT> def load_collection(self): <NEW_LINE> <INDENT> periods = self.frame.col.num_periods <NEW_LINE> self.period_panels = [PeriodPanel(self.machine_nb, i) for i in range(periods)] <NEW_LINE> while self.machine_nb.GetPageCount(): <NEW_LINE> <INDENT> self.machine_nb.DeletePage(0) <NEW_LINE> <DEDENT> for panel in self.period_panels: <NEW_LINE> <INDENT> self.machine_nb.AddPage(panel, 'Period {} - {}'.format( panel.period_num + 1, panel.period_end.strftime('%m/%d/%y'))) <NEW_LINE> <DEDENT> panel_sizer = wx.BoxSizer(wx.HORIZONTAL) <NEW_LINE> panel_sizer.Add(self.machine_nb) <NEW_LINE> self.SetSizer(panel_sizer) <NEW_LINE> panel_sizer.Fit(self)
|
Panel with sheets of washers and dryers.
|
62599024a8ecb0332587211e
|
class Corpus(object): <NEW_LINE> <INDENT> def __init__(self, path): <NEW_LINE> <INDENT> self.dictionary = Dictionary() <NEW_LINE> self.train = self.tokenize(path) <NEW_LINE> <DEDENT> def tokenize(self, path): <NEW_LINE> <INDENT> assert os.path.exists(path) <NEW_LINE> with open(path, 'r', encoding='utf-8') as f: <NEW_LINE> <INDENT> tokens = 0 <NEW_LINE> for line in f: <NEW_LINE> <INDENT> if len(line.strip()) == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> words = list(line.strip()) + ['<eos>'] <NEW_LINE> tokens += len(words) <NEW_LINE> for word in words: <NEW_LINE> <INDENT> self.dictionary.add_word(word) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> with open(path, 'r', encoding='utf-8') as f: <NEW_LINE> <INDENT> ids = torch.LongTensor(tokens) <NEW_LINE> token = 0 <NEW_LINE> for line in f: <NEW_LINE> <INDENT> if len(line.strip()) == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> words = list(line.strip()) + ['<eos>'] <NEW_LINE> for word in words: <NEW_LINE> <INDENT> ids[token] = self.dictionary.word2idx[word] <NEW_LINE> token += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return ids <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "Corpus length: %d, Vocabulary size: %d" % (self.train.size(0), len(self.dictionary))
|
文本预处理,获取词汇表,并将字符串文本转换为数字序列。
|
62599024c432627299fa3ef2
|
class CounterThread(qtc.QThread): <NEW_LINE> <INDENT> countChanged = qtc.pyqtSignal(int, str) <NEW_LINE> pomoComplete = qtc.pyqtSignal() <NEW_LINE> running = True <NEW_LINE> def run(self): <NEW_LINE> <INDENT> on_focus, on_break = True, False <NEW_LINE> session_time = 0 <NEW_LINE> while CounterThread.running: <NEW_LINE> <INDENT> if on_focus and session_time <= POMO_TIME: <NEW_LINE> <INDENT> prg_perc = int((session_time / POMO_TIME) * 100) <NEW_LINE> prg_str = str(POMO_TIME - session_time) + ":00" <NEW_LINE> self.countChanged.emit(prg_perc, prg_str) <NEW_LINE> session_time += 1 <NEW_LINE> time.sleep(1) <NEW_LINE> <DEDENT> elif on_break and session_time <= BREAK_TIME: <NEW_LINE> <INDENT> prg_perc = int((session_time / BREAK_TIME) * 100) <NEW_LINE> prg_str = str(BREAK_TIME - session_time) + ":00" <NEW_LINE> self.countChanged.emit(prg_perc, prg_str) <NEW_LINE> session_time += 1 <NEW_LINE> time.sleep(1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if on_focus: <NEW_LINE> <INDENT> self.pomoComplete.emit() <NEW_LINE> <DEDENT> on_focus, on_break = not on_focus, not on_break <NEW_LINE> session_time = 0 <NEW_LINE> <DEDENT> <DEDENT> return
|
Runs a counter thread
|
625990246fece00bbaccc8b9
|
class SingleInstance(object): <NEW_LINE> <INDENT> def __init__(self, proc): <NEW_LINE> <INDENT> proc = proc.split('/')[-1] <NEW_LINE> self.pidPath = '/tmp/' + proc + '.pid' <NEW_LINE> if os.path.exists(self.pidPath): <NEW_LINE> <INDENT> self.lasterror = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.lasterror = False <NEW_LINE> <DEDENT> if not self.lasterror: <NEW_LINE> <INDENT> fp = open(self.pidPath, 'w') <NEW_LINE> fp.write(str(os.getpid())) <NEW_LINE> fp.close() <NEW_LINE> <DEDENT> <DEDENT> def alreadyRunning(self): <NEW_LINE> <INDENT> return self.lasterror <NEW_LINE> <DEDENT> def __del__(self): <NEW_LINE> <INDENT> if not self.lasterror: <NEW_LINE> <INDENT> os.unlink(self.pidPath)
|
Se encarga de que este corriendo un solo encoder por grupo para cada tipo de archivo.
Crea un archivo .pid cuando la aplicacion esta corriendo, que se borra cuando se termina la ejecucion de la misma.
|
625990249b70327d1c57fc82
|
class Uchip3131(Chip): <NEW_LINE> <INDENT> pinpos = ('l', 'l', 'l', 'b', 'r', 'r', 'r', 't') <NEW_LINE> @property <NEW_LINE> def coords(self): <NEW_LINE> <INDENT> return ((-0.5, 0.25), (-0.5, 0), (-0.5, -0.25), (0.0, -0.375), (0.5, -0.25), (0.5, 0), (0.5, 0.25), (0.0, 0.375)) <NEW_LINE> <DEDENT> @property <NEW_LINE> def path(self): <NEW_LINE> <INDENT> return ((-0.5, 0.375), (0.5, 0.375), (0.5, -0.375), (-0.5, -0.375))
|
Chip of size 3 1 3 1
|
62599024c432627299fa3ef4
|
class PrepareFieldmap(FSLCommand): <NEW_LINE> <INDENT> _cmd = 'fsl_prepare_fieldmap' <NEW_LINE> input_spec = PrepareFieldmapInputSpec <NEW_LINE> output_spec = PrepareFieldmapOutputSpec <NEW_LINE> def _parse_inputs(self, skip=None): <NEW_LINE> <INDENT> if skip is None: <NEW_LINE> <INDENT> skip = [] <NEW_LINE> <DEDENT> if not isdefined(self.inputs.out_fieldmap): <NEW_LINE> <INDENT> self.inputs.out_fieldmap = self._gen_fname( self.inputs.in_phase, suffix='_fslprepared') <NEW_LINE> <DEDENT> if not isdefined(self.inputs.nocheck) or not self.inputs.nocheck: <NEW_LINE> <INDENT> skip += ['nocheck'] <NEW_LINE> <DEDENT> return super(PrepareFieldmap, self)._parse_inputs(skip=skip) <NEW_LINE> <DEDENT> def _list_outputs(self): <NEW_LINE> <INDENT> outputs = self.output_spec().get() <NEW_LINE> outputs['out_fieldmap'] = self.inputs.out_fieldmap <NEW_LINE> return outputs <NEW_LINE> <DEDENT> def _run_interface(self, runtime): <NEW_LINE> <INDENT> runtime = super(PrepareFieldmap, self)._run_interface(runtime) <NEW_LINE> if runtime.returncode == 0: <NEW_LINE> <INDENT> out_file = self.inputs.out_fieldmap <NEW_LINE> im = nib.load(out_file) <NEW_LINE> dumb_img = nib.Nifti1Image(np.zeros(im.shape), im.affine, im.header) <NEW_LINE> out_nii = nib.funcs.concat_images((im, dumb_img)) <NEW_LINE> nib.save(out_nii, out_file) <NEW_LINE> <DEDENT> return runtime
|
Interface for the fsl_prepare_fieldmap script (FSL 5.0)
Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in
rad/s format (e.g. ```fsl_prepare_fieldmap SIEMENS
images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65```).
Examples
--------
>>> from nipype.interfaces.fsl import PrepareFieldmap
>>> prepare = PrepareFieldmap()
>>> prepare.inputs.in_phase = "phase.nii"
>>> prepare.inputs.in_magnitude = "magnitude.nii"
>>> prepare.inputs.output_type = "NIFTI_GZ"
>>> prepare.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii .../phase_fslprepared.nii.gz 2.460000'
>>> res = prepare.run() # doctest: +SKIP
|
6259902491af0d3eaad3ad29
|
class MultikeyDict(dict): <NEW_LINE> <INDENT> def __init__(self, *arg, **kw): <NEW_LINE> <INDENT> dict.__init__(self, *arg, **kw) <NEW_LINE> self.value_set = set() <NEW_LINE> <DEDENT> def __getitem__(self, key): <NEW_LINE> <INDENT> item = dict.__getitem__(self, key) <NEW_LINE> return item.value <NEW_LINE> <DEDENT> def __delitem__(self, key): <NEW_LINE> <INDENT> item = dict.__getitem__(self, key) <NEW_LINE> for key in item.keys: <NEW_LINE> <INDENT> dict.__delitem__(self, key) <NEW_LINE> <DEDENT> self.value_set.remove(item.value) <NEW_LINE> <DEDENT> def add(self, value): <NEW_LINE> <INDENT> self[(value,)] = value <NEW_LINE> <DEDENT> def discard(self, value): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> del self[value] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> def __setitem__(self, keys, value): <NEW_LINE> <INDENT> keys = set(keys) <NEW_LINE> keys.add(value) <NEW_LINE> new_item = DictItem(keys, value) <NEW_LINE> self.value_set.add(value) <NEW_LINE> for key in keys: <NEW_LINE> <INDENT> if key in self: <NEW_LINE> <INDENT> raise KeyError('key %s already exists' % key) <NEW_LINE> <DEDENT> dict.__setitem__(self, key, new_item) <NEW_LINE> <DEDENT> <DEDENT> def get(self, key, default=None): <NEW_LINE> <INDENT> return self[key] if key in self else default <NEW_LINE> <DEDENT> def clear(self): <NEW_LINE> <INDENT> dict.clear(self) <NEW_LINE> self.value_set.clear() <NEW_LINE> <DEDENT> def values(self): <NEW_LINE> <INDENT> return self.value_set <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return len(self.value_set)
|
dict with multiple keys, i.e.
foo = MultikeyDict()
foo[(1, 'bar')] = 'hello'
foo[1] == foo['bar'] == 'hello'
To delete: "del foo[1]" or "del foo['bar']" or "del foo['hello']"
This is an alternative to maintaining 2 seperate dicts for e.g. player
IDs and their names, so you can do both dict[player_id] and
dict[player_name].
|
625990246e29344779b01553
|
class easygttsException(LibraryException): <NEW_LINE> <INDENT> pass
|
Base Error for easygTTS code. (premium=False)
|
62599024d99f1b3c44d065a7
|
@six.add_metaclass(ABCMeta) <NEW_LINE> class SessionInit(object): <NEW_LINE> <INDENT> def init(self, sess): <NEW_LINE> <INDENT> self._init(sess) <NEW_LINE> <DEDENT> @abstractmethod <NEW_LINE> def _init(self, sess): <NEW_LINE> <INDENT> pass
|
Base class for utilities to initialize a session
|
6259902421bff66bcd723b65
|
class Mirror(OptElement): <NEW_LINE> <INDENT> def __init__(self, r=[0,0,0], roll_ang=0.0, pitch_ang=0.0, yaw_ang=0.0, size=[1.0,1.0,0.1], id=None): <NEW_LINE> <INDENT> OptElement.__init__(self, r, roll_ang, pitch_ang, yaw_ang, size, id)
|
plane mirror
|
62599024796e427e5384f681
|
class PerfCounter(object): <NEW_LINE> <INDENT> def __init__(self, name, description): <NEW_LINE> <INDENT> if name in Registry._counters: <NEW_LINE> <INDENT> raise Exception('Counter %s already exists.' % name) <NEW_LINE> <DEDENT> self._name = name <NEW_LINE> self._description = description <NEW_LINE> self._value = 0 <NEW_LINE> Registry._counters[self.name] = self <NEW_LINE> <DEDENT> def inc(self, increment=1): <NEW_LINE> <INDENT> self._value += increment <NEW_LINE> <DEDENT> @property <NEW_LINE> def name(self): <NEW_LINE> <INDENT> return self._name <NEW_LINE> <DEDENT> @property <NEW_LINE> def description(self): <NEW_LINE> <INDENT> return self._description <NEW_LINE> <DEDENT> @property <NEW_LINE> def value(self): <NEW_LINE> <INDENT> return self._value
|
Generic in-process numeric counter; not aggregated across instances.
|
62599024a4f1c619b294f4f8
|
class AutomaticDimension(object): <NEW_LINE> <INDENT> AD_NAME = '' <NEW_LINE> AD_DESCRIPTION = '' <NEW_LINE> VALID_FOR_TYPE = () <NEW_LINE> def __init__(self, list_of_values): <NEW_LINE> <INDENT> self.att_type = type(list_of_values[0]) <NEW_LINE> self.set_of_values = set([str(val) for val in list_of_values]) <NEW_LINE> <DEDENT> def create_dimensions(self, dimensions_queue=None): <NEW_LINE> <INDENT> handling_lvl0_elements = False <NEW_LINE> if dimensions_queue is None: <NEW_LINE> <INDENT> dimensions_queue = [{self.att_type(key): None} for key in self.set_of_values] <NEW_LINE> handling_lvl0_elements = True <NEW_LINE> <DEDENT> new_dimensions_queue = {} <NEW_LINE> for element in dimensions_queue: <NEW_LINE> <INDENT> parent = self.get_parent(str(element.keys()[0])) <NEW_LINE> if parent not in new_dimensions_queue.keys(): <NEW_LINE> <INDENT> new_dimensions_queue[parent] = element <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_dimensions_queue[parent].update(element) <NEW_LINE> <DEDENT> <DEDENT> if len(new_dimensions_queue) == 1 and new_dimensions_queue.keys()[0] == BaseHierarchy.supression_node().value: <NEW_LINE> <INDENT> return new_dimensions_queue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.create_dimensions([{key: value} for key, value in new_dimensions_queue.iteritems()])
|
Class to create automatic dimensions for the hierarchies
|
62599024507cdc57c63a5caa
|
class HitVecDict(dict): <NEW_LINE> <INDENT> def __missing__(self, k): <NEW_LINE> <INDENT> self[k] = biopsy.HitVec() <NEW_LINE> return self[k]
|
A dict where missing values are initialised to biopsy.HitVec().
|
625990249b70327d1c57fc86
|
class Update: <NEW_LINE> <INDENT> def __init__(self, in_headers, args): <NEW_LINE> <INDENT> self.aggregate_headers = [] <NEW_LINE> self.input_headers = in_headers <NEW_LINE> self.output_headers = in_headers <NEW_LINE> self.column_name = args.pop(0) <NEW_LINE> self.pythonExpr = args.pop(0) <NEW_LINE> if not(self.column_name in self.input_headers): <NEW_LINE> <INDENT> raise Exception('You must construct additional pylons...and column not found') <NEW_LINE> <DEDENT> <DEDENT> def process_row(self,row): <NEW_LINE> <INDENT> row[self.column_name] = eval(self.pythonExpr, row) <NEW_LINE> return row <NEW_LINE> <DEDENT> def get_aggregate(self): <NEW_LINE> <INDENT> return {}
|
Updates the values of a column. Consume two arguments from args: a column name
and a python expression. Evaluate the expression using eval (as for Filter),
and assign the result to the designated column. Raise an exception if the
column is not in in_headers.
Does no aggregation.
Tip: use "x in l" to check if x is an element of l. See help('in').
Example: Convert firstname to lower case.
$ python3 hw4.py player_career_short.csv -Update firstname 'firstname.lower()'
id,firstname,lastname,leag,gp,minutes,pts,oreb,dreb,reb,asts,stl,blk,turnover,pf,fga,fgm,fta,ftm,tpa,tpm
ABDELAL01 ,alaa,Abdelnaby,N,256,3200,1465,283,563,846,85,71,69,247,484,1236,620,321,225,3,0
ABDULKA01 ,kareem,Abdul-jabbar,N,1560,57446,38387,2975,9394,17440,5660,1160,3189,2527,4657,28307,15837,9304,6712,18,1
ABDULMA01 ,mahmo,Abdul-rauf,N,586,15633,8553,219,868,1087,2079,487,46,963,1107,7943,3514,1161,1051,1339,474
ABDULTA01 ,tariq,Abdul-wahad,N,236,4808,1830,286,490,776,266,184,82,309,485,1726,720,529,372,76,18
ABDURSH01 ,shareef,Abdur-rahim,N,830,28883,15028,1869,4370,6239,2109,820,638,2136,2324,11515,5434,4943,4006,519,154
ABERNTO01 ,tom,Abernethy,N,319,5434,1779,374,637,1011,384,185,60,129,525,1472,724,443,331,2,0
ABLEFO01 ,forest,Able,N,1,1,0,0,0,1,1,0,0,0,1,2,0,0,0,0,0
ABRAMJO01 ,john,Abramovic,N,56,0,533,0,0,0,37,0,0,0,171,855,203,185,127,0,0
ACKERAL01 ,alex,Acker,N,30,234,81,9,20,29,16,6,4,11,13,92,34,10,5,25,8
|
6259902430c21e258be9971a
|
class BuiltinModule(object): <NEW_LINE> <INDENT> def __init__(self, name): <NEW_LINE> <INDENT> self.name = name <NEW_LINE> self.is_main_module = False <NEW_LINE> self.to_be_mangled = False <NEW_LINE> self.exported_functions = dict() <NEW_LINE> self.dependent_modules = dict() <NEW_LINE> <DEDENT> def call_function(self, registry, func_name): <NEW_LINE> <INDENT> importFrom = ast.ImportFrom(module=self.name, names=[ast.alias(name=func_name, asname=None)], level=0) <NEW_LINE> self.exported_functions[func_name] = importFrom <NEW_LINE> return func_name <NEW_LINE> <DEDENT> def import_function(self, registry, func_name): <NEW_LINE> <INDENT> return func_name
|
Represent a builtin module.
it offer the same interface as ImportedModule class, but do not try to
validate function imported from here.
|
6259902426238365f5fada56
|
class GetTableData(object): <NEW_LINE> <INDENT> def GET(self): <NEW_LINE> <INDENT> print("-----------------------------------") <NEW_LINE> print("GetTableData Started!") <NEW_LINE> result = json.loads(web.data().decode('utf-8')) <NEW_LINE> table = str(result["table"]) <NEW_LINE> if "database" in result: <NEW_LINE> <INDENT> database = str(result["database"]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> database = "predictive_maintenance" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> mdbobject = makeObject(dbName = database,dbCollection = table) <NEW_LINE> if mdbobject.checkCollectionExists() == False: <NEW_LINE> <INDENT> print("Invalid/ Nonexistent Collection") <NEW_LINE> return "Invalid/ Nonexistent Collection" <NEW_LINE> <DEDENT> maskquery = {'_id' :0} <NEW_LINE> res = mdbobject.findRecord(maskingquery=maskquery) <NEW_LINE> jsonData = json.dumps(res) <NEW_LINE> return jsonData <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print("Error in getTableData: " + str(e)) <NEW_LINE> return e <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> print("GetTableData Finished") <NEW_LINE> print("-----------------------------------")
|
Gets data from a table / collection. Returns data if found. Otherwise, returns an error.
WARNING: Data can be non consistent if your saving method is wrong!
@author: Shreyas Gokhale
@contact: s.gokhale@campus.tu-berlin.de
|
62599024c432627299fa3ef8
|
class LatLngList(ArrayList): <NEW_LINE> <INDENT> def refresh_points(self, points): <NEW_LINE> <INDENT> coordinates = [LatLng(*p) for p in points] <NEW_LINE> self.clear() <NEW_LINE> self.addAll([bridge.encode(c) for c in coordinates]) <NEW_LINE> <DEDENT> def handle_change(self, change): <NEW_LINE> <INDENT> op = change['operation'] <NEW_LINE> if op in 'append': <NEW_LINE> <INDENT> self.add(len(change['value']), LatLng(*change['item'])) <NEW_LINE> <DEDENT> elif op == 'insert': <NEW_LINE> <INDENT> self.add(change['index'], LatLng(*change['item'])) <NEW_LINE> <DEDENT> elif op == 'extend': <NEW_LINE> <INDENT> points = [LatLng(*p) for p in change['items']] <NEW_LINE> self.addAll([bridge.encode(c) for c in points]) <NEW_LINE> <DEDENT> elif op == '__setitem__': <NEW_LINE> <INDENT> self.set(change['index'], LatLng(*change['newitem'])) <NEW_LINE> <DEDENT> elif op == 'pop': <NEW_LINE> <INDENT> self.remove(change['index']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError( "Unsupported change operation {}".format(op))
|
A ArrayList<LatLng> that handles changes from an atom ContainerList
|
6259902456b00c62f0fb37c5
|
class ForeignElement(_Element, _Tagged, _Parental, _Configurable, Signature2): <NEW_LINE> <INDENT> def __repr__(self): <NEW_LINE> <INDENT> attributes = self.render_attributes() <NEW_LINE> return "<{0}{1}/>".format(self.tagname, attributes) <NEW_LINE> children = self.render_children() <NEW_LINE> return "<{0}{1}>{2}</{0}>".format(self.tagname, attributes, children)
|
This abstract element class implements foreign elements like RECT and
CIRCLE elements (those that can optionally self-close when they have no
children).
|
62599024bf627c535bcb23bc
|
class WebSocketServerProtocol(WebSocketAdapterProtocol, protocol.WebSocketServerProtocol): <NEW_LINE> <INDENT> def _onConnect(self, request): <NEW_LINE> <INDENT> res = maybeDeferred(self.onConnect, request) <NEW_LINE> res.addCallback(self.succeedHandshake) <NEW_LINE> def forwardError(failure): <NEW_LINE> <INDENT> if failure.check(ConnectionDeny): <NEW_LINE> <INDENT> return self.failHandshake(failure.value.reason, failure.value.code) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.log.debug("Unexpected exception in onConnect ['{failure.value}']", failure=failure) <NEW_LINE> return self.failHandshake("Internal server error: {}".format(failure.value), ConnectionDeny.INTERNAL_SERVER_ERROR) <NEW_LINE> <DEDENT> <DEDENT> res.addErrback(forwardError) <NEW_LINE> <DEDENT> def get_channel_id(self, channel_id_type=u'tls-unique'): <NEW_LINE> <INDENT> return transport_channel_id(self.transport, is_server=True, channel_id_type=channel_id_type)
|
Base class for Twisted-based WebSocket server protocols.
|
625990249b70327d1c57fc88
|
class TestUserFields: <NEW_LINE> <INDENT> def test_model_has_id_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "id") <NEW_LINE> <DEDENT> def test_model_has_email_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "email") <NEW_LINE> <DEDENT> def test_model_has_first_name_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "first_name") <NEW_LINE> <DEDENT> def test_model_has_last_name_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "last_name") <NEW_LINE> <DEDENT> def test_model_has_personal_number_filed(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "personal_number") <NEW_LINE> <DEDENT> def test_model_has_created_at_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "date_joined") <NEW_LINE> <DEDENT> def test_model_has_modified_at_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "modified_at") <NEW_LINE> <DEDENT> def test_model_has_language_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "language") <NEW_LINE> <DEDENT> def test_model_has_dsgvo_accepted_field(self, user_model_class): <NEW_LINE> <INDENT> assert hasattr(user_model_class, "dsgvo_accepted") <NEW_LINE> <DEDENT> def test_field_type_id(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance(user_model_class._meta.get_field("id"), models.UUIDField) <NEW_LINE> <DEDENT> def test_field_type_email(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance(user_model_class._meta.get_field("email"), models.EmailField) <NEW_LINE> <DEDENT> def test_field_type_first_name(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( user_model_class._meta.get_field("first_name"), models.CharField ) <NEW_LINE> <DEDENT> def test_field_type_last_name(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( 
user_model_class._meta.get_field("last_name"), models.CharField ) <NEW_LINE> <DEDENT> def test_field_type_personal_number(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( user_model_class._meta.get_field("personal_number"), models.CharField ) <NEW_LINE> <DEDENT> def test_field_type_created_at(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( user_model_class._meta.get_field("date_joined"), models.DateTimeField ) <NEW_LINE> <DEDENT> def test_field_type_modified_at(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( user_model_class._meta.get_field("modified_at"), models.DateTimeField ) <NEW_LINE> <DEDENT> def test_field_type_language(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( user_model_class._meta.get_field("language"), models.CharField ) <NEW_LINE> <DEDENT> def test_field_type_dsgvo_accepted(self, user_model_class): <NEW_LINE> <INDENT> assert isinstance( user_model_class._meta.get_field("dsgvo_accepted"), models.BooleanField ) <NEW_LINE> <DEDENT> def test_field_conf_id(self, user_model_class): <NEW_LINE> <INDENT> field = user_model_class._meta.get_field("id") <NEW_LINE> assert field.primary_key <NEW_LINE> assert field.default == uuid.uuid4 <NEW_LINE> assert not field.editable
|
This Testsuit summerizes the basic field tests:
1. Do all fields exist
2. Do all fields have the correct format/class instance
|
625990245e10d32532ce4088
|
class FormattingString(str): <NEW_LINE> <INDENT> def __new__(cls, formatting, normal): <NEW_LINE> <INDENT> new = str.__new__(cls, formatting) <NEW_LINE> new._normal = normal <NEW_LINE> return new <NEW_LINE> <DEDENT> def __call__(self, text): <NEW_LINE> <INDENT> return self + text + self._normal
|
A Unicode string which can be called upon a piece of text to wrap it in
formatting
|
62599024a4f1c619b294f4fc
|
class World(object): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def stacked_proxy_safe_get(stacked_proxy, key, default=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return getattr(stacked_proxy, key) <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> return default <NEW_LINE> <DEDENT> <DEDENT> def current_user(self): <NEW_LINE> <INDENT> if c.user_is_loggedin: <NEW_LINE> <INDENT> return self.stacked_proxy_safe_get(c, 'user') <NEW_LINE> <DEDENT> <DEDENT> def current_subreddit(self): <NEW_LINE> <INDENT> site = self.stacked_proxy_safe_get(c, 'site') <NEW_LINE> if not site: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> return site.name <NEW_LINE> <DEDENT> def current_subdomain(self): <NEW_LINE> <INDENT> return self.stacked_proxy_safe_get(c, 'subdomain') <NEW_LINE> <DEDENT> def current_oauth_client(self): <NEW_LINE> <INDENT> client = self.stacked_proxy_safe_get(c, 'oauth2_client', None) <NEW_LINE> return getattr(client, '_id', None) <NEW_LINE> <DEDENT> def current_loid_obj(self): <NEW_LINE> <INDENT> return self.stacked_proxy_safe_get(c, 'loid') <NEW_LINE> <DEDENT> def current_loid(self): <NEW_LINE> <INDENT> loid = self.current_loid_obj() <NEW_LINE> if not loid: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return loid.loid <NEW_LINE> <DEDENT> def is_admin(self, user): <NEW_LINE> <INDENT> if not user or not hasattr(user, 'name'): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return user.name in self.stacked_proxy_safe_get(g, 'admins', []) <NEW_LINE> <DEDENT> def is_employee(self, user): <NEW_LINE> <INDENT> if not user: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return user.employee <NEW_LINE> <DEDENT> def user_has_beta_enabled(self, user): <NEW_LINE> <INDENT> if not user: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return user.pref_beta <NEW_LINE> <DEDENT> def has_gold(self, user): <NEW_LINE> <INDENT> if not user: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return user.gold <NEW_LINE> <DEDENT> def 
is_user_loggedin(self, user): <NEW_LINE> <INDENT> if not (user or self.current_user()): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> def url_features(self): <NEW_LINE> <INDENT> return set(request.GET.getall('feature')) <NEW_LINE> <DEDENT> def live_config(self, name): <NEW_LINE> <INDENT> live = self.stacked_proxy_safe_get(g, 'live_config', {}) <NEW_LINE> return live.get(name) <NEW_LINE> <DEDENT> def live_config_iteritems(self): <NEW_LINE> <INDENT> live = self.stacked_proxy_safe_get(g, 'live_config', {}) <NEW_LINE> return live.iteritems() <NEW_LINE> <DEDENT> def simple_event(self, name): <NEW_LINE> <INDENT> stats = self.stacked_proxy_safe_get(g, 'stats', None) <NEW_LINE> if stats: <NEW_LINE> <INDENT> return stats.simple_event(name)
|
A World is the proxy to the app/request state for Features.
Proxying through World allows for easy testing and caching if needed.
|
6259902491af0d3eaad3ad2e
|
class Square: <NEW_LINE> <INDENT> def __init__(self, size=0, position=(0, 0)): <NEW_LINE> <INDENT> self.size = size <NEW_LINE> self.position = position <NEW_LINE> <DEDENT> def area(self): <NEW_LINE> <INDENT> return (self.__size * self.__size) <NEW_LINE> <DEDENT> @property <NEW_LINE> def size(self): <NEW_LINE> <INDENT> return self.__size <NEW_LINE> <DEDENT> @size.setter <NEW_LINE> def size(self, value): <NEW_LINE> <INDENT> if (type(value) is not int): <NEW_LINE> <INDENT> raise TypeError("size must be an integer") <NEW_LINE> <DEDENT> if value < 0: <NEW_LINE> <INDENT> raise ValueError("size must be >= 0") <NEW_LINE> <DEDENT> self.__size = value <NEW_LINE> <DEDENT> @property <NEW_LINE> def position(self): <NEW_LINE> <INDENT> return self.__position <NEW_LINE> <DEDENT> @position.setter <NEW_LINE> def position(self, value): <NEW_LINE> <INDENT> if len(value) is not 2: <NEW_LINE> <INDENT> raise TypeError("position must be a tuple of 2 positive integers") <NEW_LINE> <DEDENT> if (type(value[0]) is not int or value[0] < 0): <NEW_LINE> <INDENT> raise TypeError("position must be a tuple of 2 positive integers") <NEW_LINE> <DEDENT> if (type(value[1]) is not int or value[1] < 0): <NEW_LINE> <INDENT> raise TypeError("position must be a tuple of 2 positive integers") <NEW_LINE> <DEDENT> self.__position = value <NEW_LINE> <DEDENT> def my_print(self): <NEW_LINE> <INDENT> print(self.__str__()) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> if self.size == 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> str = '\n' * self.__position[1] <NEW_LINE> <DEDENT> for i in range(self.__size): <NEW_LINE> <INDENT> str += ' ' * self.position[0] <NEW_LINE> str += '#' * self.__size + '\n' <NEW_LINE> <DEDENT> return str[:-1]
|
Private instance attribute: size
Instantiation with area and position method
|
6259902421a7993f00c66e85
|
class MeridionalDiffusion(MeridionalAdvectionDiffusion): <NEW_LINE> <INDENT> def __init__(self, K=0., use_banded_solver=False, prescribed_flux=0., **kwargs): <NEW_LINE> <INDENT> super(MeridionalDiffusion, self).__init__( U=0., K=K, prescribed_flux=prescribed_flux, use_banded_solver=use_banded_solver, **kwargs)
|
A parent class for meridional diffusion-only processes,
with advection set to zero.
Otherwise identical to the parent class.
|
6259902463f4b57ef00864f7
|
class MakeFakeFileSet(TaskAction): <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> TaskAction.__init__(self, *args, **kwargs) <NEW_LINE> with self.config.TaskWorker.envForCMSWEB: <NEW_LINE> <INDENT> configDict = {"cacheduration": 1, "pycurl": True} <NEW_LINE> self.resourceCatalog = CRIC(logger=self.logger, configDict=configDict) <NEW_LINE> <DEDENT> <DEDENT> def getListOfSites(self): <NEW_LINE> <INDENT> with self.config.TaskWorker.envForCMSWEB: <NEW_LINE> <INDENT> sites = self.resourceCatalog.getAllPSNs() <NEW_LINE> <DEDENT> filteredSites = [site for site in sites if not site.startswith("T1_")] <NEW_LINE> return filteredSites <NEW_LINE> <DEDENT> def execute(self, *args, **kwargs): <NEW_LINE> <INDENT> totalevents = int(kwargs['task']['tm_totalunits']) <NEW_LINE> firstEvent = 1 <NEW_LINE> lastEvent = totalevents <NEW_LINE> firstLumi = 1 <NEW_LINE> lastLumi = 10 <NEW_LINE> if not kwargs['task']['tm_events_per_lumi']: <NEW_LINE> <INDENT> kwargs['task']['tm_events_per_lumi'] = 100 <NEW_LINE> <DEDENT> singleMCFileset = Fileset(name = "MCFakeFileSet") <NEW_LINE> newFile = File("MCFakeFile", size = 1000, events = totalevents) <NEW_LINE> newFile.setLocation(self.getListOfSites()) <NEW_LINE> newFile.addRun(Run(1, *range(firstLumi, lastLumi + 1))) <NEW_LINE> newFile["block"] = 'MCFakeBlock' <NEW_LINE> newFile["first_event"] = firstEvent <NEW_LINE> newFile["last_event"] = lastEvent <NEW_LINE> singleMCFileset.addFile(newFile) <NEW_LINE> return Result(task=kwargs['task'], result=singleMCFileset)
|
This is needed to make WMCore.JobSplitting lib working...
do not like very much. Given that all is fake here I am
quite sure we only need total number of events said that I
set all the other parmas to dummy values. We may want to set
them in the future
|
6259902426238365f5fada5c
|
class MapTerrainType( _DataTableRow_NamedBits ): <NEW_LINE> <INDENT> __tablename__ = "map_terrain_types"
|
A map terrain type.
|
62599024d18da76e235b78d3
|
class KernelFinder(torch.nn.Module): <NEW_LINE> <INDENT> def __init__(self, in_channel, out_channel, kernel_size=(3, 2, 2)): <NEW_LINE> <INDENT> super(KernelFinder, self).__init__() <NEW_LINE> self.kernel_size = (out_channel, *kernel_size) <NEW_LINE> self.features = nn.Sequential( nn.Conv2d(in_channel, 8, kernel_size=7), nn.MaxPool2d(2, stride=2), nn.ReLU(True), nn.Conv2d(8, 10, kernel_size=5), nn.MaxPool2d(2, stride=2), nn.ReLU(True) ) <NEW_LINE> self.kernel_finder = nn.Sequential( nn.Linear(90, 32), nn.ReLU(True), nn.Linear(32, reduce(lambda x, y: x * y, self.kernel_size)) ) <NEW_LINE> <DEDENT> def forward(self, x): <NEW_LINE> <INDENT> x = self.features(x) <NEW_LINE> x = x.view(-1, 10 * 3 * 3) <NEW_LINE> x = self.kernel_finder(x) <NEW_LINE> x = x.view(-1, *self.kernel_size) <NEW_LINE> return x
|
Given a batch of images returns the convolution
kernel that should applied to each one of them
|
62599024d164cc6175821e81
|
class _Start(cli.Command): <NEW_LINE> <INDENT> def setup(self): <NEW_LINE> <INDENT> cpu_count = multiprocessing.cpu_count() * 2 + 1 <NEW_LINE> parser = self._parser.add_parser( "start", help="Start the demo_proxy web worker.") <NEW_LINE> parser.add_argument( "--workers", type=int, default=int(os.environ.get("PROXY_WORKERS", cpu_count)), help="The number of thread workers used in order to serve " "clients. Default: %s" % cpu_count ) <NEW_LINE> parser.add_argument( "--redis-host", type=str, default=os.environ.get("PROXY_REDIS_HOST", "redis"), help="The IP address or the host name of the Redis Server. " "Default: redis" ) <NEW_LINE> parser.add_argument( "--redis-port", type=int, default=int(os.environ.get("PROXY_REDIS_PORT", 6379)), help="The port that should be used for connecting to the" "Redis Database. Default: 6379" ) <NEW_LINE> parser.add_argument( "--redis-database", type=int, default=int(os.environ.get("PROXY_REDIS_DATABASE", 0)), help="The Redis database that should be used. Default: 0" ) <NEW_LINE> parser.set_defaults(work=self.run) <NEW_LINE> <DEDENT> def _work(self): <NEW_LINE> <INDENT> pid = os.getpid() <NEW_LINE> with open(PID_FILE, "w") as file_handle: <NEW_LINE> <INDENT> file_handle.write(str(pid)) <NEW_LINE> <DEDENT> queue = demo_proxy_queue.RedisQueue(self.args.redis_host, self.args.redis_port, self.args.redis_database) <NEW_LINE> web_worker = wsd.ProxyWorker( tasks_queue=queue, workers_count=self.args.workers, delay=0.1) <NEW_LINE> web_worker.run()
|
Start the demo_proxy web worker.
|
625990248c3a8732951f7463
|
class GlossaryTermsLoader(TranslatableModelLoader): <NEW_LINE> <INDENT> FILE_EXTENSION = ".md" <NEW_LINE> @transaction.atomic <NEW_LINE> def load(self): <NEW_LINE> <INDENT> glossary_slugs = set() <NEW_LINE> for filename in listdir(self.get_localised_dir(get_default_language())): <NEW_LINE> <INDENT> if filename.endswith(self.FILE_EXTENSION): <NEW_LINE> <INDENT> glossary_slug = filename[:-len(self.FILE_EXTENSION)] <NEW_LINE> glossary_slugs.add(glossary_slug) <NEW_LINE> <DEDENT> <DEDENT> for glossary_slug in sorted(glossary_slugs): <NEW_LINE> <INDENT> term_translations = self.get_blank_translation_dictionary() <NEW_LINE> content_filename = "{}.md".format(glossary_slug) <NEW_LINE> content_translations = self.get_markdown_translations(content_filename) <NEW_LINE> for language, content in content_translations.items(): <NEW_LINE> <INDENT> term_translations[language]["definition"] = content.html_string <NEW_LINE> term_translations[language]["term"] = content.title <NEW_LINE> <DEDENT> glossary_term = GlossaryTerm( slug=glossary_slug, ) <NEW_LINE> self.populate_translations(glossary_term, term_translations) <NEW_LINE> self.mark_translation_availability(glossary_term, required_fields=["term", "definition"]) <NEW_LINE> glossary_term.save() <NEW_LINE> self.log("Added glossary term: {}".format(glossary_term.__str__())) <NEW_LINE> <DEDENT> self.log("{} glossary terms loaded!\n".format(len(glossary_slugs)))
|
Custom loader for loading glossary terms.
|
625990241d351010ab8f4a22
|
class RecentDaysTimeframe(Timeframe): <NEW_LINE> <INDENT> def __init__(self, days=14): <NEW_LINE> <INDENT> self._name = "Recent days ({0})".format(days) <NEW_LINE> self._end = datetime.datetime.utcnow() <NEW_LINE> self._start = self._end - datetime.timedelta(days=days) <NEW_LINE> self._days = days
|
Recent days timeframe (in UTC).
|
62599024a4f1c619b294f500
|
class Music(object): <NEW_LINE> <INDENT> __db = None <NEW_LINE> __url = None <NEW_LINE> def __init__(self, configFile = '163Spider.conf'): <NEW_LINE> <INDENT> self.__headers = { 'Host':'music.163.com', 'Referer':'http://music.163.com/', 'User-Agent':'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 2.1.0; en-US) AppleWebKit/536.2+ (KHTML like Gecko) Version/7.2.1.0 Safari/536.2+', 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' } <NEW_LINE> self.__url = 'http://music.163.com' <NEW_LINE> self.__db = db.MySQLDB() <NEW_LINE> if configFile != '163Spider.conf': <NEW_LINE> <INDENT> self.__db.setConfig(configFile) <NEW_LINE> <DEDENT> <DEDENT> def isSingle(self,song_id): <NEW_LINE> <INDENT> sql = ' select `song_id` from music163 where `song_id` = "%s"' %(song_id) <NEW_LINE> results = self.__db.querySQL(sql) <NEW_LINE> if len(results) > 0: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> def viewCapture(self,link): <NEW_LINE> <INDENT> sql = 'update `playlist163` set `over` = "N" where `link` = "%s"' %(link) <NEW_LINE> self.__db.insertSQL(sql) <NEW_LINE> url = self.__url + str(link) <NEW_LINE> response = requests.get(url,headers = self.__headers) <NEW_LINE> soup = BeautifulSoup(response.content,'html.parser') <NEW_LINE> musics = json.loads(soup.find('textarea',{'style':'display:none;'}).get_text()) <NEW_LINE> for music in musics: <NEW_LINE> <INDENT> name = pymysql.escape_string(music['name']) <NEW_LINE> author = pymysql.escape_string(music['artists'][0]['name']) <NEW_LINE> song_id = pymysql.escape_string(str(music['id'])) <NEW_LINE> sql = 'insert into `music163`(`song_id`,`song_name`,`author`) values ("%s","%s","%s")' %(song_id,name,author) <NEW_LINE> if(self.isSingle(song_id)) == True: <NEW_LINE> <INDENT> self.__db.insertSQL(sql) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> common.Log('{} : {},{}'.format("Error 901",url,song_id)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def 
viewCaptures(self): <NEW_LINE> <INDENT> sql = 'select `link` from `playlist163` where `over` = "N" limit 10' <NEW_LINE> urls = self.__db.querySQL(sql) <NEW_LINE> for url in urls: <NEW_LINE> <INDENT> self.viewCapture(url[0]) <NEW_LINE> <DEDENT> for url in urls: <NEW_LINE> <INDENT> self.__db.insertSQL('update `playlist163` set `over` = "Y" where `link` = "%s" ' %(str(url[0]))) <NEW_LINE> <DEDENT> <DEDENT> def getPlaylistRange(self): <NEW_LINE> <INDENT> results = self.__db.querySQL('select count(*) from `playlist163` where `over` = "N"') <NEW_LINE> return results[0][0]
|
docstring for Music
|
62599024ac7a0e7691f733f6
|
class CoordSysMaker(object): <NEW_LINE> <INDENT> coord_sys_klass = CoordinateSystem <NEW_LINE> def __init__(self, coord_names, name='', coord_dtype=np.float): <NEW_LINE> <INDENT> self.coord_names = tuple(coord_names) <NEW_LINE> self.name = name <NEW_LINE> self.coord_dtype = coord_dtype <NEW_LINE> <DEDENT> def __call__(self, N, name=None, coord_dtype=None): <NEW_LINE> <INDENT> if name is None: <NEW_LINE> <INDENT> name = self.name <NEW_LINE> <DEDENT> if coord_dtype is None: <NEW_LINE> <INDENT> coord_dtype = self.coord_dtype <NEW_LINE> <DEDENT> if N > len(self.coord_names): <NEW_LINE> <INDENT> raise CoordSysMakerError('Not enough axis names (have %d, ' 'you asked for %d)' % (len(self.coord_names), N)) <NEW_LINE> <DEDENT> return self.coord_sys_klass(self.coord_names[:N], name, coord_dtype)
|
Class to create similar coordinate maps of different dimensions
|
62599024bf627c535bcb23c2
|
class Seq2SeqEncoder(d2l.Encoder): <NEW_LINE> <INDENT> def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): <NEW_LINE> <INDENT> super().__init__(*kwargs) <NEW_LINE> self.embedding = tf.keras.layers.Embedding(vocab_size, embed_size) <NEW_LINE> self.rnn = src.Model.SequenceModel.RNN.RNN(tf.keras.layers.StackedRNNCells( [tf.keras.layers.GRUCell(num_hiddens, dropout=dropout) for _ in range(num_layers)]), return_sequences=True, return_state=True) <NEW_LINE> <DEDENT> def call(self, X, *args, **kwargs): <NEW_LINE> <INDENT> X = self.embedding(X) <NEW_LINE> output = self.rnn(X, **kwargs) <NEW_LINE> state = output[1:] <NEW_LINE> return output[0], state
|
The RNN encoder for sequence to sequence learning.
Defined in :numref:`sec_seq2seq`
|
6259902456b00c62f0fb37cc
|
class SimpleProducer(Producer): <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> self.partition_cycles = {} <NEW_LINE> self.random_start = kwargs.pop('random_start', True) <NEW_LINE> super(SimpleProducer, self).__init__(*args, **kwargs) <NEW_LINE> <DEDENT> def _next_partition(self, topic): <NEW_LINE> <INDENT> if topic not in self.partition_cycles: <NEW_LINE> <INDENT> if not self.client.has_metadata_for_topic(topic): <NEW_LINE> <INDENT> self.client.ensure_topic_exists(topic) <NEW_LINE> <DEDENT> self.partition_cycles[topic] = cycle(self.client.get_partition_ids_for_topic(topic)) <NEW_LINE> if self.random_start: <NEW_LINE> <INDENT> num_partitions = len(self.client.get_partition_ids_for_topic(topic)) <NEW_LINE> for _ in range(random.randint(0, num_partitions-1)): <NEW_LINE> <INDENT> next(self.partition_cycles[topic]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return next(self.partition_cycles[topic]) <NEW_LINE> <DEDENT> def send_messages(self, topic, *msg): <NEW_LINE> <INDENT> partition = self._next_partition(topic) <NEW_LINE> return super(SimpleProducer, self).send_messages( topic, partition, *msg ) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return '<SimpleProducer batch=%s>' % (self.async_send,)
|
A simple, round-robin producer.
See Producer class for Base Arguments
Additional Arguments:
random_start (bool, optional): randomize the initial partition which
the first message block will be published to, otherwise
if false, the first message block will always publish
to partition 0 before cycling through each partition,
defaults to True.
|
62599024287bf620b6272afa
|
class _SHH(ExecutionModifier): <NEW_LINE> <INDENT> __slots__ = ('retcode', 'timeout') <NEW_LINE> def __init__(self, retcode=0, timeout=None): <NEW_LINE> <INDENT> self.retcode = retcode <NEW_LINE> self.timeout = timeout <NEW_LINE> <DEDENT> def __rand__(self, cmd): <NEW_LINE> <INDENT> return cmd.run(retcode=self.retcode, timeout=self.timeout)
|
plumbum execution modifier to ensure output is not echoed to terminal
essentially a no-op, this may be used to override argcmdr settings
and cli flags controlling this feature, on a line-by-line basis, to
hide unnecessary or problematic (e.g. highly verbose) command output.
|
625990248c3a8732951f7464
|
class Configuracao(Enum): <NEW_LINE> <INDENT> CalcularDezenasSemPontuacao = 1 <NEW_LINE> EmailManual = 2 <NEW_LINE> ValorMinimoParaEnviarEmail = 3 <NEW_LINE> EmailAutomatico = 4 <NEW_LINE> VerificaJogoOnline = 5
|
how to use
# from enums import Enums
# print(Enums(1)) //Enum.megasena
# print(Enums['megasena']) //Enum.megasena
# print(Enums.megasena) //Enum.megasena
# print(Enums.megasena.value) //1
|
62599024d99f1b3c44d065b1
|
class UnThread(CompositionCommand): <NEW_LINE> <INDENT> SYNOPSIS = (None, 'unthread', 'message/unthread', None) <NEW_LINE> HTTP_CALLABLE = ('GET', 'POST') <NEW_LINE> HTTP_QUERY_VARS = { 'mid': 'message-id'} <NEW_LINE> HTTP_POST_VARS = { 'subject': 'Update the metadata subject as well'} <NEW_LINE> def command(self): <NEW_LINE> <INDENT> session, config, idx = self.session, self.session.config, self._idx() <NEW_LINE> args = list(self.args) <NEW_LINE> if '--' in args: <NEW_LINE> <INDENT> subject = ' '.join(args[(args.index('--')+1):]) <NEW_LINE> args = args[:args.index('--')] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> subject = self.data.get('subject', [None])[0] <NEW_LINE> <DEDENT> for mid in self.data.get('mid', []): <NEW_LINE> <INDENT> args.append('=%s' % mid) <NEW_LINE> <DEDENT> emails = [self._actualize_ephemeral(i) for i in self._choose_messages(args, allow_ephemeral=True)] <NEW_LINE> if emails: <NEW_LINE> <INDENT> if self.data.get('_method', 'POST') == 'POST': <NEW_LINE> <INDENT> for email in emails: <NEW_LINE> <INDENT> idx.unthread_message(email.msg_mid(), new_subject=subject) <NEW_LINE> <DEDENT> self._background_save(index=True) <NEW_LINE> return self._return_search_results( _('Unthreaded %d messages') % len(emails), emails) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._return_search_results( _('Unthread %d messages') % len(emails), emails) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return self._error(_('Nothing to do!'))
|
Remove a message from a thread.
|
625990243eb6a72ae038b573
|
class SpacyTokenizer(object): <NEW_LINE> <INDENT> def __init__(self, lang='en'): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> import spacy <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> raise ImportError("spacy not found. Install it using pip install spacy") <NEW_LINE> <DEDENT> self.lang = lang <NEW_LINE> self.model = spacy.blank(lang) <NEW_LINE> <DEDENT> def __call__(self, text): <NEW_LINE> <INDENT> return [token.text for token in self.model(text)] <NEW_LINE> <DEDENT> def __getstate__(self): <NEW_LINE> <INDENT> return {'lang': self.lang} <NEW_LINE> <DEDENT> def __setstate__(self, state): <NEW_LINE> <INDENT> self.lang = state['lang'] <NEW_LINE> try: <NEW_LINE> <INDENT> import spacy <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> raise ImportError("spacy not found. Install it using pip install spacy") <NEW_LINE> <DEDENT> self.model = spacy.blank(self.lang) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return '{}({})'.format(self.__class__.__name__, self.lang)
|
Spacy tokenizer
|
6259902421a7993f00c66e8b
|
class MyNode(SCons.Node.Node): <NEW_LINE> <INDENT> def __init__(self, name): <NEW_LINE> <INDENT> SCons.Node.Node.__init__(self) <NEW_LINE> self.name = name <NEW_LINE> self.Tag('found_includes', []) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return self.name <NEW_LINE> <DEDENT> def get_found_includes(self, env, scanner, target): <NEW_LINE> <INDENT> return scanner(self)
|
The base Node class contains a number of do-nothing methods that
we expect to be overridden by real, functional Node subclasses. So
simulate a real, functional Node subclass.
|
6259902463f4b57ef00864fa
|
class CarImage(object): <NEW_LINE> <INDENT> def __init__(self, filename='./topview_car_wagon.png'): <NEW_LINE> <INDENT> super(CarImage, self).__init__() <NEW_LINE> self.fig = plt.figure() <NEW_LINE> plt.rcParams['keymap.fullscreen'] = '' <NEW_LINE> plt.rcParams['keymap.home'] = '' <NEW_LINE> plt.rcParams['keymap.back'] = '' <NEW_LINE> plt.rcParams['keymap.forward'] = '' <NEW_LINE> plt.rcParams['keymap.pan'] = '' <NEW_LINE> plt.rcParams['keymap.zoom'] = '' <NEW_LINE> plt.rcParams['keymap.save'] = '' <NEW_LINE> plt.rcParams['keymap.quit'] = '' <NEW_LINE> plt.rcParams['keymap.grid'] = '' <NEW_LINE> plt.rcParams['keymap.yscale'] = '' <NEW_LINE> plt.rcParams['keymap.xscale'] = '' <NEW_LINE> plt.rcParams['keymap.all_axes'] = '' <NEW_LINE> plt.rcParams['keymap.yscale'] = '' <NEW_LINE> <DEDENT> def updateFrom(self, state): <NEW_LINE> <INDENT> print(state) <NEW_LINE> plt.scatter(state[0],state[1]) <NEW_LINE> plt.ylim([-10,10]) <NEW_LINE> plt.pause(.01) <NEW_LINE> pass
|
docstring for CarImage.
|
62599024c432627299fa3f02
|
class PageListByUserView(LoginRequiredMixin, ListView): <NEW_LINE> <INDENT> queryset = Page.objects.all() <NEW_LINE> def get_queryset(self): <NEW_LINE> <INDENT> queryset = super(PageListByUserView, self).get_queryset() <NEW_LINE> queryset = queryset.filter(category__is_private=False, category__user__username=self.kwargs["username"]) <NEW_LINE> print (queryset) <NEW_LINE> return queryset <NEW_LINE> <DEDENT> def get_context_data(self, **kwargs): <NEW_LINE> <INDENT> context = super(PageListByUserView, self).get_context_data(**kwargs) <NEW_LINE> context["pages"] = self.queryset <NEW_LINE> context["profile_request"] = get_object_or_404(UserProfile, user=self.request.user)
|
Lista páginas por usuario.
:URl: http://ip_servidor/user/<username>/page/listar
|
62599024ac7a0e7691f733fa
|
class RedisQueue(BaseQueue): <NEW_LINE> <INDENT> def setup_queue(self): <NEW_LINE> <INDENT> conn_instance = RedisConnPool.from_config(self.connection_params) <NEW_LINE> self.queue = conn_instance.get_conn() <NEW_LINE> <DEDENT> def put(self, data, queue_name=None): <NEW_LINE> <INDENT> if self.get_len(queue_name=queue_name) < self.max_queue_len: <NEW_LINE> <INDENT> res = self.queue.lpush(queue_name or self.queue_name, data) <NEW_LINE> return res <NEW_LINE> <DEDENT> raise QueueFullException <NEW_LINE> <DEDENT> def get(self, batch, timeout=1, queue_name=None): <NEW_LINE> <INDENT> count = 0 <NEW_LINE> data_lst = list() <NEW_LINE> if not self.queue.exists(self.queue_name): <NEW_LINE> <INDENT> return True, None <NEW_LINE> <DEDENT> while count < batch: <NEW_LINE> <INDENT> data = self.queue.brpop(queue_name or self.queue_name, timeout=timeout) <NEW_LINE> if timeout is not 0 and isinstance(data, tuple): <NEW_LINE> <INDENT> data_lst.append(literal_eval(data[1].decode('utf-8'))) <NEW_LINE> count += 1 <NEW_LINE> <DEDENT> elif data is not None: <NEW_LINE> <INDENT> data_lst.append(literal_eval(data.decode('utf-8'))) <NEW_LINE> count += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> return count < batch, data_lst <NEW_LINE> <DEDENT> def get_len(self, queue_name=None): <NEW_LINE> <INDENT> return self.queue.llen(queue_name or self.queue_name) <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def from_config(cls, config): <NEW_LINE> <INDENT> return cls(**config)
|
this is a fifo Queue by redis
|
6259902456b00c62f0fb37d0
|
class PRC(object): <NEW_LINE> <INDENT> logging_name = "isce.orbit.PRC.PRC" <NEW_LINE> @logged <NEW_LINE> def __init__(self, file=None): <NEW_LINE> <INDENT> self.filename = file <NEW_LINE> self.firstEpoch = 0 <NEW_LINE> self.lastEpoch = 0 <NEW_LINE> self.tdtOffset = 0 <NEW_LINE> self.orbit = Orbit() <NEW_LINE> self.orbit.configure() <NEW_LINE> self.orbit.setOrbitQuality('Precise') <NEW_LINE> self.orbit.setOrbitSource('PRC') <NEW_LINE> return None <NEW_LINE> <DEDENT> def getOrbit(self): <NEW_LINE> <INDENT> return self.orbit <NEW_LINE> <DEDENT> def parse(self): <NEW_LINE> <INDENT> if os.path.splitext(self.filename)[1] == '.Z': <NEW_LINE> <INDENT> from subprocess import Popen, PIPE <NEW_LINE> fp = Popen(["zcat", self.filename], stdout=PIPE).stdout <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fp = open(self.filename,'r') <NEW_LINE> <DEDENT> data = fp.read() <NEW_LINE> fp.close() <NEW_LINE> numLines = int(len(data)/130) <NEW_LINE> for i in range(numLines): <NEW_LINE> <INDENT> line = data[i*130:(i+1)*130] <NEW_LINE> self.__parseLine(line) <NEW_LINE> <DEDENT> <DEDENT> def __parseLine(self,line): <NEW_LINE> <INDENT> referenceFrame = line[0:6].decode('utf-8') <NEW_LINE> if (referenceFrame == 'STATE '): <NEW_LINE> <INDENT> self.__parseStateLine(line) <NEW_LINE> <DEDENT> if (referenceFrame == 'STTERR'): <NEW_LINE> <INDENT> self.__parseTerrestrialLine(line) <NEW_LINE> <DEDENT> <DEDENT> def __parseTerrestrialLine(self,line): <NEW_LINE> <INDENT> j2000Day = float(line[14:20])/10.0 + 0.5 <NEW_LINE> tdt = float(line[20:31])/1e6 <NEW_LINE> x = float(line[31:43])/1e3 <NEW_LINE> y = float(line[43:55])/1e3 <NEW_LINE> z = float(line[55:67])/1e3 <NEW_LINE> vx = float(line[67:78])/1e6 <NEW_LINE> vy = float(line[78:89])/1e6 <NEW_LINE> vz = float(line[89:100])/1e6 <NEW_LINE> quality = line[127] <NEW_LINE> tdt = tdt - self.tdtOffset <NEW_LINE> dt = self.__j2000ToDatetime(j2000Day,tdt) <NEW_LINE> sv = StateVector() <NEW_LINE> sv.configure() <NEW_LINE> sv.setTime(dt) <NEW_LINE> 
sv.setPosition([x,y,z]) <NEW_LINE> sv.setVelocity([vx,vy,vz]) <NEW_LINE> self.orbit.addStateVector(sv) <NEW_LINE> <DEDENT> def __parseStateLine(self,line): <NEW_LINE> <INDENT> self.firstEpoch = self.__j2000ToDatetime(float(line[6:12])/10.0,0.0) <NEW_LINE> self.lastEpoch = self.__j2000ToDatetime(float(line[12:18])/10.0,0.0) <NEW_LINE> self.tdtOffset = float(line[47:52]) <NEW_LINE> self.tdtOffset = self.tdtOffset/1e3 <NEW_LINE> <DEDENT> def __j2000ToDatetime(self,j2000Day,tdt): <NEW_LINE> <INDENT> j2000 = datetime.datetime(year=2000,month=1,day=1) <NEW_LINE> dt = j2000 + datetime.timedelta(days=j2000Day,seconds=tdt) <NEW_LINE> return dt <NEW_LINE> <DEDENT> pass
|
A class to parse orbit data from D-PAF
|
62599024be8e80087fbbff88
|
class GlyphTextureAtlas(image.Texture): <NEW_LINE> <INDENT> region_class = Glyph <NEW_LINE> x = 0 <NEW_LINE> y = 0 <NEW_LINE> line_height = 0 <NEW_LINE> def apply_blend_state(self): <NEW_LINE> <INDENT> glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) <NEW_LINE> glEnable(GL_BLEND) <NEW_LINE> <DEDENT> def fit(self, image): <NEW_LINE> <INDENT> if self.x + image.width > self.width: <NEW_LINE> <INDENT> self.x = 0 <NEW_LINE> self.y += self.line_height + 1 <NEW_LINE> self.line_height = 0 <NEW_LINE> <DEDENT> if self.y + image.height > self.height: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> self.line_height = max(self.line_height, image.height) <NEW_LINE> region = self.get_region( self.x, self.y, image.width, image.height) <NEW_LINE> if image.width > 0: <NEW_LINE> <INDENT> region.blit_into(image, 0, 0, 0) <NEW_LINE> self.x += image.width + 1 <NEW_LINE> <DEDENT> return region
|
A texture within which glyphs can be drawn.
|
6259902430c21e258be99726
|
class BaseLabel(Enum): <NEW_LINE> <INDENT> pass
|
Utility class to inherit from and use as type hint/validation.
Implementations: Labels, FormattedLabels
|
625990245166f23b2e2442e5
|
class TopResult: <NEW_LINE> <INDENT> def __init__(self, soup): <NEW_LINE> <INDENT> self.link = "???" <NEW_LINE> linksoup = soup.find("a", {"class": "tab_item_overlay"}) <NEW_LINE> if linksoup is not None: <NEW_LINE> <INDENT> self.link = linksoup.get("href") <NEW_LINE> if self.link is None: <NEW_LINE> <INDENT> self.link = "???" <NEW_LINE> <DEDENT> <DEDENT> self.image = "???" <NEW_LINE> imagesoup = soup.find("div", {"class": "tab_item_cap"}) <NEW_LINE> if imagesoup is not None: <NEW_LINE> <INDENT> img = imagesoup.get("img") <NEW_LINE> if img is not None: <NEW_LINE> <INDENT> self.image = img.get("src") <NEW_LINE> <DEDENT> <DEDENT> self.discount = "" <NEW_LINE> self.price = "" <NEW_LINE> self.discountPrice = "???" <NEW_LINE> pricesoup = soup.find("div", {"class": "discount_block"}) <NEW_LINE> if pricesoup is not None: <NEW_LINE> <INDENT> discount = pricesoup.find("div", {"class": "discount_pct"}) <NEW_LINE> if discount is not None: <NEW_LINE> <INDENT> self.discount = discount.get_text() <NEW_LINE> <DEDENT> dpsoup = pricesoup.find("div", {"class": "discount_prices"}) <NEW_LINE> if dpsoup is not None: <NEW_LINE> <INDENT> if self.discount == "": <NEW_LINE> <INDENT> price = dpsoup.find("div", {"class": "discount_final_price"}) <NEW_LINE> self.price = price.get_text() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> price = dpsoup.find("div", {"class": "discount_original_price"}) <NEW_LINE> self.price = price.get_text() <NEW_LINE> discountprice = dpsoup.find("div", {"class": "discount_final_price"}) <NEW_LINE> self.discountPrice = discountprice.get_text() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if self.price.lower() == "freetoplay": <NEW_LINE> <INDENT> self.price = "Free to Play" <NEW_LINE> <DEDENT> titlesoup = soup.find("div", {"class": "tab_item_content"}) <NEW_LINE> if titlesoup is not None: <NEW_LINE> <INDENT> title = soup.find("div", {"class": "tab_item_name"}) <NEW_LINE> self.title = title.get_text() <NEW_LINE> <DEDENT> self.review = "???" 
<NEW_LINE> self.reviewLong = "???" <NEW_LINE> self.released = "???" <NEW_LINE> <DEDENT> def get_price_text(self): <NEW_LINE> <INDENT> if self.discount == "": <NEW_LINE> <INDENT> return self.price <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.discountPrice + " (" + self.discount + ")" <NEW_LINE> <DEDENT> <DEDENT> @asyncio.coroutine <NEW_LINE> def update_price(self, currency, currency_symbol): <NEW_LINE> <INDENT> if currency != "GBP": <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self.price != "???" and self.price != "" and self.price != "Free to Play": <NEW_LINE> <INDENT> rawprice = yield from exchange(float(self.price[1:]), "GBP", currency) <NEW_LINE> self.price = currency_symbol + str(rawprice) <NEW_LINE> <DEDENT> if self.discountPrice != "???" and self.price != "": <NEW_LINE> <INDENT> rawdiscountprice = yield from exchange(float(self.discountPrice[1:]), "GBP", currency) <NEW_LINE> self.discountPrice = currency_symbol + str(rawdiscountprice) <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> if STEAM_PRINTING: <NEW_LINE> <INDENT> print("failed to convert currency (GBP)") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return self.title
|
Class containing information about the games on the front of the store (new releases, specials etc.)
|
62599024d99f1b3c44d065b3
|
class Timer: <NEW_LINE> <INDENT> def __init__(self, timeout=0.0): <NEW_LINE> <INDENT> self._timeout = None <NEW_LINE> self._start_time = None <NEW_LINE> if timeout: <NEW_LINE> <INDENT> self.rewind_to(timeout) <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def expired(self): <NEW_LINE> <INDENT> return (monotonic() - self._start_time) > self._timeout <NEW_LINE> <DEDENT> def rewind_to(self, new_timeout): <NEW_LINE> <INDENT> self._timeout = float(new_timeout) <NEW_LINE> self._start_time = monotonic()
|
A reusable class to track timeouts, like an egg timer
|
625990241d351010ab8f4a28
|
class UnknownRepositoryOrigin(PublisherError): <NEW_LINE> <INDENT> def __str__(self): <NEW_LINE> <INDENT> return _("Unknown repository origin '%s'") % self.data
|
Used to indicate that a repository URI could not be found in the
list of repository origins.
|
6259902421bff66bcd723b74
|
class LandSetCreateView(View): <NEW_LINE> <INDENT> def post(self, request): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> json_dict = json.loads(request.body.decode()) <NEW_LINE> LandDistrict.objects.create(**json_dict) <NEW_LINE> return JsonResponse({"statue": 200, 'data': '添加成功'}, safe=False) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> context = {"Result": 'false', 'Msg': {e}} <NEW_LINE> return JsonResponse(context)
|
地区设置/上传
|
62599024a8ecb03325872130
|
class Client: <NEW_LINE> <INDENT> def execute(self): <NEW_LINE> <INDENT> benz_builder = BenzBuilder() <NEW_LINE> queue = [] <NEW_LINE> queue.append('start') <NEW_LINE> queue.append('stop') <NEW_LINE> queue.append('alarm') <NEW_LINE> benz_builder.set_queue(queue) <NEW_LINE> benz = benz_builder.get_carmodel() <NEW_LINE> benz.run()
|
没有导演类的客户端
|
62599024796e427e5384f68f
|
class OSBlockINv2(nn.Module): <NEW_LINE> <INDENT> def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): <NEW_LINE> <INDENT> super(OSBlockINv2, self).__init__() <NEW_LINE> assert T >= 1 <NEW_LINE> assert out_channels >= reduction and out_channels % reduction == 0 <NEW_LINE> mid_channels = out_channels // reduction <NEW_LINE> self.conv1 = Conv1x1(in_channels, mid_channels) <NEW_LINE> self.conv2 = nn.ModuleList() <NEW_LINE> for t in range(1, T + 1): <NEW_LINE> <INDENT> self.conv2 += [LightConvStream(mid_channels, mid_channels, t)] <NEW_LINE> <DEDENT> self.gate = ChannelGate(mid_channels) <NEW_LINE> self.conv3 = Conv1x1Linear(mid_channels, out_channels) <NEW_LINE> self.downsample = None <NEW_LINE> if in_channels != out_channels: <NEW_LINE> <INDENT> self.downsample = Conv1x1Linear(in_channels, out_channels) <NEW_LINE> <DEDENT> self.IN = nn.InstanceNorm2d(out_channels, affine=True) <NEW_LINE> <DEDENT> def forward(self, x): <NEW_LINE> <INDENT> identity = x <NEW_LINE> x1 = self.conv1(x) <NEW_LINE> x2 = 0 <NEW_LINE> for conv2_t in self.conv2: <NEW_LINE> <INDENT> x2_t = conv2_t(x1) <NEW_LINE> x2 = x2 + self.gate(x2_t) <NEW_LINE> <DEDENT> x3 = self.conv3(x2) <NEW_LINE> if self.downsample is not None: <NEW_LINE> <INDENT> identity = self.downsample(identity) <NEW_LINE> <DEDENT> out = x3 + identity <NEW_LINE> out = self.IN(out) <NEW_LINE> return F.relu(out)
|
Omni-scale feature learning block with instance normalization.
|
625990248c3a8732951f746a
|
class RouteLoader(object): <NEW_LINE> <INDENT> def __init__(self, path_prefix, path=None, app_name=None): <NEW_LINE> <INDENT> if not path: <NEW_LINE> <INDENT> raise exception.UrlError('path arg not found!') <NEW_LINE> <DEDENT> if not app_name: <NEW_LINE> <INDENT> raise exception.UrlError('app_name arg not found!') <NEW_LINE> <DEDENT> self.path_prefix = path_prefix <NEW_LINE> self.path = path if path != '/' else '' <NEW_LINE> self.app_name = app_name <NEW_LINE> <DEDENT> def urlhelper(self, *urllist): <NEW_LINE> <INDENT> urls = [] <NEW_LINE> for u in urllist: <NEW_LINE> <INDENT> handler_path = '.'.join([self.path_prefix, u.get('handler_path')]) <NEW_LINE> pattern = u.get('pattern') <NEW_LINE> if pattern.endswith('/'): <NEW_LINE> <INDENT> pattern += '?' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pattern += '/?' <NEW_LINE> <DEDENT> path = u.get('path', None) <NEW_LINE> if path: <NEW_LINE> <INDENT> if path != '/': <NEW_LINE> <INDENT> pattern = path + pattern <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> pattern = self.path + pattern <NEW_LINE> <DEDENT> kw = dict(u.get('kwargs', {})) <NEW_LINE> kw['app_name'] = self.app_name <NEW_LINE> url_name = self.app_name + '-' + u.get('name') <NEW_LINE> urls.append(urlspec(pattern, import_object(handler_path), kwargs=kw, name=url_name)) <NEW_LINE> <DEDENT> return urls
|
路由加载器,将路由加载进tornado的路由系统中,
path_prefix:为模块前缀,这样路由可以省去写前缀
path:由于设计为子应用形式,路由最终路径为 /path/你的路由,比如blog应用下的/index,会被解析为/blog/index,
如果不希望在路由前加/path,则为单个路由设置path='/',path为必填参数
app_name:设置为子应用的模块名,大小写必须相同,根据此设置来找模版位置,必填
|
62599024d99f1b3c44d065b5
|
class EventListener: <NEW_LINE> <INDENT> @abc.abstractmethod <NEW_LINE> def notify(self, event): <NEW_LINE> <INDENT> return
|
Class implementing a listener that can be notified of events.
|
62599024c432627299fa3f06
|
class Fraction(SignedWord): <NEW_LINE> <INDENT> def __init__(self, at, divisor=1.0, step=0, optional=False): <NEW_LINE> <INDENT> SignedWord.__init__(self, at, step, optional=optional) <NEW_LINE> self.divisor = divisor <NEW_LINE> <DEDENT> def fetch(self, data, pos): <NEW_LINE> <INDENT> val = SignedWord.fetch(self, data, pos) <NEW_LINE> return val / self.divisor if val is not None else None
|
A fraction with the given divisor.
|
6259902421bff66bcd723b76
|
class MultipleArduinosFoundError(Exception): <NEW_LINE> <INDENT> pass
|
Raised if multiple connected boards are found with the same serial number
|
62599024a4f1c619b294f508
|
class SmoothnessKeyMobileSites(test.Test): <NEW_LINE> <INDENT> test = smoothness.Smoothness <NEW_LINE> page_set = 'page_sets/key_mobile_sites.json'
|
Measures rendering statistics while scrolling down the key mobile sites.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
|
62599024287bf620b6272b02
|
class Assays(APIView): <NEW_LINE> <INDENT> def get_object(self, uuid): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return Assay.objects.get(uuid=uuid) <NEW_LINE> <DEDENT> except (Assay.DoesNotExist, Assay.MultipleObjectsReturned): <NEW_LINE> <INDENT> raise Http404 <NEW_LINE> <DEDENT> <DEDENT> def get_query_set(self, study_uuid): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> study_obj = Study.objects.get( uuid=study_uuid) <NEW_LINE> return Assay.objects.filter(study=study_obj) <NEW_LINE> <DEDENT> except (Study.DoesNotExist, Study.MultipleObjectsReturned): <NEW_LINE> <INDENT> raise Http404 <NEW_LINE> <DEDENT> <DEDENT> def get(self, request, format=None): <NEW_LINE> <INDENT> if request.query_params.get('uuid'): <NEW_LINE> <INDENT> assay = self.get_object(request.query_params.get('uuid')) <NEW_LINE> serializer = AssaySerializer(assay) <NEW_LINE> return Response(serializer.data) <NEW_LINE> <DEDENT> elif request.query_params.get('study'): <NEW_LINE> <INDENT> assays = self.get_query_set(request.query_params.get('study')) <NEW_LINE> serializer = AssaySerializer(assays, many=True) <NEW_LINE> return Response(serializer.data) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Http404
|
Return assay object
---
#YAML
GET:
serializer: AssaySerializer
omit_serializer: false
parameters:
- name: uuid
description: Assay uuid
paramType: query
type: string
required: false
- name: study
description: Study uuid
paramType: query
type: string
required: false
...
|
625990249b70327d1c57fc96
|
class Result(Data): <NEW_LINE> <INDENT> def __defaults__(self): <NEW_LINE> <INDENT> self.tag = 'Results' <NEW_LINE> pass <NEW_LINE> <DEDENT> def add_segment(self,new_seg): <NEW_LINE> <INDENT> tag = new_seg['tag'] <NEW_LINE> new_seg = Segment(new_seg) <NEW_LINE> self.segments[tag] = new_seg <NEW_LINE> return
|
SUAVE.Results()
Results Data
|
6259902421a7993f00c66e91
|
class CarProcessor(DataProcessor): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.labels = set() <NEW_LINE> <DEDENT> def get_train_examples(self, data_dir): <NEW_LINE> <INDENT> logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) <NEW_LINE> return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") <NEW_LINE> <DEDENT> def get_dev_examples(self, data_dir): <NEW_LINE> <INDENT> return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") <NEW_LINE> <DEDENT> def get_labels(self): <NEW_LINE> <INDENT> return list(self.labels) <NEW_LINE> <DEDENT> def _create_examples(self, lines, set_type): <NEW_LINE> <INDENT> examples = [] <NEW_LINE> for (i, line) in enumerate(lines): <NEW_LINE> <INDENT> guid = "%s-%s" % (set_type, i) <NEW_LINE> text_a = tokenization.convert_to_unicode(line[1]) <NEW_LINE> label = tokenization.convert_to_unicode(line[0]) <NEW_LINE> self.labels.add(label) <NEW_LINE> examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) <NEW_LINE> <DEDENT> return examples
|
Processor for Car.
|
6259902456b00c62f0fb37d6
|
class Logger(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> logging.basicConfig(level=logging.INFO) <NEW_LINE> self.logger = logging.getLogger(Strings.APP_NAME) <NEW_LINE> self.logger.setLevel(logging.INFO) <NEW_LINE> <DEDENT> def info(self, message): <NEW_LINE> <INDENT> self.logger.info(message) <NEW_LINE> <DEDENT> def warning(self, message): <NEW_LINE> <INDENT> self.logger.warning(message) <NEW_LINE> <DEDENT> def error(self, message): <NEW_LINE> <INDENT> self.logger.error(message) <NEW_LINE> <DEDENT> def critical(self, message): <NEW_LINE> <INDENT> self.logger.critical(message) <NEW_LINE> <DEDENT> def emergency(self, message): <NEW_LINE> <INDENT> self.logger.emergency(message)
|
Wrapper class for logging
|
6259902491af0d3eaad3ad3d
|
class ReconnectableMySQLDatabase(RetryOperationalError, MySQLDatabase): <NEW_LINE> <INDENT> pass
|
In case of broken pipe or any connection outage, reconnect to DB
|
62599024925a0f43d25e8f5d
|
@pytest.mark.draft <NEW_LINE> @pytest.mark.components <NEW_LINE> @pytest.allure.story('Distributions') <NEW_LINE> @pytest.allure.feature('POST') <NEW_LINE> class Test_PFE_Components(object): <NEW_LINE> <INDENT> @pytest.allure.link('https://jira.qumu.com/browse/TC-46856') <NEW_LINE> @pytest.mark.Distributions <NEW_LINE> @pytest.mark.POST <NEW_LINE> def test_TC_46856_POST_Distributions_Vne_Dist_Origin_Group_2_Edge_Simple_Media_Mp3(self, context): <NEW_LINE> <INDENT> with pytest.allure.step("""Verify that VOD program having MP3 file is played successfully using VNE Route (Origin as VNE Group Distribution VNEGroupEdge 2 Standalone VNEs) Delivery System in QED."""): <NEW_LINE> <INDENT> distributionDetails = context.sc.DistributionDetails( activationDate='2017-09-20T07:36:46.542Z', distributionPolicy='OPTIONAL', expirationDate=None, files=[{ 'id': 'mp3opt', 'sourceUrl': 'qedorigin://Auto_storage/BachGavotte.mp3', 'streamMetadata': { 'bitrateKbps': 100, 'width': 10, 'height': 5, 'mimeType': 'video/mp3', 'contentType': 'UNSPECIFIED' } }], id='simpleMediaMP3DistOriginGroup_2EdgeOpt', name='MP3 Distribution with Dist Origin Group and 2 EdgeOpt', status=None, tags=None, targetAudiences=[{ 'id': 'Audience_distOriginGroup_2EdgeVNE' }]) <NEW_LINE> response = check( context.cl.Distributions.createEntity( body=distributionDetails ) ) <NEW_LINE> <DEDENT> with pytest.allure.step("""Verify that VOD program having MP3 file is played successfully using VNE Route (Origin as VNE Group Distribution VNEGroupEdge 2 Standalone VNEs) Delivery System in QED."""): <NEW_LINE> <INDENT> distributionDetails = context.sc.DistributionDetails( activationDate='2017-09-20T07:36:46.542Z', distributionPolicy='OPTIONAL', expirationDate=None, files=[{ 'id': 'mp3opt', 'sourceUrl': 'qedorigin://Auto_storage/BachGavotte.mp3', 'streamMetadata': { 'bitrateKbps': 100, 'width': 10, 'height': 5, 'mimeType': 'video/mp3', 'contentType': 'UNSPECIFIED' } }], id='simpleMediaMP3DistOriginGroup_2EdgeOpt', name='MP3 
Distribution with Dist Origin Group and 2 EdgeOpt', status=None, tags=None, targetAudiences=[{ 'id': 'Audience_distOriginGroup_2EdgeVNE' }]) <NEW_LINE> request = context.cl.Distributions.createEntity( body=distributionDetails ) <NEW_LINE> try: <NEW_LINE> <INDENT> client, response = check( request, quiet=True, returnResponse=True ) <NEW_LINE> <DEDENT> except (HTTPBadRequest, HTTPForbidden) as e: <NEW_LINE> <INDENT> get_error_message(e) | expect.any( should.start_with('may not be empty'), should.start_with('Invalid page parameter specified'), should.contain('Invalid Authorization Token') ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception( "Expected error message, got {} status code instead.".format( response.status_code))
|
PFE Distributions test cases.
|
625990249b70327d1c57fc98
|
class KarmaItemEncoder(json.JSONEncoder): <NEW_LINE> <INDENT> def default(self, o: Any) -> Any: <NEW_LINE> <INDENT> if isinstance(o, KarmaItem): <NEW_LINE> <INDENT> return {"name": o.name, "pluses": o.pluses, "minuses": o.minuses} <NEW_LINE> <DEDENT> return super().default(o)
|
This class defines how to JSON Serialize our KarmaItem class. We do this via
implementing the default() function which defines what to do with objects that the
JSON library is attempting to serialize. If the object happens to be an instance of
KarmaItem, we return the appropriate representation of the object, otherwise we just
call the superclass's implementation of default and let it handle the object.
|
625990246e29344779b01567
|
class SimulationState: <NEW_LINE> <INDENT> def __init__(self, cfg: RrtConfig, obstacles: list[Obstacle] = None) -> None: <NEW_LINE> <INDENT> self._cfg: RrtConfig = cfg <NEW_LINE> self.ax: plt.Axes = init_plot(cfg) <NEW_LINE> self.rr_tree: Node = cfg.start_node <NEW_LINE> self.obstacles: list[Obstacle] = obstacles if obstacles else DEFAULT_OBSTACLES <NEW_LINE> self.step_counter: int = 0 <NEW_LINE> self.global_closest: Node = cfg.start_node <NEW_LINE> self.global_closest_dist: float = 1e6 <NEW_LINE> self.running: bool = True <NEW_LINE> <DEDENT> def update(self, new_node: Node) -> None: <NEW_LINE> <INDENT> dist_to_end = new_node.distance_to(self._cfg.end_node) <NEW_LINE> if dist_to_end < self.global_closest_dist: <NEW_LINE> <INDENT> self.global_closest_dist = dist_to_end <NEW_LINE> self.global_closest = new_node <NEW_LINE> <DEDENT> print(f'[{self.step_counter}] (new node / closest global) = ' f'({dist_to_end:.2f} / global {self.global_closest_dist:.2f})') <NEW_LINE> if new_node.distance_to(self._cfg.end_node) < self._cfg.eps: <NEW_LINE> <INDENT> self.running = False <NEW_LINE> print('Target found or steps done!') <NEW_LINE> plot_tree(self.ax, self.rr_tree, self._cfg.grid_size, self._cfg.start_node, self._cfg.end_node, obstacles=self.obstacles, reached_target=True, last_node=new_node, fast_plot=self._cfg.fast_plot) <NEW_LINE> input() <NEW_LINE> <DEDENT> elif self.step_counter == self._cfg.max_steps: <NEW_LINE> <INDENT> print('Max steps reached - target not found.') <NEW_LINE> self.running = False <NEW_LINE> input() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.step_counter += 1 <NEW_LINE> start_time = time.time() <NEW_LINE> plot_tree(self.ax, self.rr_tree, self._cfg.grid_size, self._cfg.start_node, self._cfg.end_node, obstacles=self.obstacles, fast_plot=self._cfg.fast_plot) <NEW_LINE> print(f'\tplotting took {time.time() - start_time:.2f}')
|
Container class holding the state of the path finding algorithm.
|
625990243eb6a72ae038b57b
|
class BackgroundInitialSyncTestCase(jsfile.DynamicJSTestCase): <NEW_LINE> <INDENT> JS_FILENAME = os.path.join("jstests", "hooks", "run_initial_sync_node_validation.js") <NEW_LINE> def __init__( self, logger, test_name, description, base_test_name, hook, shell_options=None): <NEW_LINE> <INDENT> jsfile.DynamicJSTestCase.__init__(self, logger, test_name, description, base_test_name, hook, self.JS_FILENAME, shell_options) <NEW_LINE> <DEDENT> def run_test(self): <NEW_LINE> <INDENT> sync_node = self.fixture.get_initial_sync_node() <NEW_LINE> sync_node_conn = sync_node.mongo_client() <NEW_LINE> if self._hook.tests_run >= self._hook.n: <NEW_LINE> <INDENT> self.logger.info("%d tests have been run against the fixture, waiting for initial sync" " node to go into SECONDARY state", self._hook.tests_run) <NEW_LINE> self._hook.tests_run = 0 <NEW_LINE> cmd = bson.SON([("replSetTest", 1), ("waitForMemberState", 2), ("timeoutMillis", 20 * 60 * 1000)]) <NEW_LINE> sync_node_conn.admin.command(cmd) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> state = sync_node_conn.admin.command("replSetGetStatus").get("myState") <NEW_LINE> if state != 2: <NEW_LINE> <INDENT> if self._hook.tests_run == 0: <NEW_LINE> <INDENT> msg = "Initial sync node did not catch up after waiting 20 minutes" <NEW_LINE> self.logger.exception("{0} failed: {1}".format(self._hook.description, msg)) <NEW_LINE> raise errors.TestFailure(msg) <NEW_LINE> <DEDENT> self.logger.info("Initial sync node is in state %d, not state SECONDARY (2)." 
" Skipping BackgroundInitialSync hook for %s", state, self._base_test_name) <NEW_LINE> if self._hook.random_restarts < 1 and random.random() < 0.2: <NEW_LINE> <INDENT> self.logger.info( "randomly restarting initial sync in the middle of initial sync") <NEW_LINE> self.__restart_init_sync(sync_node) <NEW_LINE> self._hook.random_restarts += 1 <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> <DEDENT> except pymongo.errors.OperationFailure: <NEW_LINE> <INDENT> self.logger.info( "replSetGetStatus call failed in BackgroundInitialSync hook, skipping hook for %s", self._base_test_name) <NEW_LINE> return <NEW_LINE> <DEDENT> self._hook.random_restarts = 0 <NEW_LINE> self._js_test.run_test() <NEW_LINE> self.__restart_init_sync(sync_node) <NEW_LINE> <DEDENT> def __restart_init_sync(self, sync_node): <NEW_LINE> <INDENT> sync_node.teardown() <NEW_LINE> self.logger.info("Starting the initial sync node back up again...") <NEW_LINE> sync_node.setup() <NEW_LINE> sync_node.await_ready()
|
BackgroundInitialSyncTestCase class.
|
6259902430c21e258be9972c
|
class MaskGenerator(torch.nn.Module): <NEW_LINE> <INDENT> def __init__( self, input_dim: int, num_sources: int, kernel_size: int, num_feats: int, num_hidden: int, num_layers: int, num_stacks: int, ): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.input_dim = input_dim <NEW_LINE> self.num_sources = num_sources <NEW_LINE> self.input_norm = torch.nn.GroupNorm( num_groups=1, num_channels=input_dim, eps=1e-8 ) <NEW_LINE> self.input_conv = torch.nn.Conv1d( in_channels=input_dim, out_channels=num_feats, kernel_size=1 ) <NEW_LINE> self.receptive_field = 0 <NEW_LINE> self.conv_layers = torch.nn.ModuleList([]) <NEW_LINE> for s in range(num_stacks): <NEW_LINE> <INDENT> for l in range(num_layers): <NEW_LINE> <INDENT> multi = 2 ** l <NEW_LINE> self.conv_layers.append( ConvBlock( io_channels=num_feats, hidden_channels=num_hidden, kernel_size=kernel_size, dilation=multi, padding=multi, no_residual=(l == (num_layers - 1) and s == (num_stacks - 1)), ) ) <NEW_LINE> self.receptive_field += ( kernel_size if s == 0 and l == 0 else (kernel_size - 1) * multi ) <NEW_LINE> <DEDENT> <DEDENT> self.output_prelu = torch.nn.PReLU() <NEW_LINE> self.output_conv = torch.nn.Conv1d( in_channels=num_feats, out_channels=input_dim * num_sources, kernel_size=1, ) <NEW_LINE> <DEDENT> def forward(self, input: torch.Tensor) -> torch.Tensor: <NEW_LINE> <INDENT> batch_size = input.shape[0] <NEW_LINE> feats = self.input_norm(input) <NEW_LINE> feats = self.input_conv(feats) <NEW_LINE> output = 0.0 <NEW_LINE> for layer in self.conv_layers: <NEW_LINE> <INDENT> residual, skip = layer(feats) <NEW_LINE> if residual is not None: <NEW_LINE> <INDENT> feats = feats + residual <NEW_LINE> <DEDENT> output = output + skip <NEW_LINE> <DEDENT> output = self.output_prelu(output) <NEW_LINE> output = self.output_conv(output) <NEW_LINE> output = torch.sigmoid(output) <NEW_LINE> return output.view(batch_size, self.num_sources, self.input_dim, -1)
|
TCN (Temporal Convolution Network) Separation Module
Generates masks for separation.
Args:
input_dim (int): Input feature dimension, <N>.
num_sources (int): The number of sources to separate.
kernel_size (int): The convolution kernel size of conv blocks, <P>.
num_featrs (int): Input/output feature dimenstion of conv blocks, <B, Sc>.
num_hidden (int): Intermediate feature dimention of conv blocks, <H>
num_layers (int): The number of conv blocks in one stack, <X>.
num_stacks (int): The number of conv block stacks, <R>.
Note:
This implementation corresponds to the "non-causal" setting in the paper.
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
|
6259902421bff66bcd723b7a
|
class OpenFlowHandlers (object): <NEW_LINE> <INDENT> def __init__ (self): <NEW_LINE> <INDENT> self.handlers = [] <NEW_LINE> self._build_table() <NEW_LINE> <DEDENT> def handle_default (self, con, msg): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def add_handler (self, msg_type, handler): <NEW_LINE> <INDENT> if msg_type >= len(self.handlers): <NEW_LINE> <INDENT> missing = msg_type - len(self.handlers) + 1 <NEW_LINE> self.handlers.extend([self.handle_default] * missing) <NEW_LINE> <DEDENT> self.handlers[msg_type] = handler <NEW_LINE> <DEDENT> def _build_table (self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> super(OpenFlowHandlers, self)._build_table() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> for fname in dir(self): <NEW_LINE> <INDENT> h = getattr(self, fname) <NEW_LINE> if not fname.startswith('handle_'): continue <NEW_LINE> fname = fname.split('_',1)[1] <NEW_LINE> if not fname == fname.upper(): continue <NEW_LINE> assert callable(h) <NEW_LINE> of_type = of.ofp_type_rev_map.get('OFPT_' + fname) <NEW_LINE> if of_type is None: <NEW_LINE> <INDENT> log.error("No OF message type for %s", fname) <NEW_LINE> continue <NEW_LINE> <DEDENT> from_switch = getattr(of._message_type_to_class.get(of_type), '_from_switch', False) <NEW_LINE> assert from_switch, "%s is not switch-to-controller message" % (name,) <NEW_LINE> self.add_handler(of_type, h)
|
A superclass for a thing which handles incoming OpenFlow messages
The only public part of the interface is that it should have a "handlers"
attribute which is a list where the index is an OFPT and the value is a
function to call for that type with the parameters (connection, msg). Oh,
and the add_handler() method to add a handler.
The default implementation assumes these handler functions are all methods
with the names "handle_<TYPE>" and resolves those into the handlers list
on init.
|
62599024be8e80087fbbff90
|
class TextDocumentSaveReason: <NEW_LINE> <INDENT> MANUAL = 1 <NEW_LINE> AFTER_DELAY = 2 <NEW_LINE> FOCUS_OUT = 3
|
LSP text document saving action causes.
|
62599024d99f1b3c44d065bb
|
class SpannerProjectsInstancesOperationsCancelRequest(_messages.Message): <NEW_LINE> <INDENT> name = _messages.StringField(1, required=True)
|
A SpannerProjectsInstancesOperationsCancelRequest object.
Fields:
name: The name of the operation resource to be cancelled.
|
625990243eb6a72ae038b57d
|
class TestGoldStarWithoutNumbers(SimpleTestCase): <NEW_LINE> <INDENT> def test_a(self): <NEW_LINE> <INDENT> response = self.client.get( path=reverse('gold_star'), data={'score': 'a'}) <NEW_LINE> self.assertTemplateUsed(response, 'app/gold_star.html') <NEW_LINE> <DEDENT> def test_empty(self): <NEW_LINE> <INDENT> response = self.client.get( path=reverse('gold_star'), data={'score': ''}) <NEW_LINE> self.assertTemplateUsed(response, 'app/gold_star.html')
|
without numbers, it should render gold_star.html without answer in the context
|
62599024c432627299fa3f0c
|
class AccountBook: <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> year_data_path = os.path.join('Accountbook_project/Accountbook/dataset') <NEW_LINE> year_list = reversed(os.listdir(year_data_path)) <NEW_LINE> self.ledger = list() <NEW_LINE> for year in year_list: <NEW_LINE> <INDENT> year = int(year) <NEW_LINE> new_year_book = YearlyAccountBook(year=year) <NEW_LINE> self.ledger.append(new_year_book) <NEW_LINE> <DEDENT> <DEDENT> def __getitem__(self, year): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> year_book = [book for book in self.ledger if book.year==year][0] <NEW_LINE> return year_book <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print("해당 연도의 장부가 없습니다.") <NEW_LINE> <DEDENT> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "박성환의 {}년간 지출내역서".format(len(self.ledger)) <NEW_LINE> <DEDENT> def average_data(self): <NEW_LINE> <INDENT> self.total_entry = 0 <NEW_LINE> self.total_amount = 0 <NEW_LINE> for ledger in self.ledger: <NEW_LINE> <INDENT> self.total_entry += ledger.entry_count <NEW_LINE> self.total_amount += ledger.year_total <NEW_LINE> <DEDENT> if self.total_entry == 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.total_average = self.total_amount // self.total_entry <NEW_LINE> <DEDENT> def statistic_all(self): <NEW_LINE> <INDENT> self.average_data() <NEW_LINE> print('-' * 40) <NEW_LINE> print(" 지난 {}년간 총 사용보고서".format(len(self.ledger))) <NEW_LINE> print('-' * 40) <NEW_LINE> stats_length = max_expenditure_length([self.total_amount, self.total_entry, self.total_average]) <NEW_LINE> print("총 기록횟수 : {:{},}번".format(self.total_entry, stats_length)) <NEW_LINE> print("총 사용금액 : {:{},}원".format(self.total_amount, stats_length)) <NEW_LINE> print("\n총 평균금액 : {:{},}원".format(self.total_average, stats_length)) <NEW_LINE> print()
|
Account book for execution.
It loads all year, month and day account books
and will be used with functions(apps) in main.py
|
625990248c3a8732951f7471
|
class AuditlogHistoryField(generic.GenericRelation): <NEW_LINE> <INDENT> def __init__(self, pk_indexable=True, **kwargs): <NEW_LINE> <INDENT> kwargs['to'] = LogEntry <NEW_LINE> if pk_indexable: <NEW_LINE> <INDENT> kwargs['object_id_field'] = 'object_id' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> kwargs['object_id_field'] = 'object_pk' <NEW_LINE> <DEDENT> kwargs['content_type_field'] = 'content_type' <NEW_LINE> super(AuditlogHistoryField, self).__init__(**kwargs)
|
A subclass of django.contrib.contenttypes.generic.GenericRelation that sets some default variables. This makes it
easier to implement the audit log in models, and makes future changes easier.
By default this field will assume that your primary keys are numeric, simply because this is the most common case.
However, if you have a non-integer primary key, you can simply pass pk_indexable=False to the constructor, and
Auditlog will fall back to using a non-indexed text based field for this model.
|
625990245e10d32532ce4091
|
class InvenioBibSortWasherNotImplementedError(Exception): <NEW_LINE> <INDENT> pass
|
Exception raised when a washer method
defined in the bibsort config file is not implemented
|
62599024a8ecb03325872138
|
class FrameLookup(object): <NEW_LINE> <INDENT> def __init__(self, names_and_lengths): <NEW_LINE> <INDENT> self.files, self.lengths = zip(*names_and_lengths) <NEW_LINE> self.terminals = numpy.cumsum([s[1] for s in names_and_lengths]) <NEW_LINE> <DEDENT> def __getitem__(self, i): <NEW_LINE> <INDENT> idx = (i < self.terminals).nonzero()[0][0] <NEW_LINE> frame_no = i <NEW_LINE> if idx > 0: <NEW_LINE> <INDENT> frame_no -= self.terminals[idx - 1] <NEW_LINE> <DEDENT> return self.files[idx], self.lengths[idx], frame_no <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return self.terminals[-1] <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> raise TypeError('iteration not supported')
|
Class encapsulating the logic of turning a frame index into a
collection of files into the frame index of a specific video file.
Item-indexing on this object will yield a (filename, nframes, frame_no)
tuple, where nframes is the number of frames in the given file (mainly
for checking that we're far enough from the end so that we can
sample a big enough chunk).
Parameters
----------
names_and_lengths : WRITEME
|
6259902556b00c62f0fb37da
|
class hsl(_trivalue_array): <NEW_LINE> <INDENT> h = property(**_trivalue_array._0()) <NEW_LINE> s = property(**_trivalue_array._1()) <NEW_LINE> l = property(**_trivalue_array._2()) <NEW_LINE> def __init__(self, *pargs): <NEW_LINE> <INDENT> super(hsl, self).__init__(*pargs) <NEW_LINE> <DEDENT> def rgb(self): <NEW_LINE> <INDENT> return hsl.torgb(self()) <NEW_LINE> <DEDENT> def torgb(hsl): <NEW_LINE> <INDENT> H = hsl[0] / 60 <NEW_LINE> S = hsl[1] / 100 <NEW_LINE> L = hsl[2] / 100 <NEW_LINE> C = (1 - abs(2 * L - 1)) * S <NEW_LINE> X = C * ( 1 - abs(H % 2 - 1) ) <NEW_LINE> RGB = { 0 : [C,X,0], 1 : [X,C,0], 2 : [0,C,X], 3 : [0,X,C], 4 : [X,0,C], 5 : [C,0,X], }[ ( hsl[0] % 360) // 60 ] <NEW_LINE> m = L - C/2 <NEW_LINE> return [ round( (RGB[0] + m) * 255 ), round( (RGB[1] + m) * 255 ), round( (RGB[2] + m) * 255 ) ] <NEW_LINE> <DEDENT> def fromrgb(rgb): <NEW_LINE> <INDENT> R = rgb[0] / 255 <NEW_LINE> G = rgb[1] / 255 <NEW_LINE> B = rgb[2] / 255 <NEW_LINE> Imax = max_index(R,G,B) <NEW_LINE> Cmax = max(R,G,B) <NEW_LINE> Cmin = min(R,G,B) <NEW_LINE> delta = Cmax - Cmin <NEW_LINE> if delta == 0: <NEW_LINE> <INDENT> H = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> H = { 0 : 60 * ( (G - B) / delta % 6 ), 1 : 60 * ( (B - R) / delta + 2 ), 2 : 60 * ( (R - G) / delta + 4 ) }[ Imax ] <NEW_LINE> <DEDENT> L = (Cmax + Cmin) / 2 <NEW_LINE> S = { True : 0, False : delta / (1-abs(2 * L - 1)) if L != 1 and L != 0 else delta }[ delta == 0 ] <NEW_LINE> return[ round(H), round(S * 100), round(L * 100) ]
|
Hue Saturation Light class for color manipulation
|
625990258c3a8732951f7472
|
class TestsEoxFSLoader(TestCase): <NEW_LINE> <INDENT> @patch('eox_theming.configuration.ThemingConfiguration.theming_helpers') <NEW_LINE> @patch('eox_theming.configuration.ThemingConfiguration.get_parent_or_default_theme') <NEW_LINE> def test_returned_default_theme_template_sources(self, parent_mock, helper_mock): <NEW_LINE> <INDENT> theme = mock.Mock() <NEW_LINE> theme.theme_dir_name = 'bragi' <NEW_LINE> theme.name = 'bragi' <NEW_LINE> helper_mock.get_current_theme.return_value = theme <NEW_LINE> helper_mock.get_project_root_name.return_value = 'lms' <NEW_LINE> parent_theme = mock.Mock() <NEW_LINE> parent_theme.theme_dir_name = 'default-theme' <NEW_LINE> parent_theme.name = 'default-theme' <NEW_LINE> parent_theme.template_dirs = Path('/ednx/var/themes/edx-platform/default-theme') <NEW_LINE> parent_mock.return_value = parent_theme <NEW_LINE> parent_theme_sources = EoxThemeFilesystemLoader.get_parent_theme_template_sources() <NEW_LINE> self.assertEqual(parent_theme_sources, parent_theme.template_dirs) <NEW_LINE> <DEDENT> @patch('eox_theming.configuration.ThemingConfiguration.theming_helpers') <NEW_LINE> @patch('eox_theming.theming.template_loaders.EoxThemeFilesystemLoader.get_grandparent_theme_template_sources') <NEW_LINE> @patch('eox_theming.theming.template_loaders.EoxThemeFilesystemLoader.get_parent_theme_template_sources') <NEW_LINE> def test_returned_theme_template_sources(self, parent_mock, grandparent_mock, helper_mock): <NEW_LINE> <INDENT> theme = mock.Mock() <NEW_LINE> theme.theme_dir_name = 'bragi' <NEW_LINE> theme.name = 'bragi' <NEW_LINE> theme.template_dirs = [Path('/ednx/var/themes/edx-platform/bragi')] <NEW_LINE> helper_mock.get_current_theme.return_value = theme <NEW_LINE> helper_mock.get_project_root_name.return_value = 'lms' <NEW_LINE> parent_theme_template_dirs = [Path('/ednx/var/themes/edx-platform/parent-theme')] <NEW_LINE> grandparent_theme_template_dirs = [Path('/ednx/var/themes/edx-platform/grandparent-theme')] <NEW_LINE> template_dirs = 
theme.template_dirs + parent_theme_template_dirs + grandparent_theme_template_dirs <NEW_LINE> parent_mock.return_value = parent_theme_template_dirs <NEW_LINE> grandparent_mock.return_value = grandparent_theme_template_dirs <NEW_LINE> theme_template_sources = EoxThemeFilesystemLoader.get_theme_template_sources() <NEW_LINE> self.assertEqual(theme_template_sources, template_dirs)
|
Tests for the eox_theming filesystem loader
|
62599025d99f1b3c44d065bd
|
class Paginator: <NEW_LINE> <INDENT> def __init__(self, prefix='```', suffix='```', max_size=2000): <NEW_LINE> <INDENT> self.prefix = prefix <NEW_LINE> self.suffix = suffix <NEW_LINE> self.max_size = max_size - len(suffix) <NEW_LINE> self._current_page = [prefix] <NEW_LINE> self._count = len(prefix) + 1 <NEW_LINE> self._pages = [] <NEW_LINE> <DEDENT> def add_line(self, line='', *, empty=False): <NEW_LINE> <INDENT> if self._count + len(line) + 1 > self.max_size: <NEW_LINE> <INDENT> self.close_page() <NEW_LINE> <DEDENT> self._count += len(line) + 1 <NEW_LINE> self._current_page.append(line) <NEW_LINE> if empty: <NEW_LINE> <INDENT> self._current_page.append('') <NEW_LINE> self._count += 1 <NEW_LINE> <DEDENT> <DEDENT> def close_page(self): <NEW_LINE> <INDENT> self._current_page.append(self.suffix) <NEW_LINE> self._pages.append('\n'.join(self._current_page)) <NEW_LINE> self._current_page = [self.prefix] <NEW_LINE> self._count = len(self.prefix) + 1 <NEW_LINE> <DEDENT> @property <NEW_LINE> def pages(self): <NEW_LINE> <INDENT> if len(self._current_page) > 1: <NEW_LINE> <INDENT> self.close_page() <NEW_LINE> <DEDENT> return self._pages <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> fmt = '<Paginator prefix: {0.prefix} suffix: {0.suffix} max_size: {0.max_size} count: {0._count}>' <NEW_LINE> return fmt.format(self)
|
A class that aids in paginating code blocks for Discord messages.
Attributes
-----------
prefix: str
The prefix inserted to every page. e.g. three backticks.
suffix: str
The suffix appended at the end of every page. e.g. three backticks.
max_size: int
The maximum amount of codepoints allowed in a page.
|
62599025d164cc6175821e92
|
class Model(Wireframe): <NEW_LINE> <INDENT> def __init__(self, position, scale=1.0, **kwargs): <NEW_LINE> <INDENT> super(Model, self).__init__(**kwargs) <NEW_LINE> self.position = position <NEW_LINE> self.scale = scale
|
Represents a wireframe model in the game world.
Parameters
----------
position : numpy array (of size 3)
scale : float, optional
Attributes
----------
position : numpy array
scale : float
|
62599025a8ecb0332587213a
|
class Car(): <NEW_LINE> <INDENT> def __init__(self, make, model, year): <NEW_LINE> <INDENT> self.make = make <NEW_LINE> self.model = model <NEW_LINE> self.year = year <NEW_LINE> self.odometer_reading = 0 <NEW_LINE> <DEDENT> def get_descriptive_name(self): <NEW_LINE> <INDENT> long_name = str(self.year) + " " + self.make + " " + self.model <NEW_LINE> return long_name <NEW_LINE> <DEDENT> def read_odometer(self): <NEW_LINE> <INDENT> print('这辆汽车的里程是' + str(self.odometer_reading) + '公里') <NEW_LINE> <DEDENT> def updata_odometer(self, mileage): <NEW_LINE> <INDENT> if mileage >= self.odometer_reading: <NEW_LINE> <INDENT> self.odometer_reading = mileage <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('里程数不能忘回调') <NEW_LINE> <DEDENT> <DEDENT> def increment_odomete(self, mileage): <NEW_LINE> <INDENT> self.odometer_reading += mileage <NEW_LINE> <DEDENT> def fill_gas_tank(self): <NEW_LINE> <INDENT> print('汽车最大容量5L')
|
一次模拟汽车的简单尝试
|
625990251d351010ab8f4a32
|
class Meta: <NEW_LINE> <INDENT> model = RegistrationRequest <NEW_LINE> exclude = ('uuid', 'created')
|
Meta class connecting to RegistrationRequest object model.
|
625990255166f23b2e2442f1
|
class Lines(Shape): <NEW_LINE> <INDENT> def __init__(self, surface, rgb, pos_list): <NEW_LINE> <INDENT> Shape.__init__(self, pos_list, surface, rgb) <NEW_LINE> self.color = rgb <NEW_LINE> self.point_list = pos_list <NEW_LINE> <DEDENT> def draw(self): <NEW_LINE> <INDENT> pygame.draw.lines(self.surface, self.color, False, self.point_list)
|
Connected lines shape
|
62599025d99f1b3c44d065bf
|
class RubberBandPickUp(Collectible): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> Collectible.__init__(self) <NEW_LINE> self.image = pygame.image.load("rubber_band.png") <NEW_LINE> self.rect = self.image.get_rect() <NEW_LINE> self.name = "Rubber Band" <NEW_LINE> self.stock = RUBBER_BAND_STOCK
|
A class to represent rubber bands that Snake can use as a weapon.
|
6259902563f4b57ef0086501
|
class RoiPoolingConv(Layer): <NEW_LINE> <INDENT> def __init__(self, pool_size, num_rois, **kwargs): <NEW_LINE> <INDENT> self.dim_ordering = K.common.image_dim_ordering() <NEW_LINE> self.pool_size = pool_size <NEW_LINE> self.num_rois = num_rois <NEW_LINE> super(RoiPoolingConv, self).__init__(**kwargs) <NEW_LINE> <DEDENT> def build(self, input_shape): <NEW_LINE> <INDENT> self.nb_channels = input_shape[0][3] <NEW_LINE> <DEDENT> def compute_output_shape(self, input_shape): <NEW_LINE> <INDENT> return None, self.num_rois, self.pool_size, self.pool_size, self.nb_channels <NEW_LINE> <DEDENT> def call(self, x, mask=None): <NEW_LINE> <INDENT> assert(len(x) == 2) <NEW_LINE> img = x[0] <NEW_LINE> rois = x[1] <NEW_LINE> input_shape = K.shape(img) <NEW_LINE> outputs = [] <NEW_LINE> for roi_idx in range(self.num_rois): <NEW_LINE> <INDENT> x = rois[0, roi_idx, 0] <NEW_LINE> y = rois[0, roi_idx, 1] <NEW_LINE> w = rois[0, roi_idx, 2] <NEW_LINE> h = rois[0, roi_idx, 3] <NEW_LINE> x = K.cast(x, 'int32') <NEW_LINE> y = K.cast(y, 'int32') <NEW_LINE> w = K.cast(w, 'int32') <NEW_LINE> h = K.cast(h, 'int32') <NEW_LINE> rs = tf.image.resize(img[:, y:y+h, x:x+w, :], (self.pool_size, self.pool_size)) <NEW_LINE> outputs.append(rs) <NEW_LINE> <DEDENT> final_output = K.concatenate(outputs, axis=0) <NEW_LINE> final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels)) <NEW_LINE> final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4)) <NEW_LINE> return final_output <NEW_LINE> <DEDENT> def get_config(self): <NEW_LINE> <INDENT> config = {'pool_size': self.pool_size, 'num_rois': self.num_rois} <NEW_LINE> base_config = super(RoiPoolingConv, self).get_config() <NEW_LINE> return dict(list(base_config.items()) + list(config.items()))
|
ROI pooling layer for 2D inputs.
See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,
K. He, X. Zhang, S. Ren, J. Sun
# Arguments
pool_size: int
Size of pooling region to use. pool_size = 7 will result in a 7x7 region.
num_rois: number of regions of interest to be used
# Input shape
list of two 4D tensors [X_img,X_roi] with shape:
X_img:
`(1, rows, cols, channels)`
X_roi:
`(1,num_rois,4)` list of rois, with ordering (x,y,w,h)
# Output shape
5D tensor with shape:
`(1, num_rois, pool_size, pool_size, channels)`
|
6259902530c21e258be99734
|
class AlphaBetaAgent(MultiAgentSearchAgent): <NEW_LINE> <INDENT> def alBeAgent(self, state, depth, index, alpha, beta): <NEW_LINE> <INDENT> if index == 0: <NEW_LINE> <INDENT> return (self.maxAgent(state, depth, index, alpha, beta)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return (self.minAgent(state, depth, index, alpha, beta)) <NEW_LINE> <DEDENT> <DEDENT> def maxAgent(self, state, depth, index, alpha, beta): <NEW_LINE> <INDENT> v = -float('inf') <NEW_LINE> mmact = '' <NEW_LINE> if state.isWin() or state.isLose(): <NEW_LINE> <INDENT> return self.evaluationFunction(state) <NEW_LINE> <DEDENT> actions = state.getLegalActions(index) <NEW_LINE> for action in actions: <NEW_LINE> <INDENT> nextState = state.generateSuccessor(index, action) <NEW_LINE> val = self.alBeAgent(nextState, depth, index + 1, alpha, beta) <NEW_LINE> if val > beta: <NEW_LINE> <INDENT> return val <NEW_LINE> <DEDENT> if val > v: <NEW_LINE> <INDENT> v = val <NEW_LINE> mmact = action <NEW_LINE> <DEDENT> alpha = max(alpha,v) <NEW_LINE> <DEDENT> if depth == 1: <NEW_LINE> <INDENT> return mmact <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return v <NEW_LINE> <DEDENT> <DEDENT> def minAgent(self, state, depth, index, alpha, beta): <NEW_LINE> <INDENT> v = float('inf') <NEW_LINE> mmact = '' <NEW_LINE> agentCount = state.getNumAgents() <NEW_LINE> if state.isWin() or state.isLose(): <NEW_LINE> <INDENT> return self.evaluationFunction(state) <NEW_LINE> <DEDENT> actions = state.getLegalActions(index) <NEW_LINE> for action in actions: <NEW_LINE> <INDENT> nextState = state.generateSuccessor(index, action) <NEW_LINE> if index == agentCount - 1: <NEW_LINE> <INDENT> if depth == self.depth: <NEW_LINE> <INDENT> val = self.evaluationFunction(nextState) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> val = self.maxAgent(nextState, depth + 1, 0, alpha, beta) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> val = self.minAgent(nextState, depth, index + 1, alpha, beta) <NEW_LINE> <DEDENT> if val < alpha: <NEW_LINE> 
<INDENT> return val <NEW_LINE> <DEDENT> if val < v: <NEW_LINE> <INDENT> v = val <NEW_LINE> mmact = action <NEW_LINE> <DEDENT> beta = min(beta,v) <NEW_LINE> <DEDENT> return v <NEW_LINE> <DEDENT> def getAction(self, gameState): <NEW_LINE> <INDENT> state = gameState <NEW_LINE> depth = 1 <NEW_LINE> index = 0 <NEW_LINE> alpha = -float('inf') <NEW_LINE> beta = float('inf') <NEW_LINE> act = self.alBeAgent(state, depth, index, alpha, beta) <NEW_LINE> return act
|
Your minimax agent with alpha-beta pruning (question 3)
|
625990251f5feb6acb163b10
|
class CustomJSONEncoder(JSONEncoder): <NEW_LINE> <INDENT> def default(self, obj): <NEW_LINE> <INDENT> if isinstance(obj, Question): <NEW_LINE> <INDENT> return { 'question': obj.question, 'answer': obj.answer, 'distractors': obj.distractors, } <NEW_LINE> <DEDENT> return super().default(obj)
|
Class to turn Question object to JSON
|
62599025d18da76e235b78dd
|
class RegistrationSerializer(serializers.ModelSerializer): <NEW_LINE> <INDENT> password = serializers.CharField( max_length=128, min_length=8, write_only=True ) <NEW_LINE> token = serializers.CharField(max_length=255, read_only=True) <NEW_LINE> class Meta: <NEW_LINE> <INDENT> model = User <NEW_LINE> fields = ['email', 'username', 'password', 'token'] <NEW_LINE> <DEDENT> def create(self, validated_data): <NEW_LINE> <INDENT> return User.objects.create_user(**validated_data)
|
Serializer that handles registration requests and creates a new user.
|
625990251d351010ab8f4a35
|
class WorkSubmissionDownloadTest(test_utils.GCIDjangoTestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> super(WorkSubmissionDownloadTest, self).setUp() <NEW_LINE> self.init() <NEW_LINE> self.timeline_helper.tasksPubliclyVisible() <NEW_LINE> self.task = _createTestTask(self.program, self.org) <NEW_LINE> <DEDENT> def testXSS(self): <NEW_LINE> <INDENT> xss_payload = '><img src=http://www.google.com/images/srpr/logo4w.png>' <NEW_LINE> url = '/gci/work/download/%s/%s?id=%s' % ( self.task.program.key().name(), self.task.key().id(), xss_payload) <NEW_LINE> response = self.get(url) <NEW_LINE> self.assertResponseBadRequest(response) <NEW_LINE> self.assertNotIn(xss_payload, response.content) <NEW_LINE> self.assertIn(html.escape(xss_payload), response.content)
|
Tests the WorkSubmissionDownload class.
|
62599025c432627299fa3f12
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.