id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1747390 | <filename>syft/generic/pointers/object_pointer.py
from typing import List
from typing import Union
from typing import TYPE_CHECKING
import syft
from syft import exceptions
from syft.messaging.message import ForceObjectDeleteMessage
from syft.generic.frameworks.hook import hook_args
from syft.generic.object import AbstractObject
# this if statement avoids circular imports between base.py and pointer.py
if TYPE_CHECKING:
from syft.workers.base import BaseWorker
class ObjectPointer(AbstractObject):
    """A pointer to a remote object.

    An ObjectPointer forwards all API calls to the remote. ObjectPointer
    objects point to objects. They exist to mimic the entire API of an
    object, but instead of computing a function locally (such as addition,
    subtraction, etc.) they forward the computation to a remote machine as
    specified by ``self.location``. Specifically, every ObjectPointer has an
    object located somewhere that it points to (they should never exist by
    themselves).

    The objects being pointed to can be on the same machine or (more
    commonly) on a different one. Note further that an ObjectPointer does
    not know the nature of how it sends messages to the object it points to
    (whether over socket, http, or some other protocol) as that
    functionality is abstracted in the BaseWorker object in
    ``self.location``.
    """

    def __init__(
        self,
        location: "BaseWorker" = None,
        id_at_location: Union[str, int] = None,
        owner: "BaseWorker" = None,
        id: Union[str, int] = None,
        garbage_collect_data: bool = True,
        point_to_attr: str = None,
        tags: List[str] = None,
        description: str = None,
    ):
        """Initializes an ObjectPointer.

        Args:
            location: An optional BaseWorker object which points to the worker
                on which this pointer's object can be found.
            id_at_location: An optional string or integer id of the object
                being pointed to.
            owner: An optional BaseWorker object to specify the worker on which
                the pointer is located. It is also where the pointer is
                registered if register is set to True. Note that this is
                different from the location parameter that specifies where the
                pointer points to.
            id: An optional string or integer id of the PointerTensor.
            garbage_collect_data: If true (default), delete the remote object
                when the pointer is deleted.
            point_to_attr: string which can tell a pointer to not point directly
                to an object, but to point to an attribute of that object such
                as .child or .grad. Note the string can be a chain (i.e.,
                .child.child.child or .grad.child.child). Defaults to None,
                which means don't point to any attr, just point to the object
                corresponding to the id_at_location.
            tags: Optional list of tags describing the pointed-to object.
            description: Optional human-readable description.
        """
        super().__init__(id=id, owner=owner, tags=tags, description=description)

        self.location = location
        self.id_at_location = id_at_location
        self.garbage_collect_data = garbage_collect_data
        self.point_to_attr = point_to_attr

    @classmethod
    def handle_func_command(cls, command):
        """
        Receive an instruction for a function to be applied on a Pointer,
        Get the remote location to send the command, send it and get a
        pointer to the response, return.

        :param command: instruction of a function command: (command name,
            None, arguments[, kwargs])
        :return: the response of the function command
        """
        pointer = cls.find_a_pointer(command)

        # Get info on who needs to send where the command
        owner = pointer.owner
        location = pointer.location

        # Send the command
        response = owner.send_command(location, command)

        return response

    @classmethod
    def find_a_pointer(cls, command):
        """
        Find and return the first pointer in the args object, using a trick
        with the raising error RemoteObjectFoundError.

        Returns None when no pointer is found among the arguments.
        """
        try:
            cmd, _, args, kwargs = command
            _ = hook_args.unwrap_args_from_function(cmd, args, kwargs)
        except exceptions.RemoteObjectFoundError as err:
            return err.pointer
        # Bug fix: the original fell through to `return pointer` with
        # `pointer` unbound when unwrapping raised no RemoteObjectFoundError,
        # which produced an UnboundLocalError. Returning None makes the
        # "no pointer found" case explicit.
        return None

    def get(self, deregister_ptr: bool = True):
        """Requests the object being pointed to.

        The object to which the pointer points will be requested, serialized
        and returned.

        Note:
            This will typically mean that the remote object will be
            removed/destroyed.

        Args:
            deregister_ptr (bool, optional): this determines whether to
                deregister this pointer from the pointer's owner during this
                method. This defaults to True because the main reason people
                use this method is to move the tensor from the location to the
                local one, at which time the pointer has no use.

        Returns:
            An AbstractObject object which is the tensor (or chain) that this
            object used to point to on a location.

        TODO: add param get_copy which doesn't destroy remote if true.
        """
        if self.point_to_attr is not None:
            raise exceptions.CannotRequestObjectAttribute(
                "You called .get() on a pointer to"
                " a tensor attribute. This is not yet"
                " supported. Call .clone().get() instead."
            )

        # if the pointer happens to be pointing to a local object,
        # just return that object (this is an edge case)
        if self.location == self.owner:
            obj = self.owner.get_obj(self.id_at_location)
            if hasattr(obj, "child"):
                obj = obj.child
        else:
            # get tensor from location
            obj = self.owner.request_obj(self.id_at_location, self.location)

        # Remove this pointer by default
        if deregister_ptr:
            self.owner.de_register_obj(self)

        if self.garbage_collect_data:
            # data already retrieved, do not collect any more.
            self.garbage_collect_data = False

        return obj

    def __str__(self):
        """Returns a string version of this pointer.

        This is primarily for end users to quickly see things about the
        object. This tostring shouldn't be used for anything else though as
        it's likely to change. (aka, don't try to parse it to extract
        information. Read the attribute you need directly). Also, don't use
        this to-string as a serialized form of the pointer.
        """
        type_name = type(self).__name__
        out = (
            f"["
            f"{type_name} | "
            f"{str(self.owner.id)}:{self.id}"
            " -> "
            f"{str(self.location.id)}:{self.id_at_location}"
            f"]"
        )

        if self.point_to_attr is not None:
            out += "::" + str(self.point_to_attr).replace(".", "::")

        big_str = False

        if self.tags is not None and len(self.tags):
            big_str = True
            out += "\n\tTags: "
            for tag in self.tags:
                out += str(tag) + " "

        if big_str and hasattr(self, "shape"):
            out += "\n\tShape: " + str(self.shape)

        if self.description is not None:
            big_str = True
            out += "\n\tDescription: " + str(self.description).split("\n")[0] + "..."

        return out

    def __repr__(self):
        """Returns the to-string method.

        When called using __repr__, most commonly seen when returned as cells
        in Jupyter notebooks.
        """
        return self.__str__()

    def __del__(self):
        """This method garbage collects the object this pointer is pointing to.

        By default, PySyft assumes that every object only has one pointer to
        it. Thus, if the pointer gets garbage collected, we want to
        automatically garbage collect the object being pointed to.
        """
        # if .get() gets called on the pointer before this method is called, then
        # the remote object has already been removed. This results in an error on
        # this next line because self no longer has .owner. Thus, we need to check
        # first here and not try to call self.owner.anything if self doesn't have
        # .owner anymore.
        if hasattr(self, "owner") and self.garbage_collect_data:
            # attribute pointers are not in charge of GC
            if self.point_to_attr is None:
                self.owner.send_msg(ForceObjectDeleteMessage(self.id_at_location), self.location)

    def _create_attr_name_string(self, attr_name):
        # Extend the existing attribute chain (e.g. ".child.grad") or start
        # a new one with just attr_name.
        if self.point_to_attr is not None:
            point_to_attr = "{}.{}".format(self.point_to_attr, attr_name)
        else:
            point_to_attr = attr_name
        return point_to_attr

    def attr(self, attr_name):
        """Create (and cache on self) a pointer to an attribute of the remote object."""
        attr_ptr = syft.ObjectPointer(
            id=self.id,
            owner=self.owner,
            location=self.location,
            id_at_location=self.id_at_location,
            point_to_attr=self._create_attr_name_string(attr_name),
        )  # .wrap()
        self.__setattr__(attr_name, attr_ptr)
        return attr_ptr

    def setattr(self, name, value):
        """Set an attribute on the remote object this pointer points to."""
        self.owner.send_command(
            message=("__setattr__", self, (name, value), {}), recipient=self.location
        )
| StarcoderdataPython |
27843 | <filename>chatting/models.py
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Chat(models.Model):
    """A single chat message sent from one user to another."""

    content = models.TextField()
    # Distinct related_names are required because both foreign keys target
    # the same model; without them Django's system checks fail with reverse
    # accessor clashes (E304/E305). on_delete is mandatory from Django 2.0
    # onward; CASCADE matches the old implicit default behavior.
    sender = models.ForeignKey(User, on_delete=models.CASCADE, related_name='sent_chats')
    receiver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='received_chats')
    # Timestamp set automatically when the row is first created.
    date_created = models.DateTimeField(auto_now_add=True)
1752621 | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import services
import sims4.commands
from typing import Iterator, Callable, Union
from sims.sim import Sim
from sims.sim_info import SimInfo
from objects import ALL_HIDDEN_REASONS
from sims4communitylib.utils.common_function_utils import CommonFunctionUtils
class CommonSimUtils:
    """Utilities for retrieving sims in different ways.

    .. note::

        Available commands:

        - `s4clib_testing.display_name_of_currently_active_sim`
        - `s4clib_testing.display_names_of_all_sims`
    """

    @staticmethod
    def get_active_sim() -> Sim:
        """get_active_sim()

        Retrieve a Sim object of the Currently Active Sim.

        .. note:: The Active Sim is the Sim with the Plumbob above their head.

        :return: An instance of the Active Sim.
        :rtype: Sim
        """
        client = services.client_manager().get_first_client()
        return client.active_sim

    @staticmethod
    def get_active_sim_id() -> int:
        """get_active_sim_id()

        Retrieve the decimal identifier for the Currently Active Sim.

        .. note:: The Active Sim is the Sim with the Plumbob above their head.

        :return: The decimal identifier of the active Sim or -1 if the active Sim does not have an id.
        :rtype: int
        """
        active_sim_info = CommonSimUtils.get_active_sim_info()
        if active_sim_info is None:
            return -1
        return CommonSimUtils.get_sim_id(active_sim_info)

    @staticmethod
    def get_active_sim_info() -> SimInfo:
        """get_active_sim_info()

        Retrieve a SimInfo object of the Currently Active Sim.

        :return: The SimInfo of the Active Sim.
        :rtype: SimInfo
        """
        client = services.client_manager().get_first_client()
        return client.active_sim_info

    @staticmethod
    def get_sim_info_of_sim_with_name(first_name: str, last_name: str) -> Union[SimInfo, None]:
        """get_sim_info_of_sim_with_name(first_name, last_name)

        Retrieve a SimInfo object for the first Sim with the specified First and Last Name.

        :param first_name: A first name to look for.
        :type first_name: str
        :param last_name: A last name to look for.
        :type last_name: str
        :return: The first Sim found with the specified first and last name or None if no Sim is found.
        :rtype: Union[SimInfo, None]
        """
        # Take only the first match yielded by the generator.
        for sim_info in CommonSimUtils.get_sim_info_for_all_sims_with_name_generator(first_name, last_name):
            return sim_info
        return None

    @staticmethod
    def get_sim_info_for_all_sims_with_name_generator(first_name: str, last_name: str) -> Iterator[SimInfo]:
        """get_sim_info_for_all_sims_with_name_generator(first_name, last_name)

        Retrieve a SimInfo object for each and every Sim with the specified First and Last Name.

        :param first_name: A first name to look for.
        :type first_name: str
        :param last_name: A last name to look for.
        :type last_name: str
        :return: An iterable of Sims found with the specified first and last name.
        :rtype: Iterator[SimInfo]
        """
        # Imported locally — presumably to avoid a circular import with the
        # name-utils module; TODO confirm.
        from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
        # Case-insensitive comparison: normalize both sides to lowercase.
        first_name = first_name.lower()
        last_name = last_name.lower()

        def _first_and_last_name(sim_info: SimInfo) -> bool:
            return CommonSimNameUtils.get_first_name(sim_info).lower() == first_name and CommonSimNameUtils.get_last_name(sim_info).lower() == last_name

        return CommonSimUtils.get_sim_info_for_all_sims_generator(include_sim_callback=_first_and_last_name)

    @staticmethod
    def get_all_sims_generator(include_sim_callback: Callable[[SimInfo], bool]=None) -> Iterator[Sim]:
        """get_all_sims_generator(include_sim_callback=None)

        Retrieve a Sim object for each and every Sim (including hidden Sims).

        :param include_sim_callback: If the result of this callback is True, the sim will be included in the results. If set to None, All sims will be included.
        :type include_sim_callback: Callable[[SimInfo], bool], optional
        :return: An iterable of all Sims matching the `include_sim_callback` filter.
        :rtype: Iterator[Sim]
        """
        for sim_info in CommonSimUtils.get_sim_info_for_all_sims_generator(include_sim_callback=include_sim_callback):
            # Skip Sims that are not currently instanced in the world.
            sim_instance = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
            if sim_instance is None:
                continue
            yield sim_instance

    @staticmethod
    def get_sim_info_for_all_sims_generator(include_sim_callback: Callable[[SimInfo], bool]=None) -> Iterator[SimInfo]:
        """get_sim_info_for_all_sims_generator(include_sim_callback=None)

        Retrieve a SimInfo object for each and every sim.

        :param include_sim_callback: If the result of this callback is True, the sim will be included in the results. If set to None, All sims will be included.
        :type include_sim_callback: Callable[[SimInfo], bool], optional
        :return: An iterable of all Sims matching the `include_sim_callback` filter.
        :rtype: Iterator[SimInfo]
        """
        # Snapshot the manager's contents into a tuple so the iteration is
        # not affected if the underlying collection changes while yielding.
        sim_info_list = tuple(services.sim_info_manager().get_all())
        for sim_info in sim_info_list:
            if sim_info is None:
                continue
            if include_sim_callback is not None and include_sim_callback(sim_info) is False:
                continue
            yield sim_info

    @staticmethod
    def get_instanced_sim_info_for_all_sims_generator(include_sim_callback: Callable[[SimInfo], bool]=None) -> Iterator[SimInfo]:
        """get_instanced_sim_info_for_all_sims_generator(include_sim_callback=None)

        Retrieve a SimInfo object for each and every sim.

        .. note:: Only SimInfo with a Sim instance (:func:`~get_sim_instance`) will be returned.

        :param include_sim_callback: If the result of this callback is True, the sim will be included in the results. If set to None, All sims will be included.
        :type include_sim_callback: Callable[[SimInfo], bool], optional
        :return: An iterable of all Sims matching the `include_sim_callback` filter.
        :rtype: Iterator[SimInfo]
        """
        def _is_instanced(_sim_info: SimInfo) -> bool:
            return _sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) is not None

        # Combine the "is instanced" predicate with the caller-supplied one.
        include_sim_callback = CommonFunctionUtils.run_predicates_as_one((_is_instanced, include_sim_callback))
        for sim_info in CommonSimUtils.get_sim_info_for_all_sims_generator(include_sim_callback=include_sim_callback):
            yield sim_info

    @staticmethod
    def get_sim_id(sim_identifier: Union[int, Sim, SimInfo]) -> int:
        """get_sim_id(sim_identifier)

        Retrieve a SimId (int) from a Sim identifier.

        :param sim_identifier: The identifier or instance of a Sim.
        :type sim_identifier: Union[int, Sim, SimInfo]
        :return: An identifier for the Sim instance.
        :rtype: int
        """
        if sim_identifier is None:
            return 0
        if isinstance(sim_identifier, int):
            return sim_identifier
        if isinstance(sim_identifier, Sim):
            return sim_identifier.sim_id
        if isinstance(sim_identifier, SimInfo):
            return sim_identifier.id
        # Unknown type: returned unchanged rather than raising.
        return sim_identifier

    @staticmethod
    def get_sim_info(sim_identifier: Union[int, Sim, SimInfo]) -> Union[SimInfo, None]:
        """get_sim_info(sim_identifier)

        Retrieve a SimInfo instance from a sim identifier.

        :param sim_identifier: The identifier or instance of a Sim to use.
        :type sim_identifier: Union[int, Sim, SimInfo]
        :return: The SimInfo of the specified Sim instance or None if SimInfo is not found.
        :rtype: Union[SimInfo, None]
        """
        if sim_identifier is None or isinstance(sim_identifier, SimInfo):
            return sim_identifier
        if isinstance(sim_identifier, Sim):
            return sim_identifier.sim_info
        if isinstance(sim_identifier, int):
            return services.sim_info_manager().get(sim_identifier)
        # Unknown type: returned unchanged rather than raising.
        return sim_identifier

    @staticmethod
    def get_sim_instance(sim_identifier: Union[int, Sim, SimInfo]) -> Union[Sim, None]:
        """get_sim_instance(sim_identifier)

        Retrieve a Sim instance from a sim identifier.

        :param sim_identifier: The identifier or instance of a Sim to use.
        :type sim_identifier: Union[int, Sim, SimInfo]
        :return: The instance of the specified Sim or None if no instance was found.
        :rtype: Union[Sim, None]
        """
        if sim_identifier is None or isinstance(sim_identifier, Sim):
            return sim_identifier
        if isinstance(sim_identifier, SimInfo):
            return sim_identifier.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
        if isinstance(sim_identifier, int):
            # Resolve the id to a SimInfo first, then recurse to get the instance.
            sim_info = services.sim_info_manager().get(sim_identifier)
            if sim_info is None:
                return None
            return CommonSimUtils.get_sim_instance(sim_info)
        # Unknown type: returned unchanged rather than raising.
        return sim_identifier
@sims4.commands.Command('s4clib_testing.display_name_of_currently_active_sim', command_type=sims4.commands.CommandType.Live)
def _s4clib_testing_display_name_of_currently_active_sim(_connection: int=None):
    """Cheat command: print the first and last name of the active Sim."""
    cheat_output = sims4.commands.CheatOutput(_connection)
    active_sim_info = CommonSimUtils.get_active_sim_info()
    # noinspection PyPropertyAccess
    cheat_output('Currently Active Sim: {} {}'.format(active_sim_info.first_name, active_sim_info.last_name))
@sims4.commands.Command('s4clib_testing.display_names_of_all_sims', command_type=sims4.commands.CommandType.Live)
def _s4clib_testing_display_names_of_all_sims(_connection: int=None):
    """Cheat command: print the name of every known Sim, one per line."""
    cheat_output = sims4.commands.CheatOutput(_connection)
    cheat_output('Showing the names of all sims (This may take awhile).')
    # enumerate replaces the original hand-rolled counter; output is identical.
    for sim_number, sim_info in enumerate(CommonSimUtils.get_sim_info_for_all_sims_generator(), start=1):
        # noinspection PyPropertyAccess
        cheat_output('{}: {} {}'.format(str(sim_number), sim_info.first_name, sim_info.last_name))
    cheat_output('Done showing the names of all sims.')
| StarcoderdataPython |
3299279 | from builtins import object
from django.db.models import Q, Count
from django.db.models.aggregates import Sum
from django.contrib.contenttypes.models import ContentType
from memoize import memoize
from moneyed.classes import Money
from bluebottle.clients import properties
from bluebottle.initiatives.models import Initiative
from bluebottle.activities.models import Contributor, Activity, EffortContribution
from bluebottle.members.models import Member
from bluebottle.time_based.models import (
DateActivity,
PeriodActivity,
TimeContribution
)
from bluebottle.funding.models import Donor, Funding
from bluebottle.deeds.models import Deed, DeedParticipant
from bluebottle.funding_pledge.models import PledgePayment
from bluebottle.utils.exchange_rates import convert
class Statistics(object):
    """Aggregated platform statistics, optionally restricted to a date range.

    Every public property is memoized for ``timeout`` seconds, so repeated
    reads within that window are served from cache instead of the database.
    """

    def __init__(self, start=None, end=None):
        # Optional datetimes bounding the reporting period; either may be None.
        self.start = start
        self.end = end

    # Lifetime (seconds) of the memoize cache used by every property below.
    timeout = 3600

    def date_filter(self, field='created'):
        """Build a Q object restricting *field* to the configured period.

        Returns an empty Q() (matches everything) when neither bound is set.
        """
        if self.start and self.end:
            filter_args = {'{}__range'.format(field): (self.start, self.end)}
        elif self.start:
            filter_args = {'{}__gte'.format(field): self.start}
        elif self.end:
            filter_args = {'{}__lte'.format(field): self.end}
        else:
            filter_args = {}
        return Q(**filter_args)

    @property
    @memoize(timeout=timeout)
    def people_involved(self):
        """
        The (unique) total number of people that donated, fundraised, campaigned, or was a
        task owner or member.
        """
        contributor_ids = Contributor.objects.filter(
            self.date_filter('contributor_date'),
            user_id__isnull=False,
            status__in=('new', 'accepted', 'active', 'succeeded')
        ).order_by(
            'user__id'
        ).distinct('user').values_list('user_id', flat=True)

        initiative_owner_ids = Initiative.objects.filter(
            self.date_filter('created'),
            status='approved'
        ).order_by(
            'owner__id'
        ).distinct('owner').values_list('owner_id', flat=True)

        activity_owner_ids = Activity.objects.filter(
            self.date_filter('created'),
            status__in=['open', 'full', 'running', 'succeeded', 'partially_funded']
        ).order_by(
            'owner__id'
        ).distinct('owner').values_list('owner_id', flat=True)

        # Union of the three id sets de-duplicates people in multiple roles.
        people_count = len(set(contributor_ids) | set(initiative_owner_ids) | set(activity_owner_ids))

        # Add anonymous donations
        people_count += len(Contributor.objects.filter(
            self.date_filter('contributor_date'),
            user_id=None,
            status='succeeded'
        ))

        # Add donations on behalf of another person
        people_count += len(Donor.objects.filter(
            self.date_filter('contributor_date'),
            user_id__isnull=False,
            status='succeeded',
            name__isnull=False,
        ).order_by('name').distinct('name'))

        return people_count

    @property
    @memoize(timeout=timeout)
    def time_activities_succeeded(self):
        """ Total number of succeeded tasks """
        date_activities = DateActivity.objects.filter(
            self.date_filter('slots__start'),
            status='succeeded'
        )
        period_activities = PeriodActivity.objects.filter(
            self.date_filter('deadline'),
            status='succeeded'
        )
        return len(date_activities) + len(period_activities)

    @property
    @memoize(timeout=timeout)
    def fundings_succeeded(self):
        """ Total number of succeeded funding campaigns """
        tasks = Funding.objects.filter(
            self.date_filter('transition_date'),
            status='succeeded'
        )
        return len(tasks)

    @property
    @memoize(timeout=timeout)
    def deeds_succeeded(self):
        """ Total number of succeeded deeds """
        # NOTE(review): 'slots__start' looks copied from the DateActivity
        # filters above; other methods in this class filter Deed on 'start'
        # or 'end' (see deeds_online / activities_succeeded). Verify that
        # Deed actually has a 'slots' relation — this may be a bug.
        return len(Deed.objects.filter(
            self.date_filter('slots__start'),
            status='succeeded'
        ))

    @property
    @memoize(timeout=timeout)
    def time_activities_online(self):
        """ Total number of online tasks """
        date_activities = DateActivity.objects.filter(
            self.date_filter('slots__start'),
            status__in=('open', 'full', 'running')
        )
        period_activities = PeriodActivity.objects.filter(
            self.date_filter('deadline'),
            status__in=('open', 'full', 'running')
        )
        return len(date_activities) + len(period_activities)

    @property
    @memoize(timeout=timeout)
    def deeds_online(self):
        """ Total number of online deeds """
        return len(Deed.objects.filter(
            self.date_filter('start'),
            status__in=('open', 'full', 'running')
        ))

    @property
    @memoize(timeout=timeout)
    def fundings_online(self):
        """ Total number of open funding campaigns """
        fundings = Funding.objects.filter(
            self.date_filter('transition_date'),
            status='open'
        )
        return len(fundings)

    @property
    @memoize(timeout=timeout)
    def activities_succeeded(self):
        """ Total number of succeeded activities (all types) """
        date_activities = DateActivity.objects.filter(
            self.date_filter('slots__start'),
            status='succeeded'
        )
        period_activities = PeriodActivity.objects.filter(
            self.date_filter('deadline'),
            status='succeeded'
        )
        funding_activities = Funding.objects.filter(
            self.date_filter('deadline'),
            status='succeeded'
        )
        deed_activities = Deed.objects.filter(
            self.date_filter('end'),
            status='succeeded'
        )
        return len(date_activities) + len(funding_activities) + len(period_activities) + len(deed_activities)

    @property
    @memoize(timeout=timeout)
    def activities_online(self):
        """ Total number of activities that have been in campaign mode"""
        date_activities = DateActivity.objects.filter(
            self.date_filter('slots__start'),
            status__in=('open', 'full', 'running', )
        )
        period_activities = PeriodActivity.objects.filter(
            self.date_filter('deadline'),
            status__in=('open', 'full', 'running', )
        )
        funding_activities = Funding.objects.filter(
            self.date_filter('deadline'),
            status__in=('open', 'full', 'running', )
        )
        deed_activities = Deed.objects.filter(
            self.date_filter('end'),
            status__in=('open', 'running', )
        )
        return len(date_activities) + len(funding_activities) + len(period_activities) + len(deed_activities)

    @property
    @memoize(timeout=timeout)
    def donated_total(self):
        """ Total amount donated to all activities"""
        donations = Donor.objects.filter(
            self.date_filter('contributor_date'),
            status='succeeded'
        )
        # Sum per currency first, then convert each subtotal to the
        # platform's default currency and add them up.
        totals = donations.order_by('amount_currency').values('amount_currency').annotate(total=Sum('amount'))
        amounts = [Money(total['total'], total['amount_currency']) for total in totals]
        if totals:
            donated = sum([convert(amount, properties.DEFAULT_CURRENCY) for amount in amounts])
        else:
            donated = Money(0, properties.DEFAULT_CURRENCY)
        return donated

    @property
    @memoize(timeout=timeout)
    def time_spent(self):
        """ Total amount of time spent on realized tasks """
        contributions = TimeContribution.objects.filter(
            self.date_filter('start'),
            status='succeeded'
        ).aggregate(time_spent=Sum('value'))
        return contributions['time_spent'] or 0

    @property
    @memoize(timeout=timeout)
    def deeds_done(self):
        """ Total number of succeeded deed contributions """
        return len(EffortContribution.objects.filter(
            self.date_filter('start'),
            # Restrict to contributions made through DeedParticipant.
            contributor__polymorphic_ctype=ContentType.objects.get_for_model(DeedParticipant),
            status='succeeded'
        ))

    @property
    @memoize(timeout=timeout)
    def activity_participants(self):
        """ Total number of distinct users with succeeded time contributions """
        contributions = TimeContribution.objects.filter(
            self.date_filter('start'),
            status='succeeded'
        ).aggregate(count=Count('contributor__user', distinct=True))
        return contributions['count'] or 0

    @property
    @memoize(timeout=timeout)
    def donations(self):
        """ Total number of succeeded donations """
        donations = Donor.objects.filter(
            self.date_filter('contributor_date'),
            status='succeeded'
        )
        return len(donations)

    @property
    @memoize(timeout=timeout)
    def amount_matched(self):
        """ Total amount matched on realized (done and incomplete) activities """
        totals = Funding.objects.filter(
            self.date_filter('transition_date'),
            status__in=['succeeded', 'open', 'partial']
        ).filter(
            amount_matching__gt=0
        ).values('amount_matching_currency').annotate(total=Sum('amount_matching'))
        amounts = [Money(total['total'], total['amount_matching_currency']) for total in totals]
        if totals:
            return sum([convert(amount, properties.DEFAULT_CURRENCY) for amount in amounts])
        else:
            return Money(0, properties.DEFAULT_CURRENCY)

    @property
    @memoize(timeout=timeout)
    def participants(self):
        """ Total numbers of participants (members that started a initiative, or where a realized task member) """
        initiative_owner_count = len(
            Initiative.objects.filter(
                self.date_filter('created'),
                status='approved'
            ).order_by(
                'owner__id'
            ).distinct('owner').values_list('owner_id', flat=True)
        )
        return initiative_owner_count + self.activity_participants

    @property
    @memoize(timeout=timeout)
    def pledged_total(self):
        """ Total amount of pledged donations """
        donations = PledgePayment.objects.filter(
            self.date_filter('donation__contributor_date'),
            donation__status='succeeded'
        )
        totals = donations.values(
            'donation__amount_currency'
        ).annotate(total=Sum('donation__amount'))
        amounts = [Money(total['total'], total['donation__amount_currency']) for total in totals]
        if totals:
            donated = sum([convert(amount, properties.DEFAULT_CURRENCY) for amount in amounts])
        else:
            donated = Money(0, properties.DEFAULT_CURRENCY)
        return donated

    @property
    @memoize(timeout=timeout)
    def members(self):
        """ Total amount of active members."""
        members = Member.objects.filter(
            self.date_filter('created'),
            is_active=True
        )
        return len(members)

    def __repr__(self):
        # '%s' formats the datetimes as Unix timestamps.
        start = self.start.strftime('%s') if self.start else 'none'
        end = self.end.strftime('%s') if self.end else 'none'
        return 'Statistics({},{})'.format(start, end)
| StarcoderdataPython |
# NOTE: this script deliberately exhausts a system resource. It is a test
# fixture for exercising hook sandboxing / resource limits, not production code.
from hookscript import request

# Which resource to exhaust is selected by the incoming hook request.
resource = request.values['resource']

if resource == 'cpu':
    # Busy-loop forever to burn CPU time.
    while True:
        pass
elif resource == 'mem':
    # Repeatedly double a string until memory is exhausted.
    x = 'xxxxxxxxxxxxxxxxxx'
    while True:
        x += x
elif resource == 'disk':
    # Write endlessly to a file to fill the disk.
    f = open('junk','w')
    while True:
        print('junk', file=f)
elif resource == 'output':
    # Emit a large (but finite) amount of stdout.
    i = 1000000
    while i > 0:
        print('junk')
        i -= 1
| StarcoderdataPython |
3243702 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-08 18:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``last_ping_body`` char field to the ``check`` model."""

    dependencies = [("api", "0029_auto_20170507_1251")]

    operations = [
        migrations.AddField(
            model_name="check",
            name="last_ping_body",
            # blank=True keeps the field optional; no null=True, so the
            # default empty string is stored for existing rows.
            field=models.CharField(blank=True, max_length=1000),
        )
    ]
| StarcoderdataPython |
try:
    from waterbutler import settings
except ImportError:
    # Fall back to an empty mapping so this module can still be imported
    # outside a waterbutler deployment (e.g. in isolated tests).
    settings = {}

# Provider-level overrides; every value below has a sensible default.
config = settings.get('GOOGLEDRIVE_PROVIDER_CONFIG', {})


# Base endpoints for the Google Drive v2 API (metadata and upload hosts).
BASE_URL = config.get('BASE_URL', 'https://www.googleapis.com/drive/v2')
BASE_UPLOAD_URL = config.get('BASE_UPLOAD_URL', 'https://www.googleapis.com/upload/drive/v2')
# Presumably a sentinel revision id meaning "no specific version" —
# TODO confirm against the googledrive provider code.
DRIVE_IGNORE_VERSION = config.get('DRIVE_IGNORE_VERSION', '0000000000000000000000000000000000000')
| StarcoderdataPython |
1621474 | from __future__ import absolute_import, unicode_literals
from mopidy.internal import validation
from mopidy.models import Ref, TlTrack, fields
from mopidy.models.immutable import ValidatedImmutableObject
class HistoryTrack(ValidatedImmutableObject):
    """
    A history track. Wraps a :class:`Ref` and its timestamp.

    :param timestamp: the timestamp
    :type timestamp: int
    :param track: the track reference
    :type track: :class:`Ref`
    """

    # The timestamp. Read-only.
    timestamp = fields.Integer()

    # The track reference. Read-only.
    track = fields.Field(type=Ref)
class HistoryState(ValidatedImmutableObject):
    """
    State of the history controller.

    Internally used for save/load state.

    :param history: the track history
    :type history: list of :class:`HistoryTrack`
    """

    # The tracks. Read-only.
    history = fields.Collection(type=HistoryTrack, container=tuple)
class MixerState(ValidatedImmutableObject):
    """
    State of the mixer controller.

    Internally used for save/load state.

    :param volume: the volume
    :type volume: int
    :param mute: the mute state
    :type mute: bool
    """

    # The volume (0-100). Read-only.
    volume = fields.Integer(min=0, max=100)

    # The mute state. Read-only.
    mute = fields.Boolean(default=False)
class PlaybackState(ValidatedImmutableObject):
    """
    State of the playback controller.

    Internally used for save/load state.

    :param tlid: current track tlid
    :type tlid: int
    :param time_position: play position
    :type time_position: int
    :param state: playback state
    :type state: :class:`validation.PLAYBACK_STATES`
    """

    # The tlid of current playing track. Read-only.
    tlid = fields.Integer(min=1)

    # The playback position. Read-only.
    time_position = fields.Integer(min=0)

    # The playback state. Read-only.
    state = fields.Field(choices=validation.PLAYBACK_STATES)
class TracklistState(ValidatedImmutableObject):
    """
    State of the tracklist controller.

    Internally used for save/load state.

    :param repeat: the repeat mode
    :type repeat: bool
    :param consume: the consume mode
    :type consume: bool
    :param random: the random mode
    :type random: bool
    :param single: the single mode
    :type single: bool
    :param next_tlid: the id for the next added track
    :type next_tlid: int
    :param tl_tracks: the list of tracks
    :type tl_tracks: list of :class:`TlTrack`
    """

    # The repeat mode. Read-only.
    repeat = fields.Boolean()

    # The consume mode. Read-only.
    consume = fields.Boolean()

    # The random mode. Read-only.
    random = fields.Boolean()

    # The single mode. Read-only.
    single = fields.Boolean()

    # The id of the track to play. Read-only.
    next_tlid = fields.Integer(min=0)

    # The list of tracks. Read-only.
    tl_tracks = fields.Collection(type=TlTrack, container=tuple)
class CoreState(ValidatedImmutableObject):
    """
    State of all Core controllers.

    Internally used for save/load state.

    :param history: State of the history controller
    :type history: :class:`HistoryState`
    :param mixer: State of the mixer controller
    :type mixer: :class:`MixerState`
    :param playback: State of the playback controller
    :type playback: :class:`PlaybackState`
    :param tracklist: State of the tracklist controller
    :type tracklist: :class:`TracklistState`
    """

    # State of the history controller.
    history = fields.Field(type=HistoryState)

    # State of the mixer controller.
    mixer = fields.Field(type=MixerState)

    # State of the playback controller.
    playback = fields.Field(type=PlaybackState)

    # State of the tracklist controller.
    tracklist = fields.Field(type=TracklistState)
| StarcoderdataPython |
106392 | <filename>tests/web.adblockplus.org/pages/landingPage.py
from pages.basePage import BasePage
# CSS selectors for the landing page's download buttons.
DOWNLOAD_BUTTON_HREF = 'a[href*="install"]'
# Android / iOS buttons link through eyeo.to redirect URLs.
DOWNLOAD_BUTTON_HREF_ANDROID = 'a[href*="https://eyeo.to/adblockbrowser/android/abp-website"]'
DOWNLOAD_BUTTON_HREF_IOS = 'a[href*="https://eyeo.to/adblockplus/ios_safari_install/abp-website"]'
# Selector variant used when LandingPage is built with is_language_test=True.
DOWNLOAD_BUTTON_HREF_LANG = 'a[href*="chrome_install"]'
class LandingPage(BasePage):
    """Page object for the adblockplus.org landing page.

    Wraps the download buttons (default, Android, iOS) and exposes their
    link targets, labels and click actions.
    """

    def __init__(self, driver, is_language_test=False):
        self.driver = driver
        # Language tests exercise a localized page whose main download
        # button uses a different selector.
        if is_language_test:
            self._download_button_href = DOWNLOAD_BUTTON_HREF_LANG
        else:
            self._download_button_href = DOWNLOAD_BUTTON_HREF

    def _find(self, selector):
        """Locate the element matching *selector* on the current page."""
        return self.driver.find_element_by_css_selector(selector)

    @property
    def get_download_button_link(self):
        return self._find(self._download_button_href).get_attribute('href')

    @property
    def get_download_button_link_android(self):
        return self._find(DOWNLOAD_BUTTON_HREF_ANDROID).get_attribute('href')

    @property
    def get_download_button_link_ios(self):
        return self._find(DOWNLOAD_BUTTON_HREF_IOS).get_attribute('href')

    @property
    def get_download_button_text(self):
        return self._find(self._download_button_href).get_attribute('innerText')

    @property
    def get_download_button_text_android(self):
        return self._find(DOWNLOAD_BUTTON_HREF_ANDROID).get_attribute('title')

    @property
    def get_download_button_text_ios(self):
        return self._find(DOWNLOAD_BUTTON_HREF_IOS).get_attribute('title')

    def click_download_button(self):
        self._find(self._download_button_href).click()

    def click_download_button_android(self):
        self._find(DOWNLOAD_BUTTON_HREF_ANDROID).click()
| StarcoderdataPython |
1663034 | <filename>examples/cleanup_clips.py
#!/usr/bin/env python
# Copyright (c) 2015 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
"""
This script lists out and optionally deletes all clips on a NetShark.
"""
import sys
from steelscript.netshark.core.app import NetSharkApp
from steelscript.commands.steel import prompt_yn
class CreateView(NetSharkApp):
    """List and optionally delete all trace clips on a NetShark."""

    def add_options(self, parser):
        super(CreateView, self).add_options(parser)
        # BUG FIX: without action='store_true', optparse treats --force as an
        # option that *requires* a value; as a flag it must store True.
        parser.add_option('--force', action='store_true',
                          help='Delete all clips without prompting',
                          default=False)

    def validate_args(self):
        """Validate arguments if needed."""
        super(CreateView, self).validate_args()

    def main(self):
        """Enumerate clips, confirm with the user (unless --force), delete."""
        clips = self.netshark.get_clips(force_refetch=True)
        if not clips:
            print('No trace clips found on NetShark.')
            return
        for i, c in enumerate(clips):
            print('%3d) %s - %s' % (i, c, c.data['config']['description']))
        if not self.options.force:
            if not prompt_yn('Delete all these clips?',
                             default_yes=False):
                print('Okay, exiting.')
                sys.exit(0)
        for c in clips:
            c.delete()
        print('Deleted.')


if __name__ == '__main__':
    CreateView().run()
| StarcoderdataPython |
30333 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
from concurrent.futures import ThreadPoolExecutor
from celery.schedules import crontab
from celery.task import periodic_task, task
from apps.log_search.handlers.search.search_handlers_esquery import SearchHandler
from apps.utils.lock import share_lock
from apps.utils.log import logger
from apps.exceptions import ApiResultError
from apps.log_search.constants import BkDataErrorCode
from apps.log_search.models import LogIndexSet
@periodic_task(run_every=crontab(minute="*/10"))
@share_lock()
def sync_index_set_mapping_cache():
    """Refresh the search-field mapping cache for every active index set.

    Runs every 10 minutes; each index set is refreshed on a thread-pool
    worker so slow Elasticsearch responses do not serialize the whole run.
    """
    logger.info("[sync_index_set_mapping_cache] start")
    active_ids = LogIndexSet.objects.filter(is_active=True).values_list("index_set_id", flat=True)

    def refresh_one(index_set_id):
        logger.info("[sync_index_set_mapping_cache] index_set({}) start".format(index_set_id))
        try:
            # fields() repopulates the mapping cache as a side effect.
            SearchHandler(index_set_id=index_set_id, search_dict={}).fields()
        except Exception as e:  # pylint: disable=broad-except
            logger.exception("[sync_index_set_mapping_cache] index_set({}) sync failed: {}".format(index_set_id, e))
        else:
            logger.info("[sync_index_set_mapping_cache] index_set({}) sync success".format(index_set_id))

    with ThreadPoolExecutor() as executor:
        executor.map(refresh_one, active_ids)
    logger.info("[sync_index_set_mapping_cache] end")
@periodic_task(run_every=crontab(minute="0", hour="2"))
def sync_index_set_mapping_snapshot():
    """Nightly (02:00) refresh of the field snapshot for every active index set.

    Index sets whose storage backend reports that metadata cannot be fetched
    are deactivated so they are skipped on subsequent runs.
    """
    logger.info("[sync_index_set_mapping_snapshot] start")
    index_set_list = LogIndexSet.objects.filter(is_active=True)
    for index_set in index_set_list:
        try:
            index_set.sync_fields_snapshot(pre_check_enable=False)
        except ApiResultError as e:
            # The data platform reported it cannot fetch metadata for this
            # index set: deactivate it and move on.
            if e.code in [BkDataErrorCode.STORAGE_TYPE_ERROR, BkDataErrorCode.COULD_NOT_GET_METADATA_ERROR]:
                index_set.is_active = False
                index_set.save()
            logger.exception(
                f"[sync_index_set_mapping_snapshot] index_set({index_set.index_set_id} call mapping error: {e})"
            )
            continue
        except Exception as e:  # pylint: disable=broad-except
            # Any other failure: log and continue with the remaining sets.
            logger.exception(
                "[sync_index_set_mapping_snapshot] index_set({}) sync failed: {}".format(index_set.index_set_id, e)
            )
            continue
        logger.info("[sync_index_set_mapping_snapshot] index_set({}) sync success".format(index_set.index_set_id))
    logger.info("[sync_index_set_mapping_snapshot] end")
@task(ignore_result=True)
def sync_single_index_set_mapping_snapshot(index_set_id=None):  # pylint: disable=function-name-too-long
    """Refresh the field snapshot for a single index set (async celery task).

    Args:
        index_set_id: Primary key of the LogIndexSet to refresh.
    """
    try:
        index_set_obj = LogIndexSet.objects.get(index_set_id=index_set_id)
    except LogIndexSet.DoesNotExist:
        logger.exception(f"[sync_single_index_set_mapping_snapshot]index_set({index_set_id}) not exist")
        return
    try:
        index_set_obj.sync_fields_snapshot()
    except Exception as e:  # pylint: disable=broad-except
        logger.exception(
            f"[sync_single_index_set_mapping_snapshot] index_set({index_set_obj.index_set_id}) sync failed: {e}"
        )
    else:
        # BUG FIX: the original logged "sync success" unconditionally, even
        # after sync_fields_snapshot() raised; only log success when it worked.
        logger.info(f"[sync_single_index_set_mapping_snapshot] index_set({index_set_obj.index_set_id}) sync success")
| StarcoderdataPython |
1660401 | # Form implementation generated from reading ui file 'frmlogin.ui'
#
# Created: Mon Nov 13 22:02:43 2000
# by: The Python User Interface Compiler (pyuic)
#
# WARNING! All changes made in this file will be lost!
from qt import *
class Login(QDialog):
    """pyuic-generated login dialog for connecting to a Kura database.

    NOTE: this class was generated from frmlogin.ui (see file header);
    manual edits will be lost if the .ui file is recompiled.
    """

    def __init__(self, parent = None, name = None, modal = 0, fl = 0):
        """Build the dialog widgets, layouts, signal wiring and tab order."""
        QDialog.__init__(self, parent, name, modal, fl)
        # Dialog shell: object name, size, caption, size policy.
        if name == None:
            self.setName('frmLogin')
        self.resize(418,182)
        self.setCaption(self.tr('Log on to Kura'))
        self.setSizeGripEnabled(0)
        self.setSizePolicy(QSizePolicy(1,1,self.sizePolicy().hasHeightForWidth()))
        frmLoginLayout = QVBoxLayout(self)
        frmLoginLayout.setSpacing(6)
        frmLoginLayout.setMargin(11)
        # Grid of labelled line edits: username, password, database, host.
        Layout6 = QGridLayout()
        Layout6.setSpacing(6)
        Layout6.setMargin(0)
        self.txtUsername = QLineEdit(self,'txtUsername')
        Layout6.addWidget(self.txtUsername,0,1)
        self.lblDatabase = QLabel(self,'lblDatabase')
        self.lblDatabase.setText(self.tr('&Database'))
        Layout6.addWidget(self.lblDatabase,2,0)
        self.lblUsername = QLabel(self,'lblUsername')
        self.lblUsername.setText(self.tr('&Username'))
        Layout6.addWidget(self.lblUsername,0,0)
        self.txtHost = QLineEdit(self,'txtHost')
        Layout6.addWidget(self.txtHost,3,1)
        self.txtPassword = QLineEdit(self,'txtPassword')
        Layout6.addWidget(self.txtPassword,1,1)
        self.lblHost = QLabel(self,'lblHost')
        self.lblHost.setText(self.tr('&Host'))
        Layout6.addWidget(self.lblHost,3,0)
        self.txtDatabase = QLineEdit(self,'txtDatabase')
        Layout6.addWidget(self.txtDatabase,2,1)
        self.lblPassword = QLabel(self,'lblPassword')
        self.lblPassword.setText(self.tr('&Password'))
        Layout6.addWidget(self.lblPassword,1,0)
        frmLoginLayout.addLayout(Layout6)
        # Button row: spacer pushes OK / Cancel to the right.
        Layout1 = QHBoxLayout()
        Layout1.setSpacing(6)
        Layout1.setMargin(0)
        spacer = QSpacerItem(20,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
        Layout1.addItem(spacer)
        self.buttonOk = QPushButton(self,'buttonOk')
        self.buttonOk.setText(self.tr('&OK'))
        self.buttonOk.setAutoDefault(1)
        self.buttonOk.setDefault(1)
        Layout1.addWidget(self.buttonOk)
        self.buttonCancel = QPushButton(self,'buttonCancel')
        self.buttonCancel.setText(self.tr('&Cancel'))
        self.buttonCancel.setAutoDefault(1)
        Layout1.addWidget(self.buttonCancel)
        frmLoginLayout.addLayout(Layout1)
        # OK accepts the dialog, Cancel rejects it.
        self.connect(self.buttonOk,SIGNAL('clicked()'),self,SLOT('accept()'))
        self.connect(self.buttonCancel,SIGNAL('clicked()'),self,SLOT('reject()'))
        # Keyboard tab order and label/buddy pairing for the accelerators.
        self.setTabOrder(self.txtUsername,self.txtPassword)
        self.setTabOrder(self.txtPassword,self.txtDatabase)
        self.setTabOrder(self.txtDatabase,self.txtHost)
        self.setTabOrder(self.txtHost,self.buttonOk)
        self.setTabOrder(self.buttonOk,self.buttonCancel)
        self.lblDatabase.setBuddy(self.txtDatabase)
        self.lblUsername.setBuddy(self.txtUsername)
        self.lblHost.setBuddy(self.txtHost)
        self.lblPassword.setBuddy(self.txtPassword)
| StarcoderdataPython |
3379002 | <reponame>cheperboy/home_alarm
from flask import Flask, Blueprint, render_template, redirect, url_for, request, flash
from ..models import DBLog
alarms_panel = Blueprint('panel', __name__, url_prefix='/')
@alarms_panel.route('/')
def index():
    """Default panel: ext alarms bound to nox, with the last 15 log entries."""
    return render_template('alarms-panel-nox-bind.html.j2', \
                           logs=DBLog.log_nox_bind_ext(limit=15))
@alarms_panel.route('/both')
def both():
    """Panel showing both ext and nox. Should not be used when ext is bind to nox."""
    ext_events = DBLog.commands(scope="ext", limit=5)
    nox_events = DBLog.commands(scope="nox", limit=5)
    return render_template('alarms-panel.html.j2',
                           ext_events=ext_events,
                           nox_events=nox_events)
| StarcoderdataPython |
124887 | <filename>explorecourses/classes.py
"""
This module contains classes representing various academic elements
for use in storing and manipulating information from Explore Courses.
Includes:
- School
- Department
- Course
- Section
- Schedule
- Instructor
- LearningObjective
- Attribute
- Tag
"""
from typing import Tuple
from xml.etree.ElementTree import Element
class Department(object):
    """
    A department within a school.

    Attributes:
        name (str): The department name.
        code (str): The department code used for searching courses by
            department.
    """

    def __init__(self, elem: Element):
        """
        Builds a Department from its XML element.

        Args:
            elem (Element): The department's XML element.
        """
        self.name = elem.get("longname")
        self.code = elem.get("name")

    def __str__(self):
        """Returns the department as '<name> (<code>)'."""
        return "{0} ({1})".format(self.name, self.code)
class School(object):
    """
    A school within the university.

    Attributes:
        name (str): The name of the school.
        departments (Tuple[Department]): The departments within the school.
    """

    def __init__(self, elem: Element):
        """
        Builds a School from its XML element.

        Args:
            elem (Element): The school's XML element.
        """
        self.name = elem.get("name")
        self.departments = tuple(Department(child)
                                 for child in elem.findall("department"))

    def get_department(self, idf: str) -> "Department":
        """
        Finds a department by name or code (case-insensitive).

        Args:
            idf (str): An identifier of the department; either the name
                or the code.

        Returns:
            Department: The first department matching the identifier, or
            None when nothing matches.
        """
        wanted = idf.lower()
        for dept in self.departments:
            if dept.name.lower() == wanted or dept.code.lower() == wanted:
                return dept
        return None

    def __str__(self):
        """Returns the school's name."""
        return self.name
class Instructor(object):
    """
    An instructor for a section.

    Attributes:
        name (str): Name in "LastName, FirstInitial." form.
        first_name (str): First name.
        middle_name (str): Middle name.
        last_name (str): Last name.
        sunet_id (str): SUNet ID.
        is_primary_instructor (bool): True when this is the primary
            instructor ("PI" role) for the course.
    """

    def __init__(self, elem: Element):
        """
        Builds an Instructor from its XML element.

        Args:
            elem (Element): The instructor's XML element.
        """
        text = elem.findtext
        self.name = text("name")
        self.first_name = text("firstName")
        self.middle_name = text("middleName")
        self.last_name = text("lastName")
        self.sunet_id = text("sunet")
        # "PI" marks the primary instructor; any other role is secondary.
        self.is_primary_instructor = text("role") == "PI"

    def __str__(self):
        """Returns '<first> <last> (<sunet_id>)'."""
        return "{0} {1} ({2})".format(self.first_name, self.last_name,
                                      self.sunet_id)
class Attribute(object):
    """
    An attribute of a course.

    Attributes:
        name (str): The attribute name.
        value (str): The abbreviation value of the attribute.
        description (str): A description of the value.
        catalog_print (bool): True when the catalog print flag is set.
        schedule_print (bool): True when the schedule print flag is set.
    """

    def __init__(self, elem: Element):
        """
        Builds an Attribute from its XML element.

        Args:
            elem (Element): The attribute's XML element.
        """
        text = elem.findtext
        self.name = text("name")
        self.value = text("value")
        self.description = text("description")
        self.catalog_print = text("catalogPrint") == "true"
        self.schedule_print = text("schedulePrint") == "true"

    def __str__(self):
        """Returns '<name>::<value>'."""
        return "{0}::{1}".format(self.name, self.value)
class Schedule(object):
    """
    The schedule of a section, including its instructors.

    Attributes:
        start_date (str): Start date of the section's schedule.
        end_date (str): End date of the section's schedule.
        start_time (str): Start time of each meeting.
        end_time (str): End time of each meeting.
        location (str): Location of each meeting.
        days (Tuple[str]): Days of the week the section meets.
        instructors (Tuple[Instructor]): The section's instructors.
    """

    def __init__(self, elem: Element):
        """
        Builds a Schedule from its XML element.

        Args:
            elem (Element): The schedule's XML element.
        """
        text = elem.findtext
        self.start_date = text("startDate")
        self.end_date = text("endDate")
        self.start_time = text("startTime")
        self.end_time = text("endTime")
        self.location = text("location")
        self.days = tuple(text("days").split())
        self.instructors = tuple(Instructor(child)
                                 for child in elem.find("instructors"))

    def __str__(self):
        """Returns '<days>, <start> - <end> at <location>'."""
        day_list = ", ".join(self.days)
        return "{0}, {1} - {2} at {3}".format(day_list, self.start_time,
                                              self.end_time, self.location)
class Section(object):
    """
    A section of a course.

    Attributes:
        class_id (int): Unique ID of the section.
        term (str): Year and quarter the section is offered.
        units (str): Units the section is offered for.
        section_num (str): Section number distinguishing sections of the
            same type.
        component (str): Type of section (e.g., LEC).
        curr_class_size (int): Students currently enrolled.
        max_class_size (int): Maximum enrollment.
        curr_waitlist_size (int): Students currently on the waitlist.
        max_waitlist_size (int): Maximum waitlist size.
        notes (str): Notes about the section.
        schedules (Tuple[Schedule]): The section's schedules.
        attributes (Tuple[Attribute]): The section's attributes.
    """

    def __init__(self, elem: Element):
        """
        Builds a Section from its XML element.

        Args:
            elem (Element): The section's XML element.
        """
        text = elem.findtext
        self.class_id = int(text("classId"))
        self.term = text("term")
        self.units = text("units")
        self.section_num = text("sectionNumber")
        self.component = text("component")
        self.max_class_size = int(text("maxClassSize"))
        self.curr_class_size = int(text("currentClassSize"))
        self.curr_waitlist_size = int(text("currentWaitlistSize"))
        self.max_waitlist_size = int(text("maxWaitlistSize"))
        self.notes = text("notes")
        self.schedules = tuple(Schedule(child)
                               for child in elem.find("schedules"))
        self.attributes = tuple(Attribute(child)
                                for child in elem.find("attributes"))

    def __str__(self):
        """Returns '<component> <section_num> (id: <class_id>)'."""
        return "{0} {1} (id: {2})".format(self.component, self.section_num,
                                          self.class_id)
class Tag(object):
    """
    A tag for a course.

    Attributes:
        organization (str): The organization within the school responsible
            for the tag.
        name (str): The tag's name.
    """

    def __init__(self, elem: Element):
        """
        Builds a Tag from its XML element.

        Args:
            elem (Element): The tag's XML element.
        """
        self.organization = elem.findtext("organization")
        self.name = elem.findtext("name")

    def __str__(self):
        """Returns '<organization>::<name>'."""
        return "{0}::{1}".format(self.organization, self.name)
class LearningObjective(object):
    """
    A learning objective for a course.

    Attributes:
        code (str): The GER the learning objective is for.
        description (str): A description of the learning objective.
    """

    def __init__(self, elem: Element):
        """
        Builds a LearningObjective from its XML element.

        Args:
            elem (Element): The learning objective's XML element.
        """
        # The code/description live at an arbitrary depth, hence ".//".
        self.code = elem.findtext(".//requirementCode")
        self.description = elem.findtext(".//description")

    def __str__(self):
        """Returns 'Learning Objective (<code>: <description>)'."""
        return "Learning Objective ({0}: {1})".format(self.code,
                                                      self.description)
class Course(object):
    """
    A course listed at the university.

    Attributes mirror the ExploreCourses XML: year, subject, code, title,
    description, gers, repeatable, grading_basis, units_min, units_max,
    objectives, final_exam, sections, tags, attributes, course_id, active,
    offer_num, academic_group, academic_org, academic_career,
    max_units_repeat, max_times_repeat.

    Equality and hashing use course_id; ordering uses
    (subject, code, year).
    """

    def __init__(self, elem: Element):
        """
        Builds a Course from its XML element.

        Args:
            elem (Element): The course's XML element.
        """
        text = elem.findtext
        self.year = text("year")
        self.subject = text("subject")
        self.code = text("code")
        self.title = text("title")
        self.description = text("description")
        self.gers = tuple(text("gers").split(", "))
        self.repeatable = text("repeatable") == "true"
        self.grading_basis = text("grading")
        self.units_min = int(text("unitsMin"))
        self.units_max = int(text("unitsMax"))
        self.objectives = tuple(LearningObjective(obj) for obj
                                in elem.find("learningObjectives"))
        # Tri-state flag: "Y" -> True, "N" -> False, anything else -> None.
        exam_flag = text(".//finalExamFlag")
        self.final_exam = (True if exam_flag == "Y"
                           else False if exam_flag == "N"
                           else None)
        self.sections = tuple(Section(section) for section
                              in elem.find("sections"))
        self.tags = tuple(Tag(tag) for tag in elem.find("tags"))
        self.attributes = tuple(Attribute(attr) for attr
                                in elem.find("attributes"))
        self.course_id = int(text(".//courseId"))
        # Tri-state flag: "A" (active) -> True, "I" (inactive) -> False.
        status = text(".//effectiveStatus")
        self.active = (True if status == "A"
                       else False if status == "I"
                       else None)
        self.offer_num = text(".//offerNumber")
        self.academic_group = text(".//academicGroup")
        self.academic_org = text(".//academicOrganization")
        self.academic_career = text(".//academicCareer")
        self.max_units_repeat = int(text(".//maxUnitsRepeat"))
        self.max_times_repeat = int(text(".//maxTimesRepeat"))

    def __str__(self):
        """Returns '<subject><code> <title>'."""
        return f"{self.subject}{self.code} {self.title}"

    def _order_key(self):
        """Ordering key: first subject, then code, then year."""
        return (self.subject, self.code, self.year)

    def __eq__(self, other):
        """Courses are equal iff their course IDs match."""
        if not isinstance(other, Course):
            return NotImplemented
        return self.course_id == other.course_id

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made Course unhashable
        # (unusable in sets / as dict keys). Hash by the same key as __eq__.
        return hash(self.course_id)

    def __lt__(self, other):
        """Less-than by (subject, code, year)."""
        if not isinstance(other, Course):
            raise TypeError(f"'<' not supported between instances of "
                            f"'{type(self)}' and '{type(other)}'")
        return self._order_key() < other._order_key()

    def __gt__(self, other):
        """Greater-than by (subject, code, year).

        BUG FIX: the original defined a > b as "not (a < b) and a != b",
        which made two distinct courses with identical subject/code/year
        each compare greater than the other.
        """
        if not isinstance(other, Course):
            raise TypeError(f"'>' not supported between instances of "
                            f"'{type(self)}' and '{type(other)}'")
        return self._order_key() > other._order_key()

    def __le__(self, other):
        """Less-than-or-equal: ordered before, or equal by course_id."""
        if not isinstance(other, Course):
            raise TypeError(f"'<=' not supported between instances of "
                            f"'{type(self)}' and '{type(other)}'")
        return (self._order_key() < other._order_key()
                or self.course_id == other.course_id)

    def __ge__(self, other):
        """Greater-than-or-equal: ordered after, or equal by course_id."""
        if not isinstance(other, Course):
            raise TypeError(f"'>=' not supported between instances of "
                            f"'{type(self)}' and '{type(other)}'")
        return (self._order_key() > other._order_key()
                or self.course_id == other.course_id)
| StarcoderdataPython |
1773309 | <reponame>acolley/protoactor-python<filename>tests/persistence/snapshot_strategies/test_time_strategy.py
import datetime
from protoactor.persistence.messages import PersistedEvent
from protoactor.persistence.snapshot_strategies.time_strategy import TimeStrategy
def test_time_strategy_should_snapshot_according_to_the_interval():
    """TimeStrategy should snapshot only when the 10s interval has elapsed.

    The strategy is given ``lambda: now`` as its clock; because closures
    capture the *variable* (not its value), rebinding ``now`` below moves
    the strategy's notion of the current time forward.
    """
    now = datetime.datetime.strptime('2000-01-01 12:00:00', '%Y-%m-%d %H:%M:%S')
    strategy = TimeStrategy(datetime.timedelta(seconds=10), lambda: now)
    # t+0s: interval not yet elapsed
    assert strategy.should_take_snapshot(PersistedEvent(None, 0)) is False
    now = now + datetime.timedelta(seconds=5)
    # t+5s: still within the interval
    assert strategy.should_take_snapshot(PersistedEvent(None, 0)) is False
    now = now + datetime.timedelta(seconds=5)
    # t+10s: interval elapsed -> snapshot
    assert strategy.should_take_snapshot(PersistedEvent(None, 0)) is True
    now = now + datetime.timedelta(seconds=5)
    # t+15s: only 5s since the last snapshot
    assert strategy.should_take_snapshot(PersistedEvent(None, 0)) is False
    now = now + datetime.timedelta(seconds=5)
    # t+20s: interval elapsed again -> snapshot
    assert strategy.should_take_snapshot(PersistedEvent(None, 0)) is True
3227468 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from mordor import AppEnv
import sys
def main():
    """Print a greeting and the raw command-line arguments."""
    print("hello")
    print(sys.argv)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3357301 | from specreduce.utils.synth_data import make_2dspec_image
from astropy.nddata import CCDData
def test_make_2dspec_image():
    """make_2dspec_image should return a CCDData image of shape (ny, nx)."""
    ccdim = make_2dspec_image(
        nx=3000,
        ny=1000,
        background=5,
        trace_center=None,
        trace_order=3,
        trace_coeffs={'c0': 0, 'c1': 50, 'c2': 100},
        source_amplitude=10,
        source_alpha=0.1
    )
    assert isinstance(ccdim, CCDData)
    assert ccdim.data.shape == (1000, 3000)
| StarcoderdataPython |
137340 | <gh_stars>10-100
from spire.mesh import ModelController
from spire.schema import SchemaDependency
from lattice.server.resources import Project as ProjectResource
from lattice.server.models import *
class ProjectController(ModelController):
    """Mesh model controller exposing lattice Project resources.

    Maps the ``Project`` model onto the ``Project`` resource, serializing
    the attached repository (if any) in both directions.
    """

    resource = ProjectResource
    version = (1, 0)

    model = Project
    # Model attributes copied directly between model and resource.
    mapping = 'id status description'
    schema = SchemaDependency('lattice')

    def _annotate_model(self, model, data):
        # Replace the project's repository when one is supplied in the
        # request payload. NOTE(review): polymorphic_create presumably
        # selects the concrete subclass (git/svn) from the payload's
        # 'type' field — confirm against the models module.
        repository = data.get('repository')
        if repository:
            model.repository = ProjectRepository.polymorphic_create(repository)

    def _annotate_resource(self, model, resource, data):
        # Mirror the repository (type + url) into the outgoing resource;
        # only git and svn repositories are serialized.
        repository = model.repository
        if repository:
            if repository.type == 'git':
                resource['repository'] = {'type': 'git', 'url': repository.url}
            elif repository.type == 'svn':
                resource['repository'] = {'type': 'svn', 'url': repository.url}
| StarcoderdataPython |
3219127 | """
Man Run Land Analyzer.
"""
import analyzer_lib.utils.utils as u
import analyzer_lib.data_manipulation.tables as t
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType, DecimalType, DoubleType, FloatType
from pyspark.sql import DataFrame, Row
from pyspark.sql import functions as F
import configparser
import sys
# read information from configs
config = configparser.ConfigParser()
config.read('conf.ini')
file_dir = config['local']['file_dir']
company_relations_file = config['local']['company_relations_file']
land_ownership_file = config['local']['land_ownership_file']
hierarchical_structure_file_name = config['local']['hierarchical_structure_file_name']

# define schema for input files
# company_relations: one row per company with its (optional) parent company
cr_schema = StructType([
    StructField("company_id", StringType(), True),
    StructField("name", StringType(), True),
    StructField("parent", StringType(), True)
])
# land_ownership: one row per (land parcel, owning company) pair
lo_schema = StructType([
    StructField("land_id", StringType(), True),
    StructField("company_id", StringType(), True)
])
# output table: each company enriched with its serialized hierarchy
hierarchical_structure_schema = StructType([
    StructField("company_id", StringType(), True),
    StructField("name", StringType(), True),
    StructField("description", StringType(), True),
    StructField("hierarchical_structure", StringType(), True)
])
def run_with_rebuild(arg_file_dir: str, arg_company_relations_file: str, arg_cr_schema: StructType,
                     arg_land_ownership_file:str, arg_lo_schema: StructType,
                     arg_hierarchical_structure_file_name) -> DataFrame:
    """Rebuild and persist the company hierarchical-structure table.

    Loads the company-relations and land-ownership files, enriches them
    with parent/child lists and land-parcel counts, builds the hierarchy
    top-down from the root companies, writes the result to
    *arg_hierarchical_structure_file_name*, and returns the final DataFrame.
    """
    # READ DFs
    pathcr = f"{arg_file_dir}/{arg_company_relations_file}"
    pathlo = f"{arg_file_dir}/{arg_land_ownership_file}"
    df_cr = u.load_df(arg_cr_schema, pathcr)\
        .select(F.col("company_id").alias("company_id_cr"), F.col("name"), F.col("parent"))
    df_lo = u.load_df(arg_lo_schema, pathlo)\
        .select(F.col("land_id"), F.col("company_id").alias("company_id_lo"))
    # ENRICH DFs WITH MORE INFORMATION
    parent_df = t.create_df_with_parent_child_lists(df_cr)
    company_land_count = t.create_df_with_land_parcels_ownership_count(df_lo)
    company_df = t.create_df_with_full_description(df_cr, company_land_count)
    data_frame = parent_df
    list_of_roots = None
    # GET THE LIST OF ROOTS (companies with an empty or null parent)
    try:
        list_of_roots = data_frame\
            .where( (F.col("parent")=="") | (F.col("parent").isNull()) )\
            .select(F.col("company_list"))\
            .first()[0]
    except Exception as e:
        # NOTE(review): .first() returns None when no parentless rows exist,
        # so the [0] raises TypeError — presumably why the broad except;
        # confirm this is the intended behavior.
        print(e)
    # BUILD HIERARCHICAL STRUCTURE FOR EACH ROOT - TOP DOWN
    if list_of_roots is not None:
        company_hierarchical_structure_dict = u.build_list_of_dict_with_hierarchical_structure_for_each_root(data_frame, list_of_roots)
        hierarchical_structure_df = u.create_company_hierarchical_structure_df(company_hierarchical_structure_dict)
    else:
        # NOTE(review): when no roots are found, hierarchical_structure_df is
        # never bound and the join below raises NameError — confirm intended.
        print(f"no roots in the list of roots in the df: {data_frame}")
    # BUILD FINAL ENRICHED TABLE
    df_total = company_df.join(hierarchical_structure_df, company_df.company_id==hierarchical_structure_df.id, how="left")\
        .drop(F.col("parent_2"))\
        .drop(F.col("id"))
    u.write_df(df_total, arg_file_dir, arg_hierarchical_structure_file_name)
    return df_total
if __name__ == '__main__':
    # Usage: python main.py <company_id> [rebuild]
    # example python main.py CR995643170992 rebuild
    if len(sys.argv) > 2 and sys.argv[2] == "rebuild":
        # Rebuild the hierarchy table from the raw input files first.
        print("building of hierarchical_structure_file_name has started and may take few minutes ... ")
        df = run_with_rebuild(file_dir, company_relations_file, cr_schema,
                              land_ownership_file, lo_schema, hierarchical_structure_file_name)
        print("building of hierarchical_structure_file_name has finished")
        u.print_dict_for_company_id(df, sys.argv[1])
    elif len(sys.argv) == 2:
        # No rebuild requested: load the previously persisted table.
        df = u.load_df(hierarchical_structure_schema, f"{file_dir}/{hierarchical_structure_file_name}")
        u.print_dict_for_company_id(df, sys.argv[1])
1733631 | #! /usr/bin/env python
"""
Set a register preset to test different configurations on YM2612 chip
"""
from __future__ import print_function
import sys
import time
import serial
import rtmidi
# Preset storage limits
YM_MAX_NUM_USER_PRESETS = 8
YM_MAX_NUM_DEFAULT_PRESETS = 8
# Number of FM voices (channels)
YM_MAX_VOICES = 6
# Number of FM operators per voice
YM_MAX_OPERATORS = 4
# SysEx command ids understood by the firmware
YM_SYSEX_CMD_SET_REG = 0
YM_SYSEX_CMD_SAVE_PRESET = 1
YM_SYSEX_CMD_LOAD_PRESET = 2
YM_SYSEX_CMD_LOAD_DEFAULT_PRESET = 3
# Factory (default) preset slot indices
YM_PRESET_DX_PIANO = 0
YM_PRESET_GUITAR = 1
YM_PRESET_SAWTOOTH = 2
YM_PRESET_SONIC = 3
class YM2612Chip:
    """
    Handle and store state for an FM synth (YM2612) test board, talking to
    it over a serial CLI and/or a MIDI SysEx interface.
    """
def __init__(self,
ser_com=None,
midi_com=None,
lfo_on=0,
lfo_freq=0):
# Create serial port handler
self.ser_com = ser_com
if ser_com:
self.ser = serial.Serial(ser_com, baudrate="115200", timeout=1)
print("CMD: Using port", ser_com)
else:
self.ser = None
# Create midi port
self.midi_com = midi_com
if midi_com:
self.midiout = rtmidi.MidiOut()
self.midiout.open_port(self.midi_com)
print("MIDI: Using port", self.midiout.get_port_name(self.midi_com))
else:
self.midiout = None
# Vendor id
self.vendor_id = 0x001234
# Chip general register
self.lfo_on = lfo_on
self.lfo_freq = lfo_freq
# Channel definition
self.channel = {
0: self.__YMChannel(channel_id=0),
1: self.__YMChannel(channel_id=1),
2: self.__YMChannel(channel_id=2),
3: self.__YMChannel(channel_id=3),
4: self.__YMChannel(channel_id=4),
5: self.__YMChannel(channel_id=5)
}
def __send_cmd(self, cli_cmd):
retval = False
# Try to send data
if self.ser:
self.ser.write(cli_cmd)
rx_buff = ""
while True:
byte_rx = self.ser.read()
if len(byte_rx) == 0:
break
rx_buff += byte_rx.decode("utf-8")
if "OK" in rx_buff:
retval = True
break
if retval:
print("CMD: OK")
time.sleep(0.01)
else:
print("CMD: ERR")
else:
print("CMD: Not init")
return retval
def __write_reg(self, addr, data, bank):
reg_addr = int(addr)
reg_data = int(data)
reg_bank = int(bank)
print("CMD: writeReg: %02X-%02X-%02X" % (reg_addr, reg_data, reg_bank))
retval = False
if self.ser_com:
cli_cmd = b'writeReg %d %d %d\n' % (reg_addr, reg_data, reg_bank)
retval = self.__send_cmd(cli_cmd)
else:
print("CMD: Not init")
return retval
def __send_midi_cmd(self, midi_cmd, cmd_payload):
retval = False
# Check if midi interface def
if self.midiout:
print("MIDI: Send data start", self.midiout.get_port_name(self.midi_com))
# CMD set preset
sysex_cmd = midi_cmd
# Set data into array
sysex_data = []
# SysEx init
sysex_data.append(0xF0) # SysEx init
# Vendor id
sysex_data.append((self.vendor_id >> 16) & 0xFF)
sysex_data.append((self.vendor_id >> 8) & 0xFF)
sysex_data.append((self.vendor_id >> 0) & 0xFF)
# Cmd
sysex_data.append(sysex_cmd)
# Preset pos
sysex_data.extend(cmd_payload)
# SysEx end
sysex_data.append(0xF7)
# Data TX
self.midiout.send_message(sysex_data)
# Show used parameters
print("MIDI: Data len", len(sysex_data))
print("MIDI: SysEx CMD x%02X" % sysex_cmd)
print("MIDI: Send finish")
retval = True
else:
print("MIDI: Not init")
return retval
def reset_board(self):
print("CMD: reset")
retval = False
if self.ser_com:
cli_cmd = b'reset\n'
retval = self.__send_cmd(cli_cmd)
if retval:
time.sleep(2)
else:
print("CMD: Not init")
return retval
def setup_channel(self, channel_id):
retval = False
print("CMD: Setup channel", channel_id)
if self.ser_com:
if (channel_id < 6):
channel_offset = channel_id % 3
reg_sel = channel_id / 3
# Set feedback and algorithm
reg = self.channel[channel_id].get_reg_FBALG()
retval = self.__write_reg(0xB0 + channel_offset, reg, reg_sel)
# Set Audio out and lfo destination
reg = self.channel[channel_id].get_reg_LRAMSPMS()
retval &= self.__write_reg(0xB4 + channel_offset, reg, reg_sel)
return retval
else:
print("CH: id not valid")
else:
print("CMD: Not init")
return retval
def setup_operator(self, channel_id, operator_id):
retval = False
print("CMD: Setup channel", channel_id, "Operator", operator_id)
if (channel_id < 6) and (operator_id < 4):
operator_offset = operator_id * 4
channel_offset = channel_id % 3
reg_sel = channel_id / 3
# Set DT/MUL
reg = self.channel[channel_id].operator[operator_id].get_reg_DETMUL()
retval = self.__write_reg(0x30 + channel_offset + operator_offset, reg, reg_sel)
# Set TL, 0dB-96dB
reg = self.channel[channel_id].operator[operator_id].get_reg_TL()
retval &= self.__write_reg(0x40 + channel_offset + operator_offset, reg, reg_sel)
# Set KS/AR
reg = self.channel[channel_id].operator[operator_id].get_reg_KSAR()
retval &= self.__write_reg(0x50 + channel_offset + operator_offset, reg, reg_sel)
# Set AM/DR
reg = self.channel[channel_id].operator[operator_id].get_reg_AMDR()
retval &= self.__write_reg(0x60 + channel_offset + operator_offset, reg, reg_sel)
# Set SR
reg = self.channel[channel_id].operator[operator_id].get_reg_SR()
retval &= self.__write_reg(0x70 + channel_offset + operator_offset, reg, reg_sel)
# Set SL/RL
reg = self.channel[channel_id].operator[operator_id].get_reg_SLRL()
retval &= self.__write_reg(0x80 + channel_offset + operator_offset, reg, reg_sel)
# Set SSG-EG
reg = self.channel[channel_id].operator[operator_id].get_reg_SSGEG()
retval &= self.__write_reg(0x90 + channel_offset + operator_offset, reg, reg_sel)
else:
print("OP: id not valid")
return retval
def setup_LFO(self):
retval = False
if self.ser_com:
reg = self.__get_LFO()
retval = self.__write_reg(0x22, reg, 0)
else:
print("CMD: Not init")
return retval
def __get_LFO(self):
reg = ((self.lfo_on & 0x01) << 3) | (self.lfo_freq & 0x07)
return reg
def set_reg_values(self):
"""
Set current values
"""
retval = False
print("CMD: Write reg values on chip")
if self.ser_com:
print("SETCFG: Start")
# Set board registers
retval = self.setup_LFO()
for voice in range(6):
retval &= self.setup_channel(voice)
for operator in range(4):
retval &= self.setup_operator(voice, operator)
print("SETCFG: End")
else:
print("CMD: Not init")
return retval
def __get_reg_values_array(self):
reg_array = []
# Get general regs
reg_array.append(self.lfo_on)
reg_array.append(self.lfo_freq)
# Get channel info
for ch_id in range(6):
reg_array.append(self.channel[ch_id].feedback)
reg_array.append(self.channel[ch_id].op_algorithm)
reg_array.append(self.channel[ch_id].audio_out)
reg_array.append(self.channel[ch_id].amp_mod_sens)
reg_array.append(self.channel[ch_id].phase_mod_sens)
# Add operator data
for op_id in range(4):
reg_array.append(self.channel[ch_id].operator[op_id].detune)
reg_array.append(self.channel[ch_id].operator[op_id].multiple)
reg_array.append(self.channel[ch_id].operator[op_id].total_level)
reg_array.append(self.channel[ch_id].operator[op_id].key_scale)
reg_array.append(self.channel[ch_id].operator[op_id].attack_rate)
reg_array.append(self.channel[ch_id].operator[op_id].amp_mod_on)
reg_array.append(self.channel[ch_id].operator[op_id].decay_rate)
reg_array.append(self.channel[ch_id].operator[op_id].sustain_rate)
reg_array.append(self.channel[ch_id].operator[op_id].sustain_level)
reg_array.append(self.channel[ch_id].operator[op_id].release_rate)
reg_array.append(self.channel[ch_id].operator[op_id].ssg_envelope)
# Show reg values
self.show_reg_values()
# Return collected data
return reg_array
def show_reg_values(self):
"""
Print current reg values in c format
"""
print("CMD: Show current reg values\r\n")
print("")
print(">> ===== C_REGISTER_MAP ===== <<")
print("")
# Set board registers
print("static const xFmDevice_t xPreset = {")
print(".u8LfoOn = %dU," % self.lfo_on)
print(".u8LfoFreq = %dU," % self.lfo_freq)
for voice in range(YM_MAX_VOICES):
print(" // VOICE", voice)
print(".xChannel[%dU].u8Algorithm = %dU," % (voice, self.channel[voice].op_algorithm))
print(".xChannel[%dU].u8Feedback = %dU," % (voice, self.channel[voice].feedback))
print(".xChannel[%dU].u8AudioOut = %dU," % (voice, self.channel[voice].audio_out))
print(".xChannel[%dU].u8PhaseModSens = %dU," % (voice, self.channel[voice].phase_mod_sens))
print(".xChannel[%dU].u8AmpModSens = %dU," % (voice, self.channel[voice].amp_mod_sens))
for operator in range(YM_MAX_OPERATORS):
print(" // OPERATOR", operator)
print(".xChannel[%dU].xOperator[%dU].u8Detune = %dU," % (voice, operator, self.channel[voice].operator[operator].detune))
print(".xChannel[%dU].xOperator[%dU].u8Multiple = %dU," % (voice, operator, self.channel[voice].operator[operator].multiple))
print(".xChannel[%dU].xOperator[%dU].u8TotalLevel = %dU," % (voice, operator, self.channel[voice].operator[operator].total_level))
print(".xChannel[%dU].xOperator[%dU].u8KeyScale = %dU," % (voice, operator, self.channel[voice].operator[operator].key_scale))
print(".xChannel[%dU].xOperator[%dU].u8AttackRate = %dU," % (voice, operator, self.channel[voice].operator[operator].attack_rate))
print(".xChannel[%dU].xOperator[%dU].u8AmpMod = %dU," % (voice, operator, self.channel[voice].operator[operator].amp_mod_on))
print(".xChannel[%dU].xOperator[%dU].u8DecayRate = %dU," % (voice, operator, self.channel[voice].operator[operator].decay_rate))
print(".xChannel[%dU].xOperator[%dU].u8SustainRate = %dU," % (voice, operator, self.channel[voice].operator[operator].sustain_rate))
print(".xChannel[%dU].xOperator[%dU].u8SustainLevel = %dU," % (voice, operator, self.channel[voice].operator[operator].sustain_level))
print(".xChannel[%dU].xOperator[%dU].u8ReleaseRate = %dU," % (voice, operator, self.channel[voice].operator[operator].release_rate))
print(".xChannel[%dU].xOperator[%dU].u8SsgEg = %dU," % (voice, operator, self.channel[voice].operator[operator].ssg_envelope))
print("};")
print("")
def midi_set_reg_values(self):
"""
Send register values over midi interface
"""
# Generate CMD payload
cmd_midi = YM_SYSEX_CMD_SET_REG
cmd_payload = []
cmd_payload.extend(self.__get_reg_values_array())
# Send command
retval = self.__send_midi_cmd(cmd_midi, cmd_payload)
return retval
def midi_save_preset(self, preset_pos=0, preset_name=""):
"""
Save current register conf into specified preset position
preset_pos: preset position [0:15]
preset_name: string of 15 characters to identify the preset
"""
retval = False
if preset_pos < YM_MAX_NUM_USER_PRESETS:
# Generate CMD payload
cmd_midi = YM_SYSEX_CMD_SAVE_PRESET
cmd_payload = []
# Preset pos
cmd_payload.append(preset_pos)
# Preset name
format_preset_name = "{:<15}".format(preset_name)
for str_ch in format_preset_name:
cmd_payload.append((ord(str_ch) >> 0) & 0x0F)
cmd_payload.append((ord(str_ch) >> 4) & 0x0F)
# Register data
cmd_payload.extend(self.__get_reg_values_array())
# Send command
retval = self.__send_midi_cmd(cmd_midi, cmd_payload)
return retval
def midi_load_default_preset(self, preset_pos=0):
"""
Load default const preset
preset_pos: preset position [0:8]
"""
retval = False
if preset_pos < YM_MAX_NUM_DEFAULT_PRESETS:
# Generate CMD payload
cmd_midi = YM_SYSEX_CMD_LOAD_DEFAULT_PRESET
cmd_payload = []
# Preset pos
cmd_payload.append(preset_pos)
# Send command
retval = self.__send_midi_cmd(cmd_midi, cmd_payload)
return retval
def midi_load_preset(self, preset_pos=0):
"""
Load saved preset
preset_pos: preset position [0:8]
"""
retval = False
if preset_pos < YM_MAX_NUM_USER_PRESETS:
# Generate CMD payload
cmd_midi = YM_SYSEX_CMD_LOAD_PRESET
cmd_payload = []
# Preset pos
cmd_payload.append(preset_pos)
# Send command
retval = self.__send_midi_cmd(cmd_midi, cmd_payload)
return retval
def set_custom_preset(self):
"""
Set preset custom preset
"""
retval = True
# Set board registers
self.lfo_on = 1
self.lfo_freq = 0
for voice in range(6):
print("PRESET: Setup voice", voice)
# Setup voice 0
self.channel[voice].op_algorithm = 4
self.channel[voice].feedback = 3
self.channel[voice].audio_out = 3
self.channel[voice].phase_mod_sens = 0
self.channel[voice].amp_mod_sens = 2
# Setup operator 0
self.channel[voice].operator[0].total_level = 0x28 # 30
self.channel[voice].operator[0].multiple = 15
self.channel[voice].operator[0].detune = 3
self.channel[voice].operator[0].attack_rate = 31
self.channel[voice].operator[0].decay_rate = 4
self.channel[voice].operator[0].sustain_level = 0
self.channel[voice].operator[0].sustain_rate = 10
self.channel[voice].operator[0].release_rate = 3
self.channel[voice].operator[0].key_scale = 1
self.channel[voice].operator[0].amp_mod_on = 1
self.channel[voice].operator[0].ssg_envelope = 0x00 # OFF
# Setup operator 1
self.channel[voice].operator[1].total_level = 0x07
self.channel[voice].operator[1].multiple = 3
self.channel[voice].operator[1].detune = 5 # -1
self.channel[voice].operator[1].attack_rate = 30
self.channel[voice].operator[1].decay_rate = 8
self.channel[voice].operator[1].sustain_level = 3
self.channel[voice].operator[1].sustain_rate = 6
self.channel[voice].operator[1].release_rate = 3
self.channel[voice].operator[1].key_scale = 1
self.channel[voice].operator[1].amp_mod_on = 0
self.channel[voice].operator[1].ssg_envelope = 0x00 # OFF
# Setup operator 2
self.channel[voice].operator[2].total_level = 0x19
self.channel[voice].operator[2].multiple = 7
self.channel[voice].operator[2].detune = 5 # -1
self.channel[voice].operator[2].attack_rate = 31
self.channel[voice].operator[2].decay_rate = 4
self.channel[voice].operator[2].sustain_level = 3
self.channel[voice].operator[2].sustain_rate = 17
self.channel[voice].operator[2].release_rate = 1
self.channel[voice].operator[2].key_scale = 1
self.channel[voice].operator[2].amp_mod_on = 0
self.channel[voice].operator[2].ssg_envelope = 0x00 # OFF
# Setup operator 3
self.channel[voice].operator[3].total_level = 0x03
self.channel[voice].operator[3].multiple = 2
self.channel[voice].operator[3].detune = 4
self.channel[voice].operator[3].attack_rate = 31
self.channel[voice].operator[3].decay_rate = 5
self.channel[voice].operator[3].sustain_level = 2
self.channel[voice].operator[3].sustain_rate = 12
self.channel[voice].operator[3].release_rate = 3
self.channel[voice].operator[3].key_scale = 1
self.channel[voice].operator[3].amp_mod_on = 0
self.channel[voice].operator[3].ssg_envelope = 0x00 # OFF
if retval:
if self.midi_com:
retval = self.midi_set_reg_values()
elif self.ser_com:
retval = self.set_reg_values()
print("PRESET: End")
return retval
class __YMChannel:
"""
Class to handle YM2612 voice
"""
def __init__(self,
channel_id=0,
feedback=0,
op_algorithm=0,
audio_out=3,
phase_mod_sens=0,
amp_mod_sens=0):
# Channel attributes
self.channel_id = channel_id
self.feedback = feedback
self.op_algorithm = op_algorithm
self.audio_out = audio_out
self.phase_mod_sens = phase_mod_sens
self.amp_mod_sens = amp_mod_sens
# Channel operators
self.operator = {
0: self.__YMOperator(op_id=0),
1: self.__YMOperator(op_id=1),
2: self.__YMOperator(op_id=2),
3: self.__YMOperator(op_id=3),
}
def get_reg_LRAMSPMS(self):
reg = ((self.audio_out & 0x03) << 6) | ((self.amp_mod_sens & 0x03) << 4) | (self.phase_mod_sens & 0x07)
return reg
def get_reg_FBALG(self):
reg = ((self.feedback & 0x03) << 3) | (self.op_algorithm & 0x07)
return reg
class __YMOperator:
"""
Class with operator data
"""
def __init__(self,
op_id=0,
detune=0,
multiple=0,
total_level=0,
key_scale=0,
attack_rate=0,
amp_mod_on=0,
decay_rate=0,
sustain_rate=0,
sustain_level=0,
release_rate=0,
ssg_envelope=0):
# Operator attributes
self.detune = detune
self.multiple = multiple
self.total_level = total_level
self.key_scale = key_scale
self.attack_rate = attack_rate
self.amp_mod_on = amp_mod_on
self.decay_rate = decay_rate
self.sustain_rate = sustain_rate
self.sustain_level = sustain_level
self.release_rate = release_rate
self.ssg_envelope = ssg_envelope
def get_reg_DETMUL(self):
reg = ((self.detune << 4) & 0x70) | (self.multiple & 0x0F)
return reg
def get_reg_TL(self):
reg = self.total_level & 0x7F
return reg
def get_reg_KSAR(self):
reg = ((self.key_scale & 0x03) << 6) | (self.attack_rate & 0x1F)
return reg
def get_reg_AMDR(self):
reg = ((self.amp_mod_on & 0x01) << 7) | (self.decay_rate & 0x1F)
return reg
def get_reg_SR(self):
reg = (self.sustain_rate & 0x1F)
return reg
def get_reg_SLRL(self):
reg = ((self.sustain_level & 0x0F) << 4) | (self.release_rate & 0x0F)
return reg
def get_reg_SSGEG(self):
reg = self.ssg_envelope & 0x0F
return reg
def main():
    """Manual smoke test: open a MIDI port and load a factory preset."""
    print("\r\nYM2612: Preset TEST\r\n")
    # chip = YM2612Chip(ser_com="COM3")
    chip = YM2612Chip(midi_com=2)
    # chip.set_custom_preset()
    chip.midi_load_default_preset(YM_PRESET_SAWTOOTH)


if __name__ == "__main__":
    main()
    sys.exit(0)
class Solution:
    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
        """
        Do not return anything, modify nums1 in-place instead.
        """
        # Fill nums1 from the back: at each step place the larger of the two
        # current tail elements at the write cursor.
        write = m + n - 1
        i, j = m - 1, n - 1
        while i >= 0 and j >= 0:
            if nums1[i] >= nums2[j]:
                nums1[write] = nums1[i]
                i -= 1
            else:
                nums1[write] = nums2[j]
                j -= 1
            write -= 1
        # Leftovers from nums2 go at the front; leftovers from nums1 are
        # already in place. Needed e.g. for nums1=[0], m=0, nums2=[2], n=1.
        if j >= 0:
            nums1[:j + 1] = nums2[:j + 1]
# Problem Number 88 #############
###### Time complexity= O(m +n), Space Complexity= O(1) (need to check) ###################
# Source: billchenxi/ClaraGenomicsAnalysis
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""Utility functions for I/O of custom files."""
def read_poa_group_file(file_path, num_windows=0):
    """Parses data file containing POA groups.

    Args:
        file_path : Path to POA group file.
        num_windows : Number of windows to extract from
                      file. If requested is more than available
                      in file, windows are repeated in a circular
                      loop like fashion.
                      0 (default) implies using only those windows
                      available in file.

    Returns:
        List of groups, each group being a list of sequence strings.

    File format is as follows -
    <num sequences>
    seq 1...
    seq 2...
    <num sequences>
    seq 1...
    seq 2...
    seq 3...
    """
    group_list = []
    with open(file_path, "r") as window_file:
        num_seqs_in_group = 0
        current_seq_list = []
        for line in window_file:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. trailing newline) which would
                # otherwise crash int() when a group-count header is expected.
                continue
            if num_seqs_in_group == 0:
                # Header line: number of sequences in the next group.
                # Flush the previously accumulated group first.
                if current_seq_list:
                    group_list.append(current_seq_list)
                    current_seq_list = []
                num_seqs_in_group = int(line)
            else:
                current_seq_list.append(line)
                num_seqs_in_group -= 1
        # BUGFIX: the final group was previously dropped because groups were
        # only flushed when the *next* header line was encountered.
        if current_seq_list:
            group_list.append(current_seq_list)
    if num_windows > 0:
        if num_windows < len(group_list):
            group_list = group_list[:num_windows]
        elif group_list:
            # Repeat windows circularly until the requested count is reached.
            # (Guarding on a non-empty list avoids a modulo-by-zero crash.)
            original_num_windows = len(group_list)
            for i in range(num_windows - original_num_windows):
                group_list.append(group_list[i % original_num_windows])
    return group_list
# Source file: funcx_sdk/funcx/sdk/client.py
from __future__ import annotations
import asyncio
import json
import logging
import os
import typing as t
import uuid
import warnings
from funcx.sdk._environments import get_web_service_url, get_web_socket_url
from funcx.sdk.asynchronous.funcx_task import FuncXTask
from funcx.sdk.asynchronous.ws_polling_task import WebSocketPollingTask
from funcx.sdk.search import SearchHelper
from funcx.sdk.utils.batch import Batch
from funcx.sdk.web_client import FunctionRegistrationData
from funcx.serialize import FuncXSerializer
from funcx.utils.errors import SerializationError, TaskPending, VersionMismatch
from funcx.utils.handle_service_response import handle_response_errors
from .login_manager import LoginManager, LoginManagerProtocol
from .version import PARSED_VERSION, parse_version
# Module-level logger for the funcX SDK client
logger = logging.getLogger(__name__)

# Default funcX state directory (tilde is expanded by FuncXClient at runtime)
_FUNCX_HOME = os.path.join("~", ".funcx")
class FuncXClient:
    """Main class for interacting with the funcX service

    Holds helper operations for performing common tasks with the funcX service.
    """

    # OAuth native-app client ID used for login flows (overridable via env var)
    FUNCX_SDK_CLIENT_ID = os.environ.get(
        "FUNCX_SDK_CLIENT_ID", "4cf29807-cf21-49ec-9443-ff9a3fb9f81c"
    )
    # Globus Auth scope requested for the funcX service (overridable via env var)
    FUNCX_SCOPE = os.environ.get(
        "FUNCX_SCOPE",
        "https://auth.globus.org/scopes/facd7ccc-c5f4-42aa-916b-a0e270e2c2a9/all",
    )
def __init__(
    self,
    http_timeout=None,
    funcx_home=_FUNCX_HOME,
    funcx_service_address=None,
    asynchronous=False,
    loop=None,
    results_ws_uri=None,
    use_offprocess_checker=True,
    environment: str | None = None,
    task_group_id: t.Union[None, uuid.UUID, str] = None,
    do_version_check: bool = True,
    openid_authorizer: t.Any = None,
    search_authorizer: t.Any = None,
    fx_authorizer: t.Any = None,
    *,
    login_manager: LoginManagerProtocol | None = None,
    **kwargs,
):
    """
    Initialize the client

    Parameters
    ----------
    http_timeout: int
        Timeout for any call to service in seconds.
        Default is no timeout
    funcx_service_address: str
        For internal use only. The address of the web service.
    results_ws_uri: str
        For internal use only. The address of the websocket service.
    environment: str
        For internal use only. The name of the environment to use.
    do_version_check: bool
        Set to ``False`` to skip the version compatibility check on client
        initialization
        Default: True
    asynchronous: bool
        Should the API use asynchronous interactions with the web service?
        Currently only impacts the run method
        Default: False
    loop: AbstractEventLoop
        If asynchronous mode is requested, then you can provide an optional
        event loop instance. If None, then we will access
        asyncio.get_event_loop()
        Default: None
    use_offprocess_checker: Bool,
        Use this option to disable the offprocess_checker in the
        FuncXSerializer used by the client.
        Default: True
    task_group_id: str|uuid.UUID
        Set the TaskGroup ID (a UUID) for this FuncXClient instance.
        Typically, one uses this to submit new tasks to an existing session
        or to reestablish FuncXExecutor futures.
        Default: None (will be auto generated)
    login_manager: LoginManagerProtocol
        If supplied, no login flow is triggered; otherwise an implicit
        LoginManager is created and the user may be prompted to log in.
    """
    # resolve URLs if not set
    if funcx_service_address is None:
        funcx_service_address = get_web_service_url(environment)
    if results_ws_uri is None:
        results_ws_uri = get_web_socket_url(environment)

    # Local cache of task status blocks keyed by task UUID
    self._task_status_table: t.Dict[str, t.Dict] = {}
    self.use_offprocess_checker = use_offprocess_checker
    self.funcx_home = os.path.expanduser(funcx_home)
    # Reuse a caller-supplied task group id if given, else mint a fresh one
    self.session_task_group_id = (
        task_group_id and str(task_group_id) or str(uuid.uuid4())
    )

    # Legacy authorizer arguments are accepted but ignored (deprecated)
    for (arg, name) in [
        (openid_authorizer, "openid_authorizer"),
        (fx_authorizer, "fx_authorizer"),
        (search_authorizer, "search_authorizer"),
    ]:
        if arg is not None:
            warnings.warn(
                f"The '{name}' argument is deprecated. "
                "It will be removed in a future release.",
                DeprecationWarning,
            )

    # if a login manager was passed, no login flow is triggered
    if login_manager is not None:
        self.login_manager: LoginManagerProtocol = login_manager
    # but if login handling is implicit (as when no login manager is passed)
    # then ensure that the user is logged in
    else:
        self.login_manager = LoginManager(environment=environment)
        self.login_manager.ensure_logged_in()

    self.web_client = self.login_manager.get_funcx_web_client(
        base_url=funcx_service_address
    )
    self.fx_serializer = FuncXSerializer(
        use_offprocess_checker=self.use_offprocess_checker
    )

    self.funcx_service_address = funcx_service_address

    # May raise VersionMismatch before any task is submitted
    if do_version_check:
        self.version_check()

    self.results_ws_uri = results_ws_uri
    self.asynchronous = asynchronous
    if asynchronous:
        self.loop = loop if loop else asyncio.get_event_loop()

        # Start up an asynchronous polling loop in the background
        self.ws_polling_task = WebSocketPollingTask(
            self,
            self.loop,
            init_task_group_id=self.session_task_group_id,
            results_ws_uri=self.results_ws_uri,
        )
    else:
        self.loop = None

    # TODO: remove this
    self._searcher = None
@property
def searcher(self):
    """Lazily construct and cache the SearchHelper on first access."""
    # TODO: remove this
    if self._searcher is None:
        search_client = self.login_manager.get_search_client()
        self._searcher = SearchHelper(search_client)
    return self._searcher
def version_check(self, endpoint_version: str | None = None) -> None:
    """Check this client version meets the service's minimum supported version.

    When ``endpoint_version`` is given, that endpoint version is validated
    against the service minimum; otherwise this SDK's own version is checked.
    Raises ``VersionMismatch`` on failure.
    """
    data = self.web_client.get_version()

    min_ep_version = data["min_ep_version"]
    min_sdk_version = data["min_sdk_version"]

    if endpoint_version is None:
        # No endpoint version supplied: validate the SDK itself
        if PARSED_VERSION < parse_version(min_sdk_version):
            raise VersionMismatch(
                f"Your version={PARSED_VERSION} is lower than the "
                f"minimum version for funcx SDK: {min_sdk_version}. "
                "Please update. "
                f"pip install funcx>={min_sdk_version}"
            )
    elif parse_version(endpoint_version) < parse_version(min_ep_version):
        raise VersionMismatch(
            f"Your version={endpoint_version} is lower than the "
            f"minimum version for an endpoint: {min_ep_version}. "
            "Please update. "
            f"pip install funcx-endpoint>={min_ep_version}"
        )
def logout(self):
    """Remove credentials from your local system"""
    # Delegates token revocation/removal to the login manager
    self.login_manager.logout()
def _update_task_table(self, return_msg: str | t.Dict, task_id: str):
    """
    Parses the return message from the service and updates the
    internal _task_status_table

    Parameters
    ----------
    return_msg : str | t.Dict
        Return message received from the funcx service
    task_id : str
        task id string

    Returns
    -------
    dict
        The status block stored for this task: always has "pending" and
        "status" keys; on completion also "result"/"completion_t" or
        "exception"/"completion_t".

    Raises
    ------
    SerializationError
        If the task result or exception payload cannot be deserialized.
    """
    # Accept both raw JSON strings and already-decoded dicts
    if isinstance(return_msg, str):
        r_dict = json.loads(return_msg)
    else:
        r_dict = return_msg

    r_status = r_dict.get("status", "unknown").lower()
    # Anything other than success/failed is considered still in flight
    pending = r_status not in ("success", "failed")
    status = {"pending": pending, "status": r_status}

    if not pending:
        if "result" in r_dict:
            try:
                # Deserialize the user function's return value
                r_obj = self.fx_serializer.deserialize(r_dict["result"])
                completion_t = r_dict["completion_t"]
            except Exception:
                raise SerializationError("Result Object Deserialization")
            else:
                status.update({"result": r_obj, "completion_t": completion_t})
        elif "exception" in r_dict:
            try:
                # Deserialize the exception raised by the user function
                r_exception = self.fx_serializer.deserialize(r_dict["exception"])
                completion_t = r_dict["completion_t"]
                logger.info(f"Exception : {r_exception}")
            except Exception:
                raise SerializationError("Task's exception object deserialization")
            else:
                status.update(
                    {
                        "exception": r_exception,
                        "completion_t": completion_t,
                    }
                )
        else:
            # Terminal state with no payload: surface the reason as an exception
            reason = r_dict.get("reason", str(r_dict))
            status["exception"] = Exception(reason)

    self._task_status_table[task_id] = status
    return status
def get_task(self, task_id):
    """Get a funcX task.

    Parameters
    ----------
    task_id : str
        UUID of the task

    Returns
    -------
    dict
        Task block containing "status" key.
    """
    task = self._task_status_table.get(task_id, {})
    if task.get("pending", True) is False:
        # Terminal state already cached locally; skip the service round-trip
        return task

    r = self.web_client.get_task(task_id)
    logger.debug(f"Response string : {r}")
    # NOTE: previously wrapped in a no-op ``try/except Exception as e:
    # raise e`` which only cluttered tracebacks; removed.
    return self._update_task_table(r.text, task_id)
def get_result(self, task_id):
    """Get the result of a funcX task

    Parameters
    ----------
    task_id: str
        UUID of the task

    Returns
    -------
    Result obj: If task completed

    Raises
    ------
    Exception obj: Exception due to which the task failed
    """
    task = self.get_task(task_id)
    if task["pending"] is True:
        raise TaskPending(task["status"])

    if "result" in task:
        return task["result"]

    # Task failed remotely: log and re-raise the shipped exception
    logger.warning("We have an exception : {}".format(task["exception"]))
    task["exception"].reraise()
def get_batch_result(self, task_id_list):
    """Request status for a batch of task_ids

    Parameters
    ----------
    task_id_list : list of str
        Task UUIDs to poll.

    Returns
    -------
    dict
        Mapping of task_id -> status block (see ``_update_task_table``).
    """
    assert isinstance(
        task_id_list, list
    ), "get_batch_result expects a list of task ids"

    # Only ask the service about tasks not already in a terminal state
    pending_task_ids = [
        task_id
        for task_id in task_id_list
        if self._task_status_table.get(task_id, {}).get("pending", True) is True
    ]

    results = {}

    if pending_task_ids:
        r = self.web_client.get_batch_status(pending_task_ids)
        logger.debug(f"Response string : {r}")

    pending_task_ids = set(pending_task_ids)

    for task_id in task_id_list:
        if task_id in pending_task_ids:
            try:
                data = r["results"][task_id]
                rets = self._update_task_table(data, task_id)
                results[task_id] = rets
            except KeyError:
                # BUGFIX: the message previously contained an unfilled "{}"
                # placeholder; pass task_id as a lazy logging argument.
                logger.debug(
                    "Task %s info was not available in the batch status", task_id
                )
            except Exception:
                logger.exception(
                    "Failure while unpacking results fom get_batch_result"
                )
        else:
            # Terminal tasks are served straight from the local cache
            results[task_id] = self._task_status_table[task_id]

    return results
def run(self, *args, endpoint_id=None, function_id=None, **kwargs) -> str:
    """Initiate an invocation

    Parameters
    ----------
    *args : Any
        Args as specified by the function signature
    endpoint_id : uuid str
        Endpoint UUID string. Required
    function_id : uuid str
        Function UUID string. Required

    Returns
    -------
    task_id : str
        UUID string that identifies the task when the client is synchronous
    funcX Task: asyncio.Task
        A future that will eventually resolve into the function's result when
        the client was constructed with asynchronous=True
    """
    assert endpoint_id is not None, "endpoint_id key-word argument must be set"
    assert function_id is not None, "function_id key-word argument must be set"

    # Delegate to the batch machinery with a single-task batch
    single = self.create_batch()
    single.add(*args, endpoint_id=endpoint_id, function_id=function_id, **kwargs)
    return self.batch_run(single)[0]
def create_batch(self, task_group_id=None) -> Batch:
    """
    Create a Batch instance to handle batch submission in funcX

    Parameters
    ----------
    task_group_id : str
        Override the session wide session_task_group_id with a different
        task_group_id for this batch.
        If task_group_id is not specified, it will default to using the
        client's session_task_group_id

    Returns
    -------
    Batch instance
    """
    # Fall back to the session-wide task group when none is supplied
    return Batch(task_group_id=task_group_id or self.session_task_group_id)
def batch_run(self, batch) -> t.List[str]:
    """Initiate a batch of tasks to funcX

    Parameters
    ----------
    batch: a Batch object
        Must be non-empty; typically produced by ``create_batch``.

    Returns
    -------
    task_ids : a list of UUID strings that identify the tasks
        (or, when the client was constructed with asynchronous=True, a list
        of asyncio Tasks resolving to the results)
    """
    assert isinstance(batch, Batch), "Requires a Batch object as input"
    assert len(batch.tasks) > 0, "Requires a non-empty batch"

    data = batch.prepare()

    # Send the data to funcX
    r = self.web_client.submit(data)

    task_uuids: t.List[str] = []
    for result in r["results"]:
        task_id = result["task_uuid"]
        task_uuids.append(task_id)
        if result["http_status_code"] != 200:
            # this method of handling errors for a batch response is not
            # ideal, as it will raise any error in the multi-response,
            # but it will do until batch_run is deprecated in favor of Executer
            handle_response_errors(result)

    if self.asynchronous:
        task_group_id = r["task_group_id"]
        asyncio_tasks = []
        for task_id in task_uuids:
            # Wrap each task id in an awaitable and register it with the
            # background websocket poller so results stream in as they finish
            funcx_task = FuncXTask(task_id)
            asyncio_task = self.loop.create_task(funcx_task.get_result())
            asyncio_tasks.append(asyncio_task)

            self.ws_polling_task.add_task(funcx_task)
        self.ws_polling_task.put_task_group_id(task_group_id)
        return asyncio_tasks

    return task_uuids
def map_run(
    self, *args, endpoint_id=None, function_id=None, asynchronous=False, **kwargs
):
    """Initiate an invocation over an iterable of inputs (one task per item)

    Parameters
    ----------
    *args : Any
        A single iterable; each element becomes the positional argument of
        one task
    endpoint_id : uuid str
        Endpoint UUID string. Required
    function_id : uuid str
        Function UUID string. Required
    asynchronous : bool
        Whether or not to run the function asynchronously

    Returns
    -------
    task_id : str
        UUID strings that identify the tasks
    """
    assert endpoint_id is not None, "endpoint_id key-word argument must be set"
    assert function_id is not None, "function_id key-word argument must be set"

    # Shared keyword args are serialized once; each element of the iterable
    # gets its own serialized positional-args buffer.
    ser_kwargs = self.fx_serializer.serialize(kwargs)
    batch_payload = [
        self.fx_serializer.pack_buffers(
            [self.fx_serializer.serialize((arg,)), ser_kwargs]
        )
        for arg in args[0]
    ]

    data = {
        "endpoints": [endpoint_id],
        "func": function_id,
        "payload": batch_payload,
        "is_async": asynchronous,
    }

    # Send the data to funcX
    r = self.web_client.submit_batch(data)
    return r["task_uuids"]
def register_endpoint(
    self, name, endpoint_uuid, metadata=None, endpoint_version=None
):
    """Register an endpoint with the funcX service.

    Parameters
    ----------
    name : str
        Name of the endpoint
    endpoint_uuid : str
        The uuid of the endpoint
    metadata : dict
        endpoint metadata, see default_config example
    endpoint_version: str
        Version string to be passed to the webService as a compatibility check

    Returns
    -------
    A dict
        {'endpoint_id' : <>,
         'address' : <>,
         'client_ports': <>}
    """
    # Fail fast if this SDK is too old for the service
    self.version_check()

    response = self.web_client.register_endpoint(
        endpoint_name=name,
        endpoint_id=endpoint_uuid,
        metadata=metadata,
        endpoint_version=endpoint_version,
    )
    return response.data
def get_containers(self, name, description=None):
    """
    Register a DLHub endpoint with the funcX service and get the containers
    to launch.

    Parameters
    ----------
    name : str
        Name of the endpoint
    description : str
        Description of the endpoint

    Returns
    -------
    int
        The port to connect to and a list of containers
    """
    payload = {"endpoint_name": name, "description": description}

    response = self.web_client.post("get_containers", data=payload)
    data = response.data
    return data["endpoint_uuid"], data["endpoint_containers"]
def get_container(self, container_uuid, container_type):
    """Get the details of a container for staging it locally.

    Parameters
    ----------
    container_uuid : str
        UUID of the container in question
    container_type : str
        The type of containers that will be used (Singularity, Shifter, Docker)

    Returns
    -------
    dict
        The details of the containers to deploy
    """
    # Ensure the SDK is still compatible before hitting the container API
    self.version_check()

    response = self.web_client.get(f"containers/{container_uuid}/{container_type}")
    return response.data["container"]
def get_endpoint_status(self, endpoint_uuid):
    """Get the status reports for an endpoint.

    Parameters
    ----------
    endpoint_uuid : str
        UUID of the endpoint in question

    Returns
    -------
    dict
        The details of the endpoint's stats
    """
    # Thin wrapper over the web client's status endpoint
    response = self.web_client.get_endpoint_status(endpoint_uuid)
    return response.data
def register_function(
    self,
    function,
    function_name=None,
    container_uuid=None,
    description=None,
    public=False,
    group=None,
    searchable=True,
):
    """Register a function code with the funcX service.

    Parameters
    ----------
    function : Python Function
        The function to be registered for remote execution
    function_name : str
        The entry point (function name) of the function. Default: None
    container_uuid : str
        Container UUID from registration with funcX
    description : str
        Description of the file
    public : bool
        Whether or not the function is publicly accessible. Default = False
    group : str
        A globus group uuid to share this function with
    searchable : bool
        If true, the function will be indexed into globus search with the
        appropriate permissions

    Returns
    -------
    function uuid : str
        UUID identifier for the registered function
    """
    # Bundle everything (including the serialized function body) into the
    # registration payload expected by the web service.
    registration = FunctionRegistrationData(
        function=function,
        failover_source="",
        container_uuid=container_uuid,
        entry_point=function_name,
        description=description,
        public=public,
        group=group,
        searchable=searchable,
        serializer=self.fx_serializer,
    )
    logger.info(f"Registering function : {registration}")

    response = self.web_client.register_function(registration)
    return response.data["function_uuid"]
    def search_function(self, q, offset=0, limit=10, advanced=False):
        """Search for function via the funcX service
        Parameters
        ----------
        q : str
            free-form query string
        offset : int
            offset into total results
        limit : int
            max number of results to return
        advanced : bool
            allows elastic-search like syntax in query string
        Returns
        -------
        FunctionSearchResults
        """
        # Pagination and query handling are delegated to the search client.
        return self.searcher.search_function(
            q, offset=offset, limit=limit, advanced=advanced
        )
    def search_endpoint(self, q, scope="all", owner_id=None):
        """Search for endpoints via the funcX service.
        Parameters
        ----------
        q : str
            free-form query string
        scope : str
            Can be one of {'all', 'my-endpoints', 'shared-with-me'}
        owner_id : str
            should be urn like f"urn:globus:auth:identity:{owner_uuid}"
        Returns
        -------
        Search results produced by the endpoint search client.
        """
        return self.searcher.search_endpoint(q, scope=scope, owner_id=owner_id)
def register_container(self, location, container_type, name="", description=""):
"""Register a container with the funcX service.
Parameters
----------
location : str
The location of the container (e.g., its docker url). Required
container_type : str
The type of containers that will be used (Singularity, Shifter, Docker).
Required
name : str
A name for the container. Default = ''
description : str
A description to associate with the container. Default = ''
Returns
-------
str
The id of the container
"""
payload = {
"name": name,
"location": location,
"description": description,
"type": container_type,
}
r = self.web_client.post("containers", data=payload)
return r.data["container_id"]
    def add_to_whitelist(self, endpoint_id, function_ids):
        """Adds the function to the endpoint's whitelist
        Parameters
        ----------
        endpoint_id : str
            The uuid of the endpoint
        function_ids : list
            A list of function id's to be whitelisted
        Returns
        -------
        json
            The response of the request
        """
        # Thin wrapper; the web client performs the actual REST call.
        return self.web_client.whitelist_add(endpoint_id, function_ids)
    def get_whitelist(self, endpoint_id):
        """List the endpoint's whitelist
        Parameters
        ----------
        endpoint_id : str
            The uuid of the endpoint
        Returns
        -------
        json
            The response of the request
        """
        # Thin wrapper; the web client performs the actual REST call.
        return self.web_client.get_whitelist(endpoint_id)
def delete_from_whitelist(self, endpoint_id, function_ids):
"""List the endpoint's whitelist
Parameters
----------
endpoint_id : str
The uuid of the endpoint
function_ids : list
A list of function id's to be whitelisted
Returns
-------
json
The response of the request
"""
if not isinstance(function_ids, list):
function_ids = [function_ids]
res = []
for fid in function_ids:
res.append(self.web_client.whitelist_remove(endpoint_id, fid))
return res
| StarcoderdataPython |
150032 | # -*- coding: utf-8 -*-
import click
import argparse
def _display_menu():
print 'TBC'
def _run_update():
return ('TBC')
# Backup the file
# Do the replacements
# Inform user of backup + new file name
@click.command()
@click.option('--file', help='Full path to an Apache httpd.conf file.')
@click.option('--update', is_flag=True, help='Generate a new config file?')
def main(file, update):
    """Console script for checkapache"""
    # NOTE(review): the ``file`` parameter shadows the builtin of the same
    # name; renaming it would change the --file option mapping, so it stays.
    _display_menu()
    # click.echo("Put code into checkapache.cli.main")
    # click.echo("See click documentation at http://click.pocoo.org/")
    if update:
        # Placeholder update routine; currently just returns 'TBC'.
        _run_update()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1718419 | import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
# from matplotlib import pyplot as plt
import scipy
from scipy import signal
from PIL import Image
from scipy.ndimage import median_filter
# 由于卷积核的大小一般是奇数,因此这里假设卷积核是奇数的
'''
####################
图像处理的基本函数
####################
'''
# 图像加框
def addBoundary(img, kernel):
    """Pad an image with a zero-valued border sized to fit the kernel.

    :param img: input image
    :param kernel: convolution kernel (assumed odd-sized and square)
    :return: zero-padded copy of the image
    """
    # Half the kernel width on each side keeps "same" convolution aligned.
    pad = (kernel.shape[0] - 1) // 2
    return cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_CONSTANT, value=0)
def convolve1(img, kernel, filter_type, mode='same'):
    """Convolve a single-channel (grayscale) image with a kernel.

    :param img: 2-D image array
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter' selects median filtering; any other
        value sums the element-wise products (mean/Gauss filters)
    :param mode: 'same' pads the image so the output matches the input size;
        any other value convolves the raw image ("valid"-style output)
    :return: convolved image (uint8; values are clamped by wise_element_sum)
    """
    if mode == 'same':
        img_ = addBoundary(img, kernel)
    else:
        # Bug fix: img_ was previously undefined when mode != 'same',
        # raising NameError; fall back to the unpadded image.
        img_ = img
    kernel_height = kernel.shape[0]
    kernel_width = kernel.shape[1]
    # Number of valid convolution positions along each axis.
    conv_height = img_.shape[0] - kernel_height + 1
    conv_width = img_.shape[1] - kernel_width + 1
    conv = np.zeros((conv_height, conv_width), dtype='uint8')
    for i in range(conv_height):
        for j in range(conv_width):
            conv[i][j] = wise_element_sum(img_[i:i + kernel_height, j:j + kernel_width], kernel, filter_type)
    return conv
def wise_element_sum(img, kernel, filter_type):
    """Compute one output pixel value for a kernel window.

    For 'medium_Filter' the median of the weighted window is used;
    otherwise the weighted sum is used.  Results are clamped to [0, 255].

    :param img: image patch with the same shape as the kernel
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter' selects the median branch
    :return: pixel value clamped to [0, 255]
    """
    weighted = img * kernel
    if filter_type == 'medium_Filter':
        # Row-major flatten + sort reproduces the element-collection order.
        values = sorted(weighted.flatten())
        pixel = values[len(values) // 2]
    else:
        # Mean / Gaussian filters: plain weighted sum.
        pixel = weighted.sum()
    if pixel > 255:
        return 255
    if pixel < 0:
        return 0
    return pixel
def convolve(img, kernel, filter_type, mode='same'):
    """Convolve a 3-channel (color) image by filtering each channel.

    Improvement: removed three unused ``np.mat`` channel copies (R, G, B)
    that were allocated and never read.

    :param img: H x W x 3 image array
    :param kernel: convolution kernel
    :param filter_type: filter selector passed through to :func:`convolve1`
    :param mode: border mode passed through to :func:`convolve1`
    :return: stacked convolved image
    """
    # Channels are filtered independently in input order
    # (cv2 images are conventionally BGR).
    conv_b = convolve1(img[:, :, 0], kernel, filter_type, mode)
    conv_g = convolve1(img[:, :, 1], kernel, filter_type, mode)
    conv_r = convolve1(img[:, :, 2], kernel, filter_type, mode)
    return np.dstack([conv_b, conv_g, conv_r])
'''
############################################
噪声函数
脉冲噪声:add_PulseNoise(img, SNR)
椒盐噪声:add_Salt_PepperNoise(img, SNR)
高斯噪声:add_Gauss_Noise(img, mean, sigma)
#############################################
'''
# 添加脉冲噪声
def add_PulseNoise(img, SNR):
    """Corrupt a grayscale version of *img* with pulse (full-white) noise.

    :param img: color input image (H x W x 3)
    :param SNR: signal-to-noise ratio in [0, 1]; the fraction (1 - SNR) of
        pixels is overwritten with 255
    :return: grayscale matrix with noise applied
    """
    rows, cols, _ = img.shape
    # Classic luminance conversion: Grey = 0.299 R + 0.587 G + 0.114 B
    grey = (np.mat(img[:, :, 0]) * 0.299
            + np.mat(img[:, :, 1]) * 0.587
            + np.mat(img[:, :, 2]) * 0.114)
    num_noise = int((1 - SNR) * rows * cols)
    for _ in range(num_noise):
        # Pick a random cell and saturate it (repeats are possible).
        row = random.randint(0, rows - 1)
        col = random.randint(0, cols - 1)
        grey[row, col] = 255
    return grey
# 添加椒盐噪声
def add_Salt_PepperNoise(img, SNR):
    """Corrupt a grayscale version of *img* with salt & pepper noise.

    :param img: color input image (H x W x 3)
    :param SNR: signal-to-noise ratio in [0, 1]; the fraction (1 - SNR) of
        pixels is overwritten with 0 or 255
    :return: grayscale matrix with noise applied
    """
    rows, cols, _ = img.shape
    # Classic luminance conversion: Grey = 0.299 R + 0.587 G + 0.114 B
    grey = (np.mat(img[:, :, 0]) * 0.299
            + np.mat(img[:, :, 1]) * 0.587
            + np.mat(img[:, :, 2]) * 0.114)
    num_noise = int((1 - SNR) * rows * cols)
    for _ in range(num_noise):
        row = random.randint(0, rows - 1)
        col = random.randint(0, cols - 1)
        # Coin flip between pepper (0) and salt (255).
        if random.randint(0, 1) == 0:
            grey[row, col] = 0
        else:
            grey[row, col] = 255
    return grey
def add_Gauss_Noise(img, mean, sigma):
    """Corrupt a grayscale version of *img* with additive Gaussian noise.

    Improvement: removed a dead local (``Grey_p``) that cast the result to
    uint8 and was immediately discarded; the float matrix is returned, as
    before.

    :param img: color input image (H x W x 3)
    :param mean: mean of the Gaussian noise distribution
    :param sigma: standard deviation of the Gaussian noise distribution
    :return: noisy grayscale matrix rescaled into [0, 255] (float-valued)
    """
    R = np.mat(img[:, :, 0])
    G = np.mat(img[:, :, 1])
    B = np.mat(img[:, :, 2])
    # Classic luminance conversion.
    Grey = R * 0.299 + G * 0.587 + B * 0.114
    # np.random.normal(mean, sigma, shape) draws the additive noise field.
    noise = np.random.normal(mean, sigma, Grey.shape)
    Grey = noise + Grey
    # Shift so the minimum is 0, then rescale the maximum to 255.
    Grey = Grey - np.full(Grey.shape, np.min(Grey))
    Grey = Grey * 255 / np.max(Grey)
    return Grey
'''
##################
均值滤波器:mean_Fileter(img, size)
中值滤波器: medium_Fileter(img, size)
高斯滤波器:gauss_Kernel(mean, sigma, kernel_size)
'''
def mean_Fileter(img, kernel_size):
    """Apply a box (mean) filter of the given size to *img*.

    :param img: input (grayscale) image
    :param kernel_size: side length of the averaging window
    :return: filtered image (uint8)
    """
    # Every coefficient is 1 / kernel_size**2, so each window is averaged.
    kernel = np.full((kernel_size, kernel_size), 1.0 / kernel_size ** 2)
    filtered = convolve1(img, kernel, filter_type='mean_Fileter', mode='same')
    return filtered.astype(np.uint8)
def medium_Filter(img, kernel_size):
    """Apply a median filter of the given size to *img*.

    :param img: input (grayscale) image
    :param kernel_size: side length of the filter window
    :return: filtered image (uint8)
    """
    # The all-ones kernel simply selects the window; convolve1 takes the
    # median when filter_type is 'medium_Filter'.
    window = np.ones((kernel_size, kernel_size))
    filtered = convolve1(img, window, filter_type='medium_Filter', mode="same")
    return filtered.astype(np.uint8)
def Gauss_Fileter(img, kernel_size, sigma):
    """Apply a Gaussian filter to *img*.

    :param img: input (grayscale) image
    :param kernel_size: side length of the Gaussian kernel
    :param sigma: standard deviation of the Gaussian; a value of 0 is
        replaced by 6 to avoid dividing by zero
    :return: filtered image
    """
    if sigma == 0:
        sigma = 6
    kernel = np.zeros([kernel_size, kernel_size])
    center = int(kernel_size / 2)  # kernel midpoint
    total = 0  # running sum used to normalise the kernel
    for row in range(0, kernel_size):
        for col in range(0, kernel_size):
            kernel[row, col] = np.exp(-((row - center) ** 2 + (col - center) ** 2) / (2 * (sigma ** 2)))
            total += kernel[row, col]
    # Normalise so the kernel sums to 1.
    kernel = kernel / total
    return convolve1(img, kernel, filter_type='Gauss_Fileter', mode='same')
def main():
    """Demo driver: add noise to the Lena image and display filter results.

    The large triple-quoted blocks below are disabled experiments kept as
    no-op string literals; they are preserved verbatim.
    """
    img = np.array(Image.open('LenaRGB.bmp'))
    # Add each kind of noise.
    img1 = add_PulseNoise(img, 0.9)
    img2 = add_Salt_PepperNoise(img, 0.9)
    img3 = add_Gauss_Noise(img, 0, 8)
    plt.subplot(321)
    plt.title('PulseNoise')
    plt.imshow(img1, cmap='gray')
    plt.subplot(322)
    plt.title('Salt_PepperNoise')
    plt.imshow(img2, cmap='gray')
    plt.subplot(323)
    plt.title('GaussNoise')
    plt.imshow(img3, cmap='gray')
    '''
    #三种滤波器对脉冲噪声的效果
    img1_1 = mean_Fileter(img1, 3)
    img1_2 = medium_Filter(img1, 3)
    img1_3 = Gauss_Fileter(img1, 3, 8)
    plt.subplot(324)
    plt.title('PauseNoise_meanfilter')
    plt.imshow(img1_1, cmap='gray')
    plt.subplot(325)
    plt.title('PauseNoise_mediumfilter')
    plt.imshow(img1_2, cmap='gray')
    plt.subplot(326)
    plt.title('PauseNoise_Gaussfilter')
    plt.imshow(img1_3, cmap='gray')
    plt.show()
    #三种滤波器对椒盐噪声的效果
    img2_1 = mean_Fileter(img2, 3)
    img2_2 = medium_Filter(img2, 3)
    img2_3 = Gauss_Fileter(img2, 3, 8)
    plt.subplot(327)
    plt.title('Salt_Pepper_Noise_meanfilter')
    plt.imshow(img2_1, cmap='gray')
    plt.subplot(328)
    plt.title('Salt_Pepper_Noise_mediumfilter')
    plt.imshow(img2_2, cmap='gray')
    plt.subplot(329)
    plt.title('Salt_PepperNoise_Gaussfilter')
    plt.imshow(img2_3, cmap='gray')
    #三种滤波器对高斯噪声的效果
    img3_1 = mean_Fileter(img3, 3)
    img3_2 = medium_Filter(img3, 3)
    img3_3 = Gauss_Fileter(img3, 3, 8)
    plt.subplot(330)
    plt.title('GaussNoise_meanfilter')
    plt.imshow(img3_1, cmap='gray')
    plt.subplot(331)
    plt.title('GaussNoise_mediumfilter')
    plt.imshow(img3_2, cmap='gray')
    plt.subplot(332)
    plt.title('GaussNoise_Gaussfilter')
    plt.imshow(img3_3, cmap='gray')
    plt.show()
    '''
    # Effect of different box-filter sizes on the noisy image.
    # NOTE(review): the createVar/locals() loop below only stores 97 under
    # dynamically built names and prints them; it has no effect on the
    # plots that follow.
    createVar = locals()
    num = [3, 5, 7]
    j = 324
    for i in num:
        createVar['kernel_' + str(i)]=97
        print('kernel_' + str(i))
    kerral_num = []
    for i in num:
        kerral_num.append(Gauss_Fileter(img1, i, 6))
        plt.subplot(j)
        plt.title('GaussNoise_meanfilter_kernel_' + str(i))
        plt.imshow(kerral_num.pop(), cmap='gray')
        j = j + 1
    plt.show()
    '''
    img = np.array(Image.open('LenaRGB.bmp'))
    img1 = add_Gauss_Noise(img, 0, 6.5)
    plt.subplot(321)
    plt.title('Gauss')
    plt.imshow(img1, cmap='gray')
    plt.subplot(322)
    plt.title('Grey gauss noise')
    plt.imshow(img2, cmap='gray')
    # plt.show()
    img3 = add_Salt_PepperNoise(img, 0.99)
    plt.subplot(323)
    plt.title('Salt_Pepper')
    plt.imshow(img3, cmap='gray')
    # 中值滤波
    img1_mf = scipy.ndimage.median_filter(img1, (8, 8))
    img3_mf = scipy.ndimage.median_filter(img3, (8, 8))
    #高斯滤波
    img1_gf = cv2.GaussianBlur(img1, (3, 3), 0)
    # 均值滤波
    plt.subplot(324)
    plt.title('Salt_pepper_no')
    plt.imshow(img3_mf, cmap='gray')
    plt.subplot(325)
    plt.title('Gauss_no')
    plt.imshow(img1_mf, cmap='gray')
    plt.show()
    '''
if __name__ == '__main__':
    main()
| StarcoderdataPython |
184862 | from ocdskit.cli.__main__ import main
from tests import assert_streaming
def test_command(capsys, monkeypatch):
    """Smoke-test the ``split-record-packages`` CLI command.

    Streams a real record package through ``main`` and compares the output
    against the expected split fixture.
    """
    assert_streaming(capsys, monkeypatch, main, ['split-record-packages', '1'],
                     ['realdata/record-package_package.json'], ['realdata/record-package_split.json'])
3282272 | ''' A script to send all messages from one chat to another. '''
import asyncio
import logging
from telethon.tl.patched import MessageService
from telethon.errors.rpcerrorlist import FloodWaitError
from telethon import TelegramClient
from telethon.sessions import StringSession
from settings import API_ID, API_HASH, REPLACEMENTS, forwards, get_forward, update_offset, STRING_SESSION
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
SENT_VIA = f'\n__Sent via__ `{str(__file__)}`'
def intify(string):
    """Return *string* as an int when possible, otherwise unchanged.

    Telegram chat identifiers may be numeric IDs or @usernames; numeric
    strings are converted so the client treats them as chat IDs.

    Bug fix: a bare ``except:`` also swallowed ``KeyboardInterrupt`` and
    ``SystemExit``; only conversion failures are caught now.
    """
    try:
        return int(string)
    except (TypeError, ValueError):
        # Non-numeric (e.g. a @username): pass through as-is.
        return string
def replace(message):
    """Apply the configured text substitutions to the message's text.

    Re-assigns ``message.text`` after each substitution (as the original
    did), since the attribute may be a property with its own semantics.
    """
    for old_value, new_value in REPLACEMENTS.items():
        message.text = str(message.text).replace(old_value, new_value)
    return message
async def forward_job():
    """Forward messages for every configured chat pair, then notify 'me'.

    Runs forever: each pass opens a fresh client, resumes every forward
    from its stored offset, and sleeps briefly between passes.
    """
    # NOTE(review): ``loop`` is never used after this point.
    loop = asyncio.get_running_loop()
    while True:
        if STRING_SESSION:
            session = StringSession(STRING_SESSION)
        else:
            session = 'forwarder'
        async with TelegramClient(session, API_ID, API_HASH) as client:
            error_occured = False
            for forward in forwards:
                from_chat, to_chat, offset = get_forward(forward)
                if not offset:
                    offset = 0
                last_id = 0
                # Oldest-first, resuming after the last forwarded message id.
                async for message in client.iter_messages(intify(from_chat), reverse=True, offset_id=offset):
                    if isinstance(message, MessageService):
                        continue
                    try:
                        await client.send_message(intify(to_chat), replace(message))
                        last_id = str(message.id)
                        logging.info('forwarding message with id = %s', last_id)
                        # Persist progress so a restart resumes from here.
                        update_offset(forward, last_id)
                    except FloodWaitError as fwe:
                        # Telegram rate limit: wait exactly as instructed.
                        print(f'{fwe}')
                        await asyncio.sleep(delay=fwe.seconds)
                    except Exception as err:
                        logging.exception(err)
                        error_occured = True
                        break
                logging.info('Completed working with %s', forward)
            #await client.send_file('me', 'config.ini', caption='This is your config file for telegram-chat-forward.')
            message = 'Your forward job has completed.' if not error_occured else 'Some errors occured. Please see the output on terminal. Contact Developer.'
            await client.send_message('me', f'''Hi !
            \n**{message}**
            \n**Telegram Chat Forward** is developed by @AahnikDaw.
            \nPlease star 🌟 on [GitHub](https://github.com/aahnik/telegram-chat-forward).
            {SENT_VIA}''', link_preview=False)
        await asyncio.sleep(5)
if __name__ == "__main__":
    # assert forwards
    # Entry point: run the forwarder until interrupted.
    asyncio.run(forward_job())
| StarcoderdataPython |
1631334 | from src.appIndicator import AppIndicator
if __name__ == "__main__":
app = AppIndicator()
| StarcoderdataPython |
1758797 | <gh_stars>0
def word_to_weird_case(word):
    """Alternate the case of *word*: even indices upper, odd indices lower."""
    chars = (ch.upper() if idx % 2 == 0 else ch.lower() for idx, ch in enumerate(word))
    return "".join(chars)
def to_weird_case(string):
    """Apply word_to_weird_case to every whitespace-separated word."""
    return " ".join(word_to_weird_case(token) for token in string.split())
| StarcoderdataPython |
3352087 | <filename>batman/uq/cosi.py
"""
Cosine transformation indices
-----------------------------
<NAME>., How to compute variance-based sensitivity indicators with your
spreadsheet software, Environmental Modelling & Software,
2012. DOI: 10.1016/j.envsoft.2012.03.004
"""
import numpy as np
from scipy.fftpack import dct
def cosi(sample, data):
    """Cosine transformation sensitivity.

    Use Discrete Cosine Transformation (DCT) to compute first-order
    sensitivity indices.

    :param array_like sample: Sample of parameters of shape
      (n_samples, n_params).
    :param array_like data: Sample of realization which corresponds to the
      sample of parameters :attr:`sample` (n_samples, ).
    :returns: First order sensitivity indices.
    :rtype: (Sobol, n_features).
    """
    sample = np.asarray(sample)
    data = np.asarray(data).flatten()
    n_samples, n_params = sample.shape
    # Number of low-frequency coefficients kept (heuristic: sqrt of the
    # sample size, but never fewer than 3).
    n_coeffs = int(max(np.ceil(np.sqrt(n_samples)), 3))
    indices = []
    for param in range(n_params):
        order = sample[:, param].argsort()
        reordered = data[order]
        coeffs = dct(reordered)  # cosine transformation frequencies
        # Skip the first coefficient, which encodes the mean.
        var_total = sum(coeffs[1:] ** 2)
        var_low = sum(coeffs[1:n_coeffs] ** 2)
        indices.append(var_low / var_total)
    return indices
| StarcoderdataPython |
76231 | <reponame>GeoStoner/Final-Project-Magnetite-Diffusion
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 11:59:58 2016
@author: ryanstoner
Model to calculate diffusion of He out of Magnetite at high diffusivities
relative to the production of He. Magnetite model from Blackburn et al. (2007)
Changing color in plot.
"""
"""
Initialize
"""
import numpy as np
import matplotlib.pyplot as plt
# Setting up initial parameters for simple Arrhenius equation.
a = 0.0001                  # Grain size [m], 100 microns radius
D0a2 = 10**6.8              # Diffusivity [s^-1] @ "infinite" temp., normalized
                            # to grain size.
Ea = 220*10**3              # Arrhenius activation energy [J/mol]
T = 673                     # Temperature [K]
# Bug fix: the molar gas constant is 8.3145 J/(K mol); the previous value
# (8.1345) was a digit-transposition typo and skewed the diffusivity.
R = 8.3145                  # Ideal gas constant [J/(K mol)]
Ndpart = 10**(-8)           # Partial pressure of He [Pa]
                            # NOTE(review): used below as the initial
                            # concentration in every shell -- confirm units.

# Grain dimensions
rmin = 0                    # Minimum radius [m]
rmax = np.copy(a)           # Radius [m]
dr = 10**-6                 # Distance step [m]
r = np.arange(rmin, rmax, dr)   # Radius array [m]
Nd = np.zeros(len(r))       # Concentration profile along the radius
Nd.fill(Ndpart)

# Calculate diffusivity to evaluate stability and pick the time step.
D = D0a2*np.exp(-Ea/(R*T))       # Diffusivity [m^2/s]
dttheory = (dr**2)/(2*D*a**2)    # Max. time step for stability [s]
dt = 10**6                       # Actual time step [s], approx. 30 yrs
if dttheory <= dt:               # Warn when the explicit scheme is unstable
    print('Unstable code. Time step too large')
    print('Your time step (dt) is:' + str(dt) + '. It should be less than:' +
          str(dttheory))

"""
Loop
"""
total_time = 2*10**10            # Time for diffusion to take place [s]
pltint = 800                     # Number of loops between plots
time = np.arange(0, total_time, dt)   # Time array [s]
dNdr = np.zeros(len(Nd)-1)       # Flux array for the 2/r*dN/dr term
d2Ndr2 = np.zeros(len(Nd)-1)     # Flux array for the d^2N/dr^2 term
q = np.zeros(len(Nd))            # Scratch array of first differences
Ndlen = len(Nd)                  # Length of Nd, cached
counter = 0                      # Which loop we are on

for i in np.array(time):
    # Central difference for dN/dr (used by the 2/r * dN/dr term).
    dNdr[1:] = (Nd[2:Ndlen] - Nd[0:Ndlen-2])/(2*dr)
    # NOTE(review): this evaluates ((dNdr[1]-dNdr[0])/2)*dr; it may have been
    # intended as /(2*dr). Left unchanged to preserve existing results.
    dNdr[0] = (dNdr[1]-dNdr[0])/2*dr
    # Second difference for d^2N/dr^2.
    q[1:] = np.diff(Nd)/dr
    d2Ndr2 = np.diff(q)/dr
    # Spherical diffusion update (explicit Euler step).
    dNdt = D*a**2*(d2Ndr2 + (2/r[1:Ndlen])*dNdr)
    Nd[:(Ndlen-1)] += dNdt*dt
    Nd[Ndlen-1] = 0              # Zero-concentration boundary at the surface
    counter += 1
    if counter % pltint == 0:
        # Create figure
        fig = plt.figure(1)
        plt.clf()
        ax = fig.add_subplot(111, projection='3d')
        # Converting to spherical coordinates
        u = np.linspace(0, 2 * np.pi, 100)
        v = np.linspace(0, np.pi, 100)
        # Coords. for sphere marking surface of the grain
        x = rmax * np.outer(np.cos(u), np.sin(v))
        y = rmax * np.outer(np.sin(u), np.sin(v))
        z = rmax * np.outer(np.ones(np.size(u)), np.cos(v))
        # Coords. for inner sphere marking "contour" of concentrations
        x_contour = 0.5 * rmax * np.outer(np.cos(u), np.sin(v))
        y_contour = 0.5 * rmax * np.outer(np.sin(u), np.sin(v))
        z_contour = 0.5 * rmax * np.outer(np.ones(np.size(u)), np.cos(v))
        # Convert from m to mm in plotting; alpha tracks concentration.
        scaling_factor = 1000
        # Bug fix: Ndlen/2 is a float in Python 3 and is not a valid numpy
        # index; use integer division instead.
        alpha_val = Nd[Ndlen - (Ndlen // 2)]/Ndpart
        ax.plot_surface(x*scaling_factor, y*scaling_factor, z*scaling_factor,
                        rstride=4, cstride=4, color='b', alpha=alpha_val)
        ax.plot_surface(x_contour*scaling_factor, y_contour*scaling_factor,
                        z_contour*scaling_factor, rstride=4, cstride=4,
                        color='b', alpha=alpha_val)
        # Setting plotting details
        titlefont = {'fontname': 'Verdana'}
        font = {'family': 'serif',
                'color': 'darkred',
                'weight': 'normal',
                'size': 16,
                }
        # NOTE(review): axis labels say (m) although scaling_factor converts
        # the coordinates to mm -- confirm the intended units.
        ax.set_xlabel('x dimension (m)', **titlefont)
        ax.set_ylabel('y dimension (m)', **titlefont)
        ax.set_zlabel('z dimension (m)', **titlefont)
        ax.set_zlim(-a*scaling_factor, a*scaling_factor)
        time_string = str(round(i/(31.536*10**6), 1))
        title = plt.title(' Magnetite Diffusion Example \n' +
                          'Time elapsed: ' + time_string + ' yrs \n \n',
                          **titlefont)
        plt.pause(0.001)
        plt.draw()
| StarcoderdataPython |
22462 | <reponame>David-Ciz/kitt
import heapq
import logging
import os
from tensorflow.keras.callbacks import Callback
class ModelCheckpoint(Callback):
    # Keras callback that saves checkpoints periodically and/or keeps the
    # N best models according to a monitored metric.
    def __init__(
        self,
        filepath: str,
        monitor: str,
        mode: str = "max",
        save_every_n_epochs: int = None,
        save_n_best=1,
        save_optimizer=False,
    ):
        """
        :param filepath: Filepath where to save the model. Can contain "epoch" and "<monitor>"
        formatting placeholders.
        :param monitor: What metric to observe.
        :param mode: One of {"min", "max"}. Whether to consider the monitored metric to improve
        if it gets lower or higher.
        :param save_n_best: Save last N best models.
        :param save_every_n_epochs: Save the model every N epochs.
        :param save_optimizer: Include optimizer state in the saved model checkpoint.
        """
        super().__init__()
        self.filepath = str(filepath)
        self.monitor = monitor
        self.save_n_best = save_n_best or 0
        self.save_every_n_epochs = save_every_n_epochs
        self.epochs_since_save = 0
        self.save_optimizer = save_optimizer
        assert self.save_every_n_epochs is None or self.save_every_n_epochs > 0
        assert self.save_n_best >= 0
        # Map metrics so that "bigger mapped value == better" regardless of
        # whether the monitored metric should be maximised or minimised.
        if mode == "max":
            self.metric_map_fn = lambda x: x
        elif mode == "min":
            self.metric_map_fn = lambda x: -x
        else:
            raise Exception(f"Unknown mode {mode}")
        # Invariant: best_queue is a heapq min-heap of
        # (mapped_metric, epoch, path, pinned) tuples, so best_queue[0] is
        # always the worst model still retained.
        self.best_queue = []
    def on_epoch_end(self, epoch, logs=None):
        # Called by Keras after every epoch with the epoch's metrics in logs.
        self.epochs_since_save += 1
        metric_value = logs[self.monitor]
        path = self.get_filepath(epoch + 1, logs=logs)
        # Tracks whether the periodic schedule already wrote this checkpoint,
        # so a "best" hit does not save the same file twice.
        saved = False
        if self.save_every_n_epochs:
            if self.epochs_since_save % self.save_every_n_epochs == 0:
                self.epochs_since_save = 0
                self.save_model(path)
                saved = True
        if self.save_n_best > 0 and self.is_better(metric_value):
            self.push_better(epoch, metric_value, path, saved)
            if not saved:
                self.save_model(path)
    def on_train_end(self, logs=None):
        # Always persist the final model next to the other checkpoints.
        directory = os.path.dirname(self.filepath)
        self.save_model(os.path.join(directory, "final.hdf5"))
    def is_better(self, metric_value: float):
        # Better than the worst retained model, or the queue has free slots.
        if len(self.best_queue) < self.save_n_best:
            return True
        value = self.metric_map_fn(metric_value)
        return value > self.best_queue[0][0]
    def push_better(self, epoch: int, metric_value: float, path: str, pin=False):
        # Insert the new model; when over capacity, evict the worst one and
        # delete its file unless it was "pinned" (also saved by the periodic
        # schedule, so its file must stay on disk).
        value = self.metric_map_fn(metric_value)
        heapq.heappush(self.best_queue, (value, epoch, path, pin))
        if len(self.best_queue) > self.save_n_best:
            _, _, previous_path, is_pinned = heapq.heappop(self.best_queue)
            if not is_pinned:
                try:
                    os.unlink(previous_path)
                except IOError as e:
                    logging.error(
                        f"Could not remove previously stored model path {previous_path}: {e}"
                    )
    def save_model(self, path: str):
        # self.model is set by Keras when the callback is attached.
        self.model.save(path, overwrite=True, include_optimizer=self.save_optimizer)
    def get_filepath(self, epoch, logs):
        # Expand the {epoch} and metric placeholders in the template path.
        return self.filepath.format(epoch=epoch, **logs)
| StarcoderdataPython |
4816412 | from functools import lru_cache
@lru_cache()
def rfa(*rfa_names):
    """award.rfa auditor condition factory.

    Returns a condition callable that is true when the award attached to
    the current context has one of the given RFA names.
    """
    def rfa_condition(value, system):
        award_uuid = system['context'].upgrade_properties()['award']
        return _award_rfa(award_uuid, system['root']) in rfa_names
    return rfa_condition
@lru_cache()
def _award_rfa(award_uuid, root):
    """Resolve (and cache) the ``rfa`` property of an award by UUID."""
    return root.get_by_uuid(award_uuid).upgrade_properties().get('rfa')
| StarcoderdataPython |
1667448 | # Generated by Django 3.0.3 on 2020-02-27 16:15
import autoslug.fields
from django.db import migrations
def migrate_data_forward(apps, schema_editor):
    """Re-save every instance so AutoSlugField populates the new slug.

    See https://stackoverflow.com/a/37310620/330911
    """
    model_names = ("Activite", "Cheminement", "EquipementMalentendant", "Erp", "Label")
    for model_name in model_names:
        model = apps.get_model("erp", model_name)
        for instance in model.objects.all():
            instance.save()
def migrate_data_backward(apps, schema_editor):
    # Nothing to undo: removing the slug column is handled by the schema
    # operations themselves when the migration is reversed.
    pass
class Migration(migrations.Migration):
    """Add a unique, auto-populated ``slug`` field to every ERP model.

    The five AddField operations were identical except for the model name,
    so they are now generated in a comprehension instead of being repeated.
    """

    dependencies = [
        ("erp", "0053_auto_20200227_1500"),
    ]

    operations = [
        migrations.AddField(
            model_name=model_name,
            name="slug",
            field=autoslug.fields.AutoSlugField(
                blank=True,
                default=None,
                editable=False,
                help_text="Identifiant d'URL (slug)",
                null=True,
                populate_from="nom",
                unique=True,
            ),
        )
        for model_name in (
            "activite",
            "cheminement",
            "equipementmalentendant",
            "erp",
            "label",
        )
    ] + [
        # Re-save every row so the new slug fields get populated.
        migrations.RunPython(migrate_data_forward, migrate_data_backward),
    ]
| StarcoderdataPython |
36942 | import os
import time
from flask import Flask, render_template, request
from aws_requests_auth.aws_auth import AWSRequestsAuth
import requests
import uuid
import base64
import shutil
from config import Config
app = Flask(__name__)
config = Config()
@app.route("/", methods=["GET", "POST"])
def index():
    """Home page: on upload, ship the image off for remote processing and
    render both the original and the processed result."""
    if "uploadFile" in request.files:
        # Reset the scratch directory that holds processed results.
        try:
            shutil.rmtree("static/temp")
        except:
            # NOTE(review): bare except hides all rmtree failures (it also
            # covers the expected first-run missing-directory case).
            pass
        os.makedirs("static/temp", exist_ok=True)
        uploaded_file = request.files.get("uploadFile", None)
        uploaded_file = uploaded_file.read()
        # Random name avoids collisions between concurrent uploads.
        file_name = f"{uuid.uuid4().hex}.png"
        # Ask the API for an upload location (url + form fields), then POST
        # the raw bytes there.
        endpoint = f"{config.ENDPOINT}/upload?filename={file_name}"
        response = requests.get(endpoint, auth=sign())
        response = response.json()
        files = {"file": (file_name, uploaded_file)}
        http_response = requests.post(
            response["url"], data=response["fields"], files=files
        )
        # Poll until the processed image is ready, then cache it locally so
        # the template can serve it from static/.
        full_filename = download_processed_file(file_name)
        with open(f"static/temp/{file_name}", "wb") as f:
            f.write(full_filename)
        processed_image = os.path.join("static/temp", file_name)
        # Inline the original upload as base64 for the template.
        uploaded_file = base64.b64encode(uploaded_file).decode("utf-8")
    else:
        processed_image = None
        uploaded_file = None
    return render_template(
        "home.html", processed_image=processed_image, uploaded_file=uploaded_file
    )
def sign():
    """Build SigV4 request auth for calls to the API Gateway endpoint."""
    return AWSRequestsAuth(
        aws_access_key=config.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY,
        aws_host=config.HOST,
        aws_region="us-east-1",
        aws_service="execute-api",
    )
def download_processed_file(file_name, poll_interval=1.0, max_attempts=None):
    """Poll the download endpoint until the processed file is ready.

    Improvement: the original polled forever with a hard-coded 1-second
    sleep; the interval is now configurable and an optional attempt cap
    lets callers fail instead of hanging.  Defaults preserve the original
    behavior exactly.

    :param file_name: name of the file to fetch
    :param poll_interval: seconds to wait between polls (default 1.0)
    :param max_attempts: optional cap on the number of polls; ``None``
        means wait forever (original behavior)
    :return: the processed file content (bytes)
    :raises TimeoutError: when ``max_attempts`` polls all failed
    """
    attempts = 0
    while True:
        endpoint = f"{config.ENDPOINT}/download?filename={file_name}"
        response = requests.get(endpoint, auth=sign())
        if response.status_code == 200:
            # The endpoint returns a URL to the actual content.
            return requests.get(response.text).content
        attempts += 1
        if max_attempts is not None and attempts >= max_attempts:
            raise TimeoutError(f"processed file {file_name!r} not ready after {attempts} polls")
        time.sleep(poll_interval)
| StarcoderdataPython |
1723817 | <filename>fib1.py
def fib1(n: int) -> int:
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1).

    Bug fix: the original had no base case, so every call recursed until
    RecursionError.
    """
    if n < 2:
        return n
    return fib1(n - 1) + fib1(n - 2)


if __name__ == "__main__":
    print(fib1(5))
| StarcoderdataPython |
1694143 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Tools for acquiring and normalizing the content from BioDati's demo server."""
import json
import os
import click
import pybel.grounding
HERE = os.path.abspath(os.path.dirname(__file__))
NETWORK_ID = '01E46GDFQAGK5W8EFS9S9WMH12'
RAW_PATH = os.path.join(HERE, 'covid19-biodati-raw.bel.nodelink.json')
GROUNDED_PATH = os.path.join(HERE, 'covid19-biodati-grounded.bel.nodelink.json')
@click.command()
@click.option('--force', is_flag=True)
@click.option('--user', prompt=True)
@click.password_option()
def main(force: bool, user: str, password: str):
    """Download and dump the BioDati 'rona graph."""
    # NOTE(review): with --force this branch is skipped entirely and the
    # cached GROUNDED_PATH is loaded below -- which fails if it does not
    # exist yet. The intent was likely ``not os.path.exists(...) or force``.
    if not os.path.exists(GROUNDED_PATH) and not force:
        if not os.path.exists(RAW_PATH) and not force:
            # NOTE(review): the download uses placeholder credentials; the
            # prompted user/password are only used for the upload below.
            graph = pybel.from_biodati(
                network_id=NETWORK_ID,
                username='<EMAIL>',
                password='<PASSWORD>',
                base_url='https://networkstore.demo.biodati.com',
            )
            pybel.dump(graph, RAW_PATH)
        graph = pybel.load(RAW_PATH)
        # This will probably not work for you (yet!)
        graph = pybel.grounding.ground(graph)
        graph.summarize()
        pybel.dump(graph, GROUNDED_PATH)
    else:
        graph = pybel.load(GROUNDED_PATH)
    # Push the (grounded) graph to the BEL Commons instance and echo the
    # server's JSON response.
    res = pybel.to_bel_commons(
        graph=graph,
        host='https://bel.labs.coronawhy.org',
        user=user,
        password=password,
    )
    click.secho(json.dumps(res.json(), indent=2))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
159259 | from statistics import mode
from typing import Tuple, List, Optional
from ._fixer_tool import FixerToolInterface
from ._languages import Languages
from ._sentence_pair import SentencePair
from .fixer_configurator import FixerConfigurator
from .fixer_statistics import FixerStatisticsMarks as StatisticsMarks
class NamesFixer(FixerToolInterface):
"""Fixer of wrong translation of proper names of person
Via external tools for recognising proper names of person it
check whenever tha name is in the same form in translated sentence
and if not it replace the wrong translation.
Names in source and translated sentence are aligned with external
aligner.
:param configuration: Configuration of the package
"""
def __init__(self, configuration: FixerConfigurator):
self.configuration = configuration
self.source_lang = configuration.source_lang
self.target_lang = configuration.target_lang
def fix(self, sentence_pair: SentencePair) -> Tuple[str, List[StatisticsMarks]]:
"""It verifies whenever the sentence contains problem and tries to fix it
If there is only name name in source and translated sentence,
those are automaticly matched together.
For more names the external aligner is called.
:param sentence_pair: Internal class with details about the sentence and translation
:return: Possible repaired sentence and statistics
"""
src_names_only = sentence_pair.source_names
trg_names_only = sentence_pair.target_names
if len(src_names_only) == 0 or len(trg_names_only) == 0:
return sentence_pair.target_text, []
# Replace if there is only one name
if len(src_names_only) == 1 and len(trg_names_only) == 1:
single_name_result, has_changed = self.__single_name_in_sentence(sentence_pair, sentence_pair.target_text, src_names_only[0], trg_names_only[0])
return single_name_result, [StatisticsMarks.N_SINGLE_NAME_SENTENCE] + [StatisticsMarks.N_NAME_CORRECT] if not has_changed else [StatisticsMarks.N_NAME_CHANGED]
result_sentence, marks = self.__match_names(sentence_pair)
return sentence_pair.target_text if not result_sentence else result_sentence, marks
def __single_name_in_sentence(self, sentence_pair: SentencePair, target_text: str, source_name: List[str], target_name: List[str]) -> Tuple[str, bool]:
""""Replace name in translated sentence for the name in source sentence.
Lemmas of the names are used as the replacement.
:param sentence_pair: Information about source and translated sentnce
:param target_text: Translated sentence with possible changes
:param source_name: Name in source sentence
:param target_name: Name in translated sentence
:return: target text with replaced name and flag if there was any change
"""
if source_name == target_name:
return target_text, False
lemmas = {word["word"]: word["lemma"] for word in sentence_pair.source_lemmas}
lemmas_source_names = [lemmas[name] for name in source_name]
return target_text.replace(" ".join(target_name), " ".join(lemmas_source_names)), True
def __match_names(self, sentence_pair: SentencePair) -> Tuple[Optional[str], List[StatisticsMarks]]:
    """Align name tokens in the source and translated sentence.

    If any word from the source sentence is aligned (via the external aligner)
    to any word in the translated sentence, those names are matched.
    If more than one source name is matched to some name in the translated
    sentence, those source names are ignored.
    After finishing the matching, the replacement method is called.

    :param sentence_pair: Information about source and translated sentence
    :return: - string if some name was changed (None if unfixable)
             - list of statistics marks
    """
    alignment = sentence_pair.alignment
    src_alignment = 0 if self.source_lang == Languages.EN else 1
    uses_of_target_names = len(sentence_pair.target_names) * [0]
    matches = []
    for source_name in sentence_pair.source_names:
        # Find translated tokens aligned with tokens from this source name.
        possible_alignments = set()
        for token in source_name:
            possible_alignments.update(
                align[1 - src_alignment] for align in alignment if align[src_alignment] == token)
        # Find possible translated names containing those aligned tokens.
        possible_target_names_idxs = []
        for possible_alignment in possible_alignments:
            for idx, target_name in enumerate(sentence_pair.target_names):
                if possible_alignment in target_name:
                    possible_target_names_idxs.append(idx)
        if not possible_target_names_idxs:
            return None, [StatisticsMarks.N_PROBLEM_UNFIXABLE]
        # Select the target name with the most matched words.
        selected_target_name = mode(possible_target_names_idxs)
        matches.append((source_name, sentence_pair.target_names[selected_target_name]))
        uses_of_target_names[selected_target_name] += 1
    # Remove matches where more than one source name mapped to one translated
    # name.  BUG FIX: use a set -- the original list could collect duplicate
    # match ids when target_names holds duplicate name token lists, inflating
    # its length and making the "everything removed" check fire spuriously.
    matches_to_remove = set()
    for idx, use_of_target_names in enumerate(uses_of_target_names):
        if use_of_target_names > 1:
            for match_id, match in enumerate(matches):
                if match[1] == sentence_pair.target_names[idx]:
                    matches_to_remove.add(match_id)
    if len(matches_to_remove) == len(matches):
        return None, [StatisticsMarks.N_PROBLEM_UNFIXABLE]
    marks = []
    translated_sentence = sentence_pair.target_text
    for match_id, match in enumerate(matches):
        if match_id in matches_to_remove:
            continue
        translated_sentence, has_changed = self.__single_name_in_sentence(
            sentence_pair, translated_sentence, match[0], match[1])
        marks.append(StatisticsMarks.N_NAME_CORRECT if not has_changed else StatisticsMarks.N_NAME_CHANGED)
    return translated_sentence, [StatisticsMarks.N_MULTIPLE_NAMES_SENTENCE] + marks
| StarcoderdataPython |
3308823 | from osim.env import Arm2DEnv
import math
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from CEM_Agent import Agent
# Set up CUDA device (falls back to CPU when no GPU is available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Test the environment
env = Arm2DEnv(visualize=True)
env.seed(101)
print("======== Information of Observation Space ========")
print('observation space:', env.observation_space)
print('observation shape:', env.observation_space.shape[0])
print("======== Information of Action Space ========")
print('action space:', env.action_space)
print('action shape:', env.action_space.shape[0])
print(" - low:", env.action_space.low)
print(" - high:", env.action_space.high)
np.random.seed(101)

# NOTE(review): Module.to() interprets a second positional argument as a
# dtype, so `64` here is suspicious -- presumably it was meant for the Agent
# constructor (e.g. a hidden size).  Confirm against CEM_Agent's signature.
agent = Agent(env).to(device, 64)
# load the trained weights from file
agent.load_state_dict(torch.load('checkpoint.pth'))

state = env.reset()
n = 0  # steps since the last environment reset
i = 0  # total steps taken; hard cap of 1000 ends the demo
while True:
    i += 1
    state = torch.from_numpy(state).float().to(device)
    with torch.no_grad():
        action = agent(state)
    env.render()
    n += 1
    next_state, reward, done, _ = env.step(action)
    state = next_state
    if i >= 1000:
        break
    if done:
        print('Finished the task in: {} steps'.format(n))
        n = 0
        state = env.reset()
env.close()
| StarcoderdataPython |
23582 | # -*- coding: utf-8 -*-
"""
Created on 2020/03/16
Feature selection: Relief-based feature selection algorithm.
------
@author: <NAME>
"""
import numpy as np
from sklearn import preprocessing
import os
from sklearn.externals import joblib
from el_classify_sensitive_person_train_validation import ClassifyFourKindOfPersonTrain
from eslearn.utils.lc_evaluation_model_performances import eval_performance
class ClassifyFourKindOfPersonTest():
    """
    This class is used to test a classification model for 2 kinds of sensitive
    person identification.

    Parameters
    ----------
    data_test_file: path str
        Path of the test dataset (.npy)
    label_test_file: path str
        Path of the test labels (.npy)
    data_train_file: path str
        Path of the training dataset (needed for age encoding)
    models_path: path str
        Directory containing the saved feature-selection/classification models
    path_out: path str
        Path to save results
    is_feature_selection : bool
        If perform feature selection.
    is_showfig_finally: bool
        If show figure after all iteration finished.

    Returns
    -------
    Save all classification results and figures to local disk.
    """

    # NOTE: the original code named the instance parameter `selftest`; it is
    # renamed to the conventional `self` (purely positional, no caller impact).
    def __init__(self,
                 data_test_file=None,
                 label_test_file=None,
                 data_train_file=None,
                 models_path=None,
                 path_out=None,
                 is_feature_selection=False,
                 is_showfig_finally=True):
        self.data_test_file = data_test_file
        self.label_test_file = label_test_file
        self.data_train_file = data_train_file
        self.path_out = path_out
        self.models_path = models_path
        self.is_feature_selection = is_feature_selection
        self.is_showfig_finally = is_showfig_finally

    def main_function(self):
        """Load models and data, encode/select features, predict, evaluate and save."""
        print('Training model and testing...\n')

        # load data and mask
        mask_lassocv = joblib.load(os.path.join(self.path_out, 'mask_selected_features_lassocv.pkl'))
        # NOTE(review): loaded but never used below; kept so that a missing
        # model file still fails fast -- confirm whether it can be dropped.
        model_feature_selection = joblib.load(os.path.join(self.models_path, 'model_feature_selection.pkl'))
        model_classification = joblib.load(os.path.join(self.models_path, 'model_classification.pkl'))
        feature_test, self.label_test, feature_train = self._load_data()

        # Age encoding (column 2 is presumably age -- encoded against the training distribution)
        feature_test[:, 2] = ClassifyFourKindOfPersonTrain().age_encodeing(feature_train[:, 2], feature_test[:, 2])

        # Feature selection
        if self.is_feature_selection:
            feature_test = feature_test[:, mask_lassocv != 0]

        # Testing
        self.prediction, self.decision = self.testing(model_classification, feature_test)

        # Evaluating classification performances
        self.accuracy, self.sensitivity, self.specificity, self.AUC = eval_performance(
            self.label_test, self.prediction, self.decision,
            accuracy_kfold=None, sensitivity_kfold=None, specificity_kfold=None, AUC_kfold=None,
            verbose=1, is_showfig=0)

        # Save results and fig to local path
        self.save_results()
        self.save_fig()
        print("--" * 10 + "Done!" + "--" * 10)
        return self

    def _load_data(self):
        """Load test features, test labels and training features from .npy files."""
        data_test = np.load(self.data_test_file)
        label_test = np.load(self.label_test_file)
        data_train = np.load(self.data_train_file)
        return data_test, label_test, data_train

    def testing(self, model, test_X):
        """Return (predicted labels, decision-function values) for test_X."""
        predict = model.predict(test_X)
        decision = model.decision_function(test_X)
        return predict, decision

    def save_results(self):
        """Save performance metrics and per-sample decisions/predictions as text files."""
        import pandas as pd
        performances_to_save = np.array([self.accuracy, self.sensitivity, self.specificity, self.AUC]).reshape(1, 4)
        de_pred_label_to_save = np.vstack([self.decision.T, self.prediction.T, self.label_test.T]).T
        performances_to_save = pd.DataFrame(performances_to_save, columns=[['Accuracy', 'Sensitivity', 'Specificity', 'AUC']])
        de_pred_label_to_save = pd.DataFrame(de_pred_label_to_save, columns=[['Decision', 'Prediction', 'Sorted_Real_Label']])
        performances_to_save.to_csv(os.path.join(self.path_out, 'test_Performances.txt'), index=False, header=True)
        de_pred_label_to_save.to_csv(os.path.join(self.path_out, 'test_Decision_prediction_label.txt'), index=False, header=True)

    def save_fig(self):
        """Save the ROC curve and 2D classification figure as a PDF."""
        acc, sens, spec, auc = eval_performance(
            self.label_test, self.prediction, self.decision,
            self.accuracy, self.sensitivity, self.specificity, self.AUC,
            verbose=0, is_showfig=self.is_showfig_finally, is_savefig=1,
            out_name=os.path.join(self.path_out, 'Classification_performances_test.pdf'),
            legend1='Healthy', legend2='Unhealthy')
#
if __name__ == '__main__':
    # =============================================================================
    # All inputs (removed the unused `data_file` Excel path from the original)
    path_out = r'D:\workstation_b\Fundation'
    models_path = r'D:\workstation_b\Fundation'
    # =============================================================================
    selftest = ClassifyFourKindOfPersonTest(data_test_file=r'D:\workstation_b\Fundation\feature_test.npy',
                                            label_test_file=r'D:\workstation_b\Fundation\label_test.npy',
                                            data_train_file=r'D:\workstation_b\Fundation\feature_train.npy',
                                            path_out=path_out,
                                            models_path=models_path,
                                            is_feature_selection=1)
    selftest.main_function()
| StarcoderdataPython |
108288 | <filename>OpenGLCffi/GL/EXT/SGIS/point_parameters.py
from OpenGLCffi.GL import params
@params(api='gl', prms=['pname', 'param'])
def glPointParameterfSGIS(pname, param):
    """Set a scalar point parameter (SGIS_point_parameters GL extension).

    NOTE(review): the body is empty; presumably the @params decorator supplies
    the actual FFI call in OpenGLCffi -- confirm before relying on this stub.
    """
    pass
@params(api='gl', prms=['pname', 'params'])
def glPointParameterfvSGIS(pname, params):
    """Set a vector point parameter (SGIS_point_parameters GL extension).

    NOTE(review): the body is empty; presumably the @params decorator supplies
    the actual FFI call in OpenGLCffi -- confirm before relying on this stub.
    """
    pass
| StarcoderdataPython |
157364 | import logging
from metadrive.component.road_network.node_road_network import NodeRoadNetwork
import numpy as np
from panda3d.core import TransparencyAttrib, LineSegs, NodePath
from metadrive.component.lane.circular_lane import CircularLane
from metadrive.component.map.base_map import BaseMap
from metadrive.component.pgblock.first_block import FirstPGBlock
from metadrive.component.road_network import Road
from metadrive.constants import RENDER_MODE_ONSCREEN, CamMask
from metadrive.engine.asset_loader import AssetLoader
from metadrive.utils import clip, norm, get_np_random
from metadrive.utils.coordinates_shift import panda_position
from metadrive.utils.scene_utils import ray_localization
from metadrive.utils.space import Parameter, BlockParameterSpace
class BaseNavigation:
    """Helper that localizes a vehicle on the map and produces navigation observations.

    Subclasses implement route setting, localization updates and checkpoint
    bookkeeping; this base class holds the shared state, the optional
    on-screen visualization nodes, and common geometric utilities.
    """

    # Length of the navigation observation vector returned by get_navi_info().
    navigation_info_dim = 10
    # Checkpoint direction vectors are clipped to this distance [m].
    NAVI_POINT_DIST = 50
    PRE_NOTIFY_DIST = 40
    MIN_ALPHA = 0.15
    CKPT_UPDATE_RANGE = 5
    # When True, the current lane is recomputed from scratch every step.
    FORCE_CALCULATE = False
    # Height [m] at which the line-to-destination is drawn.
    LINE_TO_DEST_HEIGHT = 0.6

    def __init__(
        self,
        engine,
        show_navi_mark: bool = False,
        random_navi_mark_color=False,
        show_dest_mark=False,
        show_line_to_dest=False
    ):
        """
        This class define a helper for localizing vehicles and retrieving navigation information.
        It now only support from first block start to the end node, but can be extended easily.
        """
        self.map = None
        self.checkpoints = None
        self._target_checkpoints_index = None
        self.current_ref_lanes = None
        self.next_ref_lanes = None
        self.final_lane = None
        self.current_lane = None
        self._navi_info = np.zeros((self.navigation_info_dim, ), dtype=np.float32)  # navi information res

        # Vis -- only built when rendering on screen and not in physics-debug mode
        self._show_navi_info = (engine.mode == RENDER_MODE_ONSCREEN and not engine.global_config["debug_physics_world"])
        self.origin = NodePath("navigation_sign") if self._show_navi_info else None
        self.navi_mark_color = (0.6, 0.8, 0.5) if not random_navi_mark_color else get_np_random().rand(3)
        self.navi_arrow_dir = [0, 0]
        self._dest_node_path = None
        self._goal_node_path = None
        self._line_to_dest = None
        self._show_line_to_dest = show_line_to_dest
        if self._show_navi_info:
            # nodepath
            self._line_to_dest = self.origin.attachNewNode("line")
            self._goal_node_path = self.origin.attachNewNode("target")
            self._dest_node_path = self.origin.attachNewNode("dest")
            if show_navi_mark:
                navi_point_model = AssetLoader.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
                navi_point_model.reparentTo(self._goal_node_path)
            if show_dest_mark:
                dest_point_model = AssetLoader.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
                dest_point_model.reparentTo(self._dest_node_path)
            if show_line_to_dest:
                line_seg = LineSegs("line_to_dest")
                line_seg.setColor(self.navi_mark_color[0], self.navi_mark_color[1], self.navi_mark_color[2], 0.7)
                line_seg.setThickness(2)
                self._dynamic_line_np = NodePath(line_seg.create(True))
                self._dynamic_line_np.reparentTo(self.origin)
                self._line_to_dest = line_seg
            self._goal_node_path.setTransparency(TransparencyAttrib.M_alpha)
            self._dest_node_path.setTransparency(TransparencyAttrib.M_alpha)
            self._goal_node_path.setColor(
                self.navi_mark_color[0], self.navi_mark_color[1], self.navi_mark_color[2], 0.7
            )
            self._dest_node_path.setColor(
                self.navi_mark_color[0], self.navi_mark_color[1], self.navi_mark_color[2], 0.7
            )
            self._goal_node_path.hide(CamMask.AllOn)
            self._dest_node_path.hide(CamMask.AllOn)
            self._goal_node_path.show(CamMask.MainCam)
            self._dest_node_path.show(CamMask.MainCam)
        logging.debug("Load Vehicle Module: {}".format(self.__class__.__name__))

    def reset(self, map: BaseMap, current_lane):
        """Re-bind this navigation helper to a (new) map and starting lane."""
        self.map = map
        self.current_lane = current_lane

    def set_route(self, current_lane_index: str, destination: str):
        """
        Find a shortest path from start road to end road
        :param current_lane_index: start road node
        :param destination: end road node or end lane index
        :return: None
        """
        raise NotImplementedError

    def update_localization(self, ego_vehicle):
        """
        It is called every step
        """
        raise NotImplementedError

    def _get_info_for_checkpoint(self, lanes_id, ref_lane, ego_vehicle):
        """Build the per-checkpoint slice of the navigation observation.

        Returns (navi_information, lanes_heading, check_point) where
        navi_information is a list of 5 values each clipped to [0, 1].
        """
        navi_information = []
        # Project the checkpoint position into the target vehicle's coordination, where
        # +x is the heading and +y is the right hand side.
        later_middle = (float(self.get_current_lane_num()) / 2 - 0.5) * self.get_current_lane_width()
        check_point = ref_lane.position(ref_lane.length, later_middle)
        dir_vec = check_point - ego_vehicle.position  # get the vector from center of vehicle to checkpoint
        dir_norm = norm(dir_vec[0], dir_vec[1])
        if dir_norm > self.NAVI_POINT_DIST:  # if the checkpoint is too far then crop the direction vector
            dir_vec = dir_vec / dir_norm * self.NAVI_POINT_DIST
        ckpt_in_heading, ckpt_in_rhs = ego_vehicle.projection(dir_vec)  # project to ego vehicle's coordination

        # Dim 1: the relative position of the checkpoint in the target vehicle's heading direction.
        navi_information.append(clip((ckpt_in_heading / self.NAVI_POINT_DIST + 1) / 2, 0.0, 1.0))

        # Dim 2: the relative position of the checkpoint in the target vehicle's right hand side direction.
        navi_information.append(clip((ckpt_in_rhs / self.NAVI_POINT_DIST + 1) / 2, 0.0, 1.0))

        if lanes_id == 0:
            lanes_heading = ref_lane.heading_theta_at(ref_lane.local_coordinates(ego_vehicle.position)[0])
        else:
            lanes_heading = ref_lane.heading_theta_at(min(self.PRE_NOTIFY_DIST, ref_lane.length))

        # Try to include the current lane's information into the navigation information
        bendradius = 0.0
        dir = 0.0
        angle = 0.0
        if isinstance(ref_lane, CircularLane):
            bendradius = ref_lane.radius / (
                BlockParameterSpace.CURVE[Parameter.radius].max +
                self.get_current_lane_num() * self.get_current_lane_width()
            )
            dir = ref_lane.direction
            if dir == 1:
                angle = ref_lane.end_phase - ref_lane.start_phase
            elif dir == -1:
                angle = ref_lane.start_phase - ref_lane.end_phase

        # Dim 3: The bending radius of current lane
        navi_information.append(clip(bendradius, 0.0, 1.0))

        # Dim 4: The bending direction of current lane (+1 for clockwise, -1 for counterclockwise)
        navi_information.append(clip((dir + 1) / 2, 0.0, 1.0))

        # Dim 5: The angular difference between the heading in lane ending position and
        # the heading in lane starting position
        navi_information.append(
            clip((np.rad2deg(angle) / BlockParameterSpace.CURVE[Parameter.angle].max + 1) / 2, 0.0, 1.0)
        )
        return navi_information, lanes_heading, check_point

    def _update_target_checkpoints(self, ego_lane_index, ego_lane_longitude):
        """Advance the checkpoint indices as the vehicle progresses (subclass hook)."""
        raise NotImplementedError

    def get_navi_info(self):
        """Return the latest navigation observation vector (length navigation_info_dim)."""
        return self._navi_info

    def destroy(self):
        """Detach and free visualization nodes and drop lane references."""
        if self._show_navi_info:
            try:
                self._line_to_dest.removeNode()
            except AttributeError:
                # _line_to_dest may be a LineSegs (no removeNode) when
                # show_line_to_dest was enabled -- ignore.
                pass
            self._dest_node_path.removeNode()
            self._goal_node_path.removeNode()
        self.next_ref_lanes = None
        self.current_ref_lanes = None

    def set_force_calculate_lane_index(self, force: bool):
        """Toggle recomputing the closest lane index from scratch each step."""
        self.FORCE_CALCULATE = force

    def __del__(self):
        logging.debug("{} is destroyed".format(self.__class__.__name__))

    def get_current_lateral_range(self, current_position, engine) -> float:
        """Return the drivable lateral extent [m] at current_position (subclass hook)."""
        raise NotImplementedError

    def get_current_lane_width(self) -> float:
        """Width [m] of the lane the vehicle is currently on."""
        return self.current_lane.width

    def get_current_lane_num(self) -> float:
        """Number of reference lanes in the current road section."""
        return len(self.current_ref_lanes)

    def _get_current_lane(self, ego_vehicle):
        """
        Called in update_localization to find current lane information
        """
        possible_lanes = ray_localization(
            ego_vehicle.heading, ego_vehicle.position, ego_vehicle.engine, return_all_result=True
        )
        # Prefer a candidate lane that belongs to the current reference lanes.
        for lane, index, l_1_dist in possible_lanes:
            if lane in self.current_ref_lanes:
                return lane, index
        nx_ckpt = self._target_checkpoints_index[-1]
        if nx_ckpt == self.checkpoints[-1] or self.next_ref_lanes is None:
            return possible_lanes[0][:-1] if len(possible_lanes) > 0 else (None, None)

        # Otherwise, also consider the lanes of the next road section.
        if self.map.road_network_type == NodeRoadNetwork:
            nx_nx_ckpt = nx_ckpt + 1
            next_ref_lanes = self.map.road_network.graph[self.checkpoints[nx_ckpt]][self.checkpoints[nx_nx_ckpt]]
        else:
            next_ref_lanes = self.next_ref_lanes
        for lane, index, l_1_dist in possible_lanes:
            if lane in next_ref_lanes:
                return lane, index
        return possible_lanes[0][:-1] if len(possible_lanes) > 0 else (None, None)

    def _update_current_lane(self, ego_vehicle):
        """Resolve and store the vehicle's current lane; fall back to the vehicle's own lane."""
        lane, lane_index = self._get_current_lane(ego_vehicle)
        if lane is None:
            lane, lane_index = ego_vehicle.lane, ego_vehicle.lane_index
            ego_vehicle.on_lane = False
            if self.FORCE_CALCULATE:
                lane_index, _ = self.map.road_network.get_closest_lane_index(ego_vehicle.position)
                lane = self.map.road_network.get_lane(lane_index)
        self.current_lane = lane
        assert lane_index == lane.index, "lane index mismatch!"
        return lane, lane_index

    def _ray_lateral_range(self, engine, start_position, dir, length=50):
        """
        It is used to measure the lateral range of special blocks
        :param start_position: start_point
        :param dir: ray direction
        :param length: length of ray
        :return: lateral range [m]
        """
        end_position = start_position[0] + dir[0] * length, start_position[1] + dir[1] * length
        start_position = panda_position(start_position, z=0.15)
        end_position = panda_position(end_position, z=0.15)
        mask = FirstPGBlock.CONTINUOUS_COLLISION_MASK
        res = engine.physics_world.static_world.rayTestClosest(start_position, end_position, mask=mask)
        if not res.hasHit():
            return length
        else:
            return res.getHitFraction() * length

    def _draw_line_to_dest(self, start_position, end_position):
        """Redraw the debug line from the vehicle to the destination (no-op when disabled)."""
        if not self._show_line_to_dest:
            return
        line_seg = self._line_to_dest
        line_seg.moveTo(panda_position(start_position, self.LINE_TO_DEST_HEIGHT))
        line_seg.drawTo(panda_position(end_position, self.LINE_TO_DEST_HEIGHT))
        # Rebuild the node each call: LineSegs geometry is immutable once created.
        self._dynamic_line_np.removeNode()
        self._dynamic_line_np = NodePath(line_seg.create(False))
        self._dynamic_line_np.hide(CamMask.Shadow | CamMask.RgbCam)
        self._dynamic_line_np.reparentTo(self.origin)

    def detach_from_world(self):
        """Remove the visualization subtree from the scene graph."""
        if isinstance(self.origin, NodePath):
            self.origin.detachNode()

    def attach_to_world(self, engine):
        """Re-attach the visualization subtree to the engine's render root."""
        if isinstance(self.origin, NodePath):
            self.origin.reparentTo(engine.render)
| StarcoderdataPython |
4833827 | <filename>packages/postgres-database/src/simcore_postgres_database/migration/versions/64e91497d257_add_aborted_state.py
"""add aborted state
Revision ID: 64e91497d257
Revises: 27c6a30d7c24
Create Date: 2020-10-14 20:05:26.968038+00:00
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "64e91497d257"
down_revision = "27c6a30d7c24"
branch_labels = None
depends_on = None
DB_PROCEDURE_NAME: str = "notify_comp_tasks_changed"
DB_TRIGGER_NAME: str = f"{DB_PROCEDURE_NAME}_event"
def upgrade():
    """Recreate the ``statetype`` enum with the new ABORTED member.

    PostgreSQL enums cannot simply gain a value inside this migration flow,
    so we rename the old type, create the new one, rewrite every dependent
    column, drop the old type, and finally rebuild the notification trigger.
    """
    # change current name of enum
    op.execute(
        sa.DDL(
            """
ALTER TYPE statetype RENAME TO statetype_old;
"""
        )
    )
    # create the new statetype with ABORTED
    state_type = postgresql.ENUM(
        "NOT_STARTED",
        "PUBLISHED",
        "PENDING",
        "RUNNING",
        "SUCCESS",
        "FAILED",
        "ABORTED",
        name="statetype",
    )
    state_type.create(op.get_bind())
    # update all the columns, trigger depending on it and drop the old type
    op.execute(
        sa.DDL(
            f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
ALTER TABLE comp_tasks ALTER COLUMN state DROP DEFAULT;
ALTER TABLE comp_tasks ALTER COLUMN state TYPE statetype USING state::text::statetype;
ALTER TABLE comp_tasks ALTER COLUMN state SET DEFAULT 'NOT_STARTED';
ALTER TABLE comp_pipeline ALTER COLUMN state DROP DEFAULT;
ALTER TABLE comp_pipeline ALTER COLUMN state TYPE statetype USING state::text::statetype;
ALTER TABLE comp_pipeline ALTER COLUMN state SET DEFAULT 'NOT_STARTED';
DROP TYPE statetype_old;
"""
        )
    )
    # recreate the change-notification trigger that was dropped above
    op.execute(
        sa.DDL(
            f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
CREATE TRIGGER {DB_TRIGGER_NAME}
AFTER UPDATE OF outputs,state ON comp_tasks
FOR EACH ROW
WHEN ((OLD.outputs::jsonb IS DISTINCT FROM NEW.outputs::jsonb OR OLD.state IS DISTINCT FROM NEW.state)
AND NEW.node_class <> 'FRONTEND')
EXECUTE PROCEDURE {DB_PROCEDURE_NAME}();
"""
        )
    )
def downgrade():
    """Revert to the ``statetype`` enum without ABORTED.

    Rows currently in ABORTED are remapped to NOT_STARTED first, then the
    enum is swapped out using the same rename/recreate/rewrite dance as in
    :func:`upgrade`, and the notification trigger is rebuilt.
    """
    # set the ABORTED value to NOT_STARTED and rename the statetype
    op.execute(
        sa.DDL(
            """
UPDATE comp_tasks SET state = 'NOT_STARTED' WHERE state = 'ABORTED';
UPDATE comp_pipeline SET state = 'NOT_STARTED' WHERE state = 'ABORTED';
ALTER TYPE statetype RENAME TO statetype_old;
"""
        )
    )
    # create the statetype (without ABORTED)
    state_type = postgresql.ENUM(
        "NOT_STARTED",
        "PUBLISHED",
        "PENDING",
        "RUNNING",
        "SUCCESS",
        "FAILED",
        name="statetype",
    )
    state_type.create(op.get_bind())
    # update all the columns, trigger depending on it
    op.execute(
        sa.DDL(
            f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
ALTER TABLE comp_tasks ALTER COLUMN state DROP DEFAULT;
ALTER TABLE comp_tasks ALTER COLUMN state TYPE statetype USING state::text::statetype;
ALTER TABLE comp_tasks ALTER COLUMN state SET DEFAULT 'NOT_STARTED';
ALTER TABLE comp_pipeline ALTER COLUMN state DROP DEFAULT;
ALTER TABLE comp_pipeline ALTER COLUMN state TYPE statetype USING state::text::statetype;
ALTER TABLE comp_pipeline ALTER COLUMN state SET DEFAULT 'NOT_STARTED';
DROP TYPE statetype_old;
"""
        )
    )
    # recreate the change-notification trigger that was dropped above
    op.execute(
        sa.DDL(
            f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
CREATE TRIGGER {DB_TRIGGER_NAME}
AFTER UPDATE OF outputs,state ON comp_tasks
FOR EACH ROW
WHEN ((OLD.outputs::jsonb IS DISTINCT FROM NEW.outputs::jsonb OR OLD.state IS DISTINCT FROM NEW.state)
AND NEW.node_class <> 'FRONTEND')
EXECUTE PROCEDURE {DB_PROCEDURE_NAME}();
"""
        )
    )
| StarcoderdataPython |
3230476 | from ads import Ads, Project, Service, ServiceSet, Profile, BadSelectorException
| StarcoderdataPython |
3293767 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from classification import parse_aug_fn
# Read image and decode it into a uint8 array
img_string = tf.io.read_file('./test/test_classification_image.jpg')
img = tf.image.decode_image(img_string)
img = img.numpy()

# Create dataset pipeline: a single (image, label) pair, run through the
# project's augmentation function at 416x416
img_ds = tf.data.Dataset.from_tensor_slices([img])
label_ds = tf.data.Dataset.from_tensor_slices(['Meerkat'])
img_label_ds = tf.data.Dataset.zip({'image': img_ds, 'label': label_ds})
img_label_ds = img_label_ds.map(lambda dataset: parse_aug_fn(dataset, (416, 416)),
                                num_parallel_calls=tf.data.experimental.AUTOTUNE)

# Tile 16 independently augmented samples into a 4x4 mosaic for inspection
h, w = (416, 416)
images = np.zeros((h * 4, w * 4, 3))
for count, [img, label] in enumerate(img_label_ds.repeat().take(16)):
    img = img.numpy()
    i = count // 4  # mosaic row
    j = count % 4   # mosaic column
    images[h * i:h * (i + 1), w * j:w * (j + 1)] = img
plt.figure(figsize=(12, 12))
plt.imshow(images)
plt.show()
| StarcoderdataPython |
78702 | import cv2
from darcyai.perceptor.coral.people_perceptor import PeoplePerceptor
from darcyai.input.camera_stream import CameraStream
from darcyai.output.live_feed_stream import LiveFeedStream
from darcyai.pipeline import Pipeline
# Callback that prepares the incoming Input Stream data for the People Perceptor
def people_input_callback(input_data, pom, config):
    """Return a copy of the incoming frame unchanged for the People Perceptor."""
    # No preprocessing is needed -- hand the perceptor its own copy of the frame.
    return input_data.data.copy()
# Create a callback function for handling the Live Feed output stream data before it gets presented
def live_feed_callback(pom, input_data):
    """Annotate the People Perceptor frame with a people count and, when anyone
    is present, the face height of the last person in the people map."""
    # Start with the annotated video frame available from the People Perceptor
    frame = pom.peeps.annotatedFrame().copy()

    # Add some text telling how many people are in the scene
    label = "{} peeps".format(pom.peeps.peopleCount())
    color = (0, 255, 0)
    cv2.putText(frame, str(label), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)

    # If we have anyone, demonstrate looking up a person in the POM by getting
    # their face size and showing it as text.
    # NOTE: this uses the face size of the last person in the iteration order.
    if pom.peeps.peopleCount() > 0:
        face_height = None
        for person_id in pom.peeps.people():
            face_size = pom.peeps.faceSize(person_id)
            face_height = face_size[1]
        # BUG FIX: the original called cv2.putText inside the loop, stamping
        # overlapping text at the same coordinates once per person; draw once
        # after the loop with the last person's value, as the comment intended.
        label2 = "{} face height".format(face_height)
        color = (0, 255, 255)
        cv2.putText(frame, str(label2), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)

    # Pass the finished frame out of this callback so the Live Feed output stream can display it
    return frame
# Callback for the People Perceptor's "New Person" event:
# log the newly assigned person ID to the console.
def new_person_callback(person_id):
    """Print the ID of a person who just entered the scene."""
    print(f"New person: {person_id}")
# Instantiate a Camera Stream input stream object
camera = CameraStream(video_device="/dev/video0", fps=20)

# Instantiate the Pipeline object and pass it the Camera Stream object as its input stream source
pipeline = Pipeline(input_stream=camera)

# Create a Live Feed output stream object and specify some URL parameters
live_feed = LiveFeedStream(path="/", port=3456, host="0.0.0.0")

# Add the Live Feed output stream to the Pipeline and use the callback from above as the handler
pipeline.add_output_stream("output", live_feed_callback, live_feed)

# Instantiate a People Perceptor
people_ai = PeoplePerceptor()

# Subscribe to the "New Person" event from the People Perceptor and use our callback from above as the handler
people_ai.on("new_person_entered_scene", new_person_callback)

# Add the People Perceptor instance to the Pipeline and use the input callback from above as the input preparation handler
pipeline.add_perceptor("peeps", people_ai, input_callback=people_input_callback)

# Update the configuration of the People Perceptor to show the pose landmark dots on the annotated video frame
pipeline.set_perceptor_config("peeps", "show_pose_landmark_dots", True)
pipeline.set_perceptor_config("peeps", "pose_landmark_dot_size", 2)
pipeline.set_perceptor_config("peeps", "pose_landmark_dot_color", "0,255,0")

# Start the Pipeline (blocks until the pipeline stops)
pipeline.run()
# Source template for an AWS Lambda handler that proxies API Gateway (HTTP API
# v2) events to a SageMaker runtime endpoint.  Rendered with str.format():
# {endpoint_name} is substituted and the doubled braces {{ }} collapse back to
# literal braces in the generated code.
# NOTE(review): "FUNCION" is a typo in the constant name; kept as-is because
# renaming would break existing importers.
LAMBDA_FUNCION_CODE = """
import json
import boto3
from base64 import b64decode
def safeget(dct, *keys, default=None):
for key in keys:
try:
dct = dct[key]
except KeyError:
return default
return dct
def lambda_handler(event, context):
runtime = boto3.client('runtime.sagemaker')
try:
sagemaker_response = runtime.invoke_endpoint(
EndpointName="{endpoint_name}",
ContentType=safeget(event, 'headers', 'content-type', default='application/json'),
CustomAttributes=safeget(event, 'rawPath', default='')[1:],
Body=b64decode(event.get('body')) if event.get('isBase64Encoded') else event.get('body')
)
except Exception as e:
return {{
'statusCode': e.response.get('OriginalStatusCode'),
'body': e.response.get('Error')['Message']
}}
else:
return {{
'statusCode': safeget(sagemaker_response, 'ResponseMetadata', 'HTTPStatusCode'),
'body': sagemaker_response.get('Body').read()
}}
"""
| StarcoderdataPython |
3295763 | <reponame>mdop-wh/pulumi-aws<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetPolicyDocumentStatementArgs',
'GetPolicyDocumentStatementConditionArgs',
'GetPolicyDocumentStatementNotPrincipalArgs',
'GetPolicyDocumentStatementPrincipalArgs',
]
@pulumi.input_type
class GetPolicyDocumentStatementArgs:
def __init__(__self__, *,
actions: Optional[List[str]] = None,
conditions: Optional[List['GetPolicyDocumentStatementConditionArgs']] = None,
effect: Optional[str] = None,
not_actions: Optional[List[str]] = None,
not_principals: Optional[List['GetPolicyDocumentStatementNotPrincipalArgs']] = None,
not_resources: Optional[List[str]] = None,
principals: Optional[List['GetPolicyDocumentStatementPrincipalArgs']] = None,
resources: Optional[List[str]] = None,
sid: Optional[str] = None):
"""
:param List[str] actions: A list of actions that this statement either allows
or denies. For example, ``["ec2:RunInstances", "s3:*"]``.
:param List['GetPolicyDocumentStatementConditionArgs'] conditions: A nested configuration block (described below)
that defines a further, possibly-service-specific condition that constrains
whether this statement applies.
:param str effect: Either "Allow" or "Deny", to specify whether this
statement allows or denies the given actions. The default is "Allow".
:param List[str] not_actions: A list of actions that this statement does *not*
apply to. Used to apply a policy statement to all actions *except* those
listed.
:param List['GetPolicyDocumentStatementNotPrincipalArgs'] not_principals: Like `principals` except gives principals that
the statement does *not* apply to.
:param List[str] not_resources: A list of resource ARNs that this statement
does *not* apply to. Used to apply a policy statement to all resources
*except* those listed.
:param List['GetPolicyDocumentStatementPrincipalArgs'] principals: A nested configuration block (described below)
specifying a principal (or principal pattern) to which this statement applies.
:param List[str] resources: A list of resource ARNs that this statement applies
to. This is required by AWS if used for an IAM policy.
:param str sid: An ID for the policy statement.
"""
if actions is not None:
pulumi.set(__self__, "actions", actions)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if effect is not None:
pulumi.set(__self__, "effect", effect)
if not_actions is not None:
pulumi.set(__self__, "not_actions", not_actions)
if not_principals is not None:
pulumi.set(__self__, "not_principals", not_principals)
if not_resources is not None:
pulumi.set(__self__, "not_resources", not_resources)
if principals is not None:
pulumi.set(__self__, "principals", principals)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if sid is not None:
pulumi.set(__self__, "sid", sid)
@property
@pulumi.getter
def actions(self) -> Optional[List[str]]:
"""
A list of actions that this statement either allows
or denies. For example, ``["ec2:RunInstances", "s3:*"]``.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[List[str]]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter
def conditions(self) -> Optional[List['GetPolicyDocumentStatementConditionArgs']]:
"""
A nested configuration block (described below)
that defines a further, possibly-service-specific condition that constrains
whether this statement applies.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[List['GetPolicyDocumentStatementConditionArgs']]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter
def effect(self) -> Optional[str]:
"""
Either "Allow" or "Deny", to specify whether this
statement allows or denies the given actions. The default is "Allow".
"""
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: Optional[str]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter(name="notActions")
def not_actions(self) -> Optional[List[str]]:
"""
A list of actions that this statement does *not*
apply to. Used to apply a policy statement to all actions *except* those
listed.
"""
return pulumi.get(self, "not_actions")
@not_actions.setter
def not_actions(self, value: Optional[List[str]]):
pulumi.set(self, "not_actions", value)
@property
@pulumi.getter(name="notPrincipals")
def not_principals(self) -> Optional[List['GetPolicyDocumentStatementNotPrincipalArgs']]:
    """
    Like `principals` except gives principals that
    the statement does *not* apply to.
    """
    return pulumi.get(self, "not_principals")

# Setter counterpart; stores the new value through the Pulumi SDK.
@not_principals.setter
def not_principals(self, value: Optional[List['GetPolicyDocumentStatementNotPrincipalArgs']]):
    pulumi.set(self, "not_principals", value)
@property
@pulumi.getter(name="notResources")
def not_resources(self) -> Optional[List[str]]:
    """
    A list of resource ARNs that this statement
    does *not* apply to. Used to apply a policy statement to all resources
    *except* those listed.
    """
    return pulumi.get(self, "not_resources")

# Setter counterpart; stores the new value through the Pulumi SDK.
@not_resources.setter
def not_resources(self, value: Optional[List[str]]):
    pulumi.set(self, "not_resources", value)
@property
@pulumi.getter
def principals(self) -> Optional[List['GetPolicyDocumentStatementPrincipalArgs']]:
    """
    A nested configuration block (described below)
    specifying a principal (or principal pattern) to which this statement applies.
    """
    return pulumi.get(self, "principals")

# Setter counterpart; stores the new value through the Pulumi SDK.
@principals.setter
def principals(self, value: Optional[List['GetPolicyDocumentStatementPrincipalArgs']]):
    pulumi.set(self, "principals", value)
@property
@pulumi.getter
def resources(self) -> Optional[List[str]]:
    """
    A list of resource ARNs that this statement applies
    to. This is required by AWS if used for an IAM policy.
    """
    return pulumi.get(self, "resources")

# Setter counterpart; stores the new value through the Pulumi SDK.
@resources.setter
def resources(self, value: Optional[List[str]]):
    pulumi.set(self, "resources", value)
@property
@pulumi.getter
def sid(self) -> Optional[str]:
    """
    An ID for the policy statement.
    """
    return pulumi.get(self, "sid")

# Setter counterpart; stores the new value through the Pulumi SDK.
@sid.setter
def sid(self, value: Optional[str]):
    pulumi.set(self, "sid", value)
@pulumi.input_type
class GetPolicyDocumentStatementConditionArgs:
    """Condition block of a policy-document statement: one IAM condition
    operator (*test*) applied to a context *variable* against *values*."""
    def __init__(__self__, *,
                 test: str,
                 values: List[str],
                 variable: str):
        """
        :param str test: The name of the
               [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html)
               to evaluate.
        :param List[str] values: The values to evaluate the condition against. If multiple
               values are provided, the condition matches if at least one of them applies.
               (That is, the tests are combined with the "OR" boolean operation.)
        :param str variable: The name of a
               [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys)
               to apply the condition to. Context variables may either be standard AWS
               variables starting with `aws:`, or service-specific variables prefixed with
               the service name.
        """
        pulumi.set(__self__, "test", test)
        pulumi.set(__self__, "values", values)
        pulumi.set(__self__, "variable", variable)

    @property
    @pulumi.getter
    def test(self) -> str:
        """
        The name of the
        [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html)
        to evaluate.
        """
        return pulumi.get(self, "test")

    # Setter counterpart; stores the new value through the Pulumi SDK.
    @test.setter
    def test(self, value: str):
        pulumi.set(self, "test", value)

    @property
    @pulumi.getter
    def values(self) -> List[str]:
        """
        The values to evaluate the condition against. If multiple
        values are provided, the condition matches if at least one of them applies.
        (That is, the tests are combined with the "OR" boolean operation.)
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: List[str]):
        pulumi.set(self, "values", value)

    @property
    @pulumi.getter
    def variable(self) -> str:
        """
        The name of a
        [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys)
        to apply the condition to. Context variables may either be standard AWS
        variables starting with `aws:`, or service-specific variables prefixed with
        the service name.
        """
        return pulumi.get(self, "variable")

    @variable.setter
    def variable(self, value: str):
        pulumi.set(self, "variable", value)
@pulumi.input_type
class GetPolicyDocumentStatementNotPrincipalArgs:
    """A principal (type + identifiers) that the statement does *not* apply to."""
    def __init__(__self__, *,
                 identifiers: List[str],
                 type: str):
        """
        :param List[str] identifiers: List of identifiers for principals. When `type`
               is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
        :param str type: The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
        """
        pulumi.set(__self__, "identifiers", identifiers)
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def identifiers(self) -> List[str]:
        """
        List of identifiers for principals. When `type`
        is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
        """
        return pulumi.get(self, "identifiers")

    # Setter counterpart; stores the new value through the Pulumi SDK.
    @identifiers.setter
    def identifiers(self, value: List[str]):
        pulumi.set(self, "identifiers", value)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: str):
        pulumi.set(self, "type", value)
@pulumi.input_type
class GetPolicyDocumentStatementPrincipalArgs:
    """A principal (type + identifiers) to which the statement applies."""
    def __init__(__self__, *,
                 identifiers: List[str],
                 type: str):
        """
        :param List[str] identifiers: List of identifiers for principals. When `type`
               is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
        :param str type: The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
        """
        pulumi.set(__self__, "identifiers", identifiers)
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def identifiers(self) -> List[str]:
        """
        List of identifiers for principals. When `type`
        is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs.
        """
        return pulumi.get(self, "identifiers")

    # Setter counterpart; stores the new value through the Pulumi SDK.
    @identifiers.setter
    def identifiers(self, value: List[str]):
        pulumi.set(self, "identifiers", value)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated".
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: str):
        pulumi.set(self, "type", value)
| StarcoderdataPython |
42542 | <filename>setup.py
from setuptools import setup, find_packages
# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as req_file:
    requirements = req_file.read().splitlines()

setup(
    name="slack_utils",
    version="0.0.1",
    description="my slack utils",
    author="mollinaca",
    packages=find_packages(),
    install_requires=requirements,
    entry_points={
        "console_scripts": [
            "slack_utils=slack_utils.__main__:main",
        ]
    },
    classifiers=[
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
| StarcoderdataPython |
7322 | import struct
import msgpack
from lbrynet.wallet.transaction import Transaction, Output
from torba.server.hash import hash_to_hex_str
from torba.server.block_processor import BlockProcessor
from lbrynet.schema.claim import Claim
from lbrynet.wallet.server.model import ClaimInfo
class LBRYBlockProcessor(BlockProcessor):
    """Block processor extending the base chain sync with LBRY claim
    bookkeeping: claim/update/abandon outputs are indexed into ``self.db``,
    and per-height undo information is recorded so reorgs can be reverted."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Poll for new blocks faster on regtest so tests progress quickly.
        if self.env.coin.NET == "regtest":
            self.prefetcher.polling_delay = 0.5
        # Claim signature validation is optional (env toggle).
        self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False)
        self.logger.info("LbryumX Block Processor - Validating signatures: {}".format(self.should_validate_signatures))

    def advance_blocks(self, blocks):
        # save height, advance blocks as usual, then hook our claim tx processing
        height = self.height + 1
        super().advance_blocks(blocks)
        pending_undo = []
        for index, block in enumerate(blocks):
            undo = self.advance_claim_txs(block.transactions, height + index)
            pending_undo.append((height+index, undo,))
        self.db.write_undo(pending_undo)

    def advance_claim_txs(self, txs, height):
        """Process the claim-related outputs/inputs of every tx at *height*.

        Returns a list of (claim_id, previous_claim_info_or_None) pairs,
        enough to revert the block's claim effects during a reorg.
        """
        # TODO: generate claim undo info!
        undo_info = []
        add_undo = undo_info.append
        update_inputs = set()
        for etx, txid in txs:
            update_inputs.clear()
            tx = Transaction(etx.serialize())
            for index, output in enumerate(tx.outputs):
                if not output.is_claim:
                    continue
                if output.script.is_claim_name:
                    add_undo(self.advance_claim_name_transaction(output, height, txid, index))
                elif output.script.is_update_claim:
                    update_input = self.db.get_update_input(output.claim_hash, tx.inputs)
                    if update_input:
                        update_inputs.add(update_input)
                        add_undo(self.advance_update_claim(output, height, txid, index))
                    else:
                        # Update without a matching spend of the prior claim.
                        info = (hash_to_hex_str(txid), output.claim_id,)
                        self.logger.error("REJECTED: {} updating {}".format(*info))
            # Any spent claim outpoint that was not consumed by an update is an abandon.
            for txin in tx.inputs:
                if txin not in update_inputs:
                    abandoned_claim_id = self.db.abandon_spent(txin.txo_ref.tx_ref.hash, txin.txo_ref.position)
                    if abandoned_claim_id:
                        add_undo((abandoned_claim_id, self.db.get_claim_info(abandoned_claim_id)))
        return undo_info

    def advance_update_claim(self, output: Output, height, txid, nout):
        """Apply a claim-update output: re-point indexes from the old claim
        state to the new one; returns (claim_id, old_claim_info) for undo."""
        claim_id = output.claim_hash
        claim_info = self.claim_info_from_output(output, txid, nout, height)
        old_claim_info = self.db.get_claim_info(claim_id)
        self.db.put_claim_id_for_outpoint(old_claim_info.txid, old_claim_info.nout, None)
        if old_claim_info.cert_id:
            self.db.remove_claim_from_certificate_claims(old_claim_info.cert_id, claim_id)
        if claim_info.cert_id:
            self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
        self.db.put_claim_info(claim_id, claim_info)
        self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
        return claim_id, old_claim_info

    def advance_claim_name_transaction(self, output: Output, height, txid, nout):
        """Apply a fresh name-claim output; returns (claim_id, None) for undo
        (None marks 'claim did not exist before this block')."""
        claim_id = output.claim_hash
        claim_info = self.claim_info_from_output(output, txid, nout, height)
        if claim_info.cert_id:
            self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
        self.db.put_claim_info(claim_id, claim_info)
        self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
        return claim_id, None

    def backup_from_undo_info(self, claim_id, undo_claim_info):
        """
        Undo information holds a claim state **before** a transaction changes it
        There are 4 possibilities when processing it, of which only 3 are valid ones:
        1. the claim is known and the undo info has info, it was an update
        2. the claim is known and the undo info doesn't hold any info, it was claimed
        3. the claim in unknown and the undo info has info, it was abandoned
        4. the claim is unknown and the undo info does't hold info, error!
        """
        undo_claim_info = ClaimInfo(*undo_claim_info) if undo_claim_info else None
        current_claim_info = self.db.get_claim_info(claim_id)
        if current_claim_info and undo_claim_info:
            # update, remove current claim
            self.db.remove_claim_id_for_outpoint(current_claim_info.txid, current_claim_info.nout)
            if current_claim_info.cert_id:
                self.db.remove_claim_from_certificate_claims(current_claim_info.cert_id, claim_id)
        elif current_claim_info and not undo_claim_info:
            # claim, abandon it
            self.db.abandon_spent(current_claim_info.txid, current_claim_info.nout)
        elif not current_claim_info and undo_claim_info:
            # abandon, reclaim it (happens below)
            pass
        else:
            # should never happen, unless the database got into an inconsistent state
            raise Exception("Unexpected situation occurred on backup, this means the database is inconsistent. "
                            "Please report. Resetting the data folder (reindex) solves it for now.")
        if undo_claim_info:
            self.db.put_claim_info(claim_id, undo_claim_info)
            if undo_claim_info.cert_id:
                cert_id = self._checksig(undo_claim_info.value, undo_claim_info.address)
                self.db.put_claim_id_signed_by_cert_id(cert_id, claim_id)
            self.db.put_claim_id_for_outpoint(undo_claim_info.txid, undo_claim_info.nout, claim_id)

    def backup_txs(self, txs):
        """Revert the claim effects at the current height using stored undo info."""
        self.logger.info("Reorg at height {} with {} transactions.".format(self.height, len(txs)))
        undo_info = msgpack.loads(self.db.claim_undo_db.get(struct.pack(">I", self.height)), use_list=False)
        # Undo entries are reverted in reverse order of application.
        for claim_id, undo_claim_info in reversed(undo_info):
            self.backup_from_undo_info(claim_id, undo_claim_info)
        return super().backup_txs(txs)

    def backup_blocks(self, raw_blocks):
        # Flush pending claim writes before and after the base reorg handling
        # so the claim index stays consistent with the chain state.
        self.db.batched_flush_claims()
        super().backup_blocks(raw_blocks=raw_blocks)
        self.db.batched_flush_claims()

    async def flush(self, flush_utxos):
        # Persist pending claim writes alongside the base flush.
        self.db.batched_flush_claims()
        return await super().flush(flush_utxos)

    def claim_info_from_output(self, output: Output, txid, nout, height):
        """Build a ClaimInfo record for a claim output, resolving its cert id."""
        address = self.coin.address_from_script(output.script.source)
        name, value, cert_id = output.script.values['claim_name'], output.script.values['claim'], None
        assert txid and address
        cert_id = self._checksig(value, address)
        return ClaimInfo(name, value, txid, nout, output.amount, address, height, cert_id)

    def _checksig(self, value, address):
        """Return the signing-channel (cert) id of a serialized claim, or None.

        When signature validation is enabled, the signature is checked against
        the certificate claim; any exception results in None (no cert).
        """
        try:
            claim_dict = Claim.from_bytes(value)
            cert_id = claim_dict.signing_channel_hash
            if not self.should_validate_signatures:
                return cert_id
            if cert_id:
                cert_claim = self.db.get_claim_info(cert_id)
                if cert_claim:
                    certificate = Claim.from_bytes(cert_claim.value)
                    claim_dict.validate_signature(address, certificate)
            return cert_id
        except Exception:
            pass
| StarcoderdataPython |
1643538 | <reponame>varajala/microtest
"""
Utility functions for microtest.core that don't require any state.
Author: <NAME>
"""
import os
import functools
import inspect
from microtest.objects import Module, Types
def capture_exception(func: Types.Function) -> Types.Function:
    """Wrap *func* so calling it returns the raised exception, or None."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as exc:
            return exc
        return None
    return wrapper
def generate_signature(obj: object) -> list:
    """
    Generate a list of argument names from the function signature.

    *obj* must be a function, a method, or a wrapper object exposing the
    real function as its ``func`` attribute; otherwise TypeError is raised.
    """
    target = None
    if inspect.isfunction(obj) or inspect.ismethod(obj):
        target = obj
    if hasattr(obj, 'func'):
        target = obj.func
    if target is None:
        info = '\n'.join([
            'Failed to create a function signature.',
            'The provided object must be a function, method',
            'or some wrapper object with the actual',
            'function object available as object.func attribute...',
        ])
        raise TypeError(info)
    return list(inspect.signature(target).parameters)
def check_logger_object(obj: object):
    """Validate that *obj* implements the Logger interface; raise TypeError if not."""
    # (method name, expected argument-name list) for every required method.
    logger_interface = (
        ('log_start_info', list()),
        ('log_module_info', ['module_path']),
        ('log_test_info', ['name', 'result', 'exc']),
        ('log_module_exec_error', ['module_path', 'exc_type', 'exc', 'tb']),
        ('log_results', ['tests', 'failed', 'errors', 'time']),
        ('terminate', list())
    )
    if obj is None:
        raise TypeError('No logger object set')
    for method_name, signature in logger_interface:
        if not hasattr(obj, method_name):
            raise TypeError(
                f'Invalid Logger implementation: Expected Logger to have attribute "{method_name}"'
            )
        candidate = getattr(obj, method_name)
        if not hasattr(candidate, '__call__'):
            raise TypeError(
                f'Invalid Logger implementation: Attribute "{method_name}" is not callable'
            )
        actual_signature = generate_signature(candidate)
        if actual_signature != signature:
            raise TypeError(
                'Invalid Logger implementation: '
                f'Signature of "{method_name}" doesn\'t match the interface requirement.\n\n'
                f'{actual_signature} != {signature}'
            )
def filter_tests(module: Module, only_groups: set, excluded_groups: set) -> Types.Iterable:
    """
    Select the tests of *module* according to group filters.

    A non-empty *only_groups* wins: only tests whose group is listed are
    kept, even if their group also appears in *excluded_groups*. Otherwise
    tests whose group is in *excluded_groups* are dropped. When the module
    defines a fixture, the selected tests are attached to it and the fixture
    instance is returned; otherwise the plain list of tests is returned.
    """
    if only_groups:
        selected = [test for test in module.tests if test.group in only_groups]
    elif excluded_groups:
        selected = [test for test in module.tests if test.group not in excluded_groups]
    else:
        selected = module.tests.copy()
    if module.fixture:
        module.fixture.tests = selected
        return module.fixture
    return selected
def filter_modules(modules: tuple, only_modules: set, excluded_modules: set) -> tuple:
    """
    Filter the executed modules based on only_modules and excluded_modules.

    Restrictions are strings: an absolute path must equal a module's
    absolute path exactly; any other string matches as a substring of it.
    If only_modules is non-empty, only matching modules are kept, even when
    excluded_modules is also non-empty. Otherwise modules matching
    excluded_modules are dropped.

    The original module order is preserved and each module appears at most
    once. (The previous implementation iterated restrictions in the outer
    loop, so a module matching several restrictions was emitted once per
    restriction and output order followed set iteration order.)
    """
    def path_meets_restriction(module_path: str, restriction: str) -> bool:
        # Absolute restrictions must match exactly; others match as substrings.
        if os.path.isabs(restriction):
            return module_path == restriction
        return restriction in module_path

    if only_modules:
        return tuple(
            module_path for module_path in modules
            if any(path_meets_restriction(module_path, r) for r in only_modules)
        )
    if not excluded_modules:
        return modules
    return tuple(
        module_path for module_path in modules
        if not any(path_meets_restriction(module_path, r) for r in excluded_modules)
    )
| StarcoderdataPython |
3328717 | <filename>face_tracking.py<gh_stars>1-10
# coding: utf-8
import picamera
import picamera.array
import cv2
import pigpio
import time
xsv = 25 # GPIO port number of the X-axis servo
ysv = 24 # GPIO port number of the y-axis servo
span = 300 # movable duty range either side of a servo's center
xct = 1550 # center duty value of the X-axis servo
yct = 1490 # center duty value of the Y-axis servo (original comment said X-axis; copy/paste slip)
dly = 0.01 # wait between duty steps while driving a servo
stp = 2 # duty step size while driving a servo
xsize = 320 # RGB horizontal size (pixels)
ysize = 240 # RGB vertical size (pixels)

# Servo travel limits
xmin = xct - span
xmax = xct + span
ymin = yct - span
ymax = yct + span

# Global position state (current and previous servo duty values)
xpos = xct
ypos = yct
xpos0 = xpos
ypos0 = ypos

sv = pigpio.pi()

def move(svn,in0,in1,step):
    # Sweep servo `svn` from duty `in0` to `in1` in increments of `step`,
    # sleeping `dly` seconds per step so the motion is smooth.
    if in1 > in0:
        for duty in range(in0,in1,step):
            sv.set_servo_pulsewidth(svn,duty)
            time.sleep(dly)
    if in1 < in0:
        for duty in range(in0,in1,-step):
            sv.set_servo_pulsewidth(svn,duty)
            time.sleep(dly)

# Move the camera to its center position
move(xsv,sv.get_servo_pulsewidth(xsv),xpos,stp)
move(ysv,sv.get_servo_pulsewidth(ysv),ypos,stp)

cascade_file = "./haarcascade_frontalface_default.xml"

with picamera.PiCamera() as camera:
    with picamera.array.PiRGBArray(camera) as stream:
        camera.resolution = (xsize, ysize)
        camera.vflip = True
        camera.hflip = True
        while True:
            # Capture a frame into stream.array (note: 'bgr' channel order,
            # even though the original comment said RGB)
            camera.capture(stream, 'bgr', use_video_port=True)
            # Convert to grayscale
            gray = cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
            # Locate faces using the Haar cascade file
            cascade = cv2.CascadeClassifier(cascade_file)
            face_list = cascade.detectMultiScale(gray, minSize=(100, 100))
            if len(face_list):
                for (x, y, w, h) in face_list:
                    print("face_position:",x, y, w, h)
                    color = (0, 0, 255)
                    pen_w = 5
                    cv2.rectangle(stream.array, (x, y), (x+w, y+h), color, thickness = pen_w)
                    # Move the camera toward the face: offset of the face
                    # center from the frame center, scaled by a 0.2 gain.
                    xdf = (x + w/2) - xsize/2
                    ydf = (y + h/2) - ysize/2
                    xpos = int(xpos0 - xdf*0.2)
                    ypos = int(ypos0 + ydf*0.2)
                    # Clamp targets to the servo travel limits
                    if xpos > xmax:
                        xpos = xmax
                    if xpos < xmin:
                        xpos = xmin
                    if ypos > ymax:
                        ypos = ymax
                    if ypos < ymin:
                        ypos = ymin
                    move(xsv,xpos0,xpos,stp)
                    move(ysv,ypos0,ypos,stp)
                    xpos0 = xpos
                    ypos0 = ypos
            # Show stream.array in a window
            cv2.imshow('frame', stream.array)
            # Close the window when "q" is pressed
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()

cv2.destroyAllWindows()
| StarcoderdataPython |
4822327 | <gh_stars>0
from greenflow.dataframe_flow import Node, PortsSpecSchema
from greenflow.dataframe_flow.portsSpecSchema import ConfSchema
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
__all__ = ["AddSignIndicatorNode"]
class AddSignIndicatorNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
    """Node that appends a 0/1 'sign' column to a dataframe: 1 where the
    configured source column is greater than zero, 0 otherwise."""

    def init(self):
        # Declare one dataframe input and a same-typed output; the output
        # meta is the input meta plus the configured sign column (int64).
        TemplateNodeMixin.init(self)
        self.INPUT_PORT_NAME = 'in'
        self.OUTPUT_PORT_NAME = 'out'
        port_type = PortsSpecSchema.port_type
        port_inports = {
            self.INPUT_PORT_NAME: {
                port_type: [
                    "pandas.DataFrame", "cudf.DataFrame",
                    "dask_cudf.DataFrame", "dask.dataframe.DataFrame"
                ]
            },
        }
        port_outports = {
            self.OUTPUT_PORT_NAME: {
                port_type: "${port:in}"
            }
        }
        # Output column name defaults to 'sign' unless configured.
        name = self.conf.get('sign', 'sign')
        addition = {name: "int64"}
        cols_required = {}
        meta_inports = {
            self.INPUT_PORT_NAME: cols_required
        }
        meta_outports = {
            self.OUTPUT_PORT_NAME: {
                MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
                MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
                MetaDataSchema.META_DATA: addition
            }
        }
        self.template_ports_setup(
            in_ports=port_inports,
            out_ports=port_outports
        )
        self.template_meta_setup(
            in_ports=meta_inports,
            out_ports=meta_outports
        )

    def update(self):
        # Require the configured source column on the input port, using the
        # upstream meta (when available) to propagate its dtype.
        TemplateNodeMixin.update(self)
        meta_inports = self.template_meta_setup().inports
        required = meta_inports[self.INPUT_PORT_NAME]
        if 'column' in self.conf:
            col_name = self.conf['column']
            input_meta = self.get_input_meta()
            if self.INPUT_PORT_NAME not in input_meta:
                # No upstream meta yet; dtype unknown.
                required[col_name] = None
            else:
                col_from_inport = input_meta[self.INPUT_PORT_NAME]
                if col_name in col_from_inport:
                    required[col_name] = col_from_inport[col_name]
                else:
                    required[col_name] = None
        meta_inports[self.INPUT_PORT_NAME] = required
        self.template_meta_setup(in_ports=meta_inports, out_ports=None)

    def conf_schema(self):
        """Build the JSON/UI schema for the node configuration; when upstream
        meta is available, 'column' becomes an enum of the input's columns."""
        json = {
            "title": "Add Sign Indicator configure",
            "type": "object",
            "description": """If the number is bigger than zero,
            the sign is 1, otherwise the sign is 0
            """,
            "properties": {
                "column": {
                    "type": "string",
                    "description": """the column that is used to calcuate
                    sign"""
                },
                "sign": {
                    "type": "string",
                    "description": "the sign column name",
                    "default": "sign"
                }
            },
            "required": ["column"],
        }
        ui = {
        }
        input_meta = self.get_input_meta()
        if self.INPUT_PORT_NAME in input_meta:
            col_from_inport = input_meta[self.INPUT_PORT_NAME]
            enums = [col for col in col_from_inport.keys()]
            json['properties']['column']['enum'] = enums
            return ConfSchema(json=json, ui=ui)
        else:
            return ConfSchema(json=json, ui=ui)

    def process(self, inputs):
        """
        Add the sign-indicator column to the input dataframe: 1 where the
        configured column is greater than zero, 0 otherwise. (The previous
        docstring, copied from a rename node, described the wrong behavior.)

        Arguments
        -------
         inputs: list
            list of input dataframes.
        Returns
        -------
        dataframe
        """
        input_df = inputs[self.INPUT_PORT_NAME]
        name = self.conf.get('sign', 'sign')
        input_df[name] = (input_df[self.conf['column']] > 0).astype('int64')
        return {self.OUTPUT_PORT_NAME: input_df}
| StarcoderdataPython |
3207438 | <filename>userbot/database/settings.py
from userbot.database import database
class Settings:
    """Thin wrapper around the userbot 'settings' database table."""

    def __init__(self):
        # Handle to the 'settings' table of the shared database.
        self.pm_table = database()['settings']

    def set_pm_permit(self, status: bool):
        # Persist the PM-permit flag. The trailing '' argument matches the
        # update_one signature of this project's database wrapper —
        # presumably an unused filter/options slot; TODO confirm.
        self.pm_table.update_one({'key': 'pm_permit', 'value': status}, '')
| StarcoderdataPython |
1786273 | <reponame>maschmi/sparbahn
from bahn.bahn import Sparbahn, TripData
from datetime import datetime, timedelta, date
from alert.mailalert import Mailalert
def allweekdays(start_date, weekday, weeks):
    """Yield every date falling on *weekday* (0=Monday) within *weeks* weeks
    of *start_date*, skipping any date that is already in the past."""
    current = start_date + timedelta(days=weekday - start_date.weekday())
    last = current + timedelta(days=7 * weeks)
    while current <= last:
        if current >= datetime.now().date():
            yield current
        current += timedelta(days=7)
# Query parameters: outbound trips on Thursdays (weekday 3), return trips on
# Saturdays (weekday 5), for `future_weeks` weeks ahead.
start = 'Dresden Hbf'
target = 'N'
future_weeks = 1
datesTo = list(allweekdays(datetime.now().date(),3,future_weeks))
#datesTo.extend(list(allweekdays(datetime.now().date(),4,future_weeks)))
datesBack = list(allweekdays(datetime.now().date(),5,future_weeks))
#datesBack.extend(list(allweekdays(datetime.now().date(),6,future_weeks)))

trips_to = []
trips_back = []
# NOTE(review): allweekdays may yield fewer than future_weeks*2 dates (past
# dates are skipped), so indexing datesTo[i]/datesBack[i] below can raise
# IndexError — confirm the intended loop bound.
for i in range(0,future_weeks*2):
    trip = Sparbahn(start = start, target = target, fast = False, tripType='return', dateTo=datesTo[i], dateBack=datesBack[i])
    trip.getData()
    trip.writeToFile("testdata")
    tripdata_to = TripData()
    tripdata_to.readTrip(trip.toData)
    trips_to.append(tripdata_to)
    tripdata_back = TripData()
    tripdata_back.readTrip(trip.backData)
    trips_back.append(tripdata_back)

# Filter outbound trips to the 15:00-21:00 window and alert by mail.
# NOTE(review): createMessage(...)/sendMessage(...) are called with a literal
# Ellipsis — placeholders that still need real arguments.
for tripdata in trips_to:
    tripdata.trips = tripdata.findFilter(tripdata.tripDate.strftime("%d.%m.%Y"),"15:00","21:00",30)
    alert = Mailalert(tripdata)
    alert.createMessage(...)
    alert.sendMessage(...)

# Same for the return trips, in the 16:00-23:59 window.
for tripdata in trips_back:
    tripdata.trips = tripdata.findFilter(tripdata.tripDate.strftime("%d.%m.%Y"),"16:00","23:59",30)
    alert = Mailalert(tripdata)
    alert.createMessage(...)
    alert.sendMessage(...)
| StarcoderdataPython |
# Load the known Pokémon names, one per line, stripping trailing newlines.
with open("pokemon_list.txt", "r") as f:
    pokemon_lista = f.readlines()
pokemon_lista = [elemento.strip('\n') for elemento in pokemon_lista]
import data as d
def validate(name, p_l=pokemon_lista, mensaje=d.validacion_pokemon()):
    """Keep prompting until *name* is a known Pokémon; return the valid name.

    'codigo-cero' (the Spanish name for Type: Null) is mapped to its list
    entry 'type-null' before validation.
    """
    candidate = 'type-null' if name == 'codigo-cero' else name
    while candidate not in p_l:
        candidate = input(mensaje).lower()
    return candidate
if __name__ == '__main__':
    # Quick manual check: 'codigo-cero' should resolve to 'type-null'.
    name = 'codigo-cero'
    print(validate(name))
| StarcoderdataPython |
1712694 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import gtk
import gobject
import gtksourceview2 as gtksourceview
import gio
import pango
import gobject
from editor_base import *
def get_language_for_mime_type(mime):
    """Return the gtksourceview language whose mime types include *mime*,
    or None when no registered language matches."""
    manager = gtksourceview.language_manager_get_default()
    for lang_id in manager.get_language_ids():
        language = manager.get_language(lang_id)
        if mime in language.get_mime_types():
            return language
    return None
def init_buffer(full_filename):
    """Create a gtksourceview.Buffer loaded with the file's contents, with the
    'classic' style scheme and a language picked from the file's mime type.
    (Python 2 module: note the print statements.)"""
    buffer = gtksourceview.Buffer()
    mgr = gtksourceview.style_scheme_manager_get_default()
    style_scheme = mgr.get_scheme('classic')
    if style_scheme:
        buffer.set_style_scheme(style_scheme)
    # Use gio to sniff the file's content type (mime).
    f = gio.File(os.path.abspath(full_filename))
    path = f.get_path()  # NOTE(review): unused local
    info = f.query_info("*")
    mime_type = info.get_content_type()
    language = None
    if mime_type:
        language = get_language_for_mime_type(mime_type)
        if not language:
            print 'No language found for mime type "%s"' % mime_type
    else:
        print 'Couldn\'t get mime type for file "%s"' % full_filename
    # language may be None; set_language(None) disables highlighting for it.
    buffer.set_language(language)
    buffer.set_highlight_syntax(True)
    f = open(full_filename, "r")
    buffer.set_text(f.read())
    f.close()
    return buffer
class SourceViewTab(gtk.ScrolledWindow):
    """A scrollable source-code tab: a gtksourceview.View over a read-only
    buffer, with gutter marks (breakpoints etc.) and line highlighting."""

    def __init__(self,editor,file_handle):
        if file_handle.exists == False:
            raise Exception("Cannot create SourceViewTab with a nonexistant file.")
        gtk.ScrolledWindow.__init__(self)
        # self.set_shadow_type(gtk.SHADOW_IN)
        self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self._editor = editor
        self._file_handle = file_handle
        self.buffer = init_buffer(file_handle.absolute_name)
        self._view = gtksourceview.View(self.buffer)
        self.add(self._view)
        # Monospace font sized from the editor resources.
        font_desc = pango.FontDescription('monospace %s' % editor.mc.resources.CODE_FONT_SIZE)
        if font_desc:
            self._view.modify_font(font_desc)
        self._view.set_show_line_numbers(True)
        self._view.set_editable(False)
        self._view.set_show_line_marks(True)
        self._view.set_property('highlight-current-line', True)
        # Gutter clicks toggle breakpoints; size changes keep the cursor visible.
        self._view.connect_object('button-press-event', self._on_button_pressed,None)
        self._view.connect('size-allocate', self._on_size_allocated)
        self._current_line_tag = self.buffer.create_tag(None,background=editor.mc.resources.COLOR_CURRENT_LINE)
        self._active_frame_tag = self.buffer.create_tag(None,background=editor.mc.resources.COLOR_ACTIVE_FRAME)
        # Register one gutter pixbuf per mark category.
        for mark_res in self._editor._mc.resources.mark_resources.values():
            self._view.set_mark_category_pixbuf(mark_res.name, mark_res.pixmap)

    def grab_focus(self):
        self._view.grab_focus()

    def focus_line(self,line):
        """Move the cursor to 1-based *line* and scroll it into view."""
        l = self.buffer.get_iter_at_line(line-1)
        self.buffer.move_mark_by_name("insert", l)
        self.buffer.move_mark_by_name("selection_bound", l)
        self._view.scroll_mark_onscreen(self.buffer.get_mark("insert"))

    @property
    def file_handle(self):
        return self._file_handle

    def _on_size_allocated(self,a,b):
        # Keep the cursor visible when the widget is resized.
        self._view.scroll_mark_onscreen(self.buffer.get_mark("insert"))

    def get_current_line(self):
        # 1-based line number of the insertion cursor.
        line = self.buffer.get_iter_at_mark(self.buffer.get_mark("insert")).get_line()+1
        return line

    def set_line_mark_states(self,added,changed,removed):
        """Apply mark-state changes: *added*/*changed* map line -> state,
        *removed* is an iterable of lines to clear completely."""
        for l in added.keys():
            self._set_line_mark_state(l,added[l])
        for l in changed.keys():
            self._set_line_mark_state(l,changed[l])
        completely_unmarked = LineMarkState()
        for l in removed:
            self._set_line_mark_state(l,completely_unmarked)

    def _set_line_mark_state(self,line,mark_state):
        """Sync the gutter mark and highlight tags on one 1-based line."""
        line_begin_iter = self.buffer.get_iter_at_line(line-1)
        line_end_iter = self.buffer.get_iter_at_line(line)
        # remove any marks currently on this line...
        self.buffer.remove_source_marks(line_begin_iter,line_end_iter)
        # create source mark
        res = mark_state.get_mark_resource(self._editor._mc.resources)
        if res:
            self.buffer.create_source_mark(None,res.name,line_begin_iter)
        # set or remove the highlight
        if mark_state.current_line:
            self.buffer.apply_tag(self._current_line_tag, line_begin_iter, line_end_iter)
        else:
            self.buffer.remove_tag(self._current_line_tag, line_begin_iter, line_end_iter)
        # update the active_frame highlight
        if mark_state.active_frame:
            self.buffer.apply_tag(self._active_frame_tag, line_begin_iter, line_end_iter)
        else:
            self.buffer.remove_tag(self._active_frame_tag, line_begin_iter, line_end_iter)

    def _on_button_pressed(self, view, ev):
        """Toggle a breakpoint when the left line-number gutter is clicked."""
        buffer = self.buffer
        # check that the click was on the left gutter
        if ev.window == self._view.get_window(gtk.TEXT_WINDOW_LEFT):
            x_buf, y_buf = self._view.window_to_buffer_coords(gtk.TEXT_WINDOW_LEFT,
                int(ev.x), int(ev.y))
            # get line bounds
            line_start = self._view.get_line_at_y(y_buf)[0].get_line()+1
            loc = self._file_handle.make_location(line_start)
            self._editor.toggle_breakpoint(loc)
        return False
if __name__ == "__main__":
    # Ad-hoc manual demo of the tab widget.
    __import__('pygtk').require('2.0')
    w = gtk.Window()
    w.set_title("test")
    # NOTE(review): SourceViewTab.__init__ above takes (editor, file_handle);
    # this one-argument call predates that signature and would raise — confirm.
    sv = SourceViewTab("./test/test1.c")
    w.set_size_request(400,300)
    # NOTE(review): set_current_breakpoints/set_current_line are not defined
    # on SourceViewTab above; this demo appears out of date.
    sv.set_current_breakpoints([1,3,5])
    sv.set_current_line(4)
    sv.show()
    w.add(sv)
    w.show()
    gtk.main()
| StarcoderdataPython |
1624033 | # Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from requestbuilder import Arg
from requestbuilder.exceptions import ArgumentError
from euca2ools.commands.s3 import S3, S3Request
class S3AccessMixin(object):
    """Mixin that adds object-storage (S3) endpoint/credential arguments to a
    command and can turn them into an auth object and service object pair.
    """

    ARGS = [Arg('--s3-url', metavar='URL', route_to=None,
                help='object storage service endpoint URL'),
            Arg('-o', '--owner-akid', metavar='KEY_ID', route_to=None,
                help='''access key to use for the object storage service
                (default: same as that for the compute service)'''),
            Arg('-w', '--owner-sak', metavar='KEY', route_to=None,
                help='''secret key to use for the object storage service
                (default: same as that for the compute service)'''),
            # Pass-throughs
            Arg('--s3-service', route_to=None, help=argparse.SUPPRESS),
            Arg('--s3-auth', route_to=None, help=argparse.SUPPRESS)]

    def configure_s3_access(self):
        """Populate ``self.args['s3_auth']`` and ``self.args['s3_service']``.

        The owner access key id and secret key must be given together; when
        they are absent the compute service's credentials are reused.

        :raises ArgumentError: if only one of the owner credentials is given.
        """
        akid = self.args.get('owner_akid')
        sak = self.args.get('owner_sak')
        if akid and not sak:
            raise ArgumentError('argument -o/--owner-akid also requires '
                                '-w/--owner-sak')
        if sak and not akid:
            raise ArgumentError('argument -w/--owner-sak also requires '
                                '-o/--owner-akid')
        if not self.args.get('s3_auth'):
            # Clone the compute-service auth, overriding the key pair when an
            # explicit owner key pair was supplied.
            if sak and akid:
                self.args['s3_auth'] = S3Request.AUTH_CLASS.from_other(
                    self.auth, key_id=akid, secret_key=sak)
            else:
                self.args['s3_auth'] = S3Request.AUTH_CLASS.from_other(
                    self.auth)
        if not self.args.get('s3_service'):
            self.args['s3_service'] = S3.from_other(self.service)
| StarcoderdataPython |
43201 | <filename>2021/d03/d03.py<gh_stars>0
#!/usr/bin/env python3
# Read the puzzle input: one binary string (e.g. "01101") per line.
inp = []
with open('03.txt') as fp:
    for line in fp:
        inp.append(line.strip())
def part1(arr):
    """Gamma/epsilon rates: per-column majority vote over the report.

    Returns ``(gamma, epsilon)`` as binary strings; gamma takes the most
    common bit of each column (ties -> '1') and epsilon is its complement.
    """
    width = len(arr[0])
    ones = [0] * width
    for number in arr:
        for pos, bit in enumerate(number):
            ones[pos] += int(bit)
    threshold = len(arr) / 2.0
    gamma_bits = ['1' if count >= threshold else '0' for count in ones]
    epsilon_bits = ['1' if bit == '0' else '0' for bit in gamma_bits]
    return ''.join(gamma_bits), ''.join(epsilon_bits)
def part2(arr):
    # Part 2 answer: product of the oxygen generator rating and the
    # CO2 scrubber rating.
    return oxygen(arr) * co2(arr)
def oxygen(arr):
    """Oxygen generator rating (AoC 2021 day 3, part 2).

    Repeatedly keep the numbers whose i-th bit equals the most common bit
    in that column (ties keep '1'), until a single number remains, and
    return it as an int.

    Bug fixed: the original assigned the result of ``filter`` back to
    ``arr`` and then called ``len(arr)`` / ``arr[0]`` on it, which raises
    TypeError on Python 3 where ``filter`` returns a lazy iterator.
    """
    candidates = list(arr)
    for i in range(len(candidates[0])):
        if len(candidates) == 1:
            break
        ones = sum(1 for x in candidates if x[i] == '1')
        # Most common bit in column i; ties resolve to '1' (same >= rule
        # part1 uses for gamma).
        keep = '1' if ones >= len(candidates) / 2.0 else '0'
        candidates = [x for x in candidates if x[i] == keep]
    return int(candidates[0], 2)
def co2(arr):
    """CO2 scrubber rating (AoC 2021 day 3, part 2).

    Repeatedly keep the numbers whose i-th bit equals the *least* common
    bit in that column (ties keep '0'), until a single number remains, and
    return it as an int.

    Bug fixed: the original assigned the result of ``filter`` back to
    ``arr`` and then called ``len(arr)`` / ``arr[0]`` on it, which raises
    TypeError on Python 3 where ``filter`` returns a lazy iterator.
    """
    candidates = list(arr)
    for i in range(len(candidates[0])):
        if len(candidates) == 1:
            break
        ones = sum(1 for x in candidates if x[i] == '1')
        # Least common bit in column i; ties resolve to '0' (complement of
        # the >= rule part1 uses for gamma).
        keep = '0' if ones >= len(candidates) / 2.0 else '1'
        candidates = [x for x in candidates if x[i] == keep]
    return int(candidates[0], 2)
# Part 1: gamma rate * epsilon rate (both binary strings).
gamma, epsilon = part1(inp)
print(int(gamma, 2) * int(epsilon, 2))
# Part 2: oxygen generator rating * CO2 scrubber rating.
print(part2(inp))
3222325 | import sys
import unittest
from dynd import nd, ndt
# Python 3 removed the ``unicode`` builtin; alias it so the code below can
# refer to ``unicode`` under either major version.
if sys.version_info >= (3, 0):
    unicode = str
@unittest.skip('Test disabled since callables were reworked')
class TestUnicode(unittest.TestCase):
    """String/bytes/unicode round-trip tests for dynd ``nd.array``."""

    def test_array_string(self):
        # A str literal becomes a dynd string array.
        a = nd.array("Testing 1 2 3")
        self.assertEqual(nd.type_of(a), ndt.string)
        self.assertEqual(str(a),
            'nd.array("Testing 1 2 3",\n type="string")')
        # self.assertEqual(unicode(a), u"Testing 1 2 3")

    def test_bytes_string(self):
        if sys.version_info >= (3, 0):
            return
            # NOTE(review): the statements below the ``return`` are
            # unreachable dead code — presumably left over from before the
            # early return was added. TODO confirm against upstream history.
            # This needs to be fixed for Python 3
            a = nd.array(b"Testing 1 2 3")
            b = nd.array([b"First", b"Second"])
        else:
            # In Python 2, str and bytes are the same,
            # so we have to manually request a bytes type
            a = nd.array(b"Testing 1 2 3", type=ndt.bytes)
            b = nd.array([b"First", b"Second"], type=ndt.make_fixed_dim(2, ndt.bytes))
        self.assertEqual(nd.type_of(a), ndt.bytes)
        self.assertEqual(nd.dtype_of(b), ndt.bytes)
        self.assertEqual(nd.as_py(a), b"Testing 1 2 3")
        self.assertEqual(nd.as_py(b), [b"First", b"Second"])

    def test_array_unicode(self):
        # Hangul sample text exercises the non-ASCII path.
        a = nd.array(u"\uc548\ub155")
        b = nd.array([u"\uc548\ub155", u"Hello"])
        self.assertEqual(nd.type_of(a), ndt.string)
        self.assertEqual(nd.dtype_of(b), ndt.string)
        # self.assertEqual(unicode(a), u"\uc548\ub155")
        self.assertEqual(nd.as_py(b), [u"\uc548\ub155", u"Hello"])
        # In Python 2, 'str' is not unicode
        # if sys.version_info < (3, 0):
        #     self.assertRaises(UnicodeEncodeError, str, a)

    # def test_ascii_decode_error(self):
    #     a = nd.array(128, type=ndt.uint8).view_scalars("fixed_string[1,'A']")
    #     self.assertRaises(UnicodeDecodeError, a.cast("string").eval)
@unittest.skip('Test disabled since callables were reworked')
class TestEncodings(unittest.TestCase):
    """Checks the encoding reported by dynd string types."""

    # Encodings the fixed_string type is expected to accept.
    encodings = ["ascii", "utf8", "utf16", "utf32", "ucs2"]

    def test_string_encoding(self):
        # The variable-length string type defaults to utf8.
        t = ndt.type("string")
        self.assertEqual(t.encoding, "utf8")

    def test_fixed_string_encoding(self):
        # fixed_string should carry through whichever encoding was requested.
        for x in self.encodings:
            t = ndt.type("fixed_string[10, '%s']" % x)
            self.assertEqual(t.encoding, x)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| StarcoderdataPython |
1678961 | <reponame>geickelb/mimiciii-antibiotics-modeling<filename>src/testing/testing_2.py<gh_stars>1-10
# Markdown and code chunks testing
# This file is a test of pycharm's markdown and codecell support
#%%
import numpy as np
import matplotlib.pyplot as plt
#%% build a scatterplot
# Random points with random colors and 0-15pt radii, 50% transparent.
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radii
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.show()
#%% plot y vs x as lines
# sin/cos over one period with LaTeX-labelled pi ticks.
X = np.linspace(-np.pi, np.pi, 256,endpoint=True)
C,S = np.cos(X), np.sin(X)
plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-")
plt.plot(X, S, color="red", linewidth=2.5, linestyle="-")
plt.xlim(X.min()*1.1, X.max()*1.1)
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi],
           [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
plt.ylim(C.min()*1.1,C.max()*1.1)
plt.yticks([-1, 0, +1],
           [r'$-1$', r'$0$', r'$+1$'])
plt.show()
3300449 | """Heap data structure example
Copyright 2017, <NAME>
"""
import random
"""Heap data structure
The 'max' property determines whether this is a max or min heap
"""
class Heap:
def __init__(self, property):
self.property = property
self.keys = []
def __repr__(self):
return "{} heap containing {}".format(self.property, self.keys)
# Complexity: O(1)
@staticmethod
def get_parent_index(key_index):
return (key_index + 1) // 2 - 1
# Complexity: O(1)
@staticmethod
def get_left_child_index(key_index):
return 2 * (key_index + 1) - 1
# Complexity: O(1)
@staticmethod
def get_right_child_index(key_index):
return 2 * (key_index + 1)
"""Swap operation
Complexity: O(1)
"""
def swap(self, left_index, right_index):
print("{} <-> {}".format(self.keys[left_index], self.keys[right_index]))
temp = self.keys[left_index]
self.keys[left_index] = self.keys[right_index]
self.keys[right_index] = temp
"""Insert operation
Complexity: O(log n)
"""
def insert(self, value):
print("Inserting {}".format(value))
# Add the key
self.keys.append(value)
key_index = len(self.keys) - 1
# Swim through to restore property
while True:
if key_index == 0:
# This root cannot have parents
print("Root node, no swimming to be done")
break
# Query parent
parent_index = Heap.get_parent_index(key_index)
parent = self.keys[parent_index]
# Verify if property holds
holds = value <= parent if self.property == "MIN" else value >= parent
if holds:
print("Before swap: {}".format(self))
self.swap(key_index, parent_index)
print("After swap: {}".format(self))
key_index = parent_index # Continue swimming on the new position
else:
message = "{} >= {}" if self.property == "MIN" else "{} <= {}"
print("Property holds: " + message.format(value, parent))
# Done swimming, the property now holds
break;
print("Finished adding {}".format(value))
"""Extract operation
Complexity: O(log n)
"""
def extract(self):
if len(self.keys) == 1:
print("Extracting {}".format(self.keys[0]))
self.keys = []
elif len(self.keys) > 1:
# Replace root with last key
print("Extracting {}".format(self.keys[0]))
self.keys[0] = self.keys[len(self.keys) - 1]
self.keys.pop()
print("New root: {}".format(self.keys[0]))
# Restore heap property
self.heapify()
else:
print("Nothing to extract")
"""Heapify operation tailored to be used after extraction
Complexity: O(log n)
"""
def heapify(self):
print("Restoring heap property")
key_index = 0
# Loop until the heap property is restored
while True:
left_child_index = Heap.get_left_child_index(key_index)
right_child_index = Heap.get_right_child_index(key_index)
child_index = -1
if left_child_index < len(self.keys):
child_index = left_child_index
print("Child index: {}".format(child_index))
if right_child_index < len(self.keys):
left_child = self.keys[left_child_index]
right_child = self.keys[right_child_index]
if self.property == "MIN":
# Target child will be the smaller one
if left_child > right_child:
child_index = right_child_index
print("Child index updated: {}".format(child_index))
else:
# Target child will be the larger one
if left_child <= right_child:
child_index = right_child_index
print("Child index updated: {}".format(child_index))
key = self.keys[key_index]
child_key = self.keys[child_index]
swap = key > child_key if self.property == "MIN" else key < child_key
if swap:
# Swap elements to further restore the property
self.swap(key_index, child_index)
# Set key index for next iteration
key_index = child_index
else:
# Property holds
print("Property holds, no swap necessary")
break
else:
print("No further children")
break
print("Finished extraction")
# Main program logic
def program():
    """Demo driver: build a min heap and a max heap from 8 random keys each,
    then extract the root of both, printing the trace along the way."""
    # Build a min heap
    print("Constructing min heap:")
    min_heap = Heap("MIN")
    for i in range(8):
        min_heap.insert(random.randrange(100))
    print("Result: {}\n".format(min_heap))
    print("Extracting from min heap:")
    min_heap.extract()
    print("Result: {}\n".format(min_heap))
    # Build a max heap
    print("Constructing max heap:")
    max_heap = Heap("MAX")
    for i in range(8):
        max_heap.insert(random.randrange(100))
    print("Result: {}\n".format(max_heap))
    print("Extracting from max heap:")
    max_heap.extract()
    print("Result: {}\n".format(max_heap))
# Run the program
program()
| StarcoderdataPython |
1771265 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 27 11:12:10 2018
@author: avelinojaver
"""
import sys
from pathlib import Path
dname = Path(__file__).resolve().parents[1]
sys.path.append(str(dname))
from cell_localization.models import UNet
import torch
import numpy as np
import pandas as pd
import tqdm
import cv2
import matplotlib.pylab as plt
from cell_localization.evaluation.localmaxima import evaluate_coordinates
from skimage.feature import peak_local_max
#%%
if __name__ == '__main__':
    # Output location kept from the original experiment setup (unused below).
    save_dir = Path('/Users/avelinojaver/OneDrive - Nexus365/heba/cell_detection/results/')
    # Name of the trained run to load; earlier alternatives kept for reference.
    bn = 'cell-loc_unet_l1smooth_20190425_160121_adam_lr0.001_wd0.0_batch64'
    #bn = 'cell-loc_unet-bn_l1smooth_20190425_160022_adam_lr0.001_wd0.0_batch64'
    #bn = 'cell-loc_unet-bn_l1smooth_20190425_205040_adam_lr0.001_wd0.0_batch64'
    #bn = 'heba/heba_unet-bn_l1smooth_20190501_083712_adam_lr0.001_batch64/heba_unet-bn_l1smooth_20190501_083712_adam_lr0.001_batch64'
    #bn = 'heba/heba_unet_l1smooth_20190501_105726_adam_lr0.001_batch64'
    #bn = 'heba/heba_unet_maskfocal_20190501_163856_adam_lr0.001_batch64'
    #model_path = Path('/Volumes/loco/') / 'workspace/localization/results/cell_detection' / bn / 'checkpoint.pth.tar'
    #n_epoch = 399
    #check_name = f'checkpoint-{n_epoch}.pth.tar'
    check_name = 'checkpoint.pth.tar'
    #model_path = Path.home() / 'workspace/localization/results/locmax_detection/' / bn / check_name
    model_path = Path.home() / 'workspace/localization/results/cell_detection' / bn / check_name
    #model_path = Path('/Volumes/loco/') / 'workspace/localization/results/cell_detection' / bn / check_name
    # Intensity range used to normalise the raw images to [0, 1]
    # (0-4095 suggests 12-bit input — TODO confirm).
    scale_int = (0, 4095)
    # Model topology is inferred from the run name: '-separated' runs emit
    # three output channels, '-bn' runs use batch normalisation.
    n_ch_in, n_ch_out = 1, 1
    if '-separated' in bn:
        n_ch_out = 3
    batchnorm = '-bn' in bn
    model = UNet(n_channels = n_ch_in, n_classes = n_ch_out, batchnorm = batchnorm)
    state = torch.load(model_path, map_location = 'cpu')
    model.load_state_dict(state['state_dict'])
    model.eval()
    #%%
    #input_dir = Path('/Users/avelinojaver/OneDrive - Nexus365/heba/cell_detection/raw/')
    #fnames = input_dir.glob('*.tif')
    #input_dir = Path('/Users/avelinojaver/OneDrive - Nexus365/heba/cell_detection/data/validation')
    # NOTE(review): 'imput_dir' is presumably a typo for 'input_dir'.
    imput_dir = Path('/Users/avelinojaver/Desktop/UnannotateImages_v2/')
    fnames = imput_dir.glob('*.tif')
    # NOTE(review): 'metrics' is never appended to in the loop below.
    metrics = []
    for fname in tqdm.tqdm(fnames):
        img_id = fname.stem
        img = cv2.imread(str(fname), -1)
        # Add a channel dimension and normalise before running the network.
        x = img[None].astype(np.float32)
        x = (x - scale_int[0])/(scale_int[1] - scale_int[0])
        with torch.no_grad():
            X = torch.from_numpy(x[None])
            Xhat = model(X)
        xhat = Xhat.squeeze().detach().numpy()
        xr = x.squeeze()
        #%%
        # Candidate cell centres = local maxima of the belief map.
        #coords_pred = cv2_peak_local_max(xhat, threshold_relative = 0.1, threshold_abs = 0.05)
        coords_pred = peak_local_max(xhat, min_distance = 5, threshold_abs = 0.05, threshold_rel = 0.5)
        coords_pred = coords_pred[:, ::-1]  # (row, col) -> (x, y) for plotting
        # Three panels: raw image, belief-map overlay, predicted coordinates.
        fig, axs = plt.subplots(1, 3, sharex=True, sharey=True, figsize = (15, 5))
        axs[0].imshow(xr, cmap='gray')#, vmin=0, vmax=1)
        axs[0].set_title('Original')
        #axs[1].imshow(xhat, cmap='gray')#, vmin=0.4)
        axs[1].imshow(xr, cmap='gray')
        axs[1].imshow(xhat, cmap='inferno', alpha=0.5)
        axs[1].set_title('Believe Maps')
        axs[2].imshow(xr, cmap='gray')
        #axs[2].plot(coords_pred[..., 0], coords_pred[..., 1], '.r')
        #if coords_df is not None:
        #    axs[2].plot(coords_df['cx'], coords_df['cy'], 'x', color='y')
        axs[2].set_title('Predicted Coordinates')
        plt.suptitle(fname.stem)
        #plt.xlim((800, 1000))
        #plt.ylim((0, 200))
        for ax in axs:
            ax.axis('off')
        axs[2].plot(coords_pred[:, 0], coords_pred[:, 1], 'o', color = 'c')
        #%%
1793522 | <filename>labs_challenge/apps.py
from django.apps import AppConfig
class LabsConfig(AppConfig):
    """Django AppConfig for the ``labs_challenge`` application."""
    # Dotted module path Django uses to locate and register the app.
    name = 'labs_challenge'
| StarcoderdataPython |
1709319 | <reponame>ifm/ifm3d-ros2
#
# Copyright (C) 2019 ifm electronic, gmbh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distribted on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import ExecuteProcess
def generate_launch_description():
    """Launch rviz2 preconfigured with this package's ifm3d view.

    Resolves the ``ifm3d.rviz`` config shipped in the package's ``etc``
    directory and returns a LaunchDescription that runs rviz2 with it.
    """
    rviz_config = os.path.join(
        get_package_share_directory('ifm3d_ros2'), 'etc', 'ifm3d.rviz')
    rviz_process = ExecuteProcess(
        cmd=['ros2', 'run', 'rviz2', 'rviz2', '-d', rviz_config],
        output='screen',
        log_cmd=True,
    )
    return LaunchDescription([rviz_process])
| StarcoderdataPython |
1655658 | <filename>stitches/fx_stitch.py
# Import packages
import numpy as np
import pandas as pd
import pkg_resources
import xarray as xr
import os as os
import stitches.fx_util as util
import stitches.fx_data as data
import stitches.fx_pangeo as pangeo
def find_zfiles(rp):
    """Determine which cmip files must be downloaded from pangeo.

    :param rp: data frame of the recipes
    :return: numpy.ndarray of the unique gs:// files to pull from pangeo
    """
    # Every column whose name contains "file" holds zstore paths; collapse
    # them into a single deduplicated (and sorted, via np.unique) array.
    file_columns = rp.filter(regex='file', axis=1)
    return np.unique(file_columns.values.flatten())
def find_var_cols(x):
    """Determine which variables are going to be downloaded.

    :param x: pandas data frame of the stitches recipe
    :return: list of the variable names (the ``*_file`` recipe columns with
        the ``_file`` suffix stripped) that will be written out to the
        netcdf files.
    """
    # Fixes vs. the original: the intermediate list was bound to the name
    # ``set`` (shadowing the builtin) and built with a manual append loop.
    file_columns = x.filter(regex='file').columns.tolist()
    return [column.replace("_file", "") for column in file_columns]
def get_netcdf_values(i, dl, rp, fl, name):
    """Extract the archive values from the list of downloaded cmip data.

    :param i: int index of the row of the recipe data frame
    :param dl: list of xarray cmip files
    :param rp: data frame of the recipe
    :param fl: list of the cmip files
    :param name: name of the variable file that is going to be processed
    :return: numpy array of the variable values for the recipe's archive window
    """
    file = rp[name][i]
    start_yr = rp["archive_start_yr"][i]
    end_yr = rp["archive_end_yr"][i]
    # Figure out which index level we are on and then get the
    # xarray from the list.
    index = int(np.where(fl == file)[0])
    extracted = dl[index].sortby('time')
    v = name.replace("_file", "")
    # Special time handling: only datetime-like indexes are supported so the
    # year can be pulled out for subsetting. TODO: consider functionalizing.
    times = extracted.indexes['time']
    if type(times) in [xr.coding.cftimeindex.CFTimeIndex, pd.core.indexes.datetimes.DatetimeIndex]:
        yrs = extracted.indexes['time'].year  # pull out the year information from the time index
        flags = list(map(lambda x: x in range(start_yr, end_yr+1), yrs))
        to_keep = times[flags]
    else:
        raise TypeError(f"unsupported time type")
    dat = extracted.sel(time=to_keep)[v].values.copy()
    # Sanity-check the length of the extracted slice against a calendar-aware
    # expectation (daily data drops Feb 29 for noleap calendars).
    if ((times.freq == 'D') | (times.freq == 'day')):
        expected_times = pd.date_range(start=str(start_yr) + "-01-01", end=str(end_yr) + "-12-31", freq='D')
        if times.calendar == 'noleap':
            expected_len = len(expected_times[~((expected_times.month == 2) & (expected_times.day == 29))])
    else:
        # NOTE(review): this pairing of the branches was ambiguous in the
        # flattened source; as written, daily data on a non-noleap calendar
        # leaves ``expected_len`` unset — verify against upstream stitches.
        expected_len = len(pd.date_range(start=str(start_yr) + "-01-01", end=str(end_yr) + "-12-31", freq='M'))
    assert (len(dat) == expected_len), "Not enough data in " + file + "for period " + str(start_yr) + "-" + str(end_yr)
    return dat
def get_var_info(rp, dl, fl, name):
    """Extract the cmip variable attribute information.

    :param rp: data frame of the recipes
    :param dl: list of the downloaded data sets
    :param fl: list of the data file names
    :param name: string of the column of ``rp`` containing the variable
        file name
    :return: pandas dataframe of the variable meta data

    TODO: add a check to make sure that only a single stitching id is
    being passed into the function.
    """
    util.check_columns(rp, {name})
    zstore_path = rp[name][0]
    position = int(np.where(fl == zstore_path)[0])
    dataset = dl[position]
    meta_data = data.get_ds_meta(dataset)
    meta_data["calendar"] = dataset.indexes['time'].calendar
    return meta_data
def get_atts(rp, dl, fl, name):
    """Extract the cmip variable attribute information.

    :param rp: data frame of the recipes
    :param dl: list of the downloaded data sets
    :param fl: list of the data file names
    :param name: string of the column of ``rp`` containing the variable
        file names
    :return: dict object containing the cmip variable information

    TODO: add a check to make sure that only a single stitching id is
    being passed into the function.
    """
    zstore_path = rp[name][0]
    position = int(np.where(fl == zstore_path)[0])
    dataset = dl[position]
    variable = name.replace("_file", "")
    return dataset[variable].attrs.copy()
def internal_stitch(rp, dl, fl):
    """Stitch a single recipe into netcdf outputs.

    :param rp: data frame of the recipe
    :param dl: list of xarray cmip files
    :param fl: list of the cmip files
    :return: dict mapping each stitched variable name to its xarray dataset
    """
    rp.reset_index(drop=True, inplace=True)
    variables = find_var_cols(rp)
    out = []
    # For each of the variables stitch the
    # data together.
    for v in variables:
        # Get the information about the variable that is going to be stitched together.
        col = v + '_file'
        var_info = get_var_info(rp, dl, fl, col)
        # For each of the time slices extract the data & concatenate together.
        gridded_data = get_netcdf_values(i=0, dl=dl, rp=rp, fl=fl, name=col)
        # Now add the other time slices.
        for i in range(1, len(rp)):
            new_vals = get_netcdf_values(i=i, dl=dl, rp=rp, fl=fl, name=col)
            gridded_data = np.concatenate((gridded_data, new_vals), axis=0)
        # Note that the pd.date_range call needs the date/month defined otherwise it will
        # truncate the year from start of first year to start of end year which is not
        # what we want. We want the full final year to be included in the time series.
        start = str(min(rp["target_start_yr"]))
        end = str(max(rp["target_end_yr"]))
        if var_info["frequency"][0].lower() == "mon":
            freq = "M"
        elif var_info["frequency"][0].lower() == "day":
            freq = "D"
        else:
            raise TypeError(f"unsupported frequency")
        times = pd.date_range(start=start + "-01-01", end=end + "-12-31", freq=freq)
        # Again, some ESMs stop in 2099 instead of 2100 - so we just drop the
        # last year of gridded_data when that is the case.
        # TODO this will need something extra/different for daily data; maybe just
        # a simple len(times)==len(gridded_data)-12 : len(times) == len(gridded_data)-(nDaysInYear)
        # with correct parentheses would do it
        if ((max(rp["target_end_yr"]) == 2099) & (len(times) == (len(gridded_data) - 12))):
            gridded_data = gridded_data[0:len(times), 0:, 0:].copy()
        # Drop Feb 29 from the expected timeline for noleap daily calendars.
        if ((var_info["calendar"][0].lower() == "noleap") & (freq == "D")):
            times = times[~((times.month == 2) & (times.day == 29))]
        assert (len(gridded_data) == len(times)), "Problem with the length of time"
        # Extract the lat and lon information that will be used to structure the
        # empty netcdf file. Make sure to copy all of the information including
        # the attributes!
        lat = dl[0].lat.copy()
        lon = dl[0].lon.copy()
        # Store the full recipe in the attrs for provenance.
        r = rp.reset_index(drop=True).to_string()
        rslt = xr.Dataset({v: xr.DataArray(
            gridded_data,
            coords=[times, lat, lon],
            dims=["time", "lat", 'lon'],
            attrs={'units': var_info['units'][0],
                   'variable': var_info['variable'][0],
                   'experiment': var_info['experiment'][0],
                   'ensemble': var_info['ensemble'][0],
                   'model': var_info['model'][0],
                   'stitching_id': rp['stitching_id'].unique()[0],
                   'recipe': r})
        })
        out.append(rslt)
    out_dict = dict(zip(variables, out))
    return out_dict
def gridded_stitching(out_dir, rp):
    """Stitch the gridded netcdfs for the variables contained in a recipe.

    :param out_dir: string directory location where to write the netcdf
        files out to
    :param rp: data frame of the recipe
    :return: a list of the netcdf file paths (from the last stitching id
        processed)

    Fix vs. the original: the per-recipe error handler used a bare
    ``except:``, which also swallows SystemExit/KeyboardInterrupt; it now
    catches ``Exception`` only.
    """
    flag = os.path.isdir(out_dir)
    if not flag:
        raise TypeError(f'The output directory does not exist.')
    # Check inputs.
    util.check_columns(rp, {'target_start_yr', 'target_end_yr', 'archive_experiment',
                            'archive_variable', 'archive_model', 'archive_ensemble', 'stitching_id',
                            'archive_start_yr', 'archive_end_yr'})
    # Determine which variables will be downloaded.
    variables = find_var_cols(rp)
    if not (len(variables) >= 1):
        raise TypeError(f'No variables were found to be processed.')
    # Determine which files need to be downloaded from pangeo.
    file_list = find_zfiles(rp)
    # Make sure that all of the files are available to download from pangeo.
    # Note that this might be excessively cautious but this is an issue we have run into in
    # the past.
    avail = pangeo.fetch_pangeo_table()
    flag = all(item in list(avail['zstore']) for item in list(file_list))
    if not flag:
        raise TypeError(f'Trying to request a zstore file that does not exist')
    # Download all of the data from pangeo.
    data_list = list(map(pangeo.fetch_nc, file_list))
    # For each of the stitching recipes go through and stitch a recipe.
    for single_id in rp['stitching_id'].unique():
        # initialize f to be empty just to be safe now that we've added a
        # try...except approach. It's technically possible the first id
        # tried will fail and the function will try to return a non-existent f.
        f = []
        try:
            print((
                'Stitching gridded netcdf for: ' + rp.archive_model.unique() + " " + rp.archive_variable.unique() + " " + single_id))
            # Do the stitching!
            # ** this can be a slow step and prone to errors
            single_rp = rp.loc[rp['stitching_id'] == single_id].copy()
            rslt = internal_stitch(rp=single_rp, dl=data_list, fl=file_list)
            # Print the files out as netcdf files
            f = []
            for i in rslt.keys():
                ds = rslt[i]
                ds = ds.sortby('time').copy()
                fname = (out_dir + '/' + "stitched_" + ds[i].attrs['model'] + '_' +
                         ds[i].attrs['variable'] + '_' + single_id + '.nc')
                ds.to_netcdf(fname)
                f.append(fname)
            # end For loop over rslt keys
        # end try
        except Exception:
            print(('Stitching gridded netcdf for: ' + rp.archive_model.unique() + " " + rp.archive_variable.unique() + " " + single_id + ' failed. Skipping. Error thrown within gridded_stitching fxn.'))
        # end except
    # end for loop over single_id
    return f
# end gridded stitching function
# end gridded stitching function
def gmat_internal_stitch(row, data):
    """Select tas data from an archive based on one recipe row.

    Used while iterating over an entire recipe to do the stitching.

    :param row: pandas.core.series.Series a row entry of a fully formatted
        recipe
    :param data: pandas.core.frame.DataFrame containing the tas values to
        be stitched together
    :return: pandas.core.frame.DataFrame of tas values
    """
    target_years = list(range(int(row["target_start_yr"]),
                              int(row["target_end_yr"]) + 1))
    archive_years = list(range(int(row["archive_start_yr"]),
                               int(row["archive_end_yr"]) + 1))
    mask = ((data["experiment"] == row["archive_experiment"])
            & (data["year"].isin(archive_years))
            & (data["ensemble"] == row["archive_ensemble"]))
    selected_data = data.loc[mask]
    # Some models stop at 2099 instead of 2100 - so there is a mismatch
    # between the target years and the selection, but not a fatal one; chop
    # the extra year off the end of the selection in that very specific case.
    if ((len(target_years) == (util.nrow(selected_data) - 1)) & (max(target_years) == 2099)):
        selected_data = selected_data.iloc[0:len(target_years), ].copy()
    if len(target_years) != util.nrow(selected_data):
        raise TypeError(f"Trouble with selecting the tas data.")
    out = pd.DataFrame(data={'year': target_years,
                             'value': selected_data['value']})
    out['variable'] = 'tas'
    return out
# TODO ACS we do have a bit of a behavior change here so that this function so that the
# TODO rp read in here is the same as the rp read in to the gridded_stitching function.
def gmat_stitching(rp):
    """Based on a recipe data frame stitch together a time series of global tas data.

    :param rp: pandas.core.frame.DataFrame a fully formatted recipe data frame.
    :return: pandas.core.frame.DataFrame of stitched together tas data.
    """
    # Check inputs.
    util.check_columns(rp, {'target_start_yr', 'target_end_yr', 'archive_experiment',
                            'archive_variable', 'archive_model', 'archive_ensemble', 'stitching_id',
                            'archive_start_yr', 'archive_end_yr', 'tas_file'})
    # One of the assumptions of this function is that it only works with tas, so
    # we can safely add tas as the variable column.
    rp['variable'] = 'tas'
    out = []
    for name, match in rp.groupby(['stitching_id']):
        # Reset the index in the match data frame so that we can use a for loop
        # to iterate through the match data frame and apply gmat_internal_stitch.
        match = match.reset_index(drop=True)
        # Find the tas data to be stitched together.
        dir_path = pkg_resources.resource_filename('stitches', 'data/tas-data')
        all_files = util.list_files(dir_path)
        # Load the tas data for a particular model.
        model = match['archive_model'].unique()[0]
        csv_to_load = [file for file in all_files if (model in file)][0]
        # NOTE(review): this local name shadows the ``stitches.fx_data``
        # module imported as ``data`` at the top of the file — harmless in
        # this function but confusing; consider renaming.
        data = pd.read_csv(csv_to_load)
        # Format the data so that if we have historical years in the future scenarios
        # then that experiment is relabeled as "historical".
        fut_exps = ['ssp245', 'ssp126', 'ssp585', 'ssp119', 'ssp370', 'ssp434', 'ssp534-over', 'ssp460']
        nonssp_data = data.loc[~data["experiment"].isin(fut_exps)]
        fut_data = data.loc[(data["experiment"].isin(fut_exps)) &
                            (data["year"] > 2014)]
        hist_data = data.loc[(data["experiment"].isin(fut_exps)) &
                             (data["year"] <= 2014)]
        # NOTE(review): assignment on a .loc slice — pandas may emit a
        # SettingWithCopyWarning here; a .copy() beforehand would silence it.
        hist_data["experiment"] = "historical"
        tas_data = pd.concat([nonssp_data, fut_data, hist_data])[['variable', 'experiment', 'ensemble', 'model', 'year',
                                                                  'value']].drop_duplicates().reset_index(drop=True)
        # Stitch the data together based on the matched recipes.
        dat = []
        for i in match.index:
            row = match.iloc[i, :]
            dat.append(gmat_internal_stitch(row, tas_data))
        dat = pd.concat(dat)
        # Add the stitching id column to the data frame.
        dat['stitching_id'] = name
        # Add the data to the out list
        out.append(dat)
    # Format the list of data frames into a single data frame.
    final_output = pd.concat(out)
    final_output = final_output.reset_index(drop=True).copy()
    final_output = final_output.sort_values(['stitching_id', 'year']).copy()
    final_output = final_output.reset_index(drop=True).copy()
    return final_output
| StarcoderdataPython |
3276152 | <filename>screen/display/display.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from importlib import import_module
from screen.custom import *
class Display:
    """Thin wrapper around a dynamically loaded e-paper driver module.

    The driver is imported from ``screen.display.driver.<driver_name>`` and
    is expected to expose an ``EPD`` class plus ``EPD_WIDTH``/``EPD_HEIGHT``
    constants.
    """

    def __init__(self, driver_name):
        self.driver_name = driver_name
        self.get_driver()
        self.get_display_size()

    def get_driver(self):
        """Import the named driver module and instantiate its EPD object."""
        try:
            module_path = f'screen.display.driver.{self.driver_name}'
            self.driver = import_module(module_path)
            self.epd = self.driver.EPD()
        except ImportError:
            raise Exception('This module is not supported. Check your spellings?')
        except FileNotFoundError:
            # The driver touches the SPI device files at construction time.
            raise Exception('SPI could not be found. Please check if SPI is enabled')

    def get_display_size(self):
        """Cache the panel dimensions advertised by the driver module."""
        self.width = self.driver.EPD_WIDTH
        self.height = self.driver.EPD_HEIGHT

    def update(self, black_image, red_image):
        """Clear the panel, then push a new black/red image pair."""
        self.epd.clear()
        self.epd.display(black_image, red_image)
        logger.info(f'Screen display update complete')

    def sleep(self):
        """Put the panel into its low-power sleep mode."""
        self.epd.sleep()
        logger.info(f'Screen display sleep')

    def calibrate(self, cycles=1):
        """Flush the panel with black/white passes to reduce ghosting."""
        self.epd.init()
        white = Image.new('1', (self.width, self.height), 'white')
        black = Image.new('1', (self.width, self.height), 'black')
        for _ in range(cycles):
            for frame_pair in ((black, white), (white, black), (white, white)):
                self.epd.display(*frame_pair)
        logger.info(f'Screen display calibrate complete')
| StarcoderdataPython |
1721148 | #!/usr/bin/env python
# coding: utf-8
"""Clusterless Decoding Analysis W-Track | Compute decoded position and
distance metrics based on CA1 Marks and associated body position |
Inputs: Marks, Posdlc, Task, Tetinfo | https://github.com/edeno/pose_analysis
"""
import logging
import os
import numpy as np
import xarray as xr
from loren_frank_data_processing import make_epochs_dataframe
from replay_trajectory_classification import ClusterlessClassifier
from sklearn.model_selection import KFold
from src.analysis import calculate_replay_distance
from src.load_data import load_data, make_track_graph
from src.parameters import (ANIMALS, EDGE_ORDER, EDGE_SPACING,
PROCESSED_DATA_DIR, classifier_parameters,
discrete_state_transition)
from trajectory_analysis_tools import (get_distance_metrics,
get_highest_posterior_threshold,
get_HPD_spatial_coverage,
get_trajectory_data)
# Root-logger setup done once at import time: timestamped INFO-level
# messages, e.g. "27-Sep-18 11:12:10 Loading data...".
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(level='INFO', format=FORMAT, datefmt='%d-%b-%y %H:%M:%S')
def run_analysis(epoch_key):
    """Decode position from CA1 multiunit marks for one recording epoch.

    Fits a cross-validated clusterless classifier, computes causal and
    acausal posteriors, ahead/behind and absolute distance metrics, and
    95%/50% HPD credible-interval coverage, then writes results (.nc),
    the fitted model (.pkl) and position info (.csv) to PROCESSED_DATA_DIR.

    Args:
        epoch_key: (animal, day, epoch) tuple identifying the epoch.
    """
    logging.info(epoch_key)
    # Load Data
    # Specify animal, day, epoch and body position estimate to be used to
    # encode pos-mark relationship.
    logging.info("Loading data...")
    data = load_data(epoch_key,
                     position_to_linearize=['nose_x', 'nose_y'],
                     max_distance_from_well=30,
                     min_distance_traveled=50,
                     )
    track_graph, center_well_id = make_track_graph(epoch_key, ANIMALS)
    is_running = np.abs(data["position_info"].nose_vel) > 0
    # is_running = np.abs(data["position_info"].forepawR_vel) > 4
    # is_outbound = data["position_info"].task == "Outbound"
    # Calculate posterior
    # Builds the classifier and calculates the posterior estimates for each
    # bin. Default is 5x cross validation. Some concerns if that is appropriate
    # in 15 minute run epochs, but AJ checked that the overall posterior was
    # similar in 2x, 3x, and 5x versions. Maybe stick to 3x for 15 minute data?
    cv = KFold()
    cv_classifier_clusterless_results = []
    logging.info("Decoding...")
    for fold_ind, (train, test) in enumerate(
            cv.split(data["position_info"].index)):
        logging.info(f"\tFold #{fold_ind + 1}")
        # train = train[is_outbound[train].values]
        cv_classifier = ClusterlessClassifier(**classifier_parameters)
        logging.info("\tFitting model...")
        cv_classifier.fit(
            position=data["position_info"].iloc[train].linear_position,
            multiunits=data["multiunits"].isel(time=train),
            is_training=is_running.iloc[train],
            track_graph=track_graph,
            center_well_id=center_well_id,
            edge_order=EDGE_ORDER,
            edge_spacing=EDGE_SPACING,
        )
        cv_classifier.discrete_state_transition_ = discrete_state_transition
        logging.info('\tPredicting posterior...')
        cv_classifier_clusterless_results.append(
            cv_classifier.predict(
                data["multiunits"].isel(time=test),
                time=data["position_info"].iloc[test].index /
                np.timedelta64(1, "s"),
            )
        )
    # concatenate cv classifier results
    cv_classifier_clusterless_results = xr.concat(
        cv_classifier_clusterless_results, dim="time"
    )
    # Calculate Distance Metrics
    logging.info("Calculating metrics...")
    # Important calculate distance metrics. Loads Causal Posterior and
    # get_trajectory_data and get_distance_metrics to calculate ahead-behind
    # distance based on body_dir
    # CAUSAL
    posterior_causal = (cv_classifier_clusterless_results["causal_posterior"]
                        .sum("state", skipna=False))
    # extracting the peak of the posterior
    trajectory_data = get_trajectory_data(
        posterior=posterior_causal,
        track_graph=track_graph,
        decoder=cv_classifier,
        position_info=data["position_info"],
        direction_variable="body_dir"
    )
    distance_metrics = get_distance_metrics(
        track_graph, *trajectory_data)
    # signed distance: positive = decoded position ahead of the animal
    ahead_behind_distance_causal = (
        distance_metrics.mental_position_ahead_behind_animal *
        distance_metrics.mental_position_distance_from_animal)
    # Calculate the corresponding 95% HPD credible interval
    hpd_threshold_95_causal = get_highest_posterior_threshold(
        posterior_causal.dropna("position"), coverage=0.95)
    spatial_coverage_95_causal = get_HPD_spatial_coverage(
        posterior_causal, hpd_threshold_95_causal)
    # Calculate the corresponding 50% HPD credible interval
    hpd_threshold_50_causal = get_highest_posterior_threshold(
        posterior_causal.dropna("position"), coverage=0.50)
    spatial_coverage_50_causal = get_HPD_spatial_coverage(
        posterior_causal, hpd_threshold_50_causal)
    # calculate distance metrics acausal posterior. Loads acausal posterior and
    # distance metrics. ACAUSAL
    posterior_acausal = (cv_classifier_clusterless_results["acausal_posterior"]
                         .sum("state", skipna=False))
    # extracting the peak of the posterior
    trajectory_data = get_trajectory_data(
        posterior=posterior_acausal,
        track_graph=track_graph,
        decoder=cv_classifier,
        position_info=data["position_info"],
        direction_variable="body_dir"
    )
    distance_metrics = get_distance_metrics(
        track_graph, *trajectory_data)
    ahead_behind_distance_acausal = (
        distance_metrics.mental_position_ahead_behind_animal *
        distance_metrics.mental_position_distance_from_animal)
    # ACAUSAL 95% CI
    hpd_threshold_95_acausal = get_highest_posterior_threshold(
        posterior_acausal.dropna("position"), coverage=0.95)
    spatial_coverage_95_acausal = get_HPD_spatial_coverage(
        posterior_acausal, hpd_threshold_95_acausal)
    # ACAUSAL 50% CI
    hpd_threshold_50_acausal = get_highest_posterior_threshold(
        posterior_acausal.dropna("position"), coverage=0.50)
    spatial_coverage_50_acausal = get_HPD_spatial_coverage(
        posterior_acausal, hpd_threshold_50_acausal)
    # WHILE WE ARE AT IT, ALSO A GOOD IDEA TO CALCULATE THE ABSOLUTE DISTANCE.
    # CAUSAL
    replay_distance_from_animal_position_causal = calculate_replay_distance(
        posterior=cv_classifier_clusterless_results.causal_posterior.sum(
            'state'),
        track_graph=track_graph,
        decoder=cv_classifier,
        position_2D=data['position_info'].loc[:, ["nose_x", "nose_y"]],
        track_segment_id=data['position_info'].loc[:, ["track_segment_id"]],
    )
    # WHILE WE ARE AT IT, ALSO A GOOD IDEA TO CALCULATE THE ABSOLUTE DISTANCE.
    # ACAUSAL
    replay_distance_from_animal_position_acausal = calculate_replay_distance(
        posterior=cv_classifier_clusterless_results.acausal_posterior.sum(
            'state'),
        track_graph=track_graph,
        decoder=cv_classifier,
        position_2D=data['position_info'].loc[:, ["nose_x", "nose_y"]],
        track_segment_id=data['position_info'].loc[:, ["track_segment_id"]],
    )
    # ### Save the distance and CI values with the classifier results
    cv_classifier_clusterless_results[
        'abs_distance_from_animal_position_causal'] = (
        ('time'), replay_distance_from_animal_position_causal)
    cv_classifier_clusterless_results[
        'abs_distance_from_animal_position_acausal'] = (
        ('time'), replay_distance_from_animal_position_acausal)
    # maybe this will works and we can save both distances
    cv_classifier_clusterless_results[
        'rel_distance_from_animal_position_causal'] = (
        ('time'), ahead_behind_distance_causal)
    cv_classifier_clusterless_results[
        'rel_distance_from_animal_position_acausal'] = (
        ('time'), ahead_behind_distance_acausal)
    # get HPD estimate of the distance associated
    cv_classifier_clusterless_results['hpd_threshold_95_causal'] = (
        ('time'), hpd_threshold_95_causal)
    cv_classifier_clusterless_results['hpd_threshold_50_causal'] = (
        ('time'), hpd_threshold_50_causal)
    cv_classifier_clusterless_results['hpd_threshold_95_acausal'] = (
        ('time'), hpd_threshold_95_acausal)
    cv_classifier_clusterless_results['hpd_threshold_50_acausal'] = (
        ('time'), hpd_threshold_50_acausal)
    # get CI of the distance associated
    cv_classifier_clusterless_results['credible_interval_95_causal'] = (
        ('time'), spatial_coverage_95_causal)
    cv_classifier_clusterless_results['credible_interval_50_causal'] = (
        ('time'), spatial_coverage_50_causal)
    cv_classifier_clusterless_results['credible_interval_95_acausal'] = (
        ('time'), spatial_coverage_95_acausal)
    cv_classifier_clusterless_results['credible_interval_50_acausal'] = (
        ('time'), spatial_coverage_50_acausal)
    logging.info("Saving results...")
    # save the results as .nc format. ncread matlab can read these
    epoch_identifier = f"{epoch_key[0]}_{epoch_key[1]:02d}_{epoch_key[2]:02d}"
    cv_classifier_clusterless_results.to_netcdf(
        os.path.join(
            PROCESSED_DATA_DIR,
            (f"{epoch_identifier}_cv_classifier_clusterless_vel_0_nose_alltime"
             "_results.nc")
        )
    )
    # save the model
    cv_classifier.save_model(
        os.path.join(PROCESSED_DATA_DIR,
                     f"{epoch_identifier}_cv_classifier_clusterless_nose.pkl"))
    # Save position info
    data['position_info'].to_csv(
        os.path.join(PROCESSED_DATA_DIR,
                     f"{epoch_identifier }_position_info_nose.csv"))
def main():
    """Run the decoding analysis for every epoch of the configured animals."""
    epoch_keys = make_epochs_dataframe(ANIMALS).index
    for epoch_key in epoch_keys:
        run_analysis(epoch_key)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3272347 | <gh_stars>0
# 克隆自聚宽文章:https://www.joinquant.com/post/669
# 标题:【回测来啦】——鳄鱼法则交易系统,15年至今114%
# 作者:陈小米。
import numpy as np
def initialize(context):
    """JoinQuant entry point: set up global state, the CSI300 universe,
    the benchmark, and the monthly re-selection schedule."""
    g.up_price = {}  # highest price of each stock's upward fractal
    g.low_price = {}  # lowest price of each stock's downward fractal
    g.up_fractal_exists = {}  # per-stock flag: a valid upward fractal exists
    g.down_fractal_exists = {}  # per-stock flag: a valid downward fractal exists
    g.AO_index = {}  # per-stock running list of AO indicator values
    g.cal_AC_index = {}  # per-stock intermediate series for the AC indicator
    g.AC_index = {}  # per-stock running list of AC indicator values
    g.amount = {}  # per-stock full-position share count
    g.stock = get_index_stocks('000300.XSHG')
    g.buy_stock = []
    set_benchmark('000300.XSHG')
    g.month = context.current_dt.month
    run_monthly(select_universe,1,'open')
#重置全局变量
def reset_global():
    """Reset all per-run global containers (called before re-selecting stocks)."""
    g.up_price = {}  # highest price of each stock's upward fractal
    g.low_price = {}  # lowest price of each stock's downward fractal
    g.up_fractal_exists = {}  # per-stock flag: a valid upward fractal exists
    g.down_fractal_exists = {}  # per-stock flag: a valid downward fractal exists
    g.AO_index = {}  # per-stock running list of AO indicator values
    g.cal_AC_index = {}  # per-stock intermediate series for the AC indicator
    g.AC_index = {}  # per-stock running list of AC indicator values
    g.amount = {}  # per-stock full-position share count
    g.buy_stock = []
def initial_stock_global(stock):
    """Initialize the per-stock entries of every global container."""
    g.up_price[stock] = 0
    g.low_price[stock] = 0
    g.up_fractal_exists[stock] = False
    g.down_fractal_exists[stock] = False  # no valid downward fractal yet
    g.AO_index[stock] = [0]  # running AO indicator values
    g.cal_AC_index[stock] = [0]  # AC intermediate storage
    g.AC_index[stock] = [0]  # running AC indicator values
    g.amount[stock] = 0  # full-position share count
#轮换选股后清空持仓
def reset_position(context):
    """Liquidate every stock in the current buy list (used before re-selection)."""
    for stock in g.buy_stock:
        order_target(stock,0)
        log.info("sell %s for reset position"%stock)
#选股
def select_universe(context):
    """Rebuild the tradable universe every 6 months.

    Keeps CSI300 members whose alligator lines have been entangled
    ("sleeping") on each of the last 20 days, clearing all positions and
    per-stock state first.
    """
    # Only operate on the 6-month anniversary of the start month.
    month = context.current_dt.month
    if month%6 != g.month%6:
        return
    # Clear positions and global state before re-selecting.
    reset_position(context)
    reset_global()
    hist = history(30,'1d','close',g.stock,df = False)
    for stock in g.stock:
        if is_sleeping_alligator(stock,hist,20):
            g.buy_stock.append(stock)
            # Initialize this stock's global state
            initial_stock_global(stock)
    # BUG FIX: `print g.buy_stock` is a Python 2 print statement; the call
    # form below is valid in both Python 2 and Python 3.
    print(g.buy_stock)
    return None
#睡着的鳄鱼
def is_sleeping_alligator(stock,hist,nday):
    """True when the alligator has been 'sleeping' (lines entangled) on each
    of the last nday days."""
    return all(is_struggle(stock, hist, offset) for offset in range(nday))
#均线纠缠,BRG三线非常接近
def is_struggle(stock,hist,delta):
    """True when the blue/red/green alligator lines, shifted back by *delta*
    days, are entangled (each adjacent pair within 2% of one another)."""
    blue = hist[stock][-21-delta:-8-delta].mean()
    red = hist[stock][-13-delta:-5-delta].mean()
    green = hist[stock][-8-delta:-3-delta].mean()

    def _entangled(a, b):
        return abs(a / b - 1) < 0.02

    return _entangled(blue, red) and _entangled(red, green)
#判断 向上 或 向下 碎形
def is_fractal(stock,direction):
    """Detect a 5-bar fractal in the last five days of `direction` data.

    A 'high' fractal means the middle bar's high exceeds the two bars on each
    side (its peak is recorded in g.up_price[stock]); a 'low' fractal is the
    mirror image (trough recorded in g.low_price[stock]).
    """
    hist = history(5,'1d',direction,[stock],df = False)
    if direction == 'high'\
    and hist[stock][2] > hist[stock][0]\
    and hist[stock][2] > hist[stock][1]\
    and hist[stock][2] > hist[stock][3]\
    and hist[stock][2] > hist[stock][4]:
        g.up_price[stock] = hist[stock][2]
        return True
    elif direction == 'low'\
    and hist[stock][2] < hist[stock][0]\
    and hist[stock][2] < hist[stock][1]\
    and hist[stock][2] < hist[stock][3]\
    and hist[stock][2] < hist[stock][4]:
        g.low_price[stock] = hist[stock][2]
        return True
    return False
#通过比较碎形与红线位置,判断碎形是否有效
def is_effective_fractal(stock, direction):
    """Validate a detected fractal against the alligator 'red line'.

    A high fractal is effective only when the latest close is above the mean
    of the first 8 of the last 13 closes; a low fractal only when the close
    is below it. Results are stored in g.up_fractal_exists[stock] /
    g.down_fractal_exists[stock].
    """
    if is_fractal(stock,direction):
        hist = history(13,'1d','close',[stock],df = False)
        red_line = hist[stock][:-5].mean()
        close_price = hist[stock][-1]
        if direction == 'high':
            if close_price > red_line:
                g.up_fractal_exists[stock] = True
            else:
                g.up_fractal_exists[stock] = False
        elif direction == 'low':
            if close_price < red_line:
                g.down_fractal_exists[stock] = True
            else:
                g.down_fractal_exists[stock] = False
#N日内最高价格的N日线
def nday_high_point(stock,n):
    """Mean of n sliding window maxima of the daily 'high' series.

    NOTE(review): each window is hist[-5-i:-1-i] -- a fixed 4-bar window
    sliding back one day per step, not an n-day window as the name suggests.
    Confirm against the intended AO/AC indicator definition.
    """
    hist = history(2*n,'1d','high',[stock],df = False)[stock]
    high_point = []
    for i in range(n):
        high_point.append(max(hist[-5-i:-1-i]))
    return np.array(high_point).mean()
#N日内最低价格的N日线
def nday_low_point(stock,n):
    """Mean of n sliding window minima of the daily 'low' series.

    Mirror of nday_high_point, but over lows.
    """
    hist = history(2*n,'1d','low',[stock],df = False)[stock]
    low_point = []
    for i in range(n):
        # BUG FIX: this used max() (copy-paste from nday_high_point), which
        # produced window highs instead of the lows the name and the AO
        # midpoint formula require.
        low_point.append(min(hist[-5-i:-1-i]))
    return np.array(low_point).mean()
#AO=5日内(最高-最低)/2的5日移动平均-34日内(最高-最低)/2的34日移动平均
def AO_index(stock):
    """Append the latest Awesome Oscillator value to g.AO_index[stock].

    AO = 5-day midpoint average minus 34-day midpoint average, built from
    the rolling high/low helpers above.
    """
    g.AO_index[stock].append(nday_high_point(stock,5)/2 + nday_low_point(stock,5)/2\
                             - nday_high_point(stock,34)/2 - nday_low_point(stock,34)/2)
    return None
#AO-AO的5日平均值的5日平均
def AC_index(stock):
    """Append the latest AC value: AC = SMA5(AO - SMA5(AO)).

    Also refreshes the AO series first; intermediate differences are kept
    in g.cal_AC_index[stock].
    """
    AO_index(stock)
    if len(g.AO_index[stock]) >= 5:
        g.cal_AC_index[stock].append(g.AO_index[stock][-1] - np.array(g.AO_index[stock][-5:]).mean())
    if len(g.cal_AC_index[stock]) >=5:
        g.AC_index[stock].append(np.array(g.cal_AC_index[stock][-5:]).mean())
#判断序列n日上行
def is_up_going(alist,n):
    """True when the last n entries of alist are strictly increasing
    (False when fewer than n entries are available)."""
    if len(alist) < n:
        return False
    tail = alist[len(alist) - n:]
    return all(earlier < later for earlier, later in zip(tail, tail[1:]))
#判断序列n日下行
def is_down_going(alist,n):
    """True when the last n entries of alist are strictly decreasing
    (False when fewer than n entries are available)."""
    if len(alist) < n:
        return False
    tail = alist[len(alist) - n:]
    return all(earlier > later for earlier, later in zip(tail, tail[1:]))
#碎形被突破
def active_fractal(stock,direction):
    """True when the latest close breaks through the recorded fractal extreme
    (above g.up_price for 'up', below g.low_price for 'down')."""
    close_price = history(1,'1d','close',[stock],df=False)[stock][0]
    if direction == 'up' and close_price > g.up_price[stock]:
        return True
    elif direction == 'down' and close_price < g.low_price[stock]:
        return True
    return False
#进场,初始仓位
def set_initial_position(stock,context):
    """Open the initial position after an upward-fractal breakout.

    NOTE(review): the share count is cash/price/len(buy_stock)*3, i.e. three
    times an equal-weight slice -- confirm this "full position" sizing is
    intentional and cannot exceed available cash.
    """
    close_price = history(1,'1d','close',[stock],df=False)[stock][0]
    g.amount[stock] = context.portfolio.cash/close_price/len(g.buy_stock)*3
    order(stock, g.amount[stock])
    log.info("buying %s 股数为 %s"%(stock,g.amount[stock]))
    # Clear the downward-fractal flag so a stale signal cannot trigger an
    # immediate exit.
    g.down_fractal_exists[stock] = False
#卖出
def sell_all_stock(stock,context):
    """Liquidate the position and clear the upward-fractal flag."""
    order_target(stock,0)
    log.info("selling %s"%stock)
    g.up_fractal_exists[stock] = False
#加仓
def adjust_position(stock,context,position):
    """Scale the position by a signed fraction of the full-position size
    (e.g. position=0.1 adds 10%, position=-0.1 trims 10%)."""
    order(stock,g.amount[stock]*position)
    log.info("adjust position buying %s 股数为 %s"%(stock,g.amount[stock]*position))
# 计算股票前n日收益率
# 计算股票前n日收益率
def security_return(days,security_code):
    """Return of *security_code* over the previous *days* days
    (close-to-close fractional change)."""
    hist1 = attribute_history(security_code, days + 1, '1d', 'close',df=False)
    security_returns = (hist1['close'][-1]-hist1['close'][0])/hist1['close'][0]
    return security_returns
# 止损,根据前n日收益率
# 止损,根据前n日收益率
def conduct_nday_stoploss(context,security_code,days,bench):
    """Market-wide stop-loss: if *security_code*'s *days*-day return is at or
    below *bench*, liquidate every held stock. Returns True when triggered."""
    if security_return(days,security_code)<= bench:
        for stock in g.buy_stock:
            order_target_value(stock,0)
            log.info("Sell %s for stoploss" %stock)
        return True
    else:
        return False
# 计算股票累计收益率(从建仓至今)
def security_accumulate_return(context,data,stock):
    """Cumulative return since the position was opened, or None when there is
    no cost basis (no position held)."""
    price_now = data[stock].price
    avg_cost = context.portfolio.positions[stock].avg_cost
    if avg_cost == 0:
        return None
    return (price_now - avg_cost) / avg_cost
# 个股止损,根据累计收益
# 个股止损,根据累计收益
def conduct_accumulate_stoploss(context,data,stock,bench):
    """Per-stock stop-loss: sell when the cumulative return since entry is
    below *bench*. Returns True when triggered."""
    if security_accumulate_return(context,data,stock) != None\
    and security_accumulate_return(context,data,stock) < bench:
        order_target_value(stock,0)
        log.info("Sell %s for stoploss" %stock)
        return True
    else:
        return False
# 个股止盈,根据累计收益
# 个股止盈,根据累计收益
def conduct_accumulate_stopwin(context,data,stock,bench):
    """Per-stock take-profit: sell when the cumulative return since entry is
    above *bench*. Returns True when triggered."""
    if security_accumulate_return(context,data,stock) != None\
    and security_accumulate_return(context,data,stock) > bench:
        order_target_value(stock,0)
        log.info("Sell %s for stopwin" %stock)
        return True
    else:
        return False
def handle_data(context,data):
    """Daily driver: apply stop rules, update AO/AC indicators, and
    enter/exit positions on confirmed fractal breakouts."""
    # Market-wide stop-loss on the CSI300 index
    if conduct_nday_stoploss(context,'000300.XSHG',3,-0.03):
        return
    for stock in g.buy_stock:
        # Per-stock take-profit / stop-loss
        if conduct_accumulate_stopwin(context,data,stock,0.3)\
        or conduct_accumulate_stoploss(context,data,stock,-0.1):
            return
        # Refresh the AO and AC indicator series
        AC_index(stock)
        # No position held: look for an entry
        if context.portfolio.positions[stock].amount == 0:
            # Detect (and validate) an upward fractal
            is_effective_fractal(stock,'high')
            # BUG FIX: test the per-stock flag; the bare dict
            # g.up_fractal_exists is truthy whenever any stock has an entry.
            if g.up_fractal_exists[stock] and active_fractal(stock,'up'):
                close_price = history(5, '1d', 'close', [stock],df = False)
                if is_up_going(g.AO_index[stock],5)\
                and is_up_going(g.AC_index[stock],3)\
                and is_up_going(close_price[stock],2):
                    set_initial_position(stock,context)
        # Position held: add to it or exit
        else:
            # Detect (and validate) a downward fractal
            is_effective_fractal(stock,'low')
            # Exit rule 1: a valid downward fractal exists and is broken
            # BUG FIX: test the per-stock flag, as above.
            if g.down_fractal_exists[stock] and active_fractal(stock,'down'):
                sell_all_stock(stock,context)
                return
            # Exit rule 2 (disabled):
            # add 10% when AO/AC are rising 5/3 days and close is rising
            # if is_up_going(g.AO_index[stock],5)\
            # and is_up_going(g.AC_index[stock],3)\
            # and is_up_going(close_price[stock],2):
            #     adjust_position(stock,context,0.1)
            # # trim 10% when AO/AC are falling and close is falling
            # if is_down_going(g.AO_index[stock],5)\
            # and is_down_going(g.AC_index[stock],3)\
            # and is_down_going(close_price[stock],2):
            #     adjust_position(stock,context,-0.1)
3396882 | <reponame>mauricio-chavez/waitress-backend
"""Development settings."""
from .base import * # NOQA
from .base import env
from datetime import timedelta
# Base
DEBUG = env.bool('DJANGO_DEBUG', True)
# Security
SECRET_KEY = env(
'DJANGO_SECRET_KEY',
default='4ra7dxard(&tmw$b5s9--akuakuisreal0v4!!f_-h7i)b_96aw'
)
ALLOWED_HOSTS = [
'localhost',
'0.0.0.0',
'127.0.0.1',
]
# Templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# Development apps
INSTALLED_APPS += [
'django_extensions',
]
MEDIA_URL = '/media/'
MEDIA_ROOT = APPS_DIR / 'media'
# Security
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
# Admin
ADMIN_URL = env('DJANGO_ADMIN_URL', default='admin/')
# JWT tokens
GRAPHQL_JWT['JWT_EXPIRATION_DELTA'] = timedelta(days=1) | StarcoderdataPython |
1768289 | from typing import List
import pytest
from thinc.api import fix_random_seed, Adam, set_dropout_rate
from thinc.api import Ragged, reduce_mean, Logistic, chain, Relu
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy
from spacy.ml.models import build_Tok2Vec_model, MultiHashEmbed, MaxoutWindowEncoder
from spacy.ml.models import build_bow_text_classifier, build_simple_cnn_text_classifier
from spacy.ml.models import build_spancat_model
from spacy.ml.staticvectors import StaticVectors
from spacy.ml.extract_spans import extract_spans, _get_span_indices
from spacy.lang.en import English
from spacy.lang.en.examples import sentences as EN_SENTENCES
def get_textcat_bow_kwargs():
    """Keyword arguments used to build the bag-of-words text classifier."""
    kwargs = {}
    kwargs["exclusive_classes"] = True
    kwargs["ngram_size"] = 1
    kwargs["no_output_layer"] = False
    kwargs["nO"] = 34
    return kwargs
def get_textcat_cnn_kwargs():
    """Keyword arguments used to build the simple CNN text classifier."""
    return {"tok2vec": test_tok2vec(), "exclusive_classes": False, "nO": 13}
def get_all_params(model):
    """Concatenate every parameter of every node in *model* into one flat array."""
    params = []
    for node in model.walk():
        for name in node.param_names:
            params.append(node.get_param(name).ravel())
    # BUG FIX: use model.ops instead of the loop variable `node`, which
    # relied on loop-variable leakage and raised NameError for models whose
    # walk() yields no nodes.
    return model.ops.xp.concatenate(params)
def get_docs():
    """English Docs for the example sentences plus their concatenation."""
    nlp = English()
    return list(nlp.pipe(EN_SENTENCES + [" ".join(EN_SENTENCES)]))
def get_gradient(model, Y):
if isinstance(Y, model.ops.xp.ndarray):
dY = model.ops.alloc(Y.shape, dtype=Y.dtype)
dY += model.ops.xp.random.uniform(-1.0, 1.0, Y.shape)
return dY
elif isinstance(Y, List):
return [get_gradient(model, y) for y in Y]
else:
raise ValueError(f"Could not get gradient for type {type(Y)}")
def get_tok2vec_kwargs():
    """Embed/encode sub-models used to build the Tok2Vec model in the tests."""
    # This actually creates models, so seems best to put it in a function.
    return {
        "embed": MultiHashEmbed(
            width=32,
            rows=[500, 500, 500],
            attrs=["NORM", "PREFIX", "SHAPE"],
            include_static_vectors=False,
        ),
        "encode": MaxoutWindowEncoder(
            width=32, depth=2, maxout_pieces=2, window_size=1
        ),
    }
def test_tok2vec():
    """Build (and thereby smoke-test) the default Tok2Vec model; also reused
    as a fixture by other helpers."""
    return build_Tok2Vec_model(**get_tok2vec_kwargs())
def test_multi_hash_embed():
    """MultiHashEmbed wires one HashEmbed per attribute, each with a distinct
    column and seed and the requested row count."""
    embed = MultiHashEmbed(
        width=32,
        rows=[500, 500, 500],
        attrs=["NORM", "PREFIX", "SHAPE"],
        include_static_vectors=False,
    )
    hash_embeds = [node for node in embed.walk() if node.name == "hashembed"]
    assert len(hash_embeds) == 3
    # Check they look at different columns.
    assert list(sorted(he.attrs["column"] for he in hash_embeds)) == [0, 1, 2]
    # Check they use different seeds
    assert len(set(he.attrs["seed"] for he in hash_embeds)) == 3
    # Check they all have the same number of rows
    assert [he.get_dim("nV") for he in hash_embeds] == [500, 500, 500]
    # Now try with different row factors
    embed = MultiHashEmbed(
        width=32,
        rows=[1000, 50, 250],
        attrs=["NORM", "PREFIX", "SHAPE"],
        include_static_vectors=False,
    )
    hash_embeds = [node for node in embed.walk() if node.name == "hashembed"]
    assert [he.get_dim("nV") for he in hash_embeds] == [1000, 50, 250]
@pytest.mark.parametrize(
    "seed,model_func,kwargs",
    [
        (0, build_Tok2Vec_model, get_tok2vec_kwargs()),
        (0, build_bow_text_classifier, get_textcat_bow_kwargs()),
        (0, build_simple_cnn_text_classifier, get_textcat_cnn_kwargs()),
    ],
)
def test_models_initialize_consistently(seed, model_func, kwargs):
    """Two models built and initialized under the same RNG seed must end up
    with identical parameters."""
    fix_random_seed(seed)
    model1 = model_func(**kwargs)
    model1.initialize()
    fix_random_seed(seed)
    model2 = model_func(**kwargs)
    model2.initialize()
    params1 = get_all_params(model1)
    params2 = get_all_params(model2)
    assert_array_equal(model1.ops.to_numpy(params1), model2.ops.to_numpy(params2))
@pytest.mark.parametrize(
    "seed,model_func,kwargs,get_X",
    [
        (0, build_Tok2Vec_model, get_tok2vec_kwargs(), get_docs),
        (0, build_bow_text_classifier, get_textcat_bow_kwargs(), get_docs),
        (0, build_simple_cnn_text_classifier, get_textcat_cnn_kwargs(), get_docs),
    ],
)
def test_models_predict_consistently(seed, model_func, kwargs, get_X):
    """Same seed + same inputs must give identical predictions, including
    identical intermediate tok2vec output when the model exposes one."""
    fix_random_seed(seed)
    model1 = model_func(**kwargs).initialize()
    Y1 = model1.predict(get_X())
    fix_random_seed(seed)
    model2 = model_func(**kwargs).initialize()
    Y2 = model2.predict(get_X())
    if model1.has_ref("tok2vec"):
        tok2vec1 = model1.get_ref("tok2vec").predict(get_X())
        tok2vec2 = model2.get_ref("tok2vec").predict(get_X())
        for i in range(len(tok2vec1)):
            for j in range(len(tok2vec1[i])):
                assert_array_equal(
                    numpy.asarray(model1.ops.to_numpy(tok2vec1[i][j])),
                    numpy.asarray(model2.ops.to_numpy(tok2vec2[i][j])),
                )
    # Copy device arrays to host where possible; non-array outputs raise
    # here and are handled by the type checks below.
    try:
        Y1 = model1.ops.to_numpy(Y1)
        Y2 = model2.ops.to_numpy(Y2)
    except Exception:
        pass
    if isinstance(Y1, numpy.ndarray):
        assert_array_equal(Y1, Y2)
    elif isinstance(Y1, List):
        assert len(Y1) == len(Y2)
        for y1, y2 in zip(Y1, Y2):
            try:
                y1 = model1.ops.to_numpy(y1)
                y2 = model2.ops.to_numpy(y2)
            except Exception:
                pass
            assert_array_equal(y1, y2)
    else:
        raise ValueError(f"Could not compare type {type(Y1)}")
@pytest.mark.parametrize(
    "seed,dropout,model_func,kwargs,get_X",
    [
        (0, 0.2, build_Tok2Vec_model, get_tok2vec_kwargs(), get_docs),
        (0, 0.2, build_bow_text_classifier, get_textcat_bow_kwargs(), get_docs),
        (0, 0.2, build_simple_cnn_text_classifier, get_textcat_cnn_kwargs(), get_docs),
    ],
)
def test_models_update_consistently(seed, dropout, model_func, kwargs, get_X):
    """Two 5-step training runs from the same seed must yield (almost)
    identical parameters, and each run must actually change them."""
    def get_updated_model():
        fix_random_seed(seed)
        optimizer = Adam(0.001)
        model = model_func(**kwargs).initialize()
        initial_params = get_all_params(model)
        set_dropout_rate(model, dropout)
        for _ in range(5):
            Y, get_dX = model.begin_update(get_X())
            dY = get_gradient(model, Y)
            get_dX(dY)
            model.finish_update(optimizer)
        updated_params = get_all_params(model)
        # Training must have moved the parameters.
        with pytest.raises(AssertionError):
            assert_array_equal(
                model.ops.to_numpy(initial_params), model.ops.to_numpy(updated_params)
            )
        return model
    model1 = get_updated_model()
    model2 = get_updated_model()
    assert_array_almost_equal(
        model1.ops.to_numpy(get_all_params(model1)),
        model2.ops.to_numpy(get_all_params(model2)),
    )
@pytest.mark.parametrize("model_func,kwargs", [(StaticVectors, {"nO": 128, "nM": 300})])
def test_empty_docs(model_func, kwargs):
    """StaticVectors must handle batches of 0, 1 and 2 empty docs without
    errors, in both predict and backprop."""
    nlp = English()
    model = model_func(**kwargs).initialize()
    # Test the layer can be called successfully with 0, 1 and 2 empty docs.
    for n_docs in range(3):
        docs = [nlp("") for _ in range(n_docs)]
        # Test predict
        model.predict(docs)
        # Test backprop
        output, backprop = model.begin_update(docs)
        backprop(output)
def test_init_extract_spans():
    """extract_spans() can be constructed and initialized without inputs."""
    model = extract_spans().initialize()
def test_extract_spans_span_indices():
    """_get_span_indices flattens [start, end) spans into absolute token
    indices, offsetting each doc by the preceding docs' lengths."""
    model = extract_spans().initialize()
    spans = Ragged(
        model.ops.asarray([[0, 3], [2, 3], [5, 7]], dtype="i"),
        model.ops.asarray([2, 1], dtype="i"),
    )
    x_lengths = model.ops.asarray([5, 10], dtype="i")
    indices = _get_span_indices(model.ops, spans, x_lengths)
    # Spans [0,3) and [2,3) in doc 0; span [5,7) in doc 1 (offset by 5).
    assert list(indices) == [0, 1, 2, 2, 10, 11]
def test_extract_spans_forward_backward():
    """Forward pass yields one row per extracted token; backward pass
    returns a gradient with the input's shape and lengths."""
    model = extract_spans().initialize()
    X = Ragged(model.ops.alloc2f(15, 4), model.ops.asarray([5, 10], dtype="i"))
    spans = Ragged(
        model.ops.asarray([[0, 3], [2, 3], [5, 7]], dtype="i"),
        model.ops.asarray([2, 1], dtype="i"),
    )
    Y, backprop = model.begin_update((X, spans))
    assert list(Y.lengths) == [3, 1, 2]
    assert Y.dataXd.shape == (6, 4)
    dX, spans2 = backprop(Y)
    assert spans2 is spans
    assert dX.dataXd.shape == X.dataXd.shape
    assert list(dX.lengths) == list(X.lengths)
def test_spancat_model_init():
    """The spancat model can be composed and initialized without data."""
    model = build_spancat_model(
        build_Tok2Vec_model(**get_tok2vec_kwargs()), reduce_mean(), Logistic()
    )
    model.initialize()
def test_spancat_model_forward_backward(nO=5):
    """Forward/backward smoke test: output has one row per span and nO
    columns, and the gradient propagates without error."""
    tok2vec = build_Tok2Vec_model(**get_tok2vec_kwargs())
    docs = get_docs()
    spans_list = []
    lengths = []
    # Two spans per doc: tokens [0,2) and [1,4).
    for doc in docs:
        spans_list.append(doc[:2])
        spans_list.append(doc[1:4])
        lengths.append(2)
    spans = Ragged(
        tok2vec.ops.asarray([[s.start, s.end] for s in spans_list], dtype="i"),
        tok2vec.ops.asarray(lengths, dtype="i"),
    )
    model = build_spancat_model(
        tok2vec, reduce_mean(), chain(Relu(nO=nO), Logistic())
    ).initialize(X=(docs, spans))
    Y, backprop = model((docs, spans), is_train=True)
    assert Y.shape == (spans.dataXd.shape[0], nO)
    backprop(Y)
3381145 | <filename>qa_lib.py<gh_stars>0
import os
import shutil
import time
import pysftp
from PIL import Image
'''
TODO: Move folders from 002-ingest to 003-ingested
example:
sudo cp -R . /library/lib-sftp/003-ingested/P005_University_of_Denver_Clarion-resources_1194/
sudo cp -R . /library/lib-sftp/001-ready/new_000_qa_test-resources_496/
'''
'''
Checks if folder name conforms to naming standard
@param: folder
@returns: Array
'''
def check_folder_name(folder):
    """Validate an ingest folder name against the naming standard
    new_<name>-resources_<URI>.

    Args:
        folder: folder name to validate.

    Returns:
        List of error strings; empty when the name conforms.
    """
    errors = []
    # Idiomatic membership tests instead of str.find() == -1.
    if 'new_' not in folder:
        errors.append('Folder name is missing "new_" part.')
    if '-resources' not in folder:
        errors.append('Folder name is missing "-resources" part.')
    if 'resources_' not in folder:
        errors.append('Folder name is missing "resources_" part')
    # The final underscore-delimited token must be a numeric URI.
    if not folder.split('_')[-1].isdigit():
        errors.append('Folder is missing "URI" part')
    # The original returned a fresh [] when empty; returning `errors`
    # directly is equivalent.
    return errors
'''
Checks package names and fixes case issues and removes spaces
@param: ready_path
@param: folder
@returns: void
'''
def check_package_names(ready_path, folder):
    """Lowercase and de-space package directory names inside *folder*, and
    delete dot-files at the folder level.

    Returns -1 when the folder contains no packages; otherwise None.
    """
    packages = [f for f in os.listdir(ready_path + folder) if not f.startswith('.')]
    # Remove hidden files (.DS_Store etc.) from the folder itself.
    [os.remove(ready_path + folder + '/' + f) for f in os.listdir(ready_path + folder) if f.startswith('.')]
    if len(packages) == 0:
        return -1
    for i in packages:
        package = ready_path + folder + '/'
        # NOTE(review): `i.upper()` is truthy for every non-empty name, so
        # this branch always runs; `i.isupper()` may have been intended.
        if i.upper():
            # Only rename names without an extension/call-number dot.
            call_number = i.find('.')
            if call_number == -1:
                os.rename(package + i, package + i.lower().replace(' ', ''))
'''
Checks file names and fixes case issues and removes spaces
@param: ready_path
@param: folder
@returns: Array
'''
def check_file_names(ready_path, folder):
    """Normalize file names inside every package and sanity-check images/PDFs.

    Renames files (lowercasing names without a dot, stripping spaces),
    deletes hidden files, flags packages with fewer than 2 files, and
    collects per-file errors from check_image_file / check_pdf_file.

    Returns:
        dict with 'local_file_count' (total files seen) and 'errors'
        (package names and per-file error dicts).
    """
    errors = []
    files_arr = []
    packages = [f for f in os.listdir(ready_path + folder) if not f.startswith('.')]
    for i in packages:
        package = ready_path + folder + '/' + i + '/'
        files = [f for f in os.listdir(package) if not f.startswith('.')]
        # Delete hidden files inside the package.
        [os.remove(package + f) for f in os.listdir(package) if f.startswith('.')]
        # A valid package needs at least a content file plus uri.txt.
        if len(files) < 2:
            errors.append(i)
        for j in files:
            files_arr.append(j)
            # NOTE(review): `j.upper()` is truthy for every non-empty name,
            # so this branch always runs; `j.isupper()` may have been meant.
            if j.upper():
                call_number = j.find('.')
                if call_number == -1:
                    os.rename(package + j, package + j.lower().replace(' ', ''))
                elif call_number != -1:
                    os.rename(package + j, package + j.replace(' ', ''))
            # check images here
            file = package + j
            if file.endswith('.tiff') or file.endswith('.tif') or file.endswith('.jpg') or file.endswith('.png'):
                result = check_image_file(file, j)
                if result.get('error') != False:
                    errors.append(result)
            # check pdfs here
            file = package + j
            if file.endswith('.pdf'):
                result = check_pdf_file(file, j)
                if result.get('error') != False:
                    errors.append(result)
    local_file_count = len(files_arr)
    return dict(local_file_count=local_file_count, errors=errors)
'''
Checks image files to determine if they are broken/corrupt
@param: full_path
@param: file_name
@returns: Object
'''
def check_image_file(full_path, file_name):
    """Detect broken or corrupt image files.

    Verifies the file header, then re-opens and attempts a pixel-level
    manipulation (verify() alone misses truncated image data).

    Returns:
        dict(error=False, file='') on success, or
        dict(error=<message>, file=<file_name>) on failure.
    """
    try:
        candidate = Image.open(full_path)
        candidate.verify()  # confirm that file is an image
        candidate.close()
        # verify() invalidates the image object, so re-open before
        # manipulating pixels to catch truncated/corrupt data.
        candidate = Image.open(full_path)
        candidate.transpose(Image.FLIP_LEFT_RIGHT)
        candidate.close()
        return dict(error=False, file='')
    except OSError as err:
        return dict(error=str(err), file=file_name)
def check_pdf_file(full_path, file_name):
    """Basic sanity checks for a PDF file.

    Implements the original TODO: reject files over 900MB (the downstream
    ingest limit). Unreadable files are reported as errors.

    Returns:
        dict(error=False, file='') on success, or
        dict(error=<message>, file=<file_name>) on failure.
    """
    max_bytes = 900 * 1024 * 1024  # 900MB ingest limit
    try:
        if os.path.getsize(full_path) > max_bytes:
            return dict(error='File exceeds maximum size limit (900MB)', file=file_name)
        return dict(error=False, file='')
    except OSError as error:
        return dict(error=str(error), file=file_name)
# TODO: check dates (start/end) in archivesspace record // check on client side?
'''
Checks for missing uri.txt files
@param: ready_path
@param: folder
@returns: Array
'''
def check_uri_txt(ready_path, folder):
    """List packages inside *folder* that are missing a uri.txt file.

    Returns:
        List of offending package names; [-1] when the folder has no
        packages at all; [] when everything is in order.
    """
    errors = []
    packages = [f for f in os.listdir(ready_path + folder) if not f.startswith('.')]
    if len(packages) == 0:
        # BUG FIX: the original `return errors.append(-1)` returned
        # list.append()'s None instead of the sentinel list.
        errors.append(-1)
        return errors
    for i in packages:
        package = ready_path + folder + '/' + i + '/'
        files = [f for f in os.listdir(package) if not f.startswith('.')]
        if 'uri.txt' not in files:
            errors.append(i)
    return errors
'''
Checks package file size (bytes)
@param: ready_path
@param: folder
@returns: Integer
https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
'''
def get_package_size(ready_path, folder):
    """Total size in bytes of every regular file under ready_path/folder,
    walking the tree recursively and skipping symbolic links."""
    root = ready_path + folder
    total = 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            # skip if it is symbolic link
            if os.path.islink(path):
                continue
            total += os.path.getsize(path)
    return total
'''
Moves folder from ready to ingest folder and renames it using pid
@param: ready_path
@param: ingest_path
@param: folder
@returns: String (if there is an error)
'''
def move_to_ingest(ready_path, ingest_path, pid, folder):
    """Move *folder* from the ready area to the ingest area, rename it to
    *pid*, and recreate an empty ready folder.

    Returns:
        List of error strings; empty on success.
    """
    errors = []
    # NOTE(review): 0o666 lacks the execute bit directories need to be
    # traversed -- confirm the intended permissions.
    mode = 0o666
    try:
        shutil.move(ready_path + folder, ingest_path + folder)
    except (OSError, shutil.Error):
        # BUG FIX: the original `return errors.append(...)` returned None;
        # also narrowed the bare except, which swallowed KeyboardInterrupt.
        errors.append('ERROR: Unable to move folder (move_to_ingest)')
        return errors
    # TODO: calculate time based on size of collection
    time.sleep(15.0)
    try:
        os.rename(ingest_path + folder, ingest_path + pid)
    except OSError:
        errors.append('ERROR: Unable to rename folder (move_to_ingest)')
        return errors
    try:
        os.mkdir(ready_path + folder, mode)
    except OSError:
        errors.append('ERROR: Unable to create folder (move_to_ingest)')
        return errors
    return errors
'''
Moves folder to archivematica sftp
@param: host
@param: username
@param: password
@param: sftp_path
@returns: void
'''
def move_to_sftp(ingest_path, pid):
    """Recursively upload *ingest_path* to the Archivematica SFTP server and
    verify that *pid* appears in the remote listing.

    Connection details come from SFTP_HOST / SFTP_ID / SFTP_PWD /
    SFTP_REMOTE_PATH environment variables.

    NOTE(review): `errors` is populated on failure but never returned, so
    callers cannot observe the -1 sentinel -- confirm intended contract.
    NOTE(review): host-key checking is disabled (cnopts.hostkeys = None),
    which permits man-in-the-middle attacks.
    """
    host = os.getenv('SFTP_HOST')
    username = os.getenv('SFTP_ID')
    password = os.getenv('SFTP_PWD')
    sftp_path = os.getenv('SFTP_REMOTE_PATH')
    cnopts = pysftp.CnOpts()
    cnopts.hostkeys = None
    errors = []
    with pysftp.Connection(host=host, username=username, password=password, cnopts=cnopts) as sftp:
        sftp.put_r(ingest_path, sftp_path, preserve_mtime=True)
        packages = sftp.listdir()
        if pid not in packages:
            errors.append(-1)
'''
checks upload on archivematica sftp
@param: host
@param: username
@param: password
@param: sftp_path
@returns: Array
'''
def check_sftp(pid, local_file_count):
    """Compare the remote SFTP file count for *pid* against the local count.

    Walks the remote package tree, counts files, and measures the remote
    package size via `du`.

    Returns:
        dict(message='upload_complete', ...) when the counts match, else
        dict(message='in_progress', ...) with progress details.

    NOTE(review): host-key checking is disabled (cnopts.hostkeys = None),
    which permits man-in-the-middle attacks.
    """
    host = os.getenv('SFTP_HOST')
    username = os.getenv('SFTP_ID')
    password = os.getenv('SFTP_PWD')
    cnopts = pysftp.CnOpts()
    cnopts.hostkeys = None
    sftp_path = os.getenv('SFTP_REMOTE_PATH')
    file_names = []
    dir_names = []
    un_name = []
    # walktree callbacks: collect files, directories and other entries.
    def store_files_name(fname):
        file_names.append(fname)
    def store_dir_name(dirname):
        dir_names.append(dirname)
    def store_other_file_types(name):
        un_name.append(name)
    with pysftp.Connection(host=host, username=username, password=password, cnopts=cnopts) as sftp:
        remote_package = sftp_path + '/' + pid + '/'
        sftp.cwd(remote_package)
        sftp.walktree(remote_package, store_files_name, store_dir_name, store_other_file_types, recurse=True)
        remote_file_count = len(file_names)
        with sftp.cd(remote_package):
            remote_package_size = sftp.execute('du -h -s')
        if int(local_file_count) == remote_file_count:
            return dict(message='upload_complete', data=[file_names, remote_file_count])
        return dict(message='in_progress', file_names=file_names, remote_file_count=remote_file_count,
                    local_file_count=local_file_count,
                    remote_package_size=remote_package_size[0].decode().strip().replace('\t', ''))
| StarcoderdataPython |
class NoContentError(RuntimeError):
    """Application error for 'no content' conditions (exact semantics are
    defined by the call sites)."""
    pass
class DuplicateUploadError(RuntimeError):
    """Application error for duplicate-upload conditions (exact semantics are
    defined by the call sites)."""
    pass
class ApiError(RuntimeError):
    """Base class for API-related errors (NotFoundError derives from it)."""
    pass
class NotFoundError(ApiError):
    """ApiError subclass for 'not found' conditions."""
    pass
| StarcoderdataPython |
3218933 | """
test_newick
===========
Tests for the `newick` module.
"""
# Import the library being tested
import ngesh
def test_sorted_newick():
    """
    Tests if sorted_newick() is returning the expected string.
    """
    unsorted_tree = "(Ei:0.98,(Mepale:0.39,(Srufo:0.14,Pulet:0.14):0.24):0.58);"
    expected = "(((Pulet:0.14,Srufo:0.14):0.24,Mepale:0.39):0.58,Ei:0.98);"
    assert ngesh.sorted_newick(unsorted_tree) == expected
| StarcoderdataPython |
3391259 | # Python > Regex and Parsing > Re.findall() & Re.finditer()
# Find all the pattern matches using the expressions re.findall() and re.finditer().
#
# https://www.hackerrank.com/challenges/re-findall-re-finditer/problem
#
import re
# Print every run of 2+ vowels that sits strictly between two non-vowels.
# The lookbehind anchors the left consonant without consuming it; the final
# [^aeiou] consumes the right consonant. re.I makes the match
# case-insensitive, as the problem requires.
found = False
s = input()
for i in re.findall("(?<=[^aeiou])([aeiou]{2,})[^aeiou]", s, flags=re.I):
    print(i)
    found = True
# Per the problem statement, print -1 when no match exists.
if not found:
    print(-1)
| StarcoderdataPython |
4828885 | <gh_stars>10-100
# Copyright (c) Nanjing University, Vision Lab.
# <NAME> (<EMAIL>), <NAME> (<EMAIL>); Nanjing University, Vision Lab.
# Last update: 2020.06.06
import os
import sys
# sys.path.append('.')
import glob
import subprocess
import argparse
import logging
from time import time
import torch
import torch.utils.data
from torch.utils.data.sampler import Sampler
import numpy as np
import MinkowskiEngine as ME
from dataprocess.data_basic import loadh5, loadply
class InfSampler(Sampler):
    """Samples elements randomly, without replacement, restarting with a
    fresh index pool once the data source is exhausted ("infinite" sampler).

    Arguments:
        data_source (Dataset): dataset to sample from
        shuffle (bool): if True, each pass uses a new random permutation;
            otherwise indices are yielded in a fixed (descending) order.
    """

    def __init__(self, data_source, shuffle=False):
        self.data_source = data_source
        self.shuffle = shuffle
        self.reset_permutation()

    def reset_permutation(self):
        """Rebuild the index pool for one full pass over the data source."""
        n = len(self.data_source)
        if self.shuffle:
            self._perm = torch.randperm(n).tolist()
        else:
            # BUGFIX: the original called `.tolist()` on the plain int `n`
            # when shuffle=False (the default), raising AttributeError.
            self._perm = list(range(n))

    def __iter__(self):
        return self

    def __next__(self):
        # Pop until the pool is empty, then start a new pass.
        if len(self._perm) == 0:
            self.reset_permutation()
        return self._perm.pop()

    def __len__(self):
        return len(self.data_source)
def collate_pointcloud_fn(list_data):
    """Collate (coords, feats) pairs into batched ME coordinates and stacked
    float features. None entries (failed loads) are silently dropped; a batch
    consisting only of None raises ValueError.
    """
    kept = [sample for sample in list_data if sample is not None]
    if len(kept) == 0:
        raise ValueError('No data in the batch')

    coords, feats = zip(*kept)
    coords_batch = ME.utils.batched_coordinates(coords)
    feats_batch = torch.from_numpy(np.vstack(feats)).float()
    return coords_batch, feats_batch
class PCDataset(torch.utils.data.Dataset):
    """Lazily loading point-cloud dataset with an in-memory sample cache.

    Files may be .h5 (read via loadh5) or .ply (read via loadply). Loaded
    samples are cached by index and a progress line is printed roughly at
    every 10% of cache fill.
    """

    def __init__(self, files, feature_format):
        self.files = files
        self.cache = {}
        self.last_cache_percent = 0
        self.feature_format = feature_format

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        pc_file = self.files[idx]
        if idx in self.cache:
            coords, feats = self.cache[idx]
        else:
            if pc_file.endswith('.h5'):
                coords, feats = loadh5(pc_file, self.feature_format)
            elif pc_file.endswith('.ply'):
                coords, feats = loadply(pc_file, self.feature_format)
            self.cache[idx] = (coords, feats)
            # Report progress each time another 10% of the data set is cached.
            cache_percent = int((len(self.cache) / len(self)) * 100)
            if cache_percent > 0 and cache_percent % 10 == 0 and cache_percent != self.last_cache_percent:
                print('cache percent:', len(self.cache) / len(self))
                self.last_cache_percent = cache_percent
        return (coords, feats)
def make_data_loader(dataset, batch_size, shuffle, num_workers, repeat):
    """Build a DataLoader over `dataset` using the point-cloud collate fn.

    With repeat=True an InfSampler is attached so iteration never stops
    (useful for step-based training loops); otherwise plain shuffling is
    delegated to the DataLoader itself.
    """
    loader_kwargs = dict(
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=collate_pointcloud_fn,
        pin_memory=True,
        drop_last=False,
    )
    if repeat:
        loader_kwargs['sampler'] = InfSampler(dataset, shuffle)
    else:
        loader_kwargs['shuffle'] = shuffle
    return torch.utils.data.DataLoader(dataset, **loader_kwargs)
if __name__ == "__main__":
    # Smoke test: benchmark how quickly batches can be pulled from the loader.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--dataset", type=str, default='')
    args = parser.parse_args()

    filedirs = glob.glob(args.dataset + '*.h5')
    test_dataset = PCDataset(filedirs[:100], feature_format='rgb')
    test_dataloader = make_data_loader(dataset=test_dataset,
                                       batch_size=8,
                                       shuffle=True,
                                       num_workers=1,
                                       repeat=True)
    test_iter = iter(test_dataloader)

    # Shadows the module-level `from time import time` on purpose, as in the
    # original; time.time() is used below.
    import time
    s = time.time()
    for i in range(1000):
        # BUGFIX: `test_iter.next()` is Python-2 iterator syntax and fails on
        # modern DataLoader iterators; the builtin next() works everywhere.
        coords, feats = next(test_iter)
        if i % 20 == 0:
            print('iter::::', i, time.time() - s)
            s = time.time()
| StarcoderdataPython |
3239112 | <reponame>cameronfr/trusat-backend
#!/usr/bin/env python
"""
This is a script to rename files retrieved from John's hypermail scraper,
try and determine the date from the filename, and then rename the filename
with the successfully parsed date.
Note that I've had to do some manual cleanup both before and after this
script to create a clean dataset
"""
import os
import re
import sys
from csv import writer
from datetime import datetime
from dateutil import parser
from dateutil.tz import gettz
import shutil
def _to_utc_string(submit_datetime):
    """Normalize a parsed datetime to naive UTC, formatted 'YYYY-MM-DD HH:MM:SS'."""
    if submit_datetime.utcoffset():
        submit_datetime = submit_datetime - submit_datetime.utcoffset()
        submit_datetime = submit_datetime.replace(tzinfo=None)
    return submit_datetime.strftime('%Y-%m-%d %H:%M:%S')


def get_submit_time_from_filename(filename):
    """Determine the submitted date from a hypermail-scraper filename.

    Tries, in order: a fuzzy dateutil parse of the underscores-to-spaces
    string, a fixed list of strptime patterns, and finally a dateutil parse
    after aggressively scrubbing odd timezone spellings. Returns the UTC
    timestamp string on success, or None (after printing a warning) when
    nothing parses.

    Example inputs seen in the wild:
        Fri,_10_Jul_1998_14:11:27_+0200
        Fri,_11_Dec_1998_05:53:14_-0800_(PST)
        8_Feb_99_17:27:56_MET
        Fri,__1_Dec_95_06:13:00_UTC_0000
        Fri,_14_Jul_1995_23:22:18_-40975532_(EDT)
    """
    file_string = os.path.basename(filename)
    file_string = os.path.splitext(file_string)[0]

    date_patterns = [
        "%a,_%d_%b_%Y_%X_%z",  # Fri,_10_Jul_1998_14:11:27_+0200
        "%d_%b_%Y_%X_%Z",      # 01_Aug_1995_12:14:27_GMT
        "%d_%b_%Y_%X_%z",      # 05_Dec_1995_20:53:00_+0200
        "%d_%b_%y_%X_%Z",      # 09_Nov_95_11:45:55_EST
        "%d_%b_%y_%X_"         # 10_Feb_1995_08:20:13_U
    ]

    file_string_spaces = file_string.replace('_', ' ')

    # 1) Fuzzy parse of the space-separated string.
    try:
        return _to_utc_string(parser.parse(file_string_spaces))
    except Exception:
        pass

    # 2) Exact strptime patterns against the raw (underscored) string.
    for pattern in date_patterns:
        try:
            return _to_utc_string(datetime.strptime(file_string, pattern))
        except Exception:
            pass

    # 3) Scrub known-bad timezone/offset spellings, then parse again.
    try:
        file_string_spaces = file_string_spaces.replace('gmt', '')
        file_string_spaces = file_string_spaces.replace('0000', '')
        file_string_spaces = file_string_spaces.replace('- ', '')
        file_string_spaces = file_string_spaces.replace('MET DST', '')
        file_string_spaces = file_string_spaces.replace('CST6CDT', '')
        file_string_spaces = file_string_spaces.replace('-40975532', '')
        file_string_spaces = file_string_spaces.replace('(EDT)', '-0400')
        file_string_spaces = file_string_spaces.replace('EDT', '-0400')
        file_string_spaces = file_string_spaces.replace('(EST)', '-0500')
        file_string_spaces = file_string_spaces.replace('est', '-0500')
        file_string_spaces = file_string_spaces.replace('W. Europe Standard Time+', '')
        file_string_spaces = file_string_spaces.replace('-500', '-0500')
        file_string_spaces = file_string_spaces.replace('+-1000', '-1000')
        file_string_spaces = file_string_spaces.replace('+-100', '-0100')
        file_string_spaces = file_string_spaces.replace('+120', '+1200')
        file_string_spaces = file_string_spaces.replace('+-300', '-0300')
        file_string_spaces = file_string_spaces.replace('ind', 'IST')
        file_string_spaces = file_string_spaces.replace('"GMT"', '')
        file_string_spaces = file_string_spaces.replace(':_', ':00_')
        # BUGFIX: the replacement was previously the non-raw string ':0\1_',
        # in which '\1' is chr(1), so the backreference was never emitted.
        # NOTE(review): underscores were already replaced by spaces above, so
        # this pattern may be dead — confirm against real inputs.
        file_string_spaces = re.sub(r':_(\d)_', r':0\1_', file_string_spaces)
        file_string_spaces = re.sub(r'\(.*\)', '', file_string_spaces)
        return _to_utc_string(parser.parse(file_string_spaces))
    except Exception:
        pass

    print("Date is not in expected format: String: '{}' Filename: '{}'".format(file_string, filename))
# Walk the scraped-email tree, rename every file to its parsed UTC timestamp
# (or an 00FIXME_ prefix when parsing failed), and log the mapping to CSV.
# The CSV handle is now managed by `with` so it is flushed/closed on exit
# (the original left it open for the life of the process).
with open("seesat_hypermail_dates.csv", 'w') as DateFile:
    writer_date = writer(DateFile, dialect='unix')
    for dirName, subdirList, fileList in os.walk('/Users/chris/Downloads/all_emails'):
        subdirList.sort()
        for fname in sorted(fileList):
            if fname == ".DS_Store":
                continue
            submit_time = get_submit_time_from_filename(fname)
            try:
                submit_datetime = datetime.strptime(submit_time, '%Y-%m-%d %H:%M:%S')
                new_fname = submit_datetime.strftime('%Y%m%d_%H%M%S.txt')
            except Exception:
                # submit_time may be None or unparsable; flag for manual cleanup.
                new_fname = '00FIXME_' + fname
            old_fname_full = os.path.join(dirName, fname)
            new_fname_full = os.path.join(dirName, new_fname)
            os.rename(old_fname_full, new_fname_full)
            writer_date.writerow([submit_time, fname, new_fname, new_fname_full])
4829032 | <reponame>HuaweiWang/GaitAnalysisToolKit<filename>gaitanalysis/tests/test_gait.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# builtin
import os
# external
import numpy as np
from numpy import testing
import pandas
from pandas.util.testing import assert_frame_equal
from nose.tools import assert_raises
# local
from ..gait import find_constant_speed, interpolate, GaitData
from dtk.process import time_vector
# debugging
try:
from IPython.core.debugger import Tracer
except ImportError:
pass
else:
set_trace = Tracer()
def test_find_constant_speed():
    """The constant-speed onset in the sample treadmill data is in (6, 7) s."""
    data = np.loadtxt(os.path.join(os.path.dirname(__file__),
                                   'data/treadmill-speed.csv'),
                      delimiter=',')
    time, speed = data[:, 0], data[:, 1]

    indice, constant_speed_time = find_constant_speed(time, speed, plot=False)

    assert 6.0 < constant_speed_time < 7.0
def test_interpolate():
    """interpolate() resamples each column onto a new time base."""
    frame = pandas.DataFrame({'a': [np.nan, 3.0, 5.0, 7.0],
                              'b': [5.0, np.nan, 9.0, 11.0],
                              'c': [2.0, 4.0, 6.0, 8.0],
                              'd': [0.5, 1.0, 1.5, np.nan]},
                             index=[0.0, 2.0, 4.0, 6.0])
    new_time = [0.0, 1.0, 3.0, 5.0]

    result = interpolate(frame, new_time)

    # NOTE : pandas.Series.interpolate does not extrapolate (because
    # np.interp doesn't).
    expected = pandas.DataFrame({'a': [4.0, 4.0, 4.0, 6.0],
                                 'b': [5.0, 6.0, 8.0, 10.0],
                                 'c': [2.0, 3.0, 5.0, 7.0],
                                 'd': [0.5, 0.75, 1.25, 1.5]},
                                index=new_time)
    testing.assert_allclose(result.values, expected.values)
    testing.assert_allclose(result.index.values.astype(float),
                            expected.index.values.astype(float))
class TestGaitData():
    """Tests for gaitanalysis.gait.GaitData on a synthetic data set.

    setup() builds 10 s of 100 Hz data where the right/left vertical ground
    reaction forces are clipped, offset sine/cosine waves (one gait cycle per
    second) plus small Gaussian noise.
    """

    def setup(self):
        """Create the synthetic data frame and the GRF threshold."""
        time = time_vector(1000, 100)
        cortex_time = time
        dflow_time = time
        omega = 2 * np.pi
        right_grf = 1000 * (0.75 + np.sin(omega * time))
        right_grf[right_grf < 0.0] = 0.0
        right_grf += 2.0 * np.random.normal(size=right_grf.shape)
        left_grf = 1000 * (0.75 + np.cos(omega * time))
        left_grf[left_grf < 0.0] = 0.0
        left_grf += 2.0 * np.random.normal(size=left_grf.shape)
        right_knee_angle = np.arange(len(time))
        right_knee_moment = np.arange(len(time))
        self.data_frame = \
            pandas.DataFrame({'Right Vertical GRF': right_grf,
                              'Left Vertical GRF': left_grf,
                              'Right Knee Angle': right_knee_angle,
                              'Right Knee Moment': right_knee_moment,
                              'Cortex Time': cortex_time,
                              'D-Flow Time': dflow_time},
                             index=time)
        self.threshold = 10.0

    def test_init(self):
        gait_data = GaitData(self.data_frame)
        assert gait_data.data is self.data_frame

    def test_inverse_dynamics_2d(self):
        # This only tests to make sure new columns were inserted after the
        # command. There is a test for the underlying leg2d Octave program
        # that actually tests the computed values.

        # Add some columns for the data we need.
        lmark = ['LSHO.PosX', 'LSHO.PosY',
                 'LGTRO.PosX', 'LGTRO.PosY',
                 'LLEK.PosX', 'LLEK.PosY',
                 'LLM.PosX', 'LLM.PosY',
                 'LHEE.PosX', 'LHEE.PosY',
                 'LMT5.PosX', 'LMT5.PosY']
        rmark = ['RSHO.PosX', 'RSHO.PosY',
                 'RGTRO.PosX', 'RGTRO.PosY',
                 'RLEK.PosX', 'RLEK.PosY',
                 'RLM.PosX', 'RLM.PosY',
                 'RHEE.PosX', 'RHEE.PosY',
                 'RMT5.PosX', 'RMT5.PosY']
        lforce = ['FP1.ForX', 'FP1.ForY', 'FP1.MomZ']
        rforce = ['FP2.ForX', 'FP2.ForY', 'FP2.MomZ']
        columns = lmark + rmark + lforce + rforce
        rand = np.random.random((len(self.data_frame), len(columns)))
        new_data = pandas.DataFrame(rand, index=self.data_frame.index,
                                    columns=columns)
        data_frame = self.data_frame.join(new_data)

        gait_data = GaitData(data_frame)
        data_frame = gait_data.inverse_dynamics_2d(lmark, rmark, lforce,
                                                   rforce, 72.0, 6.0)

        # The new columns that should be created.
        new_columns = ['Left.Hip.Flexion.Angle',
                       'Left.Hip.Flexion.Rate',
                       'Left.Hip.Flexion.Moment',
                       'Left.Hip.X.Force',
                       'Left.Hip.Y.Force',
                       'Left.Knee.Flexion.Angle',
                       'Left.Knee.Flexion.Rate',
                       'Left.Knee.Flexion.Moment',
                       'Left.Knee.X.Force',
                       'Left.Knee.Y.Force',
                       'Left.Ankle.PlantarFlexion.Angle',
                       'Left.Ankle.PlantarFlexion.Rate',
                       'Left.Ankle.PlantarFlexion.Moment',
                       'Left.Ankle.X.Force',
                       'Left.Ankle.Y.Force',
                       'Right.Hip.Flexion.Angle',
                       'Right.Hip.Flexion.Rate',
                       'Right.Hip.Flexion.Moment',
                       'Right.Hip.X.Force',
                       'Right.Hip.Y.Force',
                       'Right.Knee.Flexion.Angle',
                       'Right.Knee.Flexion.Rate',
                       'Right.Knee.Flexion.Moment',
                       'Right.Knee.X.Force',
                       'Right.Knee.Y.Force',
                       'Right.Ankle.PlantarFlexion.Angle',
                       'Right.Ankle.PlantarFlexion.Rate',
                       'Right.Ankle.PlantarFlexion.Moment',
                       'Right.Ankle.X.Force',
                       'Right.Ankle.Y.Force']

        for col in new_columns:
            assert col in gait_data.data.columns

    def test_grf_landmarks(self, plot=False):
        # Test for force plate version
        gait_data = GaitData(self.data_frame)

        # BUGFIX: use integer division so the indices stay ints; under
        # Python 3 `/` yields floats and breaks the .iloc slicing below.
        min_idx = len(self.data_frame) // 3
        max_idx = 2 * len(self.data_frame) // 3
        min_time = self.data_frame.index.values.astype(float)[min_idx]
        max_time = self.data_frame.index.values.astype(float)[max_idx]

        right_strikes, left_strikes, right_offs, left_offs = \
            gait_data.grf_landmarks('Right Vertical GRF',
                                    'Left Vertical GRF',
                                    min_time=min_time,
                                    max_time=max_time,
                                    threshold=self.threshold,
                                    do_plot=plot)

        right_zero = self.data_frame['Right Vertical GRF'].iloc[min_idx:max_idx] \
            < self.threshold
        instances = right_zero.apply(lambda x: 1 if x else 0).diff()
        expected_right_offs = \
            instances[instances == 1].index.values.astype(float)
        expected_right_strikes = \
            instances[instances == -1].index.values.astype(float)

        left_zero = self.data_frame['Left Vertical GRF'].iloc[min_idx:max_idx] \
            < self.threshold
        instances = left_zero.apply(lambda x: 1 if x else 0).diff()
        expected_left_offs = \
            instances[instances == 1].index.values.astype(float)
        expected_left_strikes = \
            instances[instances == -1].index.values.astype(float)

        testing.assert_allclose(expected_right_offs, right_offs)
        testing.assert_allclose(expected_right_strikes, right_strikes)
        testing.assert_allclose(expected_left_offs, left_offs)
        testing.assert_allclose(expected_left_strikes, left_strikes)

        # TODO : Add test for accelerometer based gait landmarks

    def test_plot_landmarks(self):
        gait_data = GaitData(self.data_frame)
        gait_data.grf_landmarks('Right Vertical GRF',
                                'Left Vertical GRF',
                                threshold=self.threshold)
        side = 'right'
        col_names = ['Right Vertical GRF', 'Right Knee Angle', 'Right Knee Moment']
        time = gait_data.data.index.values.astype(float)

        assert_raises(ValueError, gait_data.plot_landmarks, [], side)
        assert_raises(ValueError, gait_data.plot_landmarks, col_names, '')
        # TODO: Test to see if user wants heelstrikes or toeoffs
        # assert_raises(ValueError, gait_data.plot_landmarks, col_names, side, event='')

    def test_split_at(self, plot=False):
        gait_data = GaitData(self.data_frame)
        gait_data.grf_landmarks('Right Vertical GRF',
                                'Left Vertical GRF',
                                threshold=self.threshold)
        side = 'right'
        series = 'Right Vertical GRF'

        # NOTE(review): .iteritems() is kept for the pandas version this
        # project pins; newer pandas (>= 2.0) removed it.
        gait_cycles = gait_data.split_at(side)
        for i, cycle in gait_cycles.iteritems():
            start_heelstrike_time = gait_data.strikes[side][i]
            end_heelstrike_time = gait_data.strikes[side][i + 1]
            hs_to_hs = gait_data.data[series][start_heelstrike_time:end_heelstrike_time]
            num_samples = len(cycle[series])
            new_time = np.linspace(0.0, end_heelstrike_time,
                                   num=num_samples + 1)
            old_time = np.linspace(0.0, end_heelstrike_time, num=num_samples)
            new_values = np.interp(new_time, old_time, hs_to_hs.values)
            testing.assert_allclose(cycle[series], new_values[:-1])

        if plot is True:
            gait_data.plot_gait_cycles(series, 'Left Vertical GRF')

        gait_cycles = gait_data.split_at(side, 'stance')
        for i, cycle in gait_cycles.iteritems():
            start_heelstrike_time = gait_data.strikes[side][i]
            end_toeoff_time = gait_data.offs[side][i + 1]
            hs_to_toeoff = gait_data.data[series][start_heelstrike_time:end_toeoff_time]
            num_samples = len(cycle[series])
            new_time = np.linspace(0.0, end_toeoff_time,
                                   num=num_samples + 1)
            old_time = np.linspace(0.0, end_toeoff_time, num=num_samples)
            new_values = np.interp(new_time, old_time, hs_to_toeoff.values)
            testing.assert_allclose(cycle[series], new_values[:-1])

        if plot is True:
            gait_data.plot_gait_cycles(series, 'Left Vertical GRF')

        gait_cycles = gait_data.split_at(side, 'swing')
        for i, cycle in gait_cycles.iteritems():
            start_toeoff_time = gait_data.offs[side][i]
            end_heelstrike_time = gait_data.strikes[side][i]
            toeoff_to_heelstrike = gait_data.data[series][start_toeoff_time:end_heelstrike_time]
            num_samples = len(cycle[series])
            new_time = np.linspace(0.0, end_heelstrike_time,
                                   num=num_samples + 1)
            old_time = np.linspace(0.0, end_heelstrike_time, num=num_samples)
            new_values = np.interp(new_time, old_time,
                                   toeoff_to_heelstrike.values)
            testing.assert_allclose(cycle[series], new_values[:-1])

        if plot is True:
            gait_data.plot_gait_cycles(series, 'Left Vertical GRF')
            import matplotlib.pyplot as plt
            plt.show()

        # TODO : Add tests for gait cycle statistics, i.e. stride frequency,
        # etc.

    def test_plot_gait_cycles(self):
        gait_data = GaitData(self.data_frame)
        gait_data.grf_landmarks('Right Vertical GRF',
                                'Left Vertical GRF',
                                threshold=self.threshold)
        gait_data.split_at('right')

        assert_raises(ValueError, gait_data.plot_gait_cycles)

    def test_save_load(self):
        gait_data = GaitData(self.data_frame)
        gait_data.grf_landmarks('Right Vertical GRF',
                                'Left Vertical GRF',
                                threshold=self.threshold)
        gait_data.split_at('right')
        gait_data.save('some_data.h5')

        gait_data_from_file = GaitData('some_data.h5')

        assert_frame_equal(gait_data.data, gait_data_from_file.data)
        for key, cycle in gait_data.gait_cycles.iteritems():
            assert_frame_equal(cycle, gait_data_from_file.gait_cycles[key])
        assert_frame_equal(gait_data.gait_cycle_stats,
                           gait_data_from_file.gait_cycle_stats)
        assert all(gait_data.strikes['right'] ==
                   gait_data_from_file.strikes['right'])
        assert all(gait_data.strikes['left'] ==
                   gait_data_from_file.strikes['left'])
        assert all(gait_data.offs['right'] ==
                   gait_data_from_file.offs['right'])
        assert all(gait_data.offs['left'] ==
                   gait_data_from_file.offs['left'])

    def teardown(self):
        # Remove the HDF5 file written by test_save_load, if it exists.
        try:
            open('some_data.h5')
        except IOError:
            pass
        else:
            os.remove('some_data.h5')
| StarcoderdataPython |
1613677 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base_blocks import BasicConv
class CEM(nn.Module):
    """Context Enhancement Module: fuses a C4 lateral branch, an upsampled C5
    branch and a global-pooled C5 context branch into one feature map."""

    def __init__(self, channels, fea_channel):
        super(CEM, self).__init__()
        # 1x1 lateral projection of C4.
        self.conv1 = BasicConv(channels[0], fea_channel, kernel_size=1, padding=0, relu=False)
        # 1x1 projection of C5 followed by 2x nearest-neighbour upsampling.
        self.conv2 = nn.Sequential(
            BasicConv(channels[1], fea_channel, kernel_size=1, padding=0, relu=False),
            nn.Upsample(scale_factor=2, mode='nearest'),
        )
        # Global-average-pooled context branch from C5.
        self.conv3 = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            BasicConv(channels[1], fea_channel, kernel_size=1, padding=0, relu=False),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, inputs):
        lat_c4 = self.conv1(inputs[0])
        lat_c5 = self.conv2(inputs[1])
        lat_glb = self.conv3(inputs[1])
        return self.relu(lat_c4 + lat_c5 + lat_glb)
def fpn_feature_extractor(fpn_level, fea_channel):
    """Build the pyramid trunk: one stride-1 3x3 conv, then stride-2 convs."""
    convs = [BasicConv(fea_channel, fea_channel, kernel_size=3, stride=1, padding=1)]
    convs.extend(BasicConv(fea_channel, fea_channel, kernel_size=3, stride=2, padding=1)
                 for _ in range(fpn_level - 1))
    return nn.ModuleList(convs)
def lateral_convs(fpn_level, fea_channel):
    """One 1x1 lateral conv per pyramid level."""
    return nn.ModuleList([BasicConv(fea_channel, fea_channel, kernel_size=1)
                          for _ in range(fpn_level)])
def fpn_convs(fpn_level, fea_channel):
    """One 3x3 smoothing conv per pyramid level."""
    return nn.ModuleList([BasicConv(fea_channel, fea_channel, kernel_size=3, stride=1, padding=1)
                          for _ in range(fpn_level)])
class FPNNeck(nn.Module):
    """CEM front-end followed by a top-down FPN over `fpn_level` levels."""

    def __init__(self, fpn_level, channels, fea_channel):
        super(FPNNeck, self).__init__()
        self.fpn_level = fpn_level
        self.ft_module = CEM(channels, fea_channel)
        self.pyramid_ext = fpn_feature_extractor(self.fpn_level, fea_channel)
        self.lateral_convs = lateral_convs(self.fpn_level, fea_channel)
        self.fpn_convs = fpn_convs(self.fpn_level, fea_channel)

    def forward(self, x):
        x = self.ft_module(x)
        # Bottom-up pyramid.
        pyramid = []
        for stage in self.pyramid_ext:
            x = stage(x)
            pyramid.append(x)
        # Top-down pathway with lateral connections.
        laterals = [conv(feat) for feat, conv in zip(pyramid, self.lateral_convs)]
        for level in range(self.fpn_level - 1, 0, -1):
            target_size = laterals[level - 1].size()[-2:]
            laterals[level - 1] = laterals[level - 1] + F.interpolate(
                laterals[level], size=target_size, mode='nearest')
        return [conv(feat) for feat, conv in zip(laterals, self.fpn_convs)]
| StarcoderdataPython |
127886 | import zengl
class Context:
    """Process-wide zengl state: the GL context plus one shared 64-byte
    (one mat4) uniform buffer and its CPU-side staging bytearray."""

    context = None
    main_uniform_buffer = None
    main_uniform_buffer_data = bytearray(b'\x00' * 64)

    @classmethod
    def initialize(cls):
        """Create the zengl context, the GPU uniform buffer and the shared
        GLSL include that exposes it to shaders."""
        gl = zengl.context()
        cls.context = gl
        cls.main_uniform_buffer = gl.buffer(size=64)
        gl.includes['main_uniform_buffer'] = '''
            layout (std140) uniform MainUniformBuffer {
                mat4 mvp;
            };
        '''

    @classmethod
    def update_camera(cls, eye, target, aspect, fov):
        """Write a new view-projection matrix into the staging bytes."""
        cls.main_uniform_buffer_data[0:64] = zengl.camera(eye, target, aspect=aspect, fov=fov)

    @classmethod
    def flush_uniform_buffer(cls):
        """Upload the staging bytes to the GPU uniform buffer."""
        cls.main_uniform_buffer.write(cls.main_uniform_buffer_data)
| StarcoderdataPython |
3355086 | <filename>utils/FastRotators/calc_rv.py
import pyfits
import argparse
import os
import spfr
import glob
from pylab import *
import pickle
# Command line: the target FITS file (or directory of FITS files), optional
# stellar model parameters, and the path to the synthetic spectral models.
parser = argparse.ArgumentParser()
parser.add_argument('directorio')
parser.add_argument('-pars',default='6000,4.5,0.0,100')
parser.add_argument('-model_path',default='../../data/COELHO2014/')
args = parser.parse_args()
dirin = args.directorio
pars = args.pars
model_path = args.model_path
# Unpack the comma-separated model parameters:
# Teff [K], log(g), [Fe/H], vsini [km/s].
teff = float(pars.split(',')[0])
logg = float(pars.split(',')[1])
feh = float(pars.split(',')[2])
vsini = float(pars.split(',')[3])
# Accept either a single FITS file or a directory containing FITS files.
try:
    # If `dirin` opens as a FITS file, process just that file and keep its
    # parent directory as the working directory.
    sc = pyfits.getdata(dirin)
    fits = [dirin]
    dirin = dirin.split('/')
    dirin = dirin[:-1]
    dt = ''
    for fn in dirin:
        dt += fn + '/'
    dirin = dt
except Exception:
    # Otherwise treat `dirin` as a directory and process every FITS file in
    # it. (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate; any read failure keeps the original best-effort intent.)
    fits = glob.glob(dirin + '/*fits')
# NOTE(review): shelling out means a `dirin` containing spaces breaks; the
# original behavior (including silent failure if FR/ exists) is preserved.
os.system('mkdir ' + dirin + '/FR')
# Main loop: for every FITS spectrum, cross-correlate against the requested
# synthetic model, derive the radial velocity (RV), its uncertainty and the
# bisector span (BS), save diagnostics, and upsert a line in results_FR.txt.
for fit in fits:
    print 'RV computation of file ' + fit + ' whith model:' + pars
    sc = pyfits.getdata(fit)    # spectrum data cube
    hd = pyfits.getheader(fit)  # FITS header (target, instrument, resolution, ...)
    # Orders bluer than 5200 A; the first one is the S/N reference region.
    I = np.where(sc[0,:,0]<5200)[0]
    npix = sc.shape[2]
    # Median S/N around the order centre
    # (assumes plane 8 of the cube holds the S/N spectrum -- TODO confirm).
    SNR_5130 = np.median(sc[8,I[0],int(0.5*npix)-100:int(0.5*npix)+100] )
    if SNR_5130 < 1.:
        SNR_5130 = 1.
    # Pixels per resolution element, then S/N per resolution element (unused below).
    pix = (sc[0,0,0]/(sc[0,0,1]-sc[0,0,0]))/float(hd['RESOL'])
    SNR_5130_R = np.around(SNR_5130*np.sqrt(pix))
    # Cross-correlate against the synthetic model; p1gau is the Gaussian fit
    # of the CCF: (amplitude, centre = RV [km/s], sigma).
    p1gau,vels,xc_av,XCmodelgau = spfr.RVforFR(sc[0],sc[5], teff=teff, logg=logg, feh=feh, vsini=vsini, model_path=model_path,vstep=5.)
    # Empirical coefficients of the RV/BS error model.
    A = 0.11081
    B = 0.0016
    D = 0.32815
    C = 0.00453
    RVerr = B + ( 1.6 + 0.2 * p1gau[2] ) * A / np.round(SNR_5130)
    BSerr = D / float(np.round(SNR_5130)) + C
    # NOTE(review): this second assignment overwrites RVerr with a half-sigma
    # variant -- presumably intentional; confirm against spfr.
    RVerr = B + (1.6+0.2*p1gau[2]*0.5)*A/np.round(SNR_5130)
    # Inflate the RV error for shallow CCFs; -999 flags an unusable fit.
    depth_fact = 1. + p1gau[0]/(p1gau[2]*np.sqrt(2*np.pi))
    if depth_fact < 0.6:
        depth_fact = 0.6
    if depth_fact >= 1.:
        RVerr2 = -999.000
    else:
        depth_fact = (1 - 0.6) / (1 - depth_fact)
        RVerr2 = RVerr * depth_fact
    RVerr2 = np.around(RVerr2,4)
    BSerr = np.around(BSerr,4)
    print '\tRV = ',p1gau[1], '+/-', RVerr2, 'km/s'
    p1gau_m = p1gau
    XCmodel = XCmodelgau
    # Persist the CCF fit (pickle) and a diagnostic plot (pdf) under FR/.
    xc_dict = {'p1gau':p1gau, 'vels':vels, 'xc_av':xc_av,'XCmodelgau':XCmodelgau}
    ccf_pdf = dirin + '/FR/' + fit.split('/')[-1][:-4] + '_XC_FR.pdf'
    pkl_xc = dirin + '/FR/' + fit.split('/')[-1][:-4] + '_XC_FR.pkl'
    pickle.dump( xc_dict, open( pkl_xc, 'w' ) )
    spfr.plot_CCF_FR(xc_dict,path=ccf_pdf)
    # Bisector span of the CCF.
    SP2 = spfr.calc_bss2(vels,xc_av,p1gau)
    print '\tBS = ',SP2, '+/-', BSerr,'km/s'
    # One fixed-width results line per spectrum.
    line_out = "%-15s %18.8f %9.4f %7.4f %9.3f %5.3f %s ceres_FR %8d %6d %5.2f %5.2f %5.1f %4.2f %5.2f %6.1f %4d %s\n"%\
               (hd['HIERARCH TARGET NAME'], 2400000.5 + float(hd['HIERARCH MBJD']), p1gau[1], RVerr2, SP2, BSerr, hd['INST'], int(hd['RESOL']), teff, logg, feh, vsini, xc_av.min(), p1gau[2],\
               hd['TEXP (s)'], SNR_5130, ccf_pdf)
    # Upsert into results_FR.txt keyed on the MBJD column (second field).
    i = 0
    isin = False
    if os.access(dirin+'/FR/results_FR.txt',os.F_OK):
        f = open(dirin+'/FR/results_FR.txt','r')
        lines = f.readlines()
        for line in lines:
            cos = line.split()
            if cos[1] == line_out.split()[1]:
                lines[i] = line_out
                isin = True
                break
            i+=1
        if not isin:
            lines.append(line_out)
    else:
        lines = [line_out]
    f = open(dirin+'/FR/results_FR.txt','w')
    for line in lines:
        f.write(line)
    f.close()
| StarcoderdataPython |
3216989 | import struct
import sys
from models import *
from utils.utils import *
# Convert Darknet/PyTorch weights to the hex .wts format:
#   usage: <weights> <cfg-name> <input-size>
size = sys.argv[3]
model_name = sys.argv[2]
# NOTE(review): `size` arrives as a string -- presumably Darknet coerces it;
# confirm against models.Darknet.
model = Darknet('cfg/' + model_name, (size, size))
weights = sys.argv[1]
device = torch_utils.select_device('0')
if weights.endswith('.pt'):  # PyTorch checkpoint
    model.load_state_dict(torch.load(weights, map_location=device)['model'])
else:  # darknet binary weights
    load_darknet_weights(model, weights)
model = model.eval()

with open(model_name[:-3] + 'wts', 'w') as wts_file:
    # Header: number of tensors, then one line per tensor:
    # "<name> <count> <sp><hex1><sp><hex2>..." (big-endian float32 hex words).
    wts_file.write('{}\n'.format(len(model.state_dict().keys())))
    for key, tensor in model.state_dict().items():
        flat = tensor.reshape(-1).cpu().numpy()
        wts_file.write('{} {} '.format(key, len(flat)))
        wts_file.write(''.join(' ' + struct.pack('>f', float(v)).hex() for v in flat))
        wts_file.write('\n')
| StarcoderdataPython |
4816578 | <filename>IMAGE_EXP/train.py<gh_stars>1-10
import os
import pickle
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import config as conf
import matplotlib.pyplot as plt
from models.gan import GAN
from models.nsgan import NSGAN
from models.dcgan import DCGAN
from models.wgan_gp import WGAN_GP
from pprint import pprint
from data_generator import DataGenerator
from sklearn.model_selection import train_test_split
import pandas as pd
# Pin the visible GPU for this process before any CUDA/TensorFlow initialisation.
os.environ["CUDA_VISIBLE_DEVICES"]="{}".format(conf.GPU)
# Registry mapping the MODEL_NAME config string to its implementation class.
MODELS = {"WGAN_GP":WGAN_GP,"GAN":GAN,"NSGAN":NSGAN,"DCGAN":DCGAN}
# Shared BCE criterion; from_logits=True means raw (pre-sigmoid) scores are expected.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def feature_normalize(features, feature_depth):
    """Map raw pixel values in [0, 255] to [-1, 1].

    `feature_depth` is accepted for interface symmetry with
    feature_denormalize but is not used.
    """
    scaled = features / 255
    return (scaled - 0.5) / 0.5
def feature_denormalize(features, feature_shape):
    """Map generator outputs in [-1, 1] back to [0, 1].

    `feature_shape` is accepted for interface symmetry but is not used.
    """
    shifted = features + 1
    return shifted / 2
def get_data_generator(data_path, batch_size, image_size):
    """Recursively collect image paths under `data_path` and build generators.

    Returns a (train, test, eval) tuple of DataGenerator instances. Split
    ratios come from sklearn's train_test_split defaults (75/25, then the
    held-out part is split 75/25 again into test/eval).
    """
    images = []
    for dirname, dirnames, filenames in os.walk(data_path):
        images += [os.path.join(dirname, f) for f in filenames]
    train_images, test_images = train_test_split(images)
    test_images, eval_images = train_test_split(test_images)
    # (An unused `nbatch = ceil(len(images) / batch_size)` computation was
    # removed as dead code; batching is handled inside DataGenerator.)
    return (DataGenerator(train_images, image_size=image_size, batch_size=batch_size),
            DataGenerator(test_images, image_size=image_size, batch_size=batch_size),
            DataGenerator(eval_images, image_size=image_size, batch_size=batch_size))
def main():
save_path = './EXP/{}/{}/{}'.format(conf.DATASET,conf.MODEL_NAME,conf.SETTING)
if not os.path.exists(save_path):
os.makedirs(save_path)
with open('{}/config.txt'.format(save_path), 'wt') as out:
pprint(vars(conf), stream=out)
print('Saving To : {}'.format(save_path))
pprint(vars(conf))
logdir = os.path.join(save_path,'Logs')
os.makedirs(logdir,exist_ok=True)
writer = tf.summary.create_file_writer(logdir)
hyparams = conf.HYPARAMS[conf.DATASET]
latent_depth = conf.LATENT_DEPTH
batch_size = conf.BATCH_SIZE
num_epochs = conf.NUM_EPOCHS
n_critic = conf.N_CRITIC
n_generator= conf.N_GENERATOR
clip_const = conf.CLIP_CONST
log_freq = conf.LOG_FREQ
dynamic_noise = conf.DYNAMIC_NOISE
classic_reference = conf.CLASSIC_REFERENCE
print(conf)
if(conf.DATASET=='celeb_a_'):
data_dir = './../SAGAN-tensorflow2.0/Dataset/CELEBA/'
train_loader,test_loader,eval_loader = get_data_generator(data_dir,batch_size,32)
train_loader = train_loader.generator()
test_loader = test_loader.generator()
eval_loader = eval_loader.generator()
num_sets = 150000
feature_shape = 3
feature_depth = 3
else:
train_loader, info = tfds.load(conf.DATASET,split="train[:80%]",with_info=True, shuffle_files=True,download=True,data_dir='./data')
test_loader,_ = tfds.load(conf.DATASET,split="train[80%:90%]",with_info=True, shuffle_files=True,download=True,data_dir='./data')
eval_loader,_ = tfds.load(conf.DATASET,split="train[90%:]",with_info=True, shuffle_files=True,download=True,data_dir='./data')
img_dim = 28
if(conf.DATASET=='celeb_a_' or conf.DATASET=='cifar10'):
img_dim = 32
train_loader= train_loader.map(lambda x: {"image":(tf.image.resize(x['image'],[img_dim,img_dim]))}).repeat().shuffle(1024).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test_loader = test_loader.map(lambda x: {"image":(tf.image.resize(x['image'],[img_dim,img_dim]))}).repeat().shuffle(1024).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
eval_loader = eval_loader.map(lambda x: {"image":(tf.image.resize(x['image'],[img_dim,img_dim]))}).repeat().shuffle(1024).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
num_sets = info.splits["train"].num_examples
feature_shape = info.features["image"].shape
feature_depth = np.prod(feature_shape)
model = MODELS[conf.MODEL_NAME](
project_shape=hyparams["project_shape"],
gen_filters_list=hyparams["gen_filters_list"],
gen_strides_list=hyparams["gen_strides_list"],
disc_filters_list=hyparams["disc_filters_list"],
disc_strides_list=hyparams["disc_strides_list"],
setting = conf.SETTING
)
dg_model = MODELS[conf.MODEL_NAME](
project_shape=hyparams["project_shape"],
gen_filters_list=hyparams["gen_filters_list"],
gen_strides_list=hyparams["gen_strides_list"],
disc_filters_list=hyparams["disc_filters_list"],
disc_strides_list=hyparams["disc_strides_list"],
setting = conf.SETTING
)
D_norm = []
G_norm = []
D_var = []
G_var = []
lr_d = conf.LR_D
lr_g = conf.LR_G
if('WGAN' in conf.MODEL_NAME):
generator_opt = tf.keras.optimizers.RMSprop(learning_rate=lr_g)
discriminator_opt = tf.keras.optimizers.RMSprop(learning_rate=lr_d)
dg_generator_opt = tf.keras.optimizers.RMSprop(lr_g)
dg_discriminator_opt = tf.keras.optimizers.RMSprop(lr_d)
else:
generator_opt = tf.keras.optimizers.Adam(lr_g)
discriminator_opt = tf.keras.optimizers.Adam(lr_d)
dg_generator_opt = tf.keras.optimizers.Adam(lr_g)
dg_discriminator_opt = tf.keras.optimizers.Adam(lr_d)
if(conf.SETTING=='mode_collapse'):
generator_opt = tf.keras.optimizers.RMSprop(learning_rate=lr_g)
discriminator_opt = tf.keras.optimizers.RMSprop(learning_rate=lr_d)
def classic_discriminator_loss(real_output, fake_output):
if('WGAN' in conf.MODEL_NAME):
real_output = tf.math.sigmoid(real_output)
fake_output = tf.math.sigmoid(fake_output)
real_loss = tf.reduce_mean(tf.math.log(real_output+1e-4))
fake_loss = tf.reduce_mean(tf.math.log(1 - fake_output + 1e-4))
total_loss = -(real_loss + fake_loss)/2
return total_loss
def classic_generator_loss(fake_output):
if( 'WGAN' in conf.MODEL_NAME):
fake_output = tf.math.sigmoid(fake_output)
return tf.reduce_mean(tf.math.log(1 - fake_output + 1e-4))
@tf.function
def train_disc_step(x, z):
with tf.GradientTape() as discriminator_tape:
discriminator_loss = model.discriminator_loss(x, z)
grads_discriminator_loss = discriminator_tape.gradient(
target=discriminator_loss, sources=model.discriminator.trainable_variables
)
discriminator_opt.apply_gradients(
zip(grads_discriminator_loss, model.discriminator.trainable_variables)
)
if('WGAN' in conf.MODEL_NAME):
for w in model.discriminator.trainable_variables:
w.assign(tf.clip_by_value(w, -clip_const, clip_const))
return discriminator_loss
@tf.function
def dg_train_disc_step(x, z,classic_reference=False):
with tf.GradientTape() as discriminator_tape:
if(classic_reference):
discriminator_loss = classic_discriminator_loss(dg_model.discriminator(x),dg_model.discriminator(dg_model.generator(z)))
else:
discriminator_loss = dg_model.discriminator_loss(x, z)
grads_discriminator_loss = discriminator_tape.gradient(
target=discriminator_loss, sources=dg_model.discriminator.trainable_variables
)
dg_discriminator_opt.apply_gradients(
zip(grads_discriminator_loss, dg_model.discriminator.trainable_variables)
)
if('WGAN' in conf.MODEL_NAME):
for w in dg_model.discriminator.trainable_variables:
w.assign(tf.clip_by_value(w, -clip_const, clip_const))
return discriminator_loss
@tf.function
def train_gen_step(z):
with tf.GradientTape() as generator_tape:
generator_loss = model.generator_loss(z)
grads_generator_loss = generator_tape.gradient(
target=generator_loss, sources=model.generator.trainable_variables
)
generator_opt.apply_gradients(
zip(grads_generator_loss, model.generator.trainable_variables)
)
return generator_loss
@tf.function
def dg_train_gen_step(z,classic_reference=False):
    """One generator update for the duality-gap copy `dg_model`.

    With classic_reference=True the classic GAN generator loss is used in
    place of the model's configured loss.
    Relies on module globals: dg_model, dg_generator_opt.
    """
    with tf.GradientTape() as generator_tape:
        if(classic_reference):
            generator_loss = classic_generator_loss(dg_model.discriminator(dg_model.generator(z)))
        else:
            generator_loss = dg_model.generator_loss(z)
    grads_generator_loss = generator_tape.gradient(
        target=generator_loss, sources=dg_model.generator.trainable_variables
    )
    dg_generator_opt.apply_gradients(
        zip(grads_generator_loss, dg_model.generator.trainable_variables)
    )
    return generator_loss
# Derived training schedule (num_sets, batch_size, num_epochs come from
# earlier in the file).
steps_per_epoch = num_sets // batch_size
train_steps = steps_per_epoch * num_epochs
# Per-step loss traces and per-epoch aggregates.
generator_losses = []
discriminator_losses = []
generator_losses_epoch = []
discriminator_losses_epoch = []
# One generated sample batch kept per epoch.
x_fakes = []
# Duality-gap history: 'vanilla' restarts the worst-case search from the
# current weights, 'local_random' from randomly perturbed weights.
DG = {
    'vanilla':[],
    'local_random':[]
}
# M1 / M2: the two one-sided worst-case scores whose difference is the gap.
M1 = {
    'vanilla':[],
    'local_random':[]
}
M2 = {
    'vanilla':[],
    'local_random':[]
}
def score():
    """Average negated discriminator loss of `dg_model` over 200 eval batches.

    Draws batches from the module-level `eval_iterator` and fresh latent
    vectors each step. `classic_reference` is read as a module global here --
    it is not defined in the visible part of the file; TODO confirm it is set
    before the first call.
    """
    n_batches = 200
    vals = []
    for _ in range(n_batches):
        batch = next(eval_iterator)
        real = feature_normalize(batch["image"], feature_depth)
        noise = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
        if classic_reference:
            vals.append(-1 * classic_discriminator_loss(dg_model.discriminator(real), dg_model.discriminator(dg_model.generator(noise))))
        else:
            vals.append(-1 * dg_model.discriminator_loss(real, noise))
    return np.average(vals)
def compute_duality_gap():
    """Estimate the duality gap DG = |M1 - M2| of the current `model`.

    M1 (worst-case discriminator) and M2 (worst-case generator) are obtained
    by copying the current weights into `dg_model` and optimizing one player
    for `epochs` steps while the other stays fixed.  Done twice: starting from
    the current weights ('vanilla') and from randomly perturbed weights
    ('local_random').  Results are appended to the module-level DG/M1/M2
    histories.  Mutates dg_model and the dg_* optimizers.
    """
    M_u_v_worst = 0
    M_u_worst_v = 0
    epochs = 300
    # Zeroed optimizer slot variables, so every inner optimization restarts
    # from a fresh optimizer state.
    g_opt_weight = [tf.zeros_like(var) for var in dg_generator_opt.get_weights()]
    d_opt_weight = [tf.zeros_like(var) for var in dg_discriminator_opt.get_weights()]
    progbar = tf.keras.utils.Progbar(epochs)
    dg_generator_opt.set_weights(g_opt_weight)
    dg_discriminator_opt.set_weights(d_opt_weight)
    # --- vanilla M2: optimize the generator against the frozen discriminator.
    dg_model.generator.set_weights(model.generator.get_weights())
    dg_model.discriminator.set_weights(model.discriminator.get_weights())
    for e in range(epochs):
        progbar.update(e)
        z_i = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
        generator_loss_i = dg_train_gen_step(z_i)
    M_u_worst_v = score()
    # --- vanilla M1: optimize the discriminator against the frozen generator.
    dg_model.generator.set_weights(model.generator.get_weights())
    dg_model.discriminator.set_weights(model.discriminator.get_weights())
    progbar = tf.keras.utils.Progbar(epochs)
    for e in range(epochs):
        progbar.update(e)
        x = next(test_iterator)
        x_i = feature_normalize(x["image"], feature_depth)
        z_i = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
        discriminator_loss_i = dg_train_disc_step(x_i, z_i)
    M_u_v_worst = score()
    DG['vanilla'].append(abs(M_u_v_worst - M_u_worst_v))
    M1['vanilla'].append(M_u_v_worst)
    M2['vanilla'].append(M_u_worst_v)
    # --- local-random M1: restart the discriminator search from perturbed
    # weights (noise scale either tracked per-layer variance or a constant).
    if(dynamic_noise):
        random_weight_init = [ w + np.random.normal(size=w.shape,scale=np.sqrt(np.asarray(D_var)[-1,i])).reshape(w.shape) for i,w in enumerate(model.discriminator.get_weights())]
    else:
        random_weight_init = [ w + np.random.normal(size=w.shape,scale=conf.PERTUB_STD).reshape(w.shape) for i,w in enumerate(model.discriminator.get_weights())]
    dg_generator_opt.set_weights(g_opt_weight)
    dg_discriminator_opt.set_weights(d_opt_weight)
    dg_model.discriminator.set_weights(random_weight_init)
    dg_model.generator.set_weights(model.generator.get_weights())
    progbar = tf.keras.utils.Progbar(epochs)
    for e in range(epochs):
        progbar.update(e)
        x = next(test_iterator)
        x_i = feature_normalize(x["image"], feature_depth)
        z_i = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
        discriminator_loss_i = dg_train_disc_step(x_i, z_i)
    M_u_v_worst = score()
    # --- local-random M2: same restart scheme for the generator.
    if(dynamic_noise):
        random_weight_init = [ w + np.random.normal(size=w.shape,scale=np.sqrt(np.asarray(G_var)[-1,i])).reshape(w.shape) for i,w in enumerate(model.generator.get_weights())]
    else:
        random_weight_init = [ w + np.random.normal(size=w.shape,scale=conf.PERTUB_STD).reshape(w.shape) for i,w in enumerate(model.generator.get_weights())]
    dg_model.generator.set_weights(random_weight_init)
    dg_model.discriminator.set_weights(model.discriminator.get_weights())
    progbar = tf.keras.utils.Progbar(epochs)
    for e in range(epochs):
        progbar.update(e)
        z_i = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
        generator_loss_i = dg_train_gen_step(z_i)
    M_u_worst_v = score()
    DG['local_random'].append(abs(M_u_v_worst - M_u_worst_v))
    M1['local_random'].append(M_u_v_worst)
    M2['local_random'].append(M_u_worst_v)
    # Fixed typo in the log message ("Dulaity" -> "Duality").
    print('Duality Gaps : \n\t Vanilla :{} \n\t Random : {}\n'.format(DG['vanilla'][-1],DG['local_random'][-1]))
# Warm-up: build the data iterators and trace the dg_* tf.functions once so
# their optimizer slot variables exist before compute_duality_gap() first
# calls get_weights()/set_weights() on them.
train_iterator = iter(train_loader)
test_iterator = iter(test_loader)
eval_iterator = iter(eval_loader)
x = next(train_iterator)
x_i = feature_normalize(x["image"], feature_depth)
z_i = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
dg_train_disc_step(x_i, z_i)
dg_train_gen_step(z_i)
dg_model.discriminator.summary()
dg_model.generator.summary()
# Fixed latent batch reused for every periodic sample grid.
z_vis = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
# Main adversarial training loop.
for i in range(0, train_steps+1):
    epoch = i // steps_per_epoch
    iteration = i
    print("Epoch: %i ====> %i / %i" % (epoch+1, i % steps_per_epoch, steps_per_epoch), end="\r")
    x = next(train_iterator)
    x_i = feature_normalize(x["image"], feature_depth)
    z_i = np.random.normal(size=[batch_size, latent_depth]).astype(np.float32)
    # n_critic discriminator updates per n_generator generator updates.
    for _ in range(n_critic):
        discriminator_loss_i = train_disc_step(x_i, z_i)
        discriminator_losses.append(discriminator_loss_i)
    for _ in range(n_generator):
        generator_loss_i = train_gen_step(z_i)
        generator_losses.append(generator_loss_i)
    if i % steps_per_epoch == 0:
        # Epoch boundary: snapshot generated samples and report mean losses.
        x_fake = model.generator(z_i, training=False)
        x_fake = feature_denormalize(x_fake, feature_shape)
        generator_loss_epoch = np.mean(generator_losses[-steps_per_epoch//n_critic:])
        discriminator_loss_epoch = np.mean(discriminator_losses[-steps_per_epoch:])
        print("Epoch: %i, Generator Loss: %f, Discriminator Loss: %f" % \
            (epoch, generator_loss_epoch, discriminator_loss_epoch)
        )
        generator_losses_epoch.append(generator_loss_epoch)
        discriminator_losses_epoch.append(discriminator_loss_epoch)
        x_fakes.append(x_fake)
    if i % log_freq == 0:
        # Periodic logging: weight statistics, duality gap, pickled metrics,
        # sample/diagnostic figures and model checkpoints.
        d_weights = model.discriminator.get_weights()
        g_weights = model.generator.get_weights()
        d_norm = [ np.linalg.norm(x) for x in d_weights ]
        g_norm = [ np.linalg.norm(x) for x in g_weights ]
        d_var = [np.var(x) for x in d_weights]
        g_var = [np.var(x) for x in g_weights]
        # D_norm/G_norm/D_var/G_var are assumed to be lists initialized
        # earlier in the file (not visible here) -- TODO confirm.
        D_norm.append(d_norm)
        G_norm.append(g_norm)
        D_var.append(d_var)
        G_var.append(g_var)
        compute_duality_gap()
        save_data = {
            'DG':DG,
            'M1':M1,
            'M2':M2,
            'D_norm':np.asarray(D_norm),
            'G_norm':np.asarray(G_norm),
            'D_var':np.asarray(D_var),
            'G_var':np.asarray(G_var),
            'D_Losses':discriminator_losses,
            'G_Losses':generator_losses,
            'X_Fakes':x_fakes
        }
        with open( os.path.join(save_path,'data.pkl'), 'wb') as fp:
            pickle.dump(save_data, fp)
        # 5x5 grid of generator samples from the fixed z_vis batch, rescaled
        # from [-1, 1] to [0, 1] for display.
        r, c = 5, 5
        noise = z_vis
        gen_imgs = model.generator(noise).numpy()
        gen_imgs = 0.5 * gen_imgs + 0.5
        fig, axs = plt.subplots(r, c)
        cnt = 0
        # NOTE: the inner `for i in ...` loops below shadow the outer step
        # counter; harmless because `for` re-binds i from range() on the next
        # outer iteration, but worth knowing when reading this block.
        if(conf.DATASET=='cifar10' or conf.DATASET=='celeb_a'):
            for i in range(r):
                for j in range(c):
                    axs[i,j].imshow(gen_imgs[cnt, :,:,:])
                    axs[i,j].axis('off')
                    cnt += 1
        else:
            for i in range(r):
                for j in range(c):
                    axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
                    axs[i,j].axis('off')
                    cnt += 1
        os.makedirs('{}/images'.format(save_path),exist_ok=True)
        fig.savefig("{}/images/{}.png".format(save_path,epoch))
        plt.close()
        # Checkpoint both networks for this step.
        weight_save_path = '{}/weights'.format(save_path)
        generator_weights = '{}/weights/G/step_{}/'.format(save_path,iteration)
        discriminator_weights = '{}/weights/D/step_{}/'.format(save_path,iteration)
        os.makedirs(generator_weights,exist_ok = True)
        os.makedirs(discriminator_weights,exist_ok = True)
        model.generator.save(generator_weights)
        model.discriminator.save(discriminator_weights)
        # Diagnostic figure: duality gap (EWM-smoothed), M1, and per-layer
        # weight norm/variance trajectories.
        r, c = 3, 2
        fig, axs = plt.subplots(r, c,figsize=(16,16))
        cnt = 0
        dg_x_axis = [i for i in range(len(DG['vanilla']))]
        axs[0,1].set_title('M1')
        _DG_ = {}
        _DG_['vanilla'] = pd.DataFrame(DG['vanilla']).ewm(alpha=0.1).mean()
        _DG_['local_random'] = pd.DataFrame(DG['local_random']).ewm(alpha=0.1).mean()
        axs[0,1].plot(dg_x_axis,M1['vanilla'],color='r',label='Vanilla')
        axs[0,1].plot(dg_x_axis,M1['local_random'], color='b',label='Local Random')
        axs[0,0].set_title('Duality Gap')
        axs[0,0].plot(dg_x_axis,_DG_['vanilla'],color='r',label='Vanilla')
        axs[0,0].plot(dg_x_axis,_DG_['local_random'], color='b',label='Ours')
        axs[0,0].legend()
        norm_axis = [i*log_freq for i in range(len(D_norm))]
        var_axis = [i*log_freq for i in range(len(D_var))]
        axs[1,0].set_title('Discriminator Weight Norm')
        for i in range(len(model.discriminator.get_weights())):
            axs[1,0].plot(norm_axis,np.asarray(D_norm)[:,i],label='Layer {}'.format(i+1))
        axs[1,1].set_title('Discriminator Weight Variance')
        for i in range(len(model.discriminator.get_weights())):
            axs[1,1].plot(var_axis,np.asarray(D_var)[:,i],label='Layer {}'.format(i+1))
        axs[2,0].set_title('Generator Weight Norm')
        for i in range(len(model.generator.get_weights())):
            axs[2,0].plot(norm_axis,np.asarray(G_norm)[:,i],label='Layer {}'.format(i+1))
        axs[2,1].set_title('Generator Weight Variance')
        for i in range(len(model.generator.get_weights())):
            axs[2,1].plot(var_axis,np.asarray(G_var)[:,i],label='Layer {}'.format(i+1))
        # plt.legend()
        fig_dir = '{}/dg_plots'.format(save_path)
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir,exist_ok=True)
        plt.savefig(fig_dir+'/{}.png'.format(iteration))
        plt.close()
if __name__ == "__main__":
    main()
1779663 | <gh_stars>0
# Write your solution here
# Report the index of the second (non-overlapping) occurrence of a substring.
word = input("Please type in a string: ")
sub = input("Please type in a substring: ")

# Locate the first occurrence, then search again starting just past it,
# mirroring the original "truncate after each match" logic.
first = word.find(sub)
second = -1 if first == -1 else word.find(sub, first + len(sub))

if second == -1:
    print("The substring does not occur twice in the string.")
else:
    print(f"The second occurrence of the substring is at index {second}.")
1606748 | <filename>DataLoaderTask/DataLoaderTask.py
# MIT License
# Copyright (c) 2020 <NAME> and <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import time
import queue
from PIL import Image
from DataLoaderTask.DataExample import DataExample
from skimage.transform import resize
import csv
import numpy as np
import os
class DataLoaderTask (threading.Thread):
    """Background thread feeding (image, depth, mask, K) examples into a queue.

    At construction it scans ``config['rgbd_dataset']['root_dir']`` two
    directory levels deep for ``data.csv`` index files listing, per row:
    image path, depth path, mask path, calibration path.  ``run()`` then
    endlessly samples random rows, loads and resizes them, and pushes
    DataExample objects onto the shared queue until ``setExit()`` is called.
    """

    def __init__(self, config, queue):
        # NOTE: the `queue` parameter shadows the module-level `queue` module
        # inside __init__ only; run() still uses queue.Full from the module.
        threading.Thread.__init__(self)
        self.q = queue
        self.datafolder = config['rgbd_dataset']['root_dir']
        # Target (width, height) for all loaded images/depth maps.
        self.dimensions = (config['dataset']['image_width'],
                           config['dataset']['image_height'])
        subfolders = [name for name in os.listdir(
            self.datafolder) if os.path.isdir(os.path.join(self.datafolder, name))]
        self.x_data = []
        self.y_data = []
        self.mask = []
        self.calibration = []
        for folder in subfolders:
            path = os.path.join(self.datafolder, folder)
            subsubfolders = [name for name in os.listdir(
                path) if os.path.isdir(os.path.join(path, name))]
            for dataset in subsubfolders:
                path_dataset = os.path.join(path, dataset, "data.csv")
                with open(path_dataset, newline='') as csvfile:
                    reader = csv.reader(csvfile, delimiter=',')
                    for row in reader:
                        # Paths in data.csv are assumed absolute (the
                        # root-relative variant was deliberately disabled):
                        # self.x_data.append(self.datafolder+'/'+row[0]) etc.
                        self.x_data.append(row[0])
                        self.y_data.append(row[1])
                        self.mask.append(row[2])
                        self.calibration.append(row[3])
        self.exit_flag = False
        self.queueLock = threading.Lock()

    def getIds(self):
        """Return a stable string id per sample (image path with '/' -> '_')."""
        ids = []
        for path in self.x_data:
            ids.append(path.replace("/", "_"))
        return ids

    def getNbrSamples(self):
        """Return the number of indexed samples."""
        return len(self.x_data)

    def setExit(self):
        """Request the loader thread to stop (thread-safe)."""
        self.queueLock.acquire()
        self.exit_flag = True
        self.queueLock.release()

    def run(self):
        """Producer loop: push randomly sampled DataExample objects forever."""
        if len(self.x_data) == 0:
            return
        while True:
            indices = np.random.randint(0, len(self.x_data))
            x_path = self.x_data[indices].replace("/", "_")
            im = Image.open(self.x_data[indices])
            # Scale factors applied to the calibration after resizing.
            sx = self.dimensions[0]/im.width
            sy = self.dimensions[1]/im.height
            image = np.array(im.resize(self.dimensions)
                             ).astype(np.float32)
            image /= 255.
            # Depth stored in millimeters; convert to meters. Zero = invalid.
            depth = np.array(Image.open(self.y_data[indices])).astype(
                np.float32)/1000.0
            mask = (depth != 0).astype(np.float32)
            depth_r = resize(depth, (self.dimensions[1], self.dimensions[0]))
            mask_r = resize(mask, (self.dimensions[1], self.dimensions[0]))
            # Keep only pixels that stayed fully valid through interpolation.
            mask_r = mask_r == 1
            mask_r = mask_r.astype(np.float32)
            depth_r = depth_r*mask_r
            depth = depth_r
            # FIX: np.bool was a deprecated alias removed in NumPy 1.24;
            # use the builtin bool instead (identical behavior).
            mask = mask_r.astype(bool)
            with open(self.calibration[indices], newline='') as csvfile:
                reader = csv.reader(csvfile, delimiter=',')
                for row in reader:
                    # Intrinsics rescaled to the resized image resolution.
                    K = np.array([[float(row[0])*sx, 0.0, float(row[2])*sx],
                                  [0.0, float(row[1])*sy, float(row[3])*sy],
                                  [0.0, 0.0, 1.0]], dtype=np.float32)
            while True:
                self.queueLock.acquire()
                if self.exit_flag:
                    self.queueLock.release()
                    return True
                self.queueLock.release()
                try:
                    self.q.put(DataExample(x_path, image, depth, mask, K),
                               block=True, timeout=1)
                    break
                except queue.Full:
                    # Queue still full after the timeout: loop to re-check the
                    # exit flag before retrying the put.
                    i = 0
| StarcoderdataPython |
78969 | '''
Objective function for hyperparameter optimization of an autoencoder with 3 hidden layers
Author(s): <NAME> (<EMAIL>)
'''
import pickle
import os
import sys
import ConfigParser
import numpy as np
from sklearn import preprocessing
from sklearn.cross_validation import KFold
sys.path.append('../..')
from deep_network import stacked_ae
def mapping(hidden_size_l1, hidden_size_l2, hidden_size_l3, learning_rate, lr_decay, regularizer, weight_decay, momentum, samples):
    """Hyperparameter-search objective (Python 2 code).

    Trains a 3-hidden-layer stacked autoencoder with k-fold cross-validation
    on the rows of the raw parametric dataset selected by `samples`, and
    returns the mean reconstruction error plus two non-negativity constraints
    enforcing decreasing layer sizes (l1 > l2 > l3) for the optimizer.
    `regularizer` is the integer index into ['l1', 'l2'].
    """
    config = ConfigParser.ConfigParser()
    config.read('../../config.ini')
    n_features = config.getint('Global', 'n_features')
    n_folds = config.getint('Global', 'n_folds')
    regularizers = ['l1', 'l2']
    reg_fn = regularizers[regularizer]
    source = config.get('Global', 'source')
    data = np.load('../../raw_parametric_'+source+'.npy')[samples]
    n_images = data.shape[0]
    print n_images
    # Normalize each sample
    s = preprocessing.MaxAbsScaler()
    data_norm = s.fit_transform(data.T).T # Min-Max normalization
    #np.savetxt("parametric.csv", data_norm, delimiter=",")
    # K-fold cross-validation
    kf = KFold(n_images, n_folds=n_folds, shuffle=True)
    rec_err_test = 0
    i = 1
    for train, test in kf:
        print 'cross validation: %d' % i
        i += 1
        # Get loss
        cost = stacked_ae(data_norm, n_features, train, test, hidden_size_l1, hidden_size_l2, hidden_size_l3, 0,
                          learning_rate, lr_decay, reg_fn, weight_decay, momentum, evaluation=True)
        rec_err_test += cost
    rec_err_test /= n_folds
    #print 'reconstruction error = %f' % rec_err_test
    # Constraint values must be >= 0 for a feasible configuration.
    con1 = int(hidden_size_l1-hidden_size_l2-1) # hidden_size_l1 > hidden_size_l2
    con2 = int(hidden_size_l2-hidden_size_l3-1) # hidden_size_l2 > hidden_size_l3
    return {
        "mapping" : rec_err_test,
        "l2_less_l1" : con1,
        "l3_less_l2" : con2
    }
    #return rec_err_test
def main(job_id, params):
    """Spearmint-style entry point (Python 2 code).

    Evaluates one hyperparameter configuration via mapping(), tracks the best
    cross-validation error so far in a pickled [best_err, stale_count] pair,
    and persists the best configuration into a per-experiment .ini file.
    Returns the mapping() result dict (objective + constraints).
    """
    print params
    config = ConfigParser.ConfigParser()
    config.read('../../config.ini')
    n_features = config.getint('Global', 'n_features')
    n_samples = config.getint('Global', 'n_samples')
    source = config.get('Global', 'source')
    # Specify number of training and testing samples
    alls = range(n_samples)
    test_start = config.getint('Global', 'test_start')
    test_end = config.getint('Global', 'test_end')
    test = range(test_start-1, test_end)
    train = [item for item in alls if item not in test]
    # Best-so-far state shared across jobs: [best_cv_error, stale_counter].
    errname = '../temp/err_ae4l'
    if os.path.isfile(errname):
        with open(errname, 'rb') as f:
            l_err = pickle.load(f) # l_err = [rec_err_cv, count]
    else:
        l_err = [1., 0]
    cfgname = '../hp_'+source+'_'+str(n_samples)+'_'+str(n_features)+'_'+str(test_start)+'-'+str(test_end)+'.ini'
    hp = ConfigParser.ConfigParser()
    hp.read(cfgname)
    results = mapping(params['size_l1'][0], params['size_l2'][0], params['size_l3'][0], params['learning_rate'][0],
                      params['lr_decay'][0], params['regularizer'][0], params['weight_decay'][0], params['momentum'][0], train)
    rec_err_cv = results["mapping"]
    if not hp.has_section('ae4l'):
        # Create the section if it does not exist.
        hp.add_section('ae4l')
        hp.write(open(cfgname,'w'))
        hp.read(cfgname)
    if rec_err_cv < l_err[0]:
        # Modify the config file if new reconstruction error is smaller
        hp.set('ae4l','hidden_size_l1',params['size_l1'][0])
        hp.set('ae4l','hidden_size_l2',params['size_l2'][0])
        hp.set('ae4l','hidden_size_l3',params['size_l3'][0])
        hp.set('ae4l','learning_rate',params['learning_rate'][0])
        hp.set('ae4l','lr_decay',params['lr_decay'][0])
        regularizers = ['l1', 'l2']
        reg_fn = regularizers[params['regularizer'][0]]
        hp.set('ae4l','regularizer',reg_fn)
        hp.set('ae4l','weight_decay',params['weight_decay'][0])
        hp.set('ae4l','momentum',params['momentum'][0])
        l_err = [rec_err_cv, 0]
        hp.write(open(cfgname,'w'))
    else:
        # No improvement: count how many evaluations since the last best.
        l_err[1] += 1
    with open(errname, 'wb') as f:
        pickle.dump(l_err, f)
    print 'optimal: ', l_err[0]
    print 'count: ', l_err[1]
    return results
| StarcoderdataPython |
3201618 | from __future__ import annotations # prevents NameError for typehints
from typing import List
from pydantic import BaseModel
from chatbot.schemas import OutMixin, QuestionIn, QuestionOut, Response
class SurveyBase(BaseModel):
    """Base schema shared by all survey request/response models."""

    # Human-readable survey title.
    name: str
class SurveyQuestionsIn(SurveyBase):
    """Deserializes the request body when creating or updating a survey."""

    # Questions submitted with the survey.
    questions: List[QuestionIn]
class SurveyQuestionsOut(SurveyBase, OutMixin):
    """Serializes a survey and its questions for a response body."""

    # Persisted questions belonging to the survey.
    questions: List[QuestionOut]
class SurveyResponsesOut(SurveyBase, OutMixin):
    """Serializes a survey and its responses for a response body."""

    # Collected responses belonging to the survey.
    responses: List[Response]
| StarcoderdataPython |
1726854 | import gspread
from os.path import join, dirname,realpath
from oauth2client.service_account import ServiceAccountCredentials
import argparse
from config import google_credential_file
# Google API scopes and service-account credentials for the sheet lookup.
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(join(dirname(realpath(__file__)),google_credential_file), scope)

if __name__ == "__main__":
    # --scp switches the output to scp-style "-P <port> user@host" ordering.
    cli = argparse.ArgumentParser(add_help=True)
    cli.add_argument("--scp", action="store_true", help="Print url and port to be used for scp")
    options = cli.parse_args()

    # The Raspberry Pi publishes its current ngrok tunnel URL in cell A1 of
    # the "Rpi Ngrok URL" spreadsheet.
    sheet = gspread.authorize(creds).open("Rpi Ngrok URL").sheet1
    tunnel_url = sheet.acell('A1').value
    # e.g. "tcp://0.tcp.ngrok.io:12345" -> host "0.tcp.ngrok.io", port "12345"
    host, port = tunnel_url.split("//")[1].split(":")

    if options.scp:
        print(f"-P {port} pi@{host}")
    else:
        print(f"pi@{host} -p {port}")
| StarcoderdataPython |
125026 | <reponame>antoorofino/Emergence_in_complex_systems_EVOLIFE
#!/usr/bin/env python#!/usr/bin/python3
#!/usr/bin/env python3
##############################################################################
# EVOLIFE http://evolife.telecom-paris.fr <NAME> #
# Telecom Paris 2021 www.dessalles.fr #
# -------------------------------------------------------------------------- #
# License: Creative Commons BY-NC-SA #
##############################################################################
" Checks whether running version is up to date "
import sys
import os
import datetime as dt
# compatibility with python 2
Python3 = (sys.version_info >= (3,0))
if Python3:
	from urllib.request import urlopen, urlretrieve
	Input = input
else:
	from urllib import urlopen, urlretrieve
	# raw_input only exists on Python 2; Input aliases the right prompt reader.
	Input = raw_input
# Remote location of the distribution archive, and the local file whose
# modification time stands in for this installation's "version date".
EVOLIFEURL = 'http://evolife.telecom-paristech.fr'
EVOLIFEFILE = 'Evolife.zip'
EVOLIFECHECKFILE = 'Main.py'	# used to determine local version date
def CheckUpToDate(RemoteUrl, LocalCheckFile):
	"""Return True when the resource at RemoteUrl is newer than LocalCheckFile.

	RemoteUrl may be an http(s) URL (its 'Last-Modified' header is used) or a
	local path (its mtime is used).  If the URL cannot be reached, returns
	False so the caller silently skips the update check.
	"""
	if RemoteUrl.startswith('http'):
		try:
			response = urlopen(RemoteUrl, timeout=1) if Python3 else urlopen(RemoteUrl)
		except Exception:
			return False	# no connexion, don't care about updates
		remote_stamp = dt.datetime.strptime(response.headers['last-modified'], '%a, %d %b %Y %H:%M:%S %Z')
	else:	# assuming local file
		remote_stamp = dt.datetime.fromtimestamp(os.path.getmtime(RemoteUrl))
	local_stamp = dt.datetime.fromtimestamp(os.path.getmtime(LocalCheckFile))
	return remote_stamp > local_stamp
def Reload(Url, RemoteFile, LocalFile):
	"""Download Url+RemoteFile into LocalFile, printing diagnostics on failure."""
	try:
		urlretrieve(Url + RemoteFile, LocalFile)
	except Exception as error:
		print(error)
		print("**** Error downloading %s \nfrom \n%s" % (RemoteFile, Url))
def CheckVersion(Url, RemoteFile, LocalCheckFile, LocalVersion=None):
	"""Interactively offer to download a newer RemoteFile from Url.

	Compares the remote archive's date with LocalCheckFile's mtime.  If a
	newer version exists, either reports an already-downloaded local archive
	or prompts the user to download / postpone / ignore.  "Ignore" and
	"suppress" touch LocalCheckFile so the warning stops reappearing.
	"""
	if LocalVersion is None:	LocalVersion = os.path.join('..', os.path.basename(RemoteFile))
	if not Url.endswith('/'):	Url += '/'
	if CheckUpToDate(Url, LocalCheckFile):
		print('New version of %s available' % os.path.splitext(os.path.basename(LocalVersion))[0])
		if os.path.exists(LocalVersion) and not CheckUpToDate(Url, LocalVersion):
			# The archive was already downloaded and is still current.
			print('\nYour local download: %s \nis ready to be installed' % LocalVersion)
			print("You should now unzip it and run 'starter' again.")
			print('Be careful, as all modified files will be lost \nif you unzip %s at the same location.' % EVOLIFEFILE)
			R = Input('\n[Return], or "s" + [Return] to suppress this warning.\n')
			if R.lower().startswith('s'):
				if os.path.exists(EVOLIFECHECKFILE):
					# Touch the check file so the version test passes next time.
					os.utime(EVOLIFECHECKFILE, None)	# file is given current time
		else:
			R = Input('download (d), not now (n), or ignore (i) this new version (d/n/i)? ').lower()
			if R.startswith('d'):
				print('Downloading...')
				Reload(Url, RemoteFile, LocalVersion)
				print('\nNew version saved to', os.path.abspath(LocalVersion))
				print("\nYou should now unzip it and run 'starter' again.")
				print('Be careful, as all modified files will be lost \nif you unzip %s at the same location.' % EVOLIFEFILE)
				Input('[Return]')
			elif R.startswith('i'):
				if os.path.exists(EVOLIFECHECKFILE):
					# Touch the check file so the warning is suppressed.
					os.utime(EVOLIFECHECKFILE, None)	# file is given current time
	else:
		# print('Your version is probably up to date')
		pass
# Run the interactive version check when invoked as a script.
if __name__ == "__main__":
	CheckVersion(EVOLIFEURL, EVOLIFEFILE, EVOLIFECHECKFILE)
| StarcoderdataPython |
1625761 | <reponame>MauMendes/python-for-everybody<gh_stars>0
# Resolved an unresolved git merge conflict: the file contained three
# byte-identical copies of this script separated by conflict markers
# (<<<<<<< / ======= / >>>>>>>); a single copy is kept.
# Track the largest and smallest integers typed until the user enters "done".
largest = None
smallest = None

while True:
    num = input('Enter a number: ')
    if num == "done":
        break
    try:
        ival = int(num)
    except ValueError:
        # FIX: the original fell through after bad input and used a stale (or
        # on the first iteration, undefined) ival; skip to the next prompt.
        print("Invalid input")
        continue
    if largest is None or ival > largest:
        largest = ival
    if smallest is None or ival < smallest:
        smallest = ival

print('Maximum is', largest)
print('Minimum is', smallest)
55836 | <gh_stars>0
import string
import re
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
# Characters treated as punctuation and English stopwords, shared by the
# cleaning/pruning helpers below.
exclude = set(string.punctuation)
stops = set(stopwords.words('english'))
def clean_document(doc, lang='en'):
    """Tokenize `doc` into cleaned lower-case tokens.

    Punctuation is replaced by spaces, then stopwords, tokens containing any
    digit, and tokens of length <= 2 or >= 20 are dropped.  `lang` is
    currently unused (English-only via the module-level stopword set).
    """
    no_punct = ''.join(ch if ch not in exclude else ' ' for ch in doc.lower())
    tokens = no_punct.split()
    kept = [tok for tok in tokens if tok not in stops]
    kept = [tok for tok in kept if re.search(r'\d', tok) is None]
    return [tok for tok in kept if 2 < len(tok) < 20]
def prune_vocabulary(documents, min_df=5, max_df=0.8, min_len=20):
    """Restrict each tokenized document to a document-frequency-pruned vocab.

    The vocabulary keeps words appearing in at least `min_df` documents and at
    most `max_df` (proportion) of them.  `min_len` is currently unused: the
    minimum-length filter is deliberately disabled (commented out) so the
    output stays aligned with `documents`; the parameter is kept for
    interface compatibility.
    """
    print("Truncating vocab with min_word_freq =", min_df, "and max_doc_prop =", max_df)
    docs = [" ".join(doc) for doc in documents]
    cvectorizer = CountVectorizer(min_df=min_df, max_df=max_df, stop_words=stops)
    cvectorizer.fit_transform(docs).sign()
    # FIX: membership tests below were O(vocab) per word against a list;
    # a set makes pruning O(1) per word with identical results.
    dictionary = set(cvectorizer.vocabulary_)
    print("Truncated vocab size:", len(dictionary))
    pruned_documents = []
    for doc in documents:
        pruned_doc = [w for w in doc if w in dictionary]
        #if len(pruned_doc) >= min_len:
        pruned_documents.append(pruned_doc)
    return pruned_documents
def prune_vocabulary2(documents_raw, min_df=5, max_df=0.8, min_len=20):
    """Clean raw documents, prune their vocabulary, and drop short documents.

    Returns two aligned lists: the lower-cased raw documents that survived,
    and their cleaned/pruned counterparts (space-joined token strings).  A
    document survives only if it keeps at least `min_len` in-vocabulary
    tokens after cleaning.
    """
    print("Truncating vocab with min_word_freq =", min_df, "and max_doc_prop =", max_df)
    documents_clean = [' '.join(clean_document(doc.lower())) for doc in documents_raw]
    print('documents_raw:', len(documents_raw))
    print('documents_clean:', len(documents_clean))
    cvectorizer = CountVectorizer(min_df=min_df, max_df=max_df, stop_words=stops)
    cvectorizer.fit_transform(documents_clean).sign()
    # FIX: use a set for O(1) membership tests during pruning (was a list,
    # making the loop below quadratic in vocabulary size).
    dictionary = set(cvectorizer.vocabulary_)
    print("Truncated vocab size:", len(dictionary))
    documents_unproc = []
    documents_proc = []
    for i in range(len(documents_clean)):
        pruned_doc = [w for w in documents_clean[i].split() if w in dictionary]
        if len(pruned_doc) >= min_len:
            documents_proc.append(' '.join(pruned_doc))
            documents_unproc.append(documents_raw[i].lower())
    return documents_unproc, documents_proc
| StarcoderdataPython |
3245221 | <gh_stars>10-100
from typing import Optional
from typing import Tuple
import tensorflow as tf
from tensorflow.keras import initializers
class Equidistant(initializers.Initializer):
    """
    Initializer whose tensors have their last axis filled with evenly spaced
    ("equidistant") values from ``minval`` to ``maxval``; every other axis
    simply repeats that ramp.  The values are deterministic, not random
    (the original docstring's "random values" wording was a copy-paste error).

    Args:
        minval: A python scalar or a scalar tensor. First (smallest) value
            of the generated ramp.
        maxval: A python scalar or a scalar tensor. Last (largest) value
            of the generated ramp. Defaults to 1 for float types.
    """

    def __init__(self, minval: float = 0.0, maxval: float = 1.0):
        self.minval = minval
        self.maxval = maxval

    def __call__(
        self, shape: Tuple[Optional[int], ...], dtype: Optional[tf.DType] = None
    ):
        """
        Compute equidistant initializations on the last axis.

        Args:
            shape: Shape of Tensor to initialize.
            dtype: DType of Tensor to initialize.

        Returns:
            Initial value: ``tf.linspace(minval, maxval, shape[-1])`` tiled
            over all leading dimensions and cast to ``dtype``.

        Raises:
            ValueError: If the last dimension of ``shape`` is unknown (None).
        """
        rank = len(shape)
        last_dim = shape[-1]
        if last_dim is None:
            raise ValueError("Cannot compute Equidistant with unknown last dimension")
        # One ramp of last_dim evenly spaced values, shaped [1, ..., 1, last_dim].
        linspace = tf.reshape(
            tf.linspace(self.minval, self.maxval, num=last_dim),
            [1] * (rank - 1) + [last_dim],
        )
        # Tile the ramp across all leading dimensions, then cast.
        return tf.cast(
            tf.tile(linspace, tf.concat([shape[:-1], [1]], axis=0)), dtype=dtype
        )

    def get_config(self) -> dict:
        """
        Obtain config.

        Returns:
            Key-value mapping of the configuration (minval/maxval).
        """
        return {"minval": self.minval, "maxval": self.maxval}
| StarcoderdataPython |
143210 | <reponame>pulumi/pulumi-kubernetes-crds
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'IBMBlockCSISpec',
'IBMBlockCSISpecController',
'IBMBlockCSISpecControllerAffinity',
'IBMBlockCSISpecControllerAffinityNodeAffinity',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
'IBMBlockCSISpecControllerAffinityPodAffinity',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerAffinityPodAntiAffinity',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerTolerations',
'IBMBlockCSISpecNode',
'IBMBlockCSISpecNodeAffinity',
'IBMBlockCSISpecNodeAffinityNodeAffinity',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
'IBMBlockCSISpecNodeAffinityPodAffinity',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeAffinityPodAntiAffinity',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeTolerations',
'IBMBlockCSISpecSidecars',
'IBMBlockCSIStatus',
]
@pulumi.output_type
class IBMBlockCSISpec(dict):
    """
    IBMBlockCSISpec defines the desired state of IBMBlockCSI.
    """

    def __init__(__self__, *,
                 controller: 'outputs.IBMBlockCSISpecController',
                 node: 'outputs.IBMBlockCSISpecNode',
                 image_pull_secrets: Optional[Sequence[str]] = None,
                 sidecars: Optional[Sequence['outputs.IBMBlockCSISpecSidecars']] = None):
        """
        IBMBlockCSISpec defines the desired state of IBMBlockCSI.

        :param 'IBMBlockCSISpecControllerArgs' controller: IBMBlockCSIControllerSpec, the desired state of the CSI controller.
        :param 'IBMBlockCSISpecNodeArgs' node: IBMBlockCSINodeSpec, the desired state of the CSI node component.
        """
        # The two mandatory sub-specs are always recorded.
        pulumi.set(__self__, "controller", controller)
        pulumi.set(__self__, "node", node)
        # Optional entries are recorded only when explicitly supplied, so an
        # omitted field never shows up as an explicit None.
        for key, value in (("image_pull_secrets", image_pull_secrets),
                           ("sidecars", sidecars)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def controller(self) -> 'outputs.IBMBlockCSISpecController':
        """IBMBlockCSIControllerSpec, the desired state of the CSI controller."""
        return pulumi.get(self, "controller")

    @property
    @pulumi.getter
    def node(self) -> 'outputs.IBMBlockCSISpecNode':
        """IBMBlockCSINodeSpec, the desired state of the CSI node component."""
        return pulumi.get(self, "node")

    @property
    @pulumi.getter(name="imagePullSecrets")
    def image_pull_secrets(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "image_pull_secrets")

    @property
    @pulumi.getter
    def sidecars(self) -> Optional[Sequence['outputs.IBMBlockCSISpecSidecars']]:
        return pulumi.get(self, "sidecars")

    def _translate_property(self, prop):
        # Map a camelCase wire name onto its snake_case attribute name,
        # falling back to the name unchanged when no mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecController(dict):
    """
    IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController.
    """

    def __init__(__self__, *,
                 repository: str,
                 tag: str,
                 affinity: Optional['outputs.IBMBlockCSISpecControllerAffinity'] = None,
                 image_pull_policy: Optional[str] = None,
                 tolerations: Optional[Sequence['outputs.IBMBlockCSISpecControllerTolerations']] = None):
        """
        IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController.

        :param 'IBMBlockCSISpecControllerAffinityArgs' affinity: Affinity is a group of affinity scheduling rules.
        :param str image_pull_policy: PullPolicy describes a policy for if/when to pull a container image.
        """
        # Required image coordinates.
        pulumi.set(__self__, "repository", repository)
        pulumi.set(__self__, "tag", tag)
        # Optional knobs are stored only when the caller provided them.
        optional_fields = {
            "affinity": affinity,
            "image_pull_policy": image_pull_policy,
            "tolerations": tolerations,
        }
        for name, value in optional_fields.items():
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def repository(self) -> str:
        return pulumi.get(self, "repository")

    @property
    @pulumi.getter
    def tag(self) -> str:
        return pulumi.get(self, "tag")

    @property
    @pulumi.getter
    def affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinity']:
        """Affinity is a group of affinity scheduling rules."""
        return pulumi.get(self, "affinity")

    @property
    @pulumi.getter(name="imagePullPolicy")
    def image_pull_policy(self) -> Optional[str]:
        """PullPolicy describes a policy for if/when to pull a container image."""
        return pulumi.get(self, "image_pull_policy")

    @property
    @pulumi.getter
    def tolerations(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerTolerations']]:
        return pulumi.get(self, "tolerations")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinity(dict):
    """
    Affinity is a group of affinity scheduling rules.
    """

    def __init__(__self__, *,
                 node_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinity'] = None,
                 pod_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinity'] = None,
                 pod_anti_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinity'] = None):
        """
        Affinity is a group of affinity scheduling rules.

        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityArgs' node_affinity: node affinity scheduling rules for the pod.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityArgs' pod_affinity: pod affinity rules (co-locate with matching pods).
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityArgs' pod_anti_affinity: pod anti-affinity rules (avoid matching pods).
        """
        def _store(name, value):
            # Every field is optional; persist it only when actually passed.
            if value is not None:
                pulumi.set(__self__, name, value)

        _store("node_affinity", node_affinity)
        _store("pod_affinity", pod_affinity)
        _store("pod_anti_affinity", pod_anti_affinity)

    @property
    @pulumi.getter(name="nodeAffinity")
    def node_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinity']:
        """Describes node affinity scheduling rules for the pod."""
        return pulumi.get(self, "node_affinity")

    @property
    @pulumi.getter(name="podAffinity")
    def pod_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinity']:
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the
        same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_affinity")

    @property
    @pulumi.getter(name="podAntiAffinity")
    def pod_anti_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinity']:
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this
        pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_anti_affinity")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinity(dict):
    """
    Describes node affinity scheduling rules for the pod.
    """

    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution'] = None):
        """
        Describes node affinity scheduling rules for the pod.

        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: soft preference terms; the scheduler favors nodes with the greatest total weight of matching terms but may still pick another node.
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: hard requirement; if unmet at scheduling time the pod is not scheduled, and if it later becomes unmet the pod may or may not be evicted.
        """
        # Both fields are optional; only supplied values are stored.
        fields = (
            ("preferred_during_scheduling_ignored_during_execution",
             preferred_during_scheduling_ignored_during_execution),
            ("required_during_scheduling_ignored_during_execution",
             required_during_scheduling_ignored_during_execution),
        )
        for name, value in fields:
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        Soft preference: the scheduler prefers nodes satisfying these terms,
        ranking nodes by the summed weight of matching terms, but may choose a
        node that violates one or more of them.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
        """
        Hard requirement: if unmet at scheduling time the pod will not be
        scheduled onto the node; if it ceases to be met during execution the
        system may or may not try to evict the pod.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A weighted preferred scheduling term. An empty term matches all objects
    with implicit weight 0 (a no-op); a null term matches no objects (also a
    no-op).
    """

    def __init__(__self__, *,
                 preference: 'outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: the node selector term associated with the weight.
        :param int weight: weight of the matching term, in the range 1-100.
        """
        # Both members are mandatory, so they are stored unconditionally.
        for name, value in (("preference", preference), ("weight", weight)):
            pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def preference(self) -> 'outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """The node selector term associated with the corresponding weight."""
        return pulumi.get(self, "preference")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """Weight of matching the node selector term, in the range 1-100."""
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
    """
    A node selector term, associated with the corresponding weight.
    """

    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: node selector requirements by node labels.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: node selector requirements by node fields.
        """
        # Both requirement lists are optional; persist only supplied ones.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
        """A list of node selector requirements by node's labels."""
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
        """A list of node selector requirements by node's fields."""
        return pulumi.get(self, "match_fields")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions(dict):
    """
    A node selector requirement: a key, an operator, and a set of values that
    the operator relates to the key.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: the label key that the selector applies to.
        :param str operator: the key's relationship to the values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: string values; must be non-empty for In/NotIn, empty for Exists/DoesNotExist, and a single integer-like element for Gt/Lt. Replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # `values` is the only optional member.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        The key's relationship to a set of values. Valid operators are In,
        NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values: non-empty for In/NotIn, empty for Exists/DoesNotExist,
        and a single element (interpreted as an integer) for Gt/Lt. This array
        is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields(dict):
    """
    A node selector requirement: a key, an operator, and a set of values that
    the operator relates to the key.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: the label key that the selector applies to.
        :param str operator: the key's relationship to the values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: string values; must be non-empty for In/NotIn, empty for Exists/DoesNotExist, and a single integer-like element for Gt/Lt. Replaced during a strategic merge patch.
        """
        # The required members first, then the lone optional one.
        for name, value in (("key", key), ("operator", operator)):
            pulumi.set(__self__, name, value)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        The key's relationship to a set of values. Valid operators are In,
        NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values: non-empty for In/NotIn, empty for Exists/DoesNotExist,
        and a single element (interpreted as an integer) for Gt/Lt. This array
        is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    Hard node-affinity requirement: if unmet at scheduling time the pod will
    not be scheduled onto the node; if it ceases to be met during execution
    the system may or may not try to evict the pod from its node.
    """

    def __init__(__self__, *,
                 node_selector_terms: Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs'] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
        """
        # The single member is required, so it is stored unconditionally.
        pulumi.set(__self__, "node_selector_terms", node_selector_terms)

    @property
    @pulumi.getter(name="nodeSelectorTerms")
    def node_selector_terms(self) -> Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']:
        """Required. A list of node selector terms. The terms are ORed."""
        return pulumi.get(self, "node_selector_terms")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms(dict):
    """
    A node selector term. A null or empty term matches no objects; its
    requirements are ANDed. The TopologySelectorTerm type implements a subset
    of the NodeSelectorTerm.
    """

    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs'] match_expressions: node selector requirements by node labels.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs'] match_fields: node selector requirements by node fields.
        """
        # Both requirement lists are optional; only supplied ones are stored.
        for name, value in (("match_expressions", match_expressions),
                            ("match_fields", match_fields)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']]:
        """A list of node selector requirements by node's labels."""
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']]:
        """A list of node selector requirements by node's fields."""
        return pulumi.get(self, "match_fields")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions(dict):
    """
    A node selector requirement: a key, an operator, and a set of values that
    the operator relates to the key.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: the label key that the selector applies to.
        :param str operator: the key's relationship to the values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: string values; must be non-empty for In/NotIn, empty for Exists/DoesNotExist, and a single integer-like element for Gt/Lt. Replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # `values` is the only optional member.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        The key's relationship to a set of values. Valid operators are In,
        NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values: non-empty for In/NotIn, empty for Exists/DoesNotExist,
        and a single element (interpreted as an integer) for Gt/Lt. This array
        is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields(dict):
    """
    A node selector requirement: a key, an operator, and a set of values that
    the operator relates to the key.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: the label key that the selector applies to.
        :param str operator: the key's relationship to the values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: string values; must be non-empty for In/NotIn, empty for Exists/DoesNotExist, and a single integer-like element for Gt/Lt. Replaced during a strategic merge patch.
        """
        # The required members first, then the lone optional one.
        for name, value in (("key", key), ("operator", operator)):
            pulumi.set(__self__, name, value)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        The key's relationship to a set of values. Valid operators are In,
        NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values: non-empty for In/NotIn, empty for Exists/DoesNotExist,
        and a single element (interpreted as an integer) for Gt/Lt. This array
        is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinity(dict):
    """
    Describes pod affinity scheduling rules (e.g. co-locate this pod in the
    same node, zone, etc. as some other pod(s)).
    """

    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: soft preference terms; the scheduler favors nodes with the greatest total weight of matching podAffinityTerms but may still pick another node.
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: hard requirement terms; all must be satisfied for the pod to be scheduled, and later violations may or may not cause eviction.
        """
        # Both term lists are optional; only supplied values are stored.
        fields = (
            ("preferred_during_scheduling_ignored_during_execution",
             preferred_during_scheduling_ignored_during_execution),
            ("required_during_scheduling_ignored_during_execution",
             required_during_scheduling_ignored_during_execution),
        )
        for name, value in fields:
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        Soft preference: the scheduler prefers nodes satisfying these terms,
        ranking nodes by the summed weight of matching podAffinityTerms, but
        may choose a node that violates one or more of them.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        Hard requirement: if unmet at scheduling time the pod is not scheduled;
        later violations (e.g. a pod label update) may or may not cause
        eviction. With multiple elements the node lists per podAffinityTerm are
        intersected, i.e. all terms must be satisfied.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A weighted pod affinity term. The weights of all matched
    WeightedPodAffinityTerm fields are added per-node to find the most
    preferred node(s).
    """

    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. The pod affinity term associated with the weight.
        :param int weight: weight of matching the podAffinityTerm, in the range 1-100.
        """
        # Both members are mandatory, so they are stored unconditionally.
        for name, value in (("pod_affinity_term", pod_affinity_term),
                            ("weight", weight)):
            pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """Required. The pod affinity term associated with the weight."""
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """Weight of matching the podAffinityTerm, in the range 1-100."""
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. The pod affinity term associated with the corresponding weight.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param str topology_key: Label key whose per-node value defines co-location: this pod should run (affinity) or not run (anti-affinity) on nodes that share the value of this label with any node hosting a pod selected by labelSelector in the given namespaces. An empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: Label query selecting the set of pods this term refers to.
        :param Sequence[str] namespaces: Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional members are only stored when explicitly supplied.
        for prop, value in (("label_selector", label_selector), ("namespaces", namespaces)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        Label key whose per-node value defines co-location for this term; must not be empty.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        return pulumi.get(self, "namespaces")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """
        Label query selecting the set of pods this term refers to.
        """
        return pulumi.get(self, "label_selector")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources — here, pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: List of label selector requirements; all requirements must hold (they are ANDed).
        :param Mapping[str, str] match_labels: Map of {key,value} pairs; each entry is equivalent to a matchExpression whose key is "key", operator is "In" and values contains only "value". All entries are ANDed.
        """
        # Only persist the selector facets the caller actually provided.
        for prop, value in (("match_expressions", match_expressions), ("match_labels", match_labels)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        Map of {key,value} pairs; each entry acts as an "In" matchExpression with a single value, and all entries are ANDed.
        """
        return pulumi.get(self, "match_labels")

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """
        List of label selector requirements; all requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    One requirement of a label selector: an operator relating a key to a set of values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: Label key the requirement applies to.
        :param str operator: Relationship of the key to the values; valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: String values; must be non-empty for In/NotIn and empty for Exists/DoesNotExist. This array is replaced wholesale during a strategic merge patch.
        """
        # key and operator are mandatory; values is operator-dependent.
        for prop, value in (("key", key), ("operator", operator)):
            pulumi.set(__self__, prop, value)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Relationship of the key to the values; one of In, NotIn, Exists, DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        Label key the requirement applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values; non-empty for In/NotIn, empty for Exists/DoesNotExist. Replaced wholesale during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    Identifies a set of pods (those matching labelSelector relative to the given
    namespace(s)) that this pod should be co-located with (affinity) or kept apart
    from (anti-affinity), where co-location means running on a node whose value of
    the label with key <topologyKey> matches that of any node hosting one of those pods.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param str topology_key: Label key whose per-node value defines co-location: this pod should run (affinity) or not run (anti-affinity) on nodes that share the value of this label with any node hosting a pod selected by labelSelector in the given namespaces. An empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: Label query selecting the set of pods this term refers to.
        :param Sequence[str] namespaces: Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional members are only stored when explicitly supplied.
        for prop, value in (("label_selector", label_selector), ("namespaces", namespaces)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        Label key whose per-node value defines co-location for this term; must not be empty.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        return pulumi.get(self, "namespaces")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """
        Label query selecting the set of pods this term refers to.
        """
        return pulumi.get(self, "label_selector")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources — here, pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: List of label selector requirements; all requirements must hold (they are ANDed).
        :param Mapping[str, str] match_labels: Map of {key,value} pairs; each entry is equivalent to a matchExpression whose key is "key", operator is "In" and values contains only "value". All entries are ANDed.
        """
        # Only persist the selector facets the caller actually provided.
        for prop, value in (("match_expressions", match_expressions), ("match_labels", match_labels)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        Map of {key,value} pairs; each entry acts as an "In" matchExpression with a single value, and all entries are ANDed.
        """
        return pulumi.get(self, "match_labels")

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """
        List of label selector requirements; all requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    One requirement of a label selector: an operator relating a key to a set of values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: Label key the requirement applies to.
        :param str operator: Relationship of the key to the values; valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: String values; must be non-empty for In/NotIn and empty for Exists/DoesNotExist. This array is replaced wholesale during a strategic merge patch.
        """
        # key and operator are mandatory; values is operator-dependent.
        for prop, value in (("key", key), ("operator", operator)):
            pulumi.set(__self__, prop, value)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Relationship of the key to the values; one of In, NotIn, Exists, DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        Label key the requirement applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values; non-empty for In/NotIn, empty for Exists/DoesNotExist. Replaced wholesale during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinity(dict):
    """
    Pod anti-affinity scheduling rules (e.g. avoid placing this pod on the same
    node, zone, etc. as certain other pod(s)).
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: Soft rules: the scheduler prefers nodes satisfying these anti-affinity expressions but may pick a violating node. A node's preference score is the sum of the "weight" of every element whose podAffinityTerm it matches, computed over nodes that meet all scheduling requirements; the highest-scoring node(s) are most preferred.
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: Hard rules: if unmet at scheduling time the pod is not scheduled onto the node; if they cease to hold during execution (e.g. a pod label update) the pod may or may not be evicted. With multiple elements, the node lists of the podAffinityTerms are intersected — every term must be satisfied.
        """
        # Only persist the rule lists the caller actually provided.
        for prop, value in (
                ("preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution),
                ("required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        Hard anti-affinity rules: unmet at scheduling time blocks placement; if they stop holding during execution the pod may or may not be evicted. Node lists of multiple terms are intersected (all must hold).
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        Soft anti-affinity rules: the scheduler favors nodes matching these expressions, summing the weights of matched terms per node; violating nodes may still be chosen.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A weighted pod-affinity term; the weights of every matched
    WeightedPodAffinityTerm are summed per node to pick the most preferred node(s).
    """
    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. The pod affinity term this weight is associated with.
        :param int weight: Weight (in the range 1-100) granted when the corresponding podAffinityTerm matches.
        """
        # Both fields are mandatory, so they are always recorded.
        for prop, value in (("pod_affinity_term", pod_affinity_term), ("weight", weight)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight (in the range 1-100) granted when the corresponding podAffinityTerm matches.
        """
        return pulumi.get(self, "weight")

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """
        Required. The pod affinity term this weight is associated with.
        """
        return pulumi.get(self, "pod_affinity_term")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. The pod affinity term associated with the corresponding weight.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param str topology_key: Label key whose per-node value defines co-location: this pod should run (affinity) or not run (anti-affinity) on nodes that share the value of this label with any node hosting a pod selected by labelSelector in the given namespaces. An empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: Label query selecting the set of pods this term refers to.
        :param Sequence[str] namespaces: Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional members are only stored when explicitly supplied.
        for prop, value in (("label_selector", label_selector), ("namespaces", namespaces)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        Label key whose per-node value defines co-location for this term; must not be empty.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        return pulumi.get(self, "namespaces")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """
        Label query selecting the set of pods this term refers to.
        """
        return pulumi.get(self, "label_selector")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources — here, pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: List of label selector requirements; all requirements must hold (they are ANDed).
        :param Mapping[str, str] match_labels: Map of {key,value} pairs; each entry is equivalent to a matchExpression whose key is "key", operator is "In" and values contains only "value". All entries are ANDed.
        """
        # Only persist the selector facets the caller actually provided.
        for prop, value in (("match_expressions", match_expressions), ("match_labels", match_labels)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        Map of {key,value} pairs; each entry acts as an "In" matchExpression with a single value, and all entries are ANDed.
        """
        return pulumi.get(self, "match_labels")

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """
        List of label selector requirements; all requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    One requirement of a label selector: an operator relating a key to a set of values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: Label key the requirement applies to.
        :param str operator: Relationship of the key to the values; valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: String values; must be non-empty for In/NotIn and empty for Exists/DoesNotExist. This array is replaced wholesale during a strategic merge patch.
        """
        # key and operator are mandatory; values is operator-dependent.
        for prop, value in (("key", key), ("operator", operator)):
            pulumi.set(__self__, prop, value)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Relationship of the key to the values; one of In, NotIn, Exists, DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        Label key the requirement applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values; non-empty for In/NotIn, empty for Exists/DoesNotExist. Replaced wholesale during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    Identifies a set of pods (those matching labelSelector relative to the given
    namespace(s)) that this pod should be co-located with (affinity) or kept apart
    from (anti-affinity), where co-location means running on a node whose value of
    the label with key <topologyKey> matches that of any node hosting one of those pods.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param str topology_key: Label key whose per-node value defines co-location: this pod should run (affinity) or not run (anti-affinity) on nodes that share the value of this label with any node hosting a pod selected by labelSelector in the given namespaces. An empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: Label query selecting the set of pods this term refers to.
        :param Sequence[str] namespaces: Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional members are only stored when explicitly supplied.
        for prop, value in (("label_selector", label_selector), ("namespaces", namespaces)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        Label key whose per-node value defines co-location for this term; must not be empty.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        Namespaces the labelSelector is matched against; null or an empty list means "this pod's namespace".
        """
        return pulumi.get(self, "namespaces")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """
        Label query selecting the set of pods this term refers to.
        """
        return pulumi.get(self, "label_selector")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources — here, pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: List of label selector requirements; all requirements must hold (they are ANDed).
        :param Mapping[str, str] match_labels: Map of {key,value} pairs; each entry is equivalent to a matchExpression whose key is "key", operator is "In" and values contains only "value". All entries are ANDed.
        """
        # Only persist the selector facets the caller actually provided.
        for prop, value in (("match_expressions", match_expressions), ("match_labels", match_labels)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        Map of {key,value} pairs; each entry acts as an "In" matchExpression with a single value, and all entries are ANDed.
        """
        return pulumi.get(self, "match_labels")

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """
        List of label selector requirements; all requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    One requirement of a label selector: an operator relating a key to a set of values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: Label key the requirement applies to.
        :param str operator: Relationship of the key to the values; valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: String values; must be non-empty for In/NotIn and empty for Exists/DoesNotExist. This array is replaced wholesale during a strategic merge patch.
        """
        # key and operator are mandatory; values is operator-dependent.
        for prop, value in (("key", key), ("operator", operator)):
            pulumi.set(__self__, prop, value)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Relationship of the key to the values; one of In, NotIn, Exists, DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        Label key the requirement applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        String values; non-empty for In/NotIn, empty for Exists/DoesNotExist. Replaced wholesale during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map generated camelCase wire names onto snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerTolerations(dict):
    """
    The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
    """
    def __init__(__self__, *,
                 effect: Optional[str] = None,
                 key: Optional[str] = None,
                 operator: Optional[str] = None,
                 toleration_seconds: Optional[int] = None,
                 value: Optional[str] = None):
        """
        The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
        :param str effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
        :param str key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
        :param str operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
        :param int toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
        :param str value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
        """
        # Every field of a toleration is optional; only persist those that
        # were explicitly provided.
        if effect is not None:
            pulumi.set(__self__, "effect", effect)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if toleration_seconds is not None:
            pulumi.set(__self__, "toleration_seconds", toleration_seconds)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def effect(self) -> Optional[str]:
        """
        Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
        """
        return pulumi.get(self, "effect")
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> Optional[str]:
        """
        Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
        """
        return pulumi.get(self, "operator")
    # Exposed on the wire as camelCase "tolerationSeconds" (see the getter name).
    @property
    @pulumi.getter(name="tolerationSeconds")
    def toleration_seconds(self) -> Optional[int]:
        """
        TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
        """
        return pulumi.get(self, "toleration_seconds")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
        """
        return pulumi.get(self, "value")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNode(dict):
    """
    IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
    """
    def __init__(__self__, *,
                 repository: str,
                 tag: str,
                 affinity: Optional['outputs.IBMBlockCSISpecNodeAffinity'] = None,
                 image_pull_policy: Optional[str] = None,
                 tolerations: Optional[Sequence['outputs.IBMBlockCSISpecNodeTolerations']] = None):
        """
        IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
        :param str repository: image repository for the node component (inferred from field name — confirm against the CRD schema).
        :param str tag: image tag for the node component (inferred from field name — confirm against the CRD schema).
        :param 'IBMBlockCSISpecNodeAffinityArgs' affinity: Affinity is a group of affinity scheduling rules.
        :param str image_pull_policy: PullPolicy describes a policy for if/when to pull a container image
        :param Sequence['IBMBlockCSISpecNodeTolerationsArgs'] tolerations: pod tolerations for the node component (no description in the CRD schema).
        """
        # repository and tag are required; the rest are optional and only
        # stored when explicitly provided.
        pulumi.set(__self__, "repository", repository)
        pulumi.set(__self__, "tag", tag)
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if image_pull_policy is not None:
            pulumi.set(__self__, "image_pull_policy", image_pull_policy)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)
    @property
    @pulumi.getter
    def repository(self) -> str:
        """Image repository for the node component (inferred from field name — confirm against the CRD schema)."""
        return pulumi.get(self, "repository")
    @property
    @pulumi.getter
    def tag(self) -> str:
        """Image tag for the node component (inferred from field name — confirm against the CRD schema)."""
        return pulumi.get(self, "tag")
    @property
    @pulumi.getter
    def affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinity']:
        """
        Affinity is a group of affinity scheduling rules.
        """
        return pulumi.get(self, "affinity")
    # Exposed on the wire as camelCase "imagePullPolicy" (see the getter name).
    @property
    @pulumi.getter(name="imagePullPolicy")
    def image_pull_policy(self) -> Optional[str]:
        """
        PullPolicy describes a policy for if/when to pull a container image
        """
        return pulumi.get(self, "image_pull_policy")
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeTolerations']]:
        """Pod tolerations for the node component (no description in the CRD schema)."""
        return pulumi.get(self, "tolerations")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinity(dict):
    """
    Affinity is a group of affinity scheduling rules.
    """
    def __init__(__self__, *,
                 node_affinity: Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinity'] = None,
                 pod_affinity: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinity'] = None,
                 pod_anti_affinity: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinity'] = None):
        """
        Affinity is a group of affinity scheduling rules.
        :param 'IBMBlockCSISpecNodeAffinityNodeAffinityArgs' node_affinity: Describes node affinity scheduling rules for the pod.
        :param 'IBMBlockCSISpecNodeAffinityPodAffinityArgs' pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param 'IBMBlockCSISpecNodeAffinityPodAntiAffinityArgs' pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        # All three affinity groups are optional; only persist those that
        # were explicitly provided.
        if node_affinity is not None:
            pulumi.set(__self__, "node_affinity", node_affinity)
        if pod_affinity is not None:
            pulumi.set(__self__, "pod_affinity", pod_affinity)
        if pod_anti_affinity is not None:
            pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)
    @property
    @pulumi.getter(name="nodeAffinity")
    def node_affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinity']:
        """
        Describes node affinity scheduling rules for the pod.
        """
        return pulumi.get(self, "node_affinity")
    @property
    @pulumi.getter(name="podAffinity")
    def pod_affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinity']:
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_affinity")
    @property
    @pulumi.getter(name="podAntiAffinity")
    def pod_anti_affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinity']:
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_anti_affinity")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinity(dict):
    """
    Describes node affinity scheduling rules for the pod.
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution'] = None):
        """
        Describes node affinity scheduling rules for the pod.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        :param 'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        # Both fields are optional; only persist those explicitly provided.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
    """
    def __init__(__self__, *,
                 preference: 'outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
        :param 'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: A node selector term, associated with the corresponding weight.
        :param int weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        # Both fields are required by the schema, so they are always stored.
        pulumi.set(__self__, "preference", preference)
        pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter
    def preference(self) -> 'outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """
        A node selector term, associated with the corresponding weight.
        """
        return pulumi.get(self, "preference")
    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
    """
    A node selector term, associated with the corresponding weight.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
        """
        A node selector term, associated with the corresponding weight.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        # Both selector lists are optional; only persist those provided.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        # key and operator are required; values is optional and only stored
        # when explicitly provided.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        # key and operator are required; values is optional and only stored
        # when explicitly provided.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
    """
    def __init__(__self__, *,
                 node_selector_terms: Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']):
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs'] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
        """
        # The single field is required by the schema, so it is always stored.
        pulumi.set(__self__, "node_selector_terms", node_selector_terms)
    @property
    @pulumi.getter(name="nodeSelectorTerms")
    def node_selector_terms(self) -> Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']:
        """
        Required. A list of node selector terms. The terms are ORed.
        """
        return pulumi.get(self, "node_selector_terms")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms(dict):
    """
    A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']] = None):
        """
        A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        # Both selector lists are optional; only persist those provided.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        # key and operator are required; values is optional and only stored
        # when explicitly provided.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        # key and operator are required; values is optional and only stored
        # when explicitly provided.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinity(dict):
    """
    Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        # Both fields are optional; only persist those explicitly provided.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
    # camelCase<->snake_case key translation table lookup (presumably invoked
    # by the Pulumi SDK — confirm).
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A weighted pod affinity term; the per-node weights of all matched
    WeightedPodAffinityTerm fields are summed to find the most preferred
    node(s).
    """
    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        :param pod_affinity_term: Required. The pod affinity term associated
            with the corresponding weight.
        :param weight: weight applied when the term matches, in the range
            1-100.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """The pod affinity term associated with ``weight``."""
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """Weight (1-100) added when the corresponding term matches."""
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. The pod affinity term associated with the corresponding weight:
    pods matching ``label_selector`` in ``namespaces`` should be co-located
    within the topology domain named by ``topology_key``.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param topology_key: node label key defining the co-location domain;
            nodes whose value for this key matches a node running a selected
            pod are considered co-located. Must not be empty.
        :param label_selector: label query selecting the pods of interest.
        :param namespaces: namespaces the selector applies to; null or an
            empty list means this pod's own namespace.
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("label_selector", label_selector),
                           ("namespaces", namespaces)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """Node label key defining the co-location domain (never empty)."""
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """Label query over a set of resources, in this case pods."""
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """Namespaces the selector matches against; default: this pod's."""
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources (pods): the conjunction (AND) of
    ``match_labels`` and ``match_expressions``.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param match_expressions: label selector requirements; ANDed together.
        :param match_labels: {key: value} pairs, each equivalent to an "In"
            expression whose values array holds only "value"; ANDed together.
        """
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("match_expressions", match_expressions),
                           ("match_labels", match_labels)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """Label selector requirements; the requirements are ANDed."""
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """Map of {key: value} pairs, ANDed with ``match_expressions``."""
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    A single label selector requirement: a key, an operator relating the key
    to a set of values, and (for set-based operators) the values themselves.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param key: the label key the selector applies to.
        :param operator: the key's relationship to the values; valid
            operators are In, NotIn, Exists and DoesNotExist.
        :param values: string values; must be non-empty for In/NotIn and
            empty for Exists/DoesNotExist. Replaced during a strategic merge
            patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # ``values`` is optional and only stored when provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """One of In, NotIn, Exists, DoesNotExist."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """Values matched against ``key`` when the operator is set-based."""
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A hard pod affinity term: the pods selected by ``label_selector`` in the
    given ``namespaces`` define a set this pod must be co-located with, where
    co-location means running on a node whose value of the ``topology_key``
    label matches that of a node running one of the selected pods.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param topology_key: node label key defining the co-location domain.
            Must not be empty.
        :param label_selector: label query selecting the pods of interest.
        :param namespaces: namespaces the selector applies to; null or an
            empty list means this pod's own namespace.
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("label_selector", label_selector),
                           ("namespaces", namespaces)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """Node label key defining the co-location domain (never empty)."""
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """Label query over a set of resources, in this case pods."""
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """Namespaces the selector matches against; default: this pod's."""
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources (pods): the conjunction (AND) of
    ``match_labels`` and ``match_expressions``.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param match_expressions: label selector requirements; ANDed together.
        :param match_labels: {key: value} pairs, each equivalent to an "In"
            expression whose values array holds only "value"; ANDed together.
        """
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("match_expressions", match_expressions),
                           ("match_labels", match_labels)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """Label selector requirements; the requirements are ANDed."""
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """Map of {key: value} pairs, ANDed with ``match_expressions``."""
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    A single label selector requirement: a key, an operator relating the key
    to a set of values, and (for set-based operators) the values themselves.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param key: the label key the selector applies to.
        :param operator: the key's relationship to the values; valid
            operators are In, NotIn, Exists and DoesNotExist.
        :param values: string values; must be non-empty for In/NotIn and
            empty for Exists/DoesNotExist. Replaced during a strategic merge
            patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # ``values`` is optional and only stored when provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """One of In, NotIn, Exists, DoesNotExist."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """Values matched against ``key`` when the operator is set-based."""
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinity(dict):
    """
    Pod anti-affinity scheduling rules (e.g. avoid putting this pod in the
    same node, zone, etc. as some other pod(s)).
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        :param preferred_during_scheduling_ignored_during_execution: soft
            anti-affinity terms; the scheduler sums the weights of matching
            terms per node and prefers the highest total, but may still pick
            a node that violates some terms.
        :param required_during_scheduling_ignored_during_execution: hard
            anti-affinity terms; all terms must be satisfied at scheduling
            time (node lists per term are intersected). If a term later
            stops being met, the system may or may not evict the pod.
        """
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (
                ("preferred_during_scheduling_ignored_during_execution",
                 preferred_during_scheduling_ignored_during_execution),
                ("required_during_scheduling_ignored_during_execution",
                 required_during_scheduling_ignored_during_execution)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        Soft anti-affinity terms: preferred but not guaranteed; weights of
        matching terms are summed per node to rank candidates.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        Hard anti-affinity terms: all must be satisfied at scheduling time;
        violations arising later may or may not lead to eviction.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A weighted pod anti-affinity term; the per-node weights of all matched
    WeightedPodAffinityTerm fields are summed to find the most preferred
    node(s).
    """
    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        :param pod_affinity_term: Required. The pod affinity term associated
            with the corresponding weight.
        :param weight: weight applied when the term matches, in the range
            1-100.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """The pod affinity term associated with ``weight``."""
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """Weight (1-100) added when the corresponding term matches."""
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. The pod affinity term associated with the corresponding weight:
    pods matching ``label_selector`` in ``namespaces`` should not be
    co-located within the topology domain named by ``topology_key``.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param topology_key: node label key defining the co-location domain;
            nodes whose value for this key matches a node running a selected
            pod are considered co-located. Must not be empty.
        :param label_selector: label query selecting the pods of interest.
        :param namespaces: namespaces the selector applies to; null or an
            empty list means this pod's own namespace.
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("label_selector", label_selector),
                           ("namespaces", namespaces)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """Node label key defining the co-location domain (never empty)."""
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """Label query over a set of resources, in this case pods."""
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """Namespaces the selector matches against; default: this pod's."""
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources (pods): the conjunction (AND) of
    ``match_labels`` and ``match_expressions``.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        :param match_expressions: label selector requirements; ANDed together.
        :param match_labels: {key: value} pairs, each equivalent to an "In"
            expression whose values array holds only "value"; ANDed together.
        """
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("match_expressions", match_expressions),
                           ("match_labels", match_labels)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """Label selector requirements; the requirements are ANDed."""
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """Map of {key: value} pairs, ANDed with ``match_expressions``."""
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    A single label selector requirement: a key, an operator relating the key
    to a set of values, and (for set-based operators) the values themselves.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param key: the label key the selector applies to.
        :param operator: the key's relationship to the values; valid
            operators are In, NotIn, Exists and DoesNotExist.
        :param values: string values; must be non-empty for In/NotIn and
            empty for Exists/DoesNotExist. Replaced during a strategic merge
            patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # ``values`` is optional and only stored when provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """One of In, NotIn, Exists, DoesNotExist."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """Values matched against ``key`` when the operator is set-based."""
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    A hard pod anti-affinity term: the pods selected by ``label_selector`` in
    the given ``namespaces`` define a set this pod must not be co-located
    with, where co-location means running on a node whose value of the
    ``topology_key`` label matches that of a node running one of the selected
    pods.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        :param topology_key: node label key defining the co-location domain.
            Must not be empty.
        :param label_selector: label query selecting the pods of interest.
        :param namespaces: namespaces the selector applies to; null or an
            empty list means this pod's own namespace.
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are only stored when explicitly provided.
        for _key, _val in (("label_selector", label_selector),
                           ("namespaces", namespaces)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """Node label key defining the co-location domain (never empty)."""
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """Label query over a set of resources, in this case pods."""
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """Namespaces the selector matches against; default: this pod's."""
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """
    # Appears auto-generated (Pulumi codegen style); prefer fixing the generator
    # over hand-editing this class.
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Both fields are optional; only present keys are stored.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")
    # Maps camelCase wire names back to snake_case attribute names for Pulumi's
    # property-translation machinery.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    # Appears auto-generated (Pulumi codegen style); prefer fixing the generator
    # over hand-editing this class.
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # 'values' is optional and only stored when supplied.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    # Maps camelCase wire names back to snake_case attribute names for Pulumi's
    # property-translation machinery.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeTolerations(dict):
    """
    The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
    """
    # Appears auto-generated (Pulumi codegen style); prefer fixing the generator
    # over hand-editing this class.
    def __init__(__self__, *,
                 effect: Optional[str] = None,
                 key: Optional[str] = None,
                 operator: Optional[str] = None,
                 toleration_seconds: Optional[int] = None,
                 value: Optional[str] = None):
        """
        The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
        :param str effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
        :param str key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
        :param str operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
        :param int toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
        :param str value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
        """
        # Every field is optional; only present keys are stored.
        if effect is not None:
            pulumi.set(__self__, "effect", effect)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if toleration_seconds is not None:
            pulumi.set(__self__, "toleration_seconds", toleration_seconds)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def effect(self) -> Optional[str]:
        """
        Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
        """
        return pulumi.get(self, "effect")
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> Optional[str]:
        """
        Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter(name="tolerationSeconds")
    def toleration_seconds(self) -> Optional[int]:
        """
        TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
        """
        return pulumi.get(self, "toleration_seconds")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
        """
        return pulumi.get(self, "value")
    # Maps camelCase wire names back to snake_case attribute names for Pulumi's
    # property-translation machinery.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecSidecars(dict):
    """CSI sidecar image specification: name, repository, tag and optional pull policy."""
    # Appears auto-generated (Pulumi codegen style); prefer fixing the generator
    # over hand-editing this class.
    def __init__(__self__, *,
                 name: str,
                 repository: str,
                 tag: str,
                 image_pull_policy: Optional[str] = None):
        """
        :param str name: The name of the csi sidecar image
        :param str repository: The repository of the csi sidecar image
        :param str tag: The tag of the csi sidecar image
        :param str image_pull_policy: The pullPolicy of the csi sidecar image
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "repository", repository)
        pulumi.set(__self__, "tag", tag)
        # pull policy is optional and only stored when supplied
        if image_pull_policy is not None:
            pulumi.set(__self__, "image_pull_policy", image_pull_policy)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the csi sidecar image
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def repository(self) -> str:
        """
        The repository of the csi sidecar image
        """
        return pulumi.get(self, "repository")
    @property
    @pulumi.getter
    def tag(self) -> str:
        """
        The tag of the csi sidecar image
        """
        return pulumi.get(self, "tag")
    @property
    @pulumi.getter(name="imagePullPolicy")
    def image_pull_policy(self) -> Optional[str]:
        """
        The pullPolicy of the csi sidecar image
        """
        return pulumi.get(self, "image_pull_policy")
    # Maps camelCase wire names back to snake_case attribute names for Pulumi's
    # property-translation machinery.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSIStatus(dict):
    """
    IBMBlockCSIStatus defines the observed state of IBMBlockCSI
    """
    # Appears auto-generated (Pulumi codegen style); prefer fixing the generator
    # over hand-editing this class.
    def __init__(__self__, *,
                 controller_ready: bool,
                 node_ready: bool,
                 phase: str,
                 version: str):
        """
        IBMBlockCSIStatus defines the observed state of IBMBlockCSI
        :param str phase: Phase is the driver running phase
        :param str version: Version is the current driver version
        """
        # controller_ready / node_ready carry no upstream description; by name
        # they look like readiness flags for the controller and node components
        # — confirm against the CRD schema before relying on that reading.
        pulumi.set(__self__, "controller_ready", controller_ready)
        pulumi.set(__self__, "node_ready", node_ready)
        pulumi.set(__self__, "phase", phase)
        pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="controllerReady")
    def controller_ready(self) -> bool:
        return pulumi.get(self, "controller_ready")
    @property
    @pulumi.getter(name="nodeReady")
    def node_ready(self) -> bool:
        return pulumi.get(self, "node_ready")
    @property
    @pulumi.getter
    def phase(self) -> str:
        """
        Phase is the driver running phase
        """
        return pulumi.get(self, "phase")
    @property
    @pulumi.getter
    def version(self) -> str:
        """
        Version is the current driver version
        """
        return pulumi.get(self, "version")
    # Maps camelCase wire names back to snake_case attribute names for Pulumi's
    # property-translation machinery.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
123865 | # -*- coding: utf-8 -*-
"""
This module handle finding the be engine dll's
"""
# ---------------------------------------------------------------------------- #
# Include
# ---------------------------------------------------------------------------- #
import sys
import os
import logging
if sys.version_info < (3, 0):
# python2
import _winreg as winreg
else:
# python3
import winreg
# ---------------------------------------------------------------------------- #
# Constants
# ---------------------------------------------------------------------------- #
# Candidate engine dll names; check_dir() probes them in this order, so the
# Be15 engine is preferred over Be10 when both are present.
ENGINE_DLLS = ('Be15Eng.dll', 'Be10Eng.dll')
REG_KEY_TARGET_PATH = r'SOFTWARE\Wow6432Node\SBi' # HKLM
REG_KEY_INSTALL_DIR_TARGET = 'InstallDir' # The name of the registry value containing the install location
# ---------------------------------------------------------------------------- #
# Functions
# ---------------------------------------------------------------------------- #
def find_engine_location():
    # type: () -> str
    """
    Locate the newest available calculation engine dll.

    The current working directory is checked first; only when no engine dll
    is found there is the Windows registry consulted.

    :return: path to the dll if successful, else None
    """
    result = check_dir(os.getcwd())
    if result is None:
        # No dll next to us — fall back to the registry scan.
        result = find_newest_engine_from_registry()
    return result
def check_dir(dir_path, engine_file_names=ENGINE_DLLS):
    # type: (str, list) -> str
    """
    Look for an engine dll inside *dir_path*.

    The candidate names are probed in order, so earlier entries of
    *engine_file_names* take precedence.

    :return: absolute path of the first dll found, else None
    """
    assert os.path.isdir(dir_path)
    candidates = (
        os.path.abspath(os.path.join(dir_path, dll_name))
        for dll_name in engine_file_names
    )
    return next((path for path in candidates if os.path.exists(path)), None)
def find_all_engines_from_registry():
    # type: () -> [str]
    """
    Collect every engine dll reachable via the registry.

    Scans the SBi registry tree for install directories that exist on disk
    and returns the engine dll paths found inside them.

    :return: list of detected engine dll paths (possibly empty)
    """
    try:
        # Catch OSError rather than FileNotFoundError: this module still
        # supports Python 2 (see the _winreg import branch), where
        # FileNotFoundError does not exist and would raise a NameError here.
        # On Python 3, FileNotFoundError is a subclass of OSError, so the
        # original registry-missing case is still handled.
        dir_paths = [path for path in scan_sub_keys(REG_KEY_TARGET_PATH) if os.path.exists(path)]
    except OSError as ex:
        logging.error(ex)
        return []
    dll_paths = []
    for dir_path in dir_paths:
        dll_path = check_dir(dir_path)
        if dll_path is not None:
            dll_paths.append(dll_path)
    return dll_paths
def find_newest_engine_from_registry():
    # type: () -> str
    """
    Pick the newest engine among those registered in the registry.

    "Newest" is the lexicographically greatest path, which matches the
    original sort-and-take-last behaviour (Be15... > Be10...).

    :return: path to the installation dll if found, else None
    """
    engines = find_all_engines_from_registry()
    if not engines:
        return None
    return max(engines)
def scan_sub_keys(reg_key_path):
    # type: (str) -> [str]
    """
    Scan *reg_key_path* (under HKLM) and all of its sub keys recursively.

    :param reg_key_path: path of the registry key to start from
    :return: list of every install location found in the subtree
    """
    locations = []
    key_handle = winreg.OpenKey(
        winreg.HKEY_LOCAL_MACHINE, reg_key_path, 0,
        winreg.KEY_READ | winreg.KEY_WOW64_64KEY)
    # First the key itself ...
    own_location = scan_reg_key_for_values(key_handle)
    if own_location is not None:
        locations.append(own_location)
    # ... then every sub key, recursively.
    sub_index = 0
    try:
        while True:
            sub_name = winreg.EnumKey(key_handle, sub_index)
            locations.extend(scan_sub_keys(r'{}\{}'.format(reg_key_path, sub_name)))
            sub_index += 1
    except OSError:
        # EnumKey raises OSError once the index runs past the last sub key.
        pass
    return locations
def scan_reg_key_for_values(reg_key):
    # type: () -> str
    """
    Search *reg_key* for the Be10/15 install-location value.

    :param reg_key: an open registry key handle
    :return: the value of the 'InstallDir' entry if present, else None
    """
    value_index = 0
    try:
        while True:
            # 'value_type' deliberately renamed so the 'type' builtin is not shadowed.
            value_name, value_data, value_type = winreg.EnumValue(reg_key, value_index)
            if value_name == REG_KEY_INSTALL_DIR_TARGET:
                return value_data
            value_index += 1
    except OSError:
        # EnumValue raises OSError once all values have been enumerated.
        pass
    return None
| StarcoderdataPython |
3275478 | <filename>old (ignore)/old/runner-old.py
#Sources:
#https://www.geeksforgeeks.org/reading-excel-file-using-python/
#https://developers.google.com/calendar/v3/reference
from __future__ import print_function
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# Reading an excel file using Python
import xlrd
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
# Give the location of the file
loc = ("Python-Test.xlsx")
# To open Workbook
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
# All reads below use xlrd's 0-based (row, col) indexing, i.e. cell (1, n)
# is the second spreadsheet row.
#Get Title/Summary
summary_in = (sheet.cell_value(1,0))
#Get Location
loc_in = (sheet.cell_value(1,1))
#Get Desc
desc_in = (sheet.cell_value(1,2))
#Get Start Time and Date
starttime_in = (sheet.cell_value(1,3))
startdate_in = (sheet.cell_value(1,4))
start_dts = startdate_in + ' ' + starttime_in
#Get End Time and Date
endtime_in = (sheet.cell_value(1,5))
enddate_in = (sheet.cell_value(1,6))
end_dts = enddate_in + ' ' + endtime_in
# NOTE(review): '%H' (24-hour) combined with '%p' (AM/PM) is an unusual mix —
# confirm it matches the sheet's actual time format.
dto_start = datetime.datetime.strptime(start_dts, '%m-%d-%Y %H:%M %p')
dto_end = datetime.datetime.strptime(end_dts, '%m-%d-%Y %H:%M %p')
#Get Attendees
# Attendees are hard-coded for now; the commented line below was an attempt
# to read them from the sheet instead.
#attendee = (sheet.cell_value(7,1))
attendees = ["<EMAIL>", "<EMAIL>"]
def main():
    """Create a calendar event from the spreadsheet data and list upcoming events.

    Authenticates against the Google Calendar API (OAuth token cached in
    token.pickle), prints the calendars visible to the account, inserts the
    event built from the module-level spreadsheet fields, then prints the
    next 10 upcoming events.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('calendar', 'v3', credentials=creds)

    # Print every calendar the account can see (summary and id), following
    # the pagination tokens until exhausted.
    page_token = None
    while True:
        calendar_list = service.calendarList().list(pageToken=page_token).execute()
        for calendar_list_entry in calendar_list['items']:
            print(calendar_list_entry['summary'])
            print(calendar_list_entry['id'])
        page_token = calendar_list.get('nextPageToken')
        if not page_token:
            break

    event = {
        'summary': summary_in,
        'location': loc_in,
        'description': desc_in,
        'start': {
            'dateTime': dto_start.isoformat("T"),
            'timeZone': 'US/Eastern',
        },
        'end': {
            'dateTime': dto_end.isoformat("T"),
            'timeZone': 'US/Eastern',
        },
        # 'recurrence': [
        #     'RRULE:FREQ=DAILY;COUNT=2'
        # ],
        # FIX: the Calendar API expects one {'email': ...} dict per attendee;
        # the previous code passed the whole list as a single 'email' value.
        'attendees': [{'email': address} for address in attendees],
        'reminders': {
            'useDefault': False,
            'overrides': [
                {'method': 'email', 'minutes': 24 * 60},
                {'method': 'popup', 'minutes': 10},
            ],
        },
    }

    event = service.events().insert(calendarId='qs64rv6jvd<EMAIL>', body=event, sendUpdates='all').execute()
    print('Event created: %s' % (event.get('htmlLink')))

    # Call the Calendar API
    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    print('Getting the upcoming 10 events')
    events_result = service.events().list(calendarId='<EMAIL>', timeMin=now,
                                          maxResults=10, singleEvents=True,
                                          orderBy='startTime').execute()
    events = events_result.get('items', [])

    if not events:
        print('No upcoming events found.')
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3387567 | """Embedding with Tensorflow1"""
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def find_embedding(graph, dimension, max_steps=1000):
    """Tries to embed given graph in given dimension.

    Builds a TF1 graph with one free embedding vector per node and a hinge
    penalty per node: the penalty is positive unless the node's farthest
    "friend" (edge sign +1) is at least 0.1 closer than its nearest "enemy"
    (edge sign -1). Adam then minimizes the summed penalty.

    Side effects: annotates ``graph`` edges/nodes with the TF variables used,
    and stores the final coordinates in ``graph.nodes[n]['embedding']``.

    Args:
        graph: the graph, in which every edge is assumed to have a 'sign'
            which is either +1 or -1.
        dimension: the dimension in which to try embedding the graph.
        max_steps: the maximum number of gradient steps to perform.

    Returns:
        obj: The value of the objective function, which will be 0.0 if
        the embedding succeeds and strictly positive, otherwise.
    """
    # make sure that no variables carry over between embeddings
    tf.reset_default_graph()
    # one trainable vector per node
    for node in graph.nodes:
        graph.nodes[node]['embedding_variable'] = tf.get_variable(str(node), [dimension])
    # one Euclidean distance per edge
    for edge in graph.edges:
        graph.edges[edge]['distance_variable'] = tf.norm(
            graph.nodes[edge[0]]['embedding_variable'] - graph.nodes[edge[1]]['embedding_variable']
        )
    # per-node hinge penalty comparing farthest friend vs. nearest enemy
    for node in graph.nodes:
        friend_distance_variables = [graph.edges[edge]['distance_variable'] for edge in graph.edges(nbunch=node) if graph.edges[edge]['sign'] == 1]
        enemy_distance_variables = [graph.edges[edge]['distance_variable'] for edge in graph.edges(nbunch=node) if graph.edges[edge]['sign'] == -1]
        if not friend_distance_variables or not enemy_distance_variables:
            # the node has only positive or only negative neighbors: no constraint
            graph.nodes[node]['objective_penalty_variable'] = tf.constant(0.0)
        else:
            # the node has both positive and negative neighbors
            graph.nodes[node]['farthest_friend_distance_variable'] = tf.reduce_max(
                tf.stack(friend_distance_variables)
            )
            graph.nodes[node]['nearest_enemy_distance_variable'] = tf.reduce_min(
                tf.stack(enemy_distance_variables)
            )
            # hinge with a 0.1 margin; exactly 0.0 once the constraint is met
            graph.nodes[node]['objective_penalty_variable'] = tf.maximum(
                graph.nodes[node]['farthest_friend_distance_variable'] - graph.nodes[node]['nearest_enemy_distance_variable'] + 0.1,
                0.0
            )
    # total objective and optimizer (learning rate shrinks with dimension)
    obj = sum(graph.nodes[node]['objective_penalty_variable'] for node in graph.nodes)
    opt = tf.train.AdamOptimizer(1.0/dimension).minimize(obj)
    # FIX: run the session as a context manager so its resources are released
    # even on early return or exception (the old code never closed it, leaking
    # across repeated embedding attempts).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(max_steps):
            if sess.run(obj) == 0.0:
                break
            sess.run(opt)
        # copy the final coordinates out of the session
        for node in graph.nodes:
            graph.nodes[node]['embedding'] = tuple(sess.run(graph.nodes[node]['embedding_variable']))
        return sess.run(obj)
| StarcoderdataPython |
4832039 | """
Django settings for tsadm project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
import tsadm.config
tsadm.config.setdefault('BASE_DIR', BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the fallback SECRET_KEY below is a hardcoded literal — fine
# only for development; production must supply DJANGO_SECRET_KEY via config.
SECRET_KEY = tsadm.config.get('DJANGO_SECRET_KEY', 'pUV1i7uH58cMQaQv')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = tsadm.config.get('DEBUG', False)
ALLOWED_HOSTS = [tsadm.config.get('MASTER_SERVER', 'dev.tsadm.local')]
DEBUG_PROPAGATE_EXCEPTIONS = False
TEMPLATE_DEBUG = DEBUG
TEMPLATE_DIRS = ('/'.join([BASE_DIR, 'templates']),)
TEMPLATE_LOADERS = ('django.template.loaders.filesystem.Loader',)
# Make missing template variables visible instead of rendering empty strings.
TEMPLATE_STRING_IF_INVALID = 'TMPL_MISS:%s'
# Application definition
# Most django.contrib apps are deliberately disabled; auth/sessions are not used.
INSTALLED_APPS = (
    #'django.contrib.admin',
    #'django.contrib.auth',
    #'django.contrib.contenttypes',
    #'django.contrib.sessions',
    #'django.contrib.messages',
    'django.contrib.staticfiles',
    #'django_extensions',
    'tsadm.site',
    'tsadm.git',
    'tsadm.rsync',
    'tsadm._mysql',
    'tsadm.help',
    'tsadm.jobq',
    'tsadm.user',
    'tsadm.slave',
    'tsadm.admin',
    'tsadm.ansible',
)
MIDDLEWARE_CLASSES = (
    'tsadm.wapp.TSAdmWAppCleanHTML',
    'tsadm.wapp.TSAdmWAppResponseHeaders',
    #'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    #'django.middleware.csrf.CsrfViewMiddleware',
    #'django.contrib.auth.middleware.AuthenticationMiddleware',
    #'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tsadm.urls'
WSGI_APPLICATION = 'tsadm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Using tsadm.db module as at the time of starting this project Django
# didn't have support for mysql under python3.
# tsadm.db uses mysql.connector
# ENGINE/NAME are intentionally left empty: database access goes through
# tsadm.db instead of Django's ORM (see the comment above).
DATABASES = {
    'default': {
        #'ENGINE': 'django.db.backends.sqlite3',
        #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'ENGINE': '',
        'NAME': '',
    }
}
#SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#SESSION_SAVE_EVERY_REQUEST = True
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = tsadm.config.get('LANG_CODE', 'en-gb')
TIME_ZONE = tsadm.config.get('TIME_ZONE', 'Europe/London')
USE_I18N = False
USE_L10N = False
USE_TZ = True
DEFAULT_CHARSET = tsadm.config.get('CHARSET', 'utf-8')
FILE_CHARSET = DEFAULT_CHARSET
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ('/'.join([BASE_DIR, 'static']),)
# File-based cache; location/timeout/prefix all overridable via tsadm.config.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': tsadm.config.get('DJANGO_CACHE_PATH', '/var/tmp/tsadmdev_cache'),
        'TIMEOUT': tsadm.config.get('DJANGO_CACHE_TIMEOUT', 15),
        'KEY_PREFIX': tsadm.config.get('DJANGO_CACHE_KEY_PREFIX', 'tsadmdev:')
    }
}
| StarcoderdataPython |
3377391 | # -*- coding: utf-8 -*-
"""
Created Date: 2021-03-22 Am
@author: XiaoQingLin
"""
import getopt
import sys
from binary_python.binary_python import start_binary_encrypt
# NOTE: this docstring doubles as the CLI help text — execute() prints it via
# usage.__doc__ — so it is user-facing runtime output. Keep it in the users'
# language and do not reword it casually.
def usage():
    """
    python代码 加密|加固
    参数说明:
        -i  | --input_file_path    待加密文件或文件夹路径,可是相对路径或绝对路径
        -o  | --output_file_path   加密后的文件输出路径,默认在input_file_path下创建binary文件夹,存放加密后的文件
        -I  | --ignore_files       不需要加密的文件或文件夹,逗号分隔
        -m  | --except_main_file   不加密包含__main__的文件(主文件加密后无法启动), 值为0、1。 默认为1
    """
def execute():
    """Parse the CLI options and start the binary encryption.

    Prints the usage text and exits on malformed options, on ``-h``, or when
    no input path is supplied; otherwise delegates to start_binary_encrypt().
    """
    try:
        # Only the parse itself can raise GetoptError; the old code wrapped
        # the whole function body, which obscured where the error came from.
        options, args = getopt.getopt(
            sys.argv[1:],
            "hi:o:I:m:",
            [
                "help",
                "input_file_path=",
                "output_file_path=",
                "ignore_files=",
                "except_main_file=",
            ],
        )
    except getopt.GetoptError:
        print(usage.__doc__)
        sys.exit()
    input_file_path = output_file_path = ignore_files = ""
    # NOTE(review): the help text says -m defaults to 1, but the code uses 0 —
    # confirm which default is intended before changing either.
    except_main_file = 0
    for name, value in options:
        if name in ("-h", "--help"):
            print(usage.__doc__)
            sys.exit()
        elif name in ("-i", "--input_file_path"):
            input_file_path = value
        elif name in ("-o", "--output_file_path"):
            output_file_path = value
        elif name in ("-I", "--ignore_files"):
            # comma-separated list of files/dirs to skip
            ignore_files = value.split(",")
        elif name in ("-m", "--except_main_file"):
            except_main_file = int(value)
    if not input_file_path:
        print("需指定-i 或 input_file_path")
        print(usage.__doc__)
        sys.exit()
    start_binary_encrypt(input_file_path, output_file_path, ignore_files, except_main_file)
| StarcoderdataPython |
1659860 | import requests
import json
import uuid
import boto3
# One-shot script: downloads a single public photo, indexes the face into an
# AWS Rekognition collection keyed by a fresh UUID, and persists the
# uuid -> (name, photo link) mapping to disk.
client = boto3.client('rekognition')
# store each person by a uuid instead of by name
uuid_to_person_map = {}
photo_link = ("https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/"
              "Official_portrait_of_Vice_President_Joe_Biden.jpg/"
              "1024px-Official_portrait_of_Vice_President_Joe_Biden.jpg")
name = "<NAME>"
this_uuid = str(uuid.uuid4())
print("{} ({}): {}".format(name, this_uuid, photo_link))
# update our mapping
uuid_to_person_map[this_uuid] = [name, photo_link]
# actually get the photo
# NOTE(review): the HTTP status is never checked — a failed download would
# push an error page's bytes to Rekognition; consider photo.raise_for_status().
photo = requests.get(photo_link)
# now load it into our rekognition collection
response = client.index_faces(
    CollectionId='company-photos',
    Image={
        'Bytes': photo.content
    },
    ExternalImageId=str(this_uuid)
)
# persist our uuid to photo mapping to a file
# (overwrites any previous mapping file rather than merging into it)
with open("uuid_to_person_map.json", 'w') as mapfile:
    mapfile.write(json.dumps(uuid_to_person_map))
| StarcoderdataPython |
1733555 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-26 10:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (see the "Generated by Django 1.11" header).
# Applied migrations are part of the schema history — do not restructure or
# hand-edit; create a new migration for any further change.
class Migration(migrations.Migration):
    dependencies = [
        ('query', '0023_auto_20181026_1010'),
    ]
    operations = [
        # Time span of a commercial within a video.
        migrations.CreateModel(
            name='LabeledCommercial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.FloatField()),
                ('end', models.FloatField()),
                ('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='query.Video')),
            ],
        ),
        # Time span of an interview, with up to two interviewers and two guests
        # (all optional free-text names).
        migrations.CreateModel(
            name='LabeledInterview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.FloatField()),
                ('end', models.FloatField()),
                ('interviewer1', models.CharField(blank=True, default=None, max_length=256, null=True)),
                ('interviewer2', models.CharField(blank=True, default=None, max_length=256, null=True)),
                ('guest1', models.CharField(blank=True, default=None, max_length=256, null=True)),
                ('guest2', models.CharField(blank=True, default=None, max_length=256, null=True)),
                ('original', models.BooleanField(default=True)),
                ('scattered_clips', models.BooleanField(default=False)),
                ('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='query.Video')),
            ],
        ),
        # Time span of a panel segment and its panelist count.
        migrations.CreateModel(
            name='LabeledPanel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.FloatField()),
                ('end', models.FloatField()),
                ('num_panelists', models.IntegerField()),
                ('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='query.Video')),
            ],
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.