1686645
import json
import responses
from pbpstats.data_loader.live.boxscore.file import LiveBoxscoreFileLoader
from pbpstats.data_loader.live.boxscore.loader import LiveBoxscoreLoader
from pbpstats.data_loader.live.boxscore.web import LiveBoxscoreWebLoader
from pbpstats.resources.boxscore.live_boxscore_item import LiveBoxscoreItem
class TestLiveBoxscoreLoader:
game_id = "0022000001"
data_directory = "tests/data"
def test_file_loader_loads_data(self):
source_loader = LiveBoxscoreFileLoader(self.data_directory)
boxscore_loader = LiveBoxscoreLoader(self.game_id, source_loader)
assert len(boxscore_loader.items) == 36
assert isinstance(boxscore_loader.items[0], LiveBoxscoreItem)
assert boxscore_loader.items[0].player_id == 203952
assert boxscore_loader.items[0].total_seconds == 1874
assert boxscore_loader.items[0].name == "<NAME>"
assert boxscore_loader.items[0].team_id == 1610612744
@responses.activate
def test_web_loader_loads_data(self):
with open(f"{self.data_directory}/game_details/live_{self.game_id}.json") as f:
boxscore_response = json.loads(f.read())
boxscore_url = f"https://nba-prod-us-east-1-mediaops-stats.s3.amazonaws.com/NBA/liveData/boxscore/boxscore_{self.game_id}.json"
responses.add(responses.GET, boxscore_url, json=boxscore_response, status=200)
source_loader = LiveBoxscoreWebLoader(self.data_directory)
boxscore_loader = LiveBoxscoreLoader(self.game_id, source_loader)
assert len(boxscore_loader.items) == 36
assert isinstance(boxscore_loader.items[0], LiveBoxscoreItem)
assert boxscore_loader.items[0].player_id == 203952
assert boxscore_loader.items[0].total_seconds == 1874
assert boxscore_loader.items[0].name == "<NAME>"
assert boxscore_loader.items[0].team_id == 1610612744
1686688
import pytest
import numpy as np
from astropy.utils.data import download_file
from jdaviz.app import Application
# This file is originally from
# https://data.sdss.org/sas/dr14/manga/spectro/redux/v2_1_2/7495/stack/manga-7495-12704-LOGCUBE.fits.gz
URL = 'https://stsci.box.com/shared/static/28a88k1qfipo4yxc4p4d40v4axtlal8y.fits'
""" The purpose of this test is to check that both methods:
- app.get_viewer('spectrum-viewer').data()
- app.get_data_from_viewer("spectrum-viewer")
return the same spectrum values.
"""
@pytest.fixture
def jdaviz_app():
return Application(configuration='cubeviz')
@pytest.mark.filterwarnings('ignore')
@pytest.mark.remote_data
def test_data_retrieval(jdaviz_app):
fn = download_file(URL, cache=True)
jdaviz_app.load_data(fn)
# two ways of retrieving data from the viewer.
# They should return the same spectral values
a1 = jdaviz_app.get_viewer('spectrum-viewer').data()
a2 = jdaviz_app.get_data_from_viewer("spectrum-viewer")
test_value_1 = a1[0].data
test_value_2 = list(a2.values())[0].data
assert np.allclose(test_value_1, test_value_2, atol=1e-5)
1686700
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
__all__ = ['markov']
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
# noinspection PyPep8Naming
class markov_gen(object):
"""Markov model.
The `startprob` keyword specifies the start probabilities for the model.
The `transmat` keyword specifies the transition probabilities the model
follows.
Methods
-------
score(x, startprob, transmat)
Log probability of the given data `x`.
    sample(startprob, transmat, size=1)
Draw random samples from a Markov model.
fit(x)
Fits a Markov model from data via MLE or MAP.
Parameters
----------
%(_doc_default_callparams)s
    Alternatively, the object may be called (as a function) to fix the start
    probabilities and transition matrix, returning a "frozen" Markov model:
    rv = markov(startprob=None, transmat=None)
        - Frozen object with the same methods but holding the given
          start probabilities and transitions fixed.
Examples
--------
>>> from mlpy.stats.models import markov
>>> startprob = np.array([0.1, 0.4, 0.5])
>>> transmat = np.array([[0.3, 0.2, 0.5], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4]])
>>> m = markov(startprob, transmat)
>>> m.sample(size=2)
[[2 2]]
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) <NAME> and <NAME>
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def __init__(self):
super(markov_gen, self).__init__()
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
    def __call__(self, startprob, transmat):
        return markov_frozen(startprob, transmat)
def score(self, x, startprob, transmat):
"""Log probability for a given data `x`.
        Parameters
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : float
The log probability of the data.
"""
log_transmat = np.log(transmat + np.finfo(float).eps)
log_startprob = np.log(startprob + np.finfo(float).eps)
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum(njk * log_transmat)
return logp + log_prior
def sample(self, startprob, transmat, size=1):
"""Sample from a Markov model.
        Parameters
        ----------
        %(_doc_default_callparams)s
        size : int or tuple
            Size of the sample to draw. A scalar `size` is interpreted as
            ``(1, size)``, i.e. one sequence of length `size`.
Returns
-------
vals: ndarray
The sampled sequences of size (nseq, seqlen).
"""
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
nseq, seqlen = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
return vals
def fit(self, x):
"""Fit a Markov model from data via MLE or MAP.
        Parameters
----------
x : ndarray[int]
Observed data
Returns
-------
%(_doc_default_callparams)s
"""
# TODO: allow to pass pseudo_counts as parameter?
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
startprob = normalize(np.bincount(x[:, 0])) + pi_pseudo_counts - 1
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
return startprob, transmat
markov = markov_gen()
# noinspection PyPep8Naming
class markov_frozen(object):
def __init__(self, startprob, transmat):
"""Create a "frozen" Markov model.
Parameters
----------
startprob : array_like
Start probabilities
transmat : array_like
Transition matrix
"""
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat
def score(self, x):
return self._model.score(x, self.startprob, self.transmat)
def sample(self, size=1):
return self._model.sample(self.startprob, self.transmat, size)
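# Usage sketch (mirrors the class docstring example; the numbers are illustrative):
#
#   import numpy as np
#   startprob = np.array([0.1, 0.4, 0.5])
#   transmat = np.array([[0.3, 0.2, 0.5],
#                        [0.6, 0.3, 0.1],
#                        [0.1, 0.5, 0.4]])
#   rv = markov(startprob, transmat)   # frozen model
#   seqs = rv.sample(size=(2, 4))      # 2 sequences of length 4
#   logp = rv.score(seqs)              # one log-probability per sequence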
1686707
from typing import Optional
import django_admin_relation_links
from adminutils import options
from authtools import admin as authtools_admin
from django.contrib import admin
from enumfields.admin import EnumFieldListFilter
from rangefilter.filter import DateRangeFilter
from solo.admin import SingletonModelAdmin
from eahub.base import models
from eahub.base.models import User
from eahub.profiles.models import Profile
@admin.register(models.User)
class UserAdmin(
django_admin_relation_links.AdminChangeLinksMixin, authtools_admin.UserAdmin
):
list_select_related = ["profile"]
list_display = [
"is_active",
"email",
"profile_link",
"is_profile_approved",
"date_joined",
"last_login",
"is_superuser",
"is_staff",
"get_visibility",
]
change_links = ["profile"]
list_filter = [
"is_superuser",
"is_staff",
"is_active",
"profile__is_approved",
("profile__visibility", EnumFieldListFilter),
("date_joined", DateRangeFilter),
("last_login", DateRangeFilter),
]
search_fields = ["email", "profile__first_name", "profile__last_name"]
@options(desc="Approved", boolean=True)
def is_profile_approved(self, user) -> Optional[bool]:
profile = get_profile(user)
if profile is None:
return None
return profile.is_approved
@options(desc="Visibility")
def get_visibility(self, user) -> str:
profile = get_profile(user)
if profile is None:
return ""
return profile.visibility.value
def get_profile(user: User) -> Optional[Profile]:
try:
return user.profile
except Profile.DoesNotExist:
return None
@admin.register(models.MessagingLog)
class MessagingLogAdmin(admin.ModelAdmin):
list_display = [
"sender_email",
"recipient_email",
"recipient_type",
"send_action_uuid",
"time",
]
list_filter = [
"recipient_type",
("time", DateRangeFilter),
]
search_fields = ["sender", "recipient"]
admin.site.register(models.FeedbackURLConfig, SingletonModelAdmin)
1686719
import pomdp_py
class Observation(pomdp_py.Observation):
"""Defines the Observation for the continuous light-dark domain;
Observation space:
:math:`\Omega\subseteq\mathbb{R}^2` the observation of the robot is
an estimate of the robot position :math:`g(x_t)\in\Omega`.
"""
    # the number of decimal places an observation's position is rounded to.
PRECISION=2
def __init__(self, position, discrete=False):
"""
        Initializes an observation in the light-dark domain.
Args:
position (tuple): position of the robot.
"""
self._discrete = discrete
if len(position) != 2:
raise ValueError("Observation position must be a vector of length 2")
if self._discrete:
self.position = position
else:
self.position = (round(position[0], Observation.PRECISION),
round(position[1], Observation.PRECISION))
def discretize(self):
return Observation(self.position, discrete=True)
def __hash__(self):
return hash(self.position)
def __eq__(self, other):
if isinstance(other, Observation):
return self.position == other.position
else:
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
return "Observation(%s)" % (str(self.position))
1686737
from .util import is_module_available
__all__ = []
if is_module_available("aspell"):
from .corrector_aspell import AspellChecker
__all__.extend(["AspellChecker"])
if is_module_available("jamspell"):
from .corrector_jamspell import JamspellChecker
__all__.extend(["JamspellChecker"])
1686740
import logging
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from rest_framework.parsers import JSONParser
from pss_project.api.serializers.rest.OLTPBenchSerializer import OLTPBenchSerializer
from pss_project.api.serializers.database.OLTPBenchResultSerializer import OLTPBenchResultSerializer
from rest_framework.authentication import BasicAuthentication
logger = logging.getLogger()
class OLTPBenchViewSet(viewsets.ViewSet):
def create(self, request):
""" First check that the an authorized user posted the request. Then validate the API request body. Next convert
the request body into a format suitable for the database. Finally, store the new OLTPBench test result in the
database. """
user = BasicAuthentication().authenticate(request)
if user is None:
logger.debug('Invalid authentication')
return Response({'message': "Forbidden"}, status=status.HTTP_403_FORBIDDEN)
data = JSONParser().parse(request)
api_serializer = OLTPBenchSerializer(data=data)
if not api_serializer.is_valid():
logger.debug(f'Bad Request: {api_serializer.errors}')
return Response(api_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
api_serializer.save()
db_serializer = OLTPBenchResultSerializer(data=api_serializer.instance.convert_to_db_json())
db_serializer.smudge_timestamp()
if not db_serializer.is_valid():
return Response(db_serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
try:
db_serializer.save()
except Exception as err:
logger.error(f'OLTPBenchViewSet create failed: {err}')
return Response({'message': str(err)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(api_serializer.validated_data, status=status.HTTP_201_CREATED)
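# Exercising this endpoint with DRF's test client might look like the sketch
# below; the URL route and payload shape are assumptions, not part of this file.
#
#   from rest_framework.test import APIClient
#   client = APIClient()
#   client.credentials(HTTP_AUTHORIZATION='Basic <base64 credentials>')
#   response = client.post('/oltpbench/', payload, format='json')
#   assert response.status_code == 201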
1686744
import time
from busio import I2C
from adafruit_seesaw.seesaw import Seesaw
from adafruit_seesaw.pwmout import PWMOut
from adafruit_motor import motor
import neopixel
import audioio
import audiocore
import board
print("The voyages of the CPX-1701!")
# Create seesaw object
i2c = I2C(board.SCL, board.SDA)
seesaw = Seesaw(i2c)
# Create one motor on seesaw PWM pins 22 & 23
motor_a = motor.DCMotor(PWMOut(seesaw, 22), PWMOut(seesaw, 23))
# audio output
cpx_audio = audioio.AudioOut(board.A0)
# neopixels!
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=1)
pixels.fill((0, 0, 0))
# give me a second before starting
time.sleep(1)
motor_a.throttle = 0 # warp drive off
f = open("01space.wav", "rb")
wav = audiocore.WaveFile(f)
cpx_audio.play(wav)
t = time.monotonic() # take a timestamp
# slowly power up the dilithium crystals
for i in range(50):
pixels.fill((0, 0, i))
time.sleep(0.05)
# 6 seconds after audio started...
while time.monotonic() - t < 6:
pass
motor_a.throttle = 1 # full warp drive on!
# wait for music to end
while cpx_audio.playing:
pass
f.close()
# play the warp drive and theme music!
f = open("02warp.wav", "rb")
wav = audiocore.WaveFile(f)
cpx_audio.play(wav)
time.sleep(1)
# blast off!
pixels.fill((255, 0, 0))
# pulse the warp core
while True:
for i in range(255, 0, -5):
pixels.fill((i, 0, 0))
for i in range(0, 255, 5):
pixels.fill((i, 0, 0))
# wait for music to end
while cpx_audio.playing:
pass
f.close()
1686753
import datetime
import discord
import logging
from discord.ext import commands
from typing import Union, List
from cogs.utils.db_objects import LogConfig, BoardConfig, SlimEventConfig
log = logging.getLogger(__name__)
class Utils(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._messages = {}
async def log_config(self, channel_id: int, log_type: str) -> Union[LogConfig, None]:
query = """SELECT guild_id,
channel_id,
"interval",
toggle,
type,
detailed
FROM logs
WHERE channel_id=$1
AND type=$2
"""
fetch = await self.bot.pool.fetchrow(query, channel_id, log_type)
if not fetch:
return None
return LogConfig(bot=self.bot, record=fetch)
async def board_config(self, message_id: int) -> Union[BoardConfig, None]:
query = """SELECT guild_id,
channel_id,
icon_url,
title,
render,
sort_by,
toggle,
type,
in_event,
message_id,
per_page,
page,
season_id
FROM boards
WHERE message_id = $1
"""
fetch = await self.bot.pool.fetchrow(query, message_id)
if not fetch:
return None
return BoardConfig(bot=self.bot, record=fetch)
async def get_board_channels(self, guild_id: int, board_type: str) -> Union[List[int], None]:
query = "SELECT message_id FROM boards WHERE guild_id = $1 AND type = $2 AND toggle = True;"
fetch = await self.bot.pool.fetch(query, guild_id, board_type)
return [n["message_id"] for n in fetch]
async def board_config_from_channel(self, channel_id: int, board_type: str) -> Union[BoardConfig, None]:
query = """SELECT guild_id,
channel_id,
icon_url,
title,
render,
sort_by,
toggle,
type,
in_event,
message_id,
per_page,
page,
season_id
FROM boards
WHERE channel_id = $1
AND type = $2
"""
fetch = await self.bot.pool.fetchrow(query, channel_id, board_type)
if not fetch:
return None
return BoardConfig(bot=self.bot, record=fetch)
async def get_board_configs(self, guild_id: int, board_type: str) -> List[BoardConfig]:
message_ids = await self.get_board_channels(guild_id, board_type)
if not message_ids:
return list()
message_ids = [int(n) for n in message_ids]
configs = list()
for n in message_ids:
configs.append(await self.board_config(n))
return configs
async def event_config(self, guild_id: int) -> Union[SlimEventConfig, None]:
query = """SELECT id,
start,
finish,
event_name,
channel_id,
guild_id
FROM events
WHERE guild_id = $1
AND CURRENT_TIMESTAMP < finish
ORDER BY start DESC;
"""
fetch = await self.bot.pool.fetchrow(query, guild_id)
if not fetch:
return None
return SlimEventConfig(fetch['id'], fetch['start'],
fetch['finish'], fetch['event_name'],
fetch['channel_id'], fetch['guild_id'])
async def get_message(self, channel: discord.TextChannel, message_id: int) -> Union[discord.Message, None]:
try:
return self._messages[message_id]
except KeyError:
msg = self._messages[message_id] = await channel.fetch_message(message_id)
return msg
async def safe_send(self, channel_id, content=None, embed=None):
channel = self.bot.get_channel(channel_id)
try:
return await channel.send(content, embed=embed)
except (discord.Forbidden, discord.NotFound, AttributeError):
return await self.bot.pool.execute("UPDATE logs SET toggle = FALSE WHERE channel_id = $1", channel_id)
        except Exception:
log.exception(f"{channel} failed to send {content} {embed}")
async def channel_log(self, channel_id, log_type, message=None, embed_to_send=None, colour=None, embed=True):
config = await self.log_config(channel_id, log_type)
        if not config or not config.channel or not config.toggle:
return
if embed_to_send:
e = embed_to_send
c = None
elif embed:
e = discord.Embed(colour=colour or self.bot.colour,
description=message,
timestamp=datetime.datetime.utcnow())
c = None
else:
e = None
c = message
try:
await config.channel.send(content=c, embed=e)
except (discord.Forbidden, discord.HTTPException):
return
async def event_config_id(self, event_id: int) -> Union[None, SlimEventConfig]:
query = """SELECT id,
start,
finish,
event_name,
channel_id,
guild_id
FROM events
WHERE id = $1
"""
fetch = await self.bot.pool.fetchrow(query, event_id)
if not fetch:
return None
return SlimEventConfig(fetch['id'], fetch['start'],
fetch['finish'], fetch['event_name'],
fetch['channel_id'], fetch['guild_id'])
def setup(bot):
bot.add_cog(Utils(bot))
1686810
from django.db.models import Q
from django.core.exceptions import ValidationError
from settings.local import people_who_need_to_know_about_failures
from settings.local import inventorys_email
from email.mime.text import MIMEText
import ipaddr
import smtplib
import re
import urllib
# http://dev.mysql.com/doc/refman/5.0/en/miscellaneous-functions.html
# Prevent this case http://people.mozilla.com/~juber/public/t1_t2_scenario.txt
# TODO, put this in a try accept and always unlock things
def locked_function(lock_name, timeout=10):
def decorator(f):
def new_function(*args, **kwargs):
from django.db import connection
cursor = connection.cursor()
cursor.execute(
"SELECT GET_LOCK('{lock_name}', {timeout});".format(
lock_name=lock_name, timeout=timeout
)
)
ret = f(*args, **kwargs)
cursor.execute(
"SELECT RELEASE_LOCK('{lock_name}');".format(
lock_name=lock_name
)
)
return ret
return new_function
return decorator
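# Usage sketch: serialize a critical section across processes via MySQL's
# GET_LOCK/RELEASE_LOCK (the lock name below is illustrative):
#
#   @locked_function('inventory.delete_object', timeout=10)
#   def delete_object(obj):
#       ...  # code that must not run concurrently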
def fail_mail(content, subject='Inventory is having issues.',
to=people_who_need_to_know_about_failures,
from_=inventorys_email):
"""Send email about a failure."""
if not to:
return
msg = MIMEText(content)
msg['Subject'] = subject
    msg['From'] = from_
# msg['To'] = to
s = smtplib.SMTP('localhost')
s.sendmail(from_, to, msg.as_string())
s.quit()
class IPFilterSet(object):
"""The IPFilterSet expects that all IPFilters added to it are of the same
type. This might be useful later.
"""
def __init__(self):
self.ipfs = []
def add(self, ipf):
self.ipfs.append(ipf)
def pprint(self):
for ipf in self.ipfs:
            print(ipf)
def pprint_intersect(self):
for intersect in self.calc_intersect():
            print(intersect)
def calc_intersect(self):
"""
This is where the magic comes from. Given a list of IPFilter objects,
figure the ranges that are common to all the IPFilters, and create a
new list of IPFilter objects that represent this range.
"""
def trim(self, r, rs, ip_type):
if not (rs and r):
return r
r1 = rs[0]
rx = self.intersect(r, r1, ip_type)
return self.trim(rx, rs[1:], ip_type)
def intersect(self, r1, r2, ip_type):
"""
Cases:
* Subset or equal
* Left intersect
* Right intersect
* No intersect
"""
if r1.start > r2.end:
return None
# We have intersection somewhere.
        if r1.start == r2.start and r1.end == r2.end:
            # r1 is equal to r2
# Low High
# r1 |---------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end < r2.end:
# r1 is subset of r2
# Low High
# r1 |-------|
# r2 |---------|
# rx |---------|
return r1
        if r1.start > r2.start and r1.end > r2.end:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
            return IPFilter(str(r1.start), str(r2.end), ip_type)
if r1.start < r2.start and r1.end < r2.end:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
            return IPFilter(str(r2.start), str(r1.end), ip_type)
class IPFilter(object):
def __init__(self, start, end, ip_type, object_=None):
self.object_ = object_ # The composite object (it can be None)
self.ip_type = ip_type
self.start, self.end, self.Q = start_end_filter(start, end, ip_type)
def __str__(self):
return "{0} -- {1}".format(self.start, self.end)
def __repr__(self):
return str(self)
def start_end_filter(start, end, ip_type):
ip_type = ip_type
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
istart = IPKlass(start)
iend = IPKlass(end)
if int(istart) > int(iend):
raise ValidationError("start cannot be greater than end")
start_upper, start_lower = one_to_two(int(istart))
end_upper, end_lower = one_to_two(int(iend))
# Equal uppers. Lower must be within.
if start_upper == end_upper:
q = Q(ip_upper=start_upper,
ip_lower__gte=start_lower,
ip_lower__lte=end_lower,
ip_type=ip_type)
else:
q = Q(ip_upper__gt=start_upper, ip_upper__lt=end_upper,
ip_type=ip_type)
return istart, iend, q
def overlap(r1, r2, ip_type=None, cast_to_int=False):
if cast_to_int:
if ip_type == '4':
IP = ipaddr.IPv4Address
elif ip_type == '6':
IP = ipaddr.IPv6Address
else:
raise Exception('Not using overlap right. Missing ip_type')
to_int = lambda r: (int(IP(r[0])), int(IP(r[1])))
return _overlap(to_int(r1), to_int(r2))
else:
return _overlap(r1, r2)
def _overlap(r1, r2):
# Make r1 always larger than r2
size = lambda r: abs(r[0] - r[1])
if size(r1) > size(r2):
(r1_start, r1_end), (r2_start, r2_end) = r1, r2
else:
# They could be the same size
(r1_start, r1_end), (r2_start, r2_end) = r2, r1
if r1_start > r2_end or r1_end < r2_start: # no overlap
return None
if r1_start <= r2_start and r1_end >= r2_end:
# r2 is subset of r1 or equal
# Low High
# r1 |---------|
# r2 |-------|
# rx |---------|
# OR
# Low High
# r1 |---------|
# r2 |---------|
# rx |---------|
return r2
if r1_start >= r2_start and r1_end >= r2_end:
# Low High
# r1 |-----------|
# r2 |---------|
# rx |------|
return r1_start, r2_end
if r1_start <= r2_start and r1_end <= r2_end:
# Low High
# r1 |-----------|
# r2 |---------|
# rx |------|
return r2_start, r1_end
def networks_to_Q(networks):
"""Take a list of network objects and compile a Q that matches any object
that exists in one of those networks."""
    q = Q(pk__lt=-1)  # start from an always-false Q so an empty network list matches nothing
for network in networks:
network.update_ipf()
q = q | network.ipf.Q
return q
def two_to_four(start, end):
start_upper = start >> 64
start_lower = start & (1 << 64) - 1
end_upper = end >> 64
end_lower = end & (1 << 64) - 1
return start_upper, start_lower, end_upper, end_lower
def one_to_two(ip):
return (ip >> 64, ip & (1 << 64) - 1)
def two_to_one(upper, lower):
return long(upper << 64) + long(lower)
def four_to_two(start_upper, start_lower, end_upper, end_lower):
start = (start_upper << 64) + start_lower
end = (end_upper << 64) + end_lower
return start, end
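# Worked example of the 128-bit <-> two 64-bit halves round trip
# (pure integer arithmetic, no database required):
#
#   >>> ip = int(ipaddr.IPv6Address('2001:db8::1'))
#   >>> upper, lower = one_to_two(ip)
#   >>> two_to_one(upper, lower) == ip
#   True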
def int_to_ip(ip, ip_type):
"""A wrapper that converts a 32 or 128 bit integer into human readable IP
format."""
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
return str(IPKlass(ip))
def ip_to_int(ip, ip_type):
"""A wrapper that converts a string to 32 or 128 bit integer"""
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
return int(IPKlass(ip))
def resolve_ip_type(ip_str):
if ip_str.find(':') > -1:
Klass = ipaddr.IPv6Network
ip_type = '6'
elif ip_str.find('.') > -1:
Klass = ipaddr.IPv4Network
ip_type = '4'
else:
Klass = None
ip_type = None
return ip_type, Klass
def to_a(text, obj, use_absolute_url=True):
if use_absolute_url:
return "<a href='{0}'>{1}</a>".format(obj.get_absolute_url(), text)
else:
return "<a href='{0}'>{1}</a>".format(obj, text)
def create_key_index(kvs):
index = {}
for kv in kvs:
index[kv['key']] = kv
return index
def mozillian(name):
return "https://mozillians.org/en-US/search/?q={0}".format(
urllib.quote_plus(name)
)
def mozillian_a(name):
return "<a href='{0}'>{1}</a>".format(mozillian(re.escape(name)), name)
1686820
from __future__ import annotations
from typing import Any, Dict, Optional, Text, Type
import dataclasses
import uuid
from rasa.engine.caching import Cacheable, TrainingCache
from rasa.engine.graph import ExecutionContext, GraphComponent, SchemaNode
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
class PrecomputedValueProvider(GraphComponent):
"""Holds the precomputed values of a `GraphNode` from a previous training.
Pre-computed values can either be
- values loaded from cache
- values which were provided during the fingerprint run by input nodes
"""
def __init__(self, output: Cacheable):
"""Initializes a `PrecomputedValueProvider`.
Args:
output: The precomputed output to return.
"""
self._output = output
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> PrecomputedValueProvider:
"""Creates instance (see parent class for full docstring)."""
return cls(output=config["output"])
def get_value(self) -> Cacheable:
"""Returns the precomputed output."""
return self._output
@classmethod
def replace_schema_node(cls, node: SchemaNode, output: Any) -> None:
"""Updates a `SchemaNode` to use a `PrecomputedValueProvider`.
This is for when we want to use the precomputed output value of a node from a
previous training in a subsequent training. We replace the class in the `uses`
        of the node to be a `PrecomputedValueProvider` configured to return the
precomputed value.
Args:
node: The node to update.
output: precomputed cached output that the `PrecomputedValueProvider` will
return.
"""
node.uses = cls
node.config = {"output": output}
node.fn = cls.get_value.__name__
node.constructor_name = cls.create.__name__
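# Sketch of the intended use: after a cache hit, rewire the schema node so the
# next graph run returns the cached value instead of re-running the component.
# `node` is any `SchemaNode`; `cached_value` stands in for the restored output.
#
#   PrecomputedValueProvider.replace_schema_node(node, cached_value)
#   # node now "uses" PrecomputedValueProvider and its fn is get_value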
@dataclasses.dataclass
class FingerprintStatus:
"""Holds the output of a `FingerprintComponent` and is used to prune the graph.
Attributes:
output_fingerprint: A fingerprint of the node's output value.
is_hit: `True` if node's fingerprint key exists in the cache, `False` otherwise.
"""
output_fingerprint: Optional[Text]
is_hit: bool
def fingerprint(self) -> Text:
"""Returns the internal fingerprint.
If there is no fingerprint returns a random string that will never match.
"""
return self.output_fingerprint or uuid.uuid4().hex
class FingerprintComponent(GraphComponent):
"""Replaces non-input nodes during a fingerprint run."""
def __init__(
self,
cache: TrainingCache,
config_of_replaced_component: Dict[Text, Any],
class_of_replaced_component: Type,
) -> None:
"""Initializes a `FingerprintComponent`.
Args:
cache: Training cache used to determine if the run is a hit or not.
config_of_replaced_component: Needed to generate the fingerprint key.
class_of_replaced_component: Needed to generate the fingerprint key.
"""
self._cache = cache
self._config_of_replaced_component = config_of_replaced_component
self._class_of_replaced_component = class_of_replaced_component
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> FingerprintComponent:
"""Creates a `FingerprintComponent` (see parent class for full docstring)."""
return cls(
cache=config["cache"],
config_of_replaced_component=config["config_of_replaced_component"],
class_of_replaced_component=config["graph_component_class"],
)
def run(self, **kwargs: Any) -> FingerprintStatus:
"""Calculates the fingerprint key to determine if cached output can be used.
If the fingerprint key matches an entry in the cache it means that there has
been a previous node execution which matches the same component class, component
config and input values. This means that we can potentially prune this node
from the schema, or replace it with a cached value before the next graph run.
Args:
**kwargs: Inputs from all parent nodes.
Returns:
A `FingerprintStatus` determining if the run was a hit, and if it was a hit
also the output fingerprint from the cache.
"""
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=self._class_of_replaced_component,
config={
**self._class_of_replaced_component.get_default_config(),
**self._config_of_replaced_component,
},
inputs=kwargs,
)
output_fingerprint = self._cache.get_cached_output_fingerprint(fingerprint_key)
return FingerprintStatus(
is_hit=output_fingerprint is not None, output_fingerprint=output_fingerprint
)
@classmethod
def replace_schema_node(cls, node: SchemaNode, cache: TrainingCache) -> None:
"""Updates a `SchemaNode` to use a `FingerprintComponent`.
This is for when we want to do a fingerprint run. During the fingerprint run we
replace all non-input nodes with `FingerprintComponent`s so we can determine
whether they are able to be pruned or cached before the next graph run without
running the actual components.
Args:
node: The node to update.
            cache: The cache is needed to determine whether there is a cache hit
                for the fingerprint key.
"""
graph_component_class = node.uses
node.uses = cls
# We update the node to be "eager" so that `FingerprintComponent.run` sees
# ALL the inputs to the node. If it was not eager, we would miss any args used
# by the constructor.
node.eager = True
node.constructor_name = cls.create.__name__
node.fn = cls.run.__name__
node.config = {
"config_of_replaced_component": node.config,
"cache": cache,
"graph_component_class": graph_component_class,
}
1686841
from ..model import Model
from . import loadDefaultParams as dp
from . import timeIntegration as ti
class ThalamicMassModel(Model):
"""
Two population thalamic model
Reference:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (2016). A thalamocortical neural mass
model of the EEG during NREM sleep and its response to auditory stimulation.
PLoS computational biology, 12(9).
"""
name = "thalamus"
description = "Two population thalamic mass model"
init_vars = [
"V_t_init",
"V_r_init",
"Q_t_init",
"Q_r_init",
"Ca_init",
"h_T_t_init",
"h_T_r_init",
"m_h1_init",
"m_h2_init",
"s_et_init",
"s_gt_init",
"s_er_init",
"s_gr_init",
"ds_et_init",
"ds_gt_init",
"ds_er_init",
"ds_gr_init",
]
state_vars = [
"V_t",
"V_r",
"Q_t",
"Q_r",
"Ca",
"h_T_t",
"h_T_r",
"m_h1",
"m_h2",
"s_et",
"s_gt",
"s_er",
"s_gr",
"ds_et",
"ds_gt",
"ds_er",
"ds_gr",
]
output_vars = ["V_t", "V_r", "Q_t", "Q_r"]
default_output = "Q_t"
input_vars = []
default_input = None
def __init__(self, params=None, seed=None):
self.seed = seed
# the integration function must be passed
integration = ti.timeIntegration
# load default parameters if none were given
if params is None:
params = dp.loadDefaultParams()
# Initialize base class Model
super().__init__(integration=integration, params=params)
def randomICs(self):
ics = dp.generateRandomICs()
for idx, iv in enumerate(self.init_vars):
self.params[iv] = ics[idx]
1686855
import json
import uuid
from collections import OrderedDict
from ... import path
from ...iterutils import first
__all__ = ['SlnBuilder', 'SlnElement', 'SlnVariable', 'Solution', 'UuidMap']
class SlnElement:
def __init__(self, name, arg=None, value=None):
if (arg is None) != (value is None):
raise TypeError('if arg is passed, value must be too')
self.name = name
self.arg = arg
self.value = value
self.children = []
def __call__(self, *args):
return self.extend(args)
def append(self, item):
self.children.append(item)
return self
def extend(self, args):
self.children.extend(args)
return self
def write(self, out, depth=0):
out.write('\t' * depth)
out.write(self.name)
if self.arg:
out.write('({}) = {}'.format(self.arg, self.value))
out.write('\n')
for i in self.children:
i.write(out, depth + 1)
out.write('\t' * depth + 'End' + self.name + '\n')
class SlnVariable:
def __init__(self, name, value):
self.name = name
self.value = value
def write(self, out, depth=0):
out.write('\t' * depth + '{} = {}\n'.format(self.name, self.value))
class SlnBuilder:
def __init__(self):
pass
def __call__(self, *args, **kwargs):
return SlnElement(*args, **kwargs)
def __getattribute__(self, name):
def closure(*args, **kwargs):
return SlnElement(name, *args, **kwargs)
return closure
class Solution:
def __init__(self, uuids):
self.uuid = uuids['']
self._uuids = uuids
self._projects = OrderedDict()
def __setitem__(self, key, value):
value.set_uuid(self._uuids)
self._projects[key] = value
def __getitem__(self, key):
return self._projects[key]
def set_default(self, key):
if key not in self._projects:
return
        new_projects = OrderedDict([(key, self._projects.pop(key))])
new_projects.update(self._projects)
self._projects = new_projects
def __iter__(self):
return iter(self._projects.values())
def __contains__(self, key):
return key in self._projects
def dependencies(self, deps):
# By definition, a dependency for an edge must already be defined by
# the time the edge is created, so we can map *all* the dependencies to
# their associated projects by looking at the projects we've already
# created.
dependencies = []
for dep in deps:
if not dep.creator:
continue
dep_output = first(dep.creator.public_output)
if dep_output not in self:
raise RuntimeError('unknown dependency for {!r}'.format(dep))
dependencies.append(self[dep_output])
return dependencies
@property
def uuid_str(self):
return uuid_str(self.uuid)
def write(self, out):
S = SlnBuilder()
Var = SlnVariable
out.write('Microsoft Visual Studio Solution File, Format Version ' +
'12.00\n')
out.write('# Visual Studio 14\n')
Var('VisualStudioVersion', '14.0.22609.0').write(out)
Var('MinimumVisualStudioVersion', '10.0.40219.1').write(out)
configs = set()
project_info = []
for p in self:
path_vars = {path.Root.builddir: None}
proj = S.Project(
'"{}"'.format(self.uuid_str),
'"{name}", "{path}", "{uuid}"'.format(
name=p.name, path=p.path.string(path_vars), uuid=p.uuid_str
)
)
if p.dependencies:
proj.append(
S.ProjectSection('ProjectDependencies', 'postProject')
.extend(Var(i.uuid_str, i.uuid_str)
for i in p.dependencies)
)
proj.write(out)
configs.add(p.config_plat)
project_info.append(Var('{uuid}.{cfg}.ActiveCfg'.format(
uuid=p.uuid_str, cfg=p.config_plat
), p.real_config_plat))
project_info.append(Var('{uuid}.{cfg}.Build.0'.format(
uuid=p.uuid_str, cfg=p.config_plat
), p.real_config_plat))
S.Global()(
S.GlobalSection('SolutionConfigurationPlatforms', 'preSolution')
.extend(Var(i, i) for i in configs),
S.GlobalSection('ProjectConfigurationPlatforms', 'postSolution')
.extend(project_info),
S.GlobalSection('SolutionProperties', 'preSolution')(
Var('HideSolutionNode', 'FALSE')
)
).write(out)
def uuid_str(uuid):
return '{{{}}}'.format(str(uuid).upper())
class UuidMap:
version = 1
def __init__(self, path):
self._path = path
self._seen = set()
try:
self._map = self._load(path)
except IOError:
self._map = {}
def __getitem__(self, key):
self._seen.add(key)
if key in self._map:
return self._map[key]
else:
u = uuid.uuid4()
self._map[key] = u
return u
@classmethod
def _load(cls, path):
with open(path) as inp:
state = json.load(inp)
if state['version'] > cls.version:
raise ValueError('saved version exceeds expected version')
return { k: uuid.UUID(hex=v) for k, v in state['map'].items() }
def save(self, path=None):
with open(path or self._path, 'w') as out:
# Only save the UUIDs we saw this time. Skip ones we didn't see.
seenmap = { k: v.hex for k, v in self._map.items()
if k in self._seen }
json.dump({
'version': self.version,
'map': seenmap,
}, out)
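# Usage sketch: stable per-project UUIDs persisted across builds
# (the file name below is illustrative):
#
#   uuids = UuidMap('.bfg_uuid')
#   uuids['myproject']   # same UUID on every run once saved
#   uuids.save()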
1686869
import click
import fastText
from .server import app
@click.command()
@click.argument('model', type=click.Path(exists=True))
def cli(model):
app.config["FT_SERVER_MODEL_PATH"] = model
app.run(host=app.config["HOST"], port=app.config["PORT"], debug=app.config["DEBUG"])
if __name__ == '__main__':
    cli()
1686874
from unittest import TestCase
from ..please_ack_decorator import PleaseAckDecorator
MESSAGE_ID = "abc123"
ON = ("RECEIPT", "OUTCOME")
class TestPleaseAckDecorator(TestCase):
def test_init_serde(self):
decorator = PleaseAckDecorator()
assert type(decorator) == PleaseAckDecorator
assert decorator.message_id is None
assert decorator.on is None
dumped = decorator.serialize()
assert dumped == {}
loaded = PleaseAckDecorator.deserialize(dumped)
assert type(loaded) == PleaseAckDecorator
assert loaded.message_id is None
assert loaded.on is None
decorator = PleaseAckDecorator(message_id=MESSAGE_ID)
assert type(decorator) == PleaseAckDecorator
assert decorator.message_id == MESSAGE_ID
assert decorator.on is None
dumped = decorator.serialize()
assert dumped == {"message_id": MESSAGE_ID}
loaded = PleaseAckDecorator.deserialize(dumped)
assert type(loaded) == PleaseAckDecorator
assert loaded.message_id == MESSAGE_ID
assert loaded.on is None
decorator = PleaseAckDecorator(on=ON)
assert type(decorator) == PleaseAckDecorator
assert decorator.message_id is None
assert decorator.on == list(ON)
dumped = decorator.serialize()
assert dumped == {
"on": list(ON),
}
loaded = PleaseAckDecorator.deserialize(dumped)
assert type(loaded) == PleaseAckDecorator
assert loaded.message_id is None
assert loaded.on == list(ON)
decorator = PleaseAckDecorator(message_id=MESSAGE_ID, on=ON)
assert type(decorator) == PleaseAckDecorator
assert decorator.message_id == MESSAGE_ID
assert decorator.on == list(ON)
dumped = decorator.serialize()
assert dumped == {
"message_id": MESSAGE_ID,
"on": list(ON),
}
loaded = PleaseAckDecorator.deserialize(dumped)
assert type(loaded) == PleaseAckDecorator
assert loaded.message_id == MESSAGE_ID
assert loaded.on == list(ON)
1686931
import torch
from torch import nn, Tensor
import math
class LearnedPositionEncoder(nn.Module):
"""
    Learned Position Encoder. Takes a tensor of positional indices and converts it to learned embeddings.
"""
def __init__(self, n_timesteps, d_model):
super().__init__()
self.embeddor = nn.Embedding(n_timesteps, d_model) # lookup table, each with vector of size d_model
nn.init.uniform_(self.embeddor.weight)
    def forward(self, pos_indices):
        pos_indices = pos_indices.long()
        return self.embeddor(pos_indices)
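# Sketch: embed integer timesteps into d_model-dimensional learned vectors.
#
#   enc = LearnedPositionEncoder(n_timesteps=10, d_model=16)
#   enc(torch.arange(5)).shape   # torch.Size([5, 16])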
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
    def __init__(self, params, temperature=10000, scale=2 * math.pi):
        super().__init__()
        self.params = params
        self.num_pos_feats = params.arch.d_model
        self.temperature = temperature
        self.scale = scale
        self.max_time = params.data_generation.n_timesteps
def forward(self, proposals):
proposals = proposals + 1
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
# N, L
proposals = proposals / self.max_time * self.scale
# N, L, num_pos_feats
pos = proposals[:, :, None] / dim_t
# N, L, 2, num_pos_feats/2, 2
pos = torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3).flatten(2)
# N, L, num_pos_feats*2
return pos
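# Shape check (a sketch; the SimpleNamespace stands in for whatever config
# object the project actually passes as `params`):
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(arch=SimpleNamespace(d_model=64),
#                            data_generation=SimpleNamespace(n_timesteps=100))
#   enc = PositionEmbeddingSine(params)
#   t = torch.arange(10, dtype=torch.float32).unsqueeze(0)  # (N=1, L=10)
#   enc(t).shape   # torch.Size([1, 10, 64])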
1686935
from setuptools import setup
with open('README.md', 'r') as file:
long_description = file.read()
setup(
name='serverlessplus',
packages=['serverlessplus'],
version='0.0.8',
license='Apache-2.0',
author='chenhengqi',
author_email='<EMAIL>',
description='serverless your django/flask apps',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/serverlessplus/py',
install_requires=['werkzeug'],
keywords=['serverless', 'scf', 'tencent-cloud', 'wsgi', 'django', 'flask'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
1686954
import torch
import torch.nn as nn
import subprocess
import sys
import os
def is_distributed():
return torch.distributed.is_initialized()
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
def all_reduce_numpy(array):
tensor = torch.from_numpy(array).cuda()
torch.distributed.all_reduce(tensor)
return tensor.cpu().numpy()
def handle_distributed(args, main_file):
if not args.distributed:
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpu))
return
if args.local_rank >= 0:
_setup_process_group(args)
return
current_env = os.environ.copy()
if current_env.get('CUDA_VISIBLE_DEVICES') is None:
current_env['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpu))
world_size = len(args.gpu)
else:
world_size = len(current_env['CUDA_VISIBLE_DEVICES'].split(','))
current_env['WORLD_SIZE'] = str(world_size)
print('World size:', world_size)
# Logic for spawner
python_exec = sys.executable
command_args = sys.argv
main_index = command_args.index('main.py')
command_args = command_args[main_index+1:]
print(command_args)
command_args = [
python_exec, '-u',
'-m', 'torch.distributed.launch',
'--nproc_per_node', str(world_size),
main_file,
] + command_args
process = subprocess.Popen(command_args, env=current_env)
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode,
cmd=command_args)
sys.exit(process.returncode)
def _setup_process_group(args):
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
torch.distributed.init_process_group(
'nccl',
init_method='env://',
# rank=local_rank
)
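# Typical wiring (a sketch; flag names follow the attributes read above:
# args.distributed, args.gpu, args.local_rank):
#
#   python main.py --distributed --gpu 0 1
#
# The parent process re-launches itself through torch.distributed.launch with
# --nproc_per_node workers; the launcher passes --local_rank to each child, so
# the re-entrant call takes the _setup_process_group() branch instead.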
1686978
import os
import sys
import math
import pickle
import argparse
import time
from torch import optim
from torch.utils.tensorboard import SummaryWriter
sys.path.append(os.getcwd())
from utils import *
from motion_pred.utils.config import Config
from motion_pred.utils.dataset_h36m import DatasetH36M
from motion_pred.utils.dataset_humaneva import DatasetHumanEva
from models.motion_pred import *
def loss_function(X, Y_r, Y, mu, logvar):
    # reconstruction error, normalized by batch size (Y.shape[1])
    MSE = (Y_r - Y).pow(2).sum() / Y.shape[1]
    # velocity term: penalizes a jump between the last observed frame and the first predicted frame
    MSE_v = (X[-1] - Y_r[0]).pow(2).sum() / Y.shape[1]
    # KL divergence of the approximate posterior from the unit Gaussian prior
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / Y.shape[1]
    loss_r = MSE + cfg.lambda_v * MSE_v + cfg.beta * KLD
    return loss_r, np.array([loss_r.item(), MSE.item(), MSE_v.item(), KLD.item()])
def train(epoch):
t_s = time.time()
train_losses = 0
total_num_sample = 0
loss_names = ['TOTAL', 'MSE', 'MSE_v', 'KLD']
generator = dataset.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size)
for traj_np in generator:
traj_np = traj_np[..., 1:, :].reshape(traj_np.shape[0], traj_np.shape[1], -1)
traj = tensor(traj_np, device=device, dtype=dtype).permute(1, 0, 2).contiguous()
X = traj[:t_his]
Y = traj[t_his:]
Y_r, mu, logvar = model(X, Y)
loss, losses = loss_function(X, Y_r, Y, mu, logvar)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_losses += losses
total_num_sample += 1
scheduler.step()
dt = time.time() - t_s
train_losses /= total_num_sample
lr = optimizer.param_groups[0]['lr']
losses_str = ' '.join(['{}: {:.4f}'.format(x, y) for x, y in zip(loss_names, train_losses)])
logger.info('====> Epoch: {} Time: {:.2f} {} lr: {:.5f}'.format(epoch, dt, losses_str, lr))
for name, loss in zip(loss_names, train_losses):
tb_logger.add_scalar('vae_' + name, loss, epoch)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default=None)
parser.add_argument('--mode', default='train')
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--iter', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu_index', type=int, default=0)
args = parser.parse_args()
"""setup"""
np.random.seed(args.seed)
torch.manual_seed(args.seed)
dtype = torch.float64
torch.set_default_dtype(dtype)
device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_index)
cfg = Config(args.cfg, test=args.test)
tb_logger = SummaryWriter(cfg.tb_dir) if args.mode == 'train' else None
logger = create_logger(os.path.join(cfg.log_dir, 'log.txt'))
"""parameter"""
mode = args.mode
nz = cfg.nz
t_his = cfg.t_his
t_pred = cfg.t_pred
"""data"""
dataset_cls = DatasetH36M if cfg.dataset == 'h36m' else DatasetHumanEva
dataset = dataset_cls('train', t_his, t_pred, actions='all', use_vel=cfg.use_vel)
if cfg.normalize_data:
dataset.normalize_data()
"""model"""
model = get_vae_model(cfg, dataset.traj_dim)
optimizer = optim.Adam(model.parameters(), lr=cfg.vae_lr)
scheduler = get_scheduler(optimizer, policy='lambda', nepoch_fix=cfg.num_vae_epoch_fix, nepoch=cfg.num_vae_epoch)
if args.iter > 0:
cp_path = cfg.vae_model_path % args.iter
print('loading model from checkpoint: %s' % cp_path)
model_cp = pickle.load(open(cp_path, "rb"))
model.load_state_dict(model_cp['model_dict'])
if mode == 'train':
model.to(device)
model.train()
for i in range(args.iter, cfg.num_vae_epoch):
train(i)
if cfg.save_model_interval > 0 and (i + 1) % cfg.save_model_interval == 0:
with to_cpu(model):
cp_path = cfg.vae_model_path % (i + 1)
model_cp = {'model_dict': model.state_dict(), 'meta': {'std': dataset.std, 'mean': dataset.mean}}
pickle.dump(model_cp, open(cp_path, 'wb'))
1686982
from colossalai.amp import AMP_TYPE
# ViT Base
BATCH_SIZE = 128
DROP_RATE = 0.1
NUM_EPOCHS = 2
clip_grad_norm = 1.0
1686995
import copy
import os
import random
import numpy as np
import torch
import torch.nn as nn
from torch import fx
from torchvision.models import MNASNet, MobileNetV3, ShuffleNetV2
from torchvision.models.densenet import _DenseLayer
def matches_module_pattern(pattern, node, modules):
if len(node.args) == 0:
return False
nodes = (node.args[0], node)
for expected_type, current_node in zip(pattern, nodes):
if not isinstance(current_node, fx.Node):
return False
if current_node.op != 'call_module':
return False
if not isinstance(current_node.target, str):
return False
if current_node.target not in modules:
return False
if type(modules[current_node.target]) is not expected_type:
return False
return True
def set_seed(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
def get_previous_layer(node, modules):
# print("get_previous_layer")
for input_node in node.all_input_nodes:
# print(input_node.name)
if input_node.target in modules and isinstance(modules[input_node.target], (nn.Conv2d, nn.BatchNorm2d)):
return input_node.target
else:
return get_previous_layer(input_node, modules)
def get_pinned_out(model):
pinned_out = []
try:
fx_model = fx.symbolic_trace(copy.deepcopy(model))
modules = dict(fx_model.named_modules())
last_module = None
for i, node in enumerate(fx_model.graph.nodes):
# print(node.name)
if node.target in modules and isinstance(modules[node.target], nn.Conv2d):
if modules[node.target].groups > 1 and last_module is not None:
if last_module.target is not None and last_module.target not in pinned_out:
pinned_out.append(last_module.target)
last_module = node
if i > 0 and (len(node.all_input_nodes) > 1 or len(node.users) > 1):
for input_node in node.all_input_nodes:
if input_node.target in modules and isinstance(modules[input_node.target],
(nn.Conv2d, nn.BatchNorm2d)):
if input_node.target is not None and input_node.target not in pinned_out:
pinned_out.append(input_node.target)
else:
previous_layer = get_previous_layer(input_node, modules)
if previous_layer is not None and previous_layer not in pinned_out:
pinned_out.append(previous_layer)
    except Exception:
        pass
return pinned_out
def get_bn_folding(model):
bn_folding = []
try:
patterns = [(torch.nn.Conv2d, torch.nn.BatchNorm2d)]
fx_model = fx.symbolic_trace(model)
modules = dict(fx_model.named_modules())
for pattern in patterns:
for node in fx_model.graph.nodes:
if matches_module_pattern(pattern, node, modules):
if len(node.args[0].users) > 1:
continue
bn_folding.append([node.args[0].target, node.target])
    except Exception:
last_module = None
for name, module in model.named_modules():
if isinstance(module, _DenseLayer):
last_module = None
if isinstance(module, (nn.Linear, nn.Conv2d)):
last_module = (name, module)
if isinstance(module, nn.BatchNorm2d):
if last_module is not None and last_module[1].weight.shape[0] == module.weight.shape[0]:
bn_folding.append([last_module[0], name])
return bn_folding
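# Usage sketch: list Conv2d -> BatchNorm2d pairs that are candidates for
# folding in a traced torchvision model (resnet18 here is illustrative):
#
#   from torchvision.models import resnet18
#   pairs = get_bn_folding(resnet18())
#   pairs[0]   # e.g. ['conv1', 'bn1']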
def get_previous_layer_2(connections, module):
for k in connections:
if any([c == module for c in connections[k]["next"]]):
if not isinstance(connections[k]["class"], (nn.Conv2d, nn.BatchNorm2d)):
return get_previous_layer_2(connections, k)
else:
return k
def get_pinned(model):
fx_model = fx.symbolic_trace(copy.deepcopy(model))
modules = dict(fx_model.named_modules())
connections = {}
# Build dictionary node -> list of connected nodes
for i, node in enumerate(fx_model.graph.nodes):
# print(f"{node.name}->{[str(user) for user in node.users]}")
if node.target in modules:
module = modules[node.target]
else:
module = None
connections[node.name] = {"next": [str(user) for user in node.users], "class": module}
# Remove duplicates and build list of "to-pin" nodes (may contain nodes not CONV nor BN)
same_next = []
for k in connections:
for k2 in connections:
if k != k2:
if "add" in str(set(connections[k]["next"]) & set(connections[k2]["next"])):
same_next.append([k, k2])
same_next = set([item for sublist in same_next for item in sublist])
# Add input node of CONV with grouping, layer.6 for MNASNet and fc2 for MobileNetV3
for i, node in enumerate(fx_model.graph.nodes):
if (isinstance(model, MobileNetV3) and "fc2" in node.name) or \
(isinstance(model, ShuffleNetV2) and (node.name == "conv1_1" or
"branch1_3" in node.name or
"branch2_1" in node.name or
"branch2_6" in node.name)):
same_next.add(str(node.name))
name = node.name.replace("_", ".")
if name in modules:
module = modules[name]
if isinstance(module, nn.Conv2d) and module.groups > 1:
same_next.add(str(node.prev))
# For each node not CONV nor BN recover the closest previous CONV or BN
to_pin = []
for m in same_next:
if not isinstance(connections[m]["class"], (nn.Conv2d, nn.BatchNorm2d)):
to_pin.append(get_previous_layer_2(connections, m))
else:
to_pin.append(m)
return [n.replace("_", ".") for n in list(set(to_pin))]
1686999
from river import metrics
__all__ = ["CohenKappa"]
class CohenKappa(metrics.base.MultiClassMetric):
r"""Cohen's Kappa score.
Cohen's Kappa expresses the level of agreement between two annotators on a classification
problem. It is defined as
$$
\kappa = (p_o - p_e) / (1 - p_e)
$$
where $p_o$ is the empirical probability of agreement on the label
assigned to any sample (prequential accuracy), and $p_e$ is
the expected agreement when both annotators assign labels randomly.
Parameters
----------
cm
This parameter allows sharing the same confusion
matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage
and computation time.
Examples
--------
>>> from river import metrics
>>> y_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird']
>>> y_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat']
>>> metric = metrics.CohenKappa()
>>> for yt, yp in zip(y_true, y_pred):
... metric = metric.update(yt, yp)
>>> metric
CohenKappa: 42.86%
References
----------
[^1]: <NAME> (1960). "A coefficient of agreement for nominal scales". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104.
"""
def get(self):
try:
p0 = self.cm.total_true_positives / self.cm.n_samples # same as accuracy
except ZeroDivisionError:
p0 = 0
pe = 0
for c in self.cm.classes:
estimation_row = self.cm.sum_row[c] / self.cm.n_samples
estimation_col = self.cm.sum_col[c] / self.cm.n_samples
pe += estimation_row * estimation_col
try:
return (p0 - pe) / (1 - pe)
except ZeroDivisionError:
return 0.0
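# Worked check of the formula on the docstring data:
#   agreement         p0 = 4/6                                  ~ 0.6667
#   chance agreement  pe = (3/6)(3/6) + (2/6)(3/6) + (1/6)(0/6) ~ 0.4167
#   kappa = (p0 - pe) / (1 - pe) = 0.25 / 0.5833                ~ 0.4286 -> 42.86%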
1687022
import os
import unittest
import site # so that ai4water directory is in path
import sys
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from ai4water.datasets import busan_beach, load_nasdaq
from ai4water import InputAttentionModel, DualAttentionModel
arg_busan = busan_beach()
arg_input_features = arg_busan.columns.tolist()[0:-1]
arg_output_features = arg_busan.columns.tolist()[-1:]
nasdaq = load_nasdaq(inputs=['AAL', 'AAPL', 'ADBE', 'ADI', 'ADP', 'ADSK'])
nasdaq_input_features = nasdaq.columns.tolist()[0:-1]
nasdaq_output_features = nasdaq.columns.tolist()[-1:]
def make_and_run(input_model, data, _layers=None, lookback=12, batch_size=64, epochs=3, **kwargs):
model = input_model(
verbosity=0,
batch_size=batch_size,
lookback=lookback,
lr=0.001,
epochs=epochs,
train_data='random',
**kwargs
)
_ = model.fit(data=data)
    # smoke-check prediction and evaluation on the training split, then predict on the default split
    _ = model.predict(data='training')
    _ = model.evaluate(data='training')
    pred_y = model.predict()
return pred_y
class TestModels(unittest.TestCase):
    # The InputAttention-based model is not reproducible across runs, so we only test that it runs.
def test_InputAttentionModel(self):
prediction = make_and_run(InputAttentionModel,
data=arg_busan,
input_features=arg_input_features,
output_features=arg_output_features)
self.assertGreater(float(abs(prediction[0].sum())), 0.0)
return
# def test_InputAttentionModel_with_drop_remainder(self):
#
# prediction = make_and_run(InputAttentionModel, drop_remainder=True)
# self.assertGreater(float(prediction[0].sum()), 0.0)
def test_DualAttentionModel(self):
# DualAttentionModel based model
prediction = make_and_run(
DualAttentionModel,
data=nasdaq,
input_features=nasdaq_input_features,
output_features=nasdaq_output_features
)
self.assertGreater(float(abs(prediction[0].sum())), 0.0)
return
def test_da_without_prev_y(self):
prediction = make_and_run(
DualAttentionModel,
data=arg_busan,
teacher_forcing=False,
batch_size=8,
drop_remainder=True,
input_features=arg_input_features,
output_features=arg_output_features
)
self.assertGreater(float(abs(prediction[0].sum())), 0.0)
return
if __name__ == "__main__":
unittest.main()
1687035
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class TestDicts(BaseTestPyPyC):
def test_strdict(self):
def fn(n):
import sys
d = {}
class A(object):
pass
a = A()
a.x = 1
for s in sys.modules.keys() * 1000:
d.get(s) # force pending setfields etc.
inc = a.x # ID: look
d[s] = d.get(s, 0) + inc
return sum(d.values())
#
log = self.run(fn, [1000])
assert log.result % 1000 == 0
loop, = log.loops_by_filename(self.filepath)
ops = loop.ops_by_id('look')
assert log.opnames(ops) == []
def test_identitydict(self):
def fn(n):
class X(object):
pass
x = X()
d = {}
d[x] = 1
res = 0
for i in range(300):
value = d[x] # ID: getitem
res += value
return res
#
log = self.run(fn, [1000])
assert log.result == 300
loop, = log.loops_by_filename(self.filepath)
# check that the call to ll_dict_lookup is not a call_may_force, the
# gc_id call is hoisted out of the loop, the id of a value obviously
# can't change ;)
assert loop.match_by_id("getitem", """
...
i26 = call_i(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...)
...
p33 = getinteriorfield_gc_r(p31, i26, descr=<InteriorFieldDescr <FieldP odictentry.value .*>>)
...
""")
def test_non_virtual_dict(self):
def main(n):
i = 0
while i < n:
d = {str(i): i}
i += d[str(i)] - i + 1
return i
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i8 = int_lt(i5, i7)
guard_true(i8, descr=...)
guard_not_invalidated(descr=...)
p10 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=<Callr . i EF=3>)
guard_no_exception(descr=...)
guard_nonnull(p10, descr=...)
i99 = strhash(p10)
i12 = cond_call_value_i(i99, ConstClass(_ll_strhash__rpy_stringPtr), p10, descr=<Calli . r EF=2>)
p13 = new(descr=...)
p15 = new_array_clear(16, descr=<ArrayU 1>)
{{{
setfield_gc(p13, 0, descr=<FieldS dicttable.num_ever_used_items .+>)
setfield_gc(p13, p15, descr=<FieldP dicttable.indexes .+>)
setfield_gc(p13, ConstPtr(0), descr=<FieldP dicttable.entries .+>)
}}}
i17 = call_i(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=<Calli . rrii EF=5 OS=4>)
{{{
setfield_gc(p13, 0, descr=<FieldS dicttable.lookup_function_no .+>)
setfield_gc(p13, 0, descr=<FieldS dicttable.num_live_items .+>)
setfield_gc(p13, 32, descr=<FieldS dicttable.resize_counter .+>)
}}}
guard_no_exception(descr=...)
p20 = new_with_vtable(descr=...)
call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=<Callv 0 rrrii EF=5>)
setfield_gc(p20, i5, descr=<FieldS .*W_IntObject.inst_intval .* pure>)
guard_no_exception(descr=...)
i98 = strhash(p10)
i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=<Calli . rrii EF=5 OS=4>)
guard_no_exception(descr=...)
i27 = int_lt(i23, 0)
guard_false(i27, descr=...)
p28 = getfield_gc_r(p13, descr=<FieldP dicttable.entries .*>)
p29 = getinteriorfield_gc_r(p28, i23, descr=<InteriorFieldDescr <FieldP odictentry.value .*>>)
guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...)
i31 = getfield_gc_i(p29, descr=<FieldS .*W_IntObject.inst_intval .* pure>)
i32 = int_sub_ovf(i31, i5)
guard_no_overflow(descr=...)
i34 = int_add_ovf(i32, 1)
guard_no_overflow(descr=...)
i35 = int_add_ovf(i5, i34)
guard_no_overflow(descr=...)
--TICK--
jump(..., descr=...)
""")
class TestOtherContainers(BaseTestPyPyC):
def test_list(self):
def main(n):
i = 0
while i < n:
z = list(())
z.append(1)
i += z[-1] / len(z)
return i
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i7 = int_lt(i5, i6)
guard_true(i7, descr=...)
guard_not_invalidated(descr=...)
i9 = int_add(i5, 1)
--TICK--
jump(..., descr=...)
""")
def test_floatlist_unpack_without_calls(self):
def fn(n):
l = [2.3, 3.4, 4.5]
for i in range(n):
x, y, z = l # ID: look
#
log = self.run(fn, [1000])
loop, = log.loops_by_filename(self.filepath)
ops = loop.ops_by_id('look')
assert 'call' not in log.opnames(ops)
# XXX the following tests only work with strategies enabled
def test_should_not_create_intobject_with_sets(self):
def main(n):
i = 0
s = set()
while i < n:
s.add(i)
i += 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_should_not_create_stringobject_with_sets(self):
def main(n):
i = 0
s = set()
while i < n:
s.add(str(i))
i += 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_should_not_create_intobject_with_lists(self):
def main(n):
i = 0
l = []
while i < n:
l.append(i)
i += 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_should_not_create_stringobject_with_lists(self):
def main(n):
i = 0
l = []
while i < n:
l.append(str(i))
i += 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_optimized_create_list_from_string(self):
def main(n):
i = 0
l = []
while i < n:
l = list("abc" * i)
i += 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_optimized_create_set_from_list(self):
def main(n):
i = 0
while i < n:
s = set([1, 2, 3])
i += 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_constfold_tuple(self):
code = """if 1:
tup = tuple(range(10000))
l = [1, 2, 3, 4, 5, 6, "a"]
def main(n):
while n > 0:
sub = tup[1] # ID: getitem
l[1] = n # kill cache of tup[1]
n -= sub
"""
log = self.run(code, [1000])
loop, = log.loops_by_filename(self.filepath)
ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False)
assert log.opnames(ops) == []
def test_specialised_tuple(self):
def main(n):
import pypyjit
f = lambda: None
tup = (n, n)
while n > 0:
tup[0] # ID: getitem
pypyjit.residual_call(f)
n -= 1
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False)
assert log.opnames(ops) == []
def test_enumerate_list(self):
def main(n):
for a, b in enumerate([1, 2] * 1000):
a + b
log = self.run(main, [1000])
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
def test_enumerate(self):
def main(n):
for a, b in enumerate("abc" * 1000):
a + ord(b)
log = self.run(main, [1000])
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
assert opnames.count('new_with_vtable') == 0
|
1687042
|
import sys

from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Summer_Ranzal
class Summer_Ranzal(Adv):
a1 = ('lo',0.4)
a3 = ('primed_defense', 0.08)
conf = {}
conf['slots.a'] = Resounding_Rendition() + Breakfast_at_Valerios()
conf['slots.frostbite.a'] = Primal_Crisis() + His_Clever_Brother()
conf['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s3
`s4
`s2
"""
coab = ['Xander', 'Dagger', 'Dagger2']
conf['afflict_res.bog'] = 100
share = ['Gala_Elisanne', 'Ranzal']
def init(self):
self.a3_iscding = 0
self.buff_class = Teambuff if self.condition('buff all team') else Selfbuff
@staticmethod
def prerun_skillshare(adv, dst):
adv.buff_class = Teambuff if adv.condition('buff all team') else Selfbuff
def s1_proc(self, e):
self.dmg_make(e.name,2.16)
self.afflics.bog.on(e.name, 100)
self.dmg_make(e.name,6.48)
def s2_proc(self, e):
self.buff_class(e.name,0.10,15).on()
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
1687105
|
import ztom
mongo_rep = ztom.MongoReporter("test", "offline")
mongo_rep.init_db("localhost", 27017, "db_test", "test_collection")
mongo_rep.set_indicator("test_field_int", 1)
mongo_rep.set_indicator("test_field_str", "Hi")
mongo_rep.set_indicator("test_field_dict", {"level1": {"sublevel1": {"key1": "value1", "key1": 7777}}})
result = mongo_rep.push_report()
print(result)
report = list()
report.append({"symbol": "ABC/XYZ", "amount": 1.11})
report.append({"symbol": "ABC2/XYZ2", "amount": 2.11})
report.append({"symbol": "ABC3/XYZ3", "amount": 3.11})
report.append({"symbol": "ABC4/XYZ4", "amount": 4.11})
result = mongo_rep.push_report(report)
print(result)
|
1687112
|
from __future__ import print_function
from tqdm import tqdm
import math
from termcolor import colored
import numpy as np
from openrec.tf1.legacy.utils.evaluators import ImplicitEvalManager
import sys
import json
import pickle
class ImplicitModelTrainer(object):
"""
    The ImplicitModelTrainer class implements the logic for basic recommender training and evaluation using users'
*implicit feedback*.
Parameters
----------
batch_size: int
Training batch size.
test_batch_size: int
Test/Evaluation batch size (number of users per testing batch).
train_dataset: Dataset
Dataset for model training.
model: Recommender
The target recommender.
sampler: Sampler
The sampler for model training.
item_serving_size: int, optional
Test/Evaluation batch size (number of items per testing batch).
Notes
-----
The function :code:`train` should be called for model training and evaluation.
"""
def __init__(self, batch_size, test_batch_size, train_dataset, model, sampler, item_serving_size=None, eval_save_prefix=None):
self._batch_size = batch_size
self._test_batch_size = test_batch_size
self._item_serving_size = item_serving_size
self._eval_save_prefix = eval_save_prefix
self._train_dataset = train_dataset
self._max_item = self._train_dataset.max_item()
self._model = model
self._sampler = sampler
def train(self, num_itr, display_itr, eval_datasets=[], evaluators=[], num_negatives=None, seed=10):
"""Train and evaluate a recommender.
Parameters
----------
num_itr: int
            Total number of training iterations.
display_itr: int
Evaluation/testing period.
eval_datasets: list of Dataset
A list of datasets for evaluation/testing.
evaluators: list of Evaluator
A list of evaluators for evaluation/testing.
num_negatives: int, optional
If specified, a given number of items NOT interacted with each user will be sampled (as negative items) for evaluations.
"""
acc_loss = 0
self._eval_manager = ImplicitEvalManager(evaluators=evaluators)
self._num_negatives = num_negatives
self._exclude_positives(eval_datasets)
if self._num_negatives is None:
eval_func = self._evaluate_full
print(colored('== Start training with FULL evaluation ==', 'blue'))
else:
eval_func = self._evaluate_partial
self._sample_negatives(seed=seed)
print(colored('== Start training with sampled evaluation, sample size: %d ==' % num_negatives, 'blue'))
for itr in range(num_itr):
batch_data = self._sampler.next_batch()
loss = self._model.train(batch_data)
acc_loss += loss
            if itr % max(display_itr // 10, 1) == 0 and itr > 0:  # guard against display_itr < 10
print(colored('[Itr %d] Finished' % itr, 'blue'))
if itr % display_itr == 0 and itr > 0:
if self._eval_save_prefix:
self._model.save(self._eval_save_prefix, itr)
print(colored('[Itr %d]' % itr, 'red'), 'loss: %f' % (acc_loss/display_itr))
for dataset in eval_datasets:
print(colored('..(dataset: %s) evaluation' % dataset.name, 'green'))
sys.stdout.flush()
eval_results = eval_func(eval_dataset=dataset)
for key, result in eval_results.items():
average_result = np.mean(result, axis=0)
                        if isinstance(average_result, np.ndarray):
print(colored('..(dataset: %s)' % dataset.name, 'green'), \
key, ' '.join([str(s) for s in average_result]))
else:
print(colored('..(dataset: %s)' % dataset.name, 'green'), \
key, average_result)
acc_loss = 0
def _score_full_items(self, users):
if self._item_serving_size is None:
return self._model.serve({'user_id_input': users,
'item_id_input': np.arange(self._max_item)})
else:
scores = []
item_id_input = np.zeros(self._item_serving_size, np.int32)
for ibatch in range(int(math.ceil(float(self._max_item) / self._item_serving_size))):
item_id_list = range(ibatch*self._item_serving_size,
min((ibatch+1)*self._item_serving_size, self._max_item))
item_id_input[:len(item_id_list)] = item_id_list
scores.append(self._model.serve({'user_id_input': users,
'item_id_input': item_id_input})[:len(item_id_list)])
return np.concatenate(scores, axis=1)
def _score_partial_items(self, user, items):
if self._item_serving_size is None:
return self._model.serve({'user_id_input': [user],
'item_id_input': np.arange(self._max_item)})[0][np.array(items)]
else:
return self._model.serve({'user_id_input': [user],
'item_id_input': np.array(items)})[0]
def _evaluate_full(self, eval_dataset):
metric_results = {}
for evaluator in self._eval_manager.evaluators:
metric_results[evaluator.name] = []
for itr in tqdm(range(int(math.ceil(float(eval_dataset.unique_user_count()) / self._test_batch_size)))):
users = eval_dataset.get_unique_user_list()[itr * self._test_batch_size:(itr + 1) * self._test_batch_size]
scores = self._score_full_items(users=users)
for u_ind, user in enumerate(users):
result = self._eval_manager.full_eval(
pos_samples=list(eval_dataset.get_interactions_by_user_gb_item(user)),
excl_pos_samples=self._excluded_positives[user],
predictions=scores[u_ind])
for key in result:
metric_results[key].append(result[key])
return metric_results
def _evaluate_partial(self, eval_dataset):
metric_results = {}
for evaluator in self._eval_manager.evaluators:
metric_results[evaluator.name] = []
to_be_saved = dict()
to_be_saved["num_negatives"] = self._num_negatives
to_be_saved["users"] = list()
to_be_saved["user_items"] = dict()
to_be_saved["results"] = dict()
for user in tqdm(eval_dataset.get_unique_user_list()):
to_be_saved["users"].append(int(user))
items = self._sampled_negatives[user] + list(eval_dataset.get_interactions_by_user_gb_item(user))
to_be_saved["user_items"][int(user)] = items
scores = self._score_partial_items(user, items)
result = self._eval_manager.partial_eval(pos_scores=scores[self._num_negatives:], neg_scores=scores[:self._num_negatives])
to_be_saved["results"][int(user)] = scores
for key in result:
metric_results[key].append(result[key])
if self._eval_save_prefix:
with open(self._eval_save_prefix + "_evaluate_partial.pickle", 'wb') as tmpf:
pickle.dump(to_be_saved, tmpf)
return metric_results
def _exclude_positives(self, eval_datasets):
self._excluded_positives = {}
user_set = set()
for dataset in eval_datasets:
user_set = user_set.union(dataset.get_unique_user_list())
for user in user_set:
self._excluded_positives[user] = set()
for user in user_set:
if self._train_dataset.contain_user(user):
self._excluded_positives[user] = self._excluded_positives[user].union(self._train_dataset.get_interactions_by_user_gb_item(user))
for dataset in eval_datasets:
if dataset.contain_user(user):
self._excluded_positives[user] = self._excluded_positives[user].union(dataset.get_interactions_by_user_gb_item(user))
def _sample_negatives(self, seed):
print(colored('[Subsampling negative items]', 'red'))
np.random.seed(seed=seed)
self._sampled_negatives = {}
for user in tqdm(self._excluded_positives, leave=False):
shuffled_items = np.random.permutation(self._max_item)
subsamples = []
for item in shuffled_items:
if item not in self._excluded_positives[user]:
subsamples.append(item)
if len(subsamples) == self._num_negatives:
break
self._sampled_negatives[user] = subsamples
|
1687120
|
import logging
import shutil
from os import path, link
from os.path import normpath
from django.conf import settings
logger = logging.getLogger(__name__)
def data_sample_pre_save(sender, instance, **kwargs):
destination_path = path.join(getattr(settings, 'MEDIA_ROOT'), 'datasamples/{0}'.format(instance.key))
src_path = normpath(instance.path)
if path.exists(destination_path):
raise FileExistsError(f'File exists: {destination_path}')
    # try to make a hard link to keep a free copy of the data;
    # if not possible, keep the real path location
try:
shutil.copytree(src_path, destination_path, copy_function=link)
except Exception:
logger.exception(f'error happened while copying data from {src_path} to {destination_path}')
shutil.rmtree(destination_path, ignore_errors=True)
logger.info(f'directory {destination_path} deleted')
else:
        # override the path so the instance points at our hard link
instance.path = destination_path
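# A hypothetical wiring example (the DataSample model name is a placeholder):
#   from django.db.models.signals import pre_save
#   pre_save.connect(data_sample_pre_save, sender=DataSample)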
|
1687155
|
from flask import Flask, render_template
from flask_mqtt import Mqtt
import json
import ssl
import urllib.request as request
# import threading
app = Flask(__name__)
app.config['MQTT_BROKER_URL'] = '************-ats.iot.YOUR_REGION.amazonaws.com'
app.config['MQTT_BROKER_PORT'] = 8883
app.config['MQTT_CLIENT_ID'] = "DummyCar"
app.config['MQTT_KEEPALIVE'] = 60
app.config['MQTT_TLS_ENABLED'] = True
app.config['MQTT_TLS_CA_CERTS'] = "root-CA.crt"
app.config['MQTT_TLS_CERTFILE'] = "iotThing.cert.pem"
app.config['MQTT_TLS_KEYFILE'] = "iotThing.private.key"
app.config['MQTT_TLS_CIPHERS'] = None
app.config['MQTT_TLS_CERT_REQS'] = ssl.CERT_REQUIRED
app.config['MQTT_TLS_VERSION'] = ssl.PROTOCOL_TLSv1_2
mqtt = Mqtt(app)
lat = 0
lng = 0
position = {'lat': 0, 'lng': 0}
@mqtt.on_connect()
def handle_connect(client, userdata, flags, rc):
    print('connected and waiting for msg')
mqtt.subscribe('iot')
@mqtt.on_message()
def handle_mqtt_message(client, userdata, msg):
global lat, lng, position
    # json.loads is safer than eval for untrusted payloads (assumes JSON messages)
    data = json.loads(msg.payload.decode())
lat = data['lat']
lng = data['lng']
@app.route('/')
def index():
return render_template('index.html')
@app.route('/getlatlng')
def getlatlng_page():
global lat, lng, position
print("called ajax : " , str(lat)+','+str(lng))
return {'lat':lat,'lng':lng}
if __name__ == '__main__':
app.run( use_reloader=False, debug=True)
|
1687165
|
import os
import sys
import cv2
import argparse
import glob
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import draw, transform
from scipy.optimize import minimize
from scipy.optimize import least_squares
import objs
import utils
# fp is in cam-ceiling normal; height is in cam-floor normal
def data2scene(fp_points, height):
# cam-ceiling / cam-floor
scale = (height - 1.6) / 1.6
#layout_fp, fp_points = fit_layout(fp, scale=None, max_cor=12)
size = 512
ratio = 20/size
fp_points = fp_points.astype(float)
fp_points[0] -= size/2
fp_points[1] -= size/2
fp_points *= scale
fp_points[0] += size/2
fp_points[1] += size/2
fp_points = fp_points.astype(int)
scene = objs.Scene()
scene.cameraHeight = 1.6
scene.layoutHeight = height
scene.layoutPoints = []
for i in range(fp_points.shape[1]):
fp_xy = (fp_points[:,i] - size/2) * ratio
xyz = (fp_xy[1], 0, fp_xy[0])
scene.layoutPoints.append(objs.GeoPoint(scene, None, xyz))
scene.genLayoutWallsByPoints(scene.layoutPoints)
scene.updateLayoutGeometry()
return scene
def f1_score(pred, gt):
TP = np.zeros(gt.shape); FP = np.zeros(gt.shape)
FN = np.zeros(gt.shape); TN = np.zeros(gt.shape)
TP[(pred==gt) & (pred == 1)] = 1
FP[(pred!=gt) & (pred == 1)] = 1
FN[(pred!=gt) & (gt == 1)] = 1
TN[(pred==gt) & (pred == 0)] = 1
TP = np.sum(TP); FP = np.sum(FP)
FN = np.sum(FN); TN = np.sum(TN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
accuracy = (TP + TN) / (gt.shape[0]*gt.shape[1])
f1_score = 2 / ((1 / precision) + (1 / recall))
return f1_score
def fit_layout(data, max_cor=12):
#find max connective component
ret, data_thresh = cv2.threshold(data, 0.5, 1,0)
data_thresh = np.uint8(data_thresh)
data_img, data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
# crop data sub as f1 true
sub_x, sub_y, w, h = cv2.boundingRect(data_cnt[0])
data_sub = data_thresh[sub_y:sub_y+h,sub_x:sub_x+w]
pred = np.ones(data_sub.shape)
st = 0.25
min_score = 0.1
######
def loss_ul(x):
sample = pred.copy()
sample[0:int(x[0]), 0:int(x[1])] = 0
return -f1_score(sample, data_sub)
res = minimize(loss_ul, np.array([h*st, w*st]), method='nelder-mead',
bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
ul = res.x.astype(int)
######
def loss_ur(x):
sample = pred.copy()
sample[0:int(x[0]), int(x[1]):w] = 0
return -f1_score(sample, data_sub)
res = minimize(loss_ur, np.array([h*st, w*(1-st)]), method='nelder-mead',
bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
ur = res.x.astype(int)
######
def loss_dr(x):
sample = pred.copy()
sample[int(x[0]):h, int(x[1]):w] = 0
return -f1_score(sample, data_sub)
res = minimize(loss_dr, np.array([h*(1-st), w*(1-st)]), method='nelder-mead',
bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
dr = res.x.astype(int)
######
def loss_dl(x):
sample = pred.copy()
sample[int(x[0]):h, 0:int(x[1])] = 0
return -f1_score(sample, data_sub)
res = minimize(loss_dl, np.array([h*(1-st), w*st]), method='nelder-mead',
bounds=[(0,h),(0,w)], options={'xtol': 1e-8, 'disp': False})
dl = res.x.astype(int)
#print([ul, ur, dr, dl])
s_ul = ul[0]*ul[1] / np.sum(data_sub)
s_ur = ur[0]*(w-ur[1]) / np.sum(data_sub)
s_dr = (h-dr[0])*(w-dr[1]) / np.sum(data_sub)
s_dl = (h-dl[0])*dl[1] / np.sum(data_sub)
#print([s_ul, s_ur, s_dr, s_dl])
sort_idx = list(np.argsort([s_ul, s_ur, s_dr, s_dl])[::-1])
assert max_cor in [4, 6, 8, 10, 12]
    max_idx = (max_cor - 4) // 2
if s_ul > min_score and (sort_idx.index(0) < max_idx):
pred[0:int(ul[0]), 0:int(ul[1])] = 0
if s_ur > min_score and (sort_idx.index(1) < max_idx):
pred[0:int(ur[0]), int(ur[1]):w] = 0
if s_dr > min_score and (sort_idx.index(2) < max_idx):
pred[int(dr[0]):h, int(dr[1]):w] = 0
if s_dl > min_score and (sort_idx.index(3) < max_idx):
pred[int(dl[0]):h, 0:int(dl[1])] = 0
pred = np.uint8(pred)
pred_img, pred_cnt, pred_heri = cv2.findContours(pred, 1, 3)
polygon = [(p[0][1], p[0][0]) for p in pred_cnt[0][::-1]]
Y = np.array([p[0]+sub_y for p in polygon])
X = np.array([p[1]+sub_x for p in polygon])
fp_points = np.concatenate( (Y[np.newaxis,:],X[np.newaxis,:]), axis=0)
layout_fp = np.zeros(data.shape)
rr, cc = draw.polygon(fp_points[0], fp_points[1])
rr = np.clip(rr, 0, data.shape[0]-1)
cc = np.clip(cc, 0, data.shape[1]-1)
layout_fp[rr,cc] = 1
if False:
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax1.imshow(data_sub)
ax2 = fig.add_subplot(1,2,2)
ax2.imshow(pred)
plt.show()
return layout_fp, fp_points
'''
def fit_layout(data, scale=None, max_cor=12):
ret, data_thresh = cv2.threshold(data, 0.5, 1,0)
data_thresh = np.uint8(data_thresh)
data_img, data_cnt, data_heri = cv2.findContours(data_thresh, 1, 2)
data_cnt.sort(key=lambda x: cv2.contourArea(x), reverse=True)
sub_x,sub_y,w,h = cv2.boundingRect(data_cnt[0])
data_sub = data_thresh[sub_y:sub_y+h,sub_x:sub_x+w]
if False:
data_sub_invert = np.uint8(np.ones(data_sub.shape) - data_sub)
label_num, labels = cv2.connectedComponents(data_sub_invert)
for i in range(1, label_num):
score = np.count_nonzero(labels == i) / (data_sub.shape[0]*data_sub.shape[1])
if score < 0.05:
data_sub[labels==i] = 1
#utils.showImage(data_sub)
#img = data_sub[:,:,np.newaxis] * 255
#data_sub_img = np.concatenate([img, img, img], axis=2)
dp = np.zeros(data_sub.shape)
score_map = np.zeros(data_sub.shape)
score_map[data_sub == 1] = -10
score_map[data_sub == 0] = 1
size_h = data_sub.shape[0]-1
size_w = data_sub.shape[1]-1
ul = [(0,0), (size_h, size_w)]
ur = [(0,size_w), (size_h, 0)]
dl = [(size_h,0), (0, size_w)]
dr = [(size_h,size_w), (0, 0)]
def find_rect_pt(box):
start, end = box[0], box[1]
vec = np.clip([end[0]-start[0], end[1]-start[1]], -1, 1)
dp = np.zeros( (data_sub.shape[0], data_sub.shape[1]) )
for i in np.arange(start[0]+vec[0], end[0], vec[0]):
for j in np.arange(start[1]+vec[1], end[1], vec[1]):
dp[i][j] = dp[i-vec[0]][j] + dp[i][j-vec[1]] - dp[i-vec[0]][j-vec[1]] + score_map[i][j]
score = dp.max() / (data_sub.shape[0]*data_sub.shape[1])
if score <= 0.05:
return None, 0
point = np.argwhere(dp.max() == dp)[0]
return point, score
polygon = []
p_ul, s_ul = find_rect_pt(ul)
p_ur, s_ur = find_rect_pt(ur)
p_dr, s_dr = find_rect_pt(dr)
p_dl, s_dl = find_rect_pt(dl)
sort_idx = list(np.argsort([s_ul, s_ur, s_dr, s_dl])[::-1])
assert max_cor in [4, 6, 8, 10, 12]
max_idx = (max_cor-4)/2
if (p_ul is None) or (sort_idx.index(0) >= max_idx) :
polygon.append(ul[0])
else:
polygon += [(p_ul[0],ul[0][1]), tuple(p_ul) ,(ul[0][0], p_ul[1])]
if p_ur is None or (sort_idx.index(1) >= max_idx) :
polygon.append(ur[0])
else:
polygon += [(ur[0][0], p_ur[1]), tuple(p_ur) ,(p_ur[0], ur[0][1])]
if p_dr is None or (sort_idx.index(2) >= max_idx) :
polygon.append(dr[0])
else:
polygon += [(p_dr[0], dr[0][1]), tuple(p_dr) ,(dr[0][0], p_dr[1])]
if p_dl is None or (sort_idx.index(3) >= max_idx) :
polygon.append(dl[0])
else:
polygon += [(dl[0][0], p_dl[1]), tuple(p_dl) ,(p_dl[0], dl[0][1])]
Y = np.array([p[0]+sub_y for p in polygon])
X = np.array([p[1]+sub_x for p in polygon])
fp_points = np.concatenate( (Y[np.newaxis,:],X[np.newaxis,:]), axis=0)
if scale is not None:
fp_points = fp_points.astype(float)
fp_points[0] -= data.shape[0]/2
fp_points[1] -= data.shape[1]/2
fp_points *= scale
fp_points[0] += data.shape[0]/2
fp_points[1] += data.shape[1]/2
fp_points = fp_points.astype(int)
layout_fp = np.zeros(data.shape)
rr, cc = draw.polygon(fp_points[0],fp_points[1])
rr = np.clip(rr, 0, data.shape[0]-1)
cc = np.clip(cc, 0, data.shape[1]-1)
layout_fp[rr,cc] = 1
return layout_fp, fp_points
'''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--i', required=True)
args = parser.parse_args()
data_path = args.i
#for filepath in glob.iglob(data_path + '/*.npy'):
#for i in range(404):
for i in [91, 104, 145, 159, 167, 194, 215, 223, 253, 256, 261, 266, 300, 304, 357, 358]:
filepath = os.path.join(data_path, '{0}.npy'.format(i))
print(filepath)
data = np.load(filepath, encoding = 'bytes')[()]
#color = data['color']
#fp_floor = data['fp_floor']
fp_pred = data['pred_fp_merge']
layout_fp, fp_points = fit_layout(fp_pred)
#print(fp_points)
if True:
fig = plt.figure()
ax3 = fig.add_subplot(2,1,1)
ax3.imshow(fp_pred)
ax4 = fig.add_subplot(2,1,2)
ax4.imshow(layout_fp)
plt.show()
|
1687190
|
from trackintel.model.util import _copy_docstring
from functools import WRAPPER_ASSIGNMENTS
from trackintel.io.postgis import read_trips_postgis
class Test_copy_docstring:
def test_default(self):
@_copy_docstring(read_trips_postgis)
def bar(b: int) -> int:
"""Old docstring."""
pass
        old_docs = """Old docstring."""
for wa in WRAPPER_ASSIGNMENTS:
attr_foo = getattr(read_trips_postgis, wa)
attr_bar = getattr(bar, wa)
if wa == "__doc__":
assert attr_foo == attr_bar
assert attr_bar != old_docs
else:
assert attr_foo != attr_bar
|
1687191
|
import sys
from typing import Tuple, Callable, Any
from protoactor.persistence.messages import Event, Snapshot, RecoverSnapshot, RecoverEvent, PersistedEvent, \
PersistedSnapshot
from protoactor.persistence.providers.abstract_provider import AbstractSnapshotStore, AbstractEventStore
from protoactor.persistence.snapshot_strategies.abstract_snapshot_strategy import AbstractSnapshotStrategy
class NoSnapshots(AbstractSnapshotStrategy):
def should_take_snapshot(self, persisted_event) -> bool:
return False
class NoEventStore(AbstractEventStore):
    async def get_events(self, actor_name: str, index_start: int, index_end: int,
                         callback: Callable[[Any], None]) -> int:
        return -sys.maxsize - 1  # smallest practical int; sys.int_info has no 'min' attribute
    async def persist_event(self, actor_name: str, index: int, event: Any) -> int:
pass
async def delete_events(self, actor_name: str, inclusive_to_index: int) -> None:
pass
class NoSnapshotStore(AbstractSnapshotStore):
    async def get_snapshot(self, actor_name: str) -> Tuple[Any, int]:
return None, 0
    async def persist_snapshot(self, actor_name: str, index: int, snapshot: Any) -> None:
pass
async def delete_snapshots(self, actor_name: str, inclusive_to_index: int) -> None:
pass
class Persistence:
def __init__(self, event_store: AbstractEventStore,
snapshot_store: AbstractSnapshotStore,
actor_id: str,
apply_event: Callable[[Event], None] = None,
apply_snapshot: Callable[[Snapshot], None] = None,
snapshot_strategy: AbstractSnapshotStrategy = None,
                 get_state: Callable[[], Any] = None):
self._event_store = event_store
self._snapshot_store = snapshot_store
self._actor_id = actor_id
self._apply_event = apply_event
self._apply_snapshot = apply_snapshot
self._get_state = get_state
if snapshot_strategy is None:
self._snapshot_strategy = NoSnapshots()
else:
self._snapshot_strategy = snapshot_strategy
self._index = -1
@property
def index(self) -> int:
return self._index
@property
def _using_snapshotting(self) -> bool:
return self._apply_snapshot is not None
@property
def _using_event_sourcing(self) -> bool:
return self._apply_event is not None
@staticmethod
def with_event_sourcing(event_store: AbstractEventStore,
actor_id: str,
apply_event: Callable[[Event], None]) -> 'Persistence':
if event_store is None:
raise ValueError('event store is empty')
if apply_event is None:
raise ValueError('apply event is empty')
return Persistence(event_store, NoSnapshotStore(), actor_id, apply_event)
@staticmethod
def with_snapshotting(snapshot_store: AbstractSnapshotStore,
actor_id: str,
apply_snapshot: Callable[[Snapshot], None]) -> 'Persistence':
if snapshot_store is None:
raise ValueError('snapshot store is empty')
if apply_snapshot is None:
raise ValueError('apply snapshot is empty')
return Persistence(NoEventStore(), snapshot_store, actor_id, None, apply_snapshot)
@staticmethod
def with_event_sourcing_and_snapshotting(event_store: AbstractEventStore,
snapshot_store: AbstractSnapshotStore,
actor_id: str,
apply_event: Callable[[Event], None],
apply_snapshot: Callable[[Snapshot], None],
snapshot_strategy: AbstractSnapshotStrategy = None,
get_state: Callable[[], Any] = None) -> 'Persistence':
if event_store is None:
raise ValueError('event store is empty')
if snapshot_store is None:
raise ValueError('snapshot store is empty')
if apply_event is None:
raise ValueError('apply event is empty')
if apply_snapshot is None:
raise ValueError('apply snapshot is empty')
if snapshot_strategy is None and get_state is not None:
raise ValueError('snapshot strategy is empty')
if get_state is None and snapshot_strategy is not None:
raise ValueError('get state is empty')
return Persistence(event_store,
snapshot_store,
actor_id,
apply_event,
apply_snapshot,
snapshot_strategy,
get_state)
async def recover_state(self) -> None:
snapshot, last_snapshot_index = await self._snapshot_store.get_snapshot(self._actor_id)
if snapshot is not None:
self._index = last_snapshot_index
self._apply_snapshot(RecoverSnapshot(snapshot, last_snapshot_index))
def apply_events(event):
self._index = self._index + 1
self._apply_event(RecoverEvent(event, self._index))
from_event_index = self._index + 1
await self._event_store.get_events(self._actor_id, from_event_index, sys.maxsize, apply_events)
    async def replay_events(self, from_index: int, to_index: int) -> None:
        if self._apply_event is None:
            raise Exception('Events cannot be replayed without using Event Sourcing.')
        local_index = from_index

        def apply_events(event):
            # use a closure variable, not an (undefined) instance attribute
            nonlocal local_index
            self._apply_event(RecoverEvent(event, local_index))
            local_index += 1

        await self._event_store.get_events(self._actor_id, from_index, to_index, apply_events)
async def persist_event(self, event: Any) -> None:
if self._apply_event is None:
            raise Exception('Events cannot be persisted without using Event Sourcing.')
persisted_event = PersistedEvent(event, self._index + 1)
await self._event_store.persist_event(self._actor_id, persisted_event.index, persisted_event.data)
self._index = self._index + 1
self._apply_event(persisted_event)
if self._snapshot_strategy.should_take_snapshot(persisted_event):
persisted_snapshot = PersistedSnapshot(self._get_state(), persisted_event.index)
await self._snapshot_store.persist_snapshot(self._actor_id,
persisted_snapshot.index,
persisted_snapshot.state)
async def persist_snapshot(self, snapshot: Any) -> None:
persisted_snapshot = PersistedSnapshot(snapshot, self._index)
await self._snapshot_store.persist_snapshot(self._actor_id, persisted_snapshot.index, snapshot)
async def delete_snapshots(self, inclusive_to_index: int) -> None:
await self._snapshot_store.delete_snapshots(self._actor_id, inclusive_to_index)
async def delete_events(self, inclusive_to_index: int) -> None:
await self._event_store.delete_events(self._actor_id, inclusive_to_index)
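# A minimal, hypothetical usage sketch (MyEventStore and my_apply_event are
# placeholders for a concrete AbstractEventStore and an event handler):
#   persistence = Persistence.with_event_sourcing(MyEventStore(), 'actor-1', my_apply_event)
#   await persistence.recover_state()
#   await persistence.persist_event(SomethingHappened())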
|
1687208
|
from setuptools import setup, find_packages
import medmnist
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
def requirements():
with open('requirements.txt') as f:
required = f.read().splitlines()
return required
setup(
name='medmnist',
version=medmnist.__version__,
url=medmnist.HOMEPAGE,
license='Apache-2.0 License',
author='<NAME> and <NAME>',
author_email='<EMAIL>',
description='MedMNIST v2: A Large-Scale Lightweight Benchmark for 2D and 3D Biomedical Image Classification',
long_description=readme(),
packages=find_packages(),
install_requires=requirements(),
zip_safe=True
)
|
1687214
|
from create_response import response
flag = 0
while flag == 0:
    user_response = input()
    flag = response(user_response)
|
1687288
|
from features.numpy_sift import SIFTDescriptor
import numpy as np
import features.feature_utils
from features.DetectorDescriptorTemplate import DetectorAndDescriptor
class np_sift(DetectorAndDescriptor):
def __init__(self, peak_thresh=10.0):
super(
np_sift,
self).__init__(
name='np_sift',
is_detector=True,
is_descriptor=True,
is_both=True,
patch_input=True)
self.peak_thresh = peak_thresh
self.descriptor = None
def detect_feature(self, image):
pass
def extract_descriptor(self, image, feature):
pass
def extract_all(self, image):
pass
def extract_descriptor_from_patch(self, patches):
        patch_num = patches.shape[0]
        w = patches.shape[2]  # patch width; the SIFT descriptor is sized to match
if self.descriptor is None or self.descriptor.patchSize != w:
self.descriptor = SIFTDescriptor(w)
descriptors = np.zeros((patch_num, 128))
for i in range(patch_num):
patch = features.feature_utils.all_to_gray(patches[i, :, :, :])
patch = patch[:, :, 0]
descriptors[i, :] = self.descriptor.describe(patch).flatten()
return descriptors
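# A hypothetical usage sketch (random patches stand in for real image patches):
#   extractor = np_sift()
#   descriptors = extractor.extract_descriptor_from_patch(np.random.rand(4, 32, 32, 3))
#   descriptors.shape  # -> (4, 128)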
|
1687301
|
import glob
import logging
import os
import re
import shutil
import subprocess
import tempfile
from boto.s3.key import Key
from boto.s3.connection import S3Connection
"""
The functions below are minimal Python wrappers around Ghostscript, Tika, and
Tesseract. They are intended to simplify converting pdf files into usable text.
"""
class TextExtraction:
""" The TextExtraction class contains functions for extracting and saving
metadata and text from all files compatible with Apache Tika"""
def __init__(self, doc_path, tika_port=9998, host='localhost'):
self.doc_path = doc_path
self.root, self.extension = os.path.splitext(doc_path)
self.tika_port = tika_port
self.text_args = ['curl', '-T', doc_path,
'http://%s:%s/tika' % (host, tika_port),
'-s', '--header', 'Accept: text/plain']
self.metadata_args = ['curl', '-T', doc_path,
'http://%s:%s/meta' % (host, tika_port),
'-s', '--header', 'Accept: application/json']
def save(self, document, ext):
""" Save document to root location """
export_path = self.root + ext
with open(export_path, 'w') as f:
f.write(document)
def doc_to_text(self):
""" Converts a document to text using the Tika server """
document = subprocess.check_output(self.text_args)
logging.info("%s converted to text from pdf", self.doc_path)
return document
def extract_metadata(self):
"""
Extracts metadata using Tika into a json file
"""
metadata = subprocess.check_output(self.metadata_args)
self.save(metadata.decode('utf-8'), ext='_metadata.json')
def extract(self):
"""
        Converts and extracts metadata for any document type compatible
        with Tika (http://tika.apache.org/1.7/formats.html), but does not
        check whether extraction produces text.
"""
self.extract_metadata()
self.save(self.doc_to_text().decode('utf-8'), ext='.txt')
class PDFTextExtraction(TextExtraction):
""" PDFTextExtraction adds OCR functionality to TextExtraction. The ORC
functionality is triggered only if a PDF document is not responsive or
if Tika fails to extract text """
def __init__(self, doc_path, tika_port=9998,
host='localhost', word_threshold=10):
super().__init__(doc_path, tika_port, host)
self.WORDS = re.compile('[A-Za-z]{3,}')
self.word_threshold = word_threshold
def meets_len_threshold(self, doc_text):
"""
Return True if number of words in text are more than the threshold
"""
if len(tuple(self.WORDS.finditer(doc_text))) > self.word_threshold:
return True
def has_text(self):
"""
        Using `pdffonts`, returns True if the document has fonts, which in
        essence means it has text. If the document is not a pdf, this
        automatically returns True.
"""
args = ['pdffonts', self.doc_path]
pdffonts_output = subprocess.Popen(
args,
stdout=subprocess.PIPE,
)
result = None
if pdffonts_output.communicate()[0].decode("utf-8").count("\n") > 2:
result = True
retcode = pdffonts_output.returncode
if retcode:
raise subprocess.CalledProcessError(retcode, args)
if result:
return result
def cat_and_clean(self, out_file, main_text_file):
""" Concatenates file to main text file and removes individual file """
out_file = out_file + '.txt'
with open(main_text_file, 'a') as append:
with open(out_file) as source:
shutil.copyfileobj(source, append)
os.remove(out_file)
def img_to_text(self):
""" Uses Tesseract OCR to convert png image to text file """
main_text_file = self.root + '.txt'
for png in sorted(glob.glob('%s_*.png' % self.root)):
out_file = png[:-4]
args = ['tesseract', png, out_file, '-l', 'eng']
doc_process = subprocess.Popen(
args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
doc_process.communicate()
if doc_process.returncode:
raise subprocess.CalledProcessError(doc_process.returncode,
args)
self.cat_and_clean(out_file, main_text_file)
logging.info("%s converted to text from image", self.root + '.png')
return main_text_file
def pdf_to_img(self):
""" Converts and saves pdf file to png image using Ghostscript"""
export_path = self.root + "_%03d.png"
args = [
'gs', '-dNOPAUSE', '-dBATCH', '-sDEVICE=pnggray',
'-dINTERPOLATE', '-r300', '-dNumRenderingThreads=8',
'-sOutputFile={0}'.format(export_path), self.doc_path
]
process = subprocess.Popen(
args=args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
process.communicate()
if process.returncode:
raise subprocess.CalledProcessError(process.returncode, args)
logging.info("%s converted to png images", self.doc_path)
return export_path
def extract(self):
"""
Converts pdfs to text and extracts metadata. Uses OCR if the
initial attempt fails.
"""
self.extract_metadata()
needs_ocr = False
# Determine if PDF has text
if not self.has_text():
needs_ocr = True
else:
doc_text = self.doc_to_text().decode('utf-8')
            # Determine if extraction succeeded
if self.meets_len_threshold(doc_text):
self.save(doc_text, ext='.txt')
else:
needs_ocr = True
if needs_ocr:
self.pdf_to_img()
self.img_to_text()
class TextExtractionS3(TextExtraction):
def __init__(self, file_key, s3_bucket, tika_port=9998, host='localhost'):
""" Connects to s3 bucket and downloads file into a temp dir
before using super to initalize like TextExtraction """
self.file_key = file_key
self.s3_bucket = s3_bucket
self.temp = tempfile.TemporaryDirectory()
doc_path = os.path.join(self.temp.name, os.path.basename(file_key))
k = Key(self.s3_bucket)
k.key = self.file_key
k.get_contents_to_filename(doc_path)
super().__init__(doc_path, tika_port, host)
def save(self, document, ext):
""" Save document to s3 """
root, old_ext = os.path.splitext(self.file_key)
s3_path = root + ext
k = Key(self.s3_bucket)
k.key = s3_path
k.set_contents_from_string(str(document))
class PDFTextExtractionS3(TextExtractionS3, PDFTextExtraction):
def __init__(self, file_key, s3_bucket, tika_port=9998, host='localhost',
word_threshold=10):
TextExtractionS3.__init__(self, file_key, s3_bucket, tika_port, host)
self.WORDS = re.compile('[A-Za-z]{3,}')
self.word_threshold = word_threshold
def img_to_text(self):
""" Extends img_to_text from PDFTextExtraction and adds a s3 save
function """
main_text_file = super().img_to_text()
local_base, text_file_name = os.path.split(main_text_file)
s3_base, s3_doc_name = os.path.split(self.file_key)
k = Key(self.s3_bucket)
k.key = os.path.join(s3_base, text_file_name)
k.set_contents_from_filename(main_text_file)
def text_extractor(doc_path, force_convert=False):
"""Checks if document has been converted and sends file to appropriate
converter"""
root, extension = os.path.splitext(doc_path)
if not os.path.exists(root + ".txt") or force_convert:
if extension == '.pdf':
extractor = PDFTextExtraction(doc_path)
else:
extractor = TextExtraction(doc_path)
extractor.extract()
def text_extractor_s3(file_key, s3_bucket, force_convert=True):
""" Checks if document has been converted in s3 bucket and and sends file
to appropriate converter"""
root, extension = os.path.splitext(file_key)
if not force_convert:
if len(list(s3_bucket.list(root + '.txt'))) > 0:
logging.info("%s has already been converted", file_key)
return
if extension == ".pdf":
extractor = PDFTextExtractionS3(file_key, s3_bucket)
else:
extractor = TextExtractionS3(file_key, s3_bucket)
logging.info("%s is being converted", file_key)
extractor.extract()
|
1687326
|
from qgreenland.tasks.common.fetch import FetchDataFiles
from qgreenland.tasks.common.misc import Unrar
from qgreenland.tasks.common.vector import Ogr2OgrVector
from qgreenland.util.luigi import LayerPipeline
class RarredVector(LayerPipeline):
"""Rename files to their final location."""
def requires(self):
fetch_data = FetchDataFiles(
source_cfg=self.cfg['source'],
dataset_cfg=self.cfg['dataset']
) # ->
unrar = Unrar(
requires_task=fetch_data,
layer_id=self.layer_id
) # ->
return Ogr2OgrVector(
requires_task=unrar,
layer_id=self.layer_id
)
|
1687331
|
from collections import defaultdict
import numpy as np
from dateutil import parser
from pandas import DataFrame
from peewee import *
from playhouse.db_url import connect
from config import app_config as cfg
# Connect to the database URL defined in the app_config
db = connect(cfg.database['url'])
def create_database():
db.connect()
db.drop_tables([User, Tweet], True)
db.create_tables([User, Tweet], True)
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
screen_name = CharField()
is_bot = BooleanField()
followers = IntegerField()
following = IntegerField()
def reputation(self):
if self.followers == 0:
return 0
else:
return self.followers / float(self.followers + self.following)
    @classmethod
    def get_sample(cls, is_bot=False):
        return cls.select().where(cls.is_bot == is_bot)
    @classmethod
    def followers_friends_per_users(cls, users):
data = [{
"followers" : user.followers,
"following" : user.following,
"accountreputation" : user.reputation()
} for user in users]
df = DataFrame(data, columns=["followers", "following", "accountreputation", "CDFx", "CDFy"], index=range(len(users)))
df_size = len(df.index)
df["CDFx"] = np.sort(df["accountreputation"])
df["CDFy"] = np.array(range(df_size)) / float(df_size)
return df
    @staticmethod
    def entropy(X):
        probs = [np.mean(X == c) for c in set(X)]
        return -np.sum([p * np.log2(p) for p in probs])
class Tweet(BaseModel):
user = ForeignKeyField(User, related_name='tweets')
text = CharField()
date = CharField()
source = CharField()
mentions = CharField()
@classmethod
def get_sample(cls, is_bot=False, min_tweets=200):
selected_users = Tweet.select(Tweet.user) \
.group_by(Tweet.user) \
.having(fn.Count(Tweet.user) >= min_tweets)
tweets = (Tweet.select(Tweet).join(User)
.where(
User.is_bot == is_bot,
User.id << selected_users
))
return tweets
@classmethod
def avg_mentions_per_user(cls, tweets):
mentions_per_user = defaultdict(lambda: [])
for tweet in tweets:
count = 0
if len(tweet.mentions) > 0:
count = len(tweet.mentions.split(","))
mentions_per_user[tweet.user_id].append(count)
        avg_per_user = {user: np.mean(mentions) for (user, mentions) in mentions_per_user.items()}
return avg_per_user
@classmethod
def vocabulary_size(cls, tweets):
words_per_user = defaultdict(lambda: set())
for tweet in tweets:
for word in tweet.text.split(" "):
words_per_user[tweet.user_id].add(word)
        return {name: len(words) for (name, words) in words_per_user.items()}
@classmethod
def tweet_density(cls, tweets):
tweets_df = DataFrame(columns=["user_id", "date"], index=range(len(tweets)))
for i, tweet in enumerate(tweets):
date = parser.parse(tweet.date)
tweets_df["date"][i] = str(date.year)+str(date.month)+str(date.day)
tweets_df["user_id"][i] = tweet.user_id
grouped = tweets_df.groupby(['user_id', 'date']).size().reset_index()
count_list_by_user = grouped[0].apply(lambda x: x if (x < 6) else 6).tolist()
mean_count = np.mean(count_list_by_user)
median_count = np.median(count_list_by_user)
return count_list_by_user, mean_count, median_count
@classmethod
def tweet_weekday(cls, tweets):
tweets_df = DataFrame(columns=["user_id", "weekday"], index=range(len(tweets)))
for i, tweet in enumerate(tweets):
tweets_df["weekday"][i] = str(tweet.date.split(' ')[0])
tweets_df["user_id"][i] = tweet.user_id
grouped = tweets_df.groupby(['user_id', 'weekday']).size().reset_index()
list_days = set(grouped["weekday"])
stats_weekdays = DataFrame(columns=["weekday", "mean","std"], index=range(len(list_days)))
stats_weekdays["weekday"] = list_days
stats_weekdays["mean"] = list(map(lambda day : np.mean(grouped[0][grouped["weekday"] == day]),list_days))
stats_weekdays["std"] = list(map(lambda day : np.std(grouped[0][grouped["weekday"] == day]),list_days))
prop_weekdays = DataFrame(columns=["weekday", "prop","std"], index=range(len(list_days)))
prop_weekdays["weekday"] = list_days
prop_weekdays['prop'] = stats_weekdays['mean'] / sum(stats_weekdays['mean'])
prop_weekdays['std'] = stats_weekdays['std'] / sum(stats_weekdays['mean'])
sorted_weekdays = prop_weekdays.reindex([4,3,0,2,5,6,1])
return sorted_weekdays
@classmethod
def top_sources(cls, tweets):
sources = [{"source": tweet.source} for tweet in tweets]
return DataFrame(sources).stack().value_counts()
|
1687412
|
from unittest.mock import ANY
import pytest
from rstream import schema
from rstream.client import Client
pytestmark = pytest.mark.asyncio
async def test_peer_properties(no_auth_client: Client) -> None:
result = await no_auth_client.peer_properties()
assert result["product"] == "RabbitMQ"
async def test_create_stream(client: Client) -> None:
assert await client.stream_exists("test-stream") is False
await client.create_stream("test-stream")
assert await client.stream_exists("test-stream") is True
await client.delete_stream("test-stream")
assert await client.stream_exists("test-stream") is False
async def test_deliver(client: Client, stream: str) -> None:
subscription_id = 1
publisher_id = 1
await client.declare_publisher(stream, "test-reference", publisher_id)
await client.subscribe(stream, subscription_id)
waiter = client.wait_frame(schema.Deliver)
msg = schema.Message(publishing_id=1, data=b"test message")
await client.publish([msg], publisher_id)
assert await waiter == schema.Deliver(
subscription_id=subscription_id,
magic_version=80,
chunk_type=0,
num_entries=1,
num_records=1,
timestamp=ANY,
epoch=1,
chunk_first_offset=0,
chunk_crc=307778378,
data_length=16,
trailer_length=24,
_reserved=0,
data=b"\x00\x00\x00\x0ctest message",
)
await client.credit(subscription_id, 1)
await client.unsubscribe(subscription_id)
await client.delete_publisher(publisher_id)
async def test_query_leader(client: Client, stream: str) -> None:
leader, _ = await client.query_leader_and_replicas(stream)
assert (leader.host, int(leader.port)) == (client.host, int(client.port))
|
1687417
|
from torch.nn import init
def init_net(net, init_type='normal'):
init_weights(net, init_type)
return net
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
# this will apply to each layer
classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type=='normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')#good for relu
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
#print('initialize network with %s' % init_type)
net.apply(init_func)
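# A minimal, hypothetical usage sketch (the two-layer net is a placeholder):
#   import torch.nn as nn
#   net = init_net(nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1)),
#                  init_type='xavier')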
|
1687427
|
import os
from collections import defaultdict
"""
Run from the actual_data directory to get the average runtimes for each actual data test.
"""
files_for_avg = 3
valid_strs = ["data{0}.acp".format(i) for i in range(1, files_for_avg + 1)]
def is_file_path_valid_str(path):
for valid_str in valid_strs:
try:
idx = path.index(valid_str)
return path[0:idx]
except:
pass
return None
for ref_path in os.listdir('./'):
try:
runtimes = defaultdict(float)
for file_path in os.listdir('./' + ref_path + '/igenomics_data'):
complete_file_path = './' + ref_path + '/igenomics_data/' + file_path
prefix_str = is_file_path_valid_str(complete_file_path)
if prefix_str is not None:
with open(complete_file_path) as my_file:
for line_num, line in enumerate(my_file.readlines()):
if line_num == 1:
line_comps = line.split('\t')
runtime = float(line_comps[2])
runtimes[prefix_str] += runtime
break
for key in runtimes.keys():
print("{0}: {1}".format(key, round(runtimes[key] / files_for_avg,
2)))
except Exception:
pass
|
1687436
|
import clr

# Dynamo Python node: IN and OUT are globals provided by the Dynamo environment.
def process_input(func, input):
if isinstance(input, list): return [func(x) for x in input]
else: return func(input)
def journalContainsAPIErrors(journal):
if journal.__repr__() == 'Journal': return journal.ContainsAPIErrors()
else: return False
OUT = process_input(journalContainsAPIErrors,IN[0])
|
1687441
|
import requests
from . import constants, oauth_server
from .bc3_api import _create_session
try:
# noinspection PyCompatibility
from urlparse import urljoin, urlparse, parse_qs
from urllib import quote
except ImportError:
# noinspection PyCompatibility
from urllib.parse import urljoin, urlparse, parse_qs, quote
import webbrowser
class TokenRequester(object):
def __init__(self, client_id, redirect_uri=None, session=None, listen_addr=None):
"""
For completing the OAuth2 authorization flow.
:param client_id: the client ID of the integration you created
:type client_id: str
:param redirect_uri: the URL to redirect to as part of the flow (this is http://localhost:33333 by default)
:type redirect_uri: str
:param session: optionally provide your own Session object
:type session: requests.sessions.Session
:param listen_addr: the address to listen on. Usually this is localhost. It can also be set with the
BC3_OAUTH_BIND_ADDRESS environment variable
:type listen_addr: str
"""
if redirect_uri is None:
redirect_uri = constants.DEFAULT_REDIRECT_URI
if not redirect_uri.lower().startswith("http"):
raise ValueError("'%s' is an invalid Redirect URI. Should be a valid http(s) URL." % redirect_uri)
if session is None:
session = _create_session()
if not listen_addr:
listen_addr = constants.OAUTH_LOCAL_BIND_ADDRESS
self.client_id = client_id
self.redirect_uri = redirect_uri
self._session = session
self._listen_addr = listen_addr
def get_authorization(self):
"""
        Open the user's web browser to complete signing in and allow access. Spawn a local HTTP
        server to handle the redirect URI.
        :return: the user authorization code captured from the OAuth redirect
"""
quoted_uri = quote(self.redirect_uri)
url = constants.AUTHORIZE_URL.format(client_id=self.client_id, redirect_uri=quoted_uri)
print("Attempting to open a browser...")
print("(You may have to copy/paste this into your web browser)\n%s" % url)
webbrowser.open(url)
parsed = urlparse(self.redirect_uri)
listen_port = parsed.port
if listen_port is None:
listen_port = 80 if parsed.scheme == "http" else 443
user_code = oauth_server.wait_for_user_response(self._listen_addr, listen_port)
return user_code
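# A minimal, hypothetical usage sketch ("your-client-id" is a placeholder):
#   requester = TokenRequester(client_id="your-client-id")
#   user_code = requester.get_authorization()  # opens a browser, blocks until redirect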
|
1687467
|
import pathlib
import pytest
from opera.parser import tosca
from opera.storage import Storage
class TestNodePolicies:
@pytest.fixture
def service_template(self, tmp_path, yaml_text):
name = pathlib.PurePath("service.yaml")
(tmp_path / name).write_text(yaml_text(
# language=yaml
"""
tosca_definitions_version: tosca_simple_yaml_1_3
node_types:
steampunk.nodes.VM:
derived_from: tosca.nodes.Compute
interfaces:
Standard:
type: tosca.interfaces.node.lifecycle.Standard
operations:
create: playbooks/create.yaml
delete: playbooks/delete.yaml
scaling_up:
type: steampunk.interfaces.scaling.ScaleUp
scaling_down:
type: steampunk.interfaces.scaling.ScaleDown
autoscaling:
operations:
retrieve_info:
description: Operation for autoscaling.
implementation: playbooks/retrieve_info.yaml
autoscale:
description: Operation for autoscaling.
implementation: playbooks/auto_scale.yaml
inputs:
min_size:
type: float
default: { get_property: [ autoscale, min_size ] }
max_size:
type: float
default: { get_property: [ autoscale, max_size ] }
steampunk.nodes.ConfigureMonitoring:
derived_from: tosca.nodes.SoftwareComponent
interfaces:
Standard:
type: tosca.interfaces.node.lifecycle.Standard
operations:
configure:
implementation: playbooks/configure.yaml
inputs:
cpu_lower_bound:
type: float
default: { get_property: [ steampunk.policies.scaling.ScaleDown, cpu_lower_bound ] }
cpu_upper_bound:
type: float
default: { get_property: [ steampunk.policies.scaling.ScaleUp, cpu_upper_bound ] }
interface_types:
steampunk.interfaces.scaling.ScaleDown:
derived_from: tosca.interfaces.Root
operations:
scale_down:
inputs:
adjustment:
type: float
default: { get_property: [ steampunk.policies.scaling.ScaleDown, adjustment ] }
description: Operation for scaling down.
implementation: playbooks/scale_down.yaml
steampunk.interfaces.scaling.ScaleUp:
derived_from: tosca.interfaces.Root
operations:
scale_up:
inputs:
adjustment:
type: float
default: { get_property: [ steampunk.policies.scaling.ScaleUp, adjustment ] }
description: Operation for scaling up.
implementation: playbooks/scale_up.yaml
policy_types:
steampunk.policies.scaling.ScaleDown:
derived_from: tosca.policies.Scaling
properties:
cpu_lower_bound:
description: The lower bound for the CPU
type: float
required: false
constraints:
- less_or_equal: 20.0
adjustment:
description: The amount by which to scale
type: integer
required: false
constraints:
- less_or_equal: -1
targets: [ steampunk.nodes.VM, steampunk.nodes.ConfigureMonitoring ]
triggers:
steampunk.triggers.scaling.ScaleDown:
description: A trigger for scaling down
event: scale_down_trigger
target_filter:
node: steampunk.nodes.VM
condition:
constraint:
- not:
- and:
- available_instances: [ { greater_than: 42 } ]
- available_space: [ { greater_than: 1000 } ]
action:
- call_operation:
operation: scaling_down.scale_down
inputs:
adjustment: { get_property: [ SELF, adjustment ] }
steampunk.policies.scaling.ScaleUp:
derived_from: tosca.policies.Scaling
properties:
cpu_upper_bound:
description: The upper bound for the CPU
type: float
required: false
constraints:
- greater_or_equal: 80.0
adjustment:
description: The amount by which to scale
type: integer
required: false
constraints:
- greater_or_equal: 1
targets: [ steampunk.nodes.VM, steampunk.nodes.ConfigureMonitoring ]
triggers:
steampunk.triggers.scaling.ScaleUp:
description: A trigger for scaling up
event: scale_up_trigger
target_filter:
node: steampunk.nodes.VM
condition:
constraint:
- not:
- and:
- available_instances: [ { greater_than: 42 } ]
- available_space: [ { greater_than: 1000 } ]
action:
- call_operation:
operation: scaling_up.scale_up
inputs:
adjustment: { get_property: [ SELF, adjustment ] }
steampunk.policies.scaling.AutoScale:
derived_from: tosca.policies.Scaling
properties:
min_size:
type: integer
description: The minimum number of instances
required: true
status: supported
constraints:
- greater_or_equal: 1
max_size:
type: integer
description: The maximum number of instances
required: true
status: supported
constraints:
- greater_or_equal: 10
topology_template:
node_templates:
VM:
type: steampunk.nodes.VM
ConfigureMonitoring:
type: steampunk.nodes.ConfigureMonitoring
policies:
- scale_down:
type: steampunk.policies.scaling.ScaleDown
properties:
cpu_lower_bound: 10
adjustment: 1
- scale_up:
type: steampunk.policies.scaling.ScaleUp
properties:
cpu_upper_bound: 90
adjustment: 5
- autoscale:
type: steampunk.policies.scaling.AutoScale
properties:
min_size: 3
max_size: 7
targets: [ VM ]
triggers:
steampunk.triggers.scaling.AutoScale:
description: A trigger for autoscaling
event: auto_scale_trigger
schedule:
start_time: 2020-04-08T21:59:43.10-06:00
end_time: 2022-04-08T21:59:43.10-06:00
target_filter:
node: VM
requirement: workstation
capability: host_capability
condition:
constraint:
- not:
- and:
- available_instances: [ { greater_than: 42 } ]
- available_space: [ { greater_than: 1000 } ]
period: 60 sec
evaluations: 2
method: average
action:
- call_operation: autoscaling.retrieve_info
- call_operation: autoscaling.autoscale
"""
))
# language=yaml
playbook = \
"""
- hosts: all
tasks:
- name: Debug
debug:
msg: "Just testing."
"""
pathlib.Path.mkdir(tmp_path / "playbooks")
(tmp_path / "playbooks" / "create.yaml").write_text(yaml_text(playbook))
(tmp_path / "playbooks" / "delete.yaml").write_text(yaml_text(playbook))
(tmp_path / "playbooks" / "configure.yaml").write_text(yaml_text(playbook))
(tmp_path / "playbooks" / "scale_up.yaml").write_text(yaml_text(playbook))
(tmp_path / "playbooks" / "scale_down.yaml").write_text(yaml_text(playbook))
(tmp_path / "playbooks" / "retrieve_info.yaml").write_text(yaml_text(playbook))
(tmp_path / "playbooks" / "auto_scale.yaml").write_text(yaml_text(playbook))
storage = Storage(tmp_path / pathlib.Path(".opera"))
storage.write("service.yaml", "root_file")
ast = tosca.load(tmp_path, name)
template = ast.get_template({})
template.instantiate(storage)
yield template
def test_count_policies_for_service_template(self, service_template):
assert len(service_template.policies) == 3
def test_count_policies_for_node(self, service_template):
node_vm = service_template.find_node("VM")
assert len(node_vm.policies) == 3
node_monitoring = service_template.find_node("ConfigureMonitoring")
assert len(node_monitoring.policies) == 2
def test_find_policies_for_node(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policies = [policy.name for policy in node_vm.policies]
assert "scale_down" in node_vm_policies
assert "scale_up" in node_vm_policies
assert "autoscale" in node_vm_policies
node_monitoring = service_template.find_node("ConfigureMonitoring")
node_monitoring_policies = [policy.name for policy in node_monitoring.policies]
assert "scale_down" in node_monitoring_policies
assert "scale_up" in node_monitoring_policies
def test_find_policy_targets(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policy_targets = [policy.targets for policy in node_vm.policies]
assert "VM" in node_vm_policy_targets[0]
assert "VM" in node_vm_policy_targets[1]
assert "VM" in node_vm_policy_targets[2]
def test_find_policy_triggers(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policy_triggers = [policy.triggers for policy in node_vm.policies]
assert "steampunk.triggers.scaling.ScaleDown" in node_vm_policy_triggers[0]
assert "steampunk.triggers.scaling.ScaleUp" in node_vm_policy_triggers[1]
assert "steampunk.triggers.scaling.AutoScale" in node_vm_policy_triggers[2]
def test_find_policy_trigger_events(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policy_triggers = [policy.triggers for policy in node_vm.policies]
assert node_vm_policy_triggers[0]["steampunk.triggers.scaling.ScaleDown"].event.data == "scale_down_trigger"
assert node_vm_policy_triggers[1]["steampunk.triggers.scaling.ScaleUp"].event.data == "scale_up_trigger"
assert node_vm_policy_triggers[2]["steampunk.triggers.scaling.AutoScale"].event.data == "auto_scale_trigger"
def test_find_policy_trigger_target_filter(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policy_triggers = [policy.triggers for policy in node_vm.policies]
assert node_vm_policy_triggers[0]["steampunk.triggers.scaling.ScaleDown"].target_filter[0] == "VM"
assert node_vm_policy_triggers[1]["steampunk.triggers.scaling.ScaleUp"].target_filter[0] == "VM"
assert node_vm_policy_triggers[2]["steampunk.triggers.scaling.AutoScale"].target_filter[0] == "VM"
def test_find_policy_trigger_action(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policy_triggers = [policy.triggers for policy in node_vm.policies]
interface1, operation1, _ = node_vm_policy_triggers[0]["steampunk.triggers.scaling.ScaleDown"].action[0]
assert ("scaling_down", "scale_down") == (interface1, operation1)
interface2, operation2, _ = node_vm_policy_triggers[1]["steampunk.triggers.scaling.ScaleUp"].action[0]
assert ("scaling_up", "scale_up") == (interface2, operation2)
interface3, operation3, _ = node_vm_policy_triggers[2]["steampunk.triggers.scaling.AutoScale"].action[0]
assert ("autoscaling", "retrieve_info") == (interface3, operation3)
interface3, operation3, _ = node_vm_policy_triggers[2]["steampunk.triggers.scaling.AutoScale"].action[1]
assert ("autoscaling", "autoscale") == (interface3, operation3)
def test_find_policy_properties(self, service_template):
node_vm = service_template.find_node("VM")
node_vm_policy_properties = [policy.properties for policy in node_vm.policies]
assert "cpu_lower_bound" in node_vm_policy_properties[0]
assert "adjustment" in node_vm_policy_properties[0]
assert "cpu_upper_bound" in node_vm_policy_properties[1]
assert "adjustment" in node_vm_policy_properties[1]
assert "min_size" in node_vm_policy_properties[2]
assert "max_size" in node_vm_policy_properties[2]
def test_get_policy_properties(self, service_template):
node_vm = service_template.find_node("VM")
assert node_vm.get_property(("scale_down", "cpu_lower_bound")) == 10.0
assert node_vm.get_property(("scale_down", "adjustment")) == 1
assert node_vm.get_property(("scale_up", "cpu_upper_bound")) == 90.0
assert node_vm.get_property(("scale_up", "adjustment")) == 5
assert node_vm.get_property(("autoscale", "min_size")) == 3
assert node_vm.get_property(("autoscale", "max_size")) == 7
|
1687537
|
import numpy as np
from numpy.testing import assert_almost_equal
import torch
import torch.nn as nn
from torch.utils import data
import torch.optim as optim
from mbrltools.pytorch_utils import train, predict, _set_device
torch.manual_seed(0)
class MLP(nn.Module):
"""Multi-Layer Perceptron for the sake of testing."""
def __init__(self, input_size, output_size, layers,
activation=nn.LeakyReLU()):
super(MLP, self).__init__()
model = nn.Sequential()
model.add_module('initial-lin', nn.Linear(input_size, layers[0]))
model.add_module('initial-act', activation)
for i in range(len(layers) - 1):
model.add_module('layer{}-lin'.format(i + 1),
nn.Linear(layers[i], layers[i + 1]))
model.add_module('layer{}-act'.format(i + 1), activation)
model.add_module('final-lin', nn.Linear(layers[-1], output_size))
self.model = model
def forward(self, x):
return self.model(x)
def test_batch_predict():
    # Batch prediction should match non-batch prediction.
# create a simple model (a MLP with 2 inner layers)
device = _set_device()
input_size, output_size, layers = 2, 2, [5, 5]
model = MLP(input_size, output_size, layers)
model = model.to(device)
# create a random dataset. take a number of samples that is not a multiple
# of the batch_size.
n_samples = 26
x = torch.randn(n_samples, input_size)
y = torch.randn(n_samples, output_size)
dataset = data.TensorDataset(x, y)
model = train(model, dataset, n_epochs=1, batch_size=10)
with torch.no_grad():
predictions_without_batch = model(x.to(device)).cpu()
predictions_with_batch = predict(model, x, batch_size=10)
predictions_with_predict_no_batch = predict(model, x, batch_size=None)
assert_almost_equal(
predictions_with_batch.numpy(), predictions_without_batch.numpy())
assert_almost_equal(
predictions_with_batch.numpy(),
predictions_with_predict_no_batch.numpy())
def test_best_model():
# check the best model
loss_fn = torch.nn.MSELoss()
# create a simple model (a MLP with 2 inner layers)
device = _set_device()
input_size, output_size, layers = 2, 2, [50, 50]
model = MLP(input_size, output_size, layers)
model = model.to(device)
# create a random dataset
n_samples = 100
x = torch.randn(n_samples, input_size)
y = 2 * x + 1
x, y = x.to(device), y.to(device)
dataset = data.TensorDataset(x, y)
# create training and validation sets
validation_fraction = 0.5
n_samples = len(dataset)
ind_split = int(np.floor(validation_fraction * n_samples))
dataset_train = data.TensorDataset(*dataset[ind_split:])
dataset_valid = data.TensorDataset(*dataset[:ind_split])
optimizer = optim.Adam(model.parameters(), lr=1e-2)
model, best_val_loss = train(
model, dataset_train, optimizer=optimizer, dataset_valid=dataset_valid,
n_epochs=10, batch_size=20, return_best_model=True, loss_fn=loss_fn)
# check val_loss
X_valid = dataset_valid.tensors[0]
y_valid = dataset_valid.tensors[1]
y_valid_pred = predict(model, X_valid)
val_loss = loss_fn(y_valid, y_valid_pred).item()
assert_almost_equal(val_loss, best_val_loss)
|
1687548
|
import json
from datetime import datetime, timezone
from typing import Dict, Any, NamedTuple, Optional
from uuid import UUID
import bach
from bach import DataFrame
from sql_models.constants import DBDialect
from sql_models.util import is_postgres, is_bigquery
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from tests.functional.bach.test_data_and_utils import run_query
from tests.unit.bach.util import get_pandas_df
from modelhub import ModelHub
from tests_modelhub.data_and_utils.data_json_real import TEST_DATA_JSON_REAL, JSON_COLUMNS_REAL
from tests_modelhub.data_and_utils.data_objectiv import TEST_DATA_OBJECTIV
class DBParams(NamedTuple):
url: str
credentials: Optional[str]
table_name: str
def _convert_moment_to_utc_time(moment: str) -> int:
dt = datetime.fromisoformat(moment)
dt = dt.replace(tzinfo=timezone.utc)
return int(dt.timestamp() * 1e3)
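# A quick sanity check for the helper above (illustrative value, assuming a
# naive ISO string that should be interpreted as UTC):
#   _convert_moment_to_utc_time('1970-01-01 00:00:01') -> 1000  (one second, in ms)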
def get_df_with_json_data_real(db_params: DBParams) -> DataFrame:
engine = create_engine_from_db_params(db_params)
df = DataFrame.from_pandas(
engine=engine,
df=get_pandas_df(dataset=TEST_DATA_JSON_REAL, columns=JSON_COLUMNS_REAL),
convert_objects=True,
)
df['global_contexts'] = df.global_contexts.astype('json')
df['location_stack'] = df.location_stack.astype('json')
return df
def get_objectiv_dataframe_test(db_params=None, time_aggregation=None):
if not db_params:
# by default use PG (this should be removed after modelhub is able to work with all bach engines)
import os
db_url = os.environ.get('OBJ_DB_PG_TEST_URL', 'postgresql://objectiv:@localhost:5432/objectiv')
credentials = None
table_name = 'objectiv_data'
else:
db_url = db_params.url
credentials = db_params.credentials
table_name = db_params.table_name
kwargs = {}
if time_aggregation:
kwargs = {'time_aggregation': time_aggregation}
modelhub = ModelHub(**kwargs)
return modelhub.get_objectiv_dataframe(
db_url=db_url,
table_name=table_name,
bq_credentials_path=credentials,
), modelhub
def get_parsed_objectiv_data(engine):
parsed_data = []
for event_data in TEST_DATA_OBJECTIV:
event_id, day, moment, cookie_id, value = event_data
value = json.loads(value)
# BQ uses time from taxonomy json for getting moment and day
# therefore time value MUST be the same as moment
if is_bigquery(engine):
value['time'] = _convert_moment_to_utc_time(moment)
parsed_data.append(
{
'event_id': UUID(event_id),
'day': datetime.strptime(day, '%Y-%m-%d').date(),
'moment': datetime.fromisoformat(moment),
'cookie_id': UUID(cookie_id),
'value': value
}
)
return parsed_data
def create_engine_from_db_params(db_params: DBParams) -> Engine:
if db_params.credentials:
engine = create_engine(url=db_params.url, credentials_path=db_params.credentials)
else:
engine = create_engine(url=db_params.url)
return engine
def setup_db(engine: Engine, table_name: str):
columns = {
'event_id': bach.SeriesUuid.supported_db_dtype[DBDialect.POSTGRES],
'day': bach.SeriesDate.supported_db_dtype[DBDialect.POSTGRES],
'moment': bach.SeriesTimestamp.supported_db_dtype[DBDialect.POSTGRES],
'cookie_id': bach.SeriesUuid.supported_db_dtype[DBDialect.POSTGRES],
'value': bach.SeriesJson.supported_db_dtype[DBDialect.POSTGRES],
}
_prep_db_table(engine, table_name=table_name, columns=columns)
_insert_records_in_db(engine, table_name=table_name, columns=columns)
def _prep_db_table(engine, table_name: str, columns: Dict[str, Any]):
if is_postgres(engine):
column_stmt = ','.join(f'{col_name} {db_type}' for col_name, db_type in columns.items())
sql = f"""
drop table if exists {table_name};
create table {table_name} ({column_stmt});
alter table {table_name}
owner to objectiv
"""
else:
        raise Exception(f'Unsupported database dialect: {engine.name}')
run_query(engine, sql)
def _insert_records_in_db(engine, table_name: str, columns: Dict[str, Any]):
from tests_modelhub.data_and_utils.data_objectiv import TEST_DATA_OBJECTIV
column_stmt = ','.join(columns.keys())
records = []
if is_postgres(engine):
for record in TEST_DATA_OBJECTIV:
formatted_values = [f"'{record[col_index]}'" for col_index, _ in enumerate(columns)]
records.append(f"({','.join(formatted_values)})")
else:
        raise Exception(f'Unsupported database dialect: {engine.name}')
values_stmt = ','.join(records)
sql = f'insert into {table_name} ({column_stmt}) values {values_stmt}'
run_query(engine, sql)
|
1687557
|
import torch
import torch.nn
from .base import CplxToCplx, BaseCplxToReal
from ... import cplx
class CplxModReLU(CplxToCplx):
r"""Applies soft thresholding to the complex modulus:
$$
F
\colon \mathbb{C} \to \mathbb{C}
\colon z \mapsto (\lvert z \rvert - \tau)_+
\tfrac{z}{\lvert z \rvert}
\,, $$
    with $\tau \in \mathbb{R}$. If `threshold` is not a float (e.g. `None`),
    it becomes a learnable parameter.
"""
def __init__(self, threshold=0.5):
super().__init__()
if not isinstance(threshold, float):
threshold = torch.nn.Parameter(torch.rand(1) * 0.25)
self.threshold = threshold
def forward(self, input):
return cplx.modrelu(input, self.threshold)
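# Illustrative arithmetic for the soft thresholding above (a sketch, not part
# of the original module): for z = 3 + 4j and tau = 0.5, |z| = 5, so
# modrelu(z) = (5 - 0.5) * z / 5 = 0.9 * z = 2.7 + 3.6j; any z with |z| <= tau
# maps to 0.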
class CplxAdaptiveModReLU(CplxToCplx):
r"""Applies soft thresholding to the complex modulus:
$$
F
\colon \mathbb{C}^d \to \mathbb{C}^d
\colon z \mapsto (\lvert z_j \rvert - \tau_j)_+
\tfrac{z_j}{\lvert z_j \rvert}
\,, $$
with $\tau_j \in \mathbb{R}$ being the $j$-th learnable threshold. Torch's
broadcasting rules apply and the passed dimensions must conform with the
    upstream input. `CplxAdaptiveModReLU(1)` learns a common threshold for all
    features of the $d$-dim complex vector, and `CplxAdaptiveModReLU(d)` lets
each dimension have its own threshold.
"""
def __init__(self, *dim):
super().__init__()
self.dim = dim if dim else (1,)
self.threshold = torch.nn.Parameter(torch.randn(*self.dim) * 0.02)
def forward(self, input):
return cplx.modrelu(input, self.threshold)
def __repr__(self):
body = repr(self.dim)[1:-1] if len(self.dim) > 1 else repr(self.dim[0])
return f"{self.__class__.__name__}({body})"
class CplxModulus(BaseCplxToReal):
def forward(self, input):
return abs(input)
class CplxAngle(BaseCplxToReal):
def forward(self, input):
return input.angle
|
1687561
|
from conf import settings
print(settings.MYSQL_HOST) # noqa
print(settings.MYSQL_PASSWD) # noqa
print(settings.EXAMPLE) # noqa
print(settings.current_env) # noqa
print(settings.WORKS) # noqa
assertions = {
"AGE": 15,
"A_DICT": {"NESTED_1": {"NESTED_2": {"NESTED_3": {"NESTED_4": 1}}}},
"BASE_IMAGE": "bla",
"DEV_SERVERS": ["127.0.0.1", "localhost", "development.com"],
"EXAMPLE": True,
"MYSQL_HOST": "localhost",
"NAME": "BRUNO",
"PORT": 8001,
"PRESIDENT": "Lula",
"PROJECT": "hello_world",
"SALARY": 2000,
"VERSION": 1,
"WORKS": "validator",
"MYSQL_PASSWD": "<PASSWORD>",
"FOOBAR": "EMPTY",
}
for key, value in assertions.items():
found = settings.get(key)
assert found == getattr(settings, key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
assertions = {
"AGE": 15,
"A_DICT": {"NESTED_1": {"NESTED_2": {"NESTED_3": {"NESTED_4": 1}}}},
"BASE_IMAGE": "bla",
"DEV_SERVERS": ["127.0.0.1", "localhost", "development.com"],
"EXAMPLE": True,
"MYSQL_HOST": "development.com",
"NAME": "MIKE",
"PORT": 8001,
"PRESIDENT": "Lula",
"PROJECT": "hello_world",
"SALARY": 2000,
"VERSION": 1,
"WORKS": "validator",
"IMAGE_1": "aaa",
"IMAGE_2": "bbb",
"IMAGE_4": "a",
"IMAGE_5": "b",
"MYSQL_PASSWD": "<PASSWORD>",
}
for key, value in assertions.items():
found = settings.from_env("development").get(key)
assert found == getattr(settings.from_env("development"), key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
assertions = {
"AGE": 15,
"A_DICT": {"NESTED_1": {"NESTED_2": {"NESTED_3": {"NESTED_4": 1}}}},
"BASE_IMAGE": "bla",
"DEV_SERVERS": ["127.0.0.1", "localhost", "development.com"],
"EXAMPLE": True,
"MYSQL_HOST": "production.com",
"NAME": "MIKE",
"PORT": 8001,
"PRESIDENT": "Lula",
"PROJECT": "hello_world",
"SALARY": 2000,
"VERSION": 1,
"WORKS": "validator",
"IMAGE_4": "a",
"IMAGE_5": "b",
"MYSQL_PASSWD": "<PASSWORD>",
}
for key, value in assertions.items():
found = settings.from_env("production").get(key)
assert found == getattr(settings.from_env("production"), key)
assert found == value, f"expected: {key}: [{value}] found: [{found}]"
|
1687631
|
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.retinanet import model
from ..common.train import train
import torch
from detectron2.config import LazyCall as L
from detectron2.solver.build import get_default_optimizer_params
from detectron2.modeling.backbone.fpn import LastLevelP6P7
from detectron2.modeling.backbone import FPN
from nndet2.modeling.backbone import SwinTransformerV2
# replace backbone
model.backbone = L(FPN)(
bottom_up=L(SwinTransformerV2)(
patch_size=4,
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=16,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
pretrained_window_sizes=[8, 8, 8, 8],
frozen_stages=-1,
out_features=["stage2", "stage3", "stage4"],
),
in_features=["stage2", "stage3", "stage4"],
out_channels=256,
top_block=L(LastLevelP6P7)(in_channels=768, out_channels="${..out_channels}", in_feature="stage4")
)
model.pixel_mean = [123.675, 116.28, 103.53]
model.pixel_std = [58.395, 57.12, 57.375]
model.input_format = "RGB"
optimizer = L(torch.optim.AdamW)(
params=L(get_default_optimizer_params)(
weight_decay_norm=0.0,
overrides={
"cpb_mlp": {"weight_decay": 0.0},
"logit_scale": {"weight_decay": 0.0},
}
),
lr=1e-04,
weight_decay=0.05,
betas=(0.9, 0.999),
)
dataloader.train.mapper.image_format = "RGB"
dataloader.train.mapper.use_instance_mask = False
|
1687634
|
from .ge_exception import GeException
class GeNeedsReauthenticationError(GeException):
"""Error raised when the reauthentication is needed"""
pass
|
1687647
|
from .pipeline import Exec, Serial, Parallel, Node, Notebook, Params
from .glue import lazy_py, main, lazy_shell, Lazy
from .shared.constants import SameContainer, ContainerReuseContext
from .shared.imagepath import Path
from .image import Image, relpath
from .data import pipeline as temp_data, user as perm_data
from .util import env_bool
from ._version import __version__, __sha1__
from . import api, callback, data, profile, slack, git, nb, schedule
__all__ = [
"Exec",
"Serial",
"Parallel",
"Node",
"Notebook",
"Params",
"main",
"lazy_py",
"lazy_shell",
"Lazy",
"temp_data",
"perm_data",
"data",
"profile",
"slack",
"Image",
"relpath",
"SameContainer", # deprecated
"ContainerReuseContext",
"env_bool",
"api",
"callback",
]
|
1687704
|
from enum import Enum
class Device(Enum):
"""
Enumeration of the devices supported by
Nanograd.
Currently, Nanograd only supports CPU and GPU.
"""
CPU = 1
GPU = 2
|
1687713
|
import click
from fabfile import minify, init_database, local_backup
def bake():
"""
Initialize the database from the backup and minify JS files to configure ("cook") the app.
Uses the functions already written in the fabfile.
"""
init_database('bombolone')
minify()
def serve():
"""
Serve the "cooked" app.
"""
import app
app.main()
def refrigerate():
"""
Makes a local backup of the database, using the function already written in the fabfile.
"""
local_backup()
COMMANDS = {
'bake': bake,
'serve': serve,
'refrigerate': refrigerate
}
@click.command()
@click.argument('command')
def main(command):
"""
Bake, serve and refrigerate your Bombolone app!
:param command: [bake|refrigerate|serve]
`bake`: Initialize your Bombolone app, restoring the database from a backup.
`refrigerate`: Put your Bombolone app in the fridge, making a local backup of the database.
`serve`: Serve your Bombolone app!
"""
    if command not in COMMANDS:
raise click.BadParameter('%s is not something you can do with Bombolone!' % command)
COMMANDS[command]()
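# Hypothetical shell usage (the script's file name is an assumption):
#   python cook.py bake
#   python cook.py serve
#   python cook.py refrigerate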
|
1687716
|
import yaml
from samtranslator.yaml_helper import yaml_parse
def load_yaml(file_path):
"""
Loads a yaml file
Parameters
----------
file_path : Path
File path
Returns
-------
Object
Yaml object
"""
with open(file_path) as f:
data = f.read()
return yaml_parse(data)
def dump_yaml(file_path, yaml_doc):
"""
Writes a yaml object to a file
Parameters
----------
file_path : Path
File path
yaml_doc : Object
Yaml object
"""
with open(file_path, "w") as f:
yaml.dump(yaml_doc, f)
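# Round-trip sketch using the two helpers above (hypothetical file paths):
#   doc = load_yaml("template.yaml")
#   dump_yaml("template-out.yaml", doc)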
|
1687734
|
import numpy as np
from typing import Iterable, Tuple
import warnings
try:
from swarmlib.util.problem_base import ProblemBase
from swarmlib.pso.particle import Particle as PSOParticle
using_swarmlib = True
except ImportError:
using_swarmlib = False
# I like to give the little guys a run...but this isn't quite there.
# This rips out the guts of swarmlib so that optimization isn't tied to its visualization.
# But it's still only good for 1,2,3 dim, and only really works for 2-dim.
# Here we convert 3-dim to 2-dim using space filling curve, which may not be the best idea.
# Hey, I tried.
if using_swarmlib:
class NoVisualizer():
def __init__(self, **kwargs):
self.__lower_boundary = kwargs.get('lower_boundary', 0.)
self.__upper_boundary = kwargs.get('upper_boundary', 4.)
self.__iteration_number = kwargs.get('iteration_number', 10)
self.__intervals = self.__iteration_number + 2 # Two extra intervals for unanimated start and end pose
self.__interval_ms = kwargs.get('interval', 1000)
self.__continuous = kwargs.get('continuous', False)
self._dark = kwargs.get('dark', False)
self.__function = kwargs['function']
self._marker_size = 0
self._index = 0
self._vel_color = '#CFCFCF'
self._marker_color = '#0078D7' if self._dark else '#FF0000'
self._marker_colors = np.empty(0)
self._positions = []
self._velocities = []
self.__frame_interval = 50 # ms
def add_data(self, **kwargs) -> None:
positions: Iterable[Tuple[float, float]] = kwargs['positions']
self._positions.append(np.transpose(positions))
if len(self._positions) == 1:
# Insert the first position twice to show it "unanimated" first.
self._positions.append(np.transpose(positions))
# Calculate at time t the velocity for step t-1
self._velocities.append(self._positions[-1] - self._positions[-2])
class InvisiblePSOProblem(ProblemBase):
def __init__(self, **kwargs):
"""
Initialize a new particle swarm optimization problem.
"""
super().__init__(**kwargs)
self.__iteration_number = kwargs['iteration_number']
self.__particles = [
PSOParticle(**kwargs, bit_generator=self._random)
for _ in range(kwargs['particles'])
]
# The library stores particles in the visualizer .... groan
positions = [particle.position for particle in self.__particles]
self._visualizer = NoVisualizer(**kwargs)
self._visualizer.add_data(positions=positions)
def solve(self) -> PSOParticle:
# And also update global_best_particle
for _ in range(self.__iteration_number):
# Update global best
global_best_particle = min(self.__particles)
for particle in self.__particles:
particle.step(global_best_particle.position)
# Add data for plot
positions = [particle.position for particle in self.__particles]
self._visualizer.add_data(positions=positions)
return global_best_particle
def swarmlib_cube(objective, n_trials, n_dim, with_count=False, algo=None):
""" Minimize a function on the cube using HyperOpt, and audit # of function calls
:param objective: function on (0,1)^n_dim
:param n_trials: Guideline for function evaluations
:param n_dim:
:param with_count:
:return:
"""
    assert algo == 'pso'
    assert n_dim == 2, 'yeah, sorry'
global feval_count
feval_count = 0
def cube_objective(us):
# PSO only handles 2-dim
        assert all(0 <= ui <= 1 for ui in us), 'expecting value on square'
        global feval_count
        feval_count += 1
return objective(us)
iteration_number = 5 if n_trials < 50 else 10
particles = max( int( n_trials / iteration_number), 1)
problem = InvisiblePSOProblem(function=cube_objective, particles=particles, iteration_number=iteration_number,
lower_boundary=0., upper_boundary=1.0)
best_particle = problem.solve()
best_x = best_particle.position.tolist()
best_val = best_particle.value
return (best_val, best_x, feval_count) if with_count else (best_val, best_x)
def swarmlib_pso_cube(objective, n_trials, n_dim, with_count=False):
return swarmlib_cube(objective=objective, n_trials=n_trials, n_dim=n_dim, with_count=with_count, algo='pso')
SWARMLIB_OPTIMIZERS = []  # Not ready for the A-league yet.
if __name__ == '__main__':
from humpday.objectives.classic import CLASSIC_OBJECTIVES
for objective in CLASSIC_OBJECTIVES:
print(' ')
print(objective.__name__)
for n_dim in range(2,4):
print('n_dim='+str(n_dim))
            for optimizer in SWARMLIB_OPTIMIZERS:
print(optimizer(objective, n_trials=100, n_dim=n_dim, with_count=True))
|
1687765
|
import rospy
import rosnode
def reset_vision():
# This will kill the realsense node on the NUC and the image pipeline. Both are set in the launch files to auto-restart.
rospy.logerr('Resetting the realsense nodes.')
rosnode.kill_nodes(['/realsense_nodelet_manager', '/acrv_realsense_wrist_ros'])
rospy.sleep(5)
|
1687790
|
from analytics_attributes import AdAnalyticsAttributes, AnalyticsAttributes
from api_object import ApiObject
#
# This module uses Pinterest API v3 and v4 in two classes:
# * Analytics synchronously retrieves user (organic) reports.
# * AdAnalytics synchronously retrieves advertising reports.
#
class Analytics(AnalyticsAttributes, ApiObject):
"""
This class retrieves user (sometimes called "organic") metrics
using the v5 interface.
"""
def __init__(self, user_id, api_config, access_token):
super().__init__(api_config, access_token)
self.user_id = user_id
self.enumerated_values.update(
{
"paid": {0, 1, 2},
"in_profile": {0, 1, 2},
"from_owned_content": {0, 1, 2},
"downstream": {0, 1, 2},
"pin_format": {
"all",
"product",
"standard",
"standard_product_stl_union",
"standard_product_union",
"standard_stl_union",
"stl",
"story",
"video",
},
"app_types": {"all", "mobile", "tablet", "web"},
"publish_types": {"all", "published"},
"include_curated": {0, 1, 2},
}
)
# https://developers.pinterest.com/docs/redoc/combined_reporting/#operation/v3_analytics_partner_metrics_GET
def get(self, advertiser_id=None):
"""
        Get analytics for the user account. This method accepts advertiser_id
        for symmetry with the v5 interface, but advertiser_id may not be used
        with the v3 or v4 versions of the API.
"""
if advertiser_id:
print("User account analytics for shared accounts are")
print("supported by Pinterest API v5, but not v3 or v4.")
return None
return self.request_data(
f"/v3/partners/analytics/users/{self.user_id}/metrics/?"
+ self.uri_attributes("metric_types", False)
)
# chainable attribute setters...
def paid(self, paid):
self.attrs["paid"] = paid
return self
def in_profile(self, in_profile):
self.attrs["in_profile"] = in_profile
return self
def from_owned_content(self, from_owned_content):
self.attrs["from_owned_content"] = from_owned_content
return self
def downstream(self, downstream):
self.attrs["downstream"] = downstream
return self
def pin_format(self, pin_format):
self.attrs["pin_format"] = pin_format
return self
def app_types(self, app_types):
self.attrs["app_types"] = app_types
return self
def publish_types(self, publish_types):
self.attrs["publish_types"] = publish_types
return self
def include_curated(self, include_curated):
self.attrs["include_curated"] = include_curated
return self
class AdAnalytics(AdAnalyticsAttributes, ApiObject):
"""
This class retrieves advertising delivery metrics with
Pinterest API version v4, which has essentially the same
functionality as v5. A separate module (delivery_metrics)
provides a way to retrieve similar metrics using the v3
asynchronous report functionality.
"""
def __init__(self, api_config, access_token):
super().__init__(api_config, access_token)
self.required_attrs.update({"granularity"})
self.enumerated_values.update(
# https://developers.pinterest.com/docs/redoc/combined_reporting/#operation/ads_v3_create_advertiser_delivery_metrics_report_POST
{
"attribution_types": {"INDIVIDUAL", "HOUSEHOLD"},
"conversion_report_time": {"AD_EVENT", "CONVERSION_EVENT"},
}
)
def request(self, request_uri):
return self.request_data(request_uri + self.uri_attributes("columns", True))
# https://developers.pinterest.com/docs/redoc/adtech_ads_v4/#operation/get_advertiser_delivery_metrics_handler
def get_ad_account(self, advertiser_id):
"""
Get analytics for the ad account.
"""
return self.request(f"/ads/v4/advertisers/{advertiser_id}/delivery_metrics?")
# https://developers.pinterest.com/docs/redoc/adtech_ads_v4/#operation/get_campaign_delivery_metrics_handler
def get_campaign(self, advertiser_id, campaign_id):
"""
Get analytics for the campaign.
"""
request_uri = f"/ads/v4/advertisers/{advertiser_id}/campaigns/delivery_metrics"
request_uri += f"?campaign_ids={campaign_id}&"
return self.request(request_uri)
# https://developers.pinterest.com/docs/redoc/adtech_ads_v4/#operation/get_ad_group_delivery_metrics_handler
def get_ad_group(self, advertiser_id, _campaign_id, ad_group_id):
"""
Get analytics for the ad group.
"""
request_uri = f"/ads/v4/advertisers/{advertiser_id}/ad_groups/delivery_metrics"
request_uri += f"?ad_group_ids={ad_group_id}&"
return self.request(request_uri)
# https://developers.pinterest.com/docs/redoc/adtech_ads_v4/#operation/get_ad_delivery_metrics_handler
def get_ad(self, advertiser_id, _campaign_id, _ad_group_id, ad_id):
"""
Get analytics for the ad.
"""
request_uri = f"/ads/v4/advertisers/{advertiser_id}/ads/delivery_metrics"
request_uri += f"?ad_ids={ad_id}&"
return self.request(request_uri)
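# A hypothetical call sequence (the IDs are invented, and the chainable
# attribute setters are assumed to come from AdAnalyticsAttributes):
#   analytics = AdAnalytics(api_config, access_token)
#   analytics.start_date("2021-01-01").end_date("2021-01-31").granularity("DAY")
#   report = analytics.get_ad_account("12345")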
|
1687801
|
import click
from crud.cli.utils import CLICK_ENDPOINT, CLICK_MAPPING
@click.command(short_help="Put chained items in endpoint")
@click.argument("dest", type=CLICK_ENDPOINT)
@click.option("-k", "--kwargs", type=CLICK_MAPPING,
help="""kwargs dict as yaml/json format string or @file.yaml, i.e., '{"level": "series"}'""")
@click.pass_context
def put(ctx, dest, kwargs):
"""Put chained items in endpoint"""
click.echo(click.style('Putting Items in Dest', underline=True, bold=True))
if not kwargs:
kwargs = {}
for item in ctx.obj.get("items", []):
dest.put(item, **kwargs)
|
1687830
|
from snet.sdk.payment_strategies.payment_staregy import PaymentStrategy
class PrePaidPaymentStrategy(PaymentStrategy):
def __init__(self, concurrency_manager, block_offset=240, call_allowance=1):
self.concurrency_manager = concurrency_manager
self.block_offset = block_offset
self.call_allowance = call_allowance
def get_price(self, service_client):
return service_client.get_price() * self.concurrency_manager.concurrent_calls
def get_payment_metadata(self, service_client, channel):
if channel is None:
channel = self.select_channel(service_client)
token = self.concurrency_manager.get_token(service_client, channel, self.get_price(service_client))
metadata = [
("snet-payment-type", "prepaid-call"),
("snet-payment-channel-id", str(channel.channel_id)),
("snet-payment-channel-nonce", str(channel.state["nonce"])),
("snet-prepaid-auth-token-bin", bytes(token, 'UTF-8'))
]
return metadata
def get_concurrency_token_and_channel(self, service_client):
channel = self.select_channel(service_client)
token = self.concurrency_manager.get_token(service_client, channel, self.get_price(service_client))
return token, channel
def select_channel(self, service_client):
account = service_client.account
service_client.load_open_channels()
service_client.update_channel_states()
payment_channels = service_client.payment_channels
service_call_price = self.get_price(service_client)
extend_channel_fund = service_call_price * self.call_allowance
mpe_balance = account.escrow_balance()
default_expiration = service_client.default_channel_expiration()
if len(payment_channels) < 1:
if service_call_price > mpe_balance:
payment_channel = service_client.deposit_and_open_channel(service_call_price,
default_expiration + self.block_offset)
else:
payment_channel = service_client.open_channel(service_call_price,
default_expiration + self.block_offset)
else:
payment_channel = payment_channels[0]
if self.__has_sufficient_funds(payment_channel, service_call_price) \
and not self.__is_valid(payment_channel, default_expiration):
payment_channel.extend_expiration(default_expiration + self.block_offset)
elif not self.__has_sufficient_funds(payment_channel, service_call_price) and \
self.__is_valid(payment_channel, default_expiration):
payment_channel.add_funds(extend_channel_fund)
elif not self.__has_sufficient_funds(payment_channel, service_call_price) and \
not self.__is_valid(payment_channel, default_expiration):
payment_channel.extend_and_add_funds(default_expiration + self.block_offset, extend_channel_fund)
return payment_channel
@staticmethod
def __has_sufficient_funds(channel, amount):
return channel.state["available_amount"] >= amount
@staticmethod
def __is_valid(channel, expiry):
return channel.state["expiration"] >= expiry
|
1687878
|
import pdb
import z3
import helpers.vcommon as CM
import settings
from data.inv.base import Inv
import data.inv.invs
dbg = pdb.set_trace
mlog = CM.getLogger(__name__, settings.LOGGER_LEVEL)
class PrePost(Inv):
"""
Set of Preconds -> PostInv
"""
def __init__(self, preconds, postcond, stat=None):
assert isinstance(preconds, data.inv.invs.Invs), preconds
# assert postcond.is_eqt, postcond
super().__init__((frozenset(preconds), postcond), stat)
self.preconds = preconds
self.is_conj = True # conj preconds
self.postcond = postcond
def expr(self, use_reals):
"""
And(preconds) -> postcond
"""
if self.is_conj:
pre = z3.And([c.expr(use_reals) for c in self.preconds])
else:
pre = z3.Or([c.expr(use_reals) for c in self.preconds])
post = self.postcond.expr(use_reals)
return z3.Implies(pre, post)
def __str__(self, print_stat=False):
delim = " & " if self.is_conj else " | "
return "({}) => {} {}".format(
self.preconds.__str__(delim=delim),
self.postcond, self.stat)
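if __name__ == "__main__":
    # A minimal, self-contained sketch (hypothetical preconds/postcond) of the
    # implication shape that PrePost.expr assembles: And(preconds) -> postcond.
    x, y = z3.Ints("x y")
    example_implication = z3.Implies(z3.And(x > 0, y > 0), x + y > 0)
    print(example_implication)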
|
1687882
|
from dataclasses import dataclass
import multiprocessing
from io import StringIO
from unittest import mock
from typing import List
from pytest_cov.embed import cleanup_on_sigterm
import pytest
from outrun.rpc import Client, Encoding, InvalidTokenError, Server
def start_server_process(server: Server) -> multiprocessing.Process:
def run_server():
cleanup_on_sigterm()
server.serve("tcp://127.0.0.1:8000")
proc = multiprocessing.Process(target=run_server)
proc.start()
return proc
def test_call():
class Service:
@staticmethod
def add(a, b):
return a + b
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
assert client.add(3, 5) == 8
finally:
server_process.terminate()
server_process.join()
def test_nonexistent_call():
class Service:
pass
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
with pytest.raises(AttributeError):
client.foo()
finally:
server_process.terminate()
server_process.join()
def test_successful_ping():
class Service:
pass
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
client.ping()
finally:
server_process.terminate()
server_process.join()
def test_failing_ping_with_custom_timeout():
class Service:
pass
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=-1)
with pytest.raises(IOError):
client.ping(timeout_ms=1)
def test_timeout():
class Service:
pass
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1)
with pytest.raises(IOError):
client.foo()
def test_socket_per_thread():
class Service:
pass
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
with mock.patch("threading.current_thread") as m:
m.return_value = 1
client.ping()
m.return_value = 2
client.ping()
assert client.socket_count == 2
finally:
server_process.terminate()
server_process.join()
def test_tuple_serialization():
class Service:
@staticmethod
def get_tuple():
return (1, 2, 3)
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
# tuples are serialized as lists
assert client.get_tuple() == [1, 2, 3]
finally:
server_process.terminate()
server_process.join()
def test_dataclasses():
@dataclass
class Point:
x: int
y: int
@dataclass
class Line:
p1: Point
p2: Point
class Service:
@staticmethod
def make_line(p1: Point, p2: Point) -> Line:
return Line(p1, p2)
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
p1 = Point(1, 2)
p2 = Point(3, 4)
assert client.make_line(p1, p2) == Line(p1, p2)
finally:
server_process.terminate()
server_process.join()
def test_dataclass_in_container_type():
@dataclass
class Point:
x: int
y: int
class Service:
@staticmethod
def make_point_list(x: int, y: int) -> List[Point]:
return [Point(x, y)]
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
assert client.make_point_list(1, 2) == [Point(1, 2)]
finally:
server_process.terminate()
server_process.join()
def test_builtin_exceptions():
class Service:
@staticmethod
def os_failure():
raise OSError("foo")
@staticmethod
def value_failure():
raise ValueError("bar")
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
with pytest.raises(OSError) as e:
client.os_failure()
assert e.value.args == ("foo",)
with pytest.raises(ValueError) as e:
client.value_failure()
assert e.value.args == ("bar",)
finally:
server_process.terminate()
server_process.join()
def test_custom_exception():
class CustomException(Exception):
pass
class Service:
@staticmethod
def custom_failure():
raise CustomException("a", "b", "c")
server = Server(Service())
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
with pytest.raises(Exception) as e:
client.custom_failure()
assert e.value.args == ("a", "b", "c")
finally:
server_process.terminate()
server_process.join()
def test_missing_token():
class Service:
pass
server = Server(Service(), token="<PASSWORD>")
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", timeout_ms=1000)
with pytest.raises(InvalidTokenError):
client.ping()
finally:
server_process.terminate()
server_process.join()
def test_invalid_token():
class Service:
pass
server = Server(Service(), token="<PASSWORD>")
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", token="<PASSWORD>", timeout_ms=1000)
with pytest.raises(InvalidTokenError):
client.ping()
finally:
server_process.terminate()
server_process.join()
def test_valid_token():
class Service:
pass
server = Server(Service(), token="<PASSWORD>")
server_process = start_server_process(server)
try:
client = Client(Service, "tcp://127.0.0.1:8000", token="<PASSWORD>", timeout_ms=1000)
client.ping()
finally:
server_process.terminate()
server_process.join()
def test_json_encoding_dataclasses():
@dataclass
class Point:
x: int
y: int
@dataclass
class Line:
p1: Point
p2: Point
encoding = Encoding(Line)
obj_in = ["abc", True, Line(Point(1, 2), Point(3, 4)), Point(5, 6)]
io = StringIO()
encoding.dump_json(obj_in, io)
io.seek(0)
obj_out = encoding.load_json(io)
assert obj_in == obj_out
def test_json_encoding_exceptions():
encoding = Encoding()
exceptions_in = [OSError("a", "b"), TypeError("c"), NotImplementedError()]
io = StringIO()
encoding.dump_json(exceptions_in, io)
io.seek(0)
exceptions_out = encoding.load_json(io)
with pytest.raises(OSError) as e:
raise exceptions_out[0]
assert e.value.args == ("a", "b")
with pytest.raises(TypeError) as e:
raise exceptions_out[1]
assert e.value.args == ("c",)
with pytest.raises(NotImplementedError) as e:
raise exceptions_out[2]
assert e.value.args == ()
def test_unserializable_object():
encoding = Encoding()
with pytest.raises(ValueError):
encoding.serialize_obj(set())
def test_deserialize_unknown_dataclass():
@dataclass
class Point:
x: int
y: int
encoding = Encoding(Point)
serialized = encoding.serialize_obj(Point(1, 2))
with pytest.raises(TypeError):
encoding = Encoding()
encoding.deserialize_obj(serialized)
|
1687892
|
from maya.api import OpenMaya
__all__ = [
"average_vector",
"smooth_vectors"
]
def average_vector(vectors):
"""
Get the average vector of the all of the provided vectors. All vectors
will be added up and divided by the number of the provided vectors.
:param list[OpenMaya.MVector] vectors:
:return: Average vector
:rtype: OpenMaya.MVector
"""
vector = OpenMaya.MVector()
for v in vectors:
vector += v
vector /= len(vectors)
return vector
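# For example (values illustrative): averaging MVector(0, 0, 0) and
# MVector(2, 0, 0) yields MVector(1, 0, 0).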
def smooth_vectors(vectors, connections, iterations=3):
"""
Perform smoothing on the provided vectors based on a connections mapper.
The position of the new vector is set based on the index of that vector
and its connected vectors based on the connected indices. The new vector
position is the average position of the connected vectors.
:param list[OpenMaya.MVector] vectors:
:param dict connections:
:param int iterations:
:return: Smooth vectors
:rtype: list[OpenMaya.MVector]
"""
# get length
length = len(vectors)
# copy vectors
vectors = vectors[:]
# loop iterations
for i in range(iterations):
# copy vectors again to make sure the connected information queried
# hasn't been smoothed already
copy = vectors[:]
# loop vectors
for j in range(length):
indices = connections.get(j, [])
neighbours = [copy[i] for i in indices]
vectors[j] = average_vector(neighbours)
return vectors
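# Usage sketch (runs only inside a Maya Python session; values hypothetical):
#   vectors = [OpenMaya.MVector(0, 0, 0), OpenMaya.MVector(1, 0, 0), OpenMaya.MVector(2, 0, 0)]
#   connections = {0: [1], 1: [0, 2], 2: [1]}
#   smoothed = smooth_vectors(vectors, connections, iterations=1)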
|
1687897
|
from __future__ import print_function
import importlib, inspect, os, sys
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
import h2o
from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates sklearn pipelines using either a mix of sklearn+H2O components,
or only H2O components.
Then, it feeds them with H2O frames (more efficient, and compatible with the old API)
or with numpy arrays to provide the simplest approach for users wanting to use H2O like any sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
max_models = 3
scores = {}
def _get_data(format='numpy', n_classes=2):
generator = make_classification if n_classes > 0 else make_regression
params = dict(n_samples=100, n_features=5, n_informative=n_classes or 2, random_state=seed)
if generator is make_classification:
params.update(n_classes=n_classes)
X, y = generator(**params)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
if format == 'h2o':
for k, v in data.__dict__.items():
setattr(data, k, h2o.H2OFrame(v))
return data
def test_binomial_classification_with_h2o_frames():
pipeline = make_pipeline(H2OAutoMLClassifier(seed=seed))
pipeline.set_params(
h2oautomlclassifier__max_models=max_models,
h2oautomlclassifier__nfolds=3
)
pipeline.named_steps.h2oautomlclassifier.exclude_algos = ['XGBoost']
data = _get_data(format='h2o', n_classes=2)
assert isinstance(data.X_train, h2o.H2OFrame)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlclassifier.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, h2o.H2OFrame)
assert preds.dim == [len(data.X_test), 1]
probs = pipeline.predict_proba(data.X_test)
assert probs.dim == [len(data.X_test), 2]
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_multinomial_classification_with_numpy_frames():
pipeline = make_pipeline(H2OAutoMLClassifier(seed=seed, init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlclassifier__max_models=max_models,
h2oautomlclassifier__nfolds=3
)
pipeline.named_steps.h2oautomlclassifier.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlclassifier.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = pipeline.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_regression_with_numpy_frames():
pipeline = make_pipeline(H2OAutoMLRegressor(seed=seed, init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlregressor__max_models=max_models,
h2oautomlregressor__nfolds=3
)
pipeline.named_steps.h2oautomlregressor.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=0)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlregressor.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_generic_estimator_for_classification():
pipeline = make_pipeline(H2OAutoMLEstimator(estimator_type='classifier', seed=seed,
init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlestimator__max_models=max_models,
h2oautomlestimator__nfolds=3
)
pipeline.named_steps.h2oautomlestimator.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlestimator.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = pipeline.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_generic_estimator_for_regression():
pipeline = make_pipeline(H2OAutoMLEstimator(estimator_type='regressor', seed=seed,
init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlestimator__max_models=max_models,
h2oautomlestimator__nfolds=3
)
pipeline.named_steps.h2oautomlestimator.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=0)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlestimator.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
pyunit_utils.run_tests([
test_binomial_classification_with_h2o_frames,
test_multinomial_classification_with_numpy_frames,
test_regression_with_numpy_frames,
test_generic_estimator_for_classification,
test_generic_estimator_for_regression,
])
|
1687915
|
from __future__ import unicode_literals
import datetime
from sqlalchemy import engine_from_config
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
from billy.db import tables
def setup_database(global_config, **settings):
"""Setup database
"""
if 'engine' not in settings:
settings['engine'] = (
engine_from_config(settings, 'sqlalchemy.')
)
if 'session' not in settings:
settings['session'] = scoped_session(sessionmaker(
extension=ZopeTransactionExtension(keep_session=True),
bind=settings['engine'],
))
tables.set_now_func(datetime.datetime.utcnow)
return settings
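# Hypothetical usage (an in-memory SQLite URL, purely for illustration):
#   settings = setup_database({}, **{"sqlalchemy.url": "sqlite://"})
#   session = settings["session"]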
|
1687934
|
from oogway.request import request_payment, parse_request
from oogway.validate import validate
REQ1_RES = "bitcoin:1FHXDkRLhoCziRjftaPB3fELUYrZomFanx?amount=0.00020000&time=1598319207&exp=3600&message=oogway%20requests"
REQ2_RES = "bitcoin:1FHXDkRLhoCziRjftaPB3fELUYrZomFanx?amount=0.00020000&time=1598319712"
def test_request_payment():
req1 = request_payment("1FHXDkRLhoCziRjftaPB3fELUYrZomFanx", 20000, 60, "oogway requests")
req2 = request_payment("1FHXDkRLhoCziRjftaPB3fELUYrZomFanx", 20000)
assert req1[0:66] == REQ1_RES[0:66]
assert req1[76:] == REQ1_RES[76:]
assert req2[0:66] == REQ2_RES[0:66]
def test_parse_request():
req = parse_request(REQ1_RES)
address = req['address']
amount = req['amount']
created = req['created']
status = req['status']
assert validate.is_valid_address(address) == True
assert type(amount) == str
assert type(created) == str
assert type(status) == str
|
1687950
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tarfile
import cv2
import copy
import numpy as np
import tensorflow as tf
from utils.curve import points_to_heatmap_rectangle_68pt
from six.moves import xrange
from six.moves import urllib
from datagen import DataGenerator
from datagen import ensure_dir
from FAB import FAB
MOMENTUM = 0.9
POINTS_NUM = 68
IMAGE_SIZE = 256
PIC_CHANNEL = 3
num_input_imgs = 3
NUM_CLASSES = POINTS_NUM*2
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
structure_predictor_net_channel = 64
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('structure_predictor_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('video_deblur_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('resnet_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('end_2_end_train_dir', '', """Directory where to write train_checkpoints.""")
tf.app.flags.DEFINE_string('end_2_end_test_dir', '', """Directory where to write test logs.""")
tf.app.flags.DEFINE_string('data_dir', '', """Directory where the dataset stores.""")
tf.app.flags.DEFINE_string('img_list', '', """Directory where the img_list stores.""")
tf.app.flags.DEFINE_float('learning_rate', 0.0, "learning rate.")
tf.app.flags.DEFINE_integer('batch_size', 1, "batch size")
tf.app.flags.DEFINE_boolean('resume_structure_predictor', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('resume_resnet', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('resume_video_deblur', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('resume_all', False, """Resume from latest saved state.""")
tf.app.flags.DEFINE_boolean('minimal_summaries', False, """Produce fewer summaries to save HD space.""")
tf.app.flags.DEFINE_boolean('use_bn', False, """Use batch normalization. Otherwise use biases.""")
def resume(sess, do_resume, ckpt_path, key_word):
var = tf.global_variables()
if do_resume:
structure_predictor_latest = tf.train.latest_checkpoint(ckpt_path)
if not structure_predictor_latest:
print ("\n No checkpoint to continue from in ", ckpt_path, '\n')
structure_predictor_var_to_restore = [val for val in var if key_word in val.name]
saver_structure_predictor = tf.train.Saver(structure_predictor_var_to_restore)
saver_structure_predictor.restore(sess, structure_predictor_latest)
def test(resnet_model, is_training, F, H, F_curr, H_curr, input_images_blur,
input_images_boundary, next_boundary_gt, labels, data_dir, img_list,
dropout_ratio):
global_step = tf.get_variable('global_step', [],
initializer=tf.constant_initializer(0),
trainable=False)
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
sess.run(init)
val_save_root = os.path.join(FLAGS.end_2_end_test_dir,'visualization')
################################ resume part #################################
# resume weights
resume(sess, FLAGS.resume_structure_predictor, FLAGS.structure_predictor_train_dir, 'voxel_flow_model_')
resume(sess, FLAGS.resume_video_deblur, FLAGS.video_deblur_train_dir, 'video_deblur_model_')
resume(sess, FLAGS.resume_resnet, FLAGS.resnet_train_dir, 'resnet_model_')
resume(sess, FLAGS.resume_all, FLAGS.end_2_end_train_dir, '')
##############################################################################
gt_file_path = os.path.join(FLAGS.end_2_end_test_dir,'gt.txt')
pre_file_path = os.path.join(FLAGS.end_2_end_test_dir,'pre.txt')
ensure_dir(gt_file_path)
ensure_dir(pre_file_path)
gt_file = open(gt_file_path,'w')
pre_file = open(pre_file_path,'w')
dataset = DataGenerator(data_dir,img_list)
dataset._create_train_table()
dataset._create_sets_for_300VW()
test_gen = dataset._aux_generator(batch_size = FLAGS.batch_size, num_input_imgs = num_input_imgs,
NUM_CLASSES = POINTS_NUM*2, sample_set='test')
test_break_flag = False
for x in xrange(len(dataset.train_table)-2):
step = sess.run(global_step)
if not test_break_flag:
test_line_num, frame_name, input_boundaries, boundary_gt_test, input_images_blur_generated, landmark_gt_test, names, test_break_flag = next(test_gen)
if (frame_name == '2.jpg') or test_line_num <= 3:
input_images_boundary_init = copy.deepcopy(input_boundaries)
F_init = np.zeros([FLAGS.batch_size, IMAGE_SIZE//2,
IMAGE_SIZE//2, structure_predictor_net_channel//2], dtype=np.float32)
H_init = np.zeros([1, FLAGS.batch_size, IMAGE_SIZE//2,
IMAGE_SIZE//2, structure_predictor_net_channel], dtype=np.float32)
feed_dict={
input_images_boundary:input_images_boundary_init,
input_images_blur:input_images_blur_generated,
F:F_init,
H:H_init,
labels:landmark_gt_test,
next_boundary_gt:boundary_gt_test,
dropout_ratio:1.0
}
else:
output_points = o[0]
output_points = np.reshape(output_points,(POINTS_NUM,2))
boundary_from_points = points_to_heatmap_rectangle_68pt(output_points)
boundary_from_points = np.expand_dims(boundary_from_points,axis=0)
boundary_from_points = np.expand_dims(boundary_from_points,axis=3)
input_images_boundary_init = np.concatenate([input_images_boundary_init[:,:,:,1:2],
boundary_from_points], axis=3)
feed_dict={
input_images_boundary:input_images_boundary_init,
input_images_blur:input_images_blur_generated,
F:o[-2],
H:o[-1],
labels:landmark_gt_test,
next_boundary_gt:boundary_gt_test,
dropout_ratio:1.0
}
i = [resnet_model.logits, F_curr, H_curr]
o = sess.run(i, feed_dict=feed_dict)
pres = o[0]
for batch_num,pre in enumerate(pres):
for v in pre:
pre_file.write(str(v*255.0)+' ')
if len(names) > 1:
pre_file.write(names[-1])
else:
pre_file.write(names[batch_num])
pre_file.write('\n')
for batch_num,g in enumerate(landmark_gt_test):
for v in g:
gt_file.write(str(v*255.0)+' ')
if len(names) > 1:
gt_file.write(names[-1])
else:
gt_file.write(names[batch_num])
gt_file.write('\n')
img = input_images_blur_generated[0,:,:,0:3]*255
points = o[0][0]*255
for point_num in range(int(points.shape[0]/2)):
cv2.circle(img,(int(round(points[point_num*2])),int(round(points[point_num*2+1]))),1,(55,225,155),2)
val_save_path = os.path.join(val_save_root,str(step)+'.jpg')
ensure_dir(val_save_path)
cv2.imwrite(val_save_path,img)
global_step = global_step + 1
print('Test done!')
def main(argv=None):
resnet_model = FAB()
is_training = tf.placeholder('bool', [], name='is_training')
input_images_boundary = tf.placeholder(tf.float32,shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 2))
input_images_blur = tf.placeholder(tf.float32,shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, PIC_CHANNEL*3))
next_boundary_gt = tf.placeholder(tf.float32,shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 1))
labels = tf.placeholder(tf.float32,shape=(FLAGS.batch_size,NUM_CLASSES))
dropout_ratio = tf.placeholder(tf.float32)
F = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_SIZE//2, IMAGE_SIZE//2, structure_predictor_net_channel//2])
H = tf.placeholder(tf.float32, [1, FLAGS.batch_size, IMAGE_SIZE//2, IMAGE_SIZE//2, structure_predictor_net_channel])
F_curr, H_curr= \
resnet_model.FAB_inference(input_images_boundary, input_images_blur, F, H, FLAGS.batch_size,
net_channel=structure_predictor_net_channel, num_classes=136, num_blocks=[2, 2, 2, 2],
use_bias=(not FLAGS.use_bn), bottleneck=True, dropout_ratio=1.0)
test(resnet_model, is_training, F, H, F_curr, H_curr, input_images_blur,
input_images_boundary, next_boundary_gt, labels, FLAGS.data_dir, FLAGS.img_list,
dropout_ratio)
if __name__ == '__main__':
tf.app.run()
|
1687953
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from itertools import *
from functools import partial
from . import DefaultTable
from . import grUtils
import struct, operator, warnings
Glat_format_0 = """
> # big endian
version: 16.16F
"""
Glat_format_3 = """
>
version: 16.16F
compression:L # compression scheme or reserved
"""
Glat_format_1_entry = """
>
attNum: B # Attribute number of first attribute
num: B # Number of attributes in this run
"""
Glat_format_23_entry = """
>
attNum: H # Attribute number of first attribute
num: H # Number of attributes in this run
"""
Glat_format_3_octabox_metrics = """
>
subboxBitmap: H # Which subboxes exist on 4x4 grid
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
Glat_format_3_subbox_entry = """
>
left: B # xi
right: B # xa
bottom: B # yi
top: B # ya
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
class _Object() :
pass
class _Dict(dict) :
pass
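# A minimal sketch of how these sstruct format strings are used (hedged: the
# byte values are illustrative, not taken from a real font). sstruct packs and
# unpacks named fields against the big-endian layouts declared above:
#
#     entry = {'attNum': 2, 'num': 3}
#     blob = sstruct.pack(Glat_format_23_entry, entry)     # b'\x00\x02\x00\x03'
#     parsed, rest = sstruct.unpack2(Glat_format_23_entry, blob, _Object())
#     assert parsed.attNum == 2 and parsed.num == 3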
class table_G__l_a_t(DefaultTable.DefaultTable):
'''
Support Graphite Glat tables
'''
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.scheme = 0
def decompile(self, data, ttFont):
sstruct.unpack2(Glat_format_0, data, self)
if self.version <= 1.9:
decoder = partial(self.decompileAttributes12,fmt=Glat_format_1_entry)
elif self.version <= 2.9:
decoder = partial(self.decompileAttributes12,fmt=Glat_format_23_entry)
elif self.version >= 3.0:
(data, self.scheme) = grUtils.decompress(data)
sstruct.unpack2(Glat_format_3, data, self)
self.hasOctaboxes = (self.compression & 1) == 1
decoder = self.decompileAttributes3
gloc = ttFont['Gloc']
self.attributes = {}
count = 0
for s,e in zip(gloc,gloc[1:]):
self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
count += 1
def decompileAttributes12(self, data, fmt):
attributes = _Dict()
while len(data) > 3:
e, data = sstruct.unpack2(fmt, data, _Object())
keys = range(e.attNum, e.attNum+e.num)
if len(data) >= 2 * e.num :
vals = struct.unpack_from(('>%dh' % e.num), data)
attributes.update(zip(keys,vals))
data = data[2*e.num:]
return attributes
def decompileAttributes3(self, data):
if self.hasOctaboxes:
o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
numsub = bin(o.subboxBitmap).count("1")
o.subboxes = []
for b in range(numsub):
if len(data) >= 8 :
subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry,
data, _Object())
o.subboxes.append(subbox)
attrs = self.decompileAttributes12(data, Glat_format_23_entry)
if self.hasOctaboxes:
attrs.octabox = o
return attrs
def compile(self, ttFont):
data = sstruct.pack(Glat_format_0, self)
if self.version <= 1.9:
encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
elif self.version <= 2.9:
            encoder = partial(self.compileAttributes12, fmt=Glat_format_23_entry)
elif self.version >= 3.0:
self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
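            # compression packs the scheme into the high bits (scheme << 27) and
            # the octabox flag into bit 0, mirroring decompile's (compression & 1)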
data = sstruct.pack(Glat_format_3, self)
encoder = self.compileAttributes3
glocs = []
for n in range(len(self.attributes)):
glocs.append(len(data))
data += encoder(self.attributes[ttFont.getGlyphName(n)])
glocs.append(len(data))
ttFont['Gloc'].set(glocs)
if self.version >= 3.0:
data = grUtils.compress(self.scheme, data)
return data
def compileAttributes12(self, attrs, fmt):
data = b""
for e in grUtils.entries(attrs):
data += sstruct.pack(fmt, {'attNum' : e[0], 'num' : e[1]}) + \
struct.pack(('>%dh' % len(e[2])), *e[2])
return data
def compileAttributes3(self, attrs):
if self.hasOctaboxes:
o = attrs.octabox
data = sstruct.pack(Glat_format_3_octabox_metrics, o)
numsub = bin(o.subboxBitmap).count("1")
for b in range(numsub) :
data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
else:
data = ""
return data + self.compileAttributes12(attrs, Glat_format_23_entry)
def toXML(self, writer, ttFont):
writer.simpletag('version', version=self.version, compressionScheme=self.scheme)
writer.newline()
for n, a in sorted(self.attributes.items(), key=lambda x:ttFont.getGlyphID(x[0])):
writer.begintag('glyph', name=n)
writer.newline()
if hasattr(a, 'octabox'):
o = a.octabox
formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics)
vals = {}
for k in names:
if k == 'subboxBitmap': continue
vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 255)
vals['bitmap'] = "{:0X}".format(o.subboxBitmap)
writer.begintag('octaboxes', **vals)
writer.newline()
formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry)
for s in o.subboxes:
vals = {}
for k in names:
vals[k] = "{:.3f}%".format(getattr(s, k) * 100. / 255)
writer.simpletag('octabox', **vals)
writer.newline()
writer.endtag('octaboxes')
writer.newline()
for k, v in sorted(a.items()):
writer.simpletag('attribute', index=k, value=v)
writer.newline()
writer.endtag('glyph')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == 'version' :
self.version = float(safeEval(attrs['version']))
self.scheme = int(safeEval(attrs['compressionScheme']))
if name != 'glyph' : return
if not hasattr(self, 'attributes'):
self.attributes = {}
gname = attrs['name']
attributes = _Dict()
for element in content:
if not isinstance(element, tuple): continue
tag, attrs, subcontent = element
if tag == 'attribute' :
k = int(safeEval(attrs['index']))
v = int(safeEval(attrs['value']))
attributes[k]=v
elif tag == 'octaboxes':
self.hasOctaboxes = True
o = _Object()
o.subboxBitmap = int(attrs['bitmap'], 16)
o.subboxes = []
del attrs['bitmap']
for k, v in attrs.items():
setattr(o, k, int(float(v[:-1]) * 255. / 100. + 0.5))
for element in subcontent:
if not isinstance(element, tuple): continue
(tag, attrs, subcontent) = element
so = _Object()
for k, v in attrs.items():
setattr(so, k, int(float(v[:-1]) * 255. / 100. + 0.5))
o.subboxes.append(so)
attributes.octabox = o
self.attributes[gname] = attributes
|
1687974
|
import unittest
import groundstation.objects.object_factory as object_factory
from groundstation.objects.root_object import RootObject
from groundstation.objects.update_object import UpdateObject
class TestRootObject(unittest.TestCase):
def test_hydrate_root_object(self):
root = RootObject(
"test_object",
"<EMAIL>:groundstation/tests",
"<EMAIL>:groundstation/testcase"
)
hydrated_root = object_factory.hydrate_object(root.as_object())
self.assertTrue(isinstance(hydrated_root, RootObject))
class TestUpdateObject(unittest.TestCase):
    def test_hydrate_update_with_1_parent(self):
update = UpdateObject(
["d41e2dadaf624319518a9dfa8ef4cb0dde055b5c"],
"Lol I r update data"
)
hydrated_update = object_factory.hydrate_object(update.as_object())
self.assertTrue(isinstance(hydrated_update, UpdateObject))
    def test_hydrate_update_with_2_parents(self):
update = UpdateObject(
["d41e2dadaf624319518a9dfa8ef4cb0dde055b5c",
"d41e2dadaf624319518a9dfa8ef4cb0dde055bff"],
"Lol I r update data"
)
hydrated_update = object_factory.hydrate_object(update.as_object())
self.assertTrue(isinstance(hydrated_update, UpdateObject))
|
1688012
|
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from oslo_service import wsgi
from bm_instance_agent.api import app
from bm_instance_agent.conf import CONF
from bm_instance_agent import exception
LOG = logging.getLogger(__name__)
def parse_args(argv, default_config_files=None):
cfg.CONF(argv[1:],
project='bm-instance-agent',
default_config_files=default_config_files)
def prepare_service(argv=None):
logging.register_options(CONF)
logging.set_defaults(default_log_levels=CONF.default_log_levels)
argv = argv or []
parse_args(argv)
logging.setup(CONF, 'bm-instance-agent')
def process_launcher():
return service.ProcessLauncher(CONF, restart_method='mutate')
def thread_launcher():
return service.ServiceLauncher(CONF, restart_method='mutate')
class WSGIService(service.ServiceBase):
"""Provides ability to launch bm-instance-agent from wsgi app."""
def __init__(self, name, use_ssl=False):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param use_ssl: Wraps the socket in an SSL context if True.
:returns: None
"""
self.name = name
self.app = app.setup_app()
        # The agent is lightweight; no additional workers are needed
self.workers = CONF.api.api_workers or 1
if self.workers and self.workers < 1:
raise exception.ConfigInvalid(
("api_workers value of %d is invalid, "
"must be greater than 0.") % self.workers)
self.server = wsgi.Server(CONF, self.name, self.app,
host=CONF.api.host_ip,
port=CONF.api.port,
use_ssl=use_ssl)
def start(self):
"""Start serving this service using loaded configuration.
:returns: None
"""
self.server.start()
def stop(self):
"""Stop serving this API.
:returns: None
"""
# NOTE: Sleep 3 seconds to send the api callback
time.sleep(3)
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
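# Launch sketch (hedged: the service name and worker count are illustrative;
# launch_service/wait come from oslo.service's ProcessLauncher):
#
#     launcher = process_launcher()
#     launcher.launch_service(WSGIService('bm-instance-agent-api'), workers=1)
#     launcher.wait()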
|
1688037
|
from panda3d.core import NodePath
from panda3d.core import Point3
from libpandadna import DNAStorage
from libpandadna import DNAVisGroup
from libpandadna import DNASuitPoint
from panda3d.core import loadPrcFileData
loadPrcFileData('', 'window-type none')
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
import unittest
class TestStorage(unittest.TestCase):
def test_storage_visgroups(self):
store = DNAStorage()
vg = DNAVisGroup("my_vg")
vg.addVisible("visible1")
vg.addVisible("visible2")
vg.addVisible("visible3")
store.storeDNAVisGroup(vg)
self.assertEqual(store.getNumDNAVisGroups(), 1)
self.assertEqual(store.getNumDNAVisGroupsAI(), 1)
self.assertEqual(store.getDNAVisGroupAI(0), vg)
self.assertEqual(store.getDNAVisGroupName(0), vg.getName())
num_visibles = store.getNumVisiblesInDNAVisGroup(0)
self.assertEqual(num_visibles, 3)
for i in xrange(num_visibles):
self.assertEqual(store.getVisibleName(0, i), "visible%d" % (i + 1))
vg.removeVisible("visible2")
self.assertEqual(store.getNumVisiblesInDNAVisGroup(0), 2)
self.assertEqual(store.getVisibleName(0, 0), "visible1")
self.assertEqual(store.getVisibleName(0, 1), "visible3")
store.resetDNAVisGroups()
self.assertEqual(store.getNumDNAVisGroups(), 0)
self.assertEqual(store.getNumDNAVisGroupsAI(), 0)
def test_texture(self):
store = DNAStorage()
texture1 = loader.loadTexture('files/texture1.png')
texture2 = loader.loadTexture('files/texture2.png')
store.storeTexture('texture1', texture1)
store.storeTexture('texture2', texture2)
self.assertEqual(store.findTexture('texture1'), texture1)
self.assertEqual(store.findTexture('texture2'), texture2)
self.assertEqual(store.findTexture('bad'), None)
store.resetTextures()
self.assertEqual(store.findTexture('texture1'), None)
self.assertEqual(store.findTexture('texture2'), None)
def test_font(self):
store = DNAStorage()
font1 = loader.loadFont('files/arial.ttf')
font2 = loader.loadFont('files/comic.ttf')
store.storeFont('font1', font1)
store.storeFont('font2', font2)
self.assertEqual(store.findFont('font1'), font1)
self.assertEqual(store.findFont('font2'), font2)
self.assertEqual(store.findFont('bad'), None)
store.resetFonts()
self.assertEqual(store.findFont('font1'), None)
self.assertEqual(store.findFont('font2'), None)
def test_storage_catalog(self):
store = DNAStorage()
store.storeCatalogCode('root', 'code1')
store.storeCatalogCode('root', 'code2')
store.storeCatalogCode('root', 'code3')
colors = ('red', 'green', 'blue', 'yellow', 'orange')
for color in colors:
store.storeCatalogCode('colors', color)
self.assertEqual(store.getNumCatalogCodes('colors'), len(colors))
for i, color in enumerate(colors):
self.assertEqual(store.getCatalogCode('colors', i), color)
# 'bad' root must have no codes
self.assertEqual(store.getNumCatalogCodes('bad'), 0)
def test_nodes(self):
store = DNAStorage()
store.storeNode('files/node.bam', 'dummy', 'node')
store.storeHoodNode('files/hood_node.bam', 'dummy', 'hood_node')
store.storePlaceNode('files/place_node.bam', 'dummy', 'place_node')
self.assertTrue(store.findNode('bad').isEmpty())
self.assertFalse(store.findNode('node').isEmpty())
self.assertFalse(store.findNode('hood_node').isEmpty())
self.assertFalse(store.findNode('place_node').isEmpty())
store.resetNodes()
self.assertTrue(store.findNode('node').isEmpty())
store.resetHoodNodes()
self.assertTrue(store.findNode('hood_node').isEmpty())
store.resetPlaceNodes()
self.assertTrue(store.findNode('place_node').isEmpty())
def test_blocks(self):
store = DNAStorage()
block1 = (
4269, # block_number
"libpandadna", # title
"", # article
"", # bldg_type
1337 # zone_id
)
block2 = (
1337, # block_number
"Visual Studio 2010", # title
"", # article
"", # bldg_type
4269 # zone_id
)
block3 = (
1000, # block_number
"C++ reader", # title
"", # article
"", # bldg_type
4269 # zone_id
)
store.storeBlock(*block1)
store.storeBlock(*block2)
store.storeBlock(*block3)
door1 = NodePath('block-1000')
door1.setPos(5, 5, 10)
door1.setH(180)
store.storeBlockDoor(1000, door1)
self.assertEqual(store.getNumBlockNumbers(), 3)
# Test an invalid block number
self.assertEqual(store.getZoneFromBlockNumber(100), 0)
self.assertEqual(store.getZoneFromBlockNumber(1000), 4269)
self.assertEqual(store.getZoneFromBlockNumber(1337), 4269)
self.assertEqual(store.getZoneFromBlockNumber(4269), 1337)
self.assertEqual(store.getTitleFromBlockNumber(1000), "C++ reader")
self.assertEqual(store.getTitleFromBlockNumber(1337), "Visual Studio 2010")
self.assertEqual(store.getTitleFromBlockNumber(4269), "libpandadna")
self.assertEqual(store.getBlockNumberAt(0), 4269)
self.assertEqual(store.getBlockNumberAt(1), 1337)
self.assertEqual(store.getBlockNumberAt(2), 1000)
self.assertEqual(store.getDoorPosHprFromBlockNumber(1000), door1)
self.assertTrue(store.getDoorPosHprFromBlockNumber(1337).isEmpty())
store.resetBlockNumbers()
store.resetBlockZones()
self.assertEqual(store.getTitleFromBlockNumber(1000), "")
self.assertEqual(store.getTitleFromBlockNumber(1337), "")
self.assertEqual(store.getTitleFromBlockNumber(4269), "")
def test_suit_points(self):
store = DNAStorage()
point1 = DNASuitPoint(5, DNASuitPoint.STREET_POINT,
Point3(100, 20, 0.5))
point2 = DNASuitPoint(10, DNASuitPoint.STREET_POINT,
Point3(100, 0, 0.5))
point3 = DNASuitPoint(15, DNASuitPoint.FRONT_DOOR_POINT,
Point3(100, -20, 0.5), 10)
store.storeSuitPoint(point1)
store.storeSuitPoint(point2)
store.storeSuitPoint(point3)
self.assertEqual(store.getNumSuitPoints(), 3)
self.assertEqual(store.getSuitPointAtIndex(0), point1)
self.assertEqual(store.getSuitPointAtIndex(1), point2)
self.assertEqual(store.getSuitPointAtIndex(2), point3)
self.assertEqual(store.getSuitPointWithIndex(5), point1)
self.assertEqual(store.getSuitPointWithIndex(10), point2)
self.assertEqual(store.getSuitPointWithIndex(15), point3)
# Test invalid index
self.assertEqual(store.getSuitPointWithIndex(1000), None)
# Test suit edges
edges = ((5, 10, 2301), (10, 15, 2302), (15, 5, 2303))
for edge in edges:
store.storeSuitEdge(*edge)
for edge in edges:
dna_edge = store.getSuitEdge(edge[0], edge[1])
self.assertTrue(dna_edge is not None)
self.assertEqual(dna_edge.getStartPoint(), store.getSuitPointWithIndex(edge[0]))
self.assertEqual(dna_edge.getEndPoint(), store.getSuitPointWithIndex(edge[1]))
self.assertEqual(dna_edge.getZoneId(), edge[2])
self.assertEqual(store.getSuitEdgeZone(edge[0], edge[1]), edge[2])
adj_points = store.getAdjacentPoints(point1)
self.assertEqual(adj_points.getNumPoints(), 1)
self.assertEqual(adj_points.getPoint(0), point2)
self.assertEqual(store.getSuitEdgeTravelTime(5, 10, 5), 4)
self.assertEqual(store.getSuitEdgeTravelTime(10, 15, 5), 4)
self.assertEqual(store.getSuitEdgeTravelTime(15, 5, 5), 8)
# Test suit path
path = store.getSuitPath(point1, point3, 1, 10)
self.assertEqual(path.getNumPoints(), 3)
self.assertEqual(path.getPoint(0), point1)
self.assertEqual(path.getPoint(1), point2)
self.assertEqual(path.getPoint(2), point3)
# Test invalid values
store.storeSuitEdge(1, 2, 2800)
self.assertEqual(store.getSuitEdge(1, 2), None)
self.assertEqual(store.getSuitEdge(145, 13442), None)
self.assertEqual(store.getSuitEdgeTravelTime(1, 2, 5), 0)
self.assertEqual(repr(store.getSuitEdgeTravelTime(10, 15, 0)), 'inf') # Division by 0
point4 = DNASuitPoint(400, DNASuitPoint.STREET_POINT, Point3(0, 0, 0))
self.assertEqual(store.getSuitPath(point4, point2, 1, 10), None)
# Reset
store.resetSuitPoints()
self.assertEqual(store.getSuitPointWithIndex(5), None)
self.assertEqual(store.getSuitPointWithIndex(10), None)
self.assertEqual(store.getSuitPointWithIndex(15), None)
self.assertTrue(store.discoverContinuity())
if __name__ == '__main__':
unittest.main()
|
1688040
|
from IPython.display import display
from .odysis import (
Scene, DataBlock,
Warp,
ColorMapping, Grid,
VectorField, PointCloud,
Clip, Slice,
Threshold, IsoSurface
)
_current_scene = None
_current_datablock = None
def scene(mesh):
global _current_scene
global _current_datablock
_current_datablock = DataBlock(mesh=mesh)
_current_scene = Scene(datablocks=[_current_datablock])
return _current_scene
def color_mapping(**kwargs):
global _current_datablock
effect = ColorMapping(**kwargs)
_current_datablock.apply(effect)
return effect
def grid(**kwargs):
global _current_datablock
effect = Grid(**kwargs)
_current_datablock.apply(effect)
return effect
def warp(**kwargs):
global _current_datablock
effect = Warp(**kwargs)
_current_datablock.apply(effect)
return effect
def vector_field(**kwargs):
global _current_datablock
effect = VectorField(**kwargs)
_current_datablock.apply(effect)
return effect
def point_cloud(**kwargs):
global _current_datablock
effect = PointCloud(**kwargs)
_current_datablock.apply(effect)
return effect
def clip(**kwargs):
global _current_datablock
effect = Clip(**kwargs)
_current_datablock.apply(effect)
return effect
def slice(**kwargs):
global _current_datablock
effect = Slice(**kwargs)
_current_datablock.apply(effect)
return effect
def threshold(**kwargs):
global _current_datablock
effect = Threshold(**kwargs)
_current_datablock.apply(effect)
return effect
def iso_surface(**kwargs):
global _current_datablock
effect = IsoSurface(**kwargs)
_current_datablock.apply(effect)
return effect
def plot():
global _current_scene
display(_current_scene)
def get_current_scene():
global _current_scene
return _current_scene
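# A minimal usage sketch (hedged: `my_mesh` and the effect keyword arguments
# are placeholders; the accepted parameters come from the widget classes
# imported above):
#
#     s = scene(my_mesh)        # create a Scene wrapping a DataBlock
#     color_mapping()           # apply an effect to the current datablock
#     iso_surface()             # effects stack on the same datablock
#     plot()                    # display the composed scene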
|
1688051
|
import sys
import os
import platform
import numpy as np
import matplotlib.pyplot as plt
import flopy
#Set name of MODFLOW exe
# assumes executable is in users path statement
version = 'mf2005'
exe_name = 'mf2005'
if platform.system() == 'Windows':
exe_name = 'mf2005.exe'
mfexe = exe_name
#Set the paths
loadpth = os.path.join('..', 'data', 'secp')
modelpth = os.path.join('data')
#make sure modelpth directory exists
if not os.path.exists(modelpth):
os.makedirs(modelpth)
ml = flopy.modflow.Modflow.load('secp.nam', model_ws=loadpth,
exe_name=exe_name, version=version, verbose=True)
ml.change_model_ws(new_pth=modelpth)
ml.write_input()
print('...end')
|
1688074
|
from __future__ import annotations
from hist import Hist, Stack, axis
def test_1D_empty_repr(named_hist):
h = named_hist.new.Reg(10, -1, 1, name="x", label="y").Double()
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "label='y'" in repr(h)
def test_1D_var_empty_repr(named_hist):
h = named_hist.new.Var(range(10), name="x", label="y").Double()
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "label='y'" in repr(h)
def test_1D_int_empty_repr(named_hist):
h = named_hist.new.Int(-9, 9, name="x", label="y").Double()
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "label='y'" in repr(h)
def test_1D_intcat_empty_repr(named_hist):
h = named_hist.new.IntCat([1, 3, 5], name="x", label="y").Double()
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "label='y'" in repr(h)
def test_1D_strcat_empty_repr(named_hist):
h = named_hist.new.StrCat(["1", "3", "5"], name="x", label="y").Double()
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "label='y'" in repr(h)
def test_2D_empty_repr(named_hist):
h = (
named_hist.new.Reg(10, -1, 1, name="x", label="y")
.Int(0, 15, name="p", label="q")
.Double()
)
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "name='p'" in repr(h)
assert "label='y'" in repr(h)
assert "label='q'" in repr(h)
def test_1D_circ_empty_repr(named_hist):
h = named_hist.new.Reg(10, -1, 1, circular=True, name="R", label="r").Double()
html = h._repr_html_()
assert html
assert "name='R'" in repr(h)
assert "label='r'" in repr(h)
def test_ND_empty_repr(named_hist):
h = (
named_hist.new.Reg(10, -1, 1, name="x", label="y")
.Reg(12, -3, 3, name="p", label="q")
.Reg(15, -2, 4, name="a", label="b")
.Double()
)
html = h._repr_html_()
assert html
assert "name='x'" in repr(h)
assert "name='p'" in repr(h)
assert "name='a'" in repr(h)
assert "label='y'" in repr(h)
assert "label='q'" in repr(h)
assert "label='b'" in repr(h)
def test_stack_repr(named_hist):
a1 = axis.Regular(
50, -5, 5, name="A", label="a [unit]", underflow=False, overflow=False
)
a2 = axis.Regular(
50, -5, 5, name="A", label="a [unit]", underflow=False, overflow=False
)
assert "name='A'" in repr(Stack(Hist(a1), Hist(a2)))
assert "label='a [unit]'" in repr(Stack(Hist(a1), Hist(a2)))
|
1688096
|
from setuptools import setup, find_packages
import os
version = '4.0.1'
install_requires = [
'setuptools',
'requests',
'APScheduler',
'iso8601',
'python-dateutil',
'Flask',
'Flask-Redis',
'WSGIProxy2',
'gevent',
'sse',
'flask_oauthlib',
'PyYAML',
'request_id_middleware',
'restkit',
'PyMemoize',
'barbecue',
'PasteDeploy',
# ssl warning
'pyopenssl',
'ndg-httpsclient',
'pyasn1',
'openprocurement_client',
'python-consul',
'retrying',
'zope.interface',
'walkabout'
]
extras_require = {
'test': [
'robotframework',
'robotframework-seleniumlibrary',
'robotframework-debuglibrary',
'webtest',
'mock',
'pytest-cov'
]
}
entry_points = {
'console_scripts': [
'auctions_chronograph = openprocurement.auction.chronograph:main',
'auctions_data_bridge = openprocurement.auction.databridge:main',
'auction_test = openprocurement.auction.tests.main:main [test]'
],
'paste.app_factory': [
'auctions_server = openprocurement.auction.app:make_auctions_app',
],
'openprocurement.auction.auctions': [
'default = openprocurement.auction.includeme:default'
]
}
setup(name='openprocurement.auction',
version=version,
description="",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
],
keywords='',
author='Quintagroup, Ltd.',
author_email='<EMAIL>',
license='Apache License 2.0',
url='https://github.com/openprocurement/openprocurement.auction',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['openprocurement'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require=extras_require,
entry_points=entry_points,
)
|
1688109
|
from pprint import pprint
from pymldb import Connection
import rec.settings as _
def run_test_pipeline(mldb):
mldb.datasets(_.TEST_DATASET).delete()
mldb.procedures(_.DATASET_MANAGER_TEST).runs.post_json({})
print('nb test examples', mldb.datasets(_.TEST_DATASET)
.get()['status']['rowCount'])
mldb.datasets(_.TEST_FEATS_DATASET).delete()
mldb.procedures(_.FEATURE_GENERATION_TEST).runs.post_json({})
    runid = 'latest'
mldb.datasets(_.CLASSIFIER_TESTING_SCORES).delete()
mldb.procedures(_.CLASSIFIER_TESTING).runs(runid).delete()
mldb.procedures(_.CLASSIFIER_TESTING).runs.post_json({'id': runid})
r = mldb.procedures(_.CLASSIFIER_TESTING).runs(runid).get_query()
print('auc', r['status']['auc'])
# r = mldb.datasets(_.CLASSIFIER_TESTING_SCORES).query.get_query(
# format='aos')
# pprint(r)
if __name__ == '__main__':
mldb = Connection(_.HOST)
run_test_pipeline(mldb)
|
1688145
|
import cocotb
import pyuvm.utility_classes as utility_classes
from pyuvm import *
import inspect
phase_list = {}
class my_comp(uvm_component):
def log_phase(self):
"""
Log this function to the phase list
"""
comp_name = self.get_name()
function_name = inspect.stack()[1][3]
phase_list[function_name].append(comp_name)
def build_phase(self):
self.log_phase()
def connect_phase(self):
self.log_phase()
def end_of_elaboration_phase(self):
self.log_phase()
def start_of_simulation_phase(self):
self.log_phase()
async def run_phase(self):
self.log_phase()
def extract_phase(self):
self.log_phase()
def check_phase(self):
self.log_phase()
def report_phase(self):
self.log_phase()
def final_phase(self):
self.log_phase()
def setUp():
for phase_class in uvm_common_phases:
phase_func = phase_class.__name__[4:]
phase_list[phase_func] = []
top = my_comp("top", None)
#
# top +-> aa +-> cc
# +-> dd
# +-> bb +-> ee
# +-> ff
#
aa = my_comp("aa", top)
bb = my_comp("bb", top)
my_comp("cc", aa)
my_comp("dd", aa)
my_comp("ee", bb)
my_comp("ff", bb)
return top
def tearDown():
uvm_root().clear_hierarchy()
class my_test(uvm_test):
async def run_phase(self):
self.raise_objection()
print("Hey, I'm here")
self.drop_objection()
@cocotb.test()
async def run_test(dut):
"""Test the various nowait flavors"""
await uvm_root().run_test("my_test")
assert True
@cocotb.test()
async def test_stub(dut):
"""testing the basic testing mechanism"""
top = setUp()
top.build_phase()
assert "top" == phase_list["build_phase"][0]
tearDown()
async def test_traverse():
top = setUp()
top_down = ['top', 'aa', 'cc', 'dd', 'bb', 'ee', 'ff']
bottom_up = ['cc', 'dd', 'aa', 'ee', 'ff', 'bb', 'top']
sorted_list = sorted(top_down)
for phase_class in uvm_common_phases:
phase = phase_class()
phase.traverse(top)
if phase_class == uvm_run_phase:
await utility_classes.ObjectionHandler().run_phase_complete()
function_name = phase_class.__name__[4:]
returned_comps = phase_list[function_name]
try:
if isinstance(phase, uvm_run_phase):
assert sorted_list == sorted(returned_comps)
elif isinstance(phase, uvm_topdown_phase):
assert top_down == returned_comps
elif isinstance(phase, uvm_bottomup_phase):
assert bottom_up == returned_comps
else:
# Should not get here.
assert False
except AssertionError:
tearDown()
return False
tearDown()
return True
@cocotb.test()
async def traverse(dut):
"""Testing topdown and bottom up traversal"""
assert await test_traverse()
|
1688160
|
import logging
import os
import os.path
import galaxy.tools.parameters.basic
import galaxy.tools.parameters.grouping
from galaxy.tool_util.verify.interactor import ToolTestDescription
from galaxy.util import (
string_as_bool,
string_as_bool_or_none,
unicodify,
)
try:
from nose.tools import nottest
except ImportError:
def nottest(x):
return x
log = logging.getLogger(__name__)
@nottest
def parse_tests(tool, tests_source):
"""
    Build a ToolTestDescription object for each "<test>" element.
"""
raw_tests_dict = tests_source.parse_tests_to_dict()
tests = []
for i, raw_test_dict in enumerate(raw_tests_dict.get('tests', [])):
test = description_from_tool_object(tool, i, raw_test_dict)
tests.append(test)
return tests
def description_from_tool_object(tool, test_index, raw_test_dict):
required_files = []
required_data_tables = []
required_loc_files = []
num_outputs = raw_test_dict.get('expect_num_outputs', None)
if num_outputs:
num_outputs = int(num_outputs)
try:
processed_inputs = _process_raw_inputs(tool, tool.inputs, raw_test_dict["inputs"], required_files, required_data_tables, required_loc_files)
processed_test_dict = {
"inputs": processed_inputs,
"outputs": raw_test_dict["outputs"],
"output_collections": raw_test_dict["output_collections"],
"num_outputs": num_outputs,
"command_line": raw_test_dict.get("command", None),
"command_version": raw_test_dict.get("command_version", None),
"stdout": raw_test_dict.get("stdout", None),
"stderr": raw_test_dict.get("stderr", None),
"expect_exit_code": raw_test_dict.get("expect_exit_code", None),
"expect_failure": raw_test_dict.get("expect_failure", False),
"required_files": required_files,
"required_data_tables": required_data_tables,
"required_loc_files": required_loc_files,
"tool_id": tool.id,
"tool_version": tool.version,
"test_index": test_index,
"error": False,
}
except Exception as e:
log.exception("Failed to load tool test number [%d] for %s" % (test_index, tool.id))
processed_test_dict = {
"tool_id": tool.id,
"tool_version": tool.version,
"test_index": test_index,
"inputs": {},
"error": True,
"exception": unicodify(e),
}
return ToolTestDescription(processed_test_dict)
def _process_raw_inputs(tool, tool_inputs, raw_inputs, required_files, required_data_tables, required_loc_files, parent_context=None):
"""
    Recursively expand the flat list of inputs into a "tree"-form flat list
    (using | to nest names to new levels) and expand dataset information
    along the way to populate required_files.
"""
parent_context = parent_context or RootParamContext()
expanded_inputs = {}
for value in tool_inputs.values():
if isinstance(value, galaxy.tools.parameters.grouping.Conditional):
cond_context = ParamContext(name=value.name, parent_context=parent_context)
case_context = ParamContext(name=value.test_param.name, parent_context=cond_context)
raw_input_dict = case_context.extract_value(raw_inputs)
case_value = raw_input_dict["value"] if raw_input_dict else None
case = _matching_case_for_value(tool, value, case_value)
if case:
for input_name, input_value in case.inputs.items():
case_inputs = _process_raw_inputs(tool, {input_name: input_value}, raw_inputs, required_files, required_data_tables, required_loc_files, parent_context=cond_context)
expanded_inputs.update(case_inputs)
if not value.type == "text":
expanded_case_value = _split_if_str(case.value)
if case_value is not None:
# A bit tricky here - we are growing inputs with value
# that may be implicit (i.e. not defined by user just
# a default defined in tool). So we do not want to grow
# expanded_inputs and risk repeat block viewing this
# as a new instance with value defined and hence enter
# an infinite loop - hence the "case_value is not None"
# check.
processed_value = _process_simple_value(value.test_param, expanded_case_value, required_data_tables, required_loc_files)
expanded_inputs[case_context.for_state()] = processed_value
elif isinstance(value, galaxy.tools.parameters.grouping.Section):
context = ParamContext(name=value.name, parent_context=parent_context)
for r_value in value.inputs.values():
expanded_input = _process_raw_inputs(tool, {context.for_state(): r_value}, raw_inputs, required_files, required_data_tables, required_loc_files, parent_context=context)
if expanded_input:
expanded_inputs.update(expanded_input)
elif isinstance(value, galaxy.tools.parameters.grouping.Repeat):
repeat_index = 0
while True:
context = ParamContext(name=value.name, index=repeat_index, parent_context=parent_context)
updated = False
for r_value in value.inputs.values():
expanded_input = _process_raw_inputs(tool, {context.for_state(): r_value}, raw_inputs, required_files, required_data_tables, required_loc_files, parent_context=context)
if expanded_input:
expanded_inputs.update(expanded_input)
updated = True
if not updated:
break
repeat_index += 1
else:
context = ParamContext(name=value.name, parent_context=parent_context)
raw_input_dict = context.extract_value(raw_inputs)
if raw_input_dict:
name = raw_input_dict["name"]
param_value = raw_input_dict["value"]
param_extra = raw_input_dict["attributes"]
if not value.type == "text":
param_value = _split_if_str(param_value)
if isinstance(value, galaxy.tools.parameters.basic.DataToolParameter):
if not isinstance(param_value, list):
param_value = [param_value]
for v in param_value:
_add_uploaded_dataset(context.for_state(), v, param_extra, value, required_files)
processed_value = param_value
elif isinstance(value, galaxy.tools.parameters.basic.DataCollectionToolParameter):
assert 'collection' in param_extra
collection_def = param_extra['collection']
for input_dict in collection_def.collect_inputs():
name = input_dict["name"]
value = input_dict["value"]
attributes = input_dict["attributes"]
require_file(name, value, attributes, required_files)
processed_value = collection_def
else:
processed_value = _process_simple_value(value, param_value, required_data_tables, required_loc_files)
expanded_inputs[context.for_state()] = processed_value
return expanded_inputs
def _process_simple_value(param, param_value, required_data_tables, required_loc_files):
if isinstance(param, galaxy.tools.parameters.basic.SelectToolParameter):
# Tests may specify values as either raw value or the value
# as they appear in the list - the API doesn't and shouldn't
# accept the text value - so we need to convert the text
# into the form value.
def process_param_value(param_value):
found_value = False
value_for_text = None
for (text, opt_value, _) in getattr(param, 'static_options', []):
if param_value == opt_value:
found_value = True
if value_for_text is None and param_value == text:
value_for_text = opt_value
if param.options:
if param.options.tool_data_table_name:
required_data_tables.append(param.options.tool_data_table_name)
elif param.options.index_file:
required_loc_files.append(param.options.index_file)
if not found_value and value_for_text is not None:
processed_value = value_for_text
else:
processed_value = param_value
return processed_value
# Do replacement described above for lists or singleton
# values.
if isinstance(param_value, list):
processed_value = list(map(process_param_value, param_value))
else:
processed_value = process_param_value(param_value)
elif isinstance(param, galaxy.tools.parameters.basic.BooleanToolParameter):
# Like above, tests may use the tool define values of simply
# true/false.
processed_value = _process_bool_param_value(param, param_value)
else:
processed_value = param_value
return processed_value
def _matching_case_for_value(tool, cond, declared_value):
test_param = cond.test_param
if isinstance(test_param, galaxy.tools.parameters.basic.BooleanToolParameter):
if declared_value is None:
# No explicit value for param in test case, determine from default
query_value = test_param.checked
else:
query_value = _process_bool_param_value(test_param, declared_value)
def matches_declared_value(case_value):
return _process_bool_param_value(test_param, case_value) == query_value
elif isinstance(test_param, galaxy.tools.parameters.basic.SelectToolParameter):
if declared_value is not None:
# Test case supplied explicit value to check against.
def matches_declared_value(case_value):
return case_value == declared_value
elif test_param.static_options:
# No explicit value in test case, not much to do if options are dynamic but
# if static options are available can find the one specified as default or
# fallback on top most option (like GUI).
            for (name, _, selected) in test_param.static_options:
                if selected:
                    default_option = name
                    break
            else:
                first_option = test_param.static_options[0]
                first_option_value = first_option[1]
                default_option = first_option_value
def matches_declared_value(case_value):
return case_value == default_option
else:
# No explicit value for this param and cannot determine a
# default - give up. Previously this would just result in a key
# error exception.
msg = f"Failed to find test parameter value specification required for conditional {cond.name}"
raise Exception(msg)
# Check the tool's defined cases against predicate to determine
# selected or default.
for case in cond.cases:
if matches_declared_value(case.value):
return case
else:
msg_template = "%s - Failed to find case matching value (%s) for test parameter specification for conditional %s. Remainder of test behavior is unspecified."
msg = msg_template % (tool.id, declared_value, cond.name)
log.info(msg)
def _add_uploaded_dataset(name, value, extra, input_parameter, required_files):
if value is None:
assert input_parameter.optional, f'{name} is not optional. You must provide a valid filename.'
return value
return require_file(name, value, extra, required_files)
def _split_if_str(value):
split = isinstance(value, str)
if split:
value = value.split(",")
return value
def _process_bool_param_value(param, param_value):
assert isinstance(param, galaxy.tools.parameters.basic.BooleanToolParameter)
was_list = False
if isinstance(param_value, list):
was_list = True
param_value = param_value[0]
if param.truevalue == param_value:
processed_value = True
elif param.falsevalue == param_value:
processed_value = False
else:
if param.optional:
processed_value = string_as_bool_or_none(param_value)
else:
processed_value = string_as_bool(param_value)
return [processed_value] if was_list else processed_value
def require_file(name, value, extra, required_files):
if (value, extra) not in required_files:
required_files.append((value, extra)) # these files will be uploaded
name_change = [att for att in extra.get('edit_attributes', []) if att.get('type') == 'name']
if name_change:
name_change = name_change[-1].get('value') # only the last name change really matters
value = name_change # change value for select to renamed uploaded file for e.g. composite dataset
else:
for end in ['.zip', '.gz']:
if value.endswith(end):
value = value[:-len(end)]
break
value = os.path.basename(value) # if uploading a file in a path other than root of test-data
return value
class ParamContext:
def __init__(self, name, index=None, parent_context=None):
self.parent_context = parent_context
self.name = name
self.index = None if index is None else int(index)
def for_state(self):
name = self.name if self.index is None else "%s_%d" % (self.name, self.index)
parent_for_state = self.parent_context.for_state()
if parent_for_state:
return f"{parent_for_state}|{name}"
else:
return name
def __str__(self):
return f"Context[for_state={self.for_state()}]"
def param_names(self):
for parent_context_param in self.parent_context.param_names():
if self.index is not None:
yield "%s|%s_%d" % (parent_context_param, self.name, self.index)
else:
yield f"{parent_context_param}|{self.name}"
if self.index is not None:
yield "%s_%d" % (self.name, self.index)
else:
yield self.name
def extract_value(self, raw_inputs):
for param_name in self.param_names():
value = self.__raw_param_found(param_name, raw_inputs)
if value:
return value
return None
def __raw_param_found(self, param_name, raw_inputs):
index = None
for i, raw_input_dict in enumerate(raw_inputs):
if raw_input_dict["name"] == param_name:
index = i
if index is not None:
raw_input_dict = raw_inputs[index]
del raw_inputs[index]
return raw_input_dict
else:
return None
class RootParamContext:
def __init__(self):
pass
def for_state(self):
return ""
def param_names(self):
return []
def get_index(self):
return 0
|
1688182
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from cogkge.models.basemodel import BaseModel
class Rescal(BaseModel):
def __init__(self,
entity_dict_len,
relation_dict_len,
embedding_dim,
penalty_weight=0.0):
super().__init__(model_name="Rescal", penalty_weight=penalty_weight)
self.entity_dict_len = entity_dict_len
self.relation_dict_len = relation_dict_len
self.embedding_dim = embedding_dim
self.entity_embedding = nn.Embedding(entity_dict_len, embedding_dim)
self.relation_embedding = nn.Embedding(relation_dict_len, embedding_dim * embedding_dim)
self._reset_param()
    def _reset_param(self):
        # re-initialize embedding parameters
        nn.init.xavier_uniform_(self.entity_embedding.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding.weight.data)
    def get_relation_embedding(self, relation_ids):
        # look up relation embeddings
        return self.relation_embedding(relation_ids)
    def get_entity_embedding(self, entity_ids):
        # look up entity embeddings
        return self.entity_embedding(entity_ids)
    def get_triplet_embedding(self, data):
        # look up (head, relation, tail) embeddings
        h_embedding = self.entity_embedding(data[0])
        r_embedding = self.relation_embedding(data[1])
        t_embedding = self.entity_embedding(data[2])
        return h_embedding, r_embedding, t_embedding
def forward(self, data):
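        # RESCAL bilinear score: f(h, r, t) = h^T M_r t with M_r the relation
        # matrix; the value is returned negated, so lower scores mean more
        # plausible triples (a sign convention assumed from the loss below)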
        batch_h, batch_r, batch_t = data[0], data[1], data[2]
A = self.entity_embedding(batch_h) # (batch,embedding)
A = F.normalize(A, p=2, dim=-1)
R = self.relation_embedding(batch_r).view(-1, self.embedding_dim,self.embedding_dim) # (batch,embedding,embedding)
A_T = self.entity_embedding(batch_t).view(-1, self.embedding_dim, 1) # (batch,embedding,1)
A_T = F.normalize(A_T, p=2, dim=1)
tr = torch.matmul(R, A_T) # (batch,embedding_dim,1)
tr = tr.view(-1, self.embedding_dim) # (batch,embedding_dim)
return -torch.sum(A * tr, dim=-1) # (batch,)
def loss(self, data):
        # compute the loss from positive vs. negative triple scores
pos_data = data
pos_data = self.data_to_device(pos_data)
neg_data = self.model_negative_sampler.create_negative(data)
neg_data = self.data_to_device(neg_data)
pos_score = self.forward(pos_data)
neg_score = self.forward(neg_data)
return self.model_loss(pos_score, neg_score) + self.penalty(data)
|
1688201
|
def dummy(): pass
function = type(dummy)
class Dummy:
def dummy(self): pass
classobj = type(Dummy)
instancemethod = type(Dummy().dummy)
NoneType = type(None)
str = str
ref = 'RPYJSON:null:RPYJSON'
int = int
float = float
bool = bool
dict = dict
list = list
tuple = tuple
|
1688257
|
import os
import tensorflow as tf
slim = tf.contrib.slim
def load_checkpoints(checkpoint_dir, saver):
# Load latest checkpoint if available
all_checkpoint_states = tf.train.get_checkpoint_state(
checkpoint_dir)
if all_checkpoint_states is not None:
all_checkpoint_paths = \
all_checkpoint_states.all_model_checkpoint_paths
# Save the checkpoint list into saver.last_checkpoints
saver.recover_last_checkpoints(all_checkpoint_paths)
else:
print('No checkpoints found')
def get_global_step(sess, global_step_tensor):
# Read the global step if restored
global_step = tf.train.global_step(sess,
global_step_tensor)
return global_step
def create_dir(dir):
"""
    Creates the directory if it does not already exist
Args:
dir: directory to create
"""
if not os.path.exists(dir):
os.makedirs(dir)
def load_model_weights(sess, checkpoint_dir):
"""Restores the model weights.
Loads the weights loaded from checkpoint dir onto the
model. It ignores the missing weights since this is used
to load the RPN weights onto AVOD.
Args:
sess: A TensorFlow session
checkpoint_dir: Path to the weights to be loaded
"""
init_fn = slim.assign_from_checkpoint_fn(
checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)
init_fn(sess)
|
1688274
|
import tweepy
import os
import sys
import random
import logging
from multiprocessing import Queue, Process
"""
Retrieves a sample dataset of pre-2013 Twitter users
"""
DEFAULT_PERCENTAGE = 100
DEFAULT_MIN_ID = 0
DEFAULT_MAX_ID = 5000000000
logger = logging.getLogger(__name__)
def fetch_accounts(api,
account_queue,
min_id=DEFAULT_MIN_ID,
max_id=DEFAULT_MAX_ID,
percentage=DEFAULT_PERCENTAGE):
"""Fetches accounts from a min_id to a max_id.
Arguments:
        api {tweepy.API} -- The authenticated API instance
        account_queue {Queue} -- The queue to send found accounts to
        min_id {int} -- The starting account ID
        max_id {int} -- The maximum account ID
Keyword Arguments:
percentage {int} -- The percentage of accounts between min_id and
max_id to fetch (default: {100})
"""
logger.info('Account enumeration service started')
account_ids = []
for i in range(min_id, max_id, 100):
# Short-circuit for the common case
sample = [uid for uid in range(i, i + 100)]
if percentage != DEFAULT_PERCENTAGE:
sample = random.sample(sample, percentage)
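            # each window spans 100 ids, so sampling `percentage` of them keeps
            # roughly `percentage` percent of the id space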
account_ids.extend(sample)
if len(account_ids) > 100:
try:
results = api.lookup_users(
user_ids=account_ids[0:100], include_entities=True)
except Exception as e:
                logger.error(e)
                continue
for result in results:
user = result._json
user['_tbsource'] = 'enum'
account_queue.put(user)
logger.debug('\t{} results found. Max ID: {}'.format(
len(results), account_ids[100]))
account_ids = account_ids[100:]
def main():
consumer_key = os.environ.get('TWEEPY_CONSUMER_KEY')
consumer_secret = os.environ.get('TWEEPY_CONSUMER_SECRET')
access_token = os.environ.get('TWEEPY_ACCESS_TOKEN')
access_token_secret = os.environ.get('TWEEPY_ACCESS_TOKEN_SECRET')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(
auth, wait_on_rate_limit_notify=True, wait_on_rate_limit=True)
q = Queue()
if len(sys.argv) < 3:
print('Usage: python3 accounts_pre_2013.py min_id max_id')
sys.exit()
min_id = int(sys.argv[1])
max_id = int(sys.argv[2])
try:
p = Process(
target=fetch_accounts,
            args=[api, q, min_id, max_id],
kwargs={'percentage': 5})
p.start()
while True:
try:
elem = q.get()
print(elem)
except Exception as e:
print(e)
except KeyboardInterrupt:
print('\nCtrl+C detected. Shutting down...')
p.terminate()
p.join()
if __name__ == '__main__':
main()
|
1688314
|
from .base import Variable
import itertools
class InteractionType(Variable):
type = "Interaction"
def __init__(self, definition):
self.interactions = definition["interaction variables"]
self.name = "(Interaction: %s)" % str(self.interactions)
self.interaction_fields = self.interactions
super(InteractionType, self).__init__(definition)
def expandInteractions(self, field_model):
self.interaction_fields = self.atomicInteractions(self.interactions,
field_model)
for field in self.interaction_fields:
if field_model[field].has_missing:
self.has_missing = True
self.categorical(field_model)
def categorical(self, field_model):
categoricals = [field for field in self.interaction_fields
if hasattr(field_model[field], "higher_vars")]
noncategoricals = [field for field in self.interaction_fields
if not hasattr(field_model[field], "higher_vars")]
dummies = [field_model[field].higher_vars
for field in categoricals]
self.higher_vars = []
for combo in itertools.product(*dummies):
var_names = [field.name for field in combo] + noncategoricals
higher_var = InteractionType({'has missing': self.has_missing,
'interaction variables': var_names})
self.higher_vars.append(higher_var)
def atomicInteractions(self, interactions, field_model):
atomic_interactions = []
for field in interactions:
try:
field_model[field]
except KeyError:
raise KeyError("The interaction variable %s is "
"not a named variable in the variable "
"definition" % field)
if hasattr(field_model[field], 'interaction_fields'):
sub_interactions = field_model[field].interaction_fields
atoms = self.atomicInteractions(sub_interactions, field_model)
atomic_interactions.extend(atoms)
else:
atomic_interactions.append(field)
return atomic_interactions
|
1688325
|
from azureml.core import Run
from mlapp.main import MLApp
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.integrations.aml.utils.run_class import load_config_from_string, tag_and_log_run, tag_and_log_outputs
import argparse
from config import settings
from mlapp.managers.flow_manager import FlowManager
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, dest='config', help='configuration')
args = parser.parse_args()
run = Run.get_context()
# pre-processing
config = load_config_from_string(args.config)
tag_and_log_run(config)
# init mlapp
MLApp(settings)
# run config
_, output_ids, output_data = FlowManager(Run.get_context().id, config).run()
# post-processing
tag_and_log_outputs(output_ids)
# file storage post-processing
file_storage_instance.postprocessing()
|
1688348
|
from datetime import datetime
from app import DATETIME_FORMAT
from tests.app.db import (
create_api_key,
create_service,
create_notification,
create_template
)
from app.models import (
KEY_TYPE_NORMAL,
)
def test_get_api_key_stats_with_sends(admin_request, notify_db, notify_db_session):
service = create_service(service_name='Service 1')
api_key = create_api_key(service)
template = create_template(service=service, template_type='email')
total_sends = 10
for x in range(total_sends):
create_notification(template=template, api_key=api_key)
api_key_stats = admin_request.get(
'api_key.get_api_key_stats',
api_key_id=api_key.id
)['data']
assert api_key_stats["api_key_id"] == str(api_key.id)
assert api_key_stats["email_sends"] == total_sends
assert api_key_stats["sms_sends"] == 0
assert api_key_stats["total_sends"] == total_sends
# the following lines test that a send has occurred within the last second
last_send_dt = datetime.strptime(api_key_stats["last_send"], DATETIME_FORMAT)
now = datetime.utcnow()
time_delta = now - last_send_dt
assert abs(time_delta.total_seconds()) < 1
def test_get_api_key_stats_no_sends(admin_request, notify_db, notify_db_session):
service = create_service(service_name='Service 2')
api_key = create_api_key(service)
api_key_stats = admin_request.get(
'api_key.get_api_key_stats',
api_key_id=api_key.id
)['data']
assert api_key_stats["api_key_id"] == str(api_key.id)
assert api_key_stats["email_sends"] == 0
assert api_key_stats["sms_sends"] == 0
assert api_key_stats["total_sends"] == 0
assert api_key_stats["last_send"] is None
def test_get_api_keys_ranked(admin_request, notify_db, notify_db_session):
service = create_service(service_name='Service 1')
api_key_1 = create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name="Key 1")
api_key_2 = create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name="Key 2")
template_email = create_template(service=service, template_type='email')
total_sends = 10
create_notification(template=template_email, api_key=api_key_1)
for x in range(total_sends):
create_notification(template=template_email, api_key=api_key_1)
create_notification(template=template_email, api_key=api_key_2)
api_keys_ranked = admin_request.get(
'api_key.get_api_keys_ranked',
n_days_back=2
)['data']
assert api_keys_ranked[0]["api_key_name"] == api_key_1.name
assert api_keys_ranked[0]["service_name"] == service.name
assert api_keys_ranked[0]["sms_notifications"] == 0
assert api_keys_ranked[0]["email_notifications"] == total_sends + 1
assert api_keys_ranked[0]["total_notifications"] == total_sends + 1
assert "last_notification_created" in api_keys_ranked[0]
assert api_keys_ranked[1]["api_key_name"] == api_key_2.name
assert api_keys_ranked[1]["service_name"] == service.name
assert api_keys_ranked[1]["sms_notifications"] == 0
assert api_keys_ranked[1]["email_notifications"] == total_sends
assert api_keys_ranked[1]["total_notifications"] == total_sends
assert "last_notification_created" in api_keys_ranked[0]
|
1688366
|
from concurrent.futures import ThreadPoolExecutor, wait
import traceback
import os
import sys
import time
nworkers = int(sys.argv[1])
n = 40000
nruns = 11
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as sla
from test_data import discrete_laplacian
base_array = discrete_laplacian(n)
#print(sla.eigsh(base_array, 25, which = 'LM')[0])
# Store underlying buffers as memoryviews for handoff
# to different VECs.
data = memoryview(base_array.data)
indices = memoryview(base_array.indices)
indptr = memoryview(base_array.indptr)
np.random.seed(0)
v0 = memoryview(np.random.rand(n))
pool = ThreadPoolExecutor(max_workers = nworkers)
#print("starting")
for i in range(nruns):
def call_arpack(i):
try:
a = sparse.csr_matrix((data, indices, indptr), shape = (n, n))
eig = sla.eigsh(a, 25, which = 'LM', v0 = np.asarray(v0))
except:
traceback.print_exc()
raise
start = time.perf_counter()
futures = [pool.submit(call_arpack, i) for i in range(nworkers)]
wait(futures)
stop = time.perf_counter()
print(stop - start, flush = True)
|
1688370
|
import time
from functools import wraps
import requests
from enum import IntEnum
try:
import gevent
do_sleep = gevent.sleep
except ImportError:
do_sleep = time.sleep
import pyVmomi
from pyVmomi import vim, Version as pyVmomi_version
from pyVim.connect import SmartConnect
import logging
from requests.exceptions import ConnectionError
requests.packages.urllib3.disable_warnings()
__author__ = 'Eddi'
logging.basicConfig(filename='vshpere_client.log', level=logging.DEBUG)
LOG = logging
class VSphereEntity(IntEnum):
Vm = 1
Folder = 2
class VSphereException(Exception):
def __init__(self, err_msg, *args):
Exception.__init__(self, err_msg % args)
class VSphereTaskFailed(VSphereException):
def __init__(self, job_name, error):
super(VSphereTaskFailed, self).__init__("vSphere Task %s failed with error %s" % (job_name, error))
# an exception after which we should try to reconnect
class VSphereReconnectException(VSphereException):
pass
# Shamelessly borrowed from:
# https://github.com/dnaeon/py-vconnector/blob/master/src/vconnector/core.py
def collect_properties(service_instance, view_ref, obj_type, path_set=None,
include_mors=False):
"""
Collect properties for managed objects from a view ref
Check the vSphere API documentation for example on retrieving
object properties:
- http://goo.gl/erbFDz
Args:
        service_instance (ServiceInstance): ServiceInstance connection
view_ref (pyVmomi.vim.view.*): Starting point of inventory navigation
obj_type (pyVmomi.vim.*): Type of managed object
path_set (list): List of properties to retrieve
include_mors (bool): If True include the managed objects
refs in the result
Returns:
A list of properties for the managed objects
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
    # Identify the properties to be retrieved
property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if include_mors:
properties['obj'] = obj.obj
data.append(properties)
return data
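# A minimal usage sketch (hedged: the property paths are examples; any vim.*
# managed object type with matching properties works):
#
#     view = get_container_view(si, obj_type=[vim.VirtualMachine])
#     vms = collect_properties(si, view, vim.VirtualMachine,
#                              path_set=['name', 'runtime.powerState'],
#                              include_mors=True)
#     for vm in vms:
#         print(vm['name'], vm['runtime.powerState'])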
def get_container_view(service_instance, obj_type, container=None):
"""
Get a vSphere Container View reference to all objects of type 'obj_type'
It is up to the caller to take care of destroying the View when no longer
needed.
Args:
obj_type (list): A list of managed object types
Returns:
A container view ref to the discovered managed objects
"""
if not container:
container = service_instance.content.rootFolder
view_ref = service_instance.content.viewManager.CreateContainerView(
container=container,
type=obj_type,
recursive=True
)
return view_ref
def reconnect_on_fault(func):
"""
Decorator for functions which access the vSphere API.
Tries to reconnect on vSphere errors (e.g. session authentication timeout, etc).
"""
@wraps(func)
def decorated(client, *args, **kwargs):
try:
return func(client, *args, **kwargs)
except (vim.MethodFault, VSphereReconnectException) as exc:
LOG.exception('vSphere connection error raised (will try to reconnect): %s', exc)
client.connect(reconnect=True)
return func(client, *args, **kwargs)
except Exception as e:
LOG.exception('Failed to execute {} - got {}'.format(func, e))
return decorated
class VSphereClient(object):
"""
Wrapper for Vsphere
"""
def __init__(self, admin_user, admin_password, auth_host, auth_port=443):
self.admin_user = admin_user
        self.admin_password = admin_password
self.auth_host = auth_host
self.auth_port = auth_port
self._vsphere_connection = None
self._vsphere_content = None
        self._perf_manager = None
def set_configuration(self, admin_user, admin_password, auth_host, auth_port=443):
self.admin_user = admin_user
        self.admin_password = admin_password
self.auth_host = auth_host
self.auth_port = auth_port
def __repr__(self):
return "<VSphereClient host='%s:%d' [%s] user='%s' password='%s'>" % \
(self.auth_host, self.auth_port,
"On" if self._vsphere_connection is not None else "Off",
self.admin_user, "*" * len(self.admin_password))
def __del__(self):
self.close()
def connect(self, reconnect=False, insecure=True):
"""
Create new authenticated VSphere client
"""
if self._vsphere_connection is None or reconnect:
LOG.info('Connecting to vSphere server on %s:%s' % (self.auth_host, self.auth_port))
kwargs = {'host': self.auth_host, 'port': self.auth_port,
                      'user': self.admin_user, 'pwd': self.admin_password}
vmomi_versions = sorted(pyVmomi_version.versionMap.keys())
LOG.debug("PyVmomi versions: %s", vmomi_versions)
if insecure and ("vim25/6.5" in vmomi_versions or "vim25/6.0" in vmomi_versions):
try:
import ssl
kwargs['sslContext'] = ssl._create_unverified_context()
except (ImportError, AttributeError):
# on python older than 2.7.9 ssl does not have this function
pass
try:
self._vsphere_connection = SmartConnect(**kwargs)
self._vsphere_content = self._vsphere_connection.RetrieveContent()
self._perf_manager = self._vsphere_content.perfManager
except (ConnectionError, vim.fault.InvalidLogin) as exc:
raise VSphereException("Failed connecting to %s:%s using user %s: %s" % (self.auth_host,
self.auth_port,
self.admin_user,
exc))
def close(self):
if self._vsphere_connection:
del self._vsphere_connection
self._vsphere_connection = None
@property
@reconnect_on_fault
def session_key(self):
"""
:return: The current session key. A unique identifier of the current connection.
"""
if self._vsphere_content is not None and self._vsphere_connection is not None:
# perform simple operation to check connectivity.
self._vsphere_connection.CurrentTime()
if not self._vsphere_content.sessionManager.currentSession:
raise VSphereReconnectException("Can't get session key, session might be off")
return self._vsphere_content.sessionManager.currentSession.key
@reconnect_on_fault
def _list_objects(self, object_type, folder=None):
if folder is None:
folder = self._vsphere_content.rootFolder
objview = self._vsphere_content.viewManager.CreateContainerView(folder,
[object_type],
True)
objects = objview.view
objview.Destroy()
return objects
@reconnect_on_fault
def _get_obj(self, vim_type, name):
"""
Get the vsphere object associated with a given text name
:param vim_type: List of pyVmomi types.
:type vim_type: vim.*
:param name: The name of the desired object.
:type name: str.
"""
obj = None
container = self._vsphere_content.viewManager.CreateContainerView(self._vsphere_content.rootFolder, vim_type,
True)
for item in container.view:
if item.name == name:
obj = item
break
if obj is None:
LOG.debug("Could not find %s ", name)
return obj
def get_obj(self, vim_type, name):
return self._get_obj(vim_type, name)
def list_vms(self):
return self._list_objects(vim.VirtualMachine)
def list_hosts(self):
return self._list_objects(vim.HostSystem)
def list_users(self):
user_list = []
for dom in self.domains: # skipping host users
tmp_list = self._vsphere_content.userDirectory.RetrieveUserGroups(
domain=dom, searchStr="", exactMatch=False,
findUsers=True, findGroups=True)
user_list.extend(tmp_list)
return user_list
# -----------------------------------------
# Property-Collector based API (used for deployment)
# TODO: consider using only this API
# -----------------------------------------
@reconnect_on_fault
def _get_objects(self, object_type, properties):
view = get_container_view(self._vsphere_connection,
obj_type=[object_type])
objects = collect_properties(self._vsphere_connection,
view_ref=view,
obj_type=object_type,
path_set=properties,
include_mors=True)
for obj in objects:
obj['moid'] = obj['obj']._moId
del obj['obj']
return objects
def collect_roles(self):
return self._vsphere_content.authorizationManager.roleList
@property
def roles(self):
return self.collect_roles()
def collect_domains(self):
return self._vsphere_content.userDirectory.domainList[1:]
@property
def domains(self):
"""
Return all domains except host domain
:return: List of domains except host domain.
"""
return self.collect_domains()
@reconnect_on_fault
def _get_obj_by_moid(self, obj_type, moid):
obj = obj_type(moid)
obj._stub = self._vsphere_connection._stub
return obj
def get_vm(self, vm_moid):
"""
Get VM by moid.
"""
return self._get_obj_by_moid(vim.VirtualMachine, vm_moid)
def get_host(self, host_moid):
"""
Get host by moid.
"""
return self._get_obj_by_moid(vim.HostSystem, host_moid)
def get_host_by_name(self, host_name):
"""
Get host by name.
"""
return self.get_obj([vim.HostSystem], host_name)
@reconnect_on_fault
def wait_for_task(self, task, action_name, hide_result=False, update_status_callback=None):
if update_status_callback is None:
def dummy_callback(task):
pass
update_status_callback = dummy_callback
LOG.info('Waiting for %s to complete.', action_name)
last_state = (None, None)
while task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
if task.info.state == "canceled":
try:
task.CancelTask()
except Exception as exc:
LOG.warn("Error canceling task '%s': %s", action_name, exc)
LOG.warn('%s was canceled!', action_name)
return None
elif last_state != (task.info.state, task.info.progress):
LOG.info("Task '%s' state: %s (progress: %s%%)", action_name, task.info.state, task.info.progress or 0)
last_state = (task.info.state, task.info.progress)
try:
update_status_callback(task)
except Exception:
LOG.exception("Error while calling %s task update status callback", action_name)
do_sleep(1)
if task.info.state == vim.TaskInfo.State.success:
try:
update_status_callback(task)
except Exception:
LOG.exception("Error while calling %s task update status callback", action_name)
if task.info.result is not None and not hide_result:
LOG.info('%s completed successfully, result: %s', action_name, task.info.result)
else:
LOG.info('%s completed successfully.', action_name)
else:
LOG.error('%s did not complete successfully: %s', action_name, task.info.error)
raise VSphereTaskFailed(action_name, task.info.error)
# may not always be applicable, but can't hurt.
return task
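# --- Usage sketch (illustrative; host and credentials are made up) ---
# client = VSphereClient('administrator', 'secret', 'vcenter.example.com')
# client.connect()
# vm = client.get_obj([vim.VirtualMachine], 'test-vm')
# task = vm.PowerOnVM_Task()
# client.wait_for_task(task, 'power on test-vm')
# client.close()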
|
1688460
|
import sys
import cftime
import numpy as np
import pandas as pd
import pytest
import xarray as xr
# Import from directory structure if coverage test, or from installed
# packages otherwise
if "--cov" in str(sys.argv):
from src.geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average
else:
from geocat.comp import anomaly, climatology, month_to_season, calendar_average, climatology_average
dset_a = xr.tutorial.open_dataset("rasm")
dset_b = xr.tutorial.open_dataset("air_temperature")
dset_c = dset_a.copy().rename({"time": "Times"})
dset_encoded = xr.tutorial.open_dataset("rasm", decode_cf=False)
def get_fake_dataset(start_month, nmonths, nlats, nlons):
"""Returns a very simple xarray dataset for testing.
Data values are equal to "month of year" for monthly time steps.
"""
# Create coordinates
months = pd.date_range(start=pd.to_datetime(start_month),
periods=nmonths,
freq="MS")
lats = np.linspace(start=-90, stop=90, num=nlats, dtype="float32")
lons = np.linspace(start=-180, stop=180, num=nlons, dtype="float32")
# Create data variable. Construct a 3D array with time as the first
# dimension.
month_values = np.expand_dims(np.arange(start=1, stop=nmonths + 1),
axis=(1, 2))
var_values = np.tile(month_values, (1, nlats, nlons))
ds = xr.Dataset(
data_vars={
"my_var": (("time", "lat", "lon"), var_values.astype("float32")),
},
coords={
"time": months,
"lat": lats,
"lon": lons
},
)
return ds
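# Illustrative check of the fixture above (not an actual test):
# get_fake_dataset("2000-01", 3, 1, 1)["my_var"].values.squeeze()
# -> array([1., 2., 3.], dtype=float32), one "month of year" value per step.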
def _get_dummy_data(start_date,
end_date,
freq,
nlats,
nlons,
calendar='standard'):
"""Returns a simple xarray dataset to test with.
Data can be hourly, daily, or monthly.
"""
# Coordinates
time = xr.cftime_range(start=start_date,
end=end_date,
freq=freq,
calendar=calendar)
lats = np.linspace(start=-90, stop=90, num=nlats, dtype='float32')
lons = np.linspace(start=-180, stop=180, num=nlons, dtype='float32')
# Create data variable
values = np.expand_dims(np.arange(len(time)), axis=(1, 2))
data = np.tile(values, (1, nlats, nlons))
ds = xr.Dataset(data_vars={'data': (('time', 'lat', 'lon'), data)},
coords={
'time': time,
'lat': lats,
'lon': lons
})
return ds
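# Illustrative: _get_dummy_data('2020-01-01', '2020-01-02', 'H', 1, 1) yields
# 25 hourly steps (endpoints inclusive) whose data values are simply
# np.arange(25), which the tests below average into coarser frequencies.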
def test_climatology_invalid_freq():
with pytest.raises(ValueError):
climatology(dset_a, "hourly")
def test_climatology_encoded_time():
with pytest.raises(ValueError):
climatology(dset_encoded, "monthly")
@pytest.mark.parametrize("dataset", [dset_a, dset_b, dset_c["Tair"]])
@pytest.mark.parametrize("freq", ["day", "month", "year", "season"])
def test_climatology_setup(dataset, freq):
computed_dset = climatology(dataset, freq)
assert type(dataset) == type(computed_dset)
@pytest.mark.parametrize("dataset", [dset_a, dset_b, dset_c["Tair"]])
@pytest.mark.parametrize("freq", ["day", "month", "year", "season"])
def test_anomaly_setup(dataset, freq):
computed_dset = anomaly(dataset, freq)
assert type(dataset) == type(computed_dset)
ds1 = get_fake_dataset(start_month="2000-01", nmonths=12, nlats=1, nlons=1)
# Create another dataset for the year 2001.
ds2 = get_fake_dataset(start_month="2001-01", nmonths=12, nlats=1, nlons=1)
# Create a dataset that combines the two previous datasets, for two
# years of data.
ds3 = xr.concat([ds1, ds2], dim="time")
# Create a dataset with the wrong number of months.
partial_year_dataset = get_fake_dataset(start_month="2000-01",
nmonths=13,
nlats=1,
nlons=1)
# Create a dataset with a custom time coordinate.
custom_time_dataset = get_fake_dataset(start_month="2000-01",
nmonths=12,
nlats=1,
nlons=1)
custom_time_dataset = custom_time_dataset.rename({"time": "my_time"})
# Create a more complex dataset just to verify that get_fake_dataset()
# is generally working.
complex_dataset = get_fake_dataset(start_month="2001-01",
nmonths=12,
nlats=10,
nlons=10)
@pytest.mark.parametrize("dataset, season, expected", [(ds1, "JFM", 2.0),
(ds1, "JJA", 7.0)])
def test_month_to_season_returns_middle_month_value(dataset, season, expected):
season_ds = month_to_season(dataset, season)
np.testing.assert_equal(season_ds["my_var"].data, expected)
def test_month_to_season_bad_season_exception():
with pytest.raises(KeyError):
month_to_season(ds1, "TEST")
def test_month_to_season_partial_years_exception():
with pytest.raises(ValueError):
month_to_season(partial_year_dataset, "JFM")
@pytest.mark.parametrize("dataset, season, expected", [(ds1, "NDJ", 11.5)])
def test_month_to_season_final_season_returns_2month_average(
dataset, season, expected):
season_ds = month_to_season(dataset, season)
np.testing.assert_equal(season_ds["my_var"].data, expected)
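# Note: for a dataset ending in December, the final "NDJ" season is missing
# the following January, so the expected value above is the two-month mean
# of November (11) and December (12), i.e. 11.5.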
@pytest.mark.parametrize(
"season",
[
"DJF",
"JFM",
"FMA",
"MAM",
"AMJ",
"MJJ",
"JJA",
"JAS",
"ASO",
"SON",
"OND",
"NDJ",
],
)
def test_month_to_season_returns_one_point_per_year(season):
nyears_of_data = ds3.sizes["time"] / 12
season_ds = month_to_season(ds3, season)
assert season_ds["my_var"].size == nyears_of_data
@pytest.mark.parametrize(
"dataset, time_coordinate, var_name, expected",
[
(custom_time_dataset, "my_time", "my_var", 2.0),
(dset_c.isel(x=110, y=200), None, "Tair", [-10.56, -8.129, -7.125]),
],
)
def test_month_to_season_custom_time_coordinate(dataset, time_coordinate,
var_name, expected):
season_ds = month_to_season(dataset, "JFM", time_coord_name=time_coordinate)
np.testing.assert_almost_equal(season_ds[var_name].data,
expected,
decimal=1)
# Test Datasets For calendar_average() and climatology_average()
minute = _get_dummy_data('2020-01-01', '2021-12-31 23:30:00', '30min', 1, 1)
hourly = _get_dummy_data('2020-01-01', '2021-12-31 23:00:00', 'H', 1, 1)
daily = _get_dummy_data('2020-01-01', '2021-12-31', 'D', 1, 1)
monthly = _get_dummy_data('2020-01-01', '2021-12-01', 'MS', 1, 1)
# Computational Tests for calendar_average()
hour_avg = np.arange(0.5, 35088.5, 2).reshape((365 + 366) * 24, 1, 1)
hour_avg_time = xr.cftime_range('2020-01-01 00:30:00',
'2021-12-31 23:30:00',
freq='H')
min_2_hour_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), hour_avg)},
coords={
'time': hour_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(minute, min_2_hour_avg)])
def test_30min_to_hourly_calendar_average(dset, expected):
result = calendar_average(dset, freq='hour')
xr.testing.assert_equal(result, expected)
day_avg = np.arange(11.5, 17555.5, 24).reshape(366 + 365, 1, 1)
day_avg_time = xr.cftime_range('2020-01-01 12:00:00',
'2021-12-31 12:00:00',
freq='D')
hour_2_day_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), day_avg)},
coords={
'time': day_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(hourly, hour_2_day_avg)])
def test_hourly_to_daily_calendar_average(dset, expected):
result = calendar_average(dset, freq='day')
xr.testing.assert_equal(result, expected)
month_avg = np.array([
15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 410.5,
440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715
]).reshape(24, 1, 1)
month_avg_time = xr.cftime_range('2020-01-01', '2022-01-01', freq='MS')
month_avg_time = xr.DataArray(np.vstack((month_avg_time[:-1], month_avg_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
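# The vstack/mean trick above places each time label at the midpoint between
# consecutive month starts, matching how calendar_average centres its
# monthly bins; the same pattern is reused for the other frequencies below.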
day_2_month_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), month_avg)},
coords={
'time': month_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(daily, day_2_month_avg)])
def test_daily_to_monthly_calendar_average(dset, expected):
result = calendar_average(dset, freq='month')
xr.testing.assert_equal(result, expected)
season_avg = np.array([29.5, 105.5, 197.5, 289, 379.5, 470.5, 562.5, 654,
715]).reshape(9, 1, 1)
season_avg_time = xr.cftime_range('2019-12-01', '2022-03-01', freq='QS-DEC')
season_avg_time = xr.DataArray(np.vstack((season_avg_time[:-1], season_avg_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
day_2_season_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), season_avg)},
coords={
'time': season_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
season_avg = np.array(
[0.483333333, 3, 6.010869565, 9, 11.96666667, 15, 18.01086957, 21,
23]).reshape(9, 1, 1)
month_2_season_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), season_avg)},
coords={
'time': season_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(daily, day_2_season_avg),
(monthly, month_2_season_avg)])
def test_daily_monthly_to_seasonal_calendar_average(dset, expected):
result = calendar_average(dset, freq='season')
xr.testing.assert_allclose(result, expected)
year_avg_time = [
cftime.datetime(2020, 7, 2),
cftime.datetime(2021, 7, 2, hour=12)
]
day_2_year_avg = [[[182.5]], [[548]]]
day_2_year_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), day_2_year_avg)},
coords={
'time': year_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
month_2_year_avg = [[[5.513661202]], [[17.5260274]]]
month_2_year_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), month_2_year_avg)},
coords={
'time': year_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(daily, day_2_year_avg),
(monthly, month_2_year_avg)])
def test_daily_monthly_to_yearly_calendar_average(dset, expected):
result = calendar_average(dset, freq='year')
xr.testing.assert_allclose(result, expected)
# Computational Tests for climatology_average()
hour_clim = np.concatenate([np.arange(8784.5, 11616.5, 2),
np.arange(2832.5, 2880.5, 2),
np.arange(11640.5, 26328.5, 2)])\
.reshape(8784, 1, 1)
hour_clim_time = xr.cftime_range('2020-01-01 00:30:00',
'2020-12-31 23:30:00',
freq='H')
min_2_hourly_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), hour_clim)},
coords={
'time': hour_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(minute, min_2_hourly_clim)])
def test_30min_to_hourly_climatology_average(dset, expected):
result = climatology_average(dset, freq='hour')
xr.testing.assert_allclose(result, expected)
day_clim = np.concatenate([np.arange(4403.5, 5819.5, 24),
[1427.5],
np.arange(5831.5, 13175.5, 24)]) \
.reshape(366, 1, 1)
day_clim_time = xr.cftime_range('2020-01-01 12:00:00',
'2020-12-31 12:00:00',
freq='24H')
hour_2_day_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), day_clim)},
coords={
'time': day_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(hourly, hour_2_day_clim)])
def test_hourly_to_daily_climatology_average(dset, expected):
result = climatology_average(dset, freq='day')
xr.testing.assert_equal(result, expected)
month_clim = np.array([
198, 224.5438596, 257.5, 288, 318.5, 349, 379.5, 410.5, 441, 471.5, 502,
532.5
]).reshape(12, 1, 1)
month_clim_time = xr.cftime_range('2020-01-01', '2021-01-01', freq='MS')
month_clim_time = xr.DataArray(np.vstack(
(month_clim_time[:-1], month_clim_time[1:])).T,
dims=['time', 'nbd']).mean(dim='nbd')
day_2_month_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), month_clim)},
coords={
'time': month_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(daily, day_2_month_clim)])
def test_daily_to_monthly_climatology_average(dset, expected):
result = climatology_average(dset, freq='month')
xr.testing.assert_allclose(result, expected)
season_clim = np.array([320.9392265, 380, 288, 471.5]).reshape(4, 1, 1)
season_clim_time = ['DJF', 'JJA', 'MAM', 'SON']
day_2_season_clim = xr.Dataset(
data_vars={'data': (('season', 'lat', 'lon'), season_clim)},
coords={
'season': season_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
season_clim = np.array([10.04972376, 12.01086957, 9, 15]).reshape(4, 1, 1)
month_2_season_clim = xr.Dataset(
data_vars={'data': (('season', 'lat', 'lon'), season_clim)},
coords={
'season': season_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected', [(daily, day_2_season_clim),
(monthly, month_2_season_clim)])
def test_daily_monthly_to_seasonal_climatology_average(dset, expected):
result = climatology_average(dset, freq='season')
xr.testing.assert_allclose(result, expected)
# Argument Tests for climatology_average() and calendar_average()
@pytest.mark.parametrize('freq', ['TEST', None])
def test_invalid_freq_climatology_average(freq):
with pytest.raises(KeyError):
climatology_average(monthly, freq=freq)
@pytest.mark.parametrize('freq', ['TEST', None])
def test_invalid_freq_calendar_average(freq):
with pytest.raises(KeyError):
calendar_average(monthly, freq=freq)
time_dim = 'my_time'
custom_time = daily.rename({'time': time_dim})
custom_time_expected = day_2_month_clim.rename({'time': time_dim})
@pytest.mark.parametrize('dset, expected, time_dim',
[(custom_time, custom_time_expected, time_dim)])
def test_custom_time_coord_climatology_average(dset, expected, time_dim):
result = climatology_average(dset, freq='month', time_dim=time_dim)
xr.testing.assert_allclose(result, expected)
custom_time_expected = day_2_month_avg.rename({'time': time_dim})
@pytest.mark.parametrize('dset, expected, time_dim',
[(custom_time, custom_time_expected, time_dim)])
def test_custom_time_coord_calendar_average(dset, expected, time_dim):
result = calendar_average(dset, freq='month', time_dim=time_dim)
xr.testing.assert_allclose(result, expected)
array = daily['data']
array_expected = day_2_month_clim['data']
@pytest.mark.parametrize('da, expected', [(array, array_expected)])
def test_xr_DataArray_support_climatology_average(da, expected):
result = climatology_average(da, freq='month')
xr.testing.assert_allclose(result, expected)
array_expected = day_2_month_avg['data']
@pytest.mark.parametrize('da, expected', [(array, array_expected)])
def test_xr_DataArray_support_calendar_average(da, expected):
result = calendar_average(da, freq='month')
xr.testing.assert_equal(result, expected)
dset_encoded = xr.tutorial.open_dataset("air_temperature", decode_cf=False)
def test_non_datetime_like_objects_climatology_average():
with pytest.raises(ValueError):
climatology_average(dset_encoded, 'month')
def test_non_datetime_like_objects_calendar_average():
with pytest.raises(ValueError):
calendar_average(dset_encoded, 'month')
time = pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-04'])
non_uniform = xr.Dataset(data_vars={'data': (('time'), np.arange(3))},
coords={'time': time})
def test_non_uniformly_spaced_data_climatology_average():
with pytest.raises(ValueError):
climatology_average(non_uniform, freq='day')
def test_non_uniformly_spaced_data_calendar_average():
with pytest.raises(ValueError):
calendar_average(non_uniform, freq='day')
julian_daily = _get_dummy_data('2020-01-01',
'2021-12-31',
'D',
1,
1,
calendar='julian')
noleap_daily = _get_dummy_data('2020-01-01',
'2021-12-31',
'D',
1,
1,
calendar='noleap')
all_leap_daily = _get_dummy_data('2020-01-01',
'2021-12-31',
'D',
1,
1,
calendar='all_leap')
day_360_daily = _get_dummy_data('2020-01-01',
'2021-12-30',
'D',
1,
1,
calendar='360_day')
# Daily -> Monthly Climatologies for Julian Calendar
julian_month_clim = np.array([198, 224.54385965, 257.5, 288, 318.5, 349,
379.5, 410.5, 441, 471.5, 502, 532.5])\
.reshape(12, 1, 1)
julian_month_clim_time = xr.cftime_range('2020-01-01',
'2021-01-01',
freq='MS',
calendar='julian')
julian_month_clim_time = xr.DataArray(np.vstack((julian_month_clim_time[:-1], julian_month_clim_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
julian_day_2_month_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), julian_month_clim)},
coords={
'time': julian_month_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
# Daily -> Monthly Climatologies for NoLeap Calendar
noleap_month_clim = np.array([197.5, 227, 256.5, 287, 317.5, 348,
378.5, 409.5, 440, 470.5, 501, 531.5])\
.reshape(12, 1, 1)
noleap_month_clim_time = xr.cftime_range('2020-01-01',
'2021-01-01',
freq='MS',
calendar='noleap')
noleap_month_clim_time = xr.DataArray(np.vstack((noleap_month_clim_time[:-1], noleap_month_clim_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
noleap_day_2_month_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), noleap_month_clim)},
coords={
'time': noleap_month_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
# Daily -> Monthly Climatologies for AllLeap Calendar
all_leap_month_clim = np.array([198, 228, 258, 288.5, 319, 349.5,
380, 411, 441.5, 472, 502.5, 533])\
.reshape(12, 1, 1)
all_leap_month_clim_time = xr.cftime_range('2020-01-01',
'2021-01-01',
freq='MS',
calendar='all_leap')
all_leap_month_clim_time = xr.DataArray(np.vstack((all_leap_month_clim_time[:-1], all_leap_month_clim_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
all_leap_day_2_month_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), all_leap_month_clim)},
coords={
'time': all_leap_month_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
# Daily -> Monthly Climatologies for 360 Day Calendar
day_360_leap_month_clim = np.arange(194.5, 554.5, 30).reshape(12, 1, 1)
day_360_leap_month_clim_time = xr.cftime_range('2020-01-01',
'2021-01-01',
freq='MS',
calendar='360_day')
day_360_leap_month_clim_time = xr.DataArray(np.vstack((day_360_leap_month_clim_time[:-1], day_360_leap_month_clim_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
day_360_leap_day_2_month_clim = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), day_360_leap_month_clim)},
coords={
'time': day_360_leap_month_clim_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected',
[(julian_daily, julian_day_2_month_clim),
(noleap_daily, noleap_day_2_month_clim),
(all_leap_daily, all_leap_day_2_month_clim),
(day_360_daily, day_360_leap_day_2_month_clim)])
def test_non_standard_calendars_climatology_average(dset, expected):
result = climatology_average(dset, freq='month')
xr.testing.assert_allclose(result, expected)
# Daily -> Monthly Means for Julian Calendar
julian_month_avg = np.array([
15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 410.5,
440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715
]).reshape(24, 1, 1)
julian_month_avg_time = xr.cftime_range('2020-01-01',
'2022-01-01',
freq='MS',
calendar='julian')
julian_month_avg_time = xr.DataArray(np.vstack((julian_month_avg_time[:-1], julian_month_avg_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
julian_day_2_month_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), julian_month_avg)},
coords={
'time': julian_month_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
# Daily -> Monthly Means for NoLeap Calendar
noleap_month_avg = np.array([
15, 44.5, 74, 104.5, 135, 165.5, 196, 227, 257.5, 288, 318.5, 349, 380,
409.5, 439, 469.5, 500, 530.5, 561, 592, 622.5, 653, 683.5, 714
]).reshape(24, 1, 1)
noleap_month_avg_time = xr.cftime_range('2020-01-01',
'2022-01-01',
freq='MS',
calendar='noleap')
noleap_month_avg_time = xr.DataArray(np.vstack((noleap_month_avg_time[:-1], noleap_month_avg_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
noleap_day_2_month_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), noleap_month_avg)},
coords={
'time': noleap_month_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
# Daily -> Monthly Means for AllLeap Calendar
all_leap_month_avg = np.array([
15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 411,
441, 471.5, 502, 532.5, 563, 594, 624.5, 655, 685.5, 716
]).reshape(24, 1, 1)
all_leap_month_avg_time = xr.cftime_range('2020-01-01',
'2022-01-01',
freq='MS',
calendar='all_leap')
all_leap_month_avg_time = xr.DataArray(np.vstack((all_leap_month_avg_time[:-1], all_leap_month_avg_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
all_leap_day_2_month_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), all_leap_month_avg)},
coords={
'time': all_leap_month_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
# Daily -> Monthly Means for 360 Day Calendar
day_360_leap_month_avg = np.arange(14.5, 734.5, 30).reshape(24, 1, 1)
day_360_leap_month_avg_time = xr.cftime_range('2020-01-01',
'2022-01-01',
freq='MS',
calendar='360_day')
day_360_leap_month_avg_time = xr.DataArray(np.vstack((day_360_leap_month_avg_time[:-1], day_360_leap_month_avg_time[1:])).T,
dims=['time', 'nbd']) \
.mean(dim='nbd')
day_360_leap_day_2_month_avg = xr.Dataset(
data_vars={'data': (('time', 'lat', 'lon'), day_360_leap_month_avg)},
coords={
'time': day_360_leap_month_avg_time,
'lat': [-90.0],
'lon': [-180.0]
})
@pytest.mark.parametrize('dset, expected',
[(julian_daily, julian_day_2_month_avg),
(noleap_daily, noleap_day_2_month_avg),
(all_leap_daily, all_leap_day_2_month_avg),
(day_360_daily, day_360_leap_day_2_month_avg)])
def test_non_standard_calendars_calendar_average(dset, expected):
result = calendar_average(dset, freq='month')
xr.testing.assert_equal(result, expected)
|
1688494
|
import unittest
from scenario_test_support import *
class S08AutoValueTest(unittest.TestCase):
archive = load_archive("08_auto_value/intellij_files")
compiler_xml_content = archive["08_auto_value/.idea/compiler.xml"]
def test_source_folders(self):
self.assertEqual([
"foo_profile", "my_idea_auto_value_annotation_processor_profile"
], xpath_attribute_list(self.compiler_xml_content, "./component/annotationProcessing/profile", "name"))
|
1688497
|
from bluedot import BlueDot, COLORS
from signal import pause
from random import choice
bd = BlueDot()
bd.resize(1,2)
def pressed(pos):
print("Pressed : {}".format(pos))
def moved(pos):
print("Moved : {}".format(pos))
def released(pos):
print("Released : {}".format(pos))
def double_press(pos):
print("Double press : {}".format(pos))
def swipe(swipe):
print("Swipe : {}".format(swipe))
def rotation(rotation):
print("Rotation : {}".format(rotation))
def increase_matrix():
bd.resize(bd.cols + 1, bd.rows + 1)
# bd._send_cell_config(2, 2, "#ff0000ff", False, False, True)
def change_color():
# increase_matrix()
# bd[bd.cols - 1, bd.rows - 1].color = "green"
for c in range(bd.cols):
for r in range(bd.rows):
bd[c,r].color = choice(list(COLORS.keys()))
#print(bd.cells)
change_color()
# bd.when_pressed = increase_matrix
bd[0,0].when_pressed = pressed
bd[0,0].when_moved = moved
bd[0,0].when_released = released
bd[0,0].when_double_pressed = double_press
bd[0,0].when_swiped = swipe
bd[0,0].when_rotated = rotation
bd.when_pressed = pressed
bd.when_moved = moved
bd.when_released = released
bd.when_double_pressed = double_press
bd.when_swiped = swipe
bd.when_rotated = rotation
pause()
|
1688518
|
import os
import numpy as np
import scipy.signal
import torch
from matplotlib import pyplot as plt
def triplet_loss(alpha = 0.2):
def _triplet_loss(y_pred,Batch_size):
anchor, positive, negative = y_pred[:int(Batch_size)], y_pred[int(Batch_size):int(2*Batch_size)], y_pred[int(2*Batch_size):]
pos_dist = torch.sqrt(torch.sum(torch.pow(anchor - positive,2), axis=-1))
neg_dist = torch.sqrt(torch.sum(torch.pow(anchor - negative,2), axis=-1))
keep_all = (neg_dist - pos_dist < alpha).cpu().numpy().flatten()
hard_triplets = np.where(keep_all == 1)
pos_dist = pos_dist[hard_triplets].cuda()
neg_dist = neg_dist[hard_triplets].cuda()
basic_loss = pos_dist - neg_dist + alpha
loss = torch.sum(basic_loss)/torch.max(torch.tensor(1),torch.tensor(len(hard_triplets[0])))
return loss
return _triplet_loss
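# --- Usage sketch (illustrative, not part of the original module) ---
# The returned closure expects one stacked batch of embeddings laid out as
# [anchors | positives | negatives] along dim 0, plus the per-group batch
# size. CUDA is required because the hard-triplet selection above moves the
# selected distances to the GPU:
#
# loss_fn = triplet_loss(alpha=0.2)
# embeddings = torch.randn(3 * 16, 128).cuda()  # 16 triplets of 128-d vectors
# loss = loss_fn(embeddings, Batch_size=16)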
def weights_init(net, init_type='normal', init_gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and classname.find('Conv') != -1:
if init_type == 'normal':
torch.nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
print('initialize network with %s type' % init_type)
net.apply(init_func)
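# --- Usage sketch (illustrative) ---
# weights_init walks every submodule via net.apply, so applying it to a
# container initialises the Conv and BatchNorm layers it finds:
#
# net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
# weights_init(net, init_type='kaiming')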
class LossHistory():
def __init__(self, log_dir):
import datetime
curr_time = datetime.datetime.now()
time_str = datetime.datetime.strftime(curr_time,'%Y_%m_%d_%H_%M_%S')
self.log_dir = log_dir
self.time_str = time_str
self.save_path = os.path.join(self.log_dir, "loss_" + str(self.time_str))
self.acc = []
self.losses = []
self.val_loss = []
os.makedirs(self.save_path)
def append_loss(self, acc, loss, val_loss):
self.acc.append(acc)
self.losses.append(loss)
self.val_loss.append(val_loss)
with open(os.path.join(self.save_path, "epoch_acc_" + str(self.time_str) + ".txt"), 'a') as f:
f.write(str(acc))
f.write("\n")
with open(os.path.join(self.save_path, "epoch_loss_" + str(self.time_str) + ".txt"), 'a') as f:
f.write(str(loss))
f.write("\n")
with open(os.path.join(self.save_path, "epoch_val_loss_" + str(self.time_str) + ".txt"), 'a') as f:
f.write(str(val_loss))
f.write("\n")
self.loss_plot()
def loss_plot(self):
iters = range(len(self.losses))
plt.figure()
plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
except Exception:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.save_path, "epoch_loss_" + str(self.time_str) + ".png"))
plt.cla()
plt.close("all")
plt.figure()
plt.plot(iters, self.acc, 'red', linewidth = 2, label='lfw acc')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.acc, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth lfw acc')
except Exception:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Lfw Acc')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.save_path, "epoch_acc_" + str(self.time_str) + ".png"))
plt.cla()
plt.close("all")
|
1688535
|
import logging
from assigner.config import requires_config
help = "Set configuration values"
logger = logging.getLogger(__name__)
@requires_config
def set_conf(conf, args):
"""Sets <key> to <value> in the config.
"""
conf[args.key] = args.value
def setup_parser(parser):
parser.add_argument("key", help="Key to set")
parser.add_argument("value", help="Value to set")
parser.set_defaults(run=set_conf)
|
1688556
|
import torch.nn as nn
from HeadNeRFOptions import BaseOptions
from RenderUtils import ExtractLandMarkPosition, SoftSimpleShader
import torch
import torch.nn.functional as F
import FaceModels
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader)
import numpy as np
class NL3DMMRenderer(nn.Module):
def __init__(self, img_size, opt: BaseOptions):
super().__init__()
self.opt = opt
self.img_h = img_size
self.img_w = img_size
self.build_info()
self.build_nl3dmm()
self.build_tool_funcs()
self.set_3dmmdecoder_eval()
def build_nl3dmm(self):
self.decoder_3dmm = FaceModels.Linear_3DMM(self.opt)
self.decoder_nl3dmm_new = FaceModels.NonLinear_3DMM(self.opt)
def build_info(self):
topo_info = np.load("ConfigFiles/nl_3dmm_topo_info.npz")
tris = torch.as_tensor(topo_info['fv_indices']).long()
vert_tris = torch.as_tensor(topo_info['corr_vf_indices']).long()
self.register_buffer("tris", tris)
self.register_buffer("corr_vf_indices", vert_tris)
self.a0 = np.pi
self.a1 = 2 * np.pi / np.sqrt(3.0)
self.a2 = 2 * np.pi / np.sqrt(8.0)
self.c0 = 1 / np.sqrt(4 * np.pi)
self.c1 = np.sqrt(3.0) / np.sqrt(4 * np.pi)
self.c2 = 3 * np.sqrt(5.0) / np.sqrt(12 * np.pi)
self.d0 = 0.5/ np.sqrt(3.0)
def build_tool_funcs(self):
self.extract_lm3d_func = ExtractLandMarkPosition()
def set_3dmmdecoder_eval(self):
self.decoder_3dmm.eval()
self.decoder_nl3dmm_new.eval()
def train(self, mode=True):
r"""Sets the module in training mode."""
self.training = mode
for module in self.children():
module.train(mode)
self.set_3dmmdecoder_eval()
return self
def calc_geometry_Albedo(self, iden_codes, text_codes, expr_codes):
batch_vps = self.decoder_nl3dmm_new(iden_codes, expr_codes)
batch_vcs = self.decoder_3dmm(text_codes)
return batch_vps, batch_vcs
def calc_normal(self, geometry):
vert_1 = geometry[:, self.tris[:, 0], :]
vert_2 = geometry[:, self.tris[:, 1], :]
vert_3 = geometry[:, self.tris[:, 2], :]
nnorm = torch.cross(vert_2 - vert_1, vert_3 - vert_1, 2)
tri_normal = F.normalize(nnorm, dim=2)
tri_normal = F.pad(tri_normal, [0, 0, 0, 1, 0, 0], mode="constant", value=0)
v_norm = tri_normal[:, self.corr_vf_indices, :].sum(2)
vert_normal = F.normalize(v_norm, dim=-1)
return vert_normal
def build_color(self, batch_vcolor, batch_norm, batch_gamma):
"""
batch_vcolor: [1, n_v, 3]
batch_norm: [B, n_v, 3]
batch_gamma: [B, 27]
"""
# n_b, num_vertex, _ = batch_vcolor.size()
n_b, num_vertex, _ = batch_norm.size()
gamma = batch_gamma.view(-1, 9, 3)
norm = batch_norm.view(-1, 3)
nx, ny, nz = norm[:, 0], norm[:, 1], norm[:, 2]
Y0 = torch.ones_like(nx) * self.a0 * self.c0
arrH = []
arrH.append(Y0)
arrH.append(-self.a1 * self.c1 * ny)
arrH.append(self.a1 * self.c1 * nz)
arrH.append(-self.a1 * self.c1 * nx)
arrH.append(self.a2 * self.c2 * nx * ny)
arrH.append(-self.a2 * self.c2 * ny * nz)
arrH.append(self.a2 * self.c2 * self.d0 * (3 * nz.pow(2) - 1))
arrH.append(-self.a2 * self.c2 * nx * nz)
arrH.append(self.a2 * self.c2 * 0.5 * (nx.pow(2) - ny.pow(2)))
H = torch.stack(arrH, 1)
Y = H.view(n_b, num_vertex, 9)
lighting = Y.bmm(gamma)
face_color = batch_vcolor * lighting
return face_color
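# Note on build_color above: the nine arrH terms are the first three bands
# (l = 0, 1, 2) of the real spherical harmonics evaluated at each vertex
# normal; Y.bmm(gamma) contracts them with the per-batch 9x3 SH coefficients
# to give an RGB irradiance per vertex, which then modulates the albedo.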
def calc_ProjUV(self, cam_vps, batch_inmat):
tv = cam_vps[:, :, 2:3] + 1e-7
temp_uvs = cam_vps / tv
uv = torch.bmm(temp_uvs, batch_inmat.permute(0, 2, 1))
# uv = bmm_self_define_dim3(temp_uvs, batch_inmat, mat_2_is_trans=True)
return uv[:, :, :2]
def generate_renderer(self, batch_inmats):
cur_device = batch_inmats.device
batch_size = batch_inmats.size(0)
cur_dtype = batch_inmats.dtype
#cameras:
half_w = self.img_w * 0.5
half_h = self.img_h * 0.5
focal_info = torch.stack([batch_inmats[:, 0, 0] / half_w, batch_inmats[:, 1, 1] / half_w], dim=-1)
center_info = torch.stack([batch_inmats[:, 0, 2] / half_w - 1.0, batch_inmats[:, 1, 2] / half_h - 1.0], dim=-1)
iden_mat = torch.eye(3)
iden_mat[0, 0] = -1.0
iden_mat[1, 1] = -1.0
temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
cameras = PerspectiveCameras(
focal_length=focal_info,
principal_point=center_info,
R=temp_Rmat,
T=temp_Vec,
device=cur_device
)
# focal_info = torch.stack([batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]], dim=-1)
# center_info = torch.stack([batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]], dim=-1)
# iden_mat = torch.eye(3)
# iden_mat[0, 0] = -1.0
# iden_mat[1, 1] = -1.0
# temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
# temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
# cameras = PerspectiveCameras(
# focal_length=focal_info,
# principal_point=center_info,
# R=temp_Rmat,
# T=temp_Vec,
# in_ndc=False,
# image_size = [[self.img_h, self.img_w] * batch_size],
# device=cur_device
# )
# light
lights = PointLights(
location=[[0.0, 0.0, 1e5]],
ambient_color=[[1, 1, 1]],
specular_color=[[0., 0., 0.]],
diffuse_color=[[0., 0., 0.]], device=cur_device
)
raster_settings = RasterizationSettings(
image_size=(self.img_h, self.img_w),
# blur_radius=0.000001,
# faces_per_pixel=10,
blur_radius=0,
faces_per_pixel=1,
)
blend_params = blending.BlendParams(background_color=[0, 0, 0])
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
raster_settings=raster_settings,
cameras=cameras
),
shader=SoftSimpleShader(
lights=lights,
blend_params=blend_params,
cameras=cameras
),
).to(cur_device)
return renderer
def render_img(self,
batch_vps, batch_vcs, illu_sh,
c2l_Scales, c2l_Rmats, c2l_Tvecs,
batch_Rmats, batch_Tvecs, batch_inmats,
):
batch_size = batch_vps.size(0)
live_vps = torch.bmm(c2l_Scales * batch_vps, c2l_Rmats.permute(0, 2, 1)) + c2l_Tvecs.view(-1, 1, 3)
cam_vps = torch.bmm(live_vps, batch_Rmats.permute(0, 2, 1)) + batch_Tvecs.view(-1, 1, 3)
vns = self.calc_normal(cam_vps)
sh_vcs = self.build_color(batch_vcs, vns, illu_sh)
face_color = TexturesVertex(sh_vcs)
meshes = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), face_color)
cur_renderer = self.generate_renderer(batch_inmats)
rendered_res = cur_renderer(meshes)
rendered_res /= 255.0
mask_c3b = (rendered_res[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
rendered_img = rendered_res[:, :, :, :3]
rendered_img = torch.clamp(rendered_img, min=0.0, max=1.0)
lm_3d_posi = self.extract_lm3d_func(cam_vps)
proj_lm2d = self.calc_ProjUV(lm_3d_posi, batch_inmats)
return rendered_img, mask_c3b, proj_lm2d, sh_vcs
def generate_renderer_for_eval(self, batch_inmats):
cur_device = batch_inmats.device
batch_size = batch_inmats.size(0)
cur_dtype = batch_inmats.dtype
#cameras:
# half_w = self.img_w * 0.5
# half_h = self.img_h * 0.5
focal_info = torch.stack([batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]], dim=-1)
center_info = torch.stack([batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]], dim=-1)
iden_mat = torch.eye(3)
iden_mat[0, 0] = -1.0
iden_mat[1, 1] = -1.0
temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
cameras = PerspectiveCameras(
focal_length=focal_info,
principal_point=center_info,
R=temp_Rmat,
T=temp_Vec,
in_ndc=False,
image_size=[[self.img_h, self.img_w]] * batch_size,
device=cur_device
)
# light
lights = PointLights(
location=[[0.0, 0.0, 1e5]],
ambient_color=[[1, 1, 1]],
specular_color=[[0., 0., 0.]],
diffuse_color=[[0., 0., 0.]], device=cur_device
)
raster_settings = RasterizationSettings(
image_size=(self.img_h, self.img_w),
# blur_radius=0.000001,
# faces_per_pixel=10,
blur_radius=0,
faces_per_pixel=1,
)
blend_params = blending.BlendParams(background_color=[0, 0, 0])
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
raster_settings=raster_settings,
cameras=cameras
),
shader=SoftSimpleShader(
lights=lights,
blend_params=blend_params,
cameras=cameras
),
).to(cur_device)
lights_phong = PointLights(
location=[[0.0, 0.0, -1e5]],
ambient_color=[[0.5, 0.5, 0.5]],
specular_color=[[0.2, 0.2, 0.2]],
diffuse_color=[[0.3, 0.3, 0.3]], device=cur_device
)
renderer_phong = MeshRenderer(
rasterizer=MeshRasterizer(
raster_settings=raster_settings,
cameras=cameras
),
shader=HardPhongShader(
lights=lights_phong,
blend_params=blend_params,
cameras=cameras
),
).to(cur_device)
return renderer, renderer_phong
def render_img_for_eval(self,
batch_vps, batch_vcs, illu_sh,
batch_Rmats, batch_Tvecs, batch_inmats
):
batch_size = batch_vps.size(0)
cam_vps = torch.bmm(batch_vps, batch_Rmats.permute(0, 2, 1)) + batch_Tvecs.view(-1, 1, 3)
vns = self.calc_normal(cam_vps)
sh_vcs = self.build_color(batch_vcs, vns, illu_sh)
face_color = TexturesVertex(sh_vcs)
meshes = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), face_color)
cur_renderer, renderer_phong = self.generate_renderer_for_eval(batch_inmats)
rendered_res = cur_renderer(meshes)
rendered_res /= 255.0
mask_c3b = (rendered_res[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
rendered_img = rendered_res[:, :, :, :3]
rendered_img = torch.clamp(rendered_img, min=0.0, max=1.0)
lm_3d_posi = self.extract_lm3d_func(cam_vps)
proj_lm2d = self.calc_ProjUV(lm_3d_posi, batch_inmats)
color_phong = torch.ones_like(cam_vps)
color_phong = TexturesVertex(color_phong)
meshes_phong = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), color_phong)
rendered_phong = renderer_phong(meshes_phong)
phong_mask_c3b = (rendered_phong[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
rendered_phong = rendered_phong[:, :, :, :3]
return rendered_img, mask_c3b, proj_lm2d, sh_vcs, rendered_phong, phong_mask_c3b
def forward(self,
iden_codes, text_codes, expr_codes, cur_sh,
batch_Rmats, batch_Tvecs, batch_inmats, eval = False, **kwargs
):
batch_vps = self.decoder_nl3dmm_new(iden_codes, expr_codes, scale = 0.01)
batch_vcs = self.decoder_3dmm(text_codes)
if eval:
return self.render_img_for_eval(batch_vps, batch_vcs, cur_sh,
batch_Rmats, batch_Tvecs, batch_inmats)
else:
c2l_Scales, c2l_Rmats, c2l_Tvecs = kwargs["c2l_Scales"], kwargs["c2l_Rmats"], kwargs["c2l_Tvecs"]
return self.render_img(batch_vps, batch_vcs, cur_sh,
c2l_Scales, c2l_Rmats, c2l_Tvecs,
batch_Rmats, batch_Tvecs, batch_inmats)
|
1688580
|
class DataLinkError(RuntimeError):
GENERIC_ERROR_MESSAGE = ('Something went wrong. Please try again. '
'If you continue to have problems, please contact us at <EMAIL>.')
def __init__(self, data_link_message=None, http_status=None, http_body=None, http_headers=None,
data_link_error_code=None, response_data=None):
self.http_status = http_status
self.http_body = http_body
self.http_headers = http_headers if http_headers is not None else {}
self.data_link_error_code = data_link_error_code
self.data_link_message = data_link_message if data_link_message is not None \
else self.GENERIC_ERROR_MESSAGE
self.response_data = response_data
def __str__(self):
if self.http_status is None:
status_string = ''
else:
status_string = "(Status %(http_status)s) " % {"http_status": self.http_status}
if self.data_link_error_code is None:
data_link_error_string = ''
else:
data_link_error_string = "(Nasdaq Data Link Error %(data_link_error_code)s) " % {
"data_link_error_code": self.data_link_error_code}
return "%(ss)s%(qes)s%(qm)s" % {
"ss": status_string, "qes": data_link_error_string, "qm": self.data_link_message
}
class AuthenticationError(DataLinkError):
pass
class InvalidRequestError(DataLinkError):
pass
class LimitExceededError(DataLinkError):
pass
class NotFoundError(DataLinkError):
pass
class ServiceUnavailableError(DataLinkError):
pass
class InternalServerError(DataLinkError):
pass
class ForbiddenError(DataLinkError):
pass
class InvalidDataError(DataLinkError):
pass
class ColumnNotFound(DataLinkError):
pass
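# --- Usage sketch (illustrative; status and error code are made up) ---
if __name__ == '__main__':
    err = NotFoundError(data_link_message='Dataset not found',
                        http_status=404,
                        data_link_error_code='QECx02')
    # -> "(Status 404) (Nasdaq Data Link Error QECx02) Dataset not found"
    print(str(err))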
|
1688591
|
import base64
import os
import yaml
from sqlalchemy.orm.collections import attribute_mapped_collection
from emonitor.extensions import db
class Department(db.Model):
"""Department class"""
__tablename__ = 'departments'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
shortname = db.Column(db.String(10))
color = db.Column(db.String(7))
orderpos = db.Column(db.Integer)
defaultcity = db.Column(db.Integer) # id of default city for this department
_attributes = db.Column('attributes', db.Text)
def _get_city(self):
from emonitor.modules.streets.city import City
return City.getCities(self.defaultcity)
city = property(_get_city)
cars = db.relationship("Car", collection_class=attribute_mapped_collection('id'), cascade="all, delete-orphan")
def __init__(self, name, shortname, color, orderpos, defaultcity=0, attributes=None):
self.name = name
self.shortname = shortname
self.color = color
self.orderpos = orderpos
self.defaultcity = defaultcity
self._attributes = yaml.safe_dump(attributes or {}, encoding='utf-8')
def __getattr__(self, name, default=''):
if name in self.attributes:
return self.attributes[name]
else:
return default
@property
def attributes(self):
return yaml.safe_load(self._attributes)
@attributes.setter
def attributes(self, attrs):
self._attributes = yaml.safe_dump(attrs, encoding='utf-8')
def set(self, name, value):
attrs = self.attributes
attrs[name] = value
self.attributes = attrs
def getCars(self):
return sorted(self.cars.values(), key=lambda car: car.name)
def getLogoStream(self):
"""
Deliver logo file as stream, base 64 encoded
:return: base 64 stream or empty string
"""
from emonitor import app
if self.attributes.get('logo', '') != '' and os.path.exists('{}{}'.format(app.config.get('PATH_DATA'), self.attributes.get('logo', ''))):
with open('{}{}'.format(app.config.get('PATH_DATA'), self.attributes.get('logo', '')), 'rb') as f:
return base64.b64encode(f.read())
else:
with open('{}/emonitor/frontend/web/img/empty.png'.format(app.config.get('PROJECT_ROOT')), 'rb') as f:
return base64.b64encode(f.read())
@staticmethod
def getDefaultDepartment():
"""Get default department :py:class:`emonitor.modules.settings.department.Department`"""
return Department.query.order_by('orderpos').first()
@staticmethod
def getDepartments(id=0):
"""
Get department list filtered by criteria
:param optional id: id of department, *0* for all
:return: list of :py:class:`emonitor.modules.settings.department.Department`
"""
if id == 0:
return Department.query.order_by('orderpos').all()
else:
return Department.query.filter_by(id=id).first()
@staticmethod
def getDeptsDict():
"""
Get departments as dict
:return: dict of :py:class:`emonitor.modules.settings.department.Department`
"""
ret = {}
for dept in Department.query.order_by('orderpos'):
ret[dept.orderpos] = (dept.name, dept.color)
return ret
|
1688632
|
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.rotalyze
# LIBTBX_SET_DISPATCHER_NAME molprobity.rotalyze
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
import sys
from iotbx.cli_parser import CCTBXParser
from libtbx.utils import multi_out, show_total_time
from mmtbx.programs import rotalyze
# =============================================================================
def run(args):
# create parser
logger = multi_out()
logger.register('stderr', sys.stderr)
logger2 = multi_out()
logger2.register('stdout', sys.stdout)
parser = CCTBXParser(
program_class=rotalyze.Program,
logger=logger)
namespace = parser.parse_args(args)
# start program
print('Starting job', file=logger)
print('='*79, file=logger)
task = rotalyze.Program(
parser.data_manager, parser.working_phil.extract(), logger=logger2)
# validate inputs
task.validate()
# run program
task.run()
# stop timer
print('', file=logger)
print('='*79, file=logger)
print('Job complete', file=logger)
show_total_time(out=logger)
# =============================================================================
if __name__ == '__main__':
run(sys.argv[1:])
|
1688719
|
import argparse
import numpy as np
from os import path
import struct
from internal import db_handling
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--sift_feature_dir', required=True)
parser.add_argument('--query_txt_file', required=True)
parser.add_argument('--database_file', required=True)
args = parser.parse_args()
return args
def main():
args = parse_args()
db = db_handling.COLMAPDatabase.connect(args.database_file)
db.create_tables()
with open(args.query_txt_file) as f:
for line in f:
name, _, h, w, fx, fy, cx, cy = line.split(' ')
params = np.array([float(fx), float(fy), float(cx), float(cy)])
camera_id = db.add_camera(1, int(h), int(w), params)
image_id = db.add_image(path.join('images', name), camera_id)
featurefile = path.join(args.sift_feature_dir,
path.splitext(name)[0] + '.sift')
with open(featurefile, 'rb') as sift_file:
data = sift_file.read()
header = struct.unpack_from('iiiii', data, 0)
_, _, num_points, num_entries, desc_size = header
assert num_entries == 5 and desc_size == 128
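# Assumed .sift binary layout, inferred from the parsing below: a 5-int32
# header (the last three fields being num_points, num_entries, desc_size),
# then num_points records of 5 float32 values, of which the second and
# first are kept as the keypoint location, then num_points descriptors of
# 128 uint8 entries each.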
offset = 20
keypoints = np.zeros((num_points, 2))
for i in range(num_points):
point = struct.unpack_from('fffff', data, offset)
offset += 20
keypoints[i, :] = np.array((point[1], point[0]))
descriptors = np.zeros((num_points, desc_size))
for i in range(num_points):
descriptor = struct.unpack_from('128B', data, offset)
offset += desc_size
descriptors[i, :] = np.asarray(descriptor)
db.add_keypoints(image_id, keypoints)
db.add_descriptors(image_id, descriptors)
db.commit()
if __name__ == '__main__':
main()
|
1688732
|
import json
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Wrappers.Dials.Scale")
def DialsScale(DriverType=None, decay_correction=None):
"""A factory for DialsScaleWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
class DialsScaleWrapper(DriverInstance.__class__):
"""A wrapper for dials.scale"""
def __init__(self):
# generic things
super().__init__()
self.set_executable("dials.scale")
# clear all the header junk
self.reset()
self._model = None
self._full_matrix = True
self._absorption_correction = True
self._absorption_level = None # or low, medium, high
self._error_model = None
self._error_model_grouping = None
self._shared_absorption = False
self._error_model_groups = None
self._outlier_rejection = None
self._outlier_zmax = None
self._min_partiality = None
self._partiality_cutoff = None
self._d_min = None
self._d_max = None
self._crystal_name = None
self._project_name = None
self._overwrite_existing_models = None
# scale and filter parameters
self._filtering_method = None
self._deltacchalf_max_cycles = None
self._deltacchalf_min_completeness = None
self._deltacchalf_stdcutoff = None
self._scale_and_filter_results = None
# input and output files
self._unmerged_reflections = None
self._experiments_json = []
self._reflection_files = []
# this flag indicates that the input reflections are already
# scaled and just need merging e.g. from XDS/XSCALE.
self._onlymerge = False
# by default, switch this on
if decay_correction is None:
self._bfactor = True
else:
self._bfactor = decay_correction
# this will often be wanted
self._anomalous = False
# these are only relevant for 'rotation' mode scaling
self._spacing = None
self._cycles = None
self._brotation = None
self._bfactor_tie = None
self._surface_weight = None
self._lmax = None
# dose_decay model parameters
self._share_decay = None
self._resolution_dependence = None
# Array model terms
self._n_resolution_bins = None
self._n_absorption_bins = None
self._isigma_selection = None
self._reflection_selection_method = None
self._intensities = None
self._project_crystal_dataset = {}
self._runs = []
# for adding data on merge - one dname
self._pname = None
self._xname = None
self._dname = None
self._scaled_experiments = None
self._scaled_reflections = None
self._html = None
self._merged_reflections = None
self._best_unit_cell = None
# getter and setter methods
def add_experiments_json(self, experiments_json):
self._experiments_json.append(experiments_json)
def add_reflections_file(self, reflections_file):
self._reflection_files.append(reflections_file)
def clear_datafiles(self):
self._experiments_json = []
self._reflection_files = []
self._scaled_experiments = []
self._scaled_reflections = []
def set_resolution(self, d_min=None, d_max=None):
"""Set the resolution limit for the scaling -
default is to include all reflections."""
self._d_min = d_min
self._d_max = d_max
def set_anomalous(self, anomalous=True):
"""Switch on/off separating of anomalous pairs."""
self._anomalous = anomalous
def set_bfactor(self, bfactor=True, brotation=None):
"""Switch on/off bfactor refinement, optionally with the
spacing for the bfactor refinement (in degrees.)"""
self._bfactor = bfactor
if brotation:
self._brotation = brotation
def set_decay_bins(self, n_bins):
self._n_resolution_bins = n_bins
def set_array_absorption_bins(self, n_bins):
self._n_absorption_bins = n_bins
def set_min_partiality(self, min_partiality):
self._min_partiality = min_partiality
def set_partiality_cutoff(self, v):
self._partiality_cutoff = v
def set_surface_weight(self, surface_weight):
self._surface_weight = surface_weight
def set_lmax(self, lmax):
self._lmax = lmax
def set_share_decay(self, share):
self._share_decay = share
def set_resolution_dependence(self, resolution_dependence):
self._resolution_dependence = resolution_dependence
def set_model(self, model):
self._model = model
def set_full_matrix(self, full_matrix=True):
self._full_matrix = full_matrix
def set_absorption_correction(self, absorption_correction=True):
self._absorption_correction = absorption_correction
def set_shared_absorption(self, share=True):
self._shared_absorption = share
def set_spacing(self, spacing):
self._spacing = spacing
def set_cycles(self, cycles):
"""Set the maximum number of cycles allowed for the scaling -
this assumes the default convergence parameters."""
self._cycles = cycles
def set_intensities(self, intensities):
intensities = intensities.lower()
assert intensities in ("summation", "profile", "combine")
self._intensities = intensities
def set_isigma_selection(self, isigma_selection):
assert len(isigma_selection) == 2
self._isigma_selection = isigma_selection
def set_reflection_selection_method(self, reflection_selection_method):
self._reflection_selection_method = reflection_selection_method
def set_error_model(self, error_model="basic"):
self._error_model = error_model
def set_error_model_grouping_method(self, grouping="combined"):
self._error_model_grouping = grouping
def set_error_model_groups(self, groups):
"Groups should be a list of groups e.g. ['0,1', '2,3']"
self._error_model_groups = groups
def set_outlier_rejection(self, outlier_rejection):
self._outlier_rejection = outlier_rejection
def set_outlier_zmax(self, z_max):
self._outlier_zmax = z_max
def set_absorption_level(self, level):
self._absorption_level = level
def get_scaled_mtz(self):
return self._merged_reflections
def set_crystal_name(self, name):
self._crystal_name = name
def set_project_name(self, name):
self._project_name = name
def get_scaled_reflections(self):
return self._scaled_reflections
def get_scaled_experiments(self):
return self._scaled_experiments
def set_scaled_mtz(self, filepath):
self._merged_reflections = filepath
def set_html(self, filepath):
self._html = filepath
def get_html(self):
return self._html
def get_scaled_unmerged_mtz(self):
return self._unmerged_reflections
def set_scaled_unmerged_mtz(self, filepath):
self._unmerged_reflections = filepath
def set_best_unit_cell(self, unit_cell):
self._best_unit_cell = unit_cell
def set_overwrite_existing_models(self, overwrite):
self._overwrite_existing_models = overwrite
def set_filtering_method(self, filtering_method):
self._filtering_method = filtering_method
def set_deltacchalf_max_cycles(self, max_cycles):
self._deltacchalf_max_cycles = max_cycles
def set_deltacchalf_min_completeness(self, min_completeness):
self._deltacchalf_min_completeness = min_completeness
def set_deltacchalf_stdcutoff(self, stdcutoff):
self._deltacchalf_stdcutoff = stdcutoff
def get_scale_and_filter_results(self):
return self._scale_and_filter_results
def scale(self):
"""Actually perform the scaling."""
self.clear_command_line() # reset the command line in case has already
# been run previously
assert len(self._experiments_json)
assert len(self._reflection_files)
assert len(self._experiments_json) == len(self._reflection_files)
for f in self._experiments_json + self._reflection_files:
assert os.path.isfile(f)
self.add_command_line(f)
nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
if isinstance(nproc, int) and nproc > 1:
self.add_command_line("nproc=%i" % nproc)
if self._anomalous:
self.add_command_line("anomalous=True")
if self._intensities == "summation":
self.add_command_line("intensity_choice=sum")
elif self._intensities == "profile":
self.add_command_line("intensity_choice=profile")
# Handle all model options. Model can be none - would trigger auto
# models in dials.scale.
if self._model is not None:
self.add_command_line("model=%s" % self._model)
# Decay correction can refer to any model (physical, array, KB)
if self._bfactor:
self.add_command_line("%s.decay_correction=True" % self._model)
else:
self.add_command_line("%s.decay_correction=False" % self._model)
if self._model in ("physical", "dose_decay", "array"):
# These options can refer to array, physical or dose_decay model
if self._absorption_correction:
self.add_command_line("%s.absorption_correction=True" % self._model)
else:
self.add_command_line(
"%s.absorption_correction=False" % self._model
)
if self._model in ("physical", "array"):
# These options can refer to array, physical or dose_decay model
if self._bfactor and self._brotation is not None:
self.add_command_line(
f"{self._model}.decay_interval={self._brotation:g}"
)
if self._model == "dose_decay" and self._share_decay is not None:
self.add_command_line(f"{self._model}.share.decay={self._share_decay}")
if self._model == "dose_decay" and self._resolution_dependence is not None:
self.add_command_line(
f"{self._model}.resolution_dependence={self._resolution_dependence}"
)
# Option only relevant for spherical harmonic absorption in physical model.
if (
self._model in ("physical", "dose_decay")
and self._absorption_correction
and self._lmax is not None
):
self.add_command_line("%s.lmax=%i" % (self._model, self._lmax))
if self._absorption_level:
self.add_command_line(f"absorption_level={self._absorption_level}")
# 'Spacing' i.e. scale interval only relevant to physical model.
if self._model in ("physical", "dose_decay") and self._spacing:
self.add_command_line(f"{self._model}.scale_interval={self._spacing:g}")
if self._model == "physical" and self._surface_weight:
self.add_command_line(
f"{self._model}.surface_weight={self._surface_weight}"
)
if self._shared_absorption:
self.add_command_line("share.absorption=True")
self.add_command_line(f"full_matrix={self._full_matrix}")
if self._error_model:
self.add_command_line(f"error_model={self._error_model}")
if self._error_model_grouping:
self.add_command_line(
f"error_model.grouping={self._error_model_grouping}"
)
if self._error_model_groups and self._error_model_grouping == "grouped":
for g in self._error_model_groups:
self.add_command_line(f"error_model_group={g}")
if self._outlier_rejection:
self.add_command_line(f"outlier_rejection={self._outlier_rejection}")
if self._min_partiality is not None:
self.add_command_line(f"min_partiality={self._min_partiality}")
if self._partiality_cutoff is not None:
self.add_command_line(f"partiality_cutoff={self._partiality_cutoff}")
# next any 'generic' parameters
if self._isigma_selection is not None:
self.add_command_line(
"reflection_selection.Isigma_range=%f,%f"
% tuple(self._isigma_selection)
)
if self._reflection_selection_method is not None:
self.add_command_line(
f"reflection_selection.method={self._reflection_selection_method}"
)
if self._d_min is not None:
self.add_command_line("cut_data.d_min=%g" % self._d_min)
if self._d_max is not None:
self.add_command_line("cut_data.d_max=%g" % self._d_max)
if self._cycles is not None:
self.add_command_line("max_iterations=%d" % self._cycles)
if self._outlier_zmax:
self.add_command_line("outlier_zmax=%d" % self._outlier_zmax)
if self._n_resolution_bins:
self.add_command_line("n_resolution_bins=%d" % self._n_resolution_bins)
if self._n_absorption_bins:
self.add_command_line("n_absorption_bins=%d" % self._n_absorption_bins)
if self._best_unit_cell is not None:
self.add_command_line(
"best_unit_cell=%s,%s,%s,%s,%s,%s" % self._best_unit_cell
)
            if self._overwrite_existing_models is not None:
                # pass the stored boolean through rather than hard-coding True
                self.add_command_line(
                    "overwrite_existing_models=%s" % self._overwrite_existing_models
                )
if not self._scaled_experiments:
self._scaled_experiments = os.path.join(
self.get_working_directory(), "%i_scaled.expt" % self.get_xpid()
)
if not self._scaled_reflections:
self._scaled_reflections = os.path.join(
self.get_working_directory(), "%i_scaled.refl" % self.get_xpid()
)
if self._unmerged_reflections:
self.add_command_line(
"output.unmerged_mtz=%s" % self._unmerged_reflections
)
if self._merged_reflections:
self.add_command_line("output.merged_mtz=%s" % self._merged_reflections)
if not self._html:
self._html = os.path.join(
self.get_working_directory(), "%i_scaling.html" % self.get_xpid()
)
self.add_command_line("output.html=%s" % self._html)
if self._crystal_name:
self.add_command_line("output.crystal_name=%s" % self._crystal_name)
if self._project_name:
self.add_command_line("output.project_name=%s" % self._project_name)
if self._filtering_method:
self.add_command_line("filtering.method=%s" % self._filtering_method)
scale_and_filter_filename = (
"%s_scale_and_filter_results.json" % self.get_xpid()
)
self.add_command_line(
"output.scale_and_filter_results=%s" % scale_and_filter_filename
)
if self._deltacchalf_max_cycles:
self.add_command_line(
"filtering.deltacchalf.max_cycles=%i"
% self._deltacchalf_max_cycles
)
if self._deltacchalf_min_completeness:
self.add_command_line(
"filtering.deltacchalf.min_completeness=%i"
% self._deltacchalf_min_completeness
)
                if self._deltacchalf_stdcutoff:
                    self.add_command_line(
                        "filtering.deltacchalf.stdcutoff=%g"  # %g avoids truncating float cutoffs
                        % self._deltacchalf_stdcutoff
                    )
self.add_command_line("output.experiments=%s" % self._scaled_experiments)
self.add_command_line("output.reflections=%s" % self._scaled_reflections)
            # run dials.scale and wait for it to finish
self.start()
self.close_wait()
# check for errors
try:
self.check_for_errors()
except Exception:
logger.warning(
"dials.scale failed, see log file for more details:\n %s",
self.get_log_file(),
)
raise
logger.debug("dials.scale status: OK")
        if self._filtering_method and os.path.isfile(scale_and_filter_filename):
            from dials.algorithms.scaling import scale_and_filter

            with open(scale_and_filter_filename) as fh:
                self._scale_and_filter_results = (
                    scale_and_filter.AnalysisResults.from_dict(json.load(fh))
                )
return "OK"
def get_unmerged_reflection_file(self):
"""Return a single unmerged mtz, for resolution cutoff analysis."""
return self._unmerged_reflections
return DialsScaleWrapper()
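
# A minimal usage sketch for the factory above, assuming this module exposes
# it as DialsScale(). The file names and the add_*/set_* setter names are
# illustrative, not confirmed by the fragment shown here:
#
#     scaler = DialsScale()
#     scaler.add_experiments_json("integrated.expt")
#     scaler.add_reflections_file("integrated.refl")
#     scaler.set_anomalous(True)
#     scaler.scale()
#     print(scaler.get_scaled_reflections())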
1688736
from typing import TYPE_CHECKING
from protostar.commands.test.test_cases import TestCaseResult
if TYPE_CHECKING:
import queue
class TestResultsQueue:
def __init__(self, shared_queue: "queue.Queue[TestCaseResult]") -> None:
self._shared_queue = shared_queue
    def get(self) -> TestCaseResult:
        # block for up to 1000 seconds waiting for a result from a worker
        return self._shared_queue.get(block=True, timeout=1000)
def put(self, item: TestCaseResult) -> None:
self._shared_queue.put(item)
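
# A minimal sketch of how this wrapper might be shared between a test worker
# and a reporter. The TestCaseResult value is elided, since its constructor
# signature is not shown here:
#
#     import queue
#
#     results = TestResultsQueue(queue.Queue())
#     results.put(some_test_case_result)  # called from the worker
#     report(results.get())               # blocks until a result arrives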
1688738
import sys
import bteve as eve
import random
import minpng
if sys.implementation.name == "circuitpython":
gd = eve.Gameduino()
else:
from spidriver import SPIDriver
gd = eve.Gameduino(SPIDriver(sys.argv[1]))
gd.init()
if 0:  # "Hello world" demo (disabled; change to 1 to run it)
gd.ClearColorRGB(0x20, 0x40, 0x20)
gd.Clear()
gd.cmd_text(gd.w // 2, gd.h // 2, 31, eve.OPT_CENTER, "Hello world")
gd.swap()
if 1:  # random points demo
rr = random.randrange
random.seed(8)
gd.VertexFormat(2)
gd.Clear()
gd.Begin(eve.POINTS)
for i in range(100):
gd.ColorRGB(rr(256), rr(256), rr(256))
gd.PointSize(rr(gd.w // 6))
gd.Vertex2f(rr(gd.w), rr(gd.h))
gd.swap()
def screenshot(filename):
    """Write the current EVE screen contents to ``filename`` as a PNG."""
    with open(filename, "wb") as pngf:
        p = minpng.PngWriter(pngf.write, gd.w, gd.h)
def handle_line(rgb):
for i in range(gd.w):
r = rgb[3 * i + 0]
g = rgb[3 * i + 1]
b = rgb[3 * i + 2]
p.rgb(r, g, b)
gd.screenshot(handle_line)
screenshot("/sd/foo.png")
1688764
try:
# python2
from urlparse import urlparse
except ImportError:
# python3
from urllib.parse import urlparse
from consolemenu.validators.base import BaseValidator
class UrlValidator(BaseValidator):
def __init__(self):
"""
URL Validator class
"""
super(UrlValidator, self).__init__()
    def validate(self, input_string):
        """
        Validate that the input string parses as a URL.

        :return: True if the string has both a scheme and a network location, False otherwise.
        """
parsed_url = urlparse(url=input_string)
return bool(parsed_url.scheme and parsed_url.netloc)
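
# Example usage (a quick sanity check, not part of the library):
#
#     validator = UrlValidator()
#     assert validator.validate("https://example.com/path")
#     assert not validator.validate("example.com")  # no scheme, so rejected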