| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
166250
|
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
from django.template import RequestContext, loader
from django_translate.services import trans as _, transchoice
def hello(request):
return render_to_response("hello.html", context=RequestContext(request))
def apples(request):
return render_to_response("apples.html", context=RequestContext(request))
def pythonic_apples(request):
return render_to_response("apples_python.html", {"rendered":
u"<h1>{0}</h1>"
"<p>{1}</p>"
"<p>{2}</p>"
"<p>{3}</p>".format(
_("apples.header"),
_("apples.want_some", {"fruits": "apples"}),
transchoice("apples.praise_n", 1),
transchoice("apples.praise_n", 3)
)
})
def po(request):
return render_to_response("po.html", context=RequestContext(request))
|
166277
|
from data.types.pixel_coordinate_system import PixelCoordinateSystem
from data.types.bounding_box_format import BoundingBoxFormat
from data.types.bounding_box_coordinate_system import BoundingBoxCoordinateSystem
from data.types.pixel_definition import PixelDefinition
def _common_routine(bounding_box, image_size, bounding_box_format: BoundingBoxFormat,
pixel_coordinate_system: PixelCoordinateSystem,
bounding_box_coordinate_system: BoundingBoxCoordinateSystem,
pixel_definition, rasterized_xyxy_func, rasterized_polygon_func,
spatial_xyxy_func, spatial_polygon_func):
if bounding_box_coordinate_system == BoundingBoxCoordinateSystem.Rasterized:
if bounding_box_format == BoundingBoxFormat.XYWH or bounding_box_format == BoundingBoxFormat.XYXY:
if bounding_box_format == BoundingBoxFormat.XYWH:
from data.operator.bbox.rasterized.xywh2xyxy import bbox_xywh2xyxy
bounding_box = bbox_xywh2xyxy(bounding_box)
return rasterized_xyxy_func(bounding_box, image_size)
else:
return rasterized_polygon_func(bounding_box, image_size)
else:
if bounding_box_format == BoundingBoxFormat.XYWH or bounding_box_format == BoundingBoxFormat.XYXY:
if bounding_box_format == BoundingBoxFormat.XYWH:
from data.operator.bbox.rasterized.xywh2xyxy import bbox_xywh2xyxy
bounding_box = bbox_xywh2xyxy(bounding_box)
return spatial_xyxy_func(bounding_box, image_size, pixel_coordinate_system, pixel_definition)
else:
return spatial_polygon_func(bounding_box, image_size, pixel_coordinate_system, pixel_definition)
def bounding_box_is_intersect_with_image(bounding_box, image_size, bounding_box_format: BoundingBoxFormat,
pixel_coordinate_system: PixelCoordinateSystem,
bounding_box_coordinate_system: BoundingBoxCoordinateSystem,
pixel_definition: PixelDefinition = PixelDefinition.Point):
import data.operator.bbox.rasterized.utility.image
import data.operator.bbox.spatial.utility.image
return _common_routine(bounding_box, image_size, bounding_box_format, pixel_coordinate_system,
bounding_box_coordinate_system, pixel_definition,
data.operator.bbox.rasterized.utility.image.bounding_box_is_intersect_with_image,
data.operator.bbox.rasterized.utility.image.bounding_box_is_intersect_with_image_polygon,
data.operator.bbox.spatial.utility.image.bounding_box_is_intersect_with_image,
data.operator.bbox.spatial.utility.image.bounding_box_is_intersect_with_image_polygon)
def bounding_box_fit_in_image_boundary(bounding_box, image_size, bounding_box_format: BoundingBoxFormat,
pixel_coordinate_system: PixelCoordinateSystem,
bounding_box_coordinate_system: BoundingBoxCoordinateSystem,
pixel_definition: PixelDefinition = PixelDefinition.Point):
import data.operator.bbox.rasterized.utility.image
import data.operator.bbox.spatial.utility.image
return _common_routine(bounding_box, image_size, bounding_box_format, pixel_coordinate_system,
bounding_box_coordinate_system, pixel_definition,
data.operator.bbox.rasterized.utility.image.bounding_box_fit_in_image_boundary,
data.operator.bbox.rasterized.utility.image.bounding_box_fit_in_image_boundary_polygon,
data.operator.bbox.spatial.utility.image.bounding_box_fit_in_image_boundary,
data.operator.bbox.spatial.utility.image.bounding_box_fit_in_image_boundary_polygon)
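# A minimal usage sketch (hedged: the box values, image size and the
# PixelCoordinateSystem member name are hypothetical; only BoundingBoxFormat.XYWH/XYXY
# and BoundingBoxCoordinateSystem.Rasterized appear in the code above):
#
#   box = [10, 20, 30, 40]   # XYWH
#   inside = bounding_box_is_intersect_with_image(
#       box, (640, 480),
#       BoundingBoxFormat.XYWH,
#       PixelCoordinateSystem.HalfPixelCenter,   # hypothetical member name
#       BoundingBoxCoordinateSystem.Rasterized)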
|
166316
|
import FreeCAD
import FreeCADGui
from pivy import coin
import arch_texture_utils.faceset_utils as faceset_utils
class Light():
def __init__(self, obj):
obj.Proxy = self
self.setProperties(obj)
def setProperties(self, obj):
pl = obj.PropertiesList
if not 'Color' in pl:
obj.addProperty("App::PropertyColor", "Color", "Light",
"The color of the light").Color = (1.0, 0.94, 0.91)
if not 'Intensity' in pl:
obj.addProperty("App::PropertyFloatConstraint", "Intensity", "Light",
"The intensity of the light").Intensity = (1.0, 0.0, 1.0, 0.1)
def onDocumentRestored(self, obj):
self.setProperties(obj)
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def execute(self, ob):
pass
class ViewProviderLight:
def __init__(self, vobj):
vobj.Proxy = self
self.setProperties(vobj)
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
# Setting properties does not work here as the pl is not filled yet :/
sceneGraph = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
self.switch = coin.SoSwitch()
self.geometryNode = coin.SoSeparator()
self.transform = coin.SoTransform()
self.material = coin.SoMaterial()
self.coinLight = self.createLightInstance()
actualGeometry = self.createGeometry()
self.geometryNode.addChild(self.transform)
self.geometryNode.addChild(self.material)
if actualGeometry is not None:
self.geometryNode.addChild(actualGeometry)
sceneGraph.insertChild(self.coinLight, 1)
self.switch.addChild(self.geometryNode)
vobj.addDisplayMode(self.switch, "Light")
self.updateLightVisibility()
self.updateDirection()
self.updateColor()
self.updateIntensity()
# self.updateGeometryVisibility()
def setProperties(self, vobj):
pl = vobj.PropertiesList
if not 'ShowGeometry' in pl:
vobj.addProperty("App::PropertyBool", "ShowGeometry", "Light",
"Show the light as geometry in the 3D View").ShowGeometry = True
def createLightInstance(self):
raise NotImplementedError()
def createGeometry(self):
raise NotImplementedError()
def getDisplayModes(self,obj):
'''Return a list of display modes.'''
return ["Light"]
def getDefaultDisplayMode(self):
'''Return the name of the default display mode. It must be defined in getDisplayModes.'''
return "Light"
def updateData(self, fp, prop):
if prop in ['HorizontalRotation', 'VerticalRotation']:
self.updateDirection()
elif prop == 'Color':
self.updateColor()
elif prop == 'Intensity':
self.updateIntensity()
elif prop == 'Location':
self.updateLocation()
def onChanged(self, vp, prop):
if prop == 'Visibility':
self.updateLightVisibility()
elif prop == 'ShowGeometry':
self.updateGeometryVisibility()
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def updateLocation(self):
if hasattr(self.Object, 'Location'):
location = self.Object.Location
coinVector = coin.SbVec3f(location.x, location.y, location.z)
self.coinLight.location.setValue(coinVector)
self.updateGeometryLocation(coinVector)
def updateDirection(self):
if hasattr(self.Object, 'HorizontalRotation') and hasattr(self.Object, 'VerticalRotation'):
horizontalRotation = self.Object.HorizontalRotation
verticalRotation = self.Object.VerticalRotation
# Defaults to south to north
direction = FreeCAD.Vector(0, 1, 0)
# Negative Z because we want the light to follow the real sun path from east to west.
rotateZ = FreeCAD.Rotation(FreeCAD.Vector(0, 0, -1), horizontalRotation)
# Negative X because a positive rotation should let the light point downwards
rotateX = FreeCAD.Rotation(FreeCAD.Vector(-1, 0, 0), verticalRotation)
rotation = rotateZ.multiply(rotateX)
direction = rotateZ.multVec(direction)
direction = rotateX.multVec(direction)
coinVector = coin.SbVec3f(direction.x, direction.y, direction.z)
self.coinLight.direction.setValue(coinVector)
self.updateGeometryDirection(rotation)
#print('h: %s, v: %s, d: %s' % (horizontalRotation, verticalRotation, direction))
def updateLightVisibility(self):
self.coinLight.on.setValue(self.ViewObject.Visibility)
def updateColor(self):
color = self.Object.Color
r = color[0]
g = color[1]
b = color[2]
coinColor = coin.SbColor(r, g, b)
self.coinLight.color.setValue(coinColor)
self.material.diffuseColor.setValue(coinColor)
def updateIntensity(self):
self.coinLight.intensity.setValue(self.Object.Intensity)
def updateGeometryLocation(self, coinVector):
self.transform.translation.setValue(coinVector)
def updateGeometryDirection(self, rotation):
# Nothing to do right now. Subclasses override this
pass
def updateGeometryVisibility(self):
if not hasattr(self, 'switch') or self.switch is None:
return
if self.ViewObject.ShowGeometry:
self.switch.whichChild.setValue(0)
else:
self.switch.whichChild.setValue(coin.SO_SWITCH_NONE)
def createDirectionalLight():
obj = FreeCAD.ActiveDocument.addObject("App::FeaturePython", "DirectionalLight")
light = DirectionalLight(obj)
ViewProviderDirectionalLight(obj.ViewObject)
return obj
if __name__ == "__main__":
createDirectionalLight()
|
166319
|
from packaging.version import parse as Version
import sys
import requests
def get_pypi_xmlrpc_client():
"""This is actually deprecated client."""
import xmlrpc.client
return xmlrpc.client.ServerProxy("https://pypi.python.org/pypi", use_datetime=True)
class PyPIClient:
def __init__(self, host="https://pypi.org"):
self._host = host
self._session = requests.Session()
def project(self, package_name):
response = self._session.get(
"{host}/pypi/{project_name}/json".format(host=self._host, project_name=package_name)
)
response.raise_for_status()
return response.json()
def project_release(self, package_name, version):
response = self._session.get(
"{host}/pypi/{project_name}/{version}/json".format(
host=self._host, project_name=package_name, version=version
)
)
response.raise_for_status()
return response.json()
def filter_packages_for_compatibility(self, package_name, version_set):
# only need the packaging.specifiers import if we're actually executing this filter.
from packaging.specifiers import SpecifierSet
results = []
for version in version_set:
requires_python = self.project_release(package_name, version)["info"]["requires_python"]
if requires_python:
if Version(".".join(map(str, sys.version_info[:3]))) in SpecifierSet(requires_python):
results.append(version)
else:
results.append(version)
return results
def get_ordered_versions(self, package_name, filter_by_compatibility=False):
project = self.project(package_name)
versions = [Version(package_version) for package_version in project["releases"].keys()]
versions.sort()
if filter_by_compatibility:
return self.filter_packages_for_compatibility(package_name, versions)
return versions
def get_relevant_versions(self, package_name):
"""Return a tuple: (latest release, latest stable)
If they are different, it means the latest release is not a stable release.
"""
versions = self.get_ordered_versions(package_name)
stable_versions = [version for version in versions if not version.is_prerelease]
return (versions[-1], stable_versions[-1])
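# A minimal usage sketch (requires network access; the package name is just an example):
#
#   client = PyPIClient()
#   latest, latest_stable = client.get_relevant_versions("requests")
#   if latest != latest_stable:
#       print("latest release is a pre-release:", latest)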
|
166349
|
import numpy as np
import os
import math
import random
from argparse import ArgumentParser
from util.config import configure
from tqdm import tqdm
import renderer.randomize.scene_randomizer as sr
'''
This script creates a folder structure containing 'size' + 'testing_size' batches,
where each batch consists of a certain number of scenes based on the batch size.
Each scene also consists of N rendered views.
This script does not generate any imagery by itself, only the scene descriptions required to
render it.
'''
parser = ArgumentParser()
parser.add_argument('--config_dir', type=str, default='', help='Where config file is located')
parser.add_argument('--config', type=str, default='', help='Config file to read')
parser.add_argument('--size', type=int, default=9000, help='How many batches to include in dataset')
parser.add_argument('--testing_size', type=int, default=1000, help='How many testing batches to include in dataset')
parser.add_argument('--device', type=str, default='', help='Which device to run on')
parser.add_argument('--find_checkpoints', action='store_true', help='Attempt to find checkpoints automatically')
parser.add_argument('--out_folder', type=str, default='tmp/', help='Folder to save JSON files to')
args = parser.parse_args()
settings = configure(args, ignore_data=True)
randomizer = sr.select_randomizer(settings.dataset, settings.seed)
# Create main directories
parent_path = os.path.abspath(args.out_folder)
train_path = os.path.join(parent_path, 'train')
test_path = os.path.join(parent_path, 'test')
os.makedirs(parent_path, exist_ok=True)
os.makedirs(train_path, exist_ok=True)
os.makedirs(test_path, exist_ok=True)
def random_scene(factor):
global randomizer
if factor == -1:
randomizer.random_scene()
elif factor == 0:
randomizer.randomize_lighting()
elif factor == 1:
randomizer.randomize_geometry()
else:
randomizer.randomize_materials()
def scene_json(folder, factor):
random_scene(factor)
os.makedirs(folder, exist_ok=True)
for i in range(settings.views_per_scene):
randomizer.random_view()
json_file = folder + "/view%03d.json" % i
params = randomizer.generate_params()
randomizer.save_json(json_file, params)
def generate_batch(folder, batch_size, latent_separation):
randomizer.random_scene()
if latent_separation:
os.makedirs(folder, exist_ok=True)
factor = random.randint(0, 2)
factor_file = folder + "/factor.txt"
with open(factor_file, 'w') as fac:
fac.write(str(factor))
else:
factor = -1
for i in range(batch_size):
scene_path = os.path.join(folder, "scene%04d" % i)
scene_json(scene_path, factor)
def generate_set(folder, size, batch_size, latent_separation):
for i in tqdm(range(size)):
batch_path = os.path.join(folder, "batch%09d" % i)
generate_batch(batch_path, batch_size, latent_separation)
print("Generating training data...")
generate_set(train_path, args.size, settings.batch_size, settings.latent_separation)
print("Generating testing data...")
generate_set(test_path, args.testing_size, settings.test_batch_size, False)
|
166353
|
import FWCore.ParameterSet.Config as cms
from L1TriggerConfig.L1GtConfigProducers.l1GtPrescaleFactorsTechTrig_cfi import *
#
L1GtPrescaleFactorsTechTrigRcdSource = cms.ESSource("EmptyESSource",
recordName = cms.string('L1GtPrescaleFactorsTechTrigRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
|
166373
|
import traceback
import logging
import attr
from .. import entities, exceptions
logger = logging.getLogger(name=__name__)
@attr.s
class User(entities.BaseEntity):
"""
User entity
"""
created_at = attr.ib()
updated_at = attr.ib(repr=False)
name = attr.ib()
last_name = attr.ib()
username = attr.ib()
avatar = attr.ib(repr=False)
email = attr.ib()
role = attr.ib()
type = attr.ib()
org = attr.ib()
id = attr.ib()
# api
_project = attr.ib(repr=False)
_client_api = attr.ib(default=None, repr=False)
_users = attr.ib(repr=False, default=None)
@property
def createdAt(self):
logger.warning(
'Deprecation Warning - param "createdAt" will be deprecated from version "1.41.0'
'Use "created_at"')
return self.created_at
@property
def updatedAt(self):
logger.warning(
'Deprecation Warning - param "updatedAt" will be deprecated from version "1.41.0'
'Use "updated_at"')
return self.updated_at
@staticmethod
def _protected_from_json(_json, project, client_api, users=None):
"""
Same as from_json but with try-except to catch if error
:param _json: platform json
:param project: project entity
:param client_api: ApiClient entity
:param users: Users repository
:return:
"""
try:
user = User.from_json(_json=_json,
project=project,
users=users,
client_api=client_api)
status = True
except Exception:
user = traceback.format_exc()
status = False
return status, user
@property
def project(self):
if self._project is None:
raise exceptions.PlatformException(error='2001',
message='Missing entity "project".')
assert isinstance(self._project, entities.Project)
return self._project
@classmethod
def from_json(cls, _json, project, client_api, users=None):
"""
Build a User entity object from a json
:param _json: _json response from host
:param project: project entity
:param client_api: ApiClient entity
:param users: Users repository
:return: User object
"""
return cls(
created_at=_json.get('createdAt', None),
name=_json.get('firstName', None),
updated_at=_json.get('updatedAt', None),
last_name=_json.get('lastName', None),
username=_json.get('username', None),
avatar=_json.get('avatar', None),
email=_json.get('email', None),
role=_json.get('role', None),
type=_json.get('type', None),
org=_json.get('org', None),
id=_json.get('id', None),
project=project,
users=users,
client_api=client_api)
def to_json(self):
"""
Returns platform _json format of object
:return: platform json format of object
"""
_json = attr.asdict(self,
filter=attr.filters.exclude(attr.fields(User)._project,
attr.fields(User).name,
attr.fields(User)._client_api,
attr.fields(User)._users,
attr.fields(User).last_name,
attr.fields(User).created_at,
attr.fields(User).updated_at,
))
_json['firstName'] = self.name
_json['lastName'] = self.last_name
_json['createdAt'] = self.created_at
_json['updatedAt'] = self.updated_at
return _json
|
166379
|
from datetime import timedelta
from .interchange import WaypointType
class ActivityStatisticCalculator:
ImplicitPauseTime = timedelta(minutes=1, seconds=5)
def CalculateDistance(act, startWpt=None, endWpt=None):
import math
dist = 0
altHold = None # separate from the lastLoc variable, since we want to hold the altitude as long as required
lastTimestamp = lastLoc = None
flatWaypoints = act.GetFlatWaypoints()
if not startWpt:
startWpt = flatWaypoints[0]
if not endWpt:
endWpt = flatWaypoints[-1]
for x in range(flatWaypoints.index(startWpt), flatWaypoints.index(endWpt) + 1):
timeDelta = flatWaypoints[x].Timestamp - lastTimestamp if lastTimestamp else None
lastTimestamp = flatWaypoints[x].Timestamp
if flatWaypoints[x].Type == WaypointType.Pause or (timeDelta and timeDelta > ActivityStatisticCalculator.ImplicitPauseTime):
lastLoc = None # don't count distance while paused
continue
loc = flatWaypoints[x].Location
if loc is None or loc.Longitude is None or loc.Latitude is None:
# Used to throw an exception in this case, but the TCX schema allows for location-free waypoints, so we'll just patch over it.
continue
if loc and lastLoc:
altHold = lastLoc.Altitude if lastLoc.Altitude is not None else altHold
latRads = loc.Latitude * math.pi / 180
meters_lat_degree = 1000 * 111.13292 + 1.175 * math.cos(4 * latRads) - 559.82 * math.cos(2 * latRads)
meters_lon_degree = 1000 * 111.41284 * math.cos(latRads) - 93.5 * math.cos(3 * latRads)
dx = (loc.Longitude - lastLoc.Longitude) * meters_lon_degree
dy = (loc.Latitude - lastLoc.Latitude) * meters_lat_degree
if loc.Altitude is not None and altHold is not None: # incorporate the altitude when possible
dz = loc.Altitude - altHold
else:
dz = 0
dist += math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
lastLoc = loc
return dist
def CalculateTimerTime(act, startWpt=None, endWpt=None):
flatWaypoints = []
for lap in act.Laps:
flatWaypoints.extend(lap.Waypoints)  # flatten per-lap waypoint lists into one list
if len(flatWaypoints) < 3:
# Either no waypoints, or one at the start and one at the end
raise ValueError("Not enough waypoints to calculate timer time")
duration = timedelta(0)
if not startWpt:
startWpt = flatWaypoints[0]
if not endWpt:
endWpt = flatWaypoints[-1]
lastTimestamp = None
for x in range(flatWaypoints.index(startWpt), flatWaypoints.index(endWpt) + 1):
wpt = flatWaypoints[x]
delta = wpt.Timestamp - lastTimestamp if lastTimestamp else None
lastTimestamp = wpt.Timestamp
if wpt.Type is WaypointType.Pause:
lastTimestamp = None
elif delta and delta > act.ImplicitPauseTime:
delta = None # Implicit pauses
if delta:
duration += delta
if duration.total_seconds() == 0 and startWpt is None and endWpt is None:
raise ValueError("Zero-duration activity")
return duration
def CalculateAverageMaxHR(act, startWpt=None, endWpt=None):
flatWaypoints = act.GetFlatWaypoints()
# Python can handle 600+ digit numbers, think it can handle this
maxHR = 0
cumulHR = 0
samples = 0
if not startWpt:
startWpt = flatWaypoints[0]
if not endWpt:
endWpt = flatWaypoints[-1]
for x in range(flatWaypoints.index(startWpt), flatWaypoints.index(endWpt) + 1):
wpt = flatWaypoints[x]
if wpt.HR:
if wpt.HR > maxHR:
maxHR = wpt.HR
cumulHR += wpt.HR
samples += 1
if not samples:
return None, None
cumulHR = cumulHR / samples
return cumulHR, maxHR
|
166396
|
from torch import nn
class Discriminator(nn.Module):
def __init__(self, hidden_size=512):
super(Discriminator,self).__init__()
self.model = nn.Sequential(
nn.Linear(hidden_size, hidden_size//2),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(hidden_size//2, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
# nn.Softmax()
nn.Sigmoid()
)
def forward(self, vqg, questions, qlengths): # pass object of vqg
encoder_hidden = vqg.encode_questions_discriminator(questions, qlengths)
x = encoder_hidden.view(-1, vqg.hidden_size)
verdict = self.model(x)
return verdict
|
166418
|
import discord
from discord.ext import commands
from modules.economy import Economy
from modules.helpers import *
class GamblingHelpers(commands.Cog, name='General'):
def __init__(self, client: commands.Bot) -> None:
self.client = client
self.economy = Economy()
@commands.command(hidden=True)
@commands.is_owner()
async def set(
self,
ctx: commands.Context,
user_id: int=None,
money: int=0,
credits: int=0
):
if money:
self.economy.set_money(user_id, money)
if credits:
self.economy.set_credits(user_id, credits)
@commands.command(
brief=f"Gives you ${DEFAULT_BET*B_MULT} once every {B_COOLDOWN}hrs",
usage="add"
)
@commands.cooldown(1, B_COOLDOWN*3600, type=commands.BucketType.user)
async def add(self, ctx: commands.Context):
amount = DEFAULT_BET*B_MULT
self.economy.add_money(ctx.author.id, amount)
await ctx.send(f"Added ${amount} come back in {B_COOLDOWN}hrs")
@commands.command(
brief="How much money you or someone else has",
usage="money *[@member]",
aliases=['credits']
)
async def money(self, ctx: commands.Context, user: discord.Member=None):
user = user.id if user else ctx.author.id
user = self.client.get_user(user)
profile = self.economy.get_entry(user.id)
embed = make_embed(
title=user.name,
description=(
'**${:,}**'.format(profile[1]) +
'\n**{:,}** credits'.format(profile[2])
),
footer=discord.Embed.Empty
)
embed.set_thumbnail(url=user.avatar_url)
await ctx.send(embed=embed)
@commands.command(
brief="Shows the user with the most money",
usage="leaderboard",
aliases=["top"]
)
async def leaderboard(self, ctx):
entries = self.economy.top_entries(5)
embed = make_embed(title='Leaderboard:', color=discord.Color.gold())
for i, entry in enumerate(entries):
embed.add_field(
name=f"{i+1}. {self.client.get_user(entry[0]).name}",
value='${:,}'.format(entry[1]),
inline=False
)
await ctx.send(embed=embed)
def setup(client: commands.Bot):
client.add_cog(GamblingHelpers(client))
|
166481
|
from __future__ import absolute_import
from __future__ import print_function
from .fsm import reset
|
166498
|
import sys
import pytest
import tempfile
import subprocess
from unittest import mock
from andriller import adb_conn
fake_adb = tempfile.NamedTemporaryFile()
@pytest.fixture
def ADB(mocker):
mocker.patch('andriller.adb_conn.ADBConn.kill')
mocker.patch('andriller.adb_conn.ADBConn._opt_use_capture', return_value=True)
with mock.patch('andriller.adb_conn.ADBConn._get_adb_bin', return_value=fake_adb.name):
with mock.patch('andriller.adb_conn.ADBConn._adb_has_exec', return_value=True):
adb = adb_conn.ADBConn()
adb_cmd = adb.adb.__func__
setattr(adb, 'adb', lambda *args, **kwargs: adb_cmd(adb, *args, **kwargs))
return adb
@pytest.fixture
def ADB_alt(mocker):
mocker.patch('andriller.adb_conn.ADBConn.kill')
mocker.patch('andriller.adb_conn.ADBConn._opt_use_capture', return_value=False)
with mock.patch('andriller.adb_conn.ADBConn._get_adb_bin', return_value=fake_adb.name):
with mock.patch('andriller.adb_conn.ADBConn._adb_has_exec', return_value=False):
adb = adb_conn.ADBConn()
adb_cmd = adb.adb.__func__
setattr(adb, 'adb', lambda *args, **kwargs: adb_cmd(adb, *args, **kwargs))
return adb
@pytest.fixture
def ADB_win(mocker):
mock_sub = mocker.patch('andriller.adb_conn.subprocess', autospec=True)
mock_sub.STARTUPINFO = mock.MagicMock()
mock_sub.STARTF_USESHOWWINDOW = mock.MagicMock()
mocker.patch('andriller.adb_conn.ADBConn.kill')
mocker.patch('andriller.adb_conn.ADBConn._opt_use_capture', return_value=True)
with mock.patch('sys.platform', return_value='win32'):
with mock.patch('andriller.adb_conn.ADBConn._get_adb_bin', return_value=fake_adb.name):
with mock.patch('andriller.adb_conn.ADBConn._adb_has_exec', return_value=True):
adb = adb_conn.ADBConn()
return adb
def test_init_windows(ADB_win):
assert ADB_win.startupinfo is not None
assert ADB_win.rmr == b'\r\r\n'
@pytest.mark.parametrize('file_path, result', [
('/some/file.txt', '/some/file.txt\n'),
('/some/my file.txt', '/some/my file.txt\n'),
('some/file.txt', 'some/file.txt\n'),
])
def test_file_regex(file_path, result):
assert adb_conn.ADBConn._file_regex(file_path).match(result)
def test_adb_simple(ADB, mocker):
output = mock.Mock(stdout=b'lala', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB('hello')
assert res == 'lala'
mock_run.assert_called_with([fake_adb.name, 'hello'],
capture_output=True, shell=False, startupinfo=None)
def test_adb_simple_su(ADB, mocker):
output = mock.Mock(stdout=b'lala', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB('hello', su=True)
assert res == 'lala'
mock_run.assert_called_with([fake_adb.name, 'su -c', 'hello'],
capture_output=True, shell=False, startupinfo=None)
def test_adb_binary(ADB, mocker):
output = mock.Mock(stdout=b'lala', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB('hello', binary=True)
assert res == b'lala'
mock_run.assert_called_with([fake_adb.name, 'hello'],
capture_output=True, shell=False, startupinfo=None)
def test_adb_out(ADB, mocker):
output = mock.Mock(stdout=b'uid(1000)', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB.adb_out('id', binary=False)
assert res == 'uid(1000)'
mock_run.assert_called_with([fake_adb.name, 'shell', 'id'],
capture_output=True, shell=False, startupinfo=None)
def test_adb_out_alt(ADB_alt, mocker):
output = mock.Mock(stdout=b'uid(1000)', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB_alt.adb_out('id', binary=True)
assert res == b'uid(1000)'
mock_run.assert_called_with([fake_adb.name, 'shell', 'id'],
stdout=subprocess.PIPE, shell=False, startupinfo=None)
def test_adb_out_win(ADB_win, mocker):
output = mock.Mock(stdout=b'uid(1000)\r\r\n', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB_win.adb_out('id', binary=True)
assert res == b'uid(1000)\n'
def test_adb_out_uses_exec(ADB, mocker):
ADB._is_adb_out_post_v5 = True
output = mock.Mock(stdout=b'uid(1000)', returncode=0)
mock_run = mocker.patch('andriller.adb_conn.subprocess.run', return_value=output)
res = ADB.adb_out('id', binary=False)
assert res == 'uid(1000)'
mock_run.assert_called_with([fake_adb.name, 'exec-out', 'id'],
capture_output=True, shell=False, startupinfo=None)
|
166518
|
import pytest
from insta_api.insta_api import InstaAPI
from insta_api.endpoints import *
@pytest.fixture(scope="module")
def insta():
insta = InstaAPI(use_cookies=False)
yield insta
insta._close_session()
class TestEndpoints:
""" These tests make sure that the API endpoints are still reachable and not moved"""
def test_base_endpoint(self, insta):
resp = insta.ses.head(base_endpoint)
assert resp.status_code != 404
def test_login_endpoint(self, insta):
resp = insta.ses.head(base_endpoint + login_endpoint)
assert resp.status_code != 404
def test_upload_photo_endpoint(self, insta):
resp = insta.ses.head(base_endpoint + post_photo_endpoint1)
assert resp.status_code != 404
def test_exploretag_endpoint(self, insta):
resp = insta.ses.head(base_endpoint +
explore_tag.format(hashtag="test"))
assert resp.status_code != 404
def test_like_endpoint(self, insta):
resp = insta.ses.head(base_endpoint +
like_endpoint.format(media_id='_'))
assert resp.status_code != 404
def test_follow_endpoint(self, insta):
resp = insta.ses.head(base_endpoint +
follow_endpoint.format(user_id="0"))
assert resp.status_code != 404
def test_unfollow_endpoint(self, insta):
resp = insta.ses.head(base_endpoint +
unfollow_endpoint.format(user_id="0"))
assert resp.status_code != 404
def test_graphql_endpoint(self, insta):
resp = insta.ses.head(base_endpoint + graphql_endpoint)
assert resp.status_code != 404
def test_logout_endpoint(self, insta):
resp = insta.ses.head(base_endpoint + logout_endpoint)
assert resp.status_code != 404
def test_hashtag_suggestions_endpoint(self, insta):
resp = insta.ses.head(base_endpoint + search_hashtag_endpoint)
assert resp.status_code != 404
|
166547
|
import cv2
import numpy as np
from PIL import Image
from PIL import ImageDraw
from subprocess import Popen, PIPE
import pycocotools.mask as coco_mask_util
def draw_bboxes(image, bboxes, labels=None, output_file=None, fill='red'):
"""
Draw bounding boxes on image.
Return image with drawings as BGR ndarray.
Args:
image (string | ndarray): input image path or image BGR ndarray.
bboxes (np.array): bounding boxes.
labels (list of string): the label names of bboxes.
output_file (string): output image path.
"""
if labels:
assert len(bboxes) == len(labels)
if isinstance(image, str):
image = Image.open(image)
elif isinstance(image, np.ndarray):
image = Image.fromarray(image[:, :, ::-1], mode='RGB')
else:
raise ValueError('`image` should be image path in string or '
'image ndarray.')
draw = ImageDraw.Draw(image)
for i in range(len(bboxes)):
xmin, ymin, xmax, ymax = bboxes[i]
left, right, top, bottom = xmin, xmax, ymin, ymax
lines = [(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)]
draw.line(lines, width=4, fill=fill)
if labels and image.mode == 'RGB':
draw.text((left, top), labels[i], (255, 255, 0))
if output_file:
print('The image with bbox is saved as {}'.format(output_file))
image.save(output_file)
return np.array(image)[:, :, ::-1]
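# A minimal usage sketch (hypothetical paths and box values):
#
#   boxes = np.array([[50, 60, 200, 220]])   # one xyxy box
#   out_bgr = draw_bboxes('input.jpg', boxes, labels=['person'],
#                         output_file='with_bbox.jpg')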
def save_as_gif(images, gif_file, fps=5):
"""
Save numpy images as gif file using ffmpeg.
Args:
images (list|ndarray): a list of uint8 images or uint8 ndarray
with shape [time, height, width, channels]. `channels` can
be 1 or 3.
gif_file (str): path to saved gif file.
fps (int): frames per second of the animation.
"""
h, w, c = images[0].shape
cmd = [
'ffmpeg', '-y',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-r', '%.02f' % fps,
'-s', '%dx%d' % (w, h),
'-pix_fmt', {1: 'gray', 3: 'rgb24'}[c],
'-i', '-',
'-filter_complex', '[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse',
'-r', '%.02f' % fps,
'-f', 'gif',
'-']
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in images:
proc.stdin.write(image.tobytes())
out, err = proc.communicate()
if proc.returncode:
err = '\n'.join([' '.join(cmd), err.decode('utf8')])
raise IOError(err)
del proc
with open(gif_file, 'wb') as f:
f.write(out)
def colormap(rgb=False):
"""
Get colormap
"""
color_list = np.array([
0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
]).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
if not rgb:
color_list = color_list[:, ::-1]
return color_list
|
166607
|
class Solution(object):
def alertNames(self, keyName, keyTime):
"""
:type keyName: List[str]
:type keyTime: List[str]
:rtype: List[str]
"""
mapp = {}
for i in range(len(keyName)):
name = keyName[i]
if(name not in mapp):
mapp[name] = [keyTime[i]]
else:
mapp[name].append(keyTime[i])
res = []
for name, arr in mapp.items():
arr.sort()
for i in range(len(arr)-2):
time= arr[i]
t2 = arr[i+1]
t3 = arr[i+2]
if(time[0:2]=="23"):
endTime = "24:00"
if(t2<=endTime and t3<=endTime and t2>time and t3>time):
res.append(name)
break
else:
start = int(time[0:2])
endTime = str(start+1)+time[2:]
if(start<9):
endTime = "0"+endTime
if(t2<=endTime and t3<=endTime):
res.append(name)
break
return sorted(res)
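# Worked example (hedged, inputs made up): three key-card uses by the same
# worker within a single one-hour window trigger an alert.
#
#   Solution().alertNames(["daniel", "daniel", "daniel"],
#                         ["10:00", "10:40", "11:00"])   # -> ["daniel"]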
|
166619
|
from peewee import *
db = SqliteDatabase('course.db', check_same_thread=True)
class BaseModel(Model):
class Meta:
database = db
class GradeData(BaseModel):
semester = CharField(4)
course_no = TextField()
course_name = TextField()
grades = TextField()
total = IntegerField()
type = BooleanField()
GPA = DoubleField()
class Meta:
indexes = (
(('semester', 'course_no'), True),
)
class CourseData(BaseModel):
semester = CharField(4)
course_no = TextField()
course_name = TextField()
credit = DoubleField()
node = TextField(null=True)
dimension = CharField(1, null=True)
lecturer = TextField(null=True)
grade_data = ForeignKeyField(GradeData, backref='course_data', null=True, default=None)
class Meta:
indexes = (
(('semester', 'course_no'), True),
)
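# A minimal setup sketch (standard peewee calls, not part of this module):
#
#   db.connect()
#   db.create_tables([GradeData, CourseData])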
|
166641
|
from rest_framework import serializers
import ast
from pulpo_forms.fields import Validations, Dependencies, Option
class ValidationSerializer(serializers.Serializer):
"""
Serializer for the validations in the versions json
"""
max_len_text = serializers.IntegerField(required=False, allow_null=True)
max_number = serializers.IntegerField(required=False, allow_null=True)
min_number = serializers.IntegerField(required=False, allow_null=True)
def update(self, instance, validated_data):
"""
Given a dictionary of deserialized field values, either update
an existing model instance, or create a new model instance.
"""
if instance is not None:
if (not validated_data.get('max_len_text')
and (validated_data.get('max_len_text') != 0)):
instance.max_len_text = None
else:
instance.max_len_text = validated_data.get(
'max_len_text', instance.max_len_text)
if (not validated_data.get('max_number')
and (validated_data.get('max_number') != 0)):
instance.max_number = None
else:
instance.max_number = validated_data.get(
'max_number', instance.max_number)
if (not validated_data.get('min_number')
and (validated_data.get('min_number') != 0)):
instance.min_number = None
else:
instance.min_number = validated_data.get(
'min_number', instance.min_number)
return instance
class OptionSerializer(serializers.Serializer):
label = serializers.CharField(max_length=100, required=False)
id = serializers.IntegerField(required=False)
def restore_object(self, attrs, instance=None):
"""
Given a dictionary of deserialized field values, either update
an existing model instance, or create a new model instance.
"""
if instance is not None:
instance.label = attrs.get('label', instance.label)
instance.id = attrs.get('id', instance.id)
return instance
else:
opt = Option()
opt.label = attrs.get('label', opt.label)
opt.id = attrs.get('id')
return opt
class DependencySerializer(serializers.Serializer):
pages = serializers.CharField(required=False)
fields = serializers.CharField(required=False)
def update(self, instance, validated_data):
"""
Given a dictionary of deserialized field values, either update
an existing model instance, or create a new model instance.
"""
instance.fields = ast.literal_eval(str(
validated_data.get('fields', instance.fields)))
instance.pages = ast.literal_eval(str(
validated_data.get('pages', instance.pages)))
return instance
class FieldSerializer(serializers.Serializer):
text = serializers.CharField(required=True, max_length=500)
required = serializers.BooleanField(required=True)
tooltip = serializers.CharField(required=False, max_length=300)
answer = serializers.CharField(required=False)
options = OptionSerializer(many=True, required=False, read_only=False)
dependencies = DependencySerializer(required=False)
validations = ValidationSerializer(required=False)
max_id = serializers.IntegerField(required=False)
field_type = serializers.CharField(required=True, max_length=30)
field_id = serializers.IntegerField(required=True)
def update(self, instance, validated_data):
instance.text = validated_data.get('text', instance.text)
instance.required = validated_data.get('required', instance.required)
instance.tooltip = validated_data.get('tooltip', instance.tooltip)
instance.answer = validated_data.get('answer', instance.answer)
instance.options = validated_data.get('options', instance.options)
instance.max_id = validated_data.get('max_id', instance.max_id)
instance.field_type = validated_data.get(
'field_type', instance.field_type)
instance.field_id = validated_data.get('field_id', instance.field_id)
dep = Dependencies()
dependencies = DependencySerializer(validated_data.get(
'dependencies', instance.dependencies))
instance.dependencies = dependencies.update(
dep, validated_data.get('dependencies', instance.dependencies))
val = Validations()
validations = ValidationSerializer(validated_data.get(
'validations', instance.validations))
instance.validations = validations.update(
val, validated_data.get('validations', instance.validations))
return instance
class AfterSubmitSerializer(serializers.Serializer):
"""
Serializer for the validations in the versions json
"""
sendMail = serializers.BooleanField(required=True)
action = serializers.CharField(required=True)
mailSubject = serializers.CharField(required=False, allow_blank=True)
mailText = serializers.CharField(required=False, allow_blank=True)
mailSender = serializers.CharField(required=False, allow_blank=True)
mailRecipient = serializers.CharField(required=False, allow_blank=True)
message = serializers.CharField(required=False, allow_blank=True)
redirect = serializers.CharField(required=False, allow_blank=True)
def update(self, instance, validated_data):
instance.sendMail = validated_data.get('sendMail', instance.sendMail)
instance.action = validated_data.get('action', instance.action)
instance.mailSubject = validated_data.get(
'mailSubject', instance.mailSubject)
instance.mailText = validated_data.get('mailText', instance.mailText)
instance.mailSender = validated_data.get(
'mailSender', instance.mailSender)
instance.mailRecipient = validated_data.get(
'mailRecipient', instance.mailRecipient)
instance.message = validated_data.get('message', instance.message)
instance.redirect = validated_data.get('redirect', instance.redirect)
return instance
|
166643
|
from xml.etree import ElementTree
class Parser:
def __init__(self, path):
self.entity_mentions = []
self.event_mentions = []
self.relation_mentions = []
self.parse_xml(path + '.apf.xml')
def parse_xml(self, xml_path):
tree = ElementTree.parse(xml_path)
root = tree.getroot()
for child in root[0]:
if child.tag == 'entity':
self.entity_mentions.extend(self.parse_entity_tag(child))
elif child.tag in ['value', 'timex2']:
self.entity_mentions.extend(self.parse_value_timex_tag(child))
elif child.tag == 'event':
self.event_mentions.extend(self.parse_event_tag(child))
elif child.tag == 'relation':
self.relation_mentions.extend(self.parse_relation_tag(child))
@staticmethod
def parse_entity_tag(node):
entity_mentions = []
for child in node:
if child.tag != 'entity_mention':
continue
extent = child[0]
head = child[1]
charset = extent[0]
head_charset = head[0]
entity_mention = dict()
entity_mention['entity-id'] = child.attrib['ID']
entity_mention['entity-type'] = '{}:{}'.format(node.attrib['TYPE'], node.attrib['SUBTYPE'])
entity_mention['text'] = charset.text
entity_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
entity_mention["head"] = {"text": head_charset.text,
"position": [int(head_charset.attrib['START']), int(head_charset.attrib['END'])]}
entity_mentions.append(entity_mention)
return entity_mentions
@staticmethod
def parse_relation_tag(node):
relation_mentions = []
for child in node:
if child.tag != 'relation_mention':
continue
extent = child[0]
charset = extent[0]
relation_mention = dict()
relation_mention['relation-id'] = child.attrib['ID']
relation_mention['relation-type'] = '{}:{}'.format(node.attrib['TYPE'], node.attrib['SUBTYPE'])
relation_mention['text'] = charset.text
relation_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
relation_mention['arguments'] = []
for child2 in child:
if child2.tag == 'relation_mention_argument':
extent = child2[0]
charset = extent[0]
relation_mention['arguments'].append({
'text': charset.text,
'position': [int(charset.attrib['START']), int(charset.attrib['END'])],
'role': child2.attrib['ROLE'],
'entity-id': child2.attrib['REFID'],
})
relation_mentions.append(relation_mention)
return relation_mentions
@staticmethod
def parse_event_tag(node):
event_mentions = []
for child in node:
if child.tag == 'event_mention':
event_mention = dict()
event_mention['event-id'] = child.attrib['ID']
event_mention['event_type'] = '{}:{}'.format(node.attrib['TYPE'], node.attrib['SUBTYPE'])
event_mention['arguments'] = []
for child2 in child:
if child2.tag == 'ldc_scope':
charset = child2[0]
event_mention['text'] = charset.text
event_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
if child2.tag == 'anchor':
charset = child2[0]
event_mention['trigger'] = {
'text': charset.text,
'position': [int(charset.attrib['START']), int(charset.attrib['END'])],
}
if child2.tag == 'event_mention_argument':
extent = child2[0]
charset = extent[0]
event_mention['arguments'].append({
'text': charset.text,
'position': [int(charset.attrib['START']), int(charset.attrib['END'])],
'role': child2.attrib['ROLE'],
'entity-id': child2.attrib['REFID'],
})
event_mentions.append(event_mention)
return event_mentions
@staticmethod
def parse_value_timex_tag(node):
entity_mentions = []
for child in node:
extent = child[0]
charset = extent[0]
entity_mention = dict()
entity_mention['entity-id'] = child.attrib['ID']
if 'TYPE' in node.attrib:
entity_mention['entity-type'] = node.attrib['TYPE']
if 'SUBTYPE' in node.attrib:
entity_mention['entity-type'] += ':{}'.format(node.attrib['SUBTYPE'])
if child.tag == 'timex2_mention':
entity_mention['entity-type'] = 'TIM:time'
entity_mention['text'] = charset.text
entity_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
entity_mention["head"] = {"text": charset.text,
"position": [int(charset.attrib['START']), int(charset.attrib['END'])]}
entity_mentions.append(entity_mention)
return entity_mentions
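# A minimal usage sketch (the document path is hypothetical; Parser appends
# '.apf.xml' itself):
#
#   doc = Parser('corpus/some_ace_document')
#   print(len(doc.entity_mentions), len(doc.relation_mentions), len(doc.event_mentions))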
|
166645
|
from datetime import timedelta
import pytest
from .conf import TlsTestConf
class TestProxy:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
conf = TlsTestConf(env=env, extras={
'base': "LogLevel proxy:trace1 proxy_http:trace1 ssl:trace1",
env.domain_b: [
"ProxyPreserveHost on",
f'ProxyPass "/proxy/" "http://127.0.0.1:{env.http_port}/"',
f'ProxyPassReverse "/proxy/" "http://{env.domain_b}:{env.http_port}"',
]
})
# add vhosts a+b and a ssl proxy from a to b
conf.add_tls_vhosts(domains=[env.domain_a, env.domain_b])
conf.install()
assert env.apache_restart() == 0
def test_13_proxy_http_get(self, env):
data = env.tls_get_json(env.domain_b, "/proxy/index.json")
assert data == {'domain': env.domain_b}
@pytest.mark.parametrize("name, value", [
("SERVER_NAME", "b.mod-tls.test"),
("SSL_SESSION_RESUMED", ""),
("SSL_SECURE_RENEG", ""),
("SSL_COMPRESS_METHOD", ""),
("SSL_CIPHER_EXPORT", ""),
("SSL_CLIENT_VERIFY", ""),
])
def test_13_proxy_http_vars(self, env, name: str, value: str):
r = env.tls_get(env.domain_b, f"/proxy/vars.py?name={name}")
assert r.exit_code == 0, r.stderr
assert r.json == {name: value}, r.stdout
|
166657
|
import numpy as np
import pandas as pd
from nilearn import image, input_data
from nilearn.datasets import load_mni152_brain_mask
def get_masker(mask_img=None, target_affine=None):
if isinstance(mask_img, input_data.NiftiMasker):
return mask_img
if mask_img is None:
mask_img = load_mni152_brain_mask()
if target_affine is not None:
if np.ndim(target_affine) == 0:
target_affine = np.eye(3) * target_affine
elif np.ndim(target_affine) == 1:
target_affine = np.diag(target_affine)
mask_img = image.resample_img(
mask_img, target_affine=target_affine, interpolation="nearest"
)
masker = input_data.NiftiMasker(mask_img=mask_img).fit()
return masker
def coords_to_voxels(coords, ref_img=None):
if ref_img is None:
ref_img = load_mni152_brain_mask()
affine = ref_img.affine
coords = np.atleast_2d(coords)
coords = np.hstack([coords, np.ones((len(coords), 1))])
voxels = np.linalg.pinv(affine).dot(coords.T)[:-1].T
voxels = voxels[(voxels >= 0).all(axis=1)]
voxels = voxels[(voxels < ref_img.shape[:3]).all(axis=1)]
voxels = np.floor(voxels).astype(int)
return voxels
def coords_to_peaks_img(coords, mask_img):
mask_img = image.load_img(mask_img)
voxels = coords_to_voxels(coords, mask_img)
peaks = np.zeros(mask_img.shape)
np.add.at(peaks, tuple(voxels.T), 1.0)
peaks_img = image.new_img_like(mask_img, peaks)
return peaks_img
def gaussian_coord_smoothing(
coords, mask_img=None, target_affine=None, fwhm=9.0
):
masker = get_masker(mask_img, target_affine)
peaks_img = coords_to_peaks_img(coords, mask_img=masker.mask_img_)
img = image.smooth_img(peaks_img, fwhm=fwhm)
return masker.inverse_transform(masker.transform(img).squeeze())
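# A minimal usage sketch (made-up MNI coordinates; falls back to the default
# MNI152 brain mask when no mask_img is given):
#
#   coords = np.array([[-30.0, 22.0, 10.0], [40.0, -18.0, 6.0]])
#   smoothed_img = gaussian_coord_smoothing(coords, fwhm=9.0)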
def coordinates_to_maps(
coordinates, mask_img=None, target_affine=(4, 4, 4), fwhm=9.0
):
print(
"Transforming {} coordinates for {} articles".format(
coordinates.shape[0], len(set(coordinates["pmid"]))
)
)
masker = get_masker(mask_img=mask_img, target_affine=target_affine)
images, img_pmids = [], []
for pmid, img in iter_coordinates_to_maps(
coordinates, mask_img=masker, fwhm=fwhm
):
images.append(masker.transform(img).ravel())
img_pmids.append(pmid)
return pd.DataFrame(images, index=img_pmids), masker
def iter_coordinates_to_maps(
coordinates, mask_img=None, target_affine=(4, 4, 4), fwhm=9.0
):
masker = get_masker(mask_img=mask_img, target_affine=target_affine)
articles = coordinates.groupby("pmid")
for i, (pmid, coord) in enumerate(articles):
print(
"{:.1%} pmid: {:< 20}".format(i / len(articles), pmid),
end="\r",
flush=True,
)
img = gaussian_coord_smoothing(
coord.loc[:, ["x", "y", "z"]].values, fwhm=fwhm, mask_img=masker
)
yield pmid, img
|
166725
|
from .settings import *
DATABASES['default']['USER'] = 'djangomigrator'
with open(BASE_DIR + '/_etc_passwords_djangomigrator.txt') as fp:
DATABASES['default']['PASSWORD'] = fp.read().strip()
|
166750
|
import numpy as np
import cv2
import collections
import numbers
import random
import math
import copy
from up.data.datasets.transforms import Augmentation
from up.utils.general.registry_factory import AUGMENTATION_REGISTRY
@AUGMENTATION_REGISTRY.register('color_jitter_mmseg')
class RandomColorJitterMMSeg(Augmentation):
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
color_type='BGR'):
super(RandomColorJitterMMSeg, self).__init__()
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.color2hsv = getattr(cv2, 'COLOR_{}2HSV'.format(color_type))
self.hsv2color = getattr(cv2, 'COLOR_HSV2{}'.format(color_type))
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, img):
"""Brightness distortion."""
if random.randint(0, 2):
return self.convert(
img,
beta=random.uniform(-self.brightness_delta,
self.brightness_delta))
return img
def contrast(self, img):
"""Contrast distortion."""
if random.randint(0, 2):
return self.convert(
img,
alpha=random.uniform(self.contrast_lower, self.contrast_upper))
return img
def saturation(self, img):
"""Saturation distortion."""
if random.randint(0, 2):
img = cv2.cvtColor(img, self.color2hsv)
img[:, :, 1] = self.convert(
img[:, :, 1],
alpha=random.uniform(self.saturation_lower,
self.saturation_upper))
img = cv2.cvtColor(img, self.hsv2color)
return img
def hue(self, img):
"""Hue distortion."""
if random.randint(0, 2):
img = cv2.cvtColor(img, self.color2hsv)
img[:, :,
0] = (img[:, :, 0].astype(int)
+ random.randint(-self.hue_delta, self.hue_delta)) % 180
img = cv2.cvtColor(img, self.hsv2color)
return img
def augment(self, data):
"""
Arguments:
img (np.array): Input image.
Returns:
img (np.array): Color jittered image.
"""
output = copy.copy(data)
img = data.image
assert isinstance(img, np.ndarray)
img = np.uint8(img)
# random brightness
img = self.brightness(img)
# mode == 1 --> apply random contrast before saturation/hue
# mode == 0 --> apply random contrast after saturation/hue (mode == 2 skips it)
mode = random.randint(0, 2)
if mode == 1:
img = self.contrast(img)
# random saturation
img = self.saturation(img)
# random hue
img = self.hue(img)
# random contrast
if mode == 0:
img = self.contrast(img)
img = np.asanyarray(img)
output.image = img
return output
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness_delta)
format_string += ', contrast=({0},{1})'.format(self.contrast_lower, self.contrast_upper)
format_string += ', saturation=({0},{1})'.format(self.saturation_lower, self.saturation_upper)
format_string += ', hue={0})'.format(self.hue_delta)
return format_string
@AUGMENTATION_REGISTRY.register('seg_resize')
class SegResize(Augmentation):
def __init__(self, size, **kwargs):
super(Augmentation, self).__init__()
assert (isinstance(size, collections.Iterable) and len(size) == 2)
self.size = tuple(size)
def augment(self, data):
data['image'] = cv2.resize(data['image'], dsize=self.size, interpolation=cv2.INTER_LINEAR)
data['gt_semantic_seg'] = cv2.resize(data['gt_semantic_seg'], dsize=self.size, interpolation=cv2.INTER_NEAREST)
return data
@AUGMENTATION_REGISTRY.register('seg_rand_resize')
class SegRandResize(Augmentation):
"""
Randomly resize image & label with scale factor in [scale_min, scale_max]
"""
def __init__(self, scale, aspect_ratio=None):
super(SegRandResize, self).__init__()
assert (isinstance(scale, collections.Iterable) and len(scale) == 2)
if isinstance(scale, collections.Iterable) and len(scale) == 2 \
and isinstance(scale[0], numbers.Number) and isinstance(scale[1], numbers.Number):
self.scale = scale
else:
raise (RuntimeError("segtransforms.RandScale() scale param error.\n"))
if aspect_ratio is None:
self.aspect_ratio = aspect_ratio
elif isinstance(aspect_ratio, collections.Iterable) and len(aspect_ratio) == 2 \
and isinstance(aspect_ratio[0], numbers.Number) and isinstance(aspect_ratio[1], numbers.Number) \
and 0 < aspect_ratio[0] < aspect_ratio[1]:
self.aspect_ratio = aspect_ratio
else:
raise (RuntimeError("segtransforms.RandScale() aspect_ratio param error.\n"))
def augment(self, data):
image = data['image']
label = data['gt_semantic_seg']
if random.random() < 0.5:
temp_scale = self.scale[0] + (1. - self.scale[0]) * random.random()
else:
temp_scale = 1. + (self.scale[1] - 1.) * random.random()
temp_aspect_ratio = 1.0
if self.aspect_ratio is not None:
temp_aspect_ratio = self.aspect_ratio[0] + (self.aspect_ratio[1] - self.aspect_ratio[0]) * random.random()
temp_aspect_ratio = math.sqrt(temp_aspect_ratio)
scale_factor_w = temp_scale * temp_aspect_ratio
scale_factor_h = temp_scale / temp_aspect_ratio
h, w, _ = image.shape
new_w = int(w * scale_factor_w)
new_h = int(h * scale_factor_h)
data['image'] = cv2.resize(image, dsize=(new_w, new_h), interpolation=cv2.INTER_LINEAR)
data['gt_semantic_seg'] = cv2.resize(label, dsize=(new_w, new_h), interpolation=cv2.INTER_NEAREST)
return data
@AUGMENTATION_REGISTRY.register('seg_crop')
class SegCrop(Augmentation):
"""Crops the given tensor.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is made.
"""
def __init__(self, size, crop_type='center', ignore_label=255):
super(SegCrop, self).__init__()
if isinstance(size, int):
self.crop_h = size
self.crop_w = size
elif isinstance(size, collections.Iterable) and len(size) == 2 \
and isinstance(size[0], int) and isinstance(size[1], int) \
and size[0] > 0 and size[1] > 0:
self.crop_h = size[0]
self.crop_w = size[1]
else:
raise (RuntimeError("crop size error.\n"))
if crop_type == 'center' or crop_type == 'rand':
self.crop_type = crop_type
else:
raise (RuntimeError("crop type error: rand | center\n"))
if isinstance(ignore_label, int):
self.ignore_label = ignore_label
else:
raise (RuntimeError("ignore_label should be an integer number\n"))
def augment(self, data):
image = data['image']
label = data['gt_semantic_seg']
h, w, _ = image.shape
pad_h = max(self.crop_h - h, 0)
pad_w = max(self.crop_w - w, 0)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label))
h, w, _ = image.shape
if self.crop_type == 'rand':
h_off = random.randint(0, h - self.crop_h)
w_off = random.randint(0, w - self.crop_w)
else:
h_off = (h - self.crop_h) // 2
w_off = (w - self.crop_w) // 2
data['image'] = np.asarray(image[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
data['gt_semantic_seg'] = np.asarray(label[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
return data
@AUGMENTATION_REGISTRY.register('seg_random_flip')
class SegRandomHorizontalFlip(Augmentation):
def augment(self, data):
image = data['image']
label = data['gt_semantic_seg']
flip = np.random.choice(2) * 2 - 1
data['image'] = image[:, ::flip, :]
data['gt_semantic_seg'] = label[:, ::flip]
return data
@AUGMENTATION_REGISTRY.register('seg_rand_rotate')
class RandRotate(Augmentation):
def augment(self, data):
image = data['image']
label = data['gt_semantic_seg']
angle = random.random() * 20 - 10
h, w = image.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1)
data['image'] = cv2.warpAffine(image, rotation_matrix, (w, h), flags=cv2.INTER_LINEAR)
data['gt_semantic_seg'] = cv2.warpAffine(label, rotation_matrix, (w, h), flags=cv2.INTER_NEAREST)
return data
@AUGMENTATION_REGISTRY.register('seg_rand_blur')
class RandomGaussianBlur(Augmentation):
def augment(self, data):
gauss_size = random.choice([1, 3, 5, 7])
if gauss_size > 1:
# do the gaussian blur
data['image'] = cv2.GaussianBlur(data['image'], (gauss_size, gauss_size), 0)
return data
@AUGMENTATION_REGISTRY.register('seg_rand_brightness')
class Random_Brightness(Augmentation):
def __init__(self, shift_value=10):
super().__init__()
self.shift_value = shift_value
def augment(self, data):
if random.random() < 0.5:
return data
image = data['image']
image = image.astype(np.float32)
shift = random.randint(-self.shift_value, self.shift_value)
image[:, :, :] += shift
image = np.around(image)
image = np.clip(image, 0, 255).astype(np.uint8)
data['image'] = image
return data
|
166762
|
import torch
from torch import nn
from .utils import Conv, ConcatBlock
class PathAggregationNetwork(nn.Module):
def __init__(self, in_channels_list, depth):
super().__init__()
self.inner_blocks = nn.ModuleList()
self.layer_blocks = nn.ModuleList()
self.upsample_blocks = nn.ModuleList()
self.downsample_blocks = nn.ModuleList()
self.outer_blocks = nn.ModuleList()
for i, ch in enumerate(in_channels_list):
self.inner_blocks.append(ConcatBlock(2 * ch if i < 2 else in_channels_list[-1], ch, depth, False))
if i > 0:
in_channels = in_channels_list[i - 1]
self.layer_blocks.append(Conv(ch, in_channels, 1))
self.upsample_blocks.append(nn.Upsample(scale_factor=2))
self.downsample_blocks.append(Conv(in_channels, in_channels, 3, 2))
self.outer_blocks.append(ConcatBlock(ch, ch, depth, False))
#for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_uniform_(m.weight, a=1)
def forward(self, x):
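        # `x` is expected to be a list/tuple of feature maps ordered from highest to
        # lowest resolution, one per entry of `in_channels_list`; a list of the same
        # length is returned after the top-down and bottom-up aggregation passes.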
results = []
last_inner = self.inner_blocks[-1](x[-1])
results.append(self.layer_blocks[-1](last_inner))
for i in range(len(x) - 2, -1, -1):
inner_top_down = self.upsample_blocks[i](results[0])
last_inner = self.inner_blocks[i](torch.cat((inner_top_down, x[i]), dim=1)) # official
#last_inner = self.inner_blocks[i](torch.cat((x[i], inner_top_down), dim=1)) # old
results.insert(0, last_inner if i == 0 else self.layer_blocks[i - 1](last_inner))
for i in range(len(x) - 1):
outer_bottom_up = self.downsample_blocks[i](results[i])
layer_result = results[i + 1]
results[i + 1] = self.outer_blocks[i](torch.cat((outer_bottom_up, layer_result), dim=1))
return results
|
166802
|
import argparse
import pickle
import numpy as np
from numba import njit
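# count_trees enumerates every parent assignment (tree over the K clones) that is
# consistent with the candidate-ancestry matrix `tau` and that leaves each parent with
# a non-negative residual frequency after subtracting its children's rows of `phi`.
# `traversal` selects depth-first ('dfs') or breadth-first ('bfs') expansion of the
# partial trees.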
@njit
def count_trees(tau, phi, order, traversal):
assert traversal == 'dfs' or traversal == 'bfs'
K = len(tau)
expected_colsum = np.ones(K)
expected_colsum[0] = 0
first_partial = np.copy(tau)
np.fill_diagonal(first_partial, 0)
first_delta = np.copy(phi)
partial_trees = [(1, first_partial, first_delta)]
completed_trees = 0
while len(partial_trees) > 0:
if traversal == 'dfs':
to_resolve, P, delta = partial_trees.pop()
else:
to_resolve, P, delta = partial_trees.pop(0)
#to_resolve, P, delta = partial_trees[0]
#partial_trees = partial_trees[1:]
if to_resolve == K:
assert np.all(expected_colsum == np.sum(P, axis=0))
assert np.all(0 <= delta) and np.all(delta <= 1)
np.fill_diagonal(P, 1)
completed_trees += 1
continue
R = order[to_resolve]
parents = np.nonzero(P[:,R])[0]
for parent in parents:
P_prime = np.copy(P)
P_prime[:,R] = 0
P_prime[parent,R] = 1
if np.any(delta[parent] - phi[R] < 0):
continue
delta_prime = np.copy(delta)
delta_prime[parent] -= phi[R]
partial_trees.append((to_resolve + 1, P_prime, delta_prime))
return completed_trees
@njit
def make_order(phi):
phisum = np.sum(phi, axis=1)
order = np.argsort(-phisum)
assert order[0] == 0
return order
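# make_tau builds the K x K candidate-ancestry matrix used by count_trees above:
# tau[i, j] = 1 when clone i's frequencies dominate clone j's in every sample
# (phi[i] >= phi[j] elementwise), i.e. i is a possible parent of j; the diagonal is 1
# by construction.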
@njit
def make_tau(phi, order):
K, S = phi.shape
tau = np.eye(K)
for I in range(K):
for J in range(I + 1, K):
I_prime = order[I]
J_prime = order[J]
assert not np.all(phi[I_prime] == phi[J_prime])
if np.all(phi[I_prime] >= phi[J_prime]):
tau[I_prime,J_prime] = 1
return tau
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('sim_data_fn')
args = parser.parse_args()
with open(args.sim_data_fn, 'rb') as dataf:
simdata = pickle.load(dataf)
phi, true_tree = simdata['phi'], simdata['adjm']
order = make_order(phi)
tau = make_tau(phi, order)
num_trees = count_trees(tau, phi, order, 'dfs')
print(args.sim_data_fn, num_trees)
main()
|
166811
|
from .base import GenerativeAPS
from .fitter import GaussNewtonAPSFitter
from .algorithm import Inverse, Forward
|
166822
|
import magma as m
from magma.testing.utils import has_warning, has_error
def _check_foo_interface(Foo):
assert list(Foo.interface.ports.keys()) == ["I", "O"]
assert isinstance(Foo.interface.ports["I"], m.Bit)
assert Foo.interface.ports["I"].is_output()
assert isinstance(Foo.interface.ports["O"], m.Bit)
assert Foo.interface.ports["O"].is_input()
def test_new_style_basic():
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
m.wire(io.I, io.O)
assert _Foo.name == "_Foo"
assert m.isdefinition(_Foo)
_check_foo_interface(_Foo)
def test_new_style_name():
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
name = "new_name"
assert _Foo.name == "new_name"
def test_new_style_with_instance():
instances = []
class _Bar(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
m.wire(io.I, io.O)
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
bar = _Bar()
m.wire(io.I, bar.I)
m.wire(bar.O, io.O)
instances.append(bar)
assert _Foo.instances == instances
assert _Foo.O.value() is instances[0].O
assert instances[0].I.value() is _Foo.I
def test_old_style_override(caplog):
class _Foo(m.Circuit):
IO = ["I", m.In(m.Bit), "O", m.Out(m.Bit)]
io = None # doesn't matter what the value of io is.
_check_foo_interface(_Foo)
expected = "'IO' and 'io' should not both be specified, ignoring 'io'"
assert has_warning(caplog, expected)
def test_new_style_not_isdefinition():
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
assert not m.isdefinition(_Foo)
def test_new_style_unconnected(caplog):
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bits[2]))
m.wire(io.I, io.O[0])
assert m.isdefinition(_Foo)
assert has_error(caplog, "Output port _Foo.O not driven")
def test_new_style_with_definition_method(caplog):
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
m.wire(io.I, io.O)
@classmethod
def definition(io):
raise Exception()
assert m.isdefinition(_Foo)
expected = ("Supplying method 'definition' with new inline definition "
"syntax is not supported, ignoring 'definition'")
assert has_warning(caplog, expected)
def test_defn_wiring_error(caplog):
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.In(m.Bit), O1=m.Out(m.Bits[1]))
m.wire(io.I, io.O)
m.wire(io.I, io.O1)
assert not m.isdefinition(_Foo)
assert has_error(caplog,
"Cannot wire _Foo.I (Out(Bit)) to _Foo.O (Out(Bit))")
assert has_error(caplog,
"Cannot wire _Foo.I (Out(Bit)) to _Foo.O1 (In(Bits[1]))")
def test_inst_wiring_error(caplog):
class _Bar(m.Circuit):
io = m.IO(I=m.In(m.Bits[1]), O=m.Out(m.Bits[1]))
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
bar = _Bar()
m.wire(io.I, bar.I)
m.wire(bar.O, io.O)
assert has_error(
caplog,
"Cannot wire _Foo.I (Out(Bit)) to _Foo._Bar_inst0.I (In(Bits[1]))")
assert has_error(
caplog,
"Cannot wire _Foo._Bar_inst0.O (Out(Bits[1])) to _Foo.O (In(Bit))")
assert has_error(caplog, "Output port _Foo.O not driven")
assert has_error(caplog, "Input port _Bar_inst0.I not driven")
def test_nested_definition():
class _Foo(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
class _Bar(m.Circuit):
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
m.wire(io.I, io.O)
bar = _Bar()
m.wire(io.I, bar.I)
m.wire(bar.O, io.O)
assert repr(_Foo) == """_Foo = DefineCircuit("_Foo", "I", In(Bit), "O", Out(Bit))
_Bar_inst0 = _Bar()
wire(_Foo.I, _Bar_inst0.I)
wire(_Bar_inst0.O, _Foo.O)
EndCircuit()"""
assert repr(_Foo._Bar) == """_Bar = DefineCircuit("_Bar", "I", In(Bit), "O", Out(Bit))
wire(_Bar.I, _Bar.O)
EndCircuit()"""
|
166823
|
from tqdm import tqdm
def apply_with_progress_bar(desc=None):
def decorator(key_func):
def func_wrapper(iterable):
pbar = tqdm(total=len(iterable), desc=desc)
for obj in iterable:
pbar.update()
key_func(obj)
pbar.close()
return func_wrapper
return decorator
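# Illustrative usage sketch (names are hypothetical, not part of this module):
#
#     @apply_with_progress_bar(desc="Processing items")
#     def handle(item):
#         ...  # work on a single item
#
#     handle(items)  # iterates `items`, updating a tqdm bar and calling the body per item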
|
166839
|
from helpers import render_frames
from graphs.ForwardRendering import ForwardRendering as g
from falcor import *
g.unmarkOutput("ForwardLightingPass.motionVecs")
m.addGraph(g)
m.loadScene("grey_and_white_room/grey_and_white_room.fbx")
ctx = locals()
# default
render_frames(ctx, 'default', frames=[1,16,64,128,256])
exit()
|
166875
|
import os
import os.path
import yaml
class Configuration:
def __init__(self):
self.path = os.path.dirname(__file__)
def get_liq_bands(self):
return self.__load_config(os.path.join(self.path,'liq_bands.yml'))
def get_trades_bands(self):
return self.__load_config(os.path.join(self.path,'trades_bands.yml'))
def __load_config(self, file_path):
if not os.path.exists(file_path):
            raise AttributeError(f"Config file not found: {file_path}")
# TODO: assert with a template
        with open(file_path, 'r') as stream:
try:
yaml_conf = yaml.load(stream, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
raise Exception(f'Error loading '
f'configuration file {file_path}: {exc}')
return yaml_conf
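# Illustrative usage (hypothetical variable names): Configuration().get_liq_bands()
# returns the parsed contents of liq_bands.yml located next to this module, and
# get_trades_bands() does the same for trades_bands.yml.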
|
166901
|
import pytest
from super_mario.base_pipeline import BasePipeline
from super_mario.decorators import process_pipe
from super_mario.exceptions import GlobalContextUpdateException
@pytest.fixture
def simple_pipeline():
class SimplePipeline(BasePipeline):
pipeline = [
'sum_numbers',
'multiply_numbers',
]
@process_pipe
def sum_numbers(a, b):
return {'d': a + b}
@process_pipe
def multiply_numbers(c, d):
return {'d': c * d}
return SimplePipeline()
def test_context_update_raises_exception(simple_pipeline):
with pytest.raises(GlobalContextUpdateException):
simple_pipeline.run(a=1, b=2, c=3)
|
166912
|
import py
import sys, struct
import ctypes
from pypy.rpython.lltypesystem import lltype, rffi, llmemory
from pypy.rpython.tool import rffi_platform
from pypy.rpython.lltypesystem.ll2ctypes import lltype2ctypes, ctypes2lltype
from pypy.rpython.lltypesystem.ll2ctypes import standard_c_lib
from pypy.rpython.lltypesystem.ll2ctypes import uninitialized2ctypes
from pypy.rpython.lltypesystem.ll2ctypes import ALLOCATED
from pypy.rpython.annlowlevel import llhelper
from pypy.rlib import rposix
from pypy.translator.tool.cbuild import ExternalCompilationInfo
from pypy.tool.udir import udir
class TestLL2Ctypes(object):
def setup_method(self, meth):
ALLOCATED.clear()
def test_primitive(self):
assert lltype2ctypes(5) == 5
assert lltype2ctypes('?') == ord('?')
assert lltype2ctypes('\xE0') == 0xE0
assert lltype2ctypes(unichr(1234)) == 1234
assert ctypes2lltype(lltype.Signed, 5) == 5
assert ctypes2lltype(lltype.Char, ord('a')) == 'a'
assert ctypes2lltype(lltype.UniChar, ord(u'x')) == u'x'
assert ctypes2lltype(lltype.Char, 0xFF) == '\xFF'
assert lltype2ctypes(5.25) == 5.25
assert ctypes2lltype(lltype.Float, 5.25) == 5.25
assert lltype2ctypes(u'x') == ord(u'x')
res = lltype2ctypes(rffi.r_singlefloat(-3.5))
assert isinstance(res, ctypes.c_float)
assert res.value == -3.5
res = ctypes2lltype(lltype.SingleFloat, ctypes.c_float(-3.5))
assert isinstance(res, rffi.r_singlefloat)
assert float(res) == -3.5
assert lltype2ctypes(rffi.r_ulong(-1)) == sys.maxint * 2 + 1
res = ctypes2lltype(lltype.Unsigned, sys.maxint * 2 + 1)
assert (res, type(res)) == (rffi.r_ulong(-1), rffi.r_ulong)
res = lltype2ctypes(llmemory.sizeof(lltype.Signed))
assert res == struct.calcsize("l")
S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
res = lltype2ctypes(llmemory.sizeof(S))
assert res == struct.calcsize("ll")
p = lltype.nullptr(S)
cptr = lltype2ctypes(p)
assert not cptr
py.test.raises(ValueError, 'cptr.contents') # NULL pointer access
res = ctypes2lltype(lltype.Ptr(S), cptr)
assert res == p
assert not ALLOCATED # detects memory leaks in the test
def test_simple_struct(self):
S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
s = lltype.malloc(S, flavor='raw')
s.x = 123
sc = lltype2ctypes(s)
assert isinstance(sc.contents, ctypes.Structure)
assert sc.contents.x == 123
sc.contents.x = 456
assert s.x == 456
s.x = 789
assert sc.contents.x == 789
s.y = 52
assert sc.contents.y == 52
lltype.free(s, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_struct_ptrs(self):
S2 = lltype.Struct('S2', ('y', lltype.Signed))
S1 = lltype.Struct('S', ('x', lltype.Signed), ('p', lltype.Ptr(S2)))
s1 = lltype.malloc(S1, flavor='raw')
s2a = lltype.malloc(S2, flavor='raw')
s2b = lltype.malloc(S2, flavor='raw')
s2a.y = ord('a')
s2b.y = ord('b')
sc1 = lltype2ctypes(s1)
sc1.contents.x = 50
assert s1.x == 50
sc1.contents.p = lltype2ctypes(s2a)
assert s1.p == s2a
s1.p.y -= 32
assert sc1.contents.p.contents.y == ord('A')
s1.p = s2b
sc1.contents.p.contents.y -= 32
assert s2b.y == ord('B')
lltype.free(s1, flavor='raw')
lltype.free(s2a, flavor='raw')
lltype.free(s2b, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_simple_array(self):
A = lltype.Array(lltype.Signed)
a = lltype.malloc(A, 10, flavor='raw')
a[0] = 100
a[1] = 101
a[2] = 102
ac = lltype2ctypes(a, normalize=False)
assert isinstance(ac.contents, ctypes.Structure)
assert ac.contents.length == 10
assert ac.contents.items[1] == 101
ac.contents.items[2] = 456
assert a[2] == 456
a[3] = 789
assert ac.contents.items[3] == 789
lltype.free(a, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_array_nolength(self):
A = lltype.Array(lltype.Signed, hints={'nolength': True})
a = lltype.malloc(A, 10, flavor='raw')
a[0] = 100
a[1] = 101
a[2] = 102
ac = lltype2ctypes(a, normalize=False)
assert isinstance(ac.contents, ctypes.Structure)
assert ac.contents.items[1] == 101
ac.contents.items[2] = 456
assert a[2] == 456
a[3] = 789
assert ac.contents.items[3] == 789
assert ctypes.sizeof(ac.contents) == 10 * ctypes.sizeof(ctypes.c_long)
lltype.free(a, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_charp(self):
s = rffi.str2charp("hello")
sc = lltype2ctypes(s, normalize=False)
assert sc.contents.items[0] == ord('h')
assert sc.contents.items[1] == ord('e')
assert sc.contents.items[2] == ord('l')
assert sc.contents.items[3] == ord('l')
assert sc.contents.items[4] == ord('o')
assert sc.contents.items[5] == 0
assert not hasattr(sc.contents, 'length')
sc.contents.items[1] = ord('E')
assert s[1] == 'E'
s[0] = 'H'
assert sc.contents.items[0] == ord('H')
rffi.free_charp(s)
assert not ALLOCATED # detects memory leaks in the test
def test_unicharp(self):
SP = rffi.CArrayPtr(lltype.UniChar)
s = lltype.malloc(SP.TO, 4, flavor='raw')
s[0] = u'x'
s[1] = u'y'
s[2] = u'z'
s[3] = u'\x00'
sc = lltype2ctypes(s, normalize=False)
assert sc.contents.items[0] == ord(u'x')
assert sc.contents.items[1] == ord(u'y')
assert sc.contents.items[2] == ord(u'z')
assert not hasattr(sc.contents, 'length')
lltype.free(s, flavor='raw')
assert not ALLOCATED
def test_strlen(self):
eci = ExternalCompilationInfo(includes=['string.h'])
strlen = rffi.llexternal('strlen', [rffi.CCHARP], rffi.SIZE_T,
compilation_info=eci)
s = rffi.str2charp("xxx")
res = strlen(s)
rffi.free_charp(s)
assert res == 3 # actually r_size_t(3)
s = rffi.str2charp("")
res = strlen(s)
rffi.free_charp(s)
assert res == 0 # actually r_size_t(0)
assert not ALLOCATED # detects memory leaks in the test
def test_func_not_in_clib(self):
eci = ExternalCompilationInfo(libraries=['m'])
foobar = rffi.llexternal('I_really_dont_exist', [], lltype.Signed)
py.test.raises(NotImplementedError, foobar)
foobar = rffi.llexternal('I_really_dont_exist', [], lltype.Signed,
compilation_info=eci) # math library
py.test.raises(NotImplementedError, foobar)
eci = ExternalCompilationInfo(libraries=['m', 'z'])
foobar = rffi.llexternal('I_really_dont_exist', [], lltype.Signed,
compilation_info=eci) # math and zlib
py.test.raises(NotImplementedError, foobar)
eci = ExternalCompilationInfo(libraries=['I_really_dont_exist_either'])
foobar = rffi.llexternal('I_really_dont_exist', [], lltype.Signed,
compilation_info=eci)
py.test.raises(NotImplementedError, foobar)
assert not ALLOCATED # detects memory leaks in the test
def test_cstruct_to_ll(self):
S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
s = lltype.malloc(S, flavor='raw')
s2 = lltype.malloc(S, flavor='raw')
s.x = 123
sc = lltype2ctypes(s)
t = ctypes2lltype(lltype.Ptr(S), sc)
assert lltype.typeOf(t) == lltype.Ptr(S)
assert s == t
assert not (s != t)
assert t == s
assert not (t != s)
assert t != lltype.nullptr(S)
assert not (t == lltype.nullptr(S))
assert lltype.nullptr(S) != t
assert not (lltype.nullptr(S) == t)
assert t != s2
assert not (t == s2)
assert s2 != t
assert not (s2 == t)
assert t.x == 123
t.x += 1
assert s.x == 124
s.x += 1
assert t.x == 125
lltype.free(s, flavor='raw')
lltype.free(s2, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_carray_to_ll(self):
A = lltype.Array(lltype.Signed, hints={'nolength': True})
a = lltype.malloc(A, 10, flavor='raw')
a2 = lltype.malloc(A, 10, flavor='raw')
a[0] = 100
a[1] = 101
a[2] = 110
ac = lltype2ctypes(a)
b = ctypes2lltype(lltype.Ptr(A), ac)
assert lltype.typeOf(b) == lltype.Ptr(A)
assert b == a
assert not (b != a)
assert a == b
assert not (a != b)
assert b != lltype.nullptr(A)
assert not (b == lltype.nullptr(A))
assert lltype.nullptr(A) != b
assert not (lltype.nullptr(A) == b)
assert b != a2
assert not (b == a2)
assert a2 != b
assert not (a2 == b)
assert b[2] == 110
b[2] *= 2
assert a[2] == 220
a[2] *= 3
assert b[2] == 660
lltype.free(a, flavor='raw')
lltype.free(a2, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_strchr(self):
eci = ExternalCompilationInfo(includes=['string.h'])
strchr = rffi.llexternal('strchr', [rffi.CCHARP, rffi.INT],
rffi.CCHARP, compilation_info=eci)
s = rffi.str2charp("hello world")
res = strchr(s, ord('r'))
assert res[0] == 'r'
assert res[1] == 'l'
assert res[2] == 'd'
assert res[3] == '\x00'
# XXX maybe we should also allow res[-1], res[-2]...
rffi.free_charp(s)
assert not ALLOCATED # detects memory leaks in the test
def test_frexp(self):
eci = ExternalCompilationInfo(includes=['math.h'],
libraries=['m'])
A = lltype.FixedSizeArray(rffi.INT, 1)
frexp = rffi.llexternal('frexp', [rffi.DOUBLE, lltype.Ptr(A)],
rffi.DOUBLE, compilation_info=eci)
p = lltype.malloc(A, flavor='raw')
res = frexp(2.5, p)
assert res == 0.625
assert p[0] == 2
lltype.free(p, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_rand(self):
eci = ExternalCompilationInfo(includes=['stdlib.h'])
rand = rffi.llexternal('rand', [], rffi.INT,
compilation_info=eci)
srand = rffi.llexternal('srand', [rffi.UINT], lltype.Void,
compilation_info=eci)
srand(rffi.r_uint(123))
res1 = rand()
res2 = rand()
res3 = rand()
srand(rffi.r_uint(123))
res1b = rand()
res2b = rand()
res3b = rand()
assert res1 == res1b
assert res2 == res2b
assert res3 == res3b
assert not ALLOCATED # detects memory leaks in the test
def test_opaque_obj(self):
eci = ExternalCompilationInfo(
includes = ['sys/time.h', 'time.h']
)
TIMEVALP = rffi.COpaquePtr('struct timeval', compilation_info=eci)
TIMEZONEP = rffi.COpaquePtr('struct timezone', compilation_info=eci)
gettimeofday = rffi.llexternal('gettimeofday', [TIMEVALP, TIMEZONEP],
rffi.INT, compilation_info=eci)
ll_timevalp = lltype.malloc(TIMEVALP.TO, flavor='raw')
ll_timezonep = lltype.malloc(TIMEZONEP.TO, flavor='raw')
res = gettimeofday(ll_timevalp, ll_timezonep)
assert res != -1
lltype.free(ll_timezonep, flavor='raw')
lltype.free(ll_timevalp, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_simple_cast(self):
assert rffi.cast(rffi.SIGNEDCHAR, 0x123456) == 0x56
assert rffi.cast(rffi.SIGNEDCHAR, 0x123481) == -127
assert rffi.cast(rffi.CHAR, 0x123456) == '\x56'
assert rffi.cast(rffi.CHAR, 0x123481) == '\x81'
assert rffi.cast(rffi.UCHAR, 0x123481) == 0x81
assert not ALLOCATED # detects memory leaks in the test
def test_forced_ptr_cast(self):
import array
A = lltype.Array(lltype.Signed, hints={'nolength': True})
B = lltype.Array(lltype.Char, hints={'nolength': True})
a = lltype.malloc(A, 10, flavor='raw')
for i in range(10):
a[i] = i*i
b = rffi.cast(lltype.Ptr(B), a)
checker = array.array('l')
for i in range(10):
checker.append(i*i)
expected = checker.tostring()
for i in range(len(expected)):
assert b[i] == expected[i]
c = rffi.cast(rffi.VOIDP, a)
addr = lltype2ctypes(c)
#assert addr == ctypes.addressof(a._obj._ctypes_storage)
d = ctypes2lltype(rffi.VOIDP, addr)
assert lltype.typeOf(d) == rffi.VOIDP
assert c == d
e = rffi.cast(lltype.Ptr(A), d)
for i in range(10):
assert e[i] == i*i
c = lltype.nullptr(rffi.VOIDP.TO)
addr = rffi.cast(lltype.Signed, c)
assert addr == 0
lltype.free(a, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_funcptr1(self):
def dummy(n):
return n+1
FUNCTYPE = lltype.FuncType([lltype.Signed], lltype.Signed)
cdummy = lltype2ctypes(llhelper(lltype.Ptr(FUNCTYPE), dummy))
assert isinstance(cdummy,
ctypes.CFUNCTYPE(ctypes.c_long, ctypes.c_long))
res = cdummy(41)
assert res == 42
lldummy = ctypes2lltype(lltype.Ptr(FUNCTYPE), cdummy)
assert lltype.typeOf(lldummy) == lltype.Ptr(FUNCTYPE)
res = lldummy(41)
assert res == 42
assert not ALLOCATED # detects memory leaks in the test
def test_funcptr2(self):
FUNCTYPE = lltype.FuncType([rffi.CCHARP], lltype.Signed)
cstrlen = standard_c_lib.strlen
llstrlen = ctypes2lltype(lltype.Ptr(FUNCTYPE), cstrlen)
assert lltype.typeOf(llstrlen) == lltype.Ptr(FUNCTYPE)
p = rffi.str2charp("hi there")
res = llstrlen(p)
assert res == 8
cstrlen2 = lltype2ctypes(llstrlen)
cp = lltype2ctypes(p)
assert cstrlen2.restype == ctypes.c_long
res = cstrlen2(cp)
assert res == 8
rffi.free_charp(p)
assert not ALLOCATED # detects memory leaks in the test
def test_qsort(self):
CMPFUNC = lltype.FuncType([rffi.VOIDP, rffi.VOIDP], rffi.INT)
qsort = rffi.llexternal('qsort', [rffi.VOIDP,
rffi.SIZE_T,
rffi.SIZE_T,
lltype.Ptr(CMPFUNC)],
lltype.Void)
lst = [23, 43, 24, 324, 242, 34, 78, 5, 3, 10]
A = lltype.Array(lltype.Signed, hints={'nolength': True})
a = lltype.malloc(A, 10, flavor='raw')
for i in range(10):
a[i] = lst[i]
SIGNEDPTR = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))
def my_compar(p1, p2):
p1 = rffi.cast(SIGNEDPTR, p1)
p2 = rffi.cast(SIGNEDPTR, p2)
print 'my_compar:', p1[0], p2[0]
return rffi.cast(rffi.INT, cmp(p1[0], p2[0]))
qsort(rffi.cast(rffi.VOIDP, a),
rffi.cast(rffi.SIZE_T, 10),
rffi.cast(rffi.SIZE_T, llmemory.sizeof(lltype.Signed)),
llhelper(lltype.Ptr(CMPFUNC), my_compar))
for i in range(10):
print a[i],
print
lst.sort()
for i in range(10):
assert a[i] == lst[i]
lltype.free(a, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
# def test_signal(self):...
def test_uninitialized2ctypes(self):
# for now, uninitialized fields are filled with 0xDD in the ctypes data
def checkobj(o, size):
p = ctypes.cast(ctypes.c_void_p(ctypes.addressof(o)),
ctypes.POINTER(ctypes.c_ubyte*size))
for i in range(size):
assert p.contents[i] == 0xDD
def checkval(v, fmt):
res = struct.pack(fmt, v)
assert res == "\xDD" * len(res)
checkval(uninitialized2ctypes(rffi.CHAR), 'B')
checkval(uninitialized2ctypes(rffi.SHORT), 'h')
checkval(uninitialized2ctypes(rffi.INT), 'i')
checkval(uninitialized2ctypes(rffi.UINT), 'I')
checkval(uninitialized2ctypes(rffi.LONGLONG), 'q')
checkval(uninitialized2ctypes(rffi.DOUBLE), 'd')
checkobj(uninitialized2ctypes(rffi.INTP),
ctypes.sizeof(ctypes.c_void_p))
checkobj(uninitialized2ctypes(rffi.CCHARP),
ctypes.sizeof(ctypes.c_void_p))
S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
s = lltype.malloc(S, flavor='raw')
sc = lltype2ctypes(s)
checkval(sc.contents.x, 'l')
checkval(sc.contents.y, 'l')
lltype.free(s, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_substructures(self):
S1 = lltype.Struct('S1', ('x', lltype.Signed))
BIG = lltype.Struct('BIG', ('s1a', S1), ('s1b', S1))
s = lltype.malloc(BIG, flavor='raw')
s.s1a.x = 123
s.s1b.x = 456
sc = lltype2ctypes(s)
assert sc.contents.s1a.x == 123
assert sc.contents.s1b.x == 456
sc.contents.s1a.x += 1
sc.contents.s1b.x += 10
assert s.s1a.x == 124
assert s.s1b.x == 466
s.s1a.x += 3
s.s1b.x += 30
assert sc.contents.s1a.x == 127
assert sc.contents.s1b.x == 496
lltype.free(s, flavor='raw')
s = lltype.malloc(BIG, flavor='raw')
s1ac = lltype2ctypes(s.s1a)
s1ac.contents.x = 53
sc = lltype2ctypes(s)
assert sc.contents.s1a.x == 53
sc.contents.s1a.x += 1
assert s1ac.contents.x == 54
assert s.s1a.x == 54
s.s1a.x += 2
assert s1ac.contents.x == 56
assert sc.contents.s1a.x == 56
sc.contents.s1a.x += 3
assert s1ac.contents.x == 59
assert s.s1a.x == 59
t = ctypes2lltype(lltype.Ptr(BIG), sc)
assert t == s
assert t.s1a == s.s1a
assert t.s1a.x == 59
s.s1b.x = 8888
assert t.s1b == s.s1b
assert t.s1b.x == 8888
t1 = ctypes2lltype(lltype.Ptr(S1), s1ac)
assert t.s1a == t1
assert t1.x == 59
t1.x += 1
assert sc.contents.s1a.x == 60
lltype.free(s, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_recursive_struct(self):
SX = lltype.ForwardReference()
S1 = lltype.Struct('S1', ('p', lltype.Ptr(SX)), ('x', lltype.Signed))
SX.become(S1)
# a chained list
s1 = lltype.malloc(S1, flavor='raw')
s2 = lltype.malloc(S1, flavor='raw')
s3 = lltype.malloc(S1, flavor='raw')
s1.x = 111
s2.x = 222
s3.x = 333
s1.p = s2
s2.p = s3
s3.p = lltype.nullptr(S1)
sc1 = lltype2ctypes(s1)
sc2 = sc1.contents.p
sc3 = sc2.contents.p
assert not sc3.contents.p
assert sc1.contents.x == 111
assert sc2.contents.x == 222
assert sc3.contents.x == 333
sc3.contents.x += 1
assert s3.x == 334
s3.x += 2
assert sc3.contents.x == 336
lltype.free(s1, flavor='raw')
lltype.free(s2, flavor='raw')
lltype.free(s3, flavor='raw')
# a self-cycle
s1 = lltype.malloc(S1, flavor='raw')
s1.x = 12
s1.p = s1
sc1 = lltype2ctypes(s1)
assert sc1.contents.x == 12
assert (ctypes.addressof(sc1.contents.p.contents) ==
ctypes.addressof(sc1.contents))
s1.x *= 5
assert sc1.contents.p.contents.p.contents.p.contents.x == 60
lltype.free(s1, flavor='raw')
# a longer cycle
s1 = lltype.malloc(S1, flavor='raw')
s2 = lltype.malloc(S1, flavor='raw')
s1.x = 111
s1.p = s2
s2.x = 222
s2.p = s1
sc1 = lltype2ctypes(s1)
assert sc1.contents.x == 111
assert sc1.contents.p.contents.x == 222
assert (ctypes.addressof(sc1.contents.p.contents) !=
ctypes.addressof(sc1.contents))
assert (ctypes.addressof(sc1.contents.p.contents.p.contents) ==
ctypes.addressof(sc1.contents))
lltype.free(s1, flavor='raw')
lltype.free(s2, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_indirect_recursive_struct(self):
S2Forward = lltype.ForwardReference()
S1 = lltype.Struct('S1', ('p', lltype.Ptr(S2Forward)))
A2 = lltype.Array(lltype.Ptr(S1), hints={'nolength': True})
S2 = lltype.Struct('S2', ('a', lltype.Ptr(A2)))
S2Forward.become(S2)
s1 = lltype.malloc(S1, flavor='raw')
a2 = lltype.malloc(A2, 10, flavor='raw')
s2 = lltype.malloc(S2, flavor='raw')
s2.a = a2
a2[5] = s1
s1.p = s2
ac2 = lltype2ctypes(a2, normalize=False)
sc1 = ac2.contents.items[5]
sc2 = sc1.contents.p
assert (ctypes.addressof(sc2.contents.a.contents) ==
ctypes.addressof(ac2.contents))
lltype.free(s1, flavor='raw')
lltype.free(a2, flavor='raw')
lltype.free(s2, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_arrayofstruct(self):
S1 = lltype.Struct('S1', ('x', lltype.Signed))
A = lltype.Array(S1, hints={'nolength': True})
a = lltype.malloc(A, 5, flavor='raw')
a[0].x = 100
a[1].x = 101
a[2].x = 102
a[3].x = 103
a[4].x = 104
ac = lltype2ctypes(a, normalize=False)
assert ac.contents.items[0].x == 100
assert ac.contents.items[2].x == 102
ac.contents.items[3].x += 500
assert a[3].x == 603
a[4].x += 600
assert ac.contents.items[4].x == 704
a1 = ctypes2lltype(lltype.Ptr(A), ac)
assert a1 == a
assert a1[2].x == 102
aitem1 = ctypes2lltype(lltype.Ptr(S1),
ctypes.pointer(ac.contents.items[1]))
assert aitem1.x == 101
assert aitem1 == a1[1]
lltype.free(a, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_get_errno(self):
eci = ExternalCompilationInfo(includes=['string.h'])
if sys.platform.startswith('win'):
underscore_on_windows = '_'
else:
underscore_on_windows = ''
strlen = rffi.llexternal('strlen', [rffi.CCHARP], rffi.SIZE_T,
compilation_info=eci)
os_write = rffi.llexternal(underscore_on_windows+'write',
[rffi.INT, rffi.CCHARP, rffi.SIZE_T],
rffi.SIZE_T)
buffer = lltype.malloc(rffi.CCHARP.TO, 5, flavor='raw')
written = os_write(12312312, buffer, 5)
lltype.free(buffer, flavor='raw')
assert rffi.cast(lltype.Signed, written) < 0
# the next line is a random external function call,
# to check that it doesn't reset errno
strlen("hi!")
err = rposix.get_errno()
import errno
assert err == errno.EBADF
assert not ALLOCATED # detects memory leaks in the test
def test_call_with_struct_argument(self):
# XXX is there such a function in the standard C headers?
from pypy.rlib import _rsocket_rffi
buf = rffi.make(_rsocket_rffi.in_addr)
rffi.cast(rffi.CCHARP, buf)[0] = '\x01'
rffi.cast(rffi.CCHARP, buf)[1] = '\x02'
rffi.cast(rffi.CCHARP, buf)[2] = '\x03'
rffi.cast(rffi.CCHARP, buf)[3] = '\x04'
p = _rsocket_rffi.inet_ntoa(buf)
assert rffi.charp2str(p) == '1.2.3.4'
lltype.free(buf, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_storage_stays_around(self):
data = "hello, world!" * 100
A = lltype.Array(rffi.CHAR, hints={'nolength': True})
S = lltype.Struct('S', ('a', lltype.Ptr(A)))
s = lltype.malloc(S, flavor='raw')
lltype2ctypes(s) # force it to escape
s.a = lltype.malloc(A, len(data), flavor='raw')
# the storage for the array should not be freed by lltype even
# though the _ptr object appears to go away here
for i in xrange(len(data)):
s.a[i] = data[i]
for i in xrange(len(data)):
assert s.a[i] == data[i]
lltype.free(s.a, flavor='raw')
lltype.free(s, flavor='raw')
assert not ALLOCATED # detects memory leaks in the test
def test_arrayoffloat(self):
a = lltype.malloc(rffi.FLOATP.TO, 3, flavor='raw')
a[0] = rffi.r_singlefloat(0.0)
a[1] = rffi.r_singlefloat(1.1)
a[2] = rffi.r_singlefloat(2.2)
ac = lltype2ctypes(a, normalize=False)
assert ac.contents.items[0] == 0.0
assert abs(ac.contents.items[1] - 1.1) < 1E-6
assert abs(ac.contents.items[2] - 2.2) < 1E-6
b = ctypes2lltype(rffi.FLOATP, ac)
assert isinstance(b[0], rffi.r_singlefloat)
assert float(b[0]) == 0.0
assert isinstance(b[1], rffi.r_singlefloat)
assert abs(float(b[1]) - 1.1) < 1E-6
assert isinstance(b[2], rffi.r_singlefloat)
assert abs(float(b[2]) - 2.2) < 1E-6
def test_different_signatures(self):
fcntl_int = rffi.llexternal('fcntl', [rffi.INT, rffi.INT, rffi.INT],
rffi.INT)
fcntl_str = rffi.llexternal('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP],
rffi.INT)
fcntl_int(12345, 1, 0)
fcntl_str(12345, 3, "xxx")
fcntl_int(12345, 1, 0)
def test_llexternal_source(self):
eci = ExternalCompilationInfo(
separate_module_sources = ["int fn() { return 42; }"]
)
fn = rffi.llexternal('fn', [], rffi.INT, compilation_info=eci)
res = fn()
assert res == 42
def test_prebuilt_constant(self):
header = py.code.Source("""
#ifndef _SOME_H
#define _SOME_H
#include <stdlib.h>
static int x = 3;
char **z = NULL;
#endif /* _SOME_H */
""")
h_file = udir.join("some_h.h")
h_file.write(header)
eci = ExternalCompilationInfo(includes=['stdio.h', str(h_file.basename)],
include_dirs=[str(udir)])
get_x, set_x = rffi.CExternVariable(rffi.LONG, 'x', eci)
get_z, set_z = rffi.CExternVariable(rffi.CCHARPP, 'z', eci)
def f():
one = get_x()
set_x(13)
return one + get_x()
def g():
l = rffi.liststr2charpp(["a", "b", "c"])
try:
set_z(l)
return rffi.charp2str(get_z()[2])
finally:
rffi.free_charpp(l)
res = f()
assert res == 16
assert g() == "c"
def test_c_callback(self):
c_source = py.code.Source("""
int eating_callback(int arg, int(*call)(int))
{
return call(arg);
}
""")
eci = ExternalCompilationInfo(separate_module_sources=[c_source])
args = [rffi.INT, rffi.CCallback([rffi.INT], rffi.INT)]
eating_callback = rffi.llexternal('eating_callback', args, rffi.INT,
compilation_info=eci)
def g(i):
return i + 3
def f():
return eating_callback(3, g)
assert f() == 6
def test_qsort(self):
TP = rffi.CArrayPtr(rffi.INT)
a = lltype.malloc(TP.TO, 5, flavor='raw')
a[0] = 5
a[1] = 3
a[2] = 2
a[3] = 1
a[4] = 4
def compare(a, b):
if a[0] > b[0]:
return 1
else:
return -1
CALLBACK = rffi.CCallback([rffi.VOIDP, rffi.VOIDP], rffi.INT)
qsort = rffi.llexternal('qsort', [rffi.VOIDP, rffi.INT,
rffi.INT, CALLBACK], lltype.Void)
qsort(rffi.cast(rffi.VOIDP, a), 5, rffi.sizeof(rffi.INT), compare)
for i in range(5):
assert a[i] == i + 1
def test_array_type_bug(self):
A = lltype.Array(lltype.Signed)
a1 = lltype.malloc(A, 0, flavor='raw')
a2 = lltype.malloc(A, 0, flavor='raw')
c1 = lltype2ctypes(a1)
c2 = lltype2ctypes(a2)
assert type(c1) is type(c2)
|
166945
|
import subprocess
import unittest
from oeqa.sdk.case import OESDKTestCase
class PerlTest(OESDKTestCase):
@classmethod
def setUpClass(self):
if not (self.tc.hasHostPackage("nativesdk-perl") or
self.tc.hasHostPackage("perl-native")):
raise unittest.SkipTest("No perl package in the SDK")
def test_perl(self):
try:
cmd = "perl -e '$_=\"Uryyb, jbeyq\"; tr/a-zA-Z/n-za-mN-ZA-M/;print'"
output = self._run(cmd)
self.assertEqual(output, "Hello, world")
except subprocess.CalledProcessError as e:
self.fail("Unexpected exit %d (output %s)" % (e.returncode, e.output))
|
166955
|
from expungeservice.record_creator import RecordCreator
from expungeservice.models.record import Record
from tests.factories.case_factory import CaseFactory
def test_sort_by_case_date():
case1 = CaseFactory.create(case_number="1", date_location=["1/1/2018", "Multnomah"])
case2 = CaseFactory.create(case_number="2", date_location=["1/1/2019", "Multnomah"])
case3 = CaseFactory.create(case_number="3", date_location=["1/1/2020", "Multnomah"])
record = Record(tuple([case1, case2, case3]))
assert record.cases[0].summary.case_number == "1"
assert record.cases[1].summary.case_number == "2"
assert record.cases[2].summary.case_number == "3"
sorted_record = RecordCreator.sort_record_by_case_date(record)
assert sorted_record.cases[0].summary.case_number == "3"
assert sorted_record.cases[1].summary.case_number == "2"
assert sorted_record.cases[2].summary.case_number == "1"
def test_sort_if_all_dates_are_same():
case1 = CaseFactory.create(case_number="1")
case2 = CaseFactory.create(case_number="2")
case3 = CaseFactory.create(case_number="3")
record = Record(tuple([case1, case2, case3]))
assert record.cases[0].summary.case_number == "1"
assert record.cases[1].summary.case_number == "2"
assert record.cases[2].summary.case_number == "3"
sorted_record = RecordCreator.sort_record_by_case_date(record)
assert sorted_record.cases[0].summary.case_number == "1"
assert sorted_record.cases[1].summary.case_number == "2"
assert sorted_record.cases[2].summary.case_number == "3"
|
166959
|
import json
from onadata.apps.api.models import Team
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.team_viewset import TeamViewSet
class TestTeamViewSet(TestAbstractViewSet):
def setUp(self):
super(self.__class__, self).setUp()
self.view = TeamViewSet.as_view({
'get': 'list',
'post': 'create'
})
def test_teams_list(self):
self._team_create()
# access the url with an unauthorised user
request = self.factory.get('/')
response = self.view(request)
self.assertEqual(response.status_code, 401)
# access the url with an authorised user
request = self.factory.get('/', **self.extra)
response = self.view(request)
owner_team = {
'url':
'http://testserver/api/v1/teams/%s' % self.owner_team.pk,
'name': u'Owners',
'organization': 'denoinc',
'projects': [],
'users': [{'username': u'bob',
'first_name': u'Bob',
'last_name': u'',
'id': self.user.pk}
]
}
self.assertEqual(response.status_code, 200)
self.assertEqual(sorted(response.data), [owner_team, self.team_data])
def test_teams_get(self):
self._team_create()
view = TeamViewSet.as_view({
'get': 'retrieve'
})
request = self.factory.get('/', **self.extra)
response = view(request, pk=self.team.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, self.team_data)
def _team_create(self):
self._org_create()
data = {
'name': u'dreamteam',
'organization': self.company_data['org']
}
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
self.owner_team = Team.objects.get(
organization=self.organization.user,
name='%s#Owners' % (self.organization.user.username))
team = Team.objects.get(
organization=self.organization.user,
name='%s#%s' % (self.organization.user.username, data['name']))
data['url'] = 'http://testserver/api/v1/teams/%s' % team.pk
self.assertDictContainsSubset(data, response.data)
self.team_data = response.data
self.team = team
def test_teams_create(self):
self._team_create()
def test_add_user_to_team(self):
self._team_create()
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
view = TeamViewSet.as_view({
'post': 'members'
})
data = {'username': self.user.username}
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request, pk=self.team.pk)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data,
[self.user.username])
self.assertIn(self.team.group_ptr, self.user.groups.all())
def test_add_user_to_team_missing_username(self):
self._team_create()
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
view = TeamViewSet.as_view({
'post': 'members'
})
data = {}
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request, pk=self.team.pk)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data,
{'username': [u'This field is required.']})
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
def test_add_user_to_team_user_does_not_exist(self):
self._team_create()
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
view = TeamViewSet.as_view({
'post': 'members'
})
data = {'username': 'aboy'}
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request, pk=self.team.pk)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data,
{'username': [u'User `aboy` does not exist.']})
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
def test_remove_user_from_team(self):
self._team_create()
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
view = TeamViewSet.as_view({
'post': 'members',
'delete': 'members'
})
data = {'username': self.user.username}
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request, pk=self.team.pk)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data,
[self.user.username])
self.assertIn(self.team.group_ptr, self.user.groups.all())
request = self.factory.delete(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request, pk=self.team.pk)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data,
[])
self.assertNotIn(self.team.group_ptr, self.user.groups.all())
|
167002
|
import turtle as t
tim = t.Turtle()
scr = t.Screen()
def move_forward():
tim.forward(10)
def move_backward():
tim.backward(10)
def clockwise():
tim.setheading(tim.heading() - 10)
def anticlockwise():
tim.setheading(tim.heading() + 10)
def clear():
tim.clear()
tim.penup()
tim.home()
tim.pendown()
scr.listen()
scr.onkey(key="f", fun=move_forward)
scr.onkey(key="d", fun=move_backward)
scr.onkeypress(key="c", fun=clockwise)
scr.onkeypress(key="a", fun=anticlockwise)
scr.onkey(key="x", fun=clear)
scr.exitonclick()
|
167005
|
import os.path, sys
from ctypes import *
from modules.windows_thumbnailcache.lib.olefile import olefile
from modules.windows_thumbnailcache.lib.yjSysUtils import *
#from lib.yjSQLite3 import TSQLite3
import base64, hashlib
import struct
def exit(exit_code, msg=None):
if debug_mode: exit_code = 0
if msg: print(msg)
sys.exit(exit_code)
def _cast(buf, fmt):
if debug_mode: assert type(buf) is bytes
return cast(c_char_p(buf), POINTER(fmt)).contents
signJPG = 0xD8FF
signThumbnailCacheFile = 'CMMM'
signBMP = 0x4D42
class TThumbnailCacheFileHeader(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Signature', c_char * 4), # CMMM
('FormatVer', c_uint32), # $20 : FirstEntOffset=$10, $15 : FirstEntOffset=$0C
('CacheType', c_uint32),
('FirEntOffsetVx15', c_uint32),
('FirEntOffsetVx20', c_uint32),
('Unknown', c_uint32)
]
class TThumbnailCacheEntHeaderCommon(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Signature', c_char * 4), # CMMM
('EntLen', c_uint32),
('Unknown1', c_byte * 8),
('NameLen', c_uint32),
('ThumbnailOffset', c_uint32),
('ThumbnailLen', c_uint32)
]
class TThumbnailCacheEntHeader15(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('HC', TThumbnailCacheEntHeaderCommon),
('Unknown2', c_byte * 20)
]
class TThumbnailCacheEntHeader20(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('HC', TThumbnailCacheEntHeaderCommon),
('ThumbnailResX', c_uint32),
('ThumbnailResY', c_uint32),
('Unknown2', c_byte * 20)
]
class TThumbsFileParser:
def __init__(self, fileName, exportDirName, src_id=None):
self.fileName = fileName
self.exportDirName = exportDirName
self.one = None
err = False
if olefile.isOleFile(fileName):
self.ole = olefile.OleFileIO(fileName)
self.header = None
else:
self.ole = None
try:
with open(fileName, 'rb') as f:
data = f.read()
if len(data) <= sizeof(TThumbnailCacheFileHeader):
err = True
return
self.data = TDataAccess(data)
self.header = _cast(self.data.read(sizeof(TThumbnailCacheFileHeader)), TThumbnailCacheFileHeader)
if self.header.Signature.decode('utf-8') != signThumbnailCacheFile:
err = True
return
finally:
if err: self.header = None
def parse(self, sep):
#print(sep)
dirname = self.exportDirName
result = {'ThumbsData': [['Name', 'filesize', 'imagetype', 'Data', 'SHA1']]}
thumbsData = result['ThumbsData']
if sep == 'ole':
ole = self.ole
i = 0
for item in ole.listdir():
name = item[0]
#print('>', name, end='')
rec = []
rec.append(name)
with ole.openstream(item) as f:
f = ole.openstream(item)
data = f.read(100)
p = data.find(struct.pack('H', signJPG))
if p == -1: continue
f = ole.openstream(item)
data = TDataAccess(f.read()).data[p:]
i += 1
rec.append(data) # Data
h = hashlib.sha1()
h.update(data)
rec.append(h.hexdigest()) # SHA1
thumbsData.append(rec)
if dirname:
CreateDir(dirname)
with open('%s%c%s.jpg' % (dirname, PathDelimiter, name), 'wb') as f:
#print(' - exported', end='')
f.write(data)
#print('')
#print('\r\nWindows XP Version Exported. - %d files\r\n' % i)
else:
size_ThumbnailCacheEntHeader15 = sizeof(TThumbnailCacheEntHeader15)
size_ThumbnailCacheEntHeader20 = sizeof(TThumbnailCacheEntHeader20)
if debug_mode: assert size_ThumbnailCacheEntHeader15 == 48
data = self.data
dirname = self.exportDirName
fmtver = self.header.FormatVer
# result = {'ThumbsData': [['Name', 'ResXY', 'ImgType', 'Data', 'SHA1']] }
# thumbsData = result['ThumbsData']
if fmtver == 0x15:
nextOffset = self.header.FirEntOffsetVx15
entHeaderSize = size_ThumbnailCacheEntHeader15
typeEntHeaderRecord = TThumbnailCacheEntHeader15
else:
if debug_mode: assert fmtver in [0x1f, 0x20]
nextOffset = self.header.FirEntOffsetVx20
entHeaderSize = size_ThumbnailCacheEntHeader20
typeEntHeaderRecord = TThumbnailCacheEntHeader20
data.position = nextOffset
i = 0
while data.position < data.size:
data.position = nextOffset
buf = data.read(entHeaderSize)
eh = _cast(buf, typeEntHeaderRecord)
hc = _cast(buf, TThumbnailCacheEntHeaderCommon)
if hc.Signature.decode('utf-8') != signThumbnailCacheFile: break
nextOffset = (data.position - entHeaderSize) + hc.EntLen
if (hc.EntLen == 0) or (nextOffset >= data.size): break
if (hc.NameLen == 0) or (hc.ThumbnailLen == 0): continue
ThumbnailName = data.read(hc.NameLen).decode('utf-16').translate(
str.maketrans('?:\\/', '????')).replace('?', '')
#print(ThumbnailName, end='')
if hc.ThumbnailOffset > 0: data.position += hc.ThumbnailOffset
buf = data.read(hc.ThumbnailLen)
sign = TDataAccess(buf).read(2, 'H')
if sign == signJPG:
imgType = 'JPG'
elif sign == signBMP:
imgType = 'BMP'
else:
imgType = 'PNG'
if fmtver == 0x15:
ThumbnailSize = ''
else:
ThumbnailSize = '%dx%d' % (eh.ThumbnailResX, eh.ThumbnailResY)
i += 1
rec = []
rec.append(ThumbnailName) # Name
rec.append(ThumbnailSize) # filesize(ResXY)
rec.append(imgType) # ImgType
#print(buf)
rec.append(buf) # Data
h = hashlib.sha1()
h.update(buf)
rec.append(h.hexdigest()) # SHA1
thumbsData.append(rec)
if dirname:
CreateDir(dirname)
fn = ThumbnailName + '.' + imgType.lower()
with open('%s%c%s' % (dirname, PathDelimiter, fn), 'wb') as f:
#print(' - exported', end='')
f.write(buf)
#print('')
#print('\r\nWindows 10 Version Exported. - %d files\r\n' % i)
return result
def printHelp():
print(
r"""
Usage:
ThumbnailParser.py <thumbs.db Filename> <Output .db Filename> [/export:<Path to export>]
>python ThumbnailParser.py
>python ThumbnailParser.py c:\samples\thumbs.db re.db
    >python ThumbnailParser.py c:\samples\thumbs.db re.db /export:c:\export_dir ; saves the images in thumbs.db to the folder given by /export.
""")
def main(srcfile, app_path, export_dir):
# argc = len(argv)
# if argc <= 2:
# printHelp()
# exit(0)
# optExport = '/export:'
# export_dir = None
# optExport = findCmdLineSwitch(argv, optExport)
# if optExport:
# export_dir = ExcludeTrailingBackslash(optExport)
# if os.path.isfile(export_dir): exit(1, "Error: It's file")
#fn = argv[1]
fn = srcfile
    # Collect the source files (src_files) to process.
src_files = []
if ExtractFilePath(fn) == '': fn = app_path + fn
if FileExists(fn):
src_files.append(fn)
else:
exit(1, 'Error: File not found - "%s"' % fn)
'''
dest_file = argv[2]
if ExtractFilePath(dest_file) == '': dest_file = app_path + dest_file
'''
'''
    # Create the result db.
if FileExists(dest_file): os.remove(dest_file)
DDL = ['CREATE TABLE ThumbsData (_id integer PRIMARY KEY AUTOINCREMENT, Name VARCHAR(255), Data STRING, SHA1 VARCHAR(20));']
db = TSQLite3(dest_file)
if db.conn is None:
exit(1, 'Error: Cannot create the database connection.')
for sql in DDL:
db.execSQL(sql)
def insertDatasetIntoTable(db, table, dataset):
if len(dataset) == 1: return
if debug_mode: assert len(dataset[0]) == len(dataset[1])
sql = db.getInsertSQL(table, dataset[0])
del dataset[0]
if db.execmanySQL(sql, dataset):
db.commit()
'''
#print('Processing...')
for fn in src_files:
#print(ExtractFileName(fn))
ThumbsFileParser = TThumbsFileParser(fn, export_dir)
if ThumbsFileParser.ole != None and ThumbsFileParser.header == None:
result = ThumbsFileParser.parse('ole')
elif ThumbsFileParser.ole == None and ThumbsFileParser.header != None:
result = ThumbsFileParser.parse('cmmm')
elif ThumbsFileParser.ole == None and ThumbsFileParser.header == None:
print('Error: Unknown format')
continue
#if debug_mode:
# assert len(result['ThumbsData'][0]) == 3
return result
'''
    # Save the results into the db.
tables = ['ThumbsData']
for table in tables:
insertDatasetIntoTable(db, table, result[table])
'''
#print('Finished.')
# if sys.version_info < (3, 8):
# print('\'%s\' \r\nError: \'%s\' works on Python v3.8 and above.' % (sys.version, ExtractFileName(__file__)))
# exit(1)
# if __name__ == "__main__":
# app_path = IncludeTrailingBackslash(os.path.dirname(os.path.abspath(__file__)))  # current source path
# main(sys.argv, len(sys.argv))
|
167018
|
class EvaluatorTestCases:
expressions = [
("1 2 + 2.5 * 2 5 SUM", 14.5, float, 3),
("1 2 3 SUM 4 5 SUM 6 7 SUM", sum(range(8)), int, 3),
("1 2 3 SUM 4 5 SUM 6 7 SUM 8.5 *", sum(range(8)) * 8.5, float, 4),
]
not_ok = [
"""
void test(){
int a; int b; a = int; b = int; int a /* scope violation*/
}
""",
"""
void test(){
int a; int b; {
int a; int b; {
int c; int a;
int q; int q;
}
}
}
""",
"""void a(){ }; void a(){ }""",
"""int i; i = str""",
"""int b; int b;"""
]
ok = [
"""
void test(){
int a; int b; a = int; b = int;
}
""",
"""
void test(){
int a; int b; a = int; b = int; {
int a; int b; {
int d; int e;
}
}
}
""",
"""str i; i = str""",
"""int b; int c;"""
]
|
167036
|
import unittest
import pytest
from tfsnippet.utils import BaseRegistry, ClassRegistry
class RegistryTestCase(unittest.TestCase):
def test_base_registry(self):
a = object()
b = object()
# test not ignore case
r = BaseRegistry(ignore_case=False)
self.assertFalse(r.ignore_case)
r.register('a', a)
self.assertIs(r.get('a'), a)
with pytest.raises(KeyError, match='Object not registered: \'A\''):
_ = r.get('A')
self.assertListEqual(list(r), ['a'])
with pytest.raises(KeyError, match='Object already registered: \'a\''):
_ = r.register('a', a)
with pytest.raises(KeyError, match='Object not registered: \'b\''):
_ = r.get('b')
r.register('A', b)
self.assertIs(r.get('A'), b)
self.assertListEqual(list(r), ['a', 'A'])
# test ignore case
r = BaseRegistry(ignore_case=True)
self.assertTrue(r.ignore_case)
r.register('a', a)
self.assertIs(r.get('a'), a)
self.assertIs(r.get('A'), a)
self.assertListEqual(list(r), ['a'])
with pytest.raises(KeyError, match='Object already registered: \'A\''):
_ = r.register('A', a)
with pytest.raises(KeyError, match='Object not registered: \'b\''):
_ = r.get('b')
r.register('B', b)
self.assertIs(r.get('b'), b)
self.assertIs(r.get('B'), b)
self.assertListEqual(list(r), ['a', 'B'])
def test_class_registry(self):
r = ClassRegistry()
with pytest.raises(TypeError, match='`obj` is not a class: 123'):
r.register('int', 123)
class MyClass(object):
def __init__(self, value, message):
self.value = value
self.message = message
r.register('MyClass', MyClass)
self.assertIs(r.get('MyClass'), MyClass)
o = r.construct('MyClass', 123, message='message')
self.assertIsInstance(o, MyClass)
self.assertEqual(o.value, 123)
self.assertEqual(o.message, 'message')
|
167037
|
import argparse
import torch.nn.functional as F
from .. import load_graph_data
from ..train import train_and_eval
from ..train import register_general_args
from .gat import GAT
def gat_model_fn(args, data):
heads = ([args.n_heads] * args.n_layers) + [args.n_out_heads]
return GAT(data.graph,
args.n_hidden_layers,
data.n_feats,
args.n_hidden_units,
data.n_classes,
heads,
F.elu,
args.in_drop,
args.attn_drop,
args.negative_slope,
args.residual)
def register_gat_args(parser):
parser.add_argument("--n-hidden-units", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-hidden-layers", type=int, default=1,
help="number of hidden gat layers")
parser.add_argument("--n-heads", type=int, default=8,
help="number of hidden attention heads")
parser.add_argument("--n-out-heads", type=int, default=1,
help="number of output attention heads")
parser.add_argument("--in-drop", type=float, default=.6,
help="input feature dropout")
parser.add_argument("--attn-drop", type=float, default=.6,
help="attention dropout")
parser.add_argument("--residual", action="store_true", default=False,
help="use residual connection")
parser.add_argument('--negative-slope', type=float, default=0.2,
help="the negative slope of leaky relu")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GAT')
register_general_args(parser)
register_gat_args(parser)
args = parser.parse_args()
print('Parsed args:', args)
train_and_eval(gat_model_fn, load_graph_data.load(args), args)
|
167076
|
import random
import structlog
from shapely.geometry import Point
import matplotlib.pyplot as plt
from matplotlib import cm
from deepcomp.env.util.utility import log_utility, step_utility, linear_clipped_utility
from deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY, SUPPORTED_UTILITIES
class User:
"""
A user/UE moving around in the world and requesting mobile services
    Connections to BSs are checked before connecting and after every move to determine whether a connection is lost or still stable.
"""
def __init__(self, id, map, pos_x, pos_y, movement, util_func='log', dr_req=1):
"""
Create new UE object
:param id: Unique ID of UE (string)
:param map: Map object representing the playground/world
:param pos_x: x-coord of starting position or 'random'
:param pos_y: y-coord of starting position or 'random'
:param movement: Movement utility object implementing the movement of the UE
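        :param util_func: Utility function mapping data rate to utility: 'log', 'step', or 'linear'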
:param dr_req: Data rate requirement by UE for successful service
"""
self.id = id
self.map = map
self.movement = movement
assert util_func in SUPPORTED_UTILITIES, \
f"Utility function {util_func} not supported. Supported: {SUPPORTED_UTILITIES}"
self.util_func = util_func
self.dr_req = dr_req
# dict of connected BS: BS (only connected BS are keys!) --> data rate of connection
self.bs_dr = {}
# own RNG for reproducibility; global random shares state that's manipulated by RL during training
self.rng = random.Random()
self.init_pos_x = pos_x
self.init_pos_y = pos_y
self.pos = None
self.reset_pos()
self.movement.reset()
# exponentially weighted moving average data rate
self.ewma_dr = 0
self.log = structlog.get_logger(id=self.id, pos=str(self.pos), ewma_dr=self.ewma_dr,
conn_bs=list(self.bs_dr.keys()), dr_req=self.dr_req)
self.log.info('UE init')
def __repr__(self):
return str(self.id)
# compare and hash UEs based on their ID only
def __eq__(self, other):
if type(other) is type(self):
return self.id == other.id
return False
def __hash__(self):
return hash(self.id)
@property
def curr_dr(self):
"""Current data rate the UE gets through all its BS connections"""
dr = sum(list(self.bs_dr.values()))
self.log.debug("Current data rate", curr_dr=dr)
return dr
@property
def dr_req_satisfied(self):
"""Whether or not the UE's data rate requirement is satisfied by its current total data rate"""
return self.curr_dr >= self.dr_req
@property
def utility(self):
"""Utility property based on the current data rate and utility function"""
return self.dr_to_utility(self.curr_dr)
def dr_to_utility(self, dr):
"""Utility function to map given data rate to utility for the UE"""
assert self.util_func in SUPPORTED_UTILITIES, \
f"Utility function {self.util_func} not supported. Supported: {SUPPORTED_UTILITIES}"
if self.util_func == 'log':
return log_utility(dr)
if self.util_func == 'step':
return step_utility(dr, self.dr_req)
if self.util_func == 'linear':
return linear_clipped_utility(dr)
# unknown utility not implemented
raise NotImplementedError(f"Utility function {self.util_func} not implemented!")
def seed(self, seed=None):
self.rng.seed(seed)
self.movement.seed(seed)
def reset_pos(self):
"""(Re)set position based on initial position x and y as Point. Resolve 'random'."""
# set pos_x
pos_x = self.init_pos_x
if pos_x == 'random':
pos_x = self.rng.randint(0, int(self.map.width))
# set pos_y
pos_y = self.init_pos_y
if pos_y == 'random':
pos_y = self.rng.randint(0, int(self.map.height))
# set pos as Point
self.pos = Point(pos_x, pos_y)
def reset(self):
"""Reset UE position, movement, and connections."""
self.reset_pos()
self.movement.reset()
self.bs_dr = {}
self.ewma_dr = 0
def plot(self, ax, radius=2, details=False):
"""
        Plot the UE as a filled circle with a given radius and its ID. The color runs from red to green according to the utility.
:param ax: Matplotlib axis to plot on
:param radius: Radius of the circle
:param details: Whether to show the UE's data rate and utility
:return: A list of created matplotlib artists
"""
# show utility as red to yellow to green. use color map for [0,1) --> normalize utility first
colormap = cm.get_cmap('RdYlGn')
norm = plt.Normalize(MIN_UTILITY, MAX_UTILITY)
color = colormap(norm(self.utility))
artists = ax.plot(*self.pos.buffer(radius).exterior.xy, color=color)
artists.extend(ax.fill(*self.pos.buffer(radius).exterior.xy, color=color))
artists.append(ax.annotate(self.id, xy=(self.pos.x, self.pos.y), ha='center', va='center'))
if details:
# show curr data rate and utility below the UE
artists.append(ax.annotate(f'r: {self.curr_dr:.2f}', xy=(self.pos.x, self.pos.y - radius - 2),
ha='center', va='center'))
artists.append(ax.annotate(f'qoe: {self.utility:.2f}', xy=(self.pos.x, self.pos.y - radius - 6),
ha='center', va='center'))
return artists
def update_curr_dr(self):
"""Update the current data rate of all BS connections according to the current situation (pos & assignment)"""
for bs in self.bs_dr:
self.bs_dr[bs] = bs.data_rate(self)
def update_ewma_dr(self, weight=0.9):
"""
Update the exp. weighted moving avg. of this UE's current data rate:
`EWMA(t) = weight * dr + (1-weight) * EWMA(t-1)`
Used as historic avg. rate for proportional-fair sharing. Called after movement.
:param weight: Weight for EWMA in [0, 1]. The higher, the more focus on new/current dr and less on previous.
"""
self.ewma_dr = weight * self.curr_dr + (1 - weight) * self.ewma_dr
self.log = self.log.bind(ewma_dr=self.ewma_dr)
def move(self):
"""
Do one step: Move according to own movement pattern. Check for lost connections. Update EWMA data rate.
:return: Number of connections lost through movement
"""
self.pos = self.movement.step(self.pos)
num_lost_connections = self.check_bs_connection()
self.log = self.log.bind(pos=str(self.pos))
self.update_ewma_dr()
self.log.debug("User move", lost_connections=num_lost_connections)
return num_lost_connections
def check_bs_connection(self):
"""
Check if assigned BS connections are still stable (after move), else remove.
:return: Number of removed/lost connections
"""
remove_bs = []
for bs in self.bs_dr:
if not bs.can_connect(self.pos):
self.log.info("Losing connection to BS", bs=bs)
remove_bs.append(bs)
# remove/disconnect bs
for bs in remove_bs:
self.disconnect_from_bs(bs)
return len(remove_bs)
def connect_to_bs(self, bs, disconnect=False, return_connected=False):
"""
Try to connect to specified basestation. Return if successful.
:param bs: Basestation to connect to
:param disconnect: If True, disconnect from BS if it was previously connected.
:param return_connected: If True, return whether the UE is now connected to the BS or not.
Else, return if the (dis-)connect was successful.
:return: True if (dis-)connected successfully. False if out of range. If return_connected, return if connected.
"""
log = self.log.bind(bs=bs, disconnect=disconnect, conn_bs=list(self.bs_dr.keys()))
# already connected
if bs in self.bs_dr.keys():
if disconnect:
self.disconnect_from_bs(bs)
log.info("Disconnected")
if return_connected:
return False
else:
log.info("Staying connected")
return True
# not yet connected
if bs.can_connect(self.pos):
# add BS to connections; important: initialize with data rate
# also important: initialize before adding connection to bs.conn_ues; affects how data rate is calc
self.bs_dr[bs] = bs.data_rate(self)
bs.conn_ues.append(self)
self.log = self.log.bind(conn_bs=list(self.bs_dr.keys()))
log.info("Connected")
return True
# log.info("Cannot connect")
return False
def disconnect_from_bs(self, bs):
"""Disconnect from given BS. Assume BS is currently connected."""
assert bs in self.bs_dr.keys(), "Not connected to BS --> Cannot disconnect"
del self.bs_dr[bs]
bs.conn_ues.remove(self)
self.log = self.log.bind(conn_bs=list(self.bs_dr.keys()))
def disconnect_from_all(self):
"""Disconnect from all BS. Necessary before removing UE."""
# copy list of all curr BS to avoid iterating over dict with changing size
bs_list = list(self.bs_dr.keys())
for bs in bs_list:
self.disconnect_from_bs(bs)
def ues_at_same_bs(self):
"""Return set of UEs that are currently connected to any of the BS that this UE is connected to"""
ue_set = set()
for bs in self.bs_dr:
ue_set.update(set(bs.conn_ues))
self.log.debug('UEs at same BS', ue_set=ue_set)
return ue_set
|
167099
|
import torch.nn as nn
import torch.nn.functional as F
from fcdd.models.bases import FCDDNet
class FCDD_CNN224_VARK(FCDDNet):
def __init__(self, in_shape, k=3, **kwargs):
assert k % 2 == 1, 'kernel size needs to be odd'
p = (k - 1) // 2
super().__init__(in_shape, **kwargs)
self.conv1 = self._create_conv2d(in_shape[0], 32, k, bias=self.bias, padding=p)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=self.bias)
self.pool1 = self._create_maxpool2d(3, 2, 1) # 32 x 112 x 112
self.conv2 = self._create_conv2d(32, 128, k, bias=self.bias, padding=p)
self.bn2d2 = nn.BatchNorm2d(128, eps=1e-04, affine=self.bias)
self.pool2 = self._create_maxpool2d(3, 2, 1) # 128 x 56 x 56
self.conv3 = self._create_conv2d(128, 256, k, bias=self.bias, padding=p)
self.bn2d3 = nn.BatchNorm2d(256, eps=1e-04, affine=self.bias)
self.conv4 = self._create_conv2d(256, 256, k, bias=self.bias, padding=p)
self.bn2d4 = nn.BatchNorm2d(256, eps=1e-04, affine=self.bias)
self.pool3 = self._create_maxpool2d(3, 2, 1) # 256 x 28 x 28
self.conv5 = self._create_conv2d(256, 128, k, bias=self.bias, padding=p)
self.encoder_out_shape = (128, 28, 28)
self.conv_final = self._create_conv2d(128, 1, 1, bias=self.bias)
def forward(self, x, ad=True):
x = self.conv1(x)
x = F.leaky_relu(self.bn2d1(x))
x = self.pool1(x)
x = self.conv2(x)
x = F.leaky_relu(self.bn2d2(x))
x = self.pool2(x)
x = self.conv3(x)
x = F.leaky_relu(self.bn2d3(x))
x = self.conv4(x)
x = F.leaky_relu(self.bn2d4(x))
x = self.pool3(x)
x = self.conv5(x)
if ad:
x = self.conv_final(x) # n x heads x h' x w'
return x
class FCDD_CNN224_3K(FCDD_CNN224_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=3, **kwargs)
class FCDD_CNN224_5K(FCDD_CNN224_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=5, **kwargs)
class FCDD_CNN224_7K(FCDD_CNN224_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=7, **kwargs)
class FCDD_CNN224_9K(FCDD_CNN224_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=9, **kwargs)
class FCDD_CNN224_11K(FCDD_CNN224_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=11, **kwargs)
class FCDD_CNN224_13K(FCDD_CNN224_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=13, **kwargs)
class FCDD_CNN32_VARK(FCDDNet):
def __init__(self, in_shape, k=3, **kwargs):
assert k % 2 == 1, 'kernel size needs to be odd'
p = (k - 1) // 2
super().__init__(in_shape, **kwargs)
self.conv1 = self._create_conv2d(in_shape[0], 128, k, bias=self.bias, padding=p)
self.bn2d1 = nn.BatchNorm2d(128, eps=1e-04, affine=self.bias)
self.pool1 = self._create_maxpool2d(2, 2)
self.conv2 = self._create_conv2d(128, 256, 3, bias=self.bias, padding=1)
self.bn2d2 = nn.BatchNorm2d(256, eps=1e-04, affine=self.bias)
self.pool2 = self._create_maxpool2d(2, 2)
self.conv3 = self._create_conv2d(256, 128, 3, bias=self.bias, padding=1)
self.conv_final = self._create_conv2d(128, 1, 1, bias=self.bias)
def forward(self, x, ad=True):
x = self.conv1(x)
x = self.pool1(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool2(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
if ad:
x = self.conv_final(x) # n x heads x h' x w'
return x
class FCDD_CNN32_3K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=3, **kwargs)
class FCDD_CNN32_5K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=5, **kwargs)
class FCDD_CNN32_7K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=7, **kwargs)
class FCDD_CNN32_9K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=9, **kwargs)
class FCDD_CNN32_11K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=11, **kwargs)
class FCDD_CNN32_13K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=13, **kwargs)
class FCDD_CNN32_15K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=15, **kwargs)
class FCDD_CNN32_17K(FCDD_CNN32_VARK):
def __init__(self, *args, **kwargs):
super().__init__(*args, k=17, **kwargs)
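# Hedged usage sketch (assumes `import torch`, FCDDNet's usual constructor kwargs, and stride-1 convs
# from _create_conv2d; treat these as assumptions, not guarantees of this repo's API):
#     net = FCDD_CNN224_3K((3, 224, 224))
#     out = net(torch.randn(8, 3, 224, 224))   # pseudo-anomaly heatmap of shape (8, 1, 28, 28)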
|
167126
|
from hwt.hdl.constants import INTF_DIRECTION
from hwt.synthesizer.param import Param
from hwt.synthesizer.unit import Unit
class UnitWrapper(Unit):
"""
Class which creates a wrapper around the original unit instance;
the original unit is stored inside as a subunit named baseUnit.
:note: This is an example of lazy-loaded interfaces
and of generating external interfaces based on the internal structure.
"""
def __init__(self, baseUnit: Unit):
"""
:param baseUnit: An :class:`hwt.synthesizer.unit.Unit` instance which should be hidden in this wrapper.
"""
super(UnitWrapper, self).__init__()
self._baseUnit = baseUnit
def _copyParamsAndInterfaces(self):
for p in self._baseUnit._params:
myP = Param(p.get_value())
self._registerParameter(p._name, myP)
myP.set_value(p.get_value())
origToWrapInfMap = {}
for intf in self.baseUnit._interfaces:
# clone interface
myIntf = intf.__copy__()
# sub-interfaces are not instantiated yet
# myIntf._direction = intf._direction
myIntf._direction = INTF_DIRECTION.opposite(intf._direction)
self._registerInterface(intf._name, myIntf)
object.__setattr__(self, intf._name, myIntf)
origToWrapInfMap[intf] = myIntf
ei = self._ctx.interfaces
for i in self._interfaces:
self._loadInterface(i, True)
assert i._isExtern
i._signalsForInterface(self._ctx, ei,
self._store_manager.name_scope,
reverse_dir=True)
return origToWrapInfMap
def _getDefaultName(self):
return self._baseUnit._getDefaultName()
def _get_hdl_doc(self):
return self._baseUnit._get_hdl_doc()
def _connectBaseUnitToThisWrap(self, origToWrapInfMap):
for baseIntf, wrapIntf in origToWrapInfMap.items():
if baseIntf._direction is INTF_DIRECTION.MASTER:
wrapIntf(baseIntf)
else:
baseIntf(wrapIntf)
def _impl(self):
self.baseUnit = self._baseUnit
origToWrapInfMap = self._copyParamsAndInterfaces()
self._connectBaseUnitToThisWrap(origToWrapInfMap)
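# Hedged usage sketch (MySimpleUnit is illustrative and to_rtl_str is assumed to come from
# hwt.synthesizer.utils; verify against the installed hwt version):
#     from hwt.synthesizer.utils import to_rtl_str
#     wrapped = UnitWrapper(MySimpleUnit())
#     print(to_rtl_str(wrapped))   # the original unit appears as a subunit named baseUnit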
|
167146
|
from ..responses import *
from ..utils import send_json, pop_args, byte_to_dict, get_user
from django.contrib.auth.models import User as Default_User
from ..models import User
from django.contrib.auth.hashers import make_password
from django.views import View
import sys
sys.path.append("../../")
import json
from DjangoCRUDBoard import settings
class UserView(View):
def get(self, request):
keys = ['username']
dic = pop_args(request.GET, *keys)
if None in dic.values():
return send_json(illegalArgument)
users = User.objects.filter(username=dic['username'])
result = get_user(users)
return send_json(result)
def post(self, request):
keys = ['username', 'nickname', 'email', 'password']
dic = pop_args(request.POST, *keys)
if None in dic.values():
return send_json(illegalArgument)
filtered = User.objects.filter(username=dic['username'])
if filtered.count() != 0:
return send_json(userAlreadyRegistered)
filtered = User.objects.filter(nickname=dic['nickname'])
if filtered.count() != 0:
return send_json(userAlreadyRegistered)
User.objects.create_user(**dic)
return send_json(registerSucceed)
def delete(self, request):
keys = ['username', 'email', 'password']
request_dict = byte_to_dict(request.body)
dic = pop_args(request_dict, *keys)
if None in dic.values():
return send_json(illegalArgument)
filtered = User.objects.filter(username=dic['username'], email=dic['email'])
if filtered.count() == 0:
return send_json(noUser)
if not filtered[0].check_password(dic['password']):
return send_json(userDoesNotMatch)
filtered.delete()
return send_json(deleteUserSucceed)
def put(self, request):
original = ['username', 'nickname', 'email', 'password']
to_modified = ['m_username', 'm_nickname', 'm_email', 'm_password']
keys = [*original, *to_modified]
request_dict = byte_to_dict(request.body)
divide_index = len(original)  # split point between the original fields and the m_* fields
dic = pop_args(request_dict, *keys)
# if any of the username, email, password parameters is missing
if None in list(dic.values())[:divide_index]:
return send_json(illegalArgument)
# if none of m_username, m_email, m_password are given
if not any(list(dic.values())[divide_index:]):
return send_json(illegalModifyArgument)
filtered = User.objects.filter(username=dic['username'], email=dic['email'])
if filtered.count() == 0:
return send_json(noUser)
if not filtered[0].check_password(dic['password']):
return send_json(userDoesNotMatch)
# check whether the username to change to already exists
filtered_exist = User.objects.filter(username=dic['m_username'])
if filtered_exist.count() != 0:
return send_json(userAlreadyRegistered)
# TODO: decide how to handle the case where the new username, email or password equals the current one
update_value = dic.copy()
del update_value['m_username'], update_value['m_email'], update_value['m_password']
'''
Copy the dict and strip off the m_* part. Calling filtered.update in every if branch would be
inefficient, so keep the existing values as defaults: even if only some modified fields are given
(e.g. only m_username without m_email or m_password), the missing columns keep their current
values and a single update call is enough.
'''
if dic['m_username']:
update_value['username'] = dic['m_username']
if dic['m_email']:
update_value['email'] = dic['m_email']
if dic['m_password']:
update_value['password'] = dic['m_password']
filtered.update(username=update_value['username'], email=update_value['email'], password=make_password(update_value['password']))
# Note: this approach skips normalize_email/normalize_username; check the create_user function to confirm that is acceptable.
return send_json(modifyUserSucceed)
|
167156
|
import numpy as np
from transonic import Array, const
from transonic.backends import backends
backend = backends["cython"]
type_formatter = backend.type_formatter
def compare(result, dtype, ndim, memview, mem_layout=None, positive_indices=None):
A = Array[dtype, ndim, memview, mem_layout, positive_indices]
assert A.format_as_backend_type(type_formatter) == result
def test_memview():
memview = "memview"
compare("np.int_t[:, ::1]", int, "2d", memview, "C")
compare("np.int_t[:, :, :]", int, "3d", memview, "strided")
compare("np.int32_t[::1, :]", np.int32, "2d", memview, "F")
def test_array():
memview = None
compare('np.ndarray[np.int_t, ndim=2, mode="c"]', int, "2d", memview, "C")
compare("np.ndarray[np.int_t, ndim=3]", int, "3d", memview, "strided")
compare(
'np.ndarray[np.int32_t, ndim=2, mode="f"]', np.int32, "2d", memview, "F"
)
compare(
"np.ndarray[np.int_t, ndim=2, negative_indices=False]",
int,
"2d",
memview,
positive_indices="positive_indices",
)
def test_const():
A = Array[int, "2d"]
assert "const " + A.format_as_backend_type(type_formatter) == const(
A
).format_as_backend_type(type_formatter)
|
167157
|
from collections import defaultdict
class Solution(object):
def isScramble(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
if len(s1) != len(s2) or set(s1) != set(s2):
return False
if s1 == s2:
return True
if not s1 and not s2:
return True
return self.recurse(s1, s2) or self.recurse(s1, s2[::-1])
def recurse(self, s1, s2):
d = defaultdict(int)
breakpoints = []
for i, c in enumerate(s1[:-1]):
d[c] += 1
c2 = s2[i]
d[c2] -= 1
same = True
for cc in d:
if d[cc] != 0:
same = False
break
if same:
breakpoints.append(i)
for b in breakpoints:
if self.isScramble(s1[: b + 1], s2[: b + 1]) and self.isScramble(
s1[b + 1 :], s2[b + 1 :]
):
return True
return False
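# Example: Solution().isScramble("great", "rgeat") returns True. "great" splits into "gr" + "eat",
# and swapping the children of the "gr" node yields "rg" + "eat" == "rgeat".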
|
167168
|
from citrination_client.data import DatasetFile
def test_can_crud_path():
"""
Tests that full get/set/delete functionality is
available for the path property
"""
path = "path"
d = DatasetFile(path)
assert d.path is path
d.path = path
assert d.path is path
del(d.path)
assert d.path is None
def test_can_crud_url():
"""
Tests that full get/set/delete functionality is
available for the url property
"""
path = "path"
d = DatasetFile(path)
url = "http://mysite.com"
assert d.url is None
d.url = url
assert d.url is url
del(d.url)
assert d.url is None
|
167177
|
import copy
import os
import torch
import torchvision
import warnings
import math
import utils.misc
import numpy as np
import os.path as osp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import models.modified_resnet_cifar as modified_resnet_cifar
import models.modified_resnetmtl_cifar as modified_resnetmtl_cifar
import models.modified_linear as modified_linear
from PIL import Image
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
from tensorboardX import SummaryWriter
from utils.compute_features import compute_features
from utils.process_mnemonics import process_mnemonics
from utils.compute_accuracy import compute_accuracy
from trainer.incremental import incremental_train_and_eval
from utils.misc import *
from utils.process_fp import process_inputs_fp
warnings.filterwarnings('ignore')
class Trainer(object):
def __init__(self, the_args):
self.args = the_args
self.log_dir = './logs/'
if not osp.exists(self.log_dir):
os.mkdir(self.log_dir)
self.save_path = self.log_dir + self.args.dataset + '_nfg' + str(self.args.nb_cl_fg) + '_ncls' + str(self.args.nb_cl) + '_nproto' + str(self.args.nb_protos)
self.save_path += '_' + self.args.method
if not osp.exists(self.save_path):
os.mkdir(self.save_path)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
self.transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023))])
self.trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=self.transform_train)
self.testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=self.transform_test)
self.evalset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=self.transform_test)
self.network = modified_resnet_cifar.resnet32
self.network_mtl = modified_resnetmtl_cifar.resnetmtl32
self.lr_strat_first_phase = [int(160*0.5), int(160*0.75)]
self.lr_strat = [int(self.args.epochs*0.5), int(self.args.epochs*0.75)]
self.dictionary_size = self.args.dictionary_size
def map_labels(self, order_list, Y_set):
map_Y = []
for idx in Y_set:
map_Y.append(order_list.index(idx))
map_Y = np.array(map_Y)
return map_Y
def train(self):
self.train_writer = SummaryWriter(logdir=self.save_path)
dictionary_size = self.dictionary_size
top1_acc_list_cumul = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
top1_acc_list_ori = np.zeros((int(self.args.num_classes/self.args.nb_cl), 4, self.args.nb_runs))
X_train_total = np.array(self.trainset.train_data)
Y_train_total = np.array(self.trainset.train_labels)
X_valid_total = np.array(self.testset.test_data)
Y_valid_total = np.array(self.testset.test_labels)
np.random.seed(1993)
for iteration_total in range(self.args.nb_runs):
order_name = osp.join(self.save_path, "seed_{}_{}_order_run_{}.pkl".format(1993, self.args.dataset, iteration_total))
print("Order name:{}".format(order_name))
if osp.exists(order_name):
print("Loading orders")
order = utils.misc.unpickle(order_name)
else:
print("Generating orders")
order = np.arange(self.args.num_classes)
np.random.shuffle(order)
utils.misc.savepickle(order, order_name)
order_list = list(order)
print(order_list)
np.random.seed(self.args.random_seed)
X_valid_cumuls = []
X_protoset_cumuls = []
X_train_cumuls = []
Y_valid_cumuls = []
Y_protoset_cumuls = []
Y_train_cumuls = []
alpha_dr_herding = np.zeros((int(self.args.num_classes/self.args.nb_cl),dictionary_size,self.args.nb_cl),np.float32)
prototypes = np.zeros((self.args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
for orde in range(self.args.num_classes):
prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]
start_iter = int(self.args.nb_cl_fg/self.args.nb_cl)-1
for iteration in range(start_iter, int(self.args.num_classes/self.args.nb_cl)):
if iteration == start_iter:
last_iter = 0
tg_model = self.network(num_classes=self.args.nb_cl_fg)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("Out_features:", out_features)
ref_model = None
free_model = None
ref_free_model = None
elif iteration == start_iter+1:
last_iter = iteration
ref_model = copy.deepcopy(tg_model)
print("Fusion Mode: "+self.args.fusion_mode)
tg_model = self.network(num_classes=self.args.nb_cl_fg)
ref_dict = ref_model.state_dict()
tg_dict = tg_model.state_dict()
tg_dict.update(ref_dict)
tg_model.load_state_dict(tg_dict)
tg_model.to(self.device)
in_features = tg_model.fc.in_features
out_features = tg_model.fc.out_features
print("Out_features:", out_features)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features, self.args.nb_cl)
new_fc.fc1.weight.data = tg_model.fc.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = out_features*1.0 / self.args.nb_cl
else:
last_iter = iteration
ref_model = copy.deepcopy(tg_model)
in_features = tg_model.fc.in_features
out_features1 = tg_model.fc.fc1.out_features
out_features2 = tg_model.fc.fc2.out_features
print("Out_features:", out_features1+out_features2)
new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, self.args.nb_cl)
new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data
new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data
new_fc.sigma.data = tg_model.fc.sigma.data
tg_model.fc = new_fc
lamda_mult = (out_features1+out_features2)*1.0 / (self.args.nb_cl)
if iteration > start_iter:
cur_lamda = self.args.lamda * math.sqrt(lamda_mult)
else:
cur_lamda = self.args.lamda
actual_cl = order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)]
indices_train_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_train_total])
indices_test_10 = np.array([i in order[range(last_iter*self.args.nb_cl,(iteration+1)*self.args.nb_cl)] for i in Y_valid_total])
X_train = X_train_total[indices_train_10]
X_valid = X_valid_total[indices_test_10]
X_valid_cumuls.append(X_valid)
X_train_cumuls.append(X_train)
X_valid_cumul = np.concatenate(X_valid_cumuls)
X_train_cumul = np.concatenate(X_train_cumuls)
Y_train = Y_train_total[indices_train_10]
Y_valid = Y_valid_total[indices_test_10]
Y_valid_cumuls.append(Y_valid)
Y_train_cumuls.append(Y_train)
Y_valid_cumul = np.concatenate(Y_valid_cumuls)
Y_train_cumul = np.concatenate(Y_train_cumuls)
if iteration == start_iter:
X_valid_ori = X_valid
Y_valid_ori = Y_valid
else:
X_protoset = np.concatenate(X_protoset_cumuls)
Y_protoset = np.concatenate(Y_protoset_cumuls)
if self.args.rs_ratio > 0:
scale_factor = (len(X_train) * self.args.rs_ratio) / (len(X_protoset) * (1 - self.args.rs_ratio))
rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
rs_num_samples = int(len(X_train) / (1 - self.args.rs_ratio))
print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
X_train = np.concatenate((X_train,X_protoset),axis=0)
Y_train = np.concatenate((Y_train,Y_protoset))
print('Batch of classes number {0} arrives'.format(iteration+1))
map_Y_train = np.array([order_list.index(i) for i in Y_train])
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
is_start_iteration = (iteration == start_iter)
if iteration > start_iter:
old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True)
average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor)
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
novel_embedding = torch.zeros((self.args.nb_cl, num_features))
for cls_idx in range(iteration*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
cls_indices = np.array([i == cls_idx for i in map_Y_train])
assert(len(np.where(cls_indices==1)[0])==dictionary_size)
self.evalset.test_data = X_train[cls_indices].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
cls_features = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1)
cls_embedding = torch.mean(norm_features, dim=0)
novel_embedding[cls_idx-iteration*self.args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm
tg_model.to(self.device)
tg_model.fc.fc2.weight.data = novel_embedding.to(self.device)
self.trainset.train_data = X_train.astype('uint8')
self.trainset.train_labels = map_Y_train
if iteration > start_iter and self.args.rs_ratio > 0 and scale_factor > 1:
print("Weights from sampling:", rs_sample_weights)
index1 = np.where(rs_sample_weights>1)[0]
index2 = np.where(map_Y_train<iteration*self.args.nb_cl)[0]
assert((index1==index2).all())
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size, shuffle=False, sampler=train_sampler, num_workers=self.args.num_workers)
else:
trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.args.train_batch_size,
shuffle=True, num_workers=self.args.num_workers)
self.testset.test_data = X_valid_cumul.astype('uint8')
self.testset.test_labels = map_Y_valid_cumul
testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.args.test_batch_size,
shuffle=False, num_workers=self.args.num_workers)
print('Max and min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
print('Max and min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
ckp_name = osp.join(self.save_path, 'run_{}_iteration_{}_model.pth'.format(iteration_total, iteration))
ckp_name_free = osp.join(self.save_path, 'run_{}_iteration_{}_free_model.pth'.format(iteration_total, iteration))
print('Checkpoint name:', ckp_name)
if iteration==start_iter and self.args.resume_fg:
print("Loading first group models from checkpoint")
tg_model = torch.load(self.args.ckpt_dir_fg)
elif self.args.resume and os.path.exists(ckp_name):
print("Loading models from checkpoint")
tg_model = torch.load(ckp_name)
else:
if iteration > start_iter:
ref_model = ref_model.to(self.device)
ignored_params = list(map(id, tg_model.fc.fc1.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters())
base_params = filter(lambda p: p.requires_grad,base_params)
tg_params_new =[{'params': base_params, 'lr': self.args.base_lr2, 'weight_decay': self.args.custom_weight_decay}, {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}]
tg_model = tg_model.to(self.device)
tg_optimizer = optim.SGD(tg_params_new, lr=self.args.base_lr2, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
else:
tg_params = tg_model.parameters()
tg_model = tg_model.to(self.device)
tg_optimizer = optim.SGD(tg_params, lr=self.args.base_lr1, momentum=self.args.custom_momentum, weight_decay=self.args.custom_weight_decay)
if iteration > start_iter:
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat, gamma=self.args.lr_factor)
else:
tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=self.lr_strat_first_phase, gamma=self.args.lr_factor)
print("Incremental train")
if iteration > start_iter:
tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
else:
tg_model = incremental_train_and_eval(self.args.epochs, tg_model, ref_model, free_model, ref_free_model, tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iter, cur_lamda, self.args.dist, self.args.K, self.args.lw_mr)
torch.save(tg_model, ckp_name)
if self.args.dynamic_budget:
nb_protos_cl = self.args.nb_protos
else:
nb_protos_cl = int(np.ceil(self.args.nb_protos*100./self.args.nb_cl/(iteration+1)))
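# With a dynamic budget the number of exemplars per class stays fixed at nb_protos, so memory grows
# with the number of classes; otherwise a fixed total budget of nb_protos*100 exemplars is split
# evenly over all classes seen so far.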
tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
num_features = tg_model.fc.in_features
for iter_dico in range(last_iter*self.args.nb_cl, (iteration+1)*self.args.nb_cl):
self.evalset.test_data = prototypes[iter_dico].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0])
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
mu = np.mean(D,axis=1)
index1 = int(iter_dico/self.args.nb_cl)
index2 = iter_dico % self.args.nb_cl
alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
w_t = mu
iter_herding = 0
iter_herding_eff = 0
while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if alpha_dr_herding[index1,ind_max,index2] == 0:
alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
iter_herding += 1
w_t = w_t+mu-D[:,ind_max]
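# The loop above is iCaRL-style herding: exemplars are picked greedily so that the running mean of
# the selected features stays as close as possible to the class mean mu.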
X_protoset_cumuls = []
Y_protoset_cumuls = []
class_means = np.zeros((64,100,2))
for iteration2 in range(iteration+1):
for iter_dico in range(self.args.nb_cl):
current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D2 = mapped_prototypes2.T
D2 = D2/np.linalg.norm(D2,axis=0)
alph = alpha_dr_herding[iteration2,:,iter_dico]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
X_protoset_cumuls.append(prototypes[iteration2*self.args.nb_cl+iter_dico,np.where(alph==1)[0]])
Y_protoset_cumuls.append(order[iteration2*self.args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
alph = np.ones(dictionary_size)/dictionary_size
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
class_means = np.zeros((64,100,2))
for iteration2 in range(iteration+1):
for iter_dico in range(self.args.nb_cl):
current_cl = order[range(iteration2*self.args.nb_cl,(iteration2+1)*self.args.nb_cl)]
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico].astype('uint8')
self.evalset.test_labels = np.zeros(self.evalset.test_data.shape[0]) #zero labels
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
num_samples = self.evalset.test_data.shape[0]
mapped_prototypes = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D = mapped_prototypes.T
D = D/np.linalg.norm(D,axis=0)
self.evalset.test_data = prototypes[iteration2*self.args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size,
shuffle=False, num_workers=self.args.num_workers)
mapped_prototypes2 = compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features)
D2 = mapped_prototypes2.T
D2 = D2/np.linalg.norm(D2,axis=0)
alph = alpha_dr_herding[iteration2,:,iter_dico]
alph = (alph>0)*(alph<nb_protos_cl+1)*1.
alph = alph/np.sum(alph)
class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])
alph = np.ones(dictionary_size)/dictionary_size
class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])
torch.save(class_means, osp.join(self.save_path, 'run_{}_iteration_{}_class_means.pth'.format(iteration_total, iteration)))
current_means = class_means[:, order[range(0,(iteration+1)*self.args.nb_cl)]]
is_start_iteration = (iteration == start_iter)
map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
print('Computing accuracy for first-phase classes')
self.evalset.test_data = X_valid_ori.astype('uint8')
self.evalset.test_labels = map_Y_valid_ori
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
ori_acc, fast_fc = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
self.train_writer.add_scalar('ori_acc/LwF', float(ori_acc[0]), iteration)
self.train_writer.add_scalar('ori_acc/iCaRL', float(ori_acc[1]), iteration)
map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
print('Computing accuracy for all seen classes')
self.evalset.test_data = X_valid_cumul.astype('uint8')
self.evalset.test_labels = map_Y_valid_cumul
evalloader = torch.utils.data.DataLoader(self.evalset, batch_size=self.args.eval_batch_size, shuffle=False, num_workers=self.args.num_workers)
cumul_acc, _ = compute_accuracy(tg_model, free_model, tg_feature_model, current_means, X_protoset_cumuls, Y_protoset_cumuls, evalloader, order_list, is_start_iteration=is_start_iteration, fast_fc=fast_fc, maml_lr=self.args.maml_lr, maml_epoch=self.args.maml_epoch)
top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
self.train_writer.add_scalar('cumul_acc/LwF', float(cumul_acc[0]), iteration)
self.train_writer.add_scalar('cumul_acc/iCaRL', float(cumul_acc[1]), iteration)
torch.save(top1_acc_list_ori, osp.join(self.save_path, 'run_{}_top1_acc_list_ori.pth'.format(iteration_total)))
torch.save(top1_acc_list_cumul, osp.join(self.save_path, 'run_{}_top1_acc_list_cumul.pth'.format(iteration_total)))
self.train_writer.close()
|
167178
|
from typing import Dict, List
from pyhafas.profile.base.helper.date_time import BaseDateTimeHelper
from pyhafas.profile.base.helper.format_products_filter import \
BaseFormatProductsFilterHelper
from pyhafas.profile.base.helper.parse_leg import BaseParseLegHelper
from pyhafas.profile.base.helper.parse_lid import BaseParseLidHelper
from pyhafas.profile.base.helper.request import BaseRequestHelper
from pyhafas.profile.base.requests.journey import BaseJourneyRequest
from pyhafas.profile.base.requests.journeys import BaseJourneysRequest
from pyhafas.profile.base.requests.location import BaseLocationRequest
from pyhafas.profile.base.requests.station_board import BaseStationBoardRequest
from pyhafas.profile.base.requests.trip import BaseTripRequest
from pyhafas.profile.interfaces import ProfileInterface
class BaseProfile(
BaseRequestHelper,
BaseFormatProductsFilterHelper,
BaseParseLidHelper,
BaseDateTimeHelper,
BaseParseLegHelper,
BaseLocationRequest,
BaseJourneyRequest,
BaseJourneysRequest,
BaseStationBoardRequest,
BaseTripRequest,
ProfileInterface):
"""
Profile for a "normal" HaFAS. Only for other profiles usage as basis.
"""
baseUrl: str = ""
defaultUserAgent: str = 'pyhafas'
addMicMac: bool = False
addChecksum: bool = False
salt: str = ""
requestBody: dict = {}
availableProducts: Dict[str, List[int]] = {}
defaultProducts: List[str] = []
def __init__(self, ua=defaultUserAgent):
self.userAgent = ua
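# Hedged sketch of a concrete profile built on this base (all values are illustrative, not from a
# real HaFAS endpoint):
#     class ExampleProfile(BaseProfile):
#         baseUrl = "https://example.com/bin/mgate.exe"
#         requestBody = {"client": {"type": "WEB"}, "ver": "1.16", "lang": "en"}
#         availableProducts = {"long_distance_express": [1], "regional": [8]}
#         defaultProducts = ["long_distance_express", "regional"]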
|
167236
|
import logging
import sys
import sh
from kubeyard.commands.devel import BaseDevelCommand
logger = logging.getLogger(__name__)
class UpdateRequirementsCommand(BaseDevelCommand):
"""
Updates requirements by running the `freeze_requirements` command inside the container.
Requirements: \n
- `freeze_requirements` command available in the container \n
- volume with the requirements files mounted \n
Can be overridden in <project_dir>/scripts/update_requirements.
"""
custom_script_name = 'update_requirements'
def run_default(self):
logger.info('Updating requirements for "{}"...'.format(self.image))
sh.docker(
'run', '--rm', '-i',
'-u', '{}:{}'.format(self.uid, self.gid),
'-e', 'CUSTOM_COMPILE_COMMAND="kubeyard update_requirements"',
'-e', 'HOME=/tmp',
*self.volumes,
self.image,
'bash', '-c', 'freeze_requirements',
_out=sys.stdout.buffer,
_err=sys.stdout.buffer,
)
logger.info('Requirements updated!')
|
167244
|
from typing import Optional
from tilecloud import BoundingPyramid, Tile
class InBoundingPyramid:
"""
Creates a filter that filters out tiles that are not in the specified bounding pyramid. When called the
filter returns ``None`` if the tile is not in the bounding pyramid.
bounding_pyramid:
A :class:`tilecloud.BoundingPyramid` object.
"""
def __init__(self, bounding_pyramid: BoundingPyramid):
self.bounding_pyramid = bounding_pyramid
def __call__(self, tile: Tile) -> Optional[Tile]:
if tile is None or tile.tilecoord not in self.bounding_pyramid:
return None
return tile
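# Hedged usage sketch (the plain generator pipeline is an assumption, not something tilecloud
# prescribes):
#     keep = InBoundingPyramid(bounding_pyramid)
#     tilestream = (t for t in (keep(tile) for tile in tilestream) if t is not None)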
|
167256
|
expected_output = {
"interface": {
"GigabitEthernet3.90": {
"interface": "GigabitEthernet3.90",
"neighbors": {
"FE80::5C00:40FF:FEFF:209": {
"age": "22",
"ip": "FE80::5C00:40FF:FEFF:209",
"link_layer_address": "5e00.40ff.0209",
"neighbor_state": "STALE",
}
},
}
}
}
|
167270
|
import sys
import time
import argparse
import os
import warnings
import numpy as np
import torch
import torch.nn as nn
from collections import defaultdict
import pickle as pk
from torch.nn import Parameter
from layers import DNANodeRepModule, ConvNodeRepModule
from metrics import compute_mae, compute_mape, compute_ssi, compute_geh, \
compute_cpl, compute_cpc, compute_binned_metric, compute_macro_metric, \
mae_metric, cpc_metric, cpl_metric, geh_metric, ssi_metric, mape_metric
from dataset import UrbanPlanningDataset
from training_environment import TrainingSettings as ts, PerformanceLogger, NodeConvType, \
JKType
from training_environment import checkpoint_filepath, OutputLogger
from torch_geometric.nn import JumpingKnowledge
parser = argparse.ArgumentParser(description='UP')
parser.add_argument('--enable-cuda', action='store_true',
help='Enable CUDA')
args = parser.parse_args()
args.device = None
if args.enable_cuda and torch.cuda.is_available():
args.device = torch.device('cuda')
else:
args.device = torch.device('cpu')
class EdgeRegressor(nn.Module):
def __init__(self, num_node_features, num_edge_features, node_rep_size,
hidden_dim):
super(EdgeRegressor, self).__init__()
# MLP that transforms the target edge features concatenated with the features of its two endpoint nodes
self.fc_edges = nn.Sequential(
nn.Linear(num_edge_features + 2 * num_node_features, hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=ts.drop_prob),
nn.Linear(hidden_dim, hidden_dim),
)
concat_hidden_dim = hidden_dim
if ts.include_node_reps:
if ts.node_conv_type == NodeConvType.GraphConvolution:
self.node_rep_module = ConvNodeRepModule(num_node_features,
node_rep_size,
ts.num_node_rep_layers,
ts.improved_gcn,
ts.drop_prob)
elif ts.node_conv_type == NodeConvType.DNAConvolution:
self.node_rep_module = DNANodeRepModule(num_node_features,
node_rep_size,
ts.num_node_rep_layers,
ts.dna_heads,
ts.dna_groups,
ts.drop_prob)
concat_hidden_dim += 2 * node_rep_size
if ts.jk_type is not JKType.NoJK:
self.jk = JumpingKnowledge(ts.jk_type.value, channels=8,
num_layers=ts.num_node_rep_layers)
lin_size = node_rep_size
if ts.jk_type is JKType.Concat:
lin_size = ts.num_node_rep_layers*node_rep_size
self.jk_lin = nn.Linear(lin_size, node_rep_size)
self.node_weight = Parameter(torch.from_numpy(np.array(1.0, dtype=np.float32)))
self.edge_weight = Parameter(torch.from_numpy(np.array(1.0, dtype=np.float32)))
self.regression_head = nn.Sequential(
nn.ReLU(),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=ts.drop_prob),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=ts.drop_prob),
nn.Linear(hidden_dim, 1)
)
def forward(self, x_nodes, x_edges_batch, edge_indices_batch, edge_indices,
edge_weight=None):
"""
:param x_nodes: Node features of shape [N, D]
:param x_edges_batch: Edge features of shape [B, K]
:param edge_indices_batch: Matrix of shape [B, 2] indicating the
indices of the nodes connected by each edge.
:param edge_indices: Matrix of shape [2, E] indicating for each edge
in the graph the two node IDs it connects.
:param edge_weight: Vector of shape [E] containing the edge weight for
each edge in the graph.
:return: Predictions for edges with shape [B, 1]
"""
# Compute hidden representation of target edge
x_nodes_left = x_nodes[edge_indices_batch[:, 0]]
x_nodes_right = x_nodes[edge_indices_batch[:, 1]]
x_concat = torch.cat([x_nodes_left, x_edges_batch, x_nodes_right], dim=-1)
h_edges = self.fc_edges(x_concat)
h_total = self.node_weight * h_edges
# Compute hidden representations of nodes
if ts.include_node_reps:
intermediate_node_reps = self.node_rep_module(x_nodes,
edge_indices.t(),
edge_weight)
if ts.jk_type is JKType.NoJK:
h_nodes = intermediate_node_reps[-1]
else:
h_nodes = self.jk(intermediate_node_reps)
h_nodes = self.jk_lin(h_nodes)
# Get hidden representations of nodes incident to target edges
h_nodes_left = h_nodes[edge_indices_batch[:, 0]]
h_nodes_right = h_nodes[edge_indices_batch[:, 1]]
h_total += self.edge_weight * h_nodes_left
h_total += self.edge_weight * h_nodes_right
regression_output = self.regression_head(h_total)
return regression_output.squeeze(-1)
def train_epoch(epoch, predictor, data, optimizer, loss_criterion, logger,
lr_schedule):
predictor.train()
for (edge_idcs_batch, x_edges_batch, edge_labels_batch,
_) in data.train_loader:
edge_idcs_batch = edge_idcs_batch.to(device=args.device)
x_edges_batch = x_edges_batch.to(device=args.device)
edge_labels_batch = edge_labels_batch.to(device=args.device)
optimizer.zero_grad()
reg_out = predictor(data.node_feats, x_edges_batch, edge_idcs_batch,
data.flow_topology.edge_indices,
edge_weight=data.flow_topology.edge_weights)
loss = loss_criterion(reg_out, edge_labels_batch)
loss.backward()
optimizer.step()
logger.add_values({"train_loss": loss.item()})
lr_schedule.step()
def validate_epoch(epoch, predictor, data, loss_criterion, data_loader, logger,
test):
predictor.eval()
prefix = "test" if test else "val"
for (edge_idcs_batch, x_edges_batch, edge_labels_batch, edge_buckets_batch) in data_loader:
edge_idcs_batch = edge_idcs_batch.to(device=args.device)
x_edges_batch = x_edges_batch.to(device=args.device)
edge_labels_batch = edge_labels_batch.to(device=args.device)
reg_out = predictor(data.node_feats, x_edges_batch, edge_idcs_batch,
data.flow_topology.edge_indices,
edge_weight=data.flow_topology.edge_weights)
loss = loss_criterion(reg_out, edge_labels_batch)
logger.add_values({
prefix + "_loss": loss.item(),
prefix + "_predictions": reg_out.detach().cpu().numpy(),
prefix + "_labels": edge_labels_batch.detach().cpu().numpy(),
prefix + "_bins": edge_buckets_batch.detach().cpu().numpy()
})
if test:
with open("preds_labels.pk", "wb") as fd:
preds = data.label_scaler.inverse_transform(np.concatenate(logger._current_epoch_metrics["test_predictions"], axis=-1).reshape(-1, 1))
labels = data.label_scaler.inverse_transform(np.concatenate(logger._current_epoch_metrics["test_labels"], axis=-1).reshape(-1, 1))
pk.dump((preds, labels, logger._current_epoch_metrics["test_node_idcs"]), fd)
def run_training():
# Set up training environment
if not os.path.exists(ts.cp_folder):
os.makedirs(ts.cp_folder)
log_filepath = checkpoint_filepath(ts.cp_folder, "log", __file__, {},
".pk")
summary_filepath = checkpoint_filepath(ts.cp_folder, "summary", __file__,
{}, ".txt")
output_logger = OutputLogger(checkpoint_filepath(ts.cp_folder, "output",
__file__, {}, ".txt"))
sys.stdout = output_logger
ts.write_summary_file(checkpoint_filepath(ts.cp_folder, "hyperparams",
__file__, {}, "txt"))
print(ts.settings_description())
# Load data
ds = UrbanPlanningDataset(ts.data_base_path, ts.num_bins, ts.batch_size,
ts.n_quantiles, ts.resampling,
ts.excluded_node_feature_columns,
ts.excluded_edge_feature_columns, False,
ts.include_edge_flow_feat, ts.adj_flow_threshold,
ts.seed)
# Preprocess data
ds.to(args.device)
def _get_metric_funcs(prefix):
preds_key = prefix+"_predictions"
labels_key = prefix+"_labels"
bins_key = prefix+"_bins"
return {
prefix+"_loss": (lambda m: np.nanmean(m[prefix+"_loss"])),
prefix + "_mae": (lambda m: compute_mae(m[preds_key], m[labels_key], ds)),
prefix + "_binned_mae": (lambda m: compute_binned_metric(mae_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_mae": (lambda m: compute_macro_metric(mae_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_mape": (lambda m: compute_mape(m[preds_key], m[labels_key], ds)),
prefix + "_binned_mape": (lambda m: compute_binned_metric(mape_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_mape": (lambda m: compute_macro_metric(mape_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_ssi": (lambda m: compute_ssi(m[preds_key], m[labels_key], ds)),
prefix + "_binned_ssi": (lambda m: compute_binned_metric(ssi_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_ssi": (lambda m: compute_macro_metric(ssi_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_geh": (lambda m: compute_geh(m[preds_key], m[labels_key], ds)),
prefix + "_binned_geh": (lambda m: compute_binned_metric(geh_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_geh": (lambda m: compute_macro_metric(geh_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_cpl": (lambda m: compute_cpl(m[preds_key], m[labels_key], ds)),
prefix + "_binned_cpl": (lambda m: compute_binned_metric(cpl_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_cpl": (lambda m: compute_macro_metric(cpl_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_cpc": (lambda m: compute_cpc(m[preds_key], m[labels_key], ds)),
prefix + "_binned_cpc": (lambda m: compute_binned_metric(cpc_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
prefix + "_macro_cpc": (lambda m: compute_macro_metric(cpc_metric, m[preds_key], m[labels_key], m[bins_key], ds, ts.num_bins)),
}
metric_funcs = {
"train_loss": (lambda m: np.nanmean(m["train_loss"])),
**_get_metric_funcs("val"),
**_get_metric_funcs("test"),
}
logger = PerformanceLogger(metric_funcs, "val_macro_mae", log_filepath,
write_every=ts.write_log_every)
predictor = EdgeRegressor(ds.num_node_feats, ds.num_edge_feats,
hidden_dim=ts.hidden_dim,
node_rep_size=ts.node_rep_size)
predictor = predictor.to(device=args.device)
optimizer = torch.optim.Adam(predictor.parameters(), lr=ts.lr)
lr_schedule = torch.optim.lr_scheduler.MultiStepLR(optimizer,
list(ts.lr_schedule))
loss_criterion = (nn.L1Loss() if ts.regression_loss == "L1"
else nn.MSELoss())
print("Start training")
for epoch in range(-1, ts.num_epochs):
if epoch >= 0:
train_epoch(epoch, predictor, ds, optimizer, loss_criterion,
logger, lr_schedule)
validate_epoch(epoch, predictor, ds, loss_criterion, ds.val_loader,
logger, test=False)
validate_epoch(epoch, predictor, ds, loss_criterion, ds.test_loader,
logger, test=True)
logger.complete_epoch()
print(logger.epoch_summary())
if epoch % ts.write_log_every == 0:
logger.write(log_filepath)
logger.write(log_filepath)
logger.write_summary(summary_filepath, ts.settings_description())
return logger
if __name__ == '__main__':
run_training()
|
167312
|
import paramiko, json, os
from kubernetes import client, config
from apps.common import static_value
from apps.common.utils import init_kubernetes
from apps.network_manager.api import NicApi
def get_master_server_info():
base_path = os.path.dirname(os.path.abspath(__file__))
with open(base_path+'/master_server_info.json','r', encoding='utf-8') as f:
master_server_info = json.load(f)
return master_server_info
def init_kube(network_type,tplg_plcy):
tplg_plcy_opt = ''
if tplg_plcy == 'none':
tplg_plcy_opt = ''
else:
tplg_plcy_opt = tplg_plcy
set_reset_status('RUN')
server_info = get_master_server_info()
cli = paramiko.SSHClient()
cli.set_missing_host_key_policy(paramiko.AutoAddPolicy)
cli.connect(server_info['SERVER'],port=22,username=server_info['USER'],password=server_info['PWD'])
set_reset_status('PROC')
stdin, stdout, stderr = cli.exec_command("bash " + server_info['K8S_INIT_FILE'] + " " + network_type + " " + tplg_plcy_opt)
while True:
lines = stdout.readline()
if not lines:
break
stdin, stdout, stderr = cli.exec_command("bash " + server_info['K8S_TOKEN_FILE'])
while True:
lines = stdout.readline()
if not lines:
break
stdin, stdout, stderr = cli.exec_command("bash " + server_info['K8S_API_KEY_FILE'])
while True:
lines = stdout.readline()
if not lines:
break
for node_ip in server_info['WORKER_NODE_IP']:
stdin, stdout, stderr = cli.exec_command("sshpass -p \"" + server_info['PWD'] + "\" ssh gedge@" + node_ip + " 'bash -s' < " + server_info['K8S_WORKER_INIT_FILE'])
while True:
lines = stdout.readline()
if not lines:
break
cli.close()
set_reset_status('END')
def renew_acc_key():
cli = paramiko.SSHClient()
cli.set_missing_host_key_policy(paramiko.AutoAddPolicy)
server_info = get_master_server_info()
cli.connect(server_info['SERVER'],port=22,username=server_info['USER'],password=server_info['PWD'])
stdin, stdout, stderr = cli.exec_command("bash " + server_info['K8S_API_KEY_FILE'])
api_key = stdout.readline()[:-1]
cli.close()
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','w', encoding='utf-8') as f:
kubeconfig['acc_key'] = api_key
json.dump(kubeconfig,f,indent='\t')
init_kubernetes()
def set_kube_network(network):
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','w', encoding='utf-8') as f:
kubeconfig['network'] = network
json.dump(kubeconfig,f,indent='\t')
def get_kube_network():
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
return kubeconfig['network']
def set_topology_policy(tplg_plcy):
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','w', encoding='utf-8') as f:
if tplg_plcy == 'none':
tplg_plcy = 'None'
elif tplg_plcy == 'single':
tplg_plcy = 'Single-Numa-Node'
elif tplg_plcy == 'best':
tplg_plcy = 'Best-Effort'
elif tplg_plcy == 'restricted':
tplg_plcy = 'Restricted'
kubeconfig['topology_policy'] = tplg_plcy
json.dump(kubeconfig,f,indent='\t')
def get_topology_policy():
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
return kubeconfig['topology_policy']
def set_reset_status(status):
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','w', encoding='utf-8') as f:
kubeconfig['reset_status'] = status
json.dump(kubeconfig,f,indent='\t')
def get_reset_status():
with open(static_value.KUBE_CONFIG_PATH+'/kubernetes_config.json','r', encoding='utf-8') as f:
kubeconfig = json.load(f)
return kubeconfig['reset_status']
def create_default_multus():
base_path = os.path.dirname(os.path.abspath(__file__))
with open(base_path+'/init_json/nic-config.json','r', encoding='utf-8') as f:
nic_json = json.load(f)
nic_api = NicApi()
response = nic_api.create_namespaced_nic(namespace=static_value.NAMESPACE,body=nic_json['multus'])
def create_default_sriov():
base_path = os.path.dirname(os.path.abspath(__file__))
with open(base_path+'/init_json/nic-config.json','r', encoding='utf-8') as f:
nic_json = json.load(f)
nic_api = NicApi()
response = nic_api.create_namespaced_nic(namespace=static_value.NAMESPACE,body=nic_json['sriov'])
|
167317
|
import pytest
import globus_sdk
from tests.common import make_response
@pytest.fixture
def make_oauth_token_response():
"""
response with conveniently formatted names to help with iteration in tests
"""
def f(client=None):
return make_response(
response_class=globus_sdk.services.auth.response.OAuthTokenResponse,
json_body={
"access_token": "access_token_1",
"expires_in": 3600,
"id_token": "<PASSWORD>",
"refresh_token": "<PASSWORD>",
"resource_server": "resource_server_1",
"scope": "scope1",
"state": "provided_by_client_to_prevent_replay_attacks",
"token_type": "bearer",
"other_tokens": [
{
"access_token": "<PASSWORD>_token_2",
"expires_in": 3600,
"refresh_token": "<PASSWORD>",
"resource_server": "resource_server_2",
"scope": "scope2 scope2:0 scope2:1",
"token_type": "bearer",
},
{
"access_token": "<PASSWORD>_token_3",
"expires_in": 3600,
"refresh_token": "<PASSWORD>",
"resource_server": "resource_server_3",
"scope": "scope3:0 scope3:1",
"token_type": "bearer",
},
],
},
client=client,
)
return f
@pytest.fixture
def oauth_token_response(make_oauth_token_response):
return make_oauth_token_response()
|
167364
|
from flask import Flask
app = Flask(__name__)
#app.config['CONFIG'] = None
from pbnh.app import views
|
167368
|
from office365.runtime.client_value import ClientValue
class ChatMessageAttachment(ClientValue):
pass
|
167414
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY'] = '<KEY>'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view= 'login'
login_manager.login_message_category= 'info'
from flaskblog import routes
|
167419
|
import pandas as pd
waste_rates = pd.DataFrame({'High': [0, 0.1254, 0.1129, 0.1016, 0.0934, 0.0860, 0.0791, 0.0728, 0.0684, 0.0643, 0.0604,
0.0568, 0.0534, 0.0502, 0.0472, 0.0444, 0.0444, 0.0444, 0.0444, 0.0444, 0.0444],
'Medium': [0, 0.1777, 0.1599, 0.1439, 0.1324, 0.1218, 0.1121, 0.1031, 0.0969, 0.0911,
0.0856, 0.0805, 0.0757, 0.0711, 0.0668, 0.0628, 0.0628, 0.0628, 0.0628, 0.0628,
0.0628],
'Low': [0, 0.2404, 0.2163, 0.1947, 0.1791, 0.1648, 0.1516, 0.1395, 0.1311, 0.1232, 0.1158,
0.1089, 0.1024, 0.0962, 0.0904, 0.0850, 0.0850, 0.0850, 0.0850, 0.0850, 0.0850]})
waste_rates.index = range(0, 21)
print(waste_rates)
|
167425
|
client_id="You need to fill this"
client_secret="You need to fill this"
user_agent="You need to fill this"
username="You need to fill this"
password="<PASSWORD>"
|
167484
|
import sys
import os
import torch
from torch import nn
sys.path.append(os.getcwd())
from unimodals.MVAE import MLPEncoder, TSEncoder, TSDecoder # noqa
from objective_functions.recon import elbo_loss, sigmloss1d # noqa
from training_structures.Supervised_Learning import train, test # noqa
from datasets.mimic.get_data import get_dataloader # noqa
from objective_functions.objectives_for_supervised_learning import MVAE_objective # noqa
from unimodals.common_models import MLP # noqa
from fusions.MVAE import ProductOfExperts_Zipped # noqa
traindata, validdata, testdata = get_dataloader(
7, imputed_path='/home/pliang/yiwei/im.pk', flatten_time_series=True)
classes = 2
n_latent = 200
series_dim = 12
timestep = 24
fuse = ProductOfExperts_Zipped((1, 40, n_latent))
encoders = [MLPEncoder(5, 20, n_latent).cuda(), TSEncoder(
series_dim, 30, n_latent, timestep, batch_first=True).cuda()]
decoders = [MLP(n_latent, 20, 5).cuda(), TSDecoder(
series_dim, 30, n_latent, timestep).cuda()]
head = MLP(n_latent, 20, classes).cuda()
elbo = MVAE_objective(2.0, [sigmloss1d, sigmloss1d], [1.0, 1.0], annealing=0.0)
argsdict = {'decoders': decoders}
train(encoders, fuse, head, traindata, validdata, 30, decoders,
optimtype=torch.optim.Adam, lr=0.0001, objective=elbo, objective_args_dict=argsdict)
model = torch.load('best.pt')
# dataset = 'mimic mortality', 'mimic 1', 'mimic 7'
test(model, testdata, dataset='mimic 7')
|
167501
|
import math
from collections import defaultdict
class IDFIndex(object):
finalized = False
def __init__(self):
self.idf_counts = defaultdict(int)
self.N = 0
def update(self, doc):
if self.finalized or not doc:
return
for feature, count in doc.iteritems():
self.idf_counts[feature] += 1
self.N += 1
def prune(self, min_count):
self.idf_counts = {k: count for k, count in self.idf_counts.iteritems() if count >= min_count}
def corpus_frequency(self, key):
return self.idf_counts.get(key, 0)
def tfidf_score(self, key, count=1):
if count < 0:
return 0.0
idf_count = self.idf_counts.get(key, None)
if idf_count is None:
return 0.0
return (math.log(count + 1.0) * (math.log(float(self.N) / idf_count)))
def tfidf_vector(self, token_counts):
tf_idf = [self.tfidf_score(t, count=c) for t, c in token_counts.iteritems()]
norm = math.sqrt(sum((t ** 2 for t in tf_idf)))
return [t / norm for t in tf_idf]
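# Hedged usage sketch (Python 2, judging by the iteritems() calls; `corpus` is illustrative):
#     idx = IDFIndex()
#     for doc in corpus:          # each doc is a {token: count} mapping
#         idx.update(doc)
#     vec = idx.tfidf_vector({"cat": 2, "dog": 1})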
|
167502
|
import unittest
from PEPit.examples.unconstrained_convex_minimization import wc_proximal_point
from PEPit.examples.composite_convex_minimization import wc_proximal_gradient
from PEPit.examples.unconstrained_convex_minimization import wc_gradient_exact_line_search
from PEPit.examples.unconstrained_convex_minimization import wc_inexact_gradient_exact_line_search
from tests.additional_complexified_examples_tests import wc_proximal_gradient_complexified
from tests.additional_complexified_examples_tests import wc_proximal_point_complexified
from tests.additional_complexified_examples_tests import wc_gradient_exact_line_search_complexified
from tests.additional_complexified_examples_tests import wc_inexact_gradient_exact_line_search_complexified
class TestExamples(unittest.TestCase):
def setUp(self):
self.n = 6
self.mu = .1
self.L = 1
self.relative_precision = 10 ** -3
self.verbose = -1
def test_PGD_modified(self):
L, mu, gamma, n = 1, 0.1, 1, 2
wc, theory = wc_proximal_gradient_complexified(L, mu, gamma, n, verbose=self.verbose)
self.assertAlmostEqual(wc, theory, delta=10 ** -3 * theory)
def test_PGD_vs_PGD_modified(self):
L, mu, gamma, n = 1, 0.1, 1, 2
wc_modified, theory = wc_proximal_gradient_complexified(L, mu, gamma, n, verbose=self.verbose)
wc, theory = wc_proximal_gradient(L, mu, gamma, n, verbose=self.verbose)
self.assertAlmostEqual(wc_modified, wc, delta=10 ** -3 * theory)
def test_PPA_modified(self):
n, gamma = 2, 1
wc, theory = wc_proximal_point_complexified(n, gamma, verbose=self.verbose)
self.assertAlmostEqual(wc, theory, delta=10 ** -3 * theory)
def test_PPA_vs_PPA_modified(self):
n, gamma = 2, 1
wc_modified, theory = wc_proximal_point_complexified(n, gamma, verbose=self.verbose)
wc, theory = wc_proximal_point(n, gamma, verbose=self.verbose)
self.assertAlmostEqual(wc_modified, wc, delta=10 ** -3 * theory)
def test_ELS_modified(self):
L, mu, n = 3, .3, 3
wc, theory = wc_gradient_exact_line_search_complexified(L=L, mu=mu, n=n, verbose=self.verbose)
self.assertAlmostEqual(wc, theory, delta=self.relative_precision * theory)
def test_inexact_ELS_modified(self):
L, mu, epsilon, n = 2, .05, .2, 2
wc, theory = wc_inexact_gradient_exact_line_search_complexified(L=L, mu=mu, epsilon=epsilon, n=n, verbose=self.verbose)
self.assertAlmostEqual(wc, theory, delta=self.relative_precision * theory)
def test_ELS_vs_ELS_modified(self):
L, mu, n = 1.5, .12, 3
wc_modified, theory = wc_gradient_exact_line_search_complexified(L=L, mu=mu, n=n, verbose=self.verbose)
wc, theory = wc_gradient_exact_line_search(L=L, mu=mu, n=n, verbose=self.verbose)
self.assertAlmostEqual(wc_modified, wc, delta=10 ** -3 * theory)
def test_inexact_ELS_vs_ELS_modified(self):
L, mu, epsilon, n = 2.3, .23, .2, 2
wc_modified, theory = wc_inexact_gradient_exact_line_search_complexified(L=L, mu=mu, epsilon=epsilon, n=n, verbose=self.verbose)
wc, theory = wc_inexact_gradient_exact_line_search(L=L, mu=mu, epsilon=epsilon, n=n, verbose=self.verbose)
self.assertAlmostEqual(wc_modified, wc, delta=10 ** -3 * theory)
|
167515
|
import torch
import soft_renderer as sr
class NeuralRenderer(torch.nn.Module):
def __init__(self, img_size=256, camera_mode='look_at', orig_size=256, background=[0, 0, 0],
texture_type='surface', anti_aliasing=False, **kwargs):
super(NeuralRenderer, self).__init__()
self.camera_mode = camera_mode
self.texture_type = texture_type
self.renderer = sr.SoftRenderer(image_size=img_size, camera_mode=camera_mode, orig_size=orig_size,
background_color=background, texture_type=self.texture_type,
anti_aliasing=anti_aliasing, **kwargs)
self.renderer = self.renderer.cuda()
def set_camera(self, K, Rt):
# set 3x4 projection matrix (K @ Rt)
P = K @ Rt
self.renderer.set_transform(P)
def set_lighting(self, light_intensity_ambient=0.5, light_intensity_directional=0.5, light_direction=[0, 1, 0]):
# set renderer light values
self.renderer.set_lighting(light_intensity_ambient, light_intensity_directional, light_direction)
def forward(self, vertices, faces, textures=None, mode=None):
vs = vertices.clone()
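        # flip the vertex y axis to match the renderer's coordinate convention; the rendered image is flipped back along y below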
vs[:, :, 1] *= -1
fs = faces.clone()
if textures is None:
ts = textures
else:
ts = textures.clone()
imgs = self.renderer(vs, fs, ts)
imgs = torch.flip(imgs, (2,)) # invert y axis
text, mask = imgs[:, :-1], imgs[:, -1]
return text, mask
|
167528
|
from __future__ import division
import sys
# set default encoding to utf-8
reload(sys)
sys.setdefaultencoding("utf-8")
|
167569
|
import pyodbc
import pandas
# Define our Query to create the table.
create_table_query = """
-- Create the Table if it Does not exist.
IF Object_ID('youtube_videos') IS NULL
CREATE TABLE [sigma-coding].[dbo].[youtube_videos]
(
[video_id] NVARCHAR(MAX) NOT NULL,
[published_at] DATETIME NULL,
[channel_id] NVARCHAR(MAX) NULL,
[channel_title] NVARCHAR(MAX) NULL,
[video_title] NVARCHAR(MAX) NULL,
[video_description] NVARCHAR(MAX) NULL,
[category_id] INT NULL,
[duration] NVARCHAR(MAX) NULL,
[definition] NVARCHAR(6) NULL,
[caption] BIT NULL,
[licensed_content] BIT NULL,
[has_custom_thumbnail] BIT NULL,
[view_count] INT NULL,
[like_count] INT NULL,
[dislike_count] INT NULL,
        [comment_count] INT NULL
)
"""
# Define the Components of the Connection String.
DRIVER = '{ODBC Driver 17 for SQL Server}'
SERVER_NAME = "ALEX-LAPTOP\ALEX_SQL_SERVER"
DATABASE_NAME = "sigma-coding"
CONNECTION_STRING = """
Driver={driver};
Server={server};
Database={database};
Trusted_Connection=yes;
""".format(
driver=DRIVER,
server=SERVER_NAME,
database=DATABASE_NAME
)
# Create a connection object.
connection_object: pyodbc.Connection = pyodbc.connect(CONNECTION_STRING)
# Create a Cursor Object, using the connection.
cursor_object: pyodbc.Cursor = connection_object.cursor()
# Define the File Path.
data_file = "python/python-pyodbc/youtube_data.csv"
# Load the Data.
youtube_df: pandas.DataFrame = pandas.read_csv(
data_file,
infer_datetime_format=True,
parse_dates=True
)
# Parse the Published At Column.
youtube_df['published_at'] = pandas.to_datetime(youtube_df['published_at'])
# Define the Insert Query.
sql_insert = """
INSERT INTO [sigma-coding].[dbo].[youtube_videos]
(
[video_id],
[published_at],
[channel_id],
[channel_title],
[video_title],
[video_description],
[category_id],
[duration],
[definition],
[caption],
[licensed_content],
[has_custom_thumbnail],
[view_count],
[like_count],
[dislike_count],
[comment_count]
)
VALUES
(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
"""
# Create the Table.
cursor_object.execute(create_table_query)
# Commit the Table.
cursor_object.commit()
# Convert the DataFrame to a RecordSet.
df_records = youtube_df.values.tolist()
# Execute it.
cursor_object.executemany(sql_insert, df_records)
# Commit the Transactions.
cursor_object.commit()
# Define the Select Query.
sql_select = "SELECT * FROM [sigma-coding].[dbo].[youtube_videos]"
# Execute the Query.
records = cursor_object.execute(sql_select).fetchall()
# Define the column names.
columns = [column[0] for column in cursor_object.description]
# Dump to a Pandas DataFrame.
youtube_dump_df = pandas.DataFrame.from_records(
data=records,
columns=columns
)
# print the head.
print(youtube_dump_df.head())
|
167592
|
from mock import patch
import unittest
from esfdw.mapping_to_schema import generate_table_spec, generate_schema, TableSpec, ColumnSpec
class TestMappingToSchema(unittest.TestCase):
def test_generate_table_spec(self):
mapping = {
'index1': {
'mappings': {
'_default_': {
'dynamic_templates': {}
},
'doc1': {
'properties': {
'a': {
'index': 'not_analyzed',
'type': 'string',
'doc_values': True
},
'b': {
'properties': {
'c': {
'properties': {
'd': {
'type': 'date',
'format': 'dateOptionalTime'
}
}
},
'e': {
'type': 'boolean'
}
}
},
'f-f': {
'type': 'double'
},
'g': {
'type': 'long'
},
'h': {
'type': 'short'
}
}
},
'doc2': {
'properties': {
'a': {
'type': 'string'
}
}
},
'doc-3': {
'properties': {
'z': {
'type': 'boolean'
}
}
}
}
},
'index2': {
'mappings': {
'doc1': {
'properties': {
'aa': {
'type': 'date'
}
}
}
}
}
}
spec = sorted(list(generate_table_spec(mapping, ['index1'], [
'doc1', 'doc-3'])), key=lambda x: (x.index, x.name))
self.assertEqual(
spec, [
TableSpec(
'doc1', [
ColumnSpec(
'a', 'text'), ColumnSpec(
'f_f', 'double precision'), ColumnSpec(
'b__c__d', 'timestamp'), ColumnSpec(
'b__e', 'boolean'), ColumnSpec(
'g', 'bigint'), ColumnSpec(
'h', 'smallint')], 'doc1', 'index1'), TableSpec(
'doc_3', [
ColumnSpec(
'z', 'boolean')], 'doc-3', 'index1')])
spec = sorted(list(generate_table_spec(mapping, ['index1'], None)),
key=lambda x: (x.index, x.name))
self.assertEqual(spec,
[TableSpec('doc1',
[ColumnSpec('a', 'text'),
ColumnSpec('f_f', 'double precision'),
ColumnSpec('b__c__d', 'timestamp'),
ColumnSpec('b__e', 'boolean'),
ColumnSpec('g', 'bigint'),
ColumnSpec('h', 'smallint')],
'doc1', 'index1'),
TableSpec('doc2', [ColumnSpec('a', 'text')], 'doc2', 'index1'),
TableSpec('doc_3', [ColumnSpec('z', 'boolean')], 'doc-3', 'index1')]
)
spec = sorted(list(generate_table_spec(
mapping, [], ['doc1', 'doc-3'])), key=lambda x: (x.index, x.name))
self.assertEqual(spec,
[TableSpec('doc1',
[ColumnSpec('a', 'text'),
ColumnSpec('f_f', 'double precision'),
ColumnSpec('b__c__d', 'timestamp'),
ColumnSpec('b__e', 'boolean'),
ColumnSpec('g', 'bigint'),
ColumnSpec('h', 'smallint')],
'doc1', 'index1'),
TableSpec('doc_3', [ColumnSpec('z', 'boolean')], 'doc-3', 'index1'),
TableSpec('doc1', [ColumnSpec('aa', 'timestamp')], 'doc1', 'index2')],
)
@patch('esfdw.mapping_to_schema.generate_table_spec')
def test_generate_schema(self, generate_table_spec_mock):
generate_table_spec_mock.return_value = [
TableSpec(
'table1', [
ColumnSpec(
'a', 'text'), ColumnSpec(
'b', 'integer')], 'table1', 'myindex')]
expected_schema = [
"""DROP FOREIGN TABLE IF EXISTS table1;
CREATE FOREIGN TABLE table1 (
a text,
b integer
) SERVER es_srv OPTIONS (
doc_type 'table1',
index 'myindex',
column_name_translation 'true'
);
"""]
schema = list(generate_schema(None, None, None, 'es_srv'))
self.assertEqual(expected_schema, schema)
if __name__ == '__main__':
unittest.main()
|
167627
|
import json
import os
import time
from datetime import datetime
def datetime_from_timestamp(unix_timestamp):
return datetime.fromtimestamp(int(unix_timestamp)).strftime("%Y-%m-%d %H:%M:%S")
def datetime_now():
return datetime_from_timestamp(time.time())
def load_json(file_name):
with open(file_name, "r") as json_file:
return json.loads(json_file.read())
def save_json(file_name, data):
with open(file_name, "w") as json_file:
return json_file.write(json.dumps(data))
def format_size(size):
units = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"]
size = float(size)
i = 0
    while size >= 1024.0 and i < len(units) - 1:
i += 1
size /= 1024.0
return "%.2f %s" % (size, units[i])
def rename_file(old_filename, new_filename):
full_path, filename = os.path.split(old_filename)
filename, extension = os.path.splitext(filename)
temp_filename = os.path.join(full_path, new_filename + extension)
os.rename(old_filename, temp_filename)
return temp_filename
|
167630
|
def main():
import dtlpy as dl
project = dl.projects.get(project_name='Jungle')
dataset = project.datasets.get(dataset_name='Tigers')
converter = dl.Converter()
converter.convert_dataset(dataset=dataset, to_format='yolo',
local_path='home/yolo_annotations/tigers')
|
167643
|
import numpy as np
import pdb
class ModelBranch:
def __init__(self, initialW, initialGrad):
print("initializing model")
self.chain = [[initialW, initialGrad]]
self.pendingGradients = []
self.gradientHistory = []
def updateModel(self):
### TODO:: Refactor out ###
acc = np.zeros(self.chain[0][0].size)
        numPending = len(self.pendingGradients)
        if numPending == 0:
            # no pending gradients to average; avoid a division by zero below
            return
for grad in self.pendingGradients:
acc += grad
newGrad = acc / numPending
###
newW = self.chain[-1][0] + newGrad
self.chain.append([newW, newGrad])
### Testing to see if gradients can be linked ###
self.gradientHistory.append(self.pendingGradients[:])
###
self.pendingGradients = []
def getWeights(self):
return self.chain[-1][0]
def getPreviousGrad(self):
return self.chain[-1][1]
def submitGradient(self, grad):
self.pendingGradients.append(grad)
|
167649
|
import os
import yaml
_RekallAPI = None
_RekallSessionAPI = None
def RekallAPI(current):
yaml_path = os.path.join(current.request.folder, "private", "api.yaml")
global _RekallAPI
if _RekallAPI is None:
_RekallAPI = {}
        for desc in yaml.safe_load(open(yaml_path).read()):
_RekallAPI[desc["plugin"]] = desc
return _RekallAPI
def SessionAPI(current):
yaml_path = os.path.join(
current.request.folder, "private", "session_api.yaml")
global _RekallSessionAPI
if _RekallSessionAPI is None:
_RekallSessionAPI = dict(name="session", args={})
        for arg in yaml.safe_load(open(yaml_path).read()):
_RekallSessionAPI["args"][arg["name"]] = arg
return _RekallSessionAPI
def list(current):
"""List all available Rekall plugins."""
return dict(
data=[{'plugin': x['plugin'],
'name': x['name'],
} for x in RekallAPI(current).values()])
def get(current, plugin=None):
if plugin:
return RekallAPI(current).get(plugin) or {}
|
167656
|
from magma import *
from mantle import *
from loam import Peripheral
#from .peripherals.fifo import FIFO
#from .peripherals.uart.tx import UARTTX
class USART(Peripheral):
name = 'usart'
IO = ["RX", In(Bit), "TX", Out(Bit)]
def __init__(self, fpga, name='usart0'):
super(USART,self).__init__(fpga, name)
self.txenable = True
self.rxenable = False
self.baudrate = 9600
def baud(self, rate):
self.baudrate = rate
return self
def enable(self, txenable, rxenable):
self.txenable = txenable
self.rxenable = rxenable
return self
def on(self):
tx = self.TX.getgpio()
tx.output().on()
Peripheral.on(self)
return self
def setup(self, main):
pass
#assert main.FREQUENCY
# baud=115200 of serial transfer (32 MHz / 17 = 153846 ~= 153600)
# baud=9600 of serial transfer (32 MHz / 16 / 13 = 153846 ~= 153600)
#baud = CascadedRing([16, 13])
#baud = CascadedRing([14, 14, 17])
#fifo = FIFO()
#uart = UARTTX()
# data, reset, write, read
#wire(0, fifo.reset)
#wire(uart.Tx_complete, fifo.read)
# data, data_present, baud
#uart(fifo.data_out, fifo.data_present, baud)
#wire(uart.serial_out, main.TX)
#main.TXD = fifo.data_in
#main.TXWRITE = fifo.write
#main.TXSTATUS = fifo.full
|
167667
|
import numpy as np
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
from sklearn.model_selection._split import _BaseKFold
from hypernets.utils import logging
logger = logging.get_logger(__name__)
class PrequentialSplit(_BaseKFold):
STRATEGY_PREQ_BLS = 'preq-bls'
STRATEGY_PREQ_SLID_BLS = 'preq-slid-bls'
STRATEGY_PREQ_BLS_GAP = 'preq-bls-gap'
"""
Parameters
----------
    strategy : Strategy of the prequential approach applied in blocks for performance estimation
`preq-bls`: The data is split into n blocks. In the initial iteration, only the first two blocks
are used, the first for training and the second for test. In the next iteration, the second block
is merged with the first and the third block is used for test. This procedure continues until all
blocks are tested.
`preq-slid-bls`: Instead of merging the blocks after each iteration (growing window), one can forget
the older blocks in a sliding window fashion. This idea is typically adopted when past data becomes
deprecated, which is common in non-stationary environments.
`preq-bls-gap`: This illustrates a prequential approach applied in blocks, where a gap block is
introduced. The rationale behind this idea is to increase the independence between training and
test sets.
n_splits : int, default=5.
Number of splits. Must be at least 2.
max_train_size : int, default=None.
Maximum size for a single training set.
test_size : int, default=None.
Number of samples in each test set. Defaults to
``(n_samples - base_size) / (n_splits + 1)``.
gap_size : int, default=0. For strategy `preq-bls`.
Number of samples to exclude from the end of each train set before the test set.
References
----------
<NAME>, <NAME>, <NAME>. Evaluating time series forecasting models: An empirical study on performance
estimation methods[J]. Machine Learning, 2020, 109(11): 1997-2028.
"""
def __init__(self, strategy='preq-bls', base_size=None, n_splits=5, stride=1, *, max_train_size=None,
test_size=None, gap_size=0):
super(PrequentialSplit, self).__init__(n_splits=max(n_splits, 2), shuffle=False, random_state=None)
self.max_train_size = max_train_size
self.test_size = test_size
self.gap_size = gap_size
self.base_size = base_size
self.stride = stride
self.n_folds = n_splits
self.strategy = strategy
self.fold_size = None
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
gap_size = self.gap_size
base = 0
if self.base_size is not None and self.base_size > 0:
base = self.base_size
base += n_samples % n_folds
if self.test_size is not None and self.test_size > 0:
test_size = self.test_size
else:
test_size = (n_samples - base) // n_folds
self.test_size = test_size
if self.n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds, n_samples))
first_test = n_samples - test_size*n_splits
if first_test < 0:
raise ValueError(
("Too many splits={0} for number of samples"
"={1} with test_size={2}").format(n_splits, n_samples, test_size))
indices = np.arange(n_samples)
logger.info(f'n_folds:{self.n_folds}')
logger.info(f'test_size:{test_size}')
if self.strategy == PrequentialSplit.STRATEGY_PREQ_BLS_GAP:
test_starts = range(first_test * 2 + base, n_samples, test_size)
else:
test_starts = range(first_test + base, n_samples, test_size)
last_step = -1
for fold, test_start in enumerate(test_starts):
if last_step == fold // self.stride:
# skip this fold
continue
else:
last_step = fold // self.stride
if self.strategy == PrequentialSplit.STRATEGY_PREQ_BLS:
train_end = test_start - gap_size
if self.max_train_size and self.max_train_size < train_end:
yield (indices[train_end - self.max_train_size:train_end],
indices[test_start:test_start + test_size])
else:
yield (indices[:max(train_end, 0)],
indices[test_start:test_start + test_size])
elif self.strategy == PrequentialSplit.STRATEGY_PREQ_SLID_BLS:
if self.max_train_size and self.max_train_size < test_start:
yield (indices[test_start - self.max_train_size:test_start],
indices[test_start:test_start + test_size])
else:
yield (indices[test_start - (test_size + base):test_start],
indices[test_start:test_start + test_size])
elif self.strategy == PrequentialSplit.STRATEGY_PREQ_BLS_GAP:
yield (indices[:test_start - test_size], indices[test_start:test_start + test_size])
else:
raise ValueError(f'{self.strategy} is not supported')
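# Illustrative sketch (hypothetical data): enumerates the growing-window "preq-bls" folds over
# 20 samples with 3 splits, mirroring the block-wise prequential scheme described in the docstring.
if __name__ == "__main__":
    _X = np.arange(20).reshape(-1, 1)
    _cv = PrequentialSplit(strategy=PrequentialSplit.STRATEGY_PREQ_BLS, n_splits=3)
    for _train_idx, _test_idx in _cv.split(_X):
        print(f"train={_train_idx.tolist()} test={_test_idx.tolist()}")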
|
167673
|
from benchmark import *
import oneflow_benchmark
from flowvision.models.alexnet import alexnet
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size1(benchmark, net=alexnet, input_shape=[1, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size2(benchmark, net=alexnet, input_shape=[2, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size4(benchmark, net=alexnet, input_shape=[4, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size8(benchmark, net=alexnet, input_shape=[8, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size16(benchmark, net=alexnet, input_shape=[16, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)
|
167674
|
from datetime import datetime
import uuid
# Message class
class Message():
# Main initialiser
def __init__(self, title, body, from_id, from_name, to_id, to_name, id="", deleted=False, hidden_for_sender=False):
self.title = title
self.body = body
self.from_id = from_id
self.from_name = from_name
self.to_id = to_id
self.to_name = to_name
self.timestamp = datetime.utcnow()
self.date_string = self.timestamp.strftime("%-d %b %Y %H:%M")
self.id = uuid.uuid4().hex if not id else id
self.deleted = deleted
self.hidden_for_sender = hidden_for_sender
# Return dictionary representation of object
def dict(self):
return {
"title": self.title,
"body": self.body,
"from_id": self.from_id,
"from_name": self.from_name,
"to_id": self.to_id,
"to_name": self.to_name,
"timestamp": self.timestamp,
"date_string": self.date_string,
"id": self.id,
"deleted": self.deleted,
"hidden_for_sender": self.hidden_for_sender
}
|
167675
|
from .forecaster import *
from .forecasterModel import *
from .cumulativeBoW import *
import sys
if 'torch' in sys.modules:
from .CRAFTModel import *
from .CRAFT import *
|
167742
|
import random
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
import torchvision.transforms as transforms
PARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted
def int_parameter(level, maxval) -> int:
"""
    Scale a level in [0, PARAMETER_MAX] to a value between zero and maxval, cast to int
    :param level: The level of augmentation, an integer between 0 and 9
    :param maxval: The maximal output value
    :return: An int value
"""
return int(level * maxval / PARAMETER_MAX)
def float_parameter(level, maxval):
"""
    Scale a level in [0, PARAMETER_MAX] to a value between zero and maxval
    :param level: The level of augmentation, an integer between 0 and 9
    :param maxval: The maximal output value
    :return: A float value
"""
return float(level) * maxval / PARAMETER_MAX
class BaseAugmentation(object):
def __init__(self, p, level):
"""
        Base augmentation class that performs data augmentation with probability p and a given severity level
:param p: The probability that the augmentation is performed
:param level: The severity of data augmentation
"""
self.p = p
self.level = level
def __call__(self, img):
if random.random() < self.p:
img = self._augment(img)
return img
def _augment(self, img):
        raise NotImplementedError
def __repr__(self):
return self.__class__.__name__ + '(p={}, level={})'.format(self.p, self.level)
class AutoContrast(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs auto contrast.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
return ImageOps.autocontrast(img)
class Equalize(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Equalization.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
return ImageOps.equalize(img)
class Invert(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Invert.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
return ImageOps.invert(img)
class Blur(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Image Blur.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
return img.filter(ImageFilter.BLUR)
class Smooth(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Smoothing.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
return img.filter(ImageFilter.SMOOTH)
class Rotate(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs rotation.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
degrees = int_parameter(self.level, 30)
if random.random() > 0.5:
degrees = -degrees
return img.rotate(degrees)
class Posterize(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Posterize.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
level = int_parameter(self.level, 4)
return ImageOps.posterize(img, 4 - level)
class ShearX(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs ShearX.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
level = float_parameter(self.level, 0.3)
if random.random() > 0.5:
level = -level
return img.transform((32, 32), Image.AFFINE, (1, level, 0, 0, 1, 0))
class ShearY(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs ShearY.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
level = float_parameter(self.level, 0.3)
if random.random() > 0.5:
level = -level
return img.transform((32, 32), Image.AFFINE, (1, 0, 0, level, 1, 0))
class TranslateX(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs TranslateX.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
level = int_parameter(self.level, 10)
if random.random() > 0.5:
level = -level
return img.transform((32, 32), Image.AFFINE, (1, 0, level, 0, 1, 0))
class TranslateY(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs TranslateY.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
level = int_parameter(self.level, 10)
if random.random() > 0.5:
level = -level
return img.transform((32, 32), Image.AFFINE, (1, 0, 0, 0, 1, level))
class Solarize(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Solarize.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
level = int_parameter(self.level, 256)
return ImageOps.solarize(img, 256 - level)
class Color(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Color.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
v = float_parameter(self.level, 1.8) + .1 # going to 0 just destroys it
return ImageEnhance.Color(img).enhance(v)
class Contrast(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Contrast change.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
v = float_parameter(self.level, 1.8) + .1 # going to 0 just destroys it
return ImageEnhance.Contrast(img).enhance(v)
class Brightness(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Brightness change.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
v = float_parameter(self.level, 1.8) + .1 # going to 0 just destroys it
return ImageEnhance.Brightness(img).enhance(v)
class Sharpness(BaseAugmentation):
def _augment(self, img):
"""
        Data augmentation function that performs Sharpness change.
:param img: PIL Image to be augmented.
:return: PIL Image after augmentation
"""
v = float_parameter(self.level, 1.8) + .1 # going to 0 just destroys it
return ImageEnhance.Sharpness(img).enhance(v)
CIFAR10_AUGMENT_POLICY = transforms.RandomChoice([transforms.Compose([Invert(0.1, 7), Contrast(0.2, 6)]),
transforms.Compose([Rotate(0.7, 2), TranslateX(0.3, 9)]),
transforms.Compose([Sharpness(0.8, 1), Sharpness(0.9, 3)]),
transforms.Compose([ShearY(0.5, 8), TranslateY(0.7, 9)]),
transforms.Compose([AutoContrast(0.5, 8), Equalize(0.9, 2)]),
transforms.Compose([ShearY(0.2, 7), Posterize(0.3, 7)]),
transforms.Compose([Color(0.4, 3), Brightness(0.6, 7)]),
transforms.Compose([Sharpness(0.3, 9), Brightness(0.7, 9)]),
transforms.Compose([Equalize(0.6, 5), Equalize(0.5, 1)]),
transforms.Compose([Contrast(0.6, 7), Sharpness(0.6, 5)]),
transforms.Compose([Color(0.7, 7), TranslateY(0.5, 8)]),
transforms.Compose([Equalize(0.3, 7), AutoContrast(0.4, 8)]),
transforms.Compose([TranslateY(0.4, 3), Sharpness(0.2, 6)]),
transforms.Compose([Brightness(0.9, 6), Color(0.2, 8)]),
transforms.Compose([Solarize(0.5, 2), Invert(0.0, 3)]),
transforms.Compose([Equalize(0.2, 0), AutoContrast(0.6, 0)]),
transforms.Compose([Equalize(0.2, 8), Equalize(0.6, 4)]),
transforms.Compose([Color(0.9, 9), Equalize(0.6, 6)]),
transforms.Compose([AutoContrast(0.8, 4), Solarize(0.2, 8)]),
transforms.Compose([Brightness(0.1, 3), Color(0.7, 0)]),
transforms.Compose([Solarize(0.4, 5), AutoContrast(0.9, 3)]),
transforms.Compose([TranslateY(0.9, 9), TranslateY(0.7, 9)]),
transforms.Compose([AutoContrast(0.9, 2), Solarize(0.8, 3)]),
transforms.Compose([Equalize(0.8, 8), Invert(0.1, 3)]),
transforms.Compose([TranslateY(0.7, 9), AutoContrast(0.9, 1)])])
IMAGENET_AUGMENT_POLICY = transforms.RandomChoice([transforms.Compose([Posterize(0.4, 8), Rotate(0.6, 9)]),
transforms.Compose([Solarize(0.6, 5), AutoContrast(0.6, 5)]),
transforms.Compose([Equalize(0.8, 18), Equalize(0.6, 3)]),
transforms.Compose([Posterize(0.6, 7), Posterize(0.6, 6)]),
transforms.Compose([Equalize(0.4, 7), Solarize(0.2, 4)]),
transforms.Compose([Equalize(0.4, 4), Rotate(0.8, 8)]),
transforms.Compose([Solarize(0.6, 3), Equalize(0.6, 7)]),
transforms.Compose([Posterize(0.8, 5), Equalize(1.0, 2)]),
transforms.Compose([Rotate(0.2, 3), Solarize(0.6, 8)]),
transforms.Compose([Equalize(0.6, 8), Posterize(0.4, 6)]),
transforms.Compose([Rotate(0.8, 8), Color(0.4, 0)]),
transforms.Compose([Rotate(0.4, 9), Equalize(0.6, 2)]),
transforms.Compose([Equalize(0.0, 7), Equalize(0.8, 8)]),
transforms.Compose([Invert(0.6, 4), Equalize(1.0, 8)]),
transforms.Compose([Color(0.6, 4), Color(1.0, 8)]),
transforms.Compose([Rotate(0.8, 8), Color(1.0, 2)]),
transforms.Compose([Color(0.8, 8), Solarize(0.8, 7)]),
transforms.Compose([Sharpness(0.4, 7), Invert(0.6, 8)]),
transforms.Compose([ShearX(0.6, 5), Equalize(1.0, 9)]),
transforms.Compose([Color(0.4, 0), Equalize(0.6, 3)]),
transforms.Compose([Equalize(0.4, 7), Solarize(0.2, 4)]),
transforms.Compose([Solarize(0.6, 5), AutoContrast(0.6, 5)]),
transforms.Compose([Invert(0.6, 4), Equalize(1.0, 8)]),
transforms.Compose([Color(0.6, 4), Contrast(1.0, 8)]),
transforms.Compose([Equalize(0.8, 8), Equalize(0.6, 3)])])
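# Illustrative sketch (synthetic input): applies the CIFAR-10 policy once to a 32x32 RGB image;
# RandomChoice picks one sub-policy and each augmentation fires with its probability p.
if __name__ == "__main__":
    _img = Image.new("RGB", (32, 32), color=(128, 128, 128))
    _augmented = CIFAR10_AUGMENT_POLICY(_img)
    print(_augmented.size)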
|
167748
|
from pygrank.algorithms.utils import MethodHasher, call, ensure_used_args, remove_used_args
from pygrank.core.signals import GraphSignal, to_signal, NodeRanking
from pygrank.core import backend, GraphSignalGraph, GraphSignalData
from typing import Union, Optional
class Postprocessor(NodeRanking):
def __init__(self, ranker: NodeRanking = None):
self.ranker = ranker
def transform(self, ranks: GraphSignal, *args, **kwargs):
return to_signal(ranks, call(self._transform, kwargs, [ranks]))
def rank(self, *args, **kwargs):
ranks = self.ranker.rank(*args, **kwargs)
kwargs = remove_used_args(self.ranker.rank, kwargs)
return to_signal(ranks, call(self._transform, kwargs, [ranks]))
def _transform(self, ranks: GraphSignal, **kwargs):
raise Exception("_transform method not implemented for the class "+self.__class__.__name__)
def _reference(self):
return self.__class__.__name__
def references(self):
if self.ranker is None:
return [self._reference()]
refs = self.ranker.references()
ref = self._reference()
if ref is not None and len(ref) > 0:
refs.append(ref)
return refs
class Tautology(Postprocessor):
""" Returns ranks as-are.
Can be used as a baseline against which to compare other postprocessors or graph filters.
"""
def __init__(self, ranker: NodeRanking = None):
"""Initializes the Tautology postprocessor with a base ranker.
Args:
ranker: The base ranker instance. If None (default), this works as a base ranker that returns
a copy of personalization signals as-are or a conversion of backend primitives into signals.
"""
super().__init__(ranker)
def transform(self, ranks: GraphSignal, *args, **kwargs) -> GraphSignal:
return ranks
def rank(self,
graph: GraphSignalGraph = None,
personalization: GraphSignalData = None,
*args, **kwargs) -> GraphSignal:
if self.ranker is not None:
return self.ranker.rank(graph, personalization, *args, **kwargs)
return to_signal(graph, personalization)
def _reference(self):
return "tautology" if self.ranker is None else ""
class MabsMaintain(Postprocessor):
"""Forces node ranking posteriors to have the same mean absolute value as prior inputs."""
def __init__(self, ranker):
""" Initializes the postprocessor with a base ranker instance.
Args:
ranker: Optional. The base ranker instance. If None (default), a Tautology() ranker is created.
"""
super().__init__(Tautology() if ranker is None else ranker)
def rank(self, graph=None, personalization=None, *args, **kwargs):
personalization = to_signal(graph, personalization)
norm = backend.sum(backend.abs(personalization.np))
ranks = self.ranker(graph, personalization, *args, **kwargs)
if norm != 0:
ranks.np = ranks.np * norm / backend.sum(backend.abs(ranks.np))
return ranks
def _reference(self):
return "mabs preservation"
class Normalize(Postprocessor):
""" Normalizes ranks by dividing with their maximal value."""
def __init__(self,
ranker: Optional[Union[NodeRanking,str]] = None,
method: Optional[Union[NodeRanking,str]] = "max"):
""" Initializes the class with a base ranker instance. Args are automatically filled in and
re-ordered if at least one is provided.
Args:
ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
            method: Optional. Divide ranks either by their "max" (default) or by their "sum", or make them lie in the
                "range" [0,1] by subtracting their minimum before dividing by their range.
Example:
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
            >>> algorithm = pg.Normalize(algorithm, "sum")  # ranks will be divided by their sum
>>> ranks = algorithm.rank(graph, personalization)
Example (same outcome, simpler one-liner):
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
            >>> ranks = pg.Normalize("sum").transform(algorithm.rank(graph, personalization))
"""
if ranker is not None and not callable(getattr(ranker, "rank", None)):
ranker, method = method, ranker
if not callable(getattr(ranker, "rank", None)):
ranker = None
super().__init__(Tautology() if ranker is None else ranker)
self.method = method
def _transform(self, ranks: GraphSignal, **kwargs):
ensure_used_args(kwargs)
min_rank = 0
if self.method == "range":
max_rank = float(backend.max(ranks.np))
min_rank = float(backend.min(ranks.np))
elif self.method == "max":
max_rank = float(backend.max(ranks.np))
elif self.method == "sum":
max_rank = float(backend.sum(ranks.np))
else:
raise Exception("Can only normalize towards max or sum")
if min_rank == max_rank:
return ranks
ret = (ranks.np-min_rank) / (max_rank-min_rank)
return ret
def _reference(self):
if self.method == "range":
return "[0,1] " + self.method + " normalization"
return self.method+" normalization"
class Ordinals(Postprocessor):
""" Converts ranking outcome to ordinal numbers.
The highest rank is set to 1, the second highest to 2, etc.
"""
def __init__(self, ranker=None):
""" Initializes the class with a base ranker instance.
Args:
ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
Example:
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
>>> algorithm = pg.Ordinals(algorithm)
>>> ranks = algorithm.rank(graph, personalization)
Example (same outcome, simpler one-liner):
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
>>> ranks = pg.Ordinals().transform(algorithm.rank(graph, personalization))
"""
super().__init__(Tautology() if ranker is None else ranker)
def _transform(self, ranks: GraphSignal, **kwargs):
ensure_used_args(kwargs)
return {v: order+1 for order, v in enumerate(sorted(ranks, key=ranks.get, reverse=True))}
def _reference(self):
return "ordinal conversion"
class Transformer(Postprocessor):
"""Applies an element-by-element transformation on a graph signal based on a given expression."""
def __init__(self, ranker=None, expr=backend.exp):
""" Initializes the class with a base ranker instance. Args are automatically filled in and
re-ordered if at least one is provided.
Args:
ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
expr: Optional. A lambda expression to apply on each element. The transformer will automatically try to
apply it on the backend array representation of the graph signal first, so prefer pygrank's backend
functions for faster computations. For example, backend.exp (default) should be preferred instead of
math.exp, because the former can directly parse numpy arrays, tensors, etc.
Example:
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
>>> r1 = pg.Normalize(algorithm, "sum").rank(graph, personalization)
>>> r2 = pg.Transformer(algorithm, lambda x: x/pg.sum(x)).rank(graph, personalization)
>>> print(pg.Mabs(r1)(r2))
"""
if ranker is not None and not callable(getattr(ranker, "rank", None)):
ranker, expr = expr, ranker
if not callable(getattr(ranker, "rank", None)):
ranker = None
super().__init__(Tautology() if ranker is None else ranker)
self.expr = expr
def _transform(self, ranks: GraphSignal, **kwargs):
ensure_used_args(kwargs)
try:
return self.expr(ranks.np)
except:
return {v: self.expr(ranks[v]) for v in ranks}
def _reference(self):
return "element-by-element "+self.expr.__name__
class Threshold(Postprocessor):
""" Converts ranking outcome to binary values based on a threshold value."""
def __init__(self,
ranker: Union[str, float, NodeRanking] = None,
threshold: Union[str, float, NodeRanking] = "gap"):
""" Initializes the Threshold postprocessing scheme. Args are automatically filled in and
re-ordered if at least one is provided.
Args:
ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
threshold: Optional. The minimum numeric value required to output rank 1 instead of 0. If "gap" (default)
then its value is automatically determined based on the maximal percentage increase between consecutive
ranks.
Example:
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
>>> algorithm = pg.Threshold(algorithm, 0.5) # sets ranks >= 0.5 to 1 and lower ones to 0
>>> ranks = algorithm.rank(graph, personalization)
Example (same outcome):
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
>>> ranks = pg.Threshold(0.5).transform(algorithm.rank(graph, personalization))
"""
if ranker is not None and not callable(getattr(ranker, "rank", None)):
ranker, threshold = threshold, ranker
if not callable(getattr(ranker, "rank", None)):
ranker = None
super().__init__(Tautology() if ranker is None else ranker)
self.threshold = threshold
def _transform(self,
ranks: GraphSignal,
**kwargs):
ensure_used_args(kwargs)
threshold = self.threshold
if threshold == "gap":
# TODO maybe enable ranks = {v: ranks[v] / ranks.graph.degree(v) for v in ranks} with a postprocessor
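            # "gap" heuristic: place the threshold at the largest relative drop between consecutive rank values (sorted descending)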
max_diff = 0
threshold = 0
prev_rank = 0
for v in sorted(ranks, key=ranks.get, reverse=True):
if prev_rank > 0:
diff = (prev_rank - ranks[v]) / prev_rank
if diff > max_diff:
max_diff = diff
threshold = ranks[v]
prev_rank = ranks[v]
return {v: 1 if ranks[v] >= threshold else 0 for v in ranks.keys()}
def _reference(self):
return str(self.threshold)+" threshold"
class Sweep(Postprocessor):
"""
Applies a sweep procedure that divides personalized node ranks by corresponding non-personalized ones.
"""
def __init__(self,
ranker: NodeRanking,
uniform_ranker: NodeRanking = None):
"""
Initializes the sweep procedure.
Args:
ranker: The base ranker instance.
uniform_ranker: Optional. The ranker instance used to perform non-personalized ranking. If None (default)
the base ranker is used.
Example:
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
            >>> algorithm = pg.Sweep(algorithm)  # divides node scores by the base ranker's non-personalized outcome
            >>> ranks = algorithm.rank(graph, personalization)
Example with different rankers:
>>> import pygrank as pg
>>> graph, personalization, algorithm, uniform_ranker = ...
>>> algorithm = pg.Sweep(algorithm, uniform_ranker=uniform_ranker)
>>> ranks = algorithm.rank(graph, personalization)
Example (same outcome):
>>> import pygrank as pg
>>> graph, personalization, uniform_ranker, algorithm = ...
            >>> ranks = pg.Sweep(uniform_ranker).transform(algorithm.rank(graph, personalization))
"""
super().__init__(ranker)
self.uniform_ranker = ranker if uniform_ranker is None else uniform_ranker
self.centrality = MethodHasher(lambda graph: self.uniform_ranker.rank(graph), assume_immutability=True)
def _transform(self,
ranks: GraphSignal,
**kwargs):
ensure_used_args(kwargs)
uniforms = self.centrality(ranks.graph).np
return ranks.np/(1.E-12+uniforms)
def _reference(self):
if self.uniform_ranker != self.ranker:
return "sweep ratio postprocessing \\cite{andersen2007local} where non-personalized ranking is performed with a "+self.uniform_ranker.cite()
return "sweep ratio postprocessing \\cite{andersen2007local}"
|
167787
|
import numpy as np
import random
from q1_softmax import softmax
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
def forward_backward_prop(data, labels, params, dimensions):
"""
Forward and backward propagation for a two-layer sigmoidal network
Compute the forward propagation and for the cross entropy cost,
and backward propagation for the gradients for all parameters.
"""
### Unpack network parameters (do not modify)
ofs = 0
Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
ofs += Dx * H
b1 = np.reshape(params[ofs:ofs + H], (1, H))
ofs += H
W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
ofs += H * Dy
b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
### YOUR CODE HERE: forward propagation
h_per_item = sigmoid(np.dot(data, W1) + b1)
yhat_per_item = softmax(np.dot(h_per_item, W2) + b2)
cost = -np.sum(labels * np.log(yhat_per_item))
### END YOUR CODE
### YOUR CODE HERE: backward propagation
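    # With softmax outputs and a cross-entropy cost, the gradient w.r.t. the pre-softmax scores is simply (yhat - y)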
grad_softmax_per_item = yhat_per_item - labels
grad_b2 = np.sum(grad_softmax_per_item, axis=0, keepdims=True)
grad_W2 = np.dot(h_per_item.T, grad_softmax_per_item)
grad_sigmoid_per_item = sigmoid_grad(h_per_item)
grad_b1_per_item = np.dot(grad_softmax_per_item, W2.T) * grad_sigmoid_per_item
grad_b1 = np.sum(grad_b1_per_item, axis=0, keepdims=True)
grad_W1 = np.dot(data.T, grad_b1_per_item)
### END YOUR CODE
assert grad_b2.shape == b2.shape
assert grad_W2.shape == W2.shape
assert grad_b1.shape == b1.shape
assert grad_W1.shape == W1.shape
### Stack gradients (do not modify)
grad = np.concatenate((grad_W1.flatten(), grad_b1.flatten(),
grad_W2.flatten(), grad_b2.flatten()))
return cost, grad
def sanity_check():
"""
Set up fake data and parameters for the neural network, and test using
gradcheck.
"""
print "Running sanity check..."
N = 20
dimensions = [10, 5, 10]
data = np.random.randn(N, dimensions[0]) # each row will be a datum
labels = np.zeros((N, dimensions[2]))
for i in xrange(N):
labels[i,random.randint(0,dimensions[2]-1)] = 1
params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
dimensions[1] + 1) * dimensions[2], )
gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,
dimensions), params)
def your_sanity_checks():
"""
Use this space add any additional sanity checks by running:
python q2_neural.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print "Running your sanity checks..."
### YOUR CODE HERE
raise NotImplementedError
### END YOUR CODE
if __name__ == "__main__":
sanity_check()
your_sanity_checks()
|
167794
|
import numpy as np
import pykin.utils.transform_utils as t_utils
import pykin.utils.kin_utils as k_utils
import pykin.kinematics.jacobian as jac
from pykin.planners.planner import Planner
from pykin.utils.error_utils import OriValueError, CollisionError
from pykin.utils.kin_utils import ShellColors as sc, logging_time
from pykin.utils.log_utils import create_logger
from pykin.utils.transform_utils import get_linear_interpoation, get_quaternion_slerp
logger = create_logger('Cartesian Planner', "debug",)
class CartesianPlanner(Planner):
"""
path planner in Cartesian space
Args:
robot(SingleArm or Bimanual): The manipulator robot type is SingleArm or Bimanual
self_collision_manager: CollisionManager for robot's self collision check
object_collision_manager: CollisionManager for collision check between robot and object
n_step(int): Number of waypoints
dimension(int): robot arm's dof
waypoint_type(str): Type of waypoint ex) "Linear", "Cubic", "Circular"
"""
def __init__(
self,
robot,
self_collision_manager=None,
object_collision_manager=None,
n_step=500,
dimension=7,
waypoint_type="Linear"
):
super(CartesianPlanner, self).__init__(
robot,
self_collision_manager,
object_collision_manager,
dimension)
self.n_step = n_step
self.waypoint_type = waypoint_type
self.eef_name = self.robot.eef_name
self.arm = None
self._dimension = dimension
super()._setup_q_limits()
super()._setup_eef_name()
def __repr__(self):
return 'pykin.planners.cartesian_planner.{}()'.format(type(self).__name__)
@logging_time
def get_path_in_joinst_space(
self,
current_q=None,
goal_pose=None,
waypoints=None,
resolution=1,
damping=0.5,
epsilon=1e-12,
pos_sensitivity=0.03,
is_slerp=False
):
self._cur_qpos = super()._change_types(current_q)
self._goal_pose = super()._change_types(goal_pose)
init_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
self._cur_pose = self.robot.get_eef_pose(init_fk)
self._resolution = resolution
self._damping = damping
self._pos_sensitivity = pos_sensitivity
self._is_slerp = is_slerp
if waypoints is None:
waypoints = self.generate_waypoints(is_slerp)
paths, target_positions = self._compute_path_and_target_pose(waypoints, epsilon)
return paths, target_positions
def _compute_path_and_target_pose(self, waypoints, epsilon):
cnt = 0
total_cnt = 10
while True:
cnt += 1
collision_pose = {}
cur_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
current_transform = cur_fk[self.eef_name].h_mat
eef_position = cur_fk[self.eef_name].pos
paths = [self._cur_qpos]
target_positions = [eef_position]
for step, (pos, ori) in enumerate(waypoints):
target_transform = t_utils.get_h_mat(pos, ori)
err_pose = k_utils.calc_pose_error(target_transform, current_transform, epsilon)
J = jac.calc_jacobian(self.robot.desired_frames, cur_fk, self._dimension)
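                # Damped least-squares (singularity-robust) pseudo-inverse: J^T (J J^T + damping^2 * I)^-1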
J_dls = np.dot(J.T, np.linalg.inv(np.dot(J, J.T) + self._damping**2 * np.identity(6)))
dq = np.dot(J_dls, err_pose)
self._cur_qpos = np.array([(self._cur_qpos[i] + dq[i]) for i in range(self._dimension)]).reshape(self._dimension,)
is_collision_free = self._collision_free(self._cur_qpos)
if not is_collision_free:
_, name = self.self_c_manager.in_collision_other(other_manager=self.object_c_manager, return_names=True)
collision_pose[step] = (name, np.round(target_transform[:3,3], 6))
continue
if not self._check_q_in_limits(self._cur_qpos):
continue
cur_fk = self.robot.kin.forward_kinematics(self.robot.desired_frames, self._cur_qpos)
current_transform = cur_fk[self.robot.eef_name].h_mat
if step % (1/self._resolution) == 0 or step == len(waypoints)-1:
paths.append(self._cur_qpos)
target_positions.append(pos)
err = t_utils.compute_pose_error(self._goal_pose[:3], cur_fk[self.eef_name].pos)
if collision_pose.keys():
logger.error(f"Failed Generate Path.. Collision may occur.")
for name, pose in collision_pose.values():
logger.warning(f"\n\tCollision Names : {name} \n\tCollision Position : {pose}")
# logger.warning(f"Collision Position : {pose}")
raise CollisionError("Conflict confirmed. Check the object position!")
if err < self._pos_sensitivity:
logger.info(f"Generate Path Successfully!! Error is {err:6f}")
break
if cnt > total_cnt:
logger.error(f"Failed Generate Path.. The number of retries of {cnt} exceeded")
paths, target_positions = None, None
break
logger.error(f"Failed Generate Path.. Position Error is {err:6f}")
print(f"{sc.BOLD}Retry Generate Path, the number of retries is {cnt}/{total_cnt} {sc.ENDC}\n")
return paths, target_positions
# TODO
# generate cubic, circular waypoints
def generate_waypoints(self, is_slerp):
if self.waypoint_type == "Linear":
waypoints = [path for path in self._get_linear_path(self._cur_pose, self._goal_pose, is_slerp)]
if self.waypoint_type == "Cubic":
pass
if self.waypoint_type == "Circular":
pass
return waypoints
def get_waypoints(self):
return self.waypoints
def _change_pose_type(self, pose):
ret = np.zeros(7)
ret[:3] = pose[:3]
if isinstance(pose, (list, tuple)):
pose = np.asarray(pose)
ori = pose[3:]
if ori.shape == (3,):
ori = t_utils.get_quaternion_from_rpy(ori)
ret[3:] = ori
elif ori.shape == (4,):
ret[3:] = ori
else:
raise OriValueError(ori.shape)
return ret
def _get_linear_path(self, init_pose, goal_pose, is_slerp):
for step in range(1, self.n_step + 1):
delta_t = step / self.n_step
pos = get_linear_interpoation(init_pose[:3], goal_pose[:3], delta_t)
ori = init_pose[3:]
if is_slerp:
ori = get_quaternion_slerp(init_pose[3:], goal_pose[3:], delta_t)
yield (pos, ori)
def _get_cubic_path(self):
pass
def _get_cicular_path(self):
pass
@property
def resolution(self):
return self._resolution
@resolution.setter
def resolution(self, resolution):
self._resolution = resolution
@property
def damping(self):
return self._damping
@damping.setter
def damping(self, damping):
self._damping = damping
@property
def pos_sensitivity(self):
return self._pos_sensitivity
@pos_sensitivity.setter
def pos_sensitivity(self, pos_sensitivity):
self._pos_sensitivity = pos_sensitivity
@property
def is_slerp(self):
return self._is_slerp
@is_slerp.setter
def is_slerp(self, is_slerp):
self._is_slerp = is_slerp
|
167799
|
from time import sleep
from .core import Attempt, Question, TestCase
import pymongo
import random
def _attempt_checker(mongo_uri):
db = pymongo.MongoClient(mongo_uri)
db = db.openjudge
while True:
attempt = db.attempt_queue.find_one_and_delete({})
if attempt is None:
sleep(random.random())
else:
att = Attempt()
att.__dict__ = attempt
# has this been answered correctly earlier?
query = {"qid": att.qid, "user": att.user,
"status": True}
if db.history.find_one(query) is None:
q = db.questions.find_one({"qid": att.qid})
que = Question()
que.__dict__ = q
que.test_cases = [TestCase(**t) for t in q['test_cases']]
checked_attempt = que(att)
else:
att.status = False
att.log = None
checked_attempt = att
db.history.insert_one(checked_attempt.__dict__)
def run_judge(mongo_uri, n_judges):
_attempt_checker(mongo_uri)
|
167834
|
from eager_core.engine_params import EngineParams
class GazeboEngine(EngineParams):
"""
Gazebo engine parameters for EAGER environments.
This class includes all settings available for the Gazebo physics engine.
:param world: A path to a Gazebo world (.world) file.
:param dt: The time step when :func:`eager_core.eager_env.EagerEnv.step` is called
:param max_update_rate: The maximum amount of steps within a second
    :param gui: Will run Gazebo without a GUI if set to False
:param seed: The seed for the physics simulation
"""
def __init__(self,
world: str = '$(find eager_bridge_gazebo)/worlds/eager_empty.world',
dt: float = 0.08,
max_update_rate: float = 0.0, # 0.0 --> simulate as fast as possible
gui: bool = True,
seed=None):
# Only define variables (locally) you wish to store on the parameter server (done in baseclass constructor).
bridge_type = 'gazebo'
launch_file = '$(find eager_bridge_%s)/launch/%s.launch' % (bridge_type, bridge_type)
# Store parameters as properties in baseclass
# IMPORTANT! Do not define variables locally you do **not** want to store
# on the parameter server anywhere before calling the baseclass' constructor.
kwargs = locals().copy()
kwargs.pop('self')
if seed is None:
kwargs.pop('seed')
super(GazeboEngine, self).__init__(**kwargs)
# Calculate other parameters based on previously defined attributes.
self.time_step = self.dt
# Error check the parameters here.
|
167849
|
import data_helper
import keras
import numpy as np
import defs
import precision_recall
import sys
import os
import shutil
import math
def sq_error(vector1, vector2):
if len(vector1) != len(vector2):
raise ValueError("vectors not of the same size")
return sum(math.pow(vector1[i] - vector2[i], 2) for i in range(0, len(vector1)))
def interpret_result(yhati, threshold=0.5):
"""
:param yhati: result of prediction for a file
:return: String: the language
"""
for i in range(0, len(yhati)):
if yhati[i] > threshold:
return defs.langs[i]
print 'usage: test_run.py [model file] [folder to copy failed files]'
folderName = None
modelFile = './save_tmp.h5'
if len(sys.argv) > 1:
modelFile = sys.argv[1]
if len(sys.argv) > 2:
folderName = sys.argv[2]
if not os.path.exists(folderName):
os.mkdir(folderName)
X, Y, Z = data_helper.get_input_and_labels(data_helper.test_root_folder,
defs.file_characters_truncation_limit,
max_files=1000)
x = np.array(X)
model = keras.models.load_model(modelFile)
y_hat = model.predict(x)
success = 0
class_success = {}
class_count = {}
expecteds = []
predicteds = []
sum_sq_error = 0
for i in range(0, len(y_hat)):
yi = Y[i]
y_hati = y_hat[i]
expected = interpret_result(yi)
predicted = interpret_result(y_hati)
expecteds.append(yi)
predicteds.append(y_hati)
sum_sq_error += sq_error(yi, y_hati)
if expected not in class_count:
class_count[expected] = 1
else:
class_count[expected] += 1
if expected == predicted:
success += 1
if predicted not in class_success:
class_success[predicted] = 1
else:
class_success[predicted] += 1
elif folderName is not None:
fn = os.path.basename(Z[i])
lang = os.path.basename(os.path.dirname(Z[i]))
langFolder = os.path.join(folderName, lang)
if not os.path.exists(langFolder):
os.mkdir(langFolder)
shutil.copyfile(Z[i], os.path.join(langFolder, fn))
print "Final result: {}/{} ({})".format(success, len(y_hat), (success * 1.0 / len(y_hat) * 1.0))
prs = precision_recall.calculate_precision_recall(expecteds, predicteds, defs.langs)
for c in prs:
print "{} - Precision:{} Recall: {}".format(prs[c].get_name(), prs[c].precision(), prs[c].recall())
for key in class_count:
if key not in class_success:
class_success[key] = 0
print "{}:\t\t{}/{} ({})".format(key, class_success[key], class_count[key], class_success[key] * 1.0 / class_count[key] * 1.0)
print "Sum of Squared Error: {}".format(sum_sq_error)
|
167864
|
import ast
import logging
import six
import sys
TRUST_AST_TYPES = (ast.Call, ast.Module, ast.List, ast.Tuple, ast.Dict, ast.Name, ast.Num, ast.Str,
ast.Assign, ast.Load)
if sys.version_info[:2] == (3, 3):
TRUST_AST_TYPES = TRUST_AST_TYPES + (ast.Bytes,)
elif six.PY3:
TRUST_AST_TYPES = TRUST_AST_TYPES + (ast.Bytes, ast.NameConstant)
class InvalidETLConfig(Exception):
pass
builtin_macros = [
'KEEP_EVENT_',
'DROP_EVENT_',
'KEEP_FIELDS_',
'DROP_FIELDS_',
'RENAME_FIELDS_',
'ALIAS_',
'DISPATCH_EVENT_',
'TRANSFORM_EVENT_',
'KV_FIELDS_'
]
built_in_fns = ['V', 'JSON', 'CSV', 'REGEX', 'EMPTY', 'NO_EMPTY', 'DROP_F', 'KV', 'TSV', 'PSV', 'LOOKUP', 'SPLIT', 'ZIP']
built_in_ids = ['KV', 'ANY', 'ALL', 'F_TIME', 'F_META', 'F_TAGS', 'SPLIT', 'JSON', 'True', 'False', 'None']
logger = logging.getLogger(__name__)
class RestrictConfigParser(ast.NodeVisitor):
def visit_ImportFrom(self, node):
if node.module == 'aliyun.log.etl_core' and len(node.names) == 1 and node.names[0].name == '*':
logger.info("[Passed] import detected: from aliyun.log.etl_core import *")
else:
raise InvalidETLConfig("unknown import: {0}".format(node.module))
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
if isinstance(node.func.ctx, ast.Load) and node.func.id in built_in_fns:
logger.info("[Passed] known call detected")
else:
raise InvalidETLConfig("unknown call id detected: {0}".format(node.func.id))
else:
raise InvalidETLConfig("unknown call type detected: {0}".format(node.func))
def visit_Name(self, node):
if isinstance(node.ctx, ast.Store):
for p in builtin_macros:
if node.id.startswith(p):
                logger.info('[Passed] assign detected: %s', node.id)
break
else:
                raise InvalidETLConfig('unknown assign detected: {0}'.format(node.id))
elif isinstance(node.ctx, ast.Load):
if node.id in built_in_ids:
                logger.info(' [Passed] assigned name: %s', node.id)
else:
                raise InvalidETLConfig('unknown load detected: {0}'.format(node.id))
else:
raise InvalidETLConfig("unknown Name: {0}".format(node.id))
def generic_visit(self, node):
if isinstance(node, TRUST_AST_TYPES):
logger.info("... known type detected: ", type(node))
else:
raise InvalidETLConfig("unknown type detected: {0}".format(type(node)))
ast.NodeVisitor.generic_visit(self, node)
def parse(self, code):
self.visit(ast.parse(code))
|
167894
|
from . import util, config
from datetime import date
import sys
import re
import requests
from pathlib import Path
import json
import boto3
import gzip
DefaultVersion = 'v2'
class StatIds:
Combined = 'combined'
MedianTripTimes = 'median-trip-times'
AllStatIds = [
StatIds.Combined,
StatIds.MedianTripTimes,
]
class PrecomputedStats:
def __init__(self, data):
self.data = data
def get_direction_stats(self, route_id, direction_id):
routes_data = self.data['routes']
route_data = routes_data.get(route_id, None)
if route_data is None:
return None
return route_data['directions'].get(direction_id, None)
def get_direction_stat_value(self, route_id, direction_id, stat_key):
dir_stats = self.get_direction_stats(route_id, direction_id)
if dir_stats is None:
return None
return dir_stats.get(stat_key, None)
def get_stop_stat_value(self, route_id, direction_id, stat_key, stop_id):
stops_map = self.get_direction_stat_value(route_id, direction_id, stat_key)
if stops_map is None:
return None
return stops_map.get(stop_id, None)
def get_trip_time_stats(self, route_id, direction_id, start_stop_id, end_stop_id):
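        # NOTE (inferred from the getters below): each trip-time entry appears to be a list laid out as [p10, median, p90, num_trips]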
start_stop_stats = self.get_stop_stat_value(route_id, direction_id, 'tripTimes', start_stop_id)
if start_stop_stats is None:
return None
return start_stop_stats.get(end_stop_id, None)
def get_median_trip_time(self, route_id, direction_id, start_stop_id, end_stop_id):
trip_time_stats = self.get_trip_time_stats(route_id, direction_id, start_stop_id, end_stop_id)
if trip_time_stats is None:
return None
return trip_time_stats[1]
def get_p10_trip_time(self, route_id, direction_id, start_stop_id, end_stop_id):
trip_time_stats = self.get_trip_time_stats(route_id, direction_id, start_stop_id, end_stop_id)
if trip_time_stats is None:
return None
return trip_time_stats[0]
def get_p90_trip_time(self, route_id, direction_id, start_stop_id, end_stop_id):
trip_time_stats = self.get_trip_time_stats(route_id, direction_id, start_stop_id, end_stop_id)
if trip_time_stats is None:
return None
return trip_time_stats[2]
def get_num_trips(self, route_id, direction_id, start_stop_id, end_stop_id):
trip_time_stats = self.get_trip_time_stats(route_id, direction_id, start_stop_id, end_stop_id)
if trip_time_stats is None:
return None
return trip_time_stats[3]
def get_median_wait_time(self, route_id, direction_id):
return self.get_direction_stat_value(route_id, direction_id, 'medianWaitTime')
def get_median_headway(self, route_id, direction_id):
return self.get_direction_stat_value(route_id, direction_id, 'medianHeadway')
def get_on_time_rate(self, route_id, direction_id):
return self.get_direction_stat_value(route_id, direction_id, 'onTimeRate')
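# Usage sketch (hypothetical data, for illustration only):
#
#   stats = PrecomputedStats({
#       'routes': {'1': {'directions': {'0': {'medianWaitTime': 5.0}}}}
#   })
#   stats.get_median_wait_time('1', '0')  # -> 5.0
#   stats.get_median_wait_time('1', '1')  # -> None (unknown direction)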
def get_precomputed_stats(agency_id, stat_id: str, d: date, start_time_str = None, end_time_str = None, scheduled = False, version = DefaultVersion) -> PrecomputedStats:
cache_path = get_cache_path(agency_id, stat_id, d, start_time_str, end_time_str, scheduled, version)
try:
with open(cache_path, "r") as f:
text = f.read()
return PrecomputedStats(json.loads(text))
    except FileNotFoundError:
pass
s3_bucket = config.s3_bucket
s3_path = get_s3_path(agency_id, stat_id, d, start_time_str, end_time_str, scheduled, version)
s3_url = f"http://{s3_bucket}.s3.amazonaws.com/{s3_path}"
r = requests.get(s3_url)
if r.status_code == 404:
raise FileNotFoundError(f"{s3_url} not found")
if r.status_code == 403:
raise FileNotFoundError(f"{s3_url} not found or access denied")
if r.status_code != 200:
raise Exception(f"Error fetching {s3_url}: HTTP {r.status_code}: {r.text}")
data = json.loads(r.text)
cache_dir = Path(cache_path).parent
if not cache_dir.exists():
cache_dir.mkdir(parents = True, exist_ok = True)
with open(cache_path, "w") as f:
f.write(r.text)
return PrecomputedStats(data)
def get_time_range_path(start_time_str, end_time_str):
if start_time_str is None and end_time_str is None:
return ''
else:
return f'_{start_time_str.replace(":","")}_{end_time_str.replace(":","")}'
def get_s3_path(agency_id: str, stat_id: str, d: date, start_time_str, end_time_str, scheduled=False, version = DefaultVersion) -> str:
time_range_path = get_time_range_path(start_time_str, end_time_str)
date_str = str(d)
date_path = d.strftime("%Y/%m/%d")
prefix = "scheduled-stats" if scheduled else "observed-stats"
return f"{prefix}/{version}/{agency_id}/{date_path}/{prefix}_{version}_{agency_id}_{stat_id}_{date_str}{time_range_path}.json.gz"
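# Example output (illustrative values; 'muni' is a hypothetical agency id):
#   get_s3_path('muni', 'combined', date(2020, 3, 1), None, None) ->
#   "observed-stats/v2/muni/2020/03/01/observed-stats_v2_muni_combined_2020-03-01.json.gz"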
def get_cache_path(agency_id: str, stat_id: str, d: date, start_time_str, end_time_str, scheduled=False, version = DefaultVersion) -> str:
time_range_path = get_time_range_path(start_time_str, end_time_str)
date_str = str(d)
    if re.match(r'^[\w\-]+$', agency_id) is None:
        raise Exception(f"Invalid agency: {agency_id}")
    if re.match(r'^[\w\-]+$', stat_id) is None:
        raise Exception(f"Invalid stat id: {stat_id}")
    if re.match(r'^[\w\-]+$', date_str) is None:
        raise Exception(f"Invalid date: {date_str}")
    if re.match(r'^[\w\-]+$', version) is None:
        raise Exception(f"Invalid version: {version}")
    if re.match(r'^[\w\-\+]*$', time_range_path) is None:
raise Exception(f"Invalid time range: {time_range_path}")
prefix = "scheduled-stats" if scheduled else "observed-stats"
return f'{util.get_data_dir()}/{prefix}_{version}_{agency_id}/{date_str}/{prefix}_{version}_{agency_id}_{stat_id}_{date_str}{time_range_path}.json'
def save_stats(agency_id, stat_id, d, start_time_str, end_time_str, scheduled, data, save_to_s3=False):
data_str = json.dumps({
'version': DefaultVersion,
'stat_id': stat_id,
'start_time': start_time_str,
'end_time': end_time_str,
**data
}, separators=(',', ':'))
cache_path = get_cache_path(agency_id, stat_id, d, start_time_str, end_time_str, scheduled)
cache_dir = Path(cache_path).parent
if not cache_dir.exists():
cache_dir.mkdir(parents = True, exist_ok = True)
print(f'saving to {cache_path}')
with open(cache_path, "w") as f:
f.write(data_str)
if save_to_s3:
s3 = boto3.resource('s3')
s3_path = get_s3_path(agency_id, stat_id, d, start_time_str, end_time_str, scheduled)
s3_bucket = config.s3_bucket
print(f'saving to s3://{s3_bucket}/{s3_path}')
object = s3.Object(s3_bucket, s3_path)
object.put(
Body=gzip.compress(bytes(data_str, 'utf-8')),
CacheControl='max-age=86400',
ContentType='application/json',
ContentEncoding='gzip',
ACL='public-read'
)
|
167931
|
from gzip import GzipFile
from exporters.readers import FSReader
from exporters.exceptions import ConfigurationError
from .utils import meta
import pytest
class FSReaderTest(object):
@classmethod
def setup_class(cls):
cls.options = {
'input': {
'dir': './tests/data/fs_reader_test',
}
}
cls.options_pointer = {
'input': {
'dir_pointer': './tests/data/fs_reader_pointer',
}
}
cls.options_empty_folder = {
'input': {
'dir': './tests/data/fs_reader_empty_folder',
}
}
@staticmethod
def _make_fs_reader(options):
full_config = {
'name': 'exporters.readers.fs_reader.FSReader',
'options': options
}
reader = FSReader(full_config, meta())
reader.set_last_position(None)
return reader
def test_read_from_folder(self):
expected = [
{u'item': u'value1'}, {u'item': u'value2'}, {u'item': u'value3'},
{u'item2': u'value1'}, {u'item2': u'value2'}, {u'item2': u'value3'},
]
reader = self._make_fs_reader(self.options)
batch = list(reader.get_next_batch())
assert expected == batch
def test_read_from_pointer(self):
expected = [
{u'item': u'value1'}, {u'item': u'value2'}, {u'item': u'value3'},
{u'item2': u'value1'}, {u'item2': u'value2'}, {u'item2': u'value3'},
]
reader = self._make_fs_reader(self.options_pointer)
batch = list(reader.get_next_batch())
assert expected == batch
def test_read_from_empty_folder(self):
reader = self._make_fs_reader(self.options_empty_folder)
list(reader.get_next_batch())
assert reader.is_finished()
def test_read_from_file(self):
reader = self._make_fs_reader({
'input': './tests/data/fs_reader_test/fs_test_data.jl.gz',
})
batch = list(reader.get_next_batch())
expected = [
{u'item': u'value1'}, {u'item': u'value2'}, {u'item': u'value3'}
]
assert expected == batch
def test_read_from_multiple_files(self):
reader = self._make_fs_reader({
'input': [
'./tests/data/fs_reader_test/fs_test_data.jl.gz',
'./tests/data/fs_reader_test/fs_test_data_2.jl.gz',
]
})
batch = list(reader.get_next_batch())
expected = [
{u'item': u'value1'}, {u'item': u'value2'}, {u'item': u'value3'},
{u'item2': u'value1'}, {u'item2': u'value2'}, {u'item2': u'value3'},
]
assert expected == batch
def test_read_from_file_and_dir(self):
reader = self._make_fs_reader({
'input': [
'./tests/data/fs_reader_test/fs_test_data.jl.gz',
{'dir': './tests/data/fs_reader_test'}
]
})
batch = list(reader.get_next_batch())
expected = [
{u'item': u'value1'}, {u'item': u'value2'}, {u'item': u'value3'},
{u'item2': u'value1'}, {u'item2': u'value2'}, {u'item2': u'value3'},
]
assert expected == batch
def test_dir_specification_no_dir_or_dir_pointer(self):
with pytest.raises(ConfigurationError) as err:
self._make_fs_reader({'input': {}})
assert str(err.value) == ('Input directory dict must contain "dir"'
' or "dir_pointer" element (but not both)')
def test_dir_specification_both_dir_and_dir_pointer(self):
with pytest.raises(ConfigurationError) as err:
self._make_fs_reader({
'input': {'dir': './foo', 'dir_pointer': './bar'}
})
assert str(err.value) == ('Input directory dict must not contain both'
' "dir" and "dir_pointer" elements')
def test_dir_specification_with_pattern(self):
reader = self._make_fs_reader({
'input': {
'dir': './tests/data/fs_reader_test/',
                'pattern': r'fs_reader_test/[^/]+2\.jl\.gz$'
}
})
expected = [
{u'item2': u'value1'}, {u'item2': u'value2'}, {u'item2': u'value3'},
]
batch = list(reader.get_next_batch())
assert expected == batch
def test_dot_files_ignored_by_default(self, tmpdir_with_dotfiles):
reader = self._make_fs_reader({'input': {
'dir': tmpdir_with_dotfiles.strpath,
}})
assert list(reader.get_next_batch()) == [{"bar": 1}]
reader = self._make_fs_reader({'input': {
'dir': tmpdir_with_dotfiles.strpath,
'pattern': r'/\.[^/]*$',
}})
assert list(reader.get_next_batch()) == []
def test_dot_files_included_with_flag(self, tmpdir_with_dotfiles):
reader = self._make_fs_reader({'input': {
'dir': tmpdir_with_dotfiles.strpath,
'pattern': r'/\.[^/]*$',
'include_dot_files': True,
}})
assert list(reader.get_next_batch()) == [{"foo": 1}]
reader = self._make_fs_reader({'input': {
'dir': tmpdir_with_dotfiles.strpath,
'include_dot_files': True,
}})
assert list(reader.get_next_batch()) == [{"foo": 1}, {"bar": 1}]
@pytest.fixture
def tmpdir_with_dotfiles(tmpdir):
with GzipFile(tmpdir.join('.foo.jl.gz').strpath, 'w') as zf:
zf.write('{"foo": 1}')
with GzipFile(tmpdir.join('bar.jl.gz').strpath, 'w') as zf:
zf.write('{"bar": 1}')
return tmpdir
|
167932
|
import wikiquotes
import os
from time import sleep
from sys import argv
from sys import exit
from getpass import getpass
from random import randint
from PIL import Image, ImageDraw, ImageFont, ImageStat
wd = os.getcwd() # Get the working directory
def getInstagramFile(username):
"""
This function handles the call to the instagram-scraper
and the actual download of instagram images.
It does so by calling a command line with os.system()
Arguments taken:
username: the instagram account name
Returns:
the result of keepRandomFile()
Internal calls:
getLogin()
keepRandomFile()
"""
usr, pswd = getLogin() # Get a username and password
current = os.listdir(wd) # Get the current files in the working directory
os.system("instagram-scraper "+username+" -u "+usr+" -p "+pswd+" -d "+wd+" -m 20 -t image")
new = os.listdir(wd) # Once downloaded, get the new list of files in the working directory
return keepRandomFile(current, new) # Return a random file
def keepRandomFile(current, new):
"""
This function selects a random file from the previously
downloaded with instagram-scraper and deletes the rest.
Arguments taken:
current: the list of current files in the wd
new: the list of new files in the wd, with the instagram pictures
Returns:
toKeep: the name of the randomly selected file
"""
for item in current: # Iterate the two lists of files to find the original ones
if item in new:
new.remove(item) # Remove the original from the list of new ones
flag = True # Flag used to separate .jpg from other formats
while flag:
toKeep = new[randint(0,len(new)-1)] # Pick a random file
if toKeep.endswith('.jpg') or toKeep.endswith('.png'):
flag = False # Exit the loop if it is a .jpg or .png file
for item in new: # Delete the files you don't want to keep
if toKeep in item:
pass
else:
os.remove(os.path.join(wd,item))
return toKeep
def getLogin():
"""
Deactivated by default
This function handles the login session for the instagram-scraper
using the getpass library through console input.
If no user or password are written the function
will return "a" as password and username. This is ok for
instagram-scraper and will get the public pictures from
the selected profile.
Arguments taken:
None
Returns:
usr: instagram username
pswd: instagram password
"""
return "a", "a" # Comment this line for login options
# Uncomment the next lines for login options
"""
usr = raw_input("IG Username (yours, optional): ")
pswd = str(getpass())
if pswd == "" or usr == "":
return "a", "a"
else:
return usr,pswd
"""
def getQuote(author, language = False):
"""
This function retrieves a random quote from the selected
wikiquote page.
    Arguments taken:
        author: the page to retrieve the quote from
        language: the language code used to retrieve the quote (e.g. "en" or "es"),
            or False to try several languages
    Returns:
        quote: the quote itself in a string
        count: the length of the quote in words
"""
count = float('inf')
languages = ["en","es","de","fr","it"] # Languages to search the author
while count > 15: # Keep it short, no quotes longer than 15 words
if language == False:
            for lang in languages: # Iterate through the languages until one returns a result
                try:
                    quote = wikiquotes.random_quote(author,lang)
                    break
                except Exception:
                    continue
            else:
                exit("Couldn't get a quote in any of the supported languages.")
else:
try:
quote = wikiquotes.random_quote(author,language)
            except Exception:
                exit("Couldn't get the quote for some reason.")
count = quote.split(" ")
count = len(count) # Count the words in the sentence
if '"' in quote:
quote = quote[1:-1]
return quote, count
def writeOnImage(picture,quoteL):
"""
This function opens the image for the PIL library to work with.
It also handles the size of the text and the position it should
go inside the picture. It writes to the pictures and saves it
in the current working directory.
Arguments taken:
picture: the name of the file to edit
quoteL: the quote, divided in two
Returns:
"1.png": the name of the file to save
Internal calls:
complementaryColor()
"""
txtPos = []
try:
img = Image.open(picture) # Open the image with the PIL library
except:
exit("Something went wrong opening the picture")
width, height = img.size # Get the size of the image
hFourth = height/4 # Get the fourth of the height of the image
wThird = width/3 # Get the third of the width of the image
fontsPath = os.path.join(wd,"fonts")
fonts = os.listdir(fontsPath)
scale = 0.08
def calculateFontSize(scale, width, height, quoteL, fontsPath):
"""
Calculates the size of the font depending on the
width of the given image.
Arguments taken:
scale: the scale of the font against the image
width: width of the image
height: height of the image
quoteL: the quote to write
fontsPath: folder where the fonts exist
Returns:
tSize: text size in pixels
fontSize: the size of the calculated font
fnt: the font in ImageFont format
"""
fontSize = int(height*scale)
        # Pick a random font from the fonts folder if any are available
if fonts:
fnt = ImageFont.truetype(os.path.join(fontsPath,fonts[randint(0,len(fonts)-1)]),fontSize)
else:
            fnt = ImageFont.truetype(os.path.join(fontsPath, 'IndieFlower.ttf'), fontSize) # Default fallback font
tSize = [d.textsize(quoteL[0],font = fnt),d.textsize(quoteL[1],font = fnt)]
# Check if the text fits the image
if tSize[0][0] > width or tSize[1][0] > width:
scale = scale - 0.005
return calculateFontSize(scale, width, height, quoteL, fontsPath)
else:
return tSize, int(fontSize), fnt
d = ImageDraw.Draw(img) # Open the image to write on it
# Get the size that will have the text in the picture as a list
# where position [0] is the first half of the sentence
# and position [1] the second, to be able to fit it in the picture.
tSize, fontSize, fnt = calculateFontSize(scale, width, height, quoteL, fontsPath)
# tSize = [d.textsize(quoteL[0],font = fnt),d.textsize(quoteL[1],font = fnt)]
# Complementary color for the outline, median for the text
border_color = complementaryColor(img)
text_color = medianColor(img)
    # Determine the position of the first line of text: center it horizontally
    # (half the image width minus half the text width, tSize[0][0] = text width)
    # and place it at three fourths of the image height. This is a tuple.
    txtPos.append((((width/2)-(int(tSize[0][0])/2)),hFourth*3))
    # Determine the position of the second line of text as before, but
    # adding the size of the font to fit it just below the first line
    txtPos.append((((width/2)-(int(tSize[1][0])/2)),hFourth*3+fontSize))
# Draw borders for outlined font
border_width = 1
d.text((txtPos[0][0]-border_width, txtPos[0][1]-border_width), quoteL[0], font=fnt, fill=border_color)
d.text((txtPos[0][0]+border_width, txtPos[0][1]-border_width), quoteL[0], font=fnt, fill=border_color)
d.text((txtPos[0][0]-border_width, txtPos[0][1]+border_width), quoteL[0], font=fnt, fill=border_color)
d.text((txtPos[0][0]+border_width, txtPos[0][1]+border_width), quoteL[0], font=fnt, fill=border_color)
d.text((txtPos[1][0]-border_width, txtPos[1][1]-border_width), quoteL[1], font=fnt, fill=border_color)
d.text((txtPos[1][0]+border_width, txtPos[1][1]-border_width), quoteL[1], font=fnt, fill=border_color)
d.text((txtPos[1][0]-border_width, txtPos[1][1]+border_width), quoteL[1], font=fnt, fill=border_color)
d.text((txtPos[1][0]+border_width, txtPos[1][1]+border_width), quoteL[1], font=fnt, fill=border_color)
# Write the first half of the text using the parameters calculated above
d.text(txtPos[0], quoteL[0], font=fnt, fill = text_color)
# d.text((((width/2)-(int(tSize[0][0])/2)),hFourth*3), quoteL[0], font=fnt, fill = (r, g, b))
# Write the second half of the text using the parameters calculated above
d.text(txtPos[1], quoteL[1], font=fnt, fill = text_color)
# d.text((((width/2)-(int(tSize[1][0])/2)),hFourth*3+fontSize), quoteL[1], font=fnt, fill = (r, g, b))
img.save('1.png') # Save the image as 1.png
os.remove(os.path.join(wd,picture)) # Remove the original picture
return "1.png"
def complementaryColor(img):
"""
    Get the color used for the text outline by taking the
    median color of the image and calculating its
    complementary. This is not ideal and does not always
    work properly.
Arguments taken:
img: the image in PIL format
Returns:
r: red color
g: green color
b: blue color
"""
a = ImageStat.Stat(img) # Open the image with ImageStat
a = a.median # Calculate the median of each color band
r = 255 - a[0] # Calculate
g = 255 - a[1] # the
b = 255 - a[2] # complementary
return int(r), int(g), int(b)
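# Worked example (illustrative): if the median color of the image is
# (100, 150, 200), the complementary color returned is (155, 105, 55).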
def medianColor(img):
"""
Get the median color of the image.
Arguments taken:
img: the image in PIL format
Returns:
r: red color
g: green color
b: blue color
"""
a = ImageStat.Stat(img) # Open the image with ImageStat
a = a.median # Calculate the median of each color band
    r = a[0] # Take
    g = a[1] # the
    b = a[2] # median values
return int(r), int(g), int(b)
def main():
"""
The main function handles the call to all
the other functions that make the script work.
    It also takes care of a few other things: reading the
    arguments from the command line, splitting the quote
    into two lines, and selecting the language.
Arguments taken:
None
Returns:
None
"""
# Get the arguments from the console
# Otherwise assign two default ones
try:
        account = argv[1] if len(argv) > 1 else "natgeo"
        quote_author = argv[2] if len(argv) > 2 else "<NAME>"
        language = argv[3] if len(argv) > 3 else "en"
except:
language = "en"
# Get the picture to work with
picture = getInstagramFile(str(account))
# Get the quote
quoteL = []
    quote, length = getQuote(str(quote_author), language)
    # Split the quote in two equal parts, by words
    quoteL.append(" ".join(quote.rsplit(" ")[:int(length/2)]))
    quoteL.append(" ".join(quote.rsplit(" ")[int(length/2):]))
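    # Illustrative example (hypothetical quote): "Stay hungry stay foolish"
    # has 4 words, so it is split into "Stay hungry" and "stay foolish".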
# Write the text on the image
image = writeOnImage(picture,quoteL)
# When finished, open the image to see
os.system(os.path.join(wd,image))
exit("All done, goodbye!")
if __name__ == '__main__':
"""
This is the main call
"""
main()
|
167940
|
import numpy as np
from configparser import SafeConfigParser
from pyfisher.lensInterface import lensNoise
import orphics.theory.gaussianCov as gcov
from orphics.theory.cosmology import Cosmology
import orphics.tools.io as io
cc = Cosmology(lmax=6000,pickling=True)
theory = cc.theory
# Read config
iniFile = "../pyfisher/input/params.ini"
Config = SafeConfigParser()
Config.optionxform=str
Config.read(iniFile)
expName = "ColinACT"
lensName = "ColinLensing"
ls,Nls,ellbb,dlbb,efficiency = lensNoise(Config,expName,lensName,beamOverride=None,noiseTOverride=None,lkneeTOverride=None,lkneePOverride=None,alphaTOverride=None,alphaPOverride=None)
#planck_file = "input/planck_nlkk.dat"
#lp,nlplanck = np.loadtxt(planck_file,usecols=[0,1],unpack=True)
LF = gcov.LensForecast(theory)
ells = np.arange(2,6000,1)
clkk = theory.gCl('kk',ells)
pl = io.Plotter(scaleY='log')
pl.add(ells,clkk)
#pl.add(lp,nlplanck,ls="-.")
pl.add(ls,Nls,ls="-.")
pl._ax.set_ylim(5e-10,1e-5)
pl.done("output/clsn.png")
#LF.loadGenericCls("kk",ells,clkk,lp,nlplanck)
LF.loadGenericCls("kk",ells,clkk,ls,Nls)
ellBinEdges = np.arange(20,3000,20)
fsky = 0.4
specType = "kk"
sn,errs = LF.sn(ellBinEdges,fsky,specType)
print(sn)
|
167996
|
import subprocess
from os.path import join
from app import create_app
from flask import current_app
from flask.ext.script import Shell, Manager, Server
manager = Manager(create_app)
def _make_shell_context():
"""
Shell context: import helper objects here.
"""
return dict(app=current_app)
manager.add_option('--flask-config', dest='config', help='Specify Flask config file', required=False)
manager.add_command('shell', Shell(make_context=_make_shell_context))
manager.add_command('runserver', Server(host='0.0.0.0'))
@manager.command
def build():
"""
Build static assets.
"""
from app.assets import init
assets = init(current_app)
assets.build_all()
if __name__ == '__main__':
manager.run()
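# Usage sketch (commands registered above; shown for illustration only):
#   python manage.py runserver   # serve on 0.0.0.0
#   python manage.py build       # build static assets
#   python manage.py shell       # open a shell with `app` in the context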
|
167997
|
from yafs.action import generic_action
import logging
class my_custom_action(generic_action):
def __init__(self, *args, **kwargs):
super(my_custom_action, self).__init__(*args, **kwargs)
self.plates = {}
self.fees = {}
#mandatory function
def action(self,ma): #mobile_entity
# print "ACTION"
# print ma
# print ma.next_time
# print ma.get_current_position()
# print "-"*10
logging.info(" Performing Action from VEHICLE: %i in: %i " % (ma.id, ma.get_current_position()))
if ma.get_current_position() in self.sim.service_coverage.keys(): #sim is an instance of CORE.py
if ma.plate in self.plates:
self.fees[ma.plate]={"arrive":self.plates[ma.plate],"end":self.sim.env.now}
else:
self.plates[ma.plate]=self.sim.env.now
|
168002
|
from __future__ import absolute_import
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from core.minisim.util.normalization import apply_normalization, get_state_statistics
from core.model import Model
from utils.init_weights import init_weights, normalized_columns_initializer
class A3CMlpNarrowingMinisimModel(Model):
def __init__(self, args):
super(A3CMlpNarrowingMinisimModel, self).__init__(args)
self.lstm_layer_count = 1
self.num_robots = args.num_robots
self.hist_len = args.hist_len
self.target_data_dim = args.target_data_dim
self.hidden_vb_dim = args.hidden_vb_dim
self.mean, self.std = get_state_statistics()
# build model
# 0. feature layers
self.fc1 = nn.Linear(self.input_dims[0] * self.input_dims[1], self.hidden_dim)
self.rl1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim // 2)
self.rl2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden_dim // 2, self.hidden_dim // 4)
self.rl3 = nn.ReLU()
# lstm
if self.enable_lstm:
self.lstm = nn.LSTMCell(self.hidden_dim // 4, self.hidden_vb_dim, 1)
final_input_size = self.hidden_vb_dim
else:
final_input_size = self.hidden_dim // 4
# 1. policy output
self.policy_5 = nn.Linear(final_input_size + self.target_data_dim * self.hist_len, self.output_dims)
self.policy_6 = nn.Softmax()
# 2. value output
self.value_5 = nn.Linear(final_input_size + self.target_data_dim * self.hist_len, 1)
self._reset()
def _init_weights(self):
self.apply(init_weights)
nn.init.xavier_normal(self.fc1.weight.data, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_normal(self.fc2.weight.data, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_normal(self.fc3.weight.data, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_normal(self.lstm.weight_hh.data, gain=nn.init.calculate_gain('sigmoid'))
nn.init.xavier_normal(self.lstm.weight_ih.data, gain=nn.init.calculate_gain('sigmoid'))
nn.init.xavier_normal(self.policy_5.weight.data, gain=nn.init.calculate_gain('linear'))
nn.init.xavier_normal(self.value_5.weight.data, gain=nn.init.calculate_gain('linear'))
self.fc1.bias.data.fill_(0)
self.fc2.bias.data.fill_(0)
self.fc3.bias.data.fill_(0)
self.lstm.bias_ih.data.fill_(0)
self.lstm.bias_hh.data.fill_(0)
self.policy_5.bias.data.fill_(0)
self.value_5.bias.data.fill_(0)
def forward(self, x, lstm_hidden_vb=None):
apply_normalization(x, self.mean, self.std)
target_data = x[:, :, self.input_dims[1]:self.input_dims[1]
+ self.target_data_dim * self.hist_len]
target_data = target_data.contiguous().view(self.num_robots,
self.target_data_dim * self.hist_len)
laser_scans = x[:, :, :self.input_dims[1]]
# TODO: contiguous here will slow everything down a lot?
x = laser_scans.contiguous()
x = self.rl1(self.fc1(x))
x = self.rl2(self.fc2(x))
x = self.rl3(self.fc3(x))
x = x.view(-1, self.hidden_dim // 4)
if self.enable_lstm:
x, c = self.lstm(x, lstm_hidden_vb)
x_aug = torch.cat((x, target_data), 1)
p = self.policy_5(x_aug)
p = self.policy_6(p)
v = self.value_5(x_aug)
if self.enable_lstm:
return p, v, (x, c)
else:
return p, v
|
168007
|
import argparse
from pathlib import Path
import tensorflow as tf
from keras import backend as K
from .network_definition import Colorization
from .training_utils import (
evaluation_pipeline,
checkpointing_system,
plot_evaluation,
metrics_system,
)
parser = argparse.ArgumentParser(description="Eval")
parser.add_argument(
"tfrecords",
type=str,
metavar="TFRECORDS_DIR",
help="evaluate using all tfrecords in TFRECORDS_DIR",
)
parser.add_argument(
"output",
type=str,
    metavar="OUT_DIR",
    help="use OUT_DIR to load checkpoints and write images",
)
parser.add_argument(
"--run-id",
required=True,
type=str,
metavar="RUN_ID",
help="load checkpoint from the run RUN_ID",
)
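# Example invocation (hypothetical paths, assuming this module is run as part
# of its package; shown for illustration only):
#   python -m <package>.evaluate ~/data/tfrecords ~/data/output --run-id run_01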
args = parser.parse_args()
dir_tfrecords = Path(args.tfrecords).expanduser().resolve().as_posix()
dir_output = Path(args.output).expanduser().resolve().joinpath(args.run_id).as_posix()
# PARAMETERS
run_id = args.run_id
val_number_of_images = 100
# START
sess = tf.Session()
K.set_session(sess)
# Build the network and the various operations
col = Colorization(256)
evaluations_ops = evaluation_pipeline(col, val_number_of_images, dir_tfrecords)
summary_writer = metrics_system(sess, dir_output)
saver, checkpoint_paths, latest_checkpoint = checkpointing_system(dir_output)
with sess.as_default():
# Initialize
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Coordinate the loading of image files.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Restore
if latest_checkpoint is not None:
print(f"Restoring from: {latest_checkpoint}")
saver.restore(sess, latest_checkpoint)
else:
print(f"No checkpoint found in: {checkpoint_paths}")
exit(1)
res = sess.run(evaluations_ops)
print("Cost: {}".format(res["cost"]))
plot_evaluation(res, "eval", dir_output)
# Finish off the filename queue coordinator.
coord.request_stop()
coord.join(threads)
|
168027
|
from pyradioconfig.parts.ocelot.calculators.calc_freq_offset_comp import CALC_Freq_Offset_Comp_ocelot
from pyradioconfig.parts.sol.calculators.calc_utilities import Calc_Utilities_Sol
class Calc_Freq_Offset_Comp_Sol(CALC_Freq_Offset_Comp_ocelot):
def calc_afc_scale_value(self, model):
# Overriding this function due to variable name change
# Load model values into local variables
freqgain = model.vars.freq_gain_actual.value
mod_format = model.vars.modulation_type.value
mode = model.vars.frequency_comp_mode.value
scale = model.vars.afc_step_scale.value
remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
remodoutsel = model.vars.MODEM_PHDMODCTRL_REMODOUTSEL.value
digmix_res = model.vars.digmix_res_actual.value
synth_res = model.vars.synth_res_actual.value
phscale = 2 ** model.vars.MODEM_TRECPMDET_PHSCALE.value
mode_index = self.freq_comp_mode_index(model, mode)
demod_sel = model.vars.demod_select.value
digmixfb = Calc_Utilities_Sol().get_fefilt_actual(model, 'DIGMIXCTRL_DIGMIXFBENABLE')
baudrate = model.vars.rx_baud_rate_actual.value
osr = model.vars.oversampling_rate_actual.value
afc_tx_adjust_enable = model.vars.afc_tx_adjust_enable.value
afc_oneshot = model.vars.MODEM_AFC_AFCONESHOT.value
freq_offset_hz = model.vars.freq_offset_hz.value
baudrate = model.vars.baudrate.value
modulation_index = model.vars.modulation_index.value
if digmixfb:
res = digmix_res
else:
res = synth_res
# AFC to synth for Legacy
if(demod_sel==model.vars.demod_select.var_enum.LEGACY):
if mode_index >= 4 and freqgain > 0:
if mod_format == model.vars.modulation_type.var_enum.FSK2 or \
mod_format == model.vars.modulation_type.var_enum.FSK4:
afcscale = baudrate * osr / ( 256 * freqgain * res)
afcscale_tx = baudrate * osr / ( 256 * freqgain * synth_res)
else:
afcscale = baudrate * osr / ( 256 * res)
afcscale_tx = baudrate * osr / ( 256 * synth_res)
else:
afcscale = 0.0
afcscale_tx = 0.0
elif((demod_sel==model.vars.demod_select.var_enum.TRECS_VITERBI or
demod_sel==model.vars.demod_select.var_enum.TRECS_SLICER) and
model.vars.MODEM_VITERBIDEMOD_VITERBIKSI1.value != 0) or \
demod_sel==model.vars.demod_select.var_enum.LONGRANGE or \
(demod_sel==model.vars.demod_select.var_enum.SOFT_DEMOD and
mod_format == model.vars.modulation_type.var_enum.FSK2):
if remoden and remodoutsel == 1:
afcscale = baudrate * osr * phscale / (256 * freqgain * res)
afcscale_tx = baudrate * osr * phscale / (256 * freqgain * synth_res)
elif (freq_offset_hz / baudrate) > 0.57 and modulation_index <= 0.5:
afc_boost = baudrate / freq_offset_hz
afcscale = afc_boost * baudrate * phscale / (256 * res)
afcscale_tx = baudrate * phscale / (256 * synth_res)
else:
afcscale = baudrate * phscale / (256 * res)
afcscale_tx = baudrate * phscale / (256 * synth_res)
elif (demod_sel == model.vars.demod_select.var_enum.BCR):
# digital mixer frequency comp
afcscale = model.vars.pro2_afc_gain.value / res
afcscale_tx = model.vars.pro2_afc_gain.value / synth_res
else:
afcscale = 0.0
afcscale_tx = 0.0
afcscale = afcscale * scale
#Special case to set afc_scale_tx to 0 to disable TX AFC adjust when using oneshot
#See https://jira.silabs.com/browse/MCUW_RADIO_CFG-1510
if (afc_tx_adjust_enable == False) and afc_oneshot:
afcscale_tx = 0.0
model.vars.afc_scale.value = afcscale
model.vars.afc_scale_tx.value = afcscale_tx
# def calc_afc_scale_tx_reg(self, model):
# """
# convert AFC scale TX value to mantissa and exponent register values
#
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
#
# afc_scale = model.vars.afc_scale_tx.value
#
# if afc_scale == 0:
# best_m = 0
# best_e = 0
# else:
# best_diff = 99e9
# # find best m, e pair that gives a scale less than or equal to the target scale
# for m in range(1,32):
# for e in range(-8,8):
# diff = afc_scale - m * 2**e
#
# if diff > 0 and diff <= best_diff:
# best_diff = diff
# best_e = e
# best_m = m
#
# if best_e < 0:
# best_e += 16
#
# self._reg_write(model.vars.MODEM_AFCADJTX_AFCSCALEE, int(best_e))
# self._reg_write(model.vars.MODEM_AFCADJTX_AFCSCALEM, int(best_m))
#
# def calc_afc_scale_tx_actual(self, model):
# """
#
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
#
# e = float(model.vars.MODEM_AFCADJTX_AFCSCALEE.value)
# m = float(model.vars.MODEM_AFCADJTX_AFCSCALEM.value)
#
# if e > 7:
# e -= 16
#
# model.vars.afc_scale_tx_actual.value = m * 2**e
#
# def calc_fdm0thresh_val(self, model):
# """
# in FDM0 mode set FDM0THRESH register
#
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
#
# timingbases = model.vars.timingbases_actual.value
# #scale = model.vars.freq_gain_scale.value
#
# # only used in FDM0 mode which is active if timingbases = 0
# if timingbases > 0:
# model.vars.fdm0_thresh.value = 0
# return
#
# # nominal frequency deviation is +/- 64 we like to set this threshold
# # to half of that so 32 but if the FREQGAIN setting is scaled to avoid
# # saturation we need to scale this value accordingly
# fdm0_thresh = 32 #* scale
#
# if fdm0_thresh < 8:
# fdm0_thresh = 8
# elif fdm0_thresh > 71: # Limiting so the register won't overflow
# fdm0_thresh = 71 # See calc_fdm0thresh_reg for details
#
# model.vars.fdm0_thresh.value = int(fdm0_thresh)
#
# def calc_afc_scale_reg(self, model):
# """
# convert AFC scale value to mantissa and exponent register values
#
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
#
# afc_scale = model.vars.afc_scale.value
#
# if afc_scale == 0:
# best_m = 0
# best_e = 0
# else:
# best_diff = 99e9
# best_e = 0
# best_m = 0
# # start with the highest allowed mantissa and find best m, e pair
# for m in xrange(1, 31, 1):
# e = math.floor(math.log(afc_scale / m, 2))
# diff = abs(afc_scale - m * 2**e)
#
# # solution is valid only if e is within the limits
# if (diff < best_diff) and e >= -8 and e <= 7:
# best_diff = diff
# best_e = e
# best_m = m
#
# if best_e < 0:
# best_e += 16
#
# if best_m > 31:
# best_m = 31
#
# self._reg_write(model.vars.MODEM_AFCADJRX_AFCSCALEE, int(best_e))
# self._reg_write(model.vars.MODEM_AFCADJRX_AFCSCALEM, int(best_m))
#
# def calc_afc_scale_actual(self, model):
# """
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
# e = float(model.vars.MODEM_AFCADJRX_AFCSCALEE.value)
# m = float(model.vars.MODEM_AFCADJRX_AFCSCALEM.value)
#
# if e > 7:
# e -= 16
#
# model.vars.afc_scale_actual.value = m * 2**e
#
# def calc_freq_comp_mode(self, model):
# """
# determine best frequency compensation mode based on emprical data
#
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
# preamble_len = model.vars.preamble_length.value
# demod_select = model.vars.demod_select.value
# preamsch = model.vars.MODEM_TRECPMDET_PREAMSCH.value
# modtype = model.vars.modulation_type.value
# afc_mode = model.vars.afc_run_mode.var_enum.CONTINUOUS
# tol = model.vars.baudrate_tol_ppm.value
#
# # enable 1-shot for Viterbi demod only if preamble length is larger than 15 and preamble search mode is enabled
# if demod_select == model.vars.demod_select.var_enum.TRECS_VITERBI or demod_select == model.vars.demod_select.var_enum.TRECS_SLICER:
# if preamble_len >= 16 and preamsch:
# afc_mode = model.vars.afc_run_mode.var_enum.ONE_SHOT
# # enable 1-shot for BCR only if preamble length is larger than 15 and modulation is not OOK and baudrate offset is less than 1%
# elif demod_select == model.vars.demod_select.var_enum.BCR:
# if preamble_len >= 16 and modtype != model.vars.modulation_type.var_enum.OOK and tol < 50000:
# afc_mode = model.vars.afc_run_mode.var_enum.ONE_SHOT
#
# # default mode
# freq_mode = model.vars.frequency_comp_mode.var_enum.INTERNAL_ALWAYS_ON
#
# model.vars.frequency_comp_mode.value = freq_mode
# model.vars.afc_run_mode.value = afc_mode
#
#
# def calc_afconeshoft_reg(self, model):
# modtype = model.vars.modulation_type.value
# run_mode = model.vars.afc_run_mode.value
# comp_mode = model.vars.frequency_comp_mode.value
#
# comp_mode_index = self.freq_comp_mode_index(model, comp_mode)
#
# if (run_mode == model.vars.afc_run_mode.var_enum.ONE_SHOT) and (modtype != model.vars.modulation_type.var_enum.OOK and modtype != model.vars.modulation_type.var_enum.ASK): #and comp_mode_index > 3:
# oneshot = 1
# else:
# oneshot = 0
#
# if comp_mode_index > 3:
# limreset = 1
# else:
# limreset = 0
#
# self._reg_write(model.vars.MODEM_AFC_AFCONESHOT, oneshot)
# self._reg_write(model.vars.MODEM_AFC_AFCDELDET, 0)
# self._reg_write(model.vars.MODEM_AFC_AFCDSAFREQOFFEST, 0)
# self._reg_write(model.vars.MODEM_AFC_AFCENINTCOMP, 0)
# self._reg_write(model.vars.MODEM_AFC_AFCLIMRESET, limreset)
# self._reg_write(model.vars.MODEM_AFC_AFCGEAR, 3)
#
# def calc_afcdel_reg(self, model):
# """
# calculate AFC Delay based on over sampling rate (osr) if AFC is enabled
#
# Args:
# model (ModelRoot) : Data model to read and write variables from
# """
#
# mode = model.vars.frequency_comp_mode.value
# osr = model.vars.oversampling_rate_actual.value
# demod_select = model.vars.demod_select.value
# afconeshot = model.vars.MODEM_AFC_AFCONESHOT.value
# del_digmix_to_demod = model.vars.grpdelay_to_demod.value
# remoddwn = model.vars.MODEM_PHDMODCTRL_REMODDWN.value + 1
#
# if demod_select == model.vars.demod_select.var_enum.TRECS_VITERBI or demod_select == model.vars.demod_select.var_enum.TRECS_SLICER:
# if afconeshot:
# # AFCDEL is in symbols when used for Viterbi Demod so divide by OSR
# # if REMODDWN is not 1 that will also need to be taken into account
# afcdel = math.ceil(del_digmix_to_demod / osr * remoddwn)
# else:
# afcdel = 0
# else:
# mode_index = self.freq_comp_mode_index(model, mode)
#
# # AFC mode
# if mode_index >= 4:
# afcdel = model.vars.grpdelay_to_demod.value
# else:
# afcdel = 0
#
# if afcdel > 31:
# afcdel = 31
#
# self._reg_write(model.vars.MODEM_AFC_AFCDEL, int(afcdel))
def afc_adj_limit(self, model):
freq_limit = model.vars.freq_offset_hz.value
synth_res = model.vars.synth_res_actual.value
afclimreset = model.vars.afc_lim_reset_actual.value
digmix_res = model.vars.digmix_res_actual.value
digmixfb = Calc_Utilities_Sol().get_fefilt_actual(model, 'DIGMIXCTRL_DIGMIXFBENABLE')
if digmixfb:
res = digmix_res
else:
res = synth_res
# calculate limit
afcadjlim = freq_limit / res
# if AFC_LIM_RESET is enabled we reset to the center frequency
# once the accumulated offset reaches the limit. In this mode we
# like to set the limit to about 20% higher than where we like the
# limit to be
if afclimreset:
afcadjlim *= 1.2
return int(round(afcadjlim))
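    # Worked example (hypothetical numbers, for illustration only): with
    # freq_limit = 50000 Hz and res = 100 Hz, afcadjlim = 500; enabling
    # AFC_LIM_RESET raises the limit by 20% to 600.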
def calc_afc_adjlim_actual(self, model):
afcadjlim = model.vars.MODEM_AFCADJLIM_AFCADJLIM.value
synth_res = model.vars.synth_res_actual.value
digmix_res = model.vars.digmix_res_actual.value
        digmixfb = Calc_Utilities_Sol().get_fefilt_actual(model, 'DIGMIXCTRL_DIGMIXFBENABLE')
if digmixfb:
res = digmix_res
else:
res = synth_res
model.vars.afc_limit_hz_actual.value = afcadjlim * res
def calc_afconeshoft_reg(self, model):
modtype = model.vars.modulation_type.value
run_mode = model.vars.afc_run_mode.value
comp_mode = model.vars.frequency_comp_mode.value
demod_select = model.vars.demod_select.value
comp_mode_index = self.freq_comp_mode_index(model, comp_mode)
if (run_mode == model.vars.afc_run_mode.var_enum.ONE_SHOT) and (modtype != model.vars.modulation_type.var_enum.OOK and modtype != model.vars.modulation_type.var_enum.ASK):
oneshot = 1
else:
oneshot = 0
if (comp_mode_index > 3) or (demod_select == model.vars.demod_select.var_enum.BCR):
limreset = 1
else:
limreset = 0
self._reg_write(model.vars.MODEM_AFC_AFCONESHOT, oneshot)
self._reg_write(model.vars.MODEM_AFC_AFCDELDET, 0)
self._reg_write(model.vars.MODEM_AFC_AFCDSAFREQOFFEST, 0)
self._reg_write(model.vars.MODEM_AFC_AFCENINTCOMP, 0)
self._reg_write(model.vars.MODEM_AFC_AFCLIMRESET, limreset)
self._reg_write(model.vars.MODEM_AFC_AFCGEAR, 3)
|
168031
|
from traceback_with_variables import print_cur_tb # , format_cur_tb, iter_cur_tb_lines
def f(n):
print_cur_tb()
# cur_tb_str = format_cur_tb()
# cur_tb_lines = list(iter_cur_tb_lines())
return n + 1
def main():
f(10)
main()
|
168079
|
import os
import sys
from pbstools import PythonJob
from shutil import copyfile
import datetime
import numpy as np
python_file = r"/home/jeromel/Documents/Projects/Deep2P/repos/deepinterpolation/examples/cluster_lib/generic_ephys_process_sync.py"
output_folder = "/allen/programs/braintv/workgroups/neuralcoding/Neuropixels_Data/neuropixels_10_sessions/778998620_419112_20181114_probeD/processed_2020_03_02"
model_file = "/allen/programs/braintv/workgroups/neuralcoding/Neuropixels_Data/neuropixels_10_sessions/778998620_419112_20181114_probeD/trained_models/unet_single_ephys_1024_mean_squared_error_2020_02_29_15_28/2020_02_29_15_28_unet_single_ephys_1024_mean_squared_error-1050.h5"
dat_file = "/allen/programs/braintv/workgroups/neuralcoding/Neuropixels_Data/neuropixels_10_sessions/778998620_419112_20181114_probeD/continuous.dat2"
nb_probes = 384
raw_data = np.memmap(dat_file, dtype="int16")
img_per_movie = int(raw_data.size / nb_probes)
pre_post_frame = 30
pre_post_omission = 1
end_frame = img_per_movie - pre_post_frame - pre_post_omission - 1
nb_jobs = 200
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")
jobdir = output_folder
start_frame = pre_post_omission + pre_post_frame
try:
os.mkdir(jobdir)
except OSError:
print("folder already exists")
output_terminal = os.path.join(jobdir, run_uid + "_running_terminal.txt")
script_basename = os.path.basename(__file__)
copyfile(
os.path.realpath(__file__), os.path.join(jobdir, run_uid + "_" + script_basename)
)
job_settings = {
"queue": "braintv",
"mem": "250g",
"walltime": "24:00:00",
"ppn": 16,
}
job_settings.update(
{
"outfile": os.path.join(jobdir, "$PBS_JOBID.out"),
"errfile": os.path.join(jobdir, "$PBS_JOBID.err"),
"email": "<EMAIL>",
"email_options": "a",
}
)
arg_to_pass = (
" --dat_file "
+ dat_file
+ " --output_folder "
+ output_folder
+ " --model_file "
+ model_file
)
arg_to_pass += (
" --start_frame "
+ str(start_frame)
+ " --end_frame "
+ str(end_frame)
+ " --pre_post_frame "
+ str(pre_post_frame)
)
arg_to_pass += (
" --nb_jobs " + str(nb_jobs) + " --pre_post_omission " + str(pre_post_omission)
)
PythonJob(
python_file,
python_executable="/home/jeromel/.conda/envs/deep_work2/bin/python",
conda_env="deep_work2",
jobname="movie_2p",
python_args=arg_to_pass + " > " + output_terminal,
**job_settings
).run(dryrun=False)
|
168097
|
import numpy as np
from soco_openqa.soco_mrc.mrc_model import MrcModel
from collections import defaultdict
class Reader:
def __init__(self, model):
self.model_id = model
self.reader = MrcModel('us', n_gpu=1)
self.thresh = 0.8
def predict(self, query, top_passages):
batch = [{'q': query, 'doc': p['answer']} for p in top_passages]
preds = self.reader.batch_predict(
self.model_id,
batch,
merge_pred=True,
stride=128,
batch_size=50
)
candidates = defaultdict(list)
for a_id, a in enumerate(preds):
if a.get('missing_warning'):
continue
score = self.thresh * (a['score']) + (1 - self.thresh) * (top_passages[a_id]['score'])
candidates[a['value']].append({'combined_score': score,
'reader_score':a['score'],
'ranker_score':top_passages[a_id]['score'],
'idx': a_id,
'prob': a['prob'],
'answer_span': a['answer_span']})
# get best passages with best answer
answers = []
for k, v in candidates.items():
combined_scores = [x['combined_score'] for x in v]
reader_scores = [x['reader_score'] for x in v]
ranker_scores = [x['ranker_score'] for x in v]
idxes = [x['idx'] for x in v]
best_idx = int(np.argmax(combined_scores))
best_a_id = idxes[best_idx]
answers.append({'value': k,
'score': combined_scores[best_idx],
'reader_score': reader_scores[best_idx],
'ranker_score': ranker_scores[best_idx],
'prob': v[best_idx]['prob'],
'answer_span': v[best_idx]['answer_span'],
"source": {
'context': top_passages[best_a_id]['answer'],
'url': top_passages[best_a_id].get('meta', {}).get('url'),
'doc_id': top_passages[best_a_id].get('meta', {}).get('doc_id')
}
})
answers = sorted(answers, key=lambda x: x['score'], reverse=True)
return answers
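    # Score combination example (illustrative numbers): with thresh = 0.8, a
    # reader score of 0.9 and a ranker score of 0.5 combine to
    # 0.8 * 0.9 + 0.2 * 0.5 = 0.82.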
|
168180
|
import FWCore.ParameterSet.Config as cms
JetResolutionESProducer_AK4PFchs = cms.ESProducer("JetResolutionESProducer",
label = cms.string('AK4PFchs')
)
JetResolutionESProducer_SF_AK4PFchs = cms.ESProducer("JetResolutionScaleFactorESProducer",
label = cms.string('AK4PFchs')
)
|
168188
|
import cv2
from geosolver.diagram.draw_on_image import draw_point, draw_instance, draw_label
from geosolver.ontology.ontology_semantics import evaluate
from geosolver.utils.prep import display_image
__author__ = 'minjoon'
class ImageSegment(object):
def __init__(self, segmented_image, sliced_image, binarized_segmented_image, pixels, offset, key):
self.sliced_image = sliced_image
self.segmented_image = segmented_image
self.binarized_segmented_image = binarized_segmented_image
self.pixels = pixels
self.offset = offset
self.shape = segmented_image.shape
self.key = key
self.area = segmented_image.shape[0] * segmented_image.shape[1]
def display_segmented_image(self, block=True):
display_image(self.segmented_image, block=block)
def display_binarized_segmented_image(self, block=True):
display_image(self.binarized_segmented_image, block=block)
def display_pixels(self, block=True):
image = cv2.cvtColor(self.segmented_image, cv2.COLOR_GRAY2BGR)
for pixel in self.pixels:
draw_point(image, pixel)
display_image(image, block=block)
class ImageSegmentParse(object):
def __init__(self, original_image, diagram_image_segment, label_image_segments):
"""
:param numpy.ndarray original_image:
:param ImageSegment diagram_image_segment:
:param dict label_image_segments:
:return:
"""
assert isinstance(diagram_image_segment, ImageSegment)
assert isinstance(label_image_segments, dict)
self.original_image = original_image
self.diagram_image_segment = diagram_image_segment
self.label_image_segments = label_image_segments
def get_colored_original_image(self):
return cv2.cvtColor(self.original_image, cv2.COLOR_GRAY2BGR)
def display_diagram(self):
self.diagram_image_segment.display_segmented_image()
def display_labels(self):
for image_segment in self.label_image_segments.values():
image_segment.display_segmented_image()
def get_image_instances(self, instances, **kwargs):
image = self.get_colored_original_image()
for instance in instances:
draw_instance(image, instance, offset=self.diagram_image_segment.offset, **kwargs)
return image
def display_instances(self, instances, block=True, **kwargs):
display_image(self.get_image_instances(instances, **kwargs), block=block)
class PrimitiveParse(object):
def __init__(self, image_segment_parse, lines, circles):
assert isinstance(image_segment_parse, ImageSegmentParse)
self.image_segment_parse = image_segment_parse
self.lines = lines
self.circles = circles
self.primitives = dict(lines.items() + circles.items())
def display_primitives(self, block=True, **kwargs):
self.image_segment_parse.display_instances(self.primitives.values(), block=block, **kwargs)
def get_image_primitives(self, **kwargs):
return self.image_segment_parse.get_image_instances(self.primitives.values(), **kwargs)
def display_each_primitive(self, **kwargs):
for primitive in self.primitives.values():
self.image_segment_parse.display_instances([primitive], block=True, **kwargs)
class CoreParse(object):
def __init__(self, primitive_parse, intersection_points, point_variables, circles, radius_variables, assignment):
assert isinstance(primitive_parse, PrimitiveParse)
self.image_segment_parse = primitive_parse.image_segment_parse
self.primitive_parse = primitive_parse
self.intersection_points = intersection_points
self.circles = circles
self.point_variables = point_variables
self.radius_variables = radius_variables
self.variable_assignment = assignment
def evaluate(self, formula):
return evaluate(formula, self.variable_assignment)
def is_grounded(self, formula):
return formula.is_grounded(self.variable_assignment.keys())
def get_image_points(self, **kwargs):
image = self.image_segment_parse.get_colored_original_image()
offset = self.image_segment_parse.diagram_image_segment.offset
for key, point in self.intersection_points.iteritems():
label = Label("%d" % key, point)
draw_label(image, label, offset=offset, **kwargs)
draw_point(image, point, offset=offset, **kwargs)
return image
def display_points(self, block=True, **kwargs):
image = self.get_image_points(**kwargs)
display_image(image, block=block)
class GraphParse(object):
# TODO :
def __init__(self, core_parse, line_graph, circle_dict, arc_graphs):
assert isinstance(core_parse, CoreParse)
self.core_parse = core_parse
self.primitive_parse = core_parse.primitive_parse
self.image_segment_parse = core_parse.primitive_parse.image_segment_parse
self.line_graph = line_graph # Undirected graph
self.circle_dict = circle_dict
self.arc_graphs = arc_graphs # Directed graph
self.intersection_points = core_parse.intersection_points
self.point_variables = core_parse.point_variables
self.radius_variables = core_parse.radius_variables
def display_instances(self, instances, block=True, **kwargs):
self.image_segment_parse.display_instances(instances, block=block, **kwargs)
class Label:
def __init__(self, text, position):
self.text = text
self.position = position
class ImageLabelParse:
def __init__(self, image, labels):
self.image = image
self.labels = labels
def get_labeled_image(self, **kwargs):
image = cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)
for label in self.labels.values():
draw_label(image, label, **kwargs)
draw_point(image, label.position)
return image
|
168223
|
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import gridspec
from kcsd import csd_profile as CSD
from kcsd import ValidateKCSD2D
from figure_properties import *
from kCSD_with_reliability_map_2D import make_reconstruction, matrix_symmetrization
def set_axis(ax, letter=None):
"""
Formats the plot's caption.
Parameters
----------
    ax: Axes object.
    letter: string
        Caption of the plot, drawn at a fixed position (-0.05, 1.05)
        in axes coordinates.
        Default: None.
    Returns
    -------
    ax: modified Axes object.
"""
ax.text(
-0.05,
1.05,
letter,
fontsize=20,
weight='bold',
transform=ax.transAxes)
return ax
def make_single_subplot(ax, val_type, xs, ys, values, cax, title=None,
ele_pos=None, xlabel=False, ylabel=False, letter='',
t_max=1., mask=False, level=False):
cmap = cm.Greys
ax.set_aspect('equal')
if t_max is None:
t_max = np.max(np.abs(values))
if level is not False:
levels = level
else:
levels = np.linspace(0, 0.2, 32)
im = ax.contourf(xs, ys, values,
levels=levels, cmap=cmap, alpha=1)
CS = ax.contour(xs, ys, values, cmap='Greys')
ax.clabel(CS, # label every second level
inline=1,
fmt='%1.2f',
colors='blue')
if val_type == 'err':
ax.scatter(ele_pos[:, 0], ele_pos[:, 1], s=20, marker='.', c='black',
zorder=3)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
if xlabel:
ax.set_xlabel('X (mm)')
if ylabel:
ax.set_ylabel('Y (mm)')
if title is not None:
ax.set_title(title)
ax.set_xticks([0, 0.5, 1])
ax.set_yticks([0, 0.5, 1])
ticks = np.linspace(0, 0.2, 3, endpoint=True)
plt.colorbar(im, cax=cax, orientation='horizontal', format='%.2f',
ticks=ticks)
set_axis(ax, letter=letter)
plt.tight_layout()
return ax, cax
def generate_reliability_map(point_error, ele_pos, title):
csd_at = np.mgrid[0.:1.:100j,
0.:1.:100j]
csd_x, csd_y = csd_at
plt.figure(figsize=(17, 6))
gs = gridspec.GridSpec(2, 1, height_ratios=[1., 0.04], left=0.415,
right=0.585, top=0.880, bottom=0.110)
ax = plt.subplot(gs[0, 0])
cax = plt.subplot(gs[1, 0])
make_single_subplot(ax, 'err', csd_x, csd_y, point_error, cax=cax,
ele_pos=ele_pos, title=None, xlabel=True, ylabel=True,
letter=' ', t_max=0.2, level=np.linspace(0, 0.2, 16))
plt.savefig(title + '.png', dpi=300)
plt.show()
if __name__ == '__main__':
CSD_PROFILE = CSD.gauss_2d_large
CSD_SEED = 16
ELE_LIMS = [0.05, 0.95] # range of electrodes space
method = 'cross-validation'
Rs = np.arange(0.2, 0.5, 0.1)
lambdas = np.zeros(1)
noise = 0
KK = ValidateKCSD2D(CSD_SEED, h=50., sigma=1., n_src_init=400,
est_xres=0.01, est_yres=0.01, ele_lims=ELE_LIMS)
k, csd_at, true_csd, ele_pos, pots = make_reconstruction(KK, CSD_PROFILE,
CSD_SEED,
total_ele=100,
noise=noise,
Rs=Rs,
lambdas=lambdas,
method=method)
error_l = np.load('error_maps_2D/point_error_large_100_all_ele.npy')
error_s = np.load('error_maps_2D/point_error_small_100_all_ele.npy')
error_all = np.concatenate((error_l, error_s))
symm_array_large = matrix_symmetrization(error_l)
symm_array_small = matrix_symmetrization(error_s)
symm_array_all = matrix_symmetrization(error_all)
generate_reliability_map(np.mean(symm_array_all, axis=0), ele_pos,
'Reliability_map_random_newRDM_symm')
generate_reliability_map(np.mean(symm_array_large, axis=0), ele_pos,
'Reliability_map_large_newRDM_symm')
generate_reliability_map(np.mean(symm_array_small, axis=0), ele_pos,
'Reliability_map_small_newRDM_symm')
|
168229
|
import factory
from faker import Faker
from src.association.tests.factories import ModelFactory
from src.customers.domain.entities import Customer
fake = Faker()
class CustomerFactory(ModelFactory):
class Meta:
model = Customer
user_id = factory.LazyAttribute(lambda _: fake.pyint(min_value=1, max_value=1000))
stripe_customer_id = factory.LazyAttribute(lambda obj: f"cus_{obj.user_id}")
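# Usage sketch (assumes the factory is used inside a configured test database
# session; shown for illustration only):
#   customer = CustomerFactory()
#   assert customer.stripe_customer_id == f"cus_{customer.user_id}"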
|
168320
|
import neuralnet_pytorch.ext as ext
__all__ = ['batch_pairwise_dist']
batch_pairwise_dist = ext.bpd_forward
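# Usage sketch (assumes the compiled extension is importable; shapes are
# illustrative and the exact semantics follow the bpd_forward kernel):
#   import torch
#   a = torch.rand(2, 128, 3).cuda()
#   b = torch.rand(2, 256, 3).cuda()
#   d = batch_pairwise_dist(a, b)  # expected shape: (2, 128, 256)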
|