id
| content
|
|---|---|
1703449
|
import sys
import re
"""This script gets EMAIL, PASSWORD, SITE_URL, SECRET, ELECTION_YEAR, UPDATE_PROFILE, DB_NAME, DB_URL, and FALE_CONOSCO_EMAIL environment variables used on app engine from Github Secrets and
replace on app.yaml."""
app_engine_file = "app.yaml"
if __name__ == "__main__":
if len(sys.argv) != 10:
sys.exit("invalid number of arguments: {}".format(len(sys.argv)))
email = sys.argv[1]
password = sys.argv[2]
site_url = sys.argv[3]
secret = sys.argv[4]
election_year = sys.argv[5]
update_profile = sys.argv[6]
db_name = sys.argv[7]
db_url = sys.argv[8]
fale_conosco_email = sys.argv[9]
file_content = ""
with open(app_engine_file, "r") as file:
app_engine_file_content = file.read()
line = re.sub(r"##EMAIL", email, app_engine_file_content)
line = re.sub(r"##PASSWORD", password, line)
line = re.sub(r"##SITE_URL", site_url, line)
line = re.sub(r"##SECRET", secret, line)
line = re.sub(r"##ELECTION_YEAR", election_year, line)
line = re.sub(r"##UPDATE_PROFILE", update_profile, line)
line = re.sub(r"##DB_NAME", db_name, line)
line = re.sub(r"##DB_URL", db_url, line)
line = re.sub(r"##FALE_CONOSCO_EMAIL", fale_conosco_email, line)
file_content = line
with open(app_engine_file, "w") as file:
file.write(file_content)
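# Usage sketch (hypothetical script name and placeholder values; in CI the real
# arguments come from GitHub Secrets):
#   python replace_app_yaml.py "$EMAIL" "$PASSWORD" "$SITE_URL" "$SECRET" \
#       "$ELECTION_YEAR" "$UPDATE_PROFILE" "$DB_NAME" "$DB_URL" "$FALE_CONOSCO_EMAIL"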
|
1703465
|
from __future__ import print_function
__author__ = '<NAME>, <EMAIL>'
from numpy import random, arange, isscalar  # scipy.isscalar was only an alias of numpy.isscalar
from random import sample
from pybrain.datasets.dataset import DataSet
from pybrain.utilities import fListToString
class SupervisedDataSet(DataSet):
"""SupervisedDataSets have two fields, one for input and one for the target.
"""
def __init__(self, inp, target):
"""Initialize an empty supervised dataset.
Pass `inp` and `target` to specify the dimensions of the input and
target vectors."""
DataSet.__init__(self)
if isscalar(inp):
# add input and target fields and link them
self.addField('input', inp)
self.addField('target', target)
else:
self.setField('input', inp)
self.setField('target', target)
self.linkFields(['input', 'target'])
# reset the index marker
self.index = 0
# the input and target dimensions
self.indim = self.getDimension('input')
self.outdim = self.getDimension('target')
def __reduce__(self):
_, _, state, _, _ = super(SupervisedDataSet, self).__reduce__()
creator = self.__class__
args = self.indim, self.outdim
return creator, args, state, iter([]), iter({})
def addSample(self, inp, target):
"""Add a new sample consisting of `input` and `target`."""
self.appendLinked(inp, target)
def getSample(self, index=None):
"""Return a sample at `index` or the current sample."""
return self.getLinked(index)
def setField(self, label, arr, **kwargs):
"""Set the given array `arr` as the new array of the field specfied by
`label`."""
DataSet.setField(self, label, arr, **kwargs)
# refresh dimensions, in case any of these fields were modified
if label == 'input':
self.indim = self.getDimension('input')
elif label == 'target':
self.outdim = self.getDimension('target')
def _provideSequences(self):
"""Return an iterator over sequence lists, although the dataset contains
only single samples."""
return iter([[x] for x in iter(self)])
def evaluateMSE(self, f, **args):
"""Evaluate the predictions of a function on the dataset and return the
Mean Squared Error, incorporating importance."""
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
e, p = self._evaluateSequence(f, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
return totalError/ponderation
def _evaluateSequence(self, f, seq, verbose = False):
"""Return the ponderated MSE over one sequence."""
totalError = 0.
ponderation = 0.
for input, target in seq:
res = f(input)
e = 0.5 * sum((target-res).flatten()**2)
totalError += e
ponderation += len(target)
if verbose:
print(( 'out: ', fListToString( list( res ) )))
print(( 'correct:', fListToString( target )))
print(( 'error: % .8f' % e))
return totalError, ponderation
def evaluateModuleMSE(self, module, averageOver = 1, **args):
"""Evaluate the predictions of a module on a dataset and return the MSE
(potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
module.reset()
res += self.evaluateMSE(module.activate, **args)
return res/averageOver
def splitWithProportion(self, proportion = 0.5, shuffle=True, margin=0):
"""Produce two new datasets, the first one containing the fraction given
by `proportion` of the samples.
The first dataset will have a fraction given by `proportion` of the
dataset chosen randomly from this dataset (using random.permutation).
The elements in this set will change each time this function is called.
The right (second) dataset will contain the remaining samples (also permuted randomly).
Arguments:
proportion (float): Fraction of dataset to return first in the pair of Datasets returned
Must be between 0 and 1 inclusive.
default: 0.5
margin (float): Fraction of dataset to be unused when splitting without shuffling.
This unused portion of the dataset allows the dividing index to shift randomly.
Must be between 0 and 1 inclusive.
default: 0 (repeatable nonrandom splits)
Returns:
left (Dataset): the portion of the dataset requested, of length int(N * proportion).
right (Dataset): the remaining portion of the dataset, of length int(N * (1 - proportion)).
"""
separator = int(len(self) * proportion)
index0, indexN = 0, len(self)
if shuffle:
indicies = random.permutation(len(self))
else:
indicies = arange(len(self))
if margin:
index_margin = int(margin * len(self))
index0 = random.randint(0, int(index_margin / 2) + 1)
indexN = len(self) - index_margin + index0
assert(indexN <= len(self))
separator = int((indexN - index0) * proportion)
leftIndicies = indicies[index0:(index0 + separator)]
rightIndicies = indicies[(index0 + separator):indexN]
leftDs = SupervisedDataSet(inp=self['input'][leftIndicies].copy(),
target=self['target'][leftIndicies].copy())
rightDs = SupervisedDataSet(inp=self['input'][rightIndicies].copy(),
target=self['target'][rightIndicies].copy())
return leftDs, rightDs
class SequentialSupervisedDataSet(SupervisedDataSet):
"""A SupervisedDataSet is an ordered sequence with two fields, one for input and one for the target
A SequentialSupervisedDataSet is identical to a SupervisedDataSet except that it maintains
the order of the samples (both the output and the input). Indices of a new sequence are stored whenever
the method newSequence() is called. The last (open) sequence is considered
a normal sequence even though it does not have a following "new sequence"
marker."""
def splitWithProportion(self, proportion=0.5, margin=0):
"""Produce two new datasets, each containing a part of the sequences.
The first dataset will have a fraction given by `proportion` of the
dataset. This split is repeatable and nonrandom. So the left (first)
dataset will contain the first M samples unshuffled, where M is int(len(samples) * proportion)
and the right (second) dataset will contain the remaining samples, unshuffled.
Arguments:
proportion (float): Fraction of dataset to return first in the pair of Datasets returned
Must be between 0 and 1 inclusive.
default: 0.5
Returns:
left (Dataset): the portion of the dataset requested, of length int(N * proportion).
right (Dataset): the remaining portion of the dataset, of length int(N * (1 - proportion)).
"""
return super(SequentialSupervisedDataSet, self).splitWithProportion(proportion=proportion, shuffle=False, margin=margin)
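# Minimal usage sketch (assumes PyBrain is importable; the XOR samples are illustrative):
if __name__ == '__main__':
    ds = SupervisedDataSet(2, 1)            # 2-dimensional input, 1-dimensional target
    ds.addSample((0, 0), (0,))
    ds.addSample((0, 1), (1,))
    ds.addSample((1, 0), (1,))
    ds.addSample((1, 1), (0,))
    train, test = ds.splitWithProportion(0.75)
    print(len(train), len(test))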
|
1703475
|
from sqlalchemy import Column, String, Integer, Float, Date, Boolean, create_engine, PrimaryKeyConstraint
from sqlalchemy.ext.declarative import declarative_base
from dotenv import load_dotenv
import os
Base = declarative_base()
class NewHouseByArea(Base):
'''New-home transaction data, broken down by floor-area bracket'''
__tablename__ = 'newhousebyarea'
thedate = Column(Date, nullable=False, primary_key=True)
region = Column(String(255), nullable=False, primary_key=True)
area_level = Column(String(255), nullable=False, primary_key=True)
deal_count = Column(Integer)
area = Column(Float)
price = Column(Float)
total_price = Column(Integer)
#PrimaryKeyConstraint(name='newhousebyarea_primary_key')
def __repr__(self):
return '<type "NewHouseByArea">{}, {}, {}'.format(self.thedate, self.region, self.area_level)
def __str__(self):
return self.__repr__()
class NewHouseByType(Base):
'''New-home transaction data, broken down by property type'''
__tablename__ = 'newhousebytype'
thedate = Column(Date, nullable=False, primary_key=True)
region = Column(String(255), nullable=False, primary_key=True)
house_type = Column(String(255), nullable=False, primary_key=True)
deal_count = Column(Integer)
area = Column(Float)
price = Column(Float)
availableforsalecount = Column(Integer)
availableforsalearea = Column(Integer)
#PrimaryKeyConstraint(name='newhousebytype_primary_key')
def __repr__(self):
return '<type "NewHouseByType">{}, {}, {}'.format(self.thedate, self.region, self.house_type)
def __str__(self):
return self.__repr__()
class NewHouseByUse(Base):
'''New-home transaction data, broken down by usage'''
__tablename__ = 'newhousebyuse'
thedate = Column(Date, nullable=False, primary_key=True)
region = Column(String(255), nullable=False, primary_key=True)
use_type = Column(String(255), nullable=False, primary_key=True)
deal_count = Column(Integer)
area = Column(Float)
price = Column(Float)
availableforsalecount = Column(Integer)
availableforsalearea = Column(Integer)
#PrimaryKeyConstraint('thedate', 'region', 'use_type', name='newhousebyuse_primary_key')
def __repr__(self):
return '<type "NewHouseByUse">{}, {}, {}'.format(self.thedate, self.region, self.use_type)
def __str__(self):
return self.__repr__()
# thedate, region, use_type
class OldHouseByUse(Base):
'''Second-hand home transaction data, categorized by usage'''
__tablename__ = 'oldhousebyuse'
thedate = Column(Date, nullable=False, primary_key=True)
region = Column(String(255), nullable=False, primary_key=True)
use_type = Column(String(255), nullable=False, primary_key=True)
area = Column(Float)
deal_count = Column(Integer)
class OldHouseSource(Base):
'''Second-hand home listing records, one row per source listing (serial number)'''
__tablename__ = 'oldhousesource'
thedate = Column(Date, nullable=False)
region = Column(String(255), nullable=False)
serial_num = Column(String(255), nullable=False, primary_key=True)
project_name = Column(String(255), nullable=False)
area = Column(Float)
use_type = Column(String(255))
code = Column(String(30))
agency_info = Column(String(255))
def __str__(self):
return '<type "OldHouseSource">{}, {}, {}'.format(self.thedate, self.region, self.serial_num)
class NewHouseSourceProject(Base):
'''New-home presale data: project-level information'''
__tablename__ = 'newhousesrc_project'
id = Column(Integer, primary_key=True, autoincrement=True)
thedate = Column(Date)
region = Column(String(255), nullable=False)
project_name = Column(String(255), nullable=False)
builder = Column(String(255), nullable=False)
address = Column(String(255), nullable=False)
house_usage = Column(String(255), nullable=False)
land_usage = Column(String(255))
land_years_limit = Column(Integer)
land_serial_num = Column(String(255))
presale_license_num = Column(String(255), nullable=False, unique=True)
pre_sale_count = Column(Integer)
pre_area = Column(Float)
now_sale_count = Column(Integer)
now_area = Column(Float)
def __str__(self):
return '<type "NewHouseSourceProject">{}, {}'.format(self.region, self.project_name)
class NewHouseSourceBuilding(Base):
'''New-home presale data: building-level information'''
__tablename__ = 'newhousesrc_building'
id = Column(Integer, autoincrement=True, primary_key=True)
project_id = Column(Integer, nullable=False)
project_name = Column(String(255), nullable=False)
building_name = Column(String(255), nullable=False)
plan_license = Column(String(255), nullable=False)
build_license = Column(String(255), nullable=False)
is_crawled = Column(Boolean, nullable=False)
def __str__(self):
return '<type "NewHouseSourceBuilding">{}, {}'.format(self.project_name, self.building_name)
class NewHouseSourceHouse(Base):
'''New-home presale data: per-unit (house) information'''
__tablename__ = 'newhousesrc_house'
id = Column(Integer, autoincrement=True, primary_key=True)
build_id = Column(Integer, nullable=False)
building_name = Column(String(255))
branch = Column(String(10))
room_num = Column(String(255))
floor = Column(String(255))
house_type = Column(String(255))
contact_code = Column(String(255))
price = Column(Float)
usage = Column(String(255))
build_area = Column(Float)
inside_area = Column(Float)
share_area = Column(Float)
def __str__(self):
return '<type "NewHouseSourceHouse">{}, {}, {}'.format(self.building_name, self.branch, self.room_num)
class NewHouseSourceProjectSummary(Base):
'''Project summary records used to detect new projects; the follow-up crawlers are all driven by this table'''
__tablename__ = 'newhousesrc_project_summary'
id = Column(Integer, autoincrement=True, nullable=False, primary_key=True)
thedate = Column(Date, nullable=False)
region = Column(String(255))
presale_license_num = Column(String(255))
project_name = Column(String)
builder = Column(String(255))
url = Column(String(1024), nullable=False)
is_crawled = Column(Boolean)
def __str__(self):
return '<type "NewHouseSourceProjectSummary">{}, {}'.format(self.thedate, self.project_name)
# class TestId(Base):
# __tablename__ = 'test_id'
# id = Column(Integer, primary_key=True, autoincrement=True)
# thedate = Column(Date)
# region = Column(String(255), nullable=False, unique=True)
load_dotenv()
engine = create_engine(os.getenv('DATABASE_URI', 'sqlite:///:memory:'))
#
# DBSession = sessionmaker(bind=engine)
# session = DBSession()
# newhouse = NewHouseByArea(thedate='2019-2-2', region='福田区', area_level='90平方米以下', deal_count=1234, area=90, price=42000, total_price=350)
# session.add(newhouse)
# session.commit()
# session.close()
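# Minimal usage sketch (assumption: DATABASE_URI points at a reachable database,
# otherwise the in-memory SQLite fallback above is used):
if __name__ == '__main__':
    from sqlalchemy.orm import sessionmaker
    Base.metadata.create_all(engine)        # create every table declared above
    Session = sessionmaker(bind=engine)
    session = Session()
    print(session.query(NewHouseByArea).count())
    session.close()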
|
1703510
|
import os
from zstacklib.utils import shell
from zstacklib.utils import lvm
from kvmagent.plugins.bmv2_gateway_agent import exception
from kvmagent.plugins.bmv2_gateway_agent import utils as bm_utils
from kvmagent.plugins.bmv2_gateway_agent.volume import base
from kvmagent.plugins.bmv2_gateway_agent.volume import helper
class SharedBlockVolume(base.BaseVolume):
def __init__(self, *args, **kwargs):
super(SharedBlockVolume, self).__init__(*args, **kwargs)
def check_exist(self):
""" Check whether the volume exist
For shared block, check both lv path and device mapper path.
"""
if not os.path.exists(self.real_path):
lvm.active_lv(self.real_path)
# raise exception.DeviceNotExist(
# instance_uuid=self.instance_uuid,
# volume_uuid=self.volume_uuid,
# dev=self.real_path)
@property
def nbd_backend(self):
return self.real_path
@property
def dm_backend(self):
return self.nbd_dev
@property
def dm_backend_slave_name(self):
return self.nbd_dev.split('/')[-1]
@property
def iscsi_backend(self):
return self.dm_id_dev
@property
def real_path(self):
""" Get the shared block's real path
Mark the shared block lun active, and check the device whether exist.
Note that the kernel will create a device mapper dev to point the lv
is added. A symbolic link /dev/VG-Name/LVName pointing to the device
node is also added. Therefore here will be three dev point the the
lv: `/dev/dm-X`, `/dev/VG-Name/LVName`, `/dev/mapper/VGName-LVName`.
The src path in volume params should like
`sharedblock://VG-Name/LVName`, so convert it to real path
`/dev/VG-Name/LVName`
"""
path = self.volume_obj.path.replace('sharedblock://', '/dev/')
return path
# @property
# def dm_name(self):
# """ Construct device mapper dev name
# As mentioned in real_path, the device mapper dev is already created
# while marking the shared block lun active, therefore the only thing
# left to do is look up the existing dm name
# """
# vg_name, lv_name = self.real_path.split('/')[-2:]
# return '{vg_name}-{lv_name}'.format(vg_name=vg_name, lv_name=lv_name)
def attach(self):
helper.NbdDeviceOperator(self).connect()
helper.DmDeviceOperator(self).create()
helper.IscsiOperator(self).setup()
def detach(self):
helper.IscsiOperator(self).revoke()
helper.DmDeviceOperator(self).remove()
helper.NbdDeviceOperator(self).disconnect()
# Do not remove the dm device, because it was created by kernel
# helper.DmDeviceOperator(self).remove()
lvm.deactive_lv(self.real_path)
def pre_take_volume_snapshot(self):
# NOTE: self is src_vol
nbd_operator = helper.NbdDeviceOperator(self)
nbd_operator.fetch_nbd_id()
dm_operator = helper.DmDeviceOperator(self)
with bm_utils.rollback(dm_operator.resume):
dm_operator.suspend()
nbd_operator.disconnect()
def post_take_volume_snapshot(self, src_vol):
# NOTE: self is dst_vol
src_nbd_operator = helper.NbdDeviceOperator(src_vol)
with bm_utils.rollback(src_nbd_operator.connect):
# Use src vol to init dm device operator
dm_operator = helper.DmDeviceOperator(src_vol)
# Use dst vol to init nbd device operator
nbd_operator = helper.NbdDeviceOperator(self)
nbd_operator.connect()
with bm_utils.rollback(nbd_operator.disconnect):
dm_operator.reload(self)
dm_operator.resume()
# helper.NbdDeviceOperator(src_vol).disconnect()
# Rename the dm dev to new volume's dm name
#dm_operator.rename(self)
def resume(self):
# NOTE: self should be src_vol
helper.DmDeviceOperator(self).resume()
def rollback_volume_snapshot(self, src_vol):
""" Rollback volume snapshot if the action failed
"""
src_nbd_operator = helper.NbdDeviceOperator(src_vol)
snapshot_nbd_operator = helper.NbdDeviceOperator(self)
dm_operator = helper.DmDeviceOperator(src_vol)
def _rollback():
# Set snapshot vol nbd id to None to disconnect the nbd id
self.nbd_id = None
snapshot_nbd_operator.disconnect()
# No need to set src vol nbd id to None, because the volume
# operate processed one by one.
# src_vol.nbd_id = None
src_nbd_operator.connect()
dm_operator.reload(src_vol)
dm_operator.resume()
with bm_utils.transcantion(retries=5, sleep_time=1) as cursor:
cursor.execute(_rollback)
|
1703570
|
from django.contrib import admin
from .models import *
# Register your models here.
@admin.register(ItemType)
class ItemTypeAdmin(admin.ModelAdmin):
list_display = ("name","about","item_type_keywords","created_at","updated_at")
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ("name","item_type","about","image","item_keywords","created_at","updated_at")
@admin.register(Weight)
class WeightAdmin(admin.ModelAdmin):
list_display= ("name","value_in_kg","created_at","updated_at")
@admin.register(Address)
class AddressAdmin(admin.ModelAdmin):
list_display = ("name","address_line_1","address_line_2","city","state","pincode","gst_number","delivery_instructions","created_by","created_at","updated_at")
@admin.register(Inventory)
class InventoryAdmin(admin.ModelAdmin):
list_display = ("name","address","contact_name","contact_number","max_quantity","weight_group","created_at","updated_at","created_by")
@admin.register(Listing)
class ListingAdmin(admin.ModelAdmin):
list_display= ("name","item_type","inventory_name","quantity","weight_group","original_price_per_quantity","selling_price_per_quantity","minimum_order_quantity","status","keywords","created_at","updated_at","created_by")
@admin.register(OrderedItems)
class OrderedItemsAdmin(admin.ModelAdmin):
list_display = ("number","invoice_id","order_status","inventory_name","quantity","weight_group","purchase_price_per_quantity","GST_applicable","rebate","cgst","sgst","igst","is_igst","total_price")
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
list_display = ("number","shipping_address","billing_address","payment_mode","payment_details","total_price","grand_total","created_by","created_at","updated_at")
@admin.register(Quote)
class QuoteAdmin(admin.ModelAdmin):
list_filter = ("created_at","updated_at")
list_display = ("item_name","quantity","billing_address","weight_group","seller_action_required","buyer_action_required","po_file","quote_file","status","created_by","created_at","updated_at")
search_fields = ("id","created_by","created_at","updated_at")
readonly_fields = ("id","created_at","updated_at")
|
1703585
|
from pecan import rest
from wsme import types as wtypes
from jmilkfansblog.api.expose import expose as wsexpose
from jmilkfansblog.controllers.v1.views import posts as posts_views
from jmilkfansblog.db import api as db_api
class Post(wtypes.Base):
"""Response data validation for post object"""
id = str
title = str
text = wtypes.text
user_id = str
@classmethod
def sample(cls, post):
sample = cls(
id=post.id,
title=post.title,
text=post.text,
user_id=post.user_id)
return sample
class Posts(wtypes.Base):
"""Response data validation for posts object"""
posts = [Post]
class PostsController(rest.RestController):
"""REST controller for Posts."""
_custom_actions = {
'detail': ['GET']}
def __init__(self):
super(PostsController, self).__init__()
self.posts_views = posts_views.ViewBuilder()
@wsexpose(Posts)
def get(self):
"""Get a list of the posts."""
# FIXME(JmilkFan): Support Chinese
posts = db_api.post_get_all()
return Posts(posts=[Post.sample(post) for post in posts])
@wsexpose()
def get_one(self):
pass
@wsexpose()
def post(self):
pass
@wsexpose()
def patch(self):
pass
@wsexpose()
def delete(self):
pass
@wsexpose()
def detail(self):
return "Detailed information."
|
1703633
|
import numpy as np
import json
def unjson(file):
"""Load a JSON annotation file and return it as a dict."""
with open(file, 'r') as fo:
data = json.load(fo)
return data
# r is the label-noise rate: the fraction of annotations whose category id is randomized
r = 0.2
count = 0
p_a = ''  # path to the input (clean) annotation JSON
p_g = ''  # path to write the noisy annotation JSON
a = unjson(p_a)
for i in range(len(a['annotations'])):
if np.random.random() < r:
a['annotations'][i]['category_id'] = np.random.randint(1, 20)
count += 1
with open(p_g, 'w') as file:
json.dump(a, file)
print(count)
|
1703640
|
import streamlit as st
def welcome():
st.sidebar.success("Select an experiment above")
st.markdown(
"""
# Welcome to Sapsan!
---
Sapsan is a pipeline for Machine Learning (ML) based turbulence modeling. While turbulence
is important in a wide range of media, the pipeline primarily focuses on astrophysical applications.
With Sapsan, one can create their own custom models or use the conventional and physics-informed
ML approaches for turbulence modeling included with the pipeline ([estimators](https://github.com/pikarpov-LANL/Sapsan/wiki/Estimators)).
For example, Sapsan includes ML models designed to accurately capture the turbulent behavior relevant to Core-Collapse Supernovae.
> ## **Purpose**
> Sapsan takes out all the hard work from data preparation and analysis in turbulence
> and astrophysical applications, leaving you focused on ML model design, layer by layer.
**👈 Select an experiment from the dropdown on the left** to see what Sapsan can do!
### Want to learn more?
- Check out Sapsan on [Github](https://github.com/pikarpov-LANL/Sapsan)
- Find the details on the [Wiki](https://github.com/pikarpov-LANL/Sapsan/wiki)
"""
)
show_license = st.checkbox('License Information', value=False)
if show_license:
st.markdown(
"""
Sapsan has a BSD-style license, as found in the [LICENSE](https://github.com/pikarpov-LANL/Sapsan/blob/master/LICENSE) file.
© (or copyright) 2019. Triad National Security, LLC. All rights reserved. This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S. Department of Energy/National Nuclear Security Administration. All rights in the program are reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear Security Administration. The Government is granted for itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare derivative works, distribute copies to the public, perform publicly and display publicly, and to permit others to do so.
"""
)
|
1703687
|
import numpy as np
import geopandas as gpd
class Voronoi:
def __init__(self, detection_base):
"""
:param detection_base: detection object providing the height model and the point coordinates to segment.
"""
self.detection_base = detection_base
@property
def _centered_coords(self):
"""
scipy.spatial.Voronoi needs coordinates centered at 0.
:return:
"""
array = self.detection_base.height_model.array
height, width = self.detection_base.height_model.cell_size_x * array.shape[0], \
self.detection_base.height_model.cell_size_y * array.shape[1]
min_x, min_y = self.detection_base.height_model._bounding_box[0], \
self.detection_base.height_model._bounding_box[2]
coords = self.detection_base._coords_array_single
coords[:, 0], coords[:,1] = coords[:, 0] - (min_x + (width / 2)), coords[:,1] - (min_y + (height / 2))
return coords
def translate(self, series):
"""
Translates the polygons centered at 0 back to physical space.
:return:
"""
array = self.detection_base.height_model.array
height, width = self.detection_base.height_model.cell_size_x * array.shape[0], \
self.detection_base.height_model.cell_size_y * array.shape[1]
min_x, min_y = self.detection_base.height_model._bounding_box[0], \
self.detection_base.height_model._bounding_box[2]
series = series.translate(xoff=min_x + (width / 2), yoff=min_y + (height / 2))
return series
def segment(self, intersect=True):
"""
Segments the input detection object with the Voronoi segmentation algorithm.
:param intersect: If true, intersects with the bounding box of the height model.
"""
from scipy.spatial import Voronoi
from shapely.geometry import LineString
from shapely.ops import polygonize
vor = Voronoi(self._centered_coords)
lines = [LineString(vor.vertices[line]) for line in vor.ridge_vertices if -1 not in line]
poly_generator = polygonize(lines)
series = gpd.GeoSeries(poly_generator)
series = self.translate(series)
series.crs = self.detection_base.height_model.crs
if intersect:
series = series.intersection(self.detection_base.height_model._bounding_box_poly)
return series
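# Usage sketch (comments only; assumes `detections` is a detection object exposing the
# height_model and _coords_array_single attributes used above):
# crowns = Voronoi(detections).segment(intersect=True)
# crowns.to_file("crowns.shp")  # hypothetical output path; GeoSeries.to_file comes from geopandas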
|
1703699
|
from collections import Counter
from copy import deepcopy
import os
import yaml
from urllib.parse import urlsplit
import datefinder
from ._base import BaseParser
from .articles import Article
from .cleaners import clean_hashtags
from .extractors import SeleniumExtractor, RequestsExtractor
from .log import get_logger
logger = get_logger(__name__)
def call_extractor(mode):
if mode == "selenium":
extractor = SeleniumExtractor()
elif mode == "requests":
extractor = RequestsExtractor()
else:
raise ValueError("Unknown extractor mode: {}".format(mode))
return extractor
class NewspapersParser(BaseParser):
def __init__(self, parser, mode="requests"):
self.PARSER = parser
self.RMODE = mode
super().__init__()
def call_extractor(self):
self.extractor = call_extractor(self.RMODE)
def check_valid_url(self, url):
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(url))
return self.PARSER in base_url
def parse(self, url):
assert self.check_valid_url(url), \
"Invalid url for '{}' parser".format(self.PARSER)
article = Article(url)
soup = self.get_soup(url)
if soup is None:
return
try:
cfg = self.cfg["site"][self.PARSER]
except KeyError:
raise NotImplementedError("Invalid parser")
# URL
article.url = url
# title::text
article.title = self.get_strs(soup, **cfg["title"])
# authors::text
article.authors = self.get_strs(soup, **cfg["authors"])
# text::text
article.text = self.get_strs(soup, **cfg["text"])
# published_date::text
article.published_date = self.get_strs(soup, **cfg["pubd"])
# tags::text
article.tags = self.get_strs(soup, **cfg["tags"])
# image_urls::links
article.image_urls = self.get_links(soup, **cfg["image_urls"])
return article
class AutoCrawlParser(BaseParser):
def __init__(self, mode="requests"):
self.RMODE = mode
super().__init__()
def call_extractor(self):
self.extractor = call_extractor(self.RMODE)
def load_config(self, fpath="html.yaml"):
super().load_config()
self.BASE_CONFIG = os.path.join(
os.path.dirname(__file__), fpath)
with open(self.BASE_CONFIG) as f:
self.auto_cfg = yaml.safe_load(f)  # safe_load: yaml.load without an explicit Loader is deprecated/unsafe
# START::main method
def get_title(self, title):
return title
def get_text(self, text):
return text
def get_authors(self, authors):
if authors:
return authors[0]
return []
def get_pubd(self, seqs):
result = []
for seq in seqs:
matches = datefinder.find_dates(seq)
result.extend(list(matches))
if result:
result = str(Counter(result).most_common(1)[0][0]).split()[0]
else:
result = ""
return result
def get_tags(self, tags):
return [clean_hashtags(tag) for tag in tags]
def get_image_urls(self, image_urls):
return image_urls
# END::main method
def parse_tag(self, tree, filter_, **kwargs):
"""
:param filter_: True to return a single filtered string, False to return a list of elements
"""
# arc_attrs::to get links, not text
src_attrs = kwargs.get("src_attrs", None)
attrs = kwargs.get("attrs", None)
vals = kwargs.get("vals", None)
name = kwargs.get("name", None)
exc_vals = kwargs.get("exc_vals", None)
if exc_vals is not None:
temp_tree = deepcopy(tree)
temp_tree = self.exclude_content(temp_tree, attrs, exc_vals)
result = self.get_elements_by_tag(
temp_tree, attrs, vals, name
)
else:
if src_attrs is not None:
result = self.get_links_by_tag(
tree, attrs, vals, src_attrs, name
)
else:
result = self.get_elements_by_tag(
tree, attrs, vals, name
)
if filter_:
result = self.filter_content(result)
return result
def auto_parse(self, url):
cfg = self.auto_cfg
self.call_extractor()
tree = self.get_xpath_tree(url)
article = Article(url)
# URL
article.url = url
# title::text
article.title = self.get_title(
self.parse_tag(tree, True, **cfg["title"])
)
# authors::text
article.authors = self.get_authors(
self.parse_tag(tree, False, **cfg["authors"])
)
# text::text
article.text = self.get_text(
self.parse_tag(tree, True, **cfg["text"])
)
# published_date::text
article.published_date = self.get_pubd(
self.parse_tag(tree, False, **cfg["pubd"])
)
# tags::text
article.tags = self.get_tags(
self.parse_tag(tree, False, **cfg["tags"])
)
# image_urls::links
article.image_urls = self.get_image_urls(
self.parse_tag(tree, False, **cfg["image_urls"])
)
return article
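# Usage sketch (hypothetical URL; assumes the BaseParser loads its YAML config during
# __init__, otherwise call load_config() first):
# parser = AutoCrawlParser(mode="requests")
# article = parser.auto_parse("https://example.com/some-article")
# print(article.title, article.published_date)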
|
1703705
|
description = 'DEL setup at beam TREFF'
includes = [
'voltage',
'short_table_treff',
]
group = 'basic'
|
1703711
|
from nose.tools import assert_equal, assert_raises
# RadixSort is assumed to be provided by the solution under test (in the original
# notebook it is defined in a preceding cell), e.g.: from radix_sort import RadixSort
class TestRadixSort(object):
def test_sort(self):
radix_sort = RadixSort()
assert_raises(TypeError, radix_sort.sort, None)
assert_equal(radix_sort.sort([]), [])
array = [128, 256, 164, 8, 2, 148, 212, 242, 244]
expected = [2, 8, 128, 148, 164, 212, 242, 244, 256]
assert_equal(radix_sort.sort(array), expected)
print('Success: test_sort')
def main():
test = TestRadixSort()
test.test_sort()
if __name__ == '__main__':
main()
|
1703735
|
import numpy as np
def is_numeric(u):
return isinstance(u, (int, float)) \
or np.isscalar(u)
|
1703739
|
from sqlalchemy import Column, Integer, String
from bitcoin_acks.database.base import Base
class Repositories(Base):
__tablename__ = 'repositories'
id = Column(Integer, primary_key=True)
path = Column(String)
name = Column(String)
|
1703771
|
import unittest
from dataclasses import dataclass
from typing import Optional
from komand_sql.connection.connection import Connection
import logging
from komand_sql.connection.schema import Input
@dataclass
class TestCase:
name: str
input_type: str
input_port: Optional[str]
expected: str
testcases = [
TestCase(
name="mssql_connect",
input_type="MSSQL",
input_port="1433",
expected="mssql+pymssql://username:password@198.51.100.1:1433/database_name",
),
TestCase(
name="postgres_connect",
input_type="PostgreSQL",
input_port="1433",
expected="postgres://username:password@198.51.100.1:1433/database_name",
),
TestCase(
name="mysql_connect",
input_type="MySQL",
input_port="1433",
expected="mysql+mysqldb://username:password@198.51.100.1:1433/database_name",
),
TestCase(
name="mssql_connect_without_port",
input_type="MSSQL",
input_port=None,
expected="mssql+pymssql://username:password@198.51.100.1:1433/database_name",
),
TestCase(
name="postgres_connect_without_port",
input_type="PostgreSQL",
input_port=None,
expected="postgres://username:password@198.51.100.1:5432/database_name",
),
TestCase(
name="mysql_connect_without_port",
input_type="MySQL",
input_port=None,
expected="mysql+mysqldb://username:password@198.51.100.1:3306/database_name",
),
TestCase(
name="mssql_connect_different_port",
input_type="MSSQL",
input_port="1111",
expected="mssql+pymssql://username:password@1192.168.127.12:1111/database_name",
),
TestCase(
name="postgres_connect_different_port",
input_type="PostgreSQL",
input_port="1111",
expected="postgres://username:password@198.51.100.1:1111/database_name",
),
TestCase(
name="mysql_connect_different_port",
input_type="MySQL",
input_port="1111",
expected="mysql+mysqldb://username:password@198.51.100.1:1111/database_name",
),
]
class TestConnection(unittest.TestCase):
@staticmethod
def configure_connection(type_: str, port: Optional[str]):
default_connection = Connection()
default_connection.logger = logging.getLogger("connection logger")
params = {
Input.CREDENTIALS: {"username": "username", "password": "password"},
Input.PORT: port,
Input.TYPE: type_,
Input.HOST: "198.51.100.1",
Input.DB: "database_name",
}
default_connection.connect(params)
return default_connection
def test_connect(self):
for case in testcases:
with self.subTest(case.name):
actual = TestConnection.configure_connection(case.input_type, case.input_port).conn_str
self.assertEqual(
case.expected,
actual,
)
|
1703865
|
import os
import random
import sys
import time
import numpy as np
import torch
from tensorboardX import SummaryWriter
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def set_cuda(config):
use_cuda = torch.cuda.is_available()
assert config.use_cuda == use_cuda
if use_cuda:
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
device = (
torch.device("cuda:{}".format(config.gpu))
if use_cuda
else torch.device("cpu")
)
devices_id = config.gpu
return device, devices_id
def set_tensorboard(config):
summary_dir = os.path.join(config.logdir, config.expname)
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
for file_name in os.listdir(summary_dir):
if file_name.startswith("events.out.tfevents"):
print(f"Event file {file_name} already exists")
if input("Remove this file? (y/n) ") == "y":
os.remove(os.path.join(summary_dir, file_name))
print(f"Event file {file_name} removed")
return SummaryWriter(summary_dir)
def set_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
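# Usage sketch (illustrative config values; AttrDict exposes keys as attributes):
if __name__ == "__main__":
    config = AttrDict(use_cuda=torch.cuda.is_available(), gpu=0, seed=42,
                      logdir="runs", expname="demo")
    set_seed(config.seed)
    device, _ = set_cuda(config)
    writer = set_tensorboard(config)
    writer.add_scalar("demo/metric", 1.0, 0)
    writer.close()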
|
1703884
|
from http import HTTPStatus
from waterbutler.core.exceptions import ProviderError
class DropboxUnhandledConflictError(ProviderError):
def __init__(self, error_data):
super().__init__('Dropbox has many unique error messages for code 409 (Conflict), this '
'one was not specifically handled in the provider: {}'.format(error_data),
code=HTTPStatus.CONFLICT)
class DropboxNamingConflictError(ProviderError):
def __init__(self, path):
super().__init__('Cannot complete action: file or folder already exists at {}'.format(path),
code=HTTPStatus.CONFLICT)
|
1703887
|
from __future__ import division
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
from pycuda.tools import DeviceMemoryPool, PageLockedMemoryPool
from pycuda.sparse.packeted import PacketedSpMV
from pycuda.sparse.operator import DiagonalPreconditioner
from pycuda.sparse.cg import solve_pkt_with_cg
import numpy as np
def _solve_cuda(lap_sparse, B, return_full_prob=False, maxiter=100, tol=5e-5):
"""
solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method. For each pixel, the label i corresponding to the
maximal X_i is returned.
"""
print("using gpu mode")
dev_pool = DeviceMemoryPool()
pagelocked_pool = PageLockedMemoryPool()
csr_mat = lap_sparse
csr_mat = csr_mat.astype(np.float32)
inv_mat_diag = 1 / csr_mat.diagonal()
spmv = PacketedSpMV(csr_mat, True, csr_mat.dtype)
X = []
for i in range(len(B)):
rhs = -B[i].astype(spmv.dtype)
if True:
precon = DiagonalPreconditioner(spmv.permute(gpuarray.to_gpu(inv_mat_diag,
allocator=dev_pool.allocate)))
else:
precon = None
print("start solve")
start = drv.Event()
stop = drv.Event()
start.record()
rhs_gpu = gpuarray.to_gpu(rhs, dev_pool.allocate)
tol = 1e-7 if spmv.dtype == np.float64 else tol
res_gpu, it_count, res_count = solve_pkt_with_cg(spmv, rhs_gpu,
precon, tol=tol,
pagelocked_allocator=pagelocked_pool.allocate)
res = res_gpu.get()
stop.record()
stop.synchronize()
elapsed = stop.time_since(start) * 1e-3
est_flops = (csr_mat.nnz * 2 * (it_count + res_count)
+ csr_mat.shape[0] * (2 + 2 + 2 + 2 + 2) * it_count)
if precon is not None:
est_flops += csr_mat.shape[0] * it_count
print("size: %d, elapsed: %g s, %d it, %d residual, it/second: %g, "
"%g gflops/s" % (
csr_mat.shape[0],
elapsed, it_count, res_count, it_count / elapsed,
est_flops / elapsed / 1e9))
x0 = res[0]
X.append(x0)
pagelocked_pool.stop_holding()
dev_pool.stop_holding()
if not return_full_prob:
X = np.array(X)
X = np.argmax(X, axis=0)
return X
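# Usage sketch (comments only; requires a CUDA device and pycuda's sparse CG solver):
# labels = _solve_cuda(lap_sparse, B)                         # per-pixel argmax labels
# probs = _solve_cuda(lap_sparse, B, return_full_prob=True)   # raw per-phase solutions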
|
1703899
|
import pytest
import numpy as np
from gibbs.minimization import PygmoSelfAdaptiveDESettings, OptimizationProblem
from gibbs.minimization import OptimizationMethod, ScipyDifferentialEvolutionSettings
seed = 123
def f_rosenbrock(x):
"""
Define the benchmark Rosenbrock function.
:param numpy.ndarray x:
The function's argument array.
:return:
The evaluated function at the given input array.
:rtype: numpy.float64
"""
dim = len(x)
f = 0.0
for i in range(dim-1):
left_term = 100. * (x[i + 1] - x[i] * x[i]) * (x[i + 1] - x[i] * x[i])
right_term = (1. - x[i]) * (1. - x[i])
f += left_term + right_term
return f
@pytest.mark.parametrize("problem_dimension", range(2, 5))
def test_pygmo_sade_rosenbrock_minimization(problem_dimension):
bounds = problem_dimension * [[-6, 6]]
solver_settings = PygmoSelfAdaptiveDESettings(
gen=1000,
popsize=60,
seed=seed
)
problem = OptimizationProblem(
objective_function=f_rosenbrock,
bounds=bounds,
optimization_method=OptimizationMethod.PYGMO_DE1220,
solver_args=solver_settings
)
solution = problem.solve_minimization()
assert pytest.approx(np.ones(problem_dimension), rel=1e-3) == solution.x
@pytest.mark.parametrize("problem_dimension", range(2, 5))
def test_scipy_de_rosenbrock_minimization(problem_dimension):
bounds = problem_dimension * [[-6, 6]]
solver_settings = ScipyDifferentialEvolutionSettings(
number_of_decision_variables=problem_dimension,
seed=seed
)
problem = OptimizationProblem(
objective_function=f_rosenbrock,
bounds=bounds,
optimization_method=OptimizationMethod.SCIPY_DE,
solver_args=solver_settings
)
solution = problem.solve_minimization()
assert pytest.approx(np.ones(problem_dimension), rel=1e-3) == solution.x
|
1703916
|
from vue import VueComponent, computed
class NavigationItem(VueComponent):
item: dict
template = """
<el-menu-item-group v-if="is_group_header">
<span slot="title">{{ item.group }}</span>
</el-menu-item-group>
<component v-else
@click="$emit('click', item)"
:is="item_tag"
:disabled="item.disabled"
:index="item.id">
<template v-if="is_submenu">
<template slot="title">
<i :class="item.icon"></i>
<span slot="title">{{ item.title }}</span>
</template>
<navigation-item
v-for="sub_item in item.children"
:key="sub_item.id"
:item="sub_item"
@click="$emit('click', $event)"
>
</navigation-item>
</template>
<template v-else>
<i :class="item.icon"></i>
{{ item.title }}
</template>
</component>
"""
@computed
def item_tag(self):
if self.is_submenu:
return "el-submenu"
return "el-menu-item"
@computed
def is_menu_item(self):
return not self.is_group_header and not self.is_submenu
@computed
def is_group_header(self):
return "group" in self.item
@computed
def is_submenu(self):
return "children" in self.item
class NavigationMenu(VueComponent):
content: list
template = """
<div>
<el-menu
class="navigation-menu">
<navigation-item
@click="$emit('click', $event)"
v-for="item in content"
:key="item.id"
:item="item"
>
</navigation-item>
</el-menu>
</div>
"""
def register():
NavigationItem.register()
NavigationMenu.register()
|
1703923
|
from .base import BaseLayer
import hetu as ht
class Relu(BaseLayer):
def __call__(self, x):
return ht.relu_op(x)
|
1703959
|
import logging
from flow.utils.flow_warnings import deprecation_warning
class SumoParams:
def __init__(self,
port=None,
sim_step=0.1,
emission_path=None,
lateral_resolution=None,
no_step_log=True,
sumo_binary="sumo",
overtake_right=False,
ballistic=False,
seed=None,
restart_instance=False,
print_warnings=True,
teleport_time=-1):
"""Sumo-specific parameters
These parameters are used to customize a sumo simulation instance upon
initialization. This includes passing the simulation step length,
specifying whether to use sumo's gui during a run, and other features
described in the Attributes below.
Attributes
----------
port: int, optional
Port for Traci to connect to; finds an empty port by default
sim_step: float, optional
seconds per simulation step; 0.1 by default
emission_path: str, optional
Path to the folder in which to create the emissions output.
Emissions output is not generated if this value is not specified
lateral_resolution: float, optional
width of the divided sublanes within a lane, defaults to None (i.e.
no sublanes). If this value is specified, the vehicle in the
network cannot use the "LC2013" lane change model.
no_step_log: bool, optional
specifies whether to add sumo's step logs to the log file, and
print them into the terminal during runtime, defaults to True
sumo_binary: str, optional
specifies whether to visualize the rollout(s). May be:
- 'sumo-gui' to run the experiment with the gui
- 'sumo' to run without the gui (default)
overtake_right: bool, optional
whether vehicles are allowed to overtake on the right as well as
the left
ballistic: bool, optional
specifies whether to use ballistic step updates. This is somewhat
more realistic, but increases the possibility of collisions.
Defaults to False
seed: int, optional
seed for sumo instance
restart_instance: bool, optional
specifies whether to restart a sumo instance upon reset. Restarting
the instance helps avoid slowdowns caused by excessive inflows over
large experiment runtimes, but also requires the gui to be started
after every reset if "sumo_binary" is set to "sumo-gui".
print_warnings: bool, optional
If set to false, this will silence sumo warnings on the stdout
teleport_time: int, optional
If negative, vehicles don't teleport in gridlock. If positive,
they teleport after teleport_time seconds
"""
self.port = port
self.sim_step = sim_step
self.emission_path = emission_path
self.lateral_resolution = lateral_resolution
self.no_step_log = no_step_log
self.sumo_binary = sumo_binary
self.seed = seed
self.ballistic = ballistic
self.overtake_right = overtake_right
self.restart_instance = restart_instance
self.print_warnings = print_warnings
self.teleport_time = teleport_time
class EnvParams:
def __init__(self,
vehicle_arrangement_shuffle=False,
starting_position_shuffle=False,
additional_params=None,
horizon=500,
sort_vehicles=False,
warmup_steps=0,
sims_per_step=1,
evaluate=False):
"""Environment and experiment-specific parameters.
This includes specifying the bounds of the action space and relevant
coefficients to the reward function, as well as specifying how the
positions of vehicles are modified in between rollouts.
Attributes
----------
vehicle_arrangement_shuffle: bool, optional
determines if initial conditions of vehicles are shuffled at
reset; False by default
starting_position_shuffle: bool, optional
determines if starting position of vehicles should be updated
between rollouts; False by default
additional_params: dict, optional
Specify additional environment params for a specific
environment configuration
horizon: int, optional
number of steps per rollouts
sort_vehicles: bool, optional
specifies whether vehicles are to be sorted by position during
a simulation step. If set to True, the environment parameter
self.sorted_ids will return a list of all vehicle ids sorted
by their absolute position.
warmup_steps: int, optional
number of steps performed before the initialization of training
during a rollout. These warmup steps are not added as steps
into training, and the actions of rl agents during these steps
are dictated by sumo. Defaults to zero
sims_per_step: int, optional
number of sumo simulation steps performed in any given rollout
step. RL agents perform the same action for the duration of
these simulation steps.
evaluate: bool, optional
flag indicating that the evaluation reward should be used
rather than the normal reward
"""
self.vehicle_arrangement_shuffle = vehicle_arrangement_shuffle
self.starting_position_shuffle = starting_position_shuffle
self.additional_params = \
additional_params if additional_params is not None else {}
self.horizon = horizon
self.sort_vehicles = sort_vehicles
self.warmup_steps = warmup_steps
self.sims_per_step = sims_per_step
self.evaluate = evaluate
def get_additional_param(self, key):
return self.additional_params[key]
class NetParams:
def __init__(self,
no_internal_links=True,
in_flows=None,
osm_path=None,
netfile=None,
additional_params=None):
"""Network configuration parameters
Unlike most other parameters, NetParams may vary drastically dependent
on the specific network configuration. For example, for the ring road
the network parameters will include a characteristic length, number of
lanes, and speed limit.
In order to determine which additional_params variable may be needed
for a specific scenario, refer to the ADDITIONAL_NET_PARAMS variable
located in the scenario file.
Parameters
----------
no_internal_links : bool, optional
determines whether the space between edges is finite. Important
when using networks with intersections; default is True
in_flows : InFlows type, optional
specifies the inflows of specific edges and the types of vehicles
entering the network from these edges
osm_path : str, optional
path to the .osm file that should be used to generate the network
configuration files. This parameter is only needed / used if the
OpenStreetMapGenerator generator class is used.
netfile : str, optional
path to the .net.xml file that should be passed to SUMO. This is
only needed / used if the NetFileGenerator class is used, such as
in the case of Bay Bridge experiments (which use a custom net.xml
file)
additional_params : dict, optional
network specific parameters; see each subclass for a description of
what is needed
"""
self.no_internal_links = no_internal_links
self.in_flows = in_flows
self.osm_path = osm_path
self.netfile = netfile
self.additional_params = additional_params or {}
class InitialConfig:
def __init__(self,
shuffle=False,
spacing="uniform",
min_gap=0,
perturbation=0.0,
x0=0,
bunching=0,
lanes_distribution=float("inf"),
edges_distribution="all",
positions=None,
lanes=None,
additional_params=None):
"""Initial configuration parameters.
These parameters affect the positioning of vehicles in the
network at the start of a rollout. By default, vehicles are uniformly
distributed in the network.
Attributes
----------
shuffle: bool, optional
specifies whether the ordering of vehicles in the Vehicles class
should be shuffled upon initialization.
spacing: str, optional
specifies the positioning of vehicles in the network relative to
one another. May be one of: "uniform", "random", or "custom".
Default is "uniform".
min_gap: float, optional
minimum gap between two vehicles upon initialization, in meters.
Default is 0 m.
x0: float, optional
position of the first vehicle to be placed in the network
perturbation: float, optional
standard deviation used to perturb vehicles from their uniform
position, in meters. Default is 0 m.
bunching: float, optional
reduces the portion of the network that should be filled with
vehicles by this amount.
lanes_distribution: int, optional
number of lanes vehicles should be dispersed into. If the value is
greater than the total number of lanes on an edge, vehicles are
spread across all lanes.
edges_distribution: list <str>, optional
list of edges vehicles may be placed on initialization, default is
all lanes (stated as "all")
positions: list, optional
used if the user would like to specify user-generated initial
positions.
lanes: list, optional
used if the user would like to specify user-generated initial
lanes.
additional_params: dict, optional
some other network-specific params
"""
self.shuffle = shuffle
self.spacing = spacing
self.min_gap = min_gap
self.perturbation = perturbation
self.x0 = x0
self.bunching = bunching
self.lanes_distribution = lanes_distribution
self.edges_distribution = edges_distribution
self.positions = positions
self.lanes = lanes
self.additional_params = additional_params or dict()
def get_additional_params(self, key):
return self.additional_params[key]
class SumoCarFollowingParams:
def __init__(self,
accel=1.0,
decel=1.5,
sigma=0.5,
tau=1.0, # past 1 at sim_step=0.1 you no longer see waves
min_gap=2.5,
max_speed=30,
speed_factor=1.0,
speed_dev=0.1,
impatience=0.5,
car_follow_model="IDM",
**kwargs):
"""Parameters for sumo-controlled acceleration behavior
Attributes
----------
accel: float
see Note
decel: float
see Note
sigma: float
see Note
tau: float
see Note
min_gap: float
see minGap Note
max_speed: float
see maxSpeed Note
speed_factor: float
see speedFactor Note
speed_dev: float
see speedDev in Note
impatience: float
see Note
car_follow_model: str
see carFollowModel in Note
kwargs: dict
used to handle deprecations
Note
----
For a description of all params, see:
http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes
"""
# check for deprecations (minGap)
if "minGap" in kwargs:
deprecation_warning(self, "minGap", "min_gap")
min_gap = kwargs["minGap"]
# check for deprecations (maxSpeed)
if "maxSpeed" in kwargs:
deprecation_warning(self, "maxSpeed", "max_speed")
max_speed = kwargs["maxSpeed"]
# check for deprecations (speedFactor)
if "speedFactor" in kwargs:
deprecation_warning(self, "speedFactor", "speed_factor")
speed_factor = kwargs["speedFactor"]
# check for deprecations (speedDev)
if "speedDev" in kwargs:
deprecation_warning(self, "speedDev", "speed_dev")
speed_dev = kwargs["speedDev"]
# check for deprecations (carFollowModel)
if "carFollowModel" in kwargs:
deprecation_warning(self, "carFollowModel", "car_follow_model")
car_follow_model = kwargs["carFollowModel"]
# create a controller_params dict with all the specified parameters
self.controller_params = {
"accel": accel,
"decel": decel,
"sigma": sigma,
"tau": tau,
"minGap": min_gap,
"maxSpeed": max_speed,
"speedFactor": speed_factor,
"speedDev": speed_dev,
"impatience": impatience,
"carFollowModel": car_follow_model,
}
class SumoLaneChangeParams:
def __init__(self,
model="LC2013",
lc_strategic=1.0,
lc_cooperative=1.0,
lc_speed_gain=1.0,
lc_keep_right=1.0,
lc_look_ahead_left=2.0,
lc_speed_gain_right=1.0,
lc_sublane=1.0,
lc_pushy=0,
lc_pushy_gap=0.6,
lc_assertive=1,
lc_impatience=0,
lc_time_to_impatience=float("inf"),
lc_accel_lat=1.0,
**kwargs):
"""Parameters for sumo-controlled lane change behavior
Attributes
----------
model: str, optional
see laneChangeModel in Note
lc_strategic: float, optional
see lcStrategic in Note
lc_cooperative: float, optional
see lcCooperative in Note
lc_speed_gain: float, optional
see lcSpeedGain in Note
lc_keep_right: float, optional
see lcKeepRight in Note
lc_look_ahead_left: float, optional
see lcLookaheadLeft in Note
lc_speed_gain_right: float, optional
see lcSpeedGainRight in Note
lc_sublane: float, optional
see lcSublane in Note
lc_pushy: float, optional
see lcPushy in Note
lc_pushy_gap: float, optional
see lcPushyGap in Note
lc_assertive: float, optional
see lcAssertive in Note
lc_impatience: float, optional
see lcImpatience in Note
lc_time_to_impatience: float, optional
see lcTimeToImpatience in Note
lc_accel_lat: float, optional
see lcAccelLate in Note
kwargs: dict
used to handle deprecations
Note
----
For a description of all params, see:
http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes
"""
# check for deprecations (lcStrategic)
if "lcStrategic" in kwargs:
deprecation_warning(self, "lcStrategic", "lc_strategic")
lc_strategic = kwargs["lcStrategic"]
# check for deprecations (lcCooperative)
if "lcCooperative" in kwargs:
deprecation_warning(self, "lcCooperative", "lc_cooperative")
lc_cooperative = kwargs["lcCooperative"]
# check for deprecations (lcSpeedGain)
if "lcSpeedGain" in kwargs:
deprecation_warning(self, "lcSpeedGain", "lc_speed_gain")
lc_speed_gain = kwargs["lcSpeedGain"]
# check for deprecations (lcKeepRight)
if "lcKeepRight" in kwargs:
deprecation_warning(self, "lcKeepRight", "lc_keep_right")
lc_keep_right = kwargs["lcKeepRight"]
# check for deprecations (lcLookaheadLeft)
if "lcLookaheadLeft" in kwargs:
deprecation_warning(self, "lcLookaheadLeft", "lc_look_ahead_left")
lc_look_ahead_left = kwargs["lcLookaheadLeft"]
# check for deprecations (lcSpeedGainRight)
if "lcSpeedGainRight" in kwargs:
deprecation_warning(
self, "lcSpeedGainRight", "lc_speed_gain_right")
lc_speed_gain_right = kwargs["lcSpeedGainRight"]
# check for deprecations (lcSublane)
if "lcSublane" in kwargs:
deprecation_warning(self, "lcSublane", "lc_sublane")
lc_sublane = kwargs["lcSublane"]
# check for deprecations (lcPushy)
if "lcPushy" in kwargs:
deprecation_warning(self, "lcPushy", "lc_pushy")
lc_pushy = kwargs["lcPushy"]
# check for deprecations (lcPushyGap)
if "lcPushyGap" in kwargs:
deprecation_warning(self, "lcPushyGap", "lc_pushy_gap")
lc_pushy_gap = kwargs["lcPushyGap"]
# check for deprecations (lcAssertive)
if "lcAssertive" in kwargs:
deprecation_warning(self, "lcAssertive", "lc_assertive")
lc_assertive = kwargs["lcAssertive"]
# check for deprecations (lcImpatience)
if "lcImpatience" in kwargs:
deprecation_warning(self, "lcImpatience", "lc_impatience")
lc_impatience = kwargs["lcImpatience"]
# check for deprecations (lcTimeToImpatience)
if "lcTimeToImpatience" in kwargs:
deprecation_warning(
self, "lcTimeToImpatience", "lc_time_to_impatience")
lc_time_to_impatience = kwargs["lcTimeToImpatience"]
# check for deprecations (lcAccelLat)
if "lcAccelLat" in kwargs:
deprecation_warning(self, "lcAccelLat", "lc_accel_lat")
lc_accel_lat = kwargs["lcAccelLat"]
# check for valid model
if model not in ["LC2013", "SL2015"]:
logging.error("Invalid lane change model! Defaulting to LC2013")
model = "LC2013"
if model == "LC2013":
self.controller_params = {
"laneChangeModel": model,
"lcStrategic": str(lc_strategic),
"lcCooperative": str(lc_cooperative),
"lcSpeedGain": str(lc_speed_gain),
"lcKeepRight": str(lc_keep_right),
# "lcLookaheadLeft": str(lcLookaheadLeft),
# "lcSpeedGainRight": str(lcSpeedGainRight)
}
elif model == "SL2015":
self.controller_params = {
"laneChangeModel": model,
"lcStrategic": str(lc_strategic),
"lcCooperative": str(lc_cooperative),
"lcSpeedGain": str(lc_speed_gain),
"lcKeepRight": str(lc_keep_right),
"lcLookaheadLeft": str(lc_look_ahead_left),
"lcSpeedGainRight": str(lc_speed_gain_right),
"lcSublane": str(lc_sublane),
"lcPushy": str(lc_pushy),
"lcPushyGap": str(lc_pushy_gap),
"lcAssertive": str(lc_assertive),
"lcImpatience": str(lc_impatience),
"lcTimeToImpatience": str(lc_time_to_impatience),
"lcAccelLat": str(lc_accel_lat)
}
class InFlows:
def __init__(self):
"""
Used to add inflows to a network. Inflows can be specified for any edge
that has a specified route or routes.
"""
self.num_flows = 0
self.__flows = []
def add(self,
veh_type,
edge,
begin=1,
end=2e6,
vehs_per_hour=None,
period=None,
probability=None,
number=None,
**kwargs):
"""Specifies a new inflow for a given type of vehicles and edge.
Parameters
----------
veh_type: str
type of vehicles entering the edge, must match one of the types set
in the Vehicles class.
edge: str
starting edge for vehicles in this inflow.
begin: float, optional
see Note
end: float, optional
see Note
vehs_per_hour: float, optional
see vehsPerHour in Note
period: float, optional
see Note
probability: float, optional
see Note
number: int, optional
see Note
kwargs: dict, optional
see Note
Note
----
For information on the parameters start, end, vehs_per_hour, period,
probability, number, as well as other vehicle type and routing
parameters that may be added via \*\*kwargs, refer to:
http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes
"""
# check for deprecations (vehsPerHour)
if "vehsPerHour" in kwargs:
deprecation_warning(self, "vehsPerHour", "vehs_per_hour")
vehs_per_hour = kwargs["vehsPerHour"]
# delete since all parameters in kwargs are used again later
del kwargs["vehsPerHour"]
new_inflow = {"name": "flow_%d" % self.num_flows, "vtype": veh_type,
"route": "route" + edge, "end": end}
new_inflow.update(kwargs)
if begin is not None:
new_inflow["begin"] = begin
if vehs_per_hour is not None:
new_inflow["vehsPerHour"] = vehs_per_hour
if period is not None:
new_inflow["period"] = period
if probability is not None:
new_inflow["probability"] = probability
if number is not None:
new_inflow["number"] = number
self.__flows.append(new_inflow)
self.num_flows += 1
def get(self):
return self.__flows
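# Usage sketch (illustrative values; real scenarios define their own additional_params
# and edge names):
if __name__ == "__main__":
    sumo_params = SumoParams(sim_step=0.1, sumo_binary="sumo")
    env_params = EnvParams(horizon=1000, additional_params={"max_accel": 1})
    net_params = NetParams(additional_params={"length": 230, "lanes": 1})
    inflow = InFlows()
    inflow.add(veh_type="human", edge="highway_0", vehs_per_hour=2000)
    print(inflow.get())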
|
1703979
|
from django.db import models
from .helper import DefaultFields, ShortableNameFields
class LegislativeTerm(DefaultFields, ShortableNameFields):
start = models.DateField()
end = models.DateField(null=True, blank=True)
def __str__(self):
return self.short_name
|
1703994
|
import numpy as np
from pandas import DataFrame
class procrustes_test(object):
"""
Docstring for function ecopy.procrustes_test
====================
Conducts permutation procrustes test of relationship
between two non-diagonal (raw) matrices
Use
----
procrustes_test(mat1, mat2, nperm)
Returns an object of class procrustes_test
Parameters
----------
mat1: A raw site x species matrix (or any object x descriptor)
mat2: A raw site x descriptor matrix (or any object x descriptor)
nperm: Number of permutations
Attributes (see online documentation for descriptions)
---------
m12_obs: Observed test statistic, m12**2
pval: p-value
perm: Number of permutations
Methods
--------
summary(): provides a summary of test results
Example
--------
import ecopy as ep
d1 = ep.load_data('varespec')
d2 = ep.load_data('varechem')
d = ep.procrustes_test(d1, d2)
print(d.summary())
"""
def __init__(self, mat1, mat2, nperm=999):
if isinstance(mat1, DataFrame):
X = np.array(mat1).astype('float')
else:
X = mat1.astype('float')
if isinstance(mat2, DataFrame):
Y = np.array(mat2).astype('float')
else:
Y = mat2.astype('float')
if X.shape[0] != Y.shape[0]:
msg = 'Matrices must have the same number of rows'
raise ValueError(msg)
X_cent = np.apply_along_axis(lambda x: x - x.mean(), 0, X)
Y_cent = np.apply_along_axis(lambda y: y - y.mean(), 0, Y)
X_cent = X_cent / np.sqrt(np.sum(X_cent**2))
Y_cent = Y_cent / np.sqrt(np.sum(Y_cent**2))
W = np.sum(np.linalg.svd(X_cent.T.dot(Y_cent), compute_uv=0))
self.m12_obs = 1 - W**2
m12_perm = np.zeros(nperm)
i = 0
while i < nperm:
idx = np.random.permutation(range(X_cent.shape[0]))
X_perm = X_cent[idx,:]
W_perm = np.sum(np.linalg.svd(X_perm.T.dot(Y_cent), compute_uv=0))
m12_perm[i] = 1 - W_perm**2
i += 1
self.pval = np.mean(m12_perm < self.m12_obs)
self.perm = nperm
def summary(self):
summ = '\nm12 squared = {0:.3}\np = {1:.3}\npermutations = {2}'.format(self.m12_obs, self.pval, self.perm)
return summ
|
1704011
|
import asyncio
import importlib
import logging
import os
import re
import time
import core.utils
from core.exceptions import ParseTemplateError, ParseTemplateRuntimeError
from core.storage import cache
from core.template_parser.nodes import site_configs
from core.template_parser.nodes.base import TemplateNode
from core.template_parser.queue_wrapper import QueueWrapper
from core.template_parser.utils import get_module_function, check_if_null, dict_to_string, safe_login_module
from gui.constants import SITE_ICON_PATH
logger = logging.getLogger(__name__)
class Site(TemplateNode):
def __init__(self,
parent,
raw_module_name,
use_folder,
raw_folder_name,
raw_function,
raw_folder_function,
raw_login_function,
function_kwargs,
consumer_kwargs,
**kwargs):
super().__init__(parent=parent,
folder_name=raw_folder_name,
unique_key_kwargs=self.get_unique_key_kwargs(
raw_module_name=raw_module_name,
raw_folder_name=raw_folder_name,
use_folder=use_folder,
raw_function=raw_function,
raw_folder_function=raw_folder_function,
raw_login_function=raw_login_function,
function_kwargs=function_kwargs,
),
use_folder=use_folder,
is_producer=True,
**kwargs)
self.raw_module_name = raw_module_name
self.function_kwargs = function_kwargs
self.consumer_kwargs = consumer_kwargs
self.raw_folder_name = raw_folder_name
self.raw_function = raw_function
self.raw_folder_function = raw_folder_function
self.raw_login_function = raw_login_function
self.folder_module_name, self.folder_function_name = self.get_folder_module_func_name(raw_module_name,
raw_folder_function,
raw_folder_name,
use_folder)
self.module_name, self.function_name = self.get_module_func_name(raw_module_name,
raw_function)
self.login_module_name, self.login_function_name = self.get_login_func_name(raw_module_name,
raw_login_function)
@staticmethod
def get_unique_key_kwargs(**kwargs):
return dict(
raw_module_name=kwargs.get("raw_module_name"),
raw_folder_name=kwargs.get("raw_folder_name"),
use_folder=kwargs.get("use_folder"),
raw_function=kwargs.get("raw_function"),
raw_login_function=kwargs.get("raw_login_function"),
raw_folder_function=kwargs.get("raw_folder_function"),
function_kwargs=kwargs.get("function_kwargs"),
)
@staticmethod
def _import_module(module_name):
try:
return importlib.import_module(module_name)
except ModuleNotFoundError:
raise ParseTemplateError(f"Module with name: {module_name} does not exist")
@staticmethod
def _test_function_exist(module, function_name):
if not hasattr(module, function_name):
raise ParseTemplateError(f"Function: {function_name} in module:"
f" {module} does not exist")
if not callable(getattr(module, function_name)):
raise ParseTemplateError(f"Function: {function_name} in module:"
f" {module} is not a function")
@staticmethod
def get_module_func_name(raw_module_name, raw_function):
if raw_module_name != "custom":
module_name = raw_module_name
function_name = "producer"
else:
if raw_function is None:
raise ParseTemplateError(f"Expected a 'function' field with custom")
module_name, function_name = get_module_function(raw_function)
module_name = "sites." + module_name
site_module = Site._import_module(module_name)
Site._test_function_exist(site_module, function_name)
return module_name, function_name
@staticmethod
def get_folder_module_func_name(raw_module_name, raw_folder_function, raw_folder_name, use_folder):
if raw_module_name != "custom":
folder_module_name = raw_module_name
folder_function_name = "get_folder_name"
else:
folder_module_name, folder_function_name = None, None
if raw_folder_name is None and use_folder:
if raw_folder_function is None:
raise ParseTemplateError(f"Expected a 'folder_function' or 'folder_name' field with custom")
folder_module_name, folder_function_name = get_module_function(raw_folder_function)
if folder_module_name is not None:
folder_module_name = "sites." + folder_module_name
folder_module = Site._import_module(folder_module_name)
Site._test_function_exist(folder_module, folder_function_name)
return folder_module_name, folder_function_name
@staticmethod
def get_login_func_name(raw_module_name, raw_login_function):
if raw_login_function is None:
login_module_name = "sites." + raw_module_name
module = Site._import_module(login_module_name)
if hasattr(module, "login"):
return login_module_name, "login"
return None, None
raw_login_parts = raw_login_function.split(".")
login_module_name = ".".join(["sites"] + raw_login_parts[:-1])
login_func_name = raw_login_parts[-1]
login_func_module = Site._import_module(login_module_name)
Site._test_function_exist(login_func_module, login_func_name)
return login_module_name, login_func_name
def __str__(self):
return self.module_name
def convert_to_dict(self, result=None):
attributes = {
"module": self.raw_module_name,
"use_folder": self.use_folder,
"folder_name": self.raw_folder_name,
"function": self.raw_function,
"folder_function": self.raw_folder_function,
"login_function": self.raw_login_function,
**self.function_kwargs,
**self.consumer_kwargs,
}
result = {}
for key, value in attributes.items():
if value is not None:
result[key] = value
return super().convert_to_dict(result=result)
def get_gui_name(self):
if self.folder_name is not None:
return self.folder_name
return self.raw_module_name
def get_type_name(self):
return self.raw_module_name
def get_gui_icon_path(self):
image_files = os.listdir(SITE_ICON_PATH)
file_name = None
for image_file in image_files:
if self.raw_module_name in image_file:
file_name = image_file
break
if file_name is None:
return super(Site, self).get_gui_icon_path()
path = os.path.join(SITE_ICON_PATH, file_name)
return path
def get_configs(self):
configs = site_configs.SiteConfigs(super().get_configs())
attributes = [
"raw_module_name",
"use_folder",
"raw_folder_name",
"raw_function",
"raw_folder_function",
"raw_login_function",
"consumer_kwargs",
"function_kwargs",
]
for attribute in attributes:
try:
setattr(configs, attribute, getattr(self, attribute))
except ValueError as e:
logger.debug(f"Tried to set wrong value {attribute}: {getattr(self, attribute)}. Error: {str(e)}")
return configs
async def add_producers(self, producers, session, queue, download_settings, cancellable_pool, signal_handler):
if check_if_null(self.function_kwargs):
raise ParseTemplateRuntimeError("Found null field")
if self.login_module_name is not None:
login_module = importlib.import_module(self.login_module_name)
login_function = getattr(login_module, self.login_function_name)
await safe_login_module(session, download_settings, login_function, self.function_kwargs)
if self.base_path is None:
self.folder_name = await self.retrieve_folder_name(session=session,
signal_handler=signal_handler,
download_settings=download_settings)
self.base_path = core.utils.safe_path_join(self.parent.base_path, self.folder_name)
signal_handler.update_base_path(self.unique_key, self.base_path)
queue_wrapper = QueueWrapper(queue,
signal_handler=signal_handler,
unique_key=self.unique_key,
download_settings=download_settings,
cancellable_pool=cancellable_pool,
**self.consumer_kwargs)
site_module = importlib.import_module(self.module_name)
producer_function = getattr(site_module, self.function_name)
coroutine = self.exception_handler(producer_function, signal_handler)(session=session,
queue=queue_wrapper,
base_path=self.base_path,
download_settings=download_settings,
**self.function_kwargs)
producers.append(asyncio.ensure_future(coroutine))
async def retrieve_folder_name(self, session, signal_handler, download_settings):
if self.folder_name is not None or self.folder_module_name is None:
return self.folder_name
folder_module = importlib.import_module(self.folder_module_name)
function = getattr(folder_module, self.folder_function_name)
logger.debug(f"Calling folder function: {function.__module__}."
f"{function.__name__}<{dict_to_string(self.function_kwargs)}>")
folder_name = await function(session=session, download_settings=download_settings, **self.function_kwargs)
folder_name = folder_name.strip()
folder_name_cache = cache.get_json("folder_name")
folder_name_cache[self.kwargs_hash] = folder_name
signal_handler.update_folder_name(self.unique_key, folder_name)
return folder_name
def exception_handler(self, function, signal_handler):
unique_key = self.unique_key
async def wrapper(session, queue, base_path, download_settings, *args, **kwargs):
function_name = f"{function.__module__}.{function.__name__}"
function_name_kwargs = f"{function_name}<{dict_to_string(kwargs)}>"
try:
logger.debug(f"Starting: {function_name_kwargs}")
signal_handler.start(unique_key)
t = time.time()
result = await function(session=session, queue=queue, base_path=base_path,
download_settings=download_settings, *args, **kwargs)
logger.debug(f"Finished: {function_name_kwargs}, time: {(time.time() - t):.2f}")
return result
except asyncio.CancelledError as e:
raise e
except TypeError as e:
keyword = re.findall("'(.+)'", e.args[0])
logger.error(f"The producer {function_name_kwargs} got an unexpected keyword: {keyword}."
f" Stopping the producer..", exc_info=True)
signal_handler.got_error(unique_key, f"Unexpected keyword: {keyword}.")
return
except Exception as e:
logger.error(f"Got an unexpected error from producer: {function_name_kwargs},"
f" Error: {type(e).__name__}: {e}", exc_info=True)
signal_handler.got_error(self.unique_key, f"{type(e).__name__}: {e}")
return
finally:
signal_handler.finished(unique_key)
return wrapper
def has_website_url(self):
site_module = importlib.import_module(self.module_name)
return hasattr(site_module, "get_website_url")
def get_website_url(self):
site_module = importlib.import_module(self.module_name)
return getattr(site_module, "get_website_url")(**self.function_kwargs)
|
1704013
|
import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
import numpy as np
from torch.autograd import Variable
class VRNN(nn.Module):
def __init__(self, x_dim, h_dim, z_dim, n_layers, writer, bias=False):
super(VRNN, self).__init__()
self.x_dim = x_dim
self.h_dim = h_dim
self.z_dim = z_dim
self.n_layers = n_layers
self.writer = writer
# feature-extracting transformations
self.phi_x = nn.Sequential(
nn.Linear(x_dim, h_dim),
nn.LeakyReLU(),
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.phi_z = nn.Sequential(
nn.Linear(z_dim, h_dim),
nn.LeakyReLU())
# encoder
self.enc = nn.Sequential(
nn.Linear(h_dim + h_dim, h_dim),
nn.LeakyReLU(),
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.enc_mean = nn.Linear(h_dim, z_dim)
        self.enc_logvar = nn.Linear(h_dim, z_dim)  # nn.Softplus()
# prior
self.prior = nn.Sequential(
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.prior_mean = nn.Linear(h_dim, z_dim)
self.prior_logvar = nn.Linear(h_dim, z_dim) # nn.Softplus()
# decoder
self.dec = nn.Sequential(
nn.Linear(h_dim + h_dim, h_dim),
nn.LeakyReLU(),
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.dec_logvar = nn.Linear(h_dim, x_dim) # nn.Softplus()
self.dec_mean = nn.Sequential(nn.Linear(self.h_dim, self.x_dim), nn.Hardtanh(min_val=-10, max_val=10)) # nn.Sigmoid()
# recurrence
self.rnn = nn.GRU(h_dim + h_dim, h_dim, n_layers, bias)
#self.l_abs = nn.Linear(self.x_dim, self.h_dim)
def _encoder(self, phi_x_t, h):
enc_t = self.enc(torch.cat([phi_x_t, h[-1]], 1))
enc_mean_t = self.enc_mean(enc_t)
enc_logvar_t = self.enc_logvar(enc_t)
return enc_mean_t, enc_logvar_t
def _prior(self, h):
prior_t = self.prior(h[-1])
prior_mean_t = self.prior_mean(prior_t)
prior_logvar_t = self.prior_logvar(prior_t)
return prior_mean_t, prior_logvar_t
def _decoder(self, phi_z_t, h):
dec_t = self.dec(torch.cat([phi_z_t, h[-1]], 1))
dec_mean_t = self.dec_mean(dec_t)
dec_logvar_t = self.dec_logvar(dec_t)
return dec_mean_t, dec_logvar_t
def forward(self, x, obs_traj_in):
"""
Inputs:
- x: Tensor of shape (obs_len, batch, 2)
Output:
- final_h: Tensor of shape (self.num_layers, batch, self.h_dim)
"""
kld_loss, nll_loss = 0, 0
x_list, mean_list = [torch.zeros(2)], [torch.zeros(2)]
h = Variable(torch.zeros(self.n_layers, x.size(1), self.h_dim), requires_grad=True).cuda()
#h = self.l_abs(obs_traj_in.cuda()).unsqueeze(0)
for t in range(1, x.size(0)):
phi_x_t = self.phi_x(x[t])
# encoder mean and logvar
enc_mean_t, enc_logvar_t = self._encoder(phi_x_t, h)
# prior mean and logvar
prior_mean_t, prior_logvar_t = self._prior(h)
# sampling and reparameterization
z_t = self._reparameterized_sample(enc_mean_t, enc_logvar_t)
phi_z_t = self.phi_z(z_t.cuda())
# decoder
dec_mean_t, dec_logvar_t = self._decoder(phi_z_t, h)
# recurrence
_, h = self.rnn(torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
# computing losses
kld_loss += self._kld_gauss(enc_mean_t, enc_logvar_t, prior_mean_t, prior_logvar_t)
nll_loss += self._nll_gauss(dec_mean_t, dec_logvar_t, x[t])
"""
self.writer.add_histogram('input_trajectory', x[t], t)
self.writer.add_histogram('decoder_mean', dec_mean_t, t)
"""
x_list.append(x[t][0])
mean_list.append(dec_mean_t[0])
return kld_loss, nll_loss, (x_list, mean_list), h
def _generate_sample(self, h):
# prior mean and logvar
prior_mean_t, prior_logvar_t = self._prior(h)
# sampling and reparameterization
z_t = self._reparameterized_sample(prior_mean_t, prior_logvar_t)
phi_z_t = self.phi_z(z_t.cuda())
# decoder
dec_mean_t, dec_logvar_t = self._decoder(phi_z_t, h)
#sample_t = self._reparameterized_sample(dec_mean_t, dec_logvar_t)
return dec_mean_t, phi_z_t
def sample(self, seq_len, batch_dim, h_prec=None):
with torch.no_grad():
if h_prec is None:
h = Variable(torch.zeros(self.n_layers, 1, self.h_dim)).cuda()
sample = torch.zeros(seq_len, self.x_dim)
for t in range(seq_len):
sample_t, phi_z_t = self._generate_sample(h)
phi_x_t = self.phi_x(sample_t.view(1, -1).cuda())
sample[t] = sample_t.data
# recurrence
_, h = self.rnn(torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
else:
h = h_prec
sample = torch.zeros(seq_len, batch_dim, self.x_dim)
for t in range(seq_len):
sample_t, phi_z_t = self._generate_sample(h)
phi_x_t = self.phi_x(sample_t.cuda())
sample[t] = sample_t.data
# recurrence
_, h = self.rnn(torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
return sample
def reset_parameters(self, stdv=1e-1):
for weight in self.parameters():
weight.data.normal_(0, stdv)
def _init_weights(self, stdv):
pass
def _reparameterized_sample(self, mean, logvar):
"""Using std to sample"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std).cuda()
return mean + eps * std
def _kld_gauss(self, mean_enc, logvar_enc, mean_prior, logvar_prior):
"""Using std to compute KLD"""
x1 = torch.sum((logvar_prior - logvar_enc), dim=1)
x2 = torch.sum(torch.exp(logvar_enc - logvar_prior), dim=1)
x3 = torch.sum((mean_enc - mean_prior).pow(2) / (torch.exp(logvar_prior)), dim=1)
kld_element = x1 - mean_enc.size(1) + x2 + x3
return torch.mean(0.5 * kld_element)
def _nll_gauss(self, mean, logvar, x):
x1 = torch.sum(((x - mean).pow(2)) / torch.exp(logvar), dim=1)
x2 = x.size(1) * np.log(2 * np.pi)
x3 = torch.sum(logvar, dim=1)
nll = torch.mean(0.5 * (x1 + x2 + x3))
return nll
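# Minimal usage sketch (dimensions, sequence length, and batch size are illustrative;
# a CUDA device is assumed because the model moves tensors with .cuda() internally,
# and `writer` is unused in this snippet):
if __name__ == "__main__" and torch.cuda.is_available():
    model = VRNN(x_dim=2, h_dim=64, z_dim=16, n_layers=1, writer=None).cuda()
    x = torch.randn(8, 4, 2).cuda()  # (obs_len, batch, x_dim)
    kld, nll, (xs, means), h = model(x, obs_traj_in=None)
    samples = model.sample(seq_len=8, batch_dim=4, h_prec=h)
    print(kld.item(), nll.item(), samples.shape)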
|
1704015
|
import csv
import re
from functools import lru_cache
from pathlib import Path
"""
The original court tag in our XML looks like this:
<court abbreviation="Ill." jurisdiction="Illinois">Illinois Supreme Court</court>
The tag does not follow a defined dictionary of court names, and requires a variety of cleanup that happens here:
(a) Strip whitespace and normalize smart quotes.
(b) Apply custom transformations specified in .csv files.
The .csv files specify a Court ID, which is arbitrary, and a set of transformations:
Court Name -> Fixed Court Name
Court Name Abbreviation -> Fixed Court Name Abbreviation
Jurisdiction -> Fixed Jurisdiction
If 'Fixed Court Name' is numeric, it is assumed to be a Court ID, and all 'Fixed' values are pulled from the
referenced row.
Documentation of .csv files:
* tribal_jurisdiction.csv changes certain courts from West's American Tribal Law Reporter to have jurisdiction
'Tribal Jurisdictions' instead of 'United States'. Courts included are those that don't appear in any other
reporter. See https://github.com/harvard-lil/capstone/issues/592
* manual_fixes.csv is exported from the "Court Names" spreadsheet in our "Data updates" Google folder. It was
originally created with this SQL:
\copy (SELECT ct.id, ct.name, ct.name_abbreviation, COUNT(cs.id), j.name, MIN(cs.decision_date),
MAX(cs.decision_date), MIN(cs.id) FROM capdb_court ct, capdb_jurisdiction j, capdb_casemetadata cs
WHERE cs.court_id=ct.id AND ct.jurisdiction_id=j.id GROUP BY ct.id, j.id) To '/home/jcushman/courts.csv'
With CSV
and manually filled in by Adam and Jack in March 2019.
"""
def fix_court_tag(jurisdiction_name, court_name, court_abbrev):
"""
Main function to translate
old_jurisdiction, old_court_name, old_abbreviation
to
(new_jurisdiction, new_court_name, new_abbreviation)
"""
# normalize whitespace etc.
court_name = court_name_strip(court_name)
court_abbrev = court_abbreviation_strip(court_abbrev)
# repeatedly correct values from normalizations() dict (function call is cached, so dict is only loaded once)
key = (jurisdiction_name, court_name, court_abbrev)
prev_keys = {key}
while key in normalizations():
key = normalizations()[key]
# avoid infinite loops
if key in prev_keys:
break
prev_keys.add(key)
return key
@lru_cache(None)
def normalizations():
"""
Load .csv files from the fix_court_tag directory, and return a mapping of
(old_jurisdiction, old_court_name, old_abbreviation) -> (new_jurisdiction, new_court_name, new_abbreviation)
"""
# get a fixed version of a given line of a csv
def fixed(line):
return (
line['Fixed Jurisdiction'] or line['Jurisdiction'],
line['Fixed Court Name'] or line['Court Name'],
line['Fixed Court Name Abbreviation'] or line['Court Name Abbreviation'],
)
# read lines of each csv
to_fix = {}
for path in Path(__file__).parent.glob('*.csv'):
with path.open() as in_file:
reader = csv.DictReader(in_file)
lines_by_id = {line['Court ID']:line for line in reader}
for line in lines_by_id.values():
key = (line['Jurisdiction'], line['Court Name'], line['Court Name Abbreviation'])
# sometimes a number is accidentally put in the wrong column:
if line['Fixed Court Name Abbreviation'].isdigit():
line['Fixed Court Name'] = line['Fixed Court Name Abbreviation']
# if 'Fixed Court Name' is numeric, look up corrected value from referenced Court ID:
if line['Fixed Court Name'].isdigit():
if line['Fixed Court Name'] == line['Court ID']:
raise ValueError("Court %s cannot have itself as reference." % line['Court ID'])
new_line = lines_by_id[line['Fixed Court Name']]
line_ids = {line['Court ID'], new_line['Court ID']}
while new_line['Fixed Court Name'].isdigit():
new_line = lines_by_id[new_line['Fixed Court Name']]
if new_line['Court ID'] in line_ids:
raise ValueError("Court list %s is a reference loop" % line_ids)
line_ids.add(new_line['Court ID'])
to_fix[key] = fixed(new_line)
# else fill in corrections directly, if any:
else:
fixed_line = fixed(line)
if fixed_line != key:
to_fix[key] = fixed_line
return to_fix
### whitespace normalizations ###
def normalize_whitespace(text):
return re.sub(r'\s+', ' ', text).strip()
def normalize_quotes(text):
return text.replace("’", "'")
def court_name_strip(name_text):
name_text = normalize_whitespace(name_text)
name_text = normalize_quotes(name_text)
name_text = re.sub(r'[\\+`\]]', '', name_text)
name_text = re.sub('Court for The', 'Court for the', name_text)
name_text = re.sub('Appeals[A-Za-z]', 'Appeals', name_text)
    name_text = re.sub(r'Pennsylvania[A-Za-z0-9.]', 'Pennsylvania', name_text)
return name_text
def court_abbreviation_strip(name_abbreviation_text):
name_abbreviation_text = normalize_whitespace(name_abbreviation_text)
name_abbreviation_text = normalize_quotes(name_abbreviation_text)
name_abbreviation_text = re.sub('`', '', name_abbreviation_text)
return name_abbreviation_text
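# Minimal usage sketch: whitespace/quote normalization is deterministic, while any further
# rewrite depends on the .csv files shipped alongside this module (the example below
# assumes those files contain no correction for these particular values):
if __name__ == "__main__":
    print(fix_court_tag("Illinois", "Illinois  Supreme Court", "Ill."))
    # -> ('Illinois', 'Illinois Supreme Court', 'Ill.')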
|
1704032
|
from pyjamas.JSONService import JSONProxy
class SchoolCalendarService(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "SchoolCalendarService.php", ["getPeople"])
|
1704061
|
import pickle
import time
import unittest
from pygsti.baseobjs import profiler
from ..testutils import BaseTestCase
class ProfilerTestCase(BaseTestCase):
def setUp(self):
super(ProfilerTestCase, self).setUp()
    def test_profiler_methods(self):
comm=None
mem = profiler._get_root_mem_usage(comm)
mem = profiler._get_max_mem_usage(comm)
start_time = time.time()
p = profiler.Profiler(comm, default_print_memcheck=True)
p.add_time("My Name", start_time, prefix=1)
p.add_count("My Count", inc=1, prefix=1)
p.add_count("My Count", inc=2, prefix=1)
p.memory_check("My Memcheck", prefix=1)
p.memory_check("My Memcheck", prefix=1)
p.print_memory("My Memcheck just to print")
p.print_memory("My Memcheck just to print", show_minmax=True)
p.print_message("My Message")
p.print_message("My Message", all_ranks=True)
s = p._format_times(sort_by="name")
s = p._format_times(sort_by="time")
with self.assertRaises(ValueError):
p._format_times(sort_by="foobar")
s = p._format_counts(sort_by="name")
s = p._format_counts(sort_by="count")
with self.assertRaises(ValueError):
p._format_counts(sort_by="foobar")
s = p._format_memory(sort_by="name")
s = p._format_memory(sort_by="usage")
with self.assertRaises(ValueError):
p._format_memory(sort_by="foobar")
with self.assertRaises(NotImplementedError):
p._format_memory(sort_by="timestamp")
empty = profiler.Profiler(comm, default_print_memcheck=True)
self.assertEqual(empty._format_memory(sort_by="timestamp"),"No memory checkpoints")
def test_profiler_pickling(self):
comm=None
start_time = time.time()
p = profiler.Profiler(comm, default_print_memcheck=True)
p.add_time("My Name", start_time, prefix=1)
p.add_count("My Count", inc=1, prefix=1)
p.add_count("My Count", inc=2, prefix=1)
p.memory_check("My Memcheck", prefix=1)
s = pickle.dumps(p)
p2 = pickle.loads(s)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
1704106
|
from torchnlp.samplers.balanced_sampler import BalancedSampler
from torchnlp.samplers.bptt_batch_sampler import BPTTBatchSampler
from torchnlp.samplers.bptt_sampler import BPTTSampler
from torchnlp.samplers.bucket_batch_sampler import BucketBatchSampler
from torchnlp.samplers.deterministic_sampler import DeterministicSampler
from torchnlp.samplers.distributed_batch_sampler import DistributedBatchSampler
from torchnlp.samplers.distributed_sampler import DistributedSampler
from torchnlp.samplers.noisy_sorted_sampler import NoisySortedSampler
from torchnlp.samplers.oom_batch_sampler import get_number_of_elements
from torchnlp.samplers.oom_batch_sampler import OomBatchSampler
from torchnlp.samplers.repeat_sampler import RepeatSampler
from torchnlp.samplers.sorted_sampler import SortedSampler
__all__ = [
'BalancedSampler',
'BPTTBatchSampler',
'BPTTSampler',
'BucketBatchSampler',
'DeterministicSampler',
'DistributedBatchSampler',
'DistributedSampler',
'get_number_of_elements',
'NoisySortedSampler',
'OomBatchSampler',
'RepeatSampler',
'SortedSampler',
]
|
1704115
|
import math
class Solution:
    def minEatingSpeed(self, piles, H):
        # Binary search for the smallest eating speed whose total hours fit within H.
        # (Returning early when the hours exactly equal H could miss a smaller valid
        # speed, so we always keep narrowing and return the left boundary.)
        l, r = 1, max(piles)
        while l <= r:
            mid = (l + r) // 2
            hours = sum(math.ceil(p / mid) for p in piles)
            if hours > H:
                l = mid + 1
            else:
                r = mid - 1
        return l
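# Illustrative check using the classic example: piles = [3, 6, 7, 11] with 8 hours
# available needs a minimum eating speed of 4.
if __name__ == "__main__":
    assert Solution().minEatingSpeed([3, 6, 7, 11], 8) == 4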
|
1704147
|
from collections import OrderedDict
from hitchstory import utils
from strictyaml import Map
from copy import copy
class Arguments(object):
"""A null-argument, single argument or group of arguments of a hitchstory step."""
def __init__(self, yaml_args, params):
"""Create arguments from dict (from yaml)."""
self._params = params
if yaml_args is None:
self.is_none = True
self.single_argument = False
elif yaml_args.is_mapping():
self.is_none = False
self.single_argument = False
self.yaml = yaml_args
elif yaml_args.is_scalar():
self.is_none = False
self.single_argument = True
self.yaml = yaml_args
def parameterize(self, value):
"""
Replace parameters with specified variables.
"""
if isinstance(value, OrderedDict):
parameterized_value = copy(value)
for val_name, val_value in value.items():
for param_name, param_value in self._params.items():
if utils.is_parameter(val_value):
if param_name == utils.parameter_name(val_value):
parameterized_value[val_name] = param_value
return parameterized_value
else:
for name, parameter in self._params.items():
if utils.is_parameter(value):
if name == utils.parameter_name(value):
return parameter
return value
def _revalidate(self, validator):
self.yaml.revalidate(validator)
def validate_args(self, validator):
"""
Validate step using StrictYAML validators specified in @validate decorators.
"""
self._revalidate(Map(validator, key_validator=utils.UnderscoredSlug()))
self.data = {}
for key, value in self.yaml.items():
self.data[key.data] = self.parameterize(value.data)
def validate_single_argument(self, validator):
self.yaml.revalidate(validator)
self.data = self.parameterize(self.yaml.data)
def validate_kwargs(self, validator):
self.yaml.revalidate(validator)
self.data = self.parameterize(self.yaml.data)
|
1704153
|
from oic.utils.stateless import StateLess
__author__ = 'roland'
def _eq(l1, l2):
return set(l1) == set(l2)
def test_access_code():
keys = {"oct": ["symmetric key123"]} # keylength 16 bytes=128 bits
st = StateLess(keys, enc_alg="A128KW", enc_method="A128CBC-HS256")
con = st.create_authz_session("subject",
{"redirect_uri": "https://example.com"})
tok = st.get_token(con)
_info = st[tok]
assert _eq(_info.keys(), ["typ", "aud", "val", "sub"])
assert _info["sub"] == "subject"
assert _info["typ"] == "code"
assert _info["aud"] == "https://example.com"
def test_update_to_access_token():
keys = {"oct": ["symmetric key123"]}
st = StateLess(keys, enc_alg="A128KW", enc_method="A128CBC-HS256")
tok = st.create_authz_session("subject",
{"redirect_uri": "https://example.com"})
assert tok["aud"] == "https://example.com"
assert tok["sub"] == "subject"
|
1704182
|
import socket
from .constants import (Command, CommandFlag, MonoEffect)
from .utils import clamp
from .WifiLedShopLightState import WifiLedShopLightState
class WifiLedShopLight:
"""
A Wifi LED Shop Light
"""
def __init__(self, ip, port = 8189, timeout = 5, retries = 5):
"""
Creates a new Wifi LED Shop light
:param ip: The IP of the controller on the network (STA Mode, not AP mode).
:param port: The port the controller should listen on. It should almost always be left as the default.
:param timeout: The timeout in seconds to wait listening to the socket.
:param retries: The number of times to retry sending a command if it fails or times out before giving up.
"""
self.ip = ip
self.port = port
self.timeout = timeout
self.retries = retries
self.state = WifiLedShopLightState()
self.sock = None
self.reconnect()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def reconnect(self):
"""
Try to (re-)connect to the controller via a socket
"""
if self.sock:
self.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.timeout)
self.sock.connect((self.ip, self.port))
def close(self):
"""
Closes the socket connection to the light
"""
self.sock.close()
self.sock = None
def set_color(self, r=0, g=0, b=0):
"""
Sets the color of the light (rgb each 0 to 255)
"""
r = clamp(r)
g = clamp(g)
b = clamp(b)
self.state.color = (r, g, b)
self.send_command(Command.SET_COLOR, [int(r), int(g), int(b)])
def set_brightness(self, brightness=0):
"""
Sets the brightness of the light
:param brightness: An int describing the brightness (0 to 255, where 255 is the brightest)
"""
brightness = clamp(brightness)
self.state.brightness = brightness
self.send_command(Command.SET_BRIGHTNESS, [int(brightness)])
def set_speed(self, speed=0):
"""
Sets the speed of the effect. Not all effects use the speed, but it can be safely set regardless
:param speed: An int describing the speed an effect will play at. (0 to 255, where 255 is the fastest)
"""
speed = clamp(speed)
self.state.speed = speed
self.send_command(Command.SET_SPEED, [int(speed)])
def set_preset(self, preset=0):
"""
Sets the light effect to the provided built-in effect number
:param preset: The preset effect to use. Valid values are 0 to 255. See the MonoEffect enum, or MONO_EFFECTS and PRESET_EFFECTS for mapping.
"""
preset = clamp(preset)
self.state.mode = preset
self.send_command(Command.SET_PRESET, [int(preset)])
def set_custom(self, custom):
"""
Sets the light effect to the provided custom effect number
:param custom: The custom effect to use. Valid values are 1 to 12. See the CustomEffect enum.
"""
custom = clamp(custom, 1, 12)
self.state.mode = custom
self.send_command(Command.SET_CUSTOM, [int(custom)])
def toggle(self):
"""
Toggles the state of the light without checking the current state
"""
self.state.is_on = not self.state.is_on
self.send_command(Command.TOGGLE)
def turn_on(self):
"""
Toggles the light on only if it is not already on
"""
if not self.state.is_on:
self.toggle()
def turn_off(self):
"""
Toggles the light off only if it is not already off
"""
if self.state.is_on:
self.toggle()
def set_segments(self, segments):
"""
Sets the total number of segments. Total lights is segments * lights_per_segment.
:param segments: The number of segments
"""
self.send_command(Command.SET_SEGMENT_COUNT, [segments])
def set_lights_per_segment(self, lights_per_segment):
"""
Sets the number of lights per segment. Total lights is segments * lights_per_segment.
:param lights_per_segment: The number of lights per segment
"""
lights_per_segment_data = list(lights_per_segment.to_bytes(2, byteorder='little'))
self.send_command(Command.SET_LIGHTS_PER_SEGMENT, lights_per_segment_data)
def set_calculated_segments(self, total_lights, segments):
"""
Helper function to automatically set the number of segments and lights per segment
to reach the target total lights (rounded down to never exceed total_lights)
Usually you know the total number of lights you have available on a light strip
and want to split it into segments that take up the whole strip
:param total_lights: The target total number of lights to use
:param segments: The number of segments to split the total into
"""
self.set_segments(segments)
self.set_lights_per_segment(int(total_lights / segments))
def send_command(self, command, data=[]):
"""
Helper method to send a command to the controller.
Mostly for internal use, prefer the specific functions where possible.
Formats the low level message details like Start/End flag, binary data, and command
:param command: The command to send to the controller. See the Command enum for valid commands.
"""
min_data_len = 3
padded_data = data + [0] * (min_data_len - len(data))
raw_data = [CommandFlag.START, *padded_data, command, CommandFlag.END]
self.send_bytes(raw_data)
def send_bytes(self, data):
"""
Helper method to send raw bytes directly to the controller
Mostly for internal use, prefer the specific functions where possible
"""
raw_data = bytes(data)
attempts = 0
while True:
try:
self.sock.sendall(raw_data)
return
except (socket.timeout, BrokenPipeError):
if (attempts < self.retries):
self.reconnect()
attempts += 1
else:
raise
def sync_state(self):
"""
Syncs the state of the controller with the state of this object
"""
attempts = 0
while True:
try:
# Send the request for sync data
self.send_command(Command.SYNC)
response = self.sock.recv(1024)
# Extract the state data
state = bytearray(response)
self.state.update_from_sync(state)
return
except (socket.timeout, BrokenPipeError):
# When there is an error with the socket, close the connection and connect again
if attempts < self.retries:
self.reconnect()
attempts += 1
else:
raise
def __repr__(self):
return f"""WikiLedShopLight @ {self.ip}:{self.port}
state: {self.state}
"""
|
1704189
|
def decode(data, size):
"""
Decodes RLE encoded data.
"""
data = bytearray(data) # <- python 2/3 compatibility fix
result = bytearray(size)
src = 0
dst = 0
while src < len(data):
header = data[src]
if header > 127:
header -= 256
src += 1
if 0 <= header <= 127:
length = header + 1
if src + length <= len(data) and dst + length <= size:
result[dst:dst + header + 1] = data[src:src + length]
src += length
dst += length
else:
raise ValueError('Invalid RLE compression')
elif header == -128:
pass
else:
length = 1 - header
if src + 1 <= len(data) and dst + length <= size:
result[dst:dst + length] = [data[src]] * length
src += 1
dst += length
else:
raise ValueError('Invalid RLE compression')
if dst < size:
raise ValueError('Expected %d bytes but decoded only %d bytes' % (
size, dst))
return bytes(result)
def encode(data):
"""
Encodes data using RLE encoding.
"""
if len(data) == 0:
return data
if len(data) == 1:
return b'\x00' + data
data = bytearray(data)
result = bytearray()
buf = bytearray()
pos = 0
repeat_count = 0
MAX_LENGTH = 127
# we can safely start with RAW as empty RAW sequences
# are handled by finish_raw(buf, result)
state = 'RAW'
while pos < len(data) - 1:
current_byte = data[pos]
if data[pos] == data[pos + 1]:
if state == 'RAW':
# end of RAW data
finish_raw(buf, result)
state = 'RLE'
repeat_count = 1
elif state == 'RLE':
if repeat_count == MAX_LENGTH:
# restart the encoding
finish_rle(result, repeat_count, data, pos)
repeat_count = 0
# move to next byte
repeat_count += 1
else:
if state == 'RLE':
repeat_count += 1
finish_rle(result, repeat_count, data, pos)
state = 'RAW'
repeat_count = 0
elif state == 'RAW':
if len(buf) == MAX_LENGTH:
# restart the encoding
finish_raw(buf, result)
buf.append(current_byte)
pos += 1
if state == 'RAW':
buf.append(data[pos])
finish_raw(buf, result)
else:
repeat_count += 1
finish_rle(result, repeat_count, data, pos)
return bytes(result)
def finish_raw(buf, result):
if len(buf) == 0:
return
result.append(len(buf) - 1)
result.extend(buf)
buf[:] = bytearray()
def finish_rle(result, repeat_count, data, pos):
result.append(256 - (repeat_count - 1))
result.append(data[pos])
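# Round-trip sketch of the PackBits-style scheme above (the payload is arbitrary
# example data mixing a repeat run, a literal byte, and a short repeat run):
if __name__ == "__main__":
    payload = b"\x01\x01\x01\x01\x02\x03\x03"
    packed = encode(payload)
    assert decode(packed, len(payload)) == payload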
|
1704205
|
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
map = Basemap(projection='ortho',
lat_0=0, lon_0=0)
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
for lon in range(0, 360, 20):
for lat in range(-60, 90, 30):
map.tissot(lon, lat, 4, 50)
plt.show()
|
1704248
|
from arekit.common.data.input.providers.label.base import LabelProvider
class MultipleLabelProvider(LabelProvider):
def __init__(self, label_scaler):
super(MultipleLabelProvider, self).__init__(label_scaler=label_scaler)
def calculate_output_uint_label(self, expected_uint_label, etalon_uint_label):
return expected_uint_label
@property
def OutputLabelsUint(self):
return [self.LabelScaler.label_to_uint(label) for label in self.SupportedLabels]
|
1704256
|
from setuptools import setup, find_packages
setup(name='lm-evaluation',
version='0.1',
description='LM Evaluation',
url='https://github.com/AI21Labs/lm-evaluation',
author='AI21 Labs',
install_requires=[
"tqdm",
"requests",
"pandas",
"smart_open[gcs]"
],
extras_require={},
packages=find_packages(),
zip_safe=True)
|
1704278
|
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from cogitare.core.model import Model
from cogitare.utils import not_training, training
@add_metaclass(ABCMeta)
class SequentialModel(Model):
"""
.. warning:: This module is experimental and its interface may change in future releases.
SequentialModel is an extension of :class:`~cogitare.Model` that includes support for sequential
    models. It's designed to work with RNNs, such as LSTMs and GRUs, and can be easily used for any
    model that operates timestep by timestep.
    If you are using an RNN but passing the whole sequence as input, you should consider using
    the :class:`~cogitare.Model` interface. This interface is designed for timestep-by-timestep
    processing and can be used for Many-to-Many and Many-to-One models.
While training, you can use plugins to watch and interact with the model.
The plugin works like an event mechanism, you register a callback function to
a specific event, and then you gain access to some variables of the model at
specific steps of the training process.
Check the :meth:`~cogitare.Model.register_plugin` for more information about the
available events and variables that the model can interact with.
Methods that your model must implement:
- **forward** (data, hidden, timestep, seqlen): receives the data at
the current timestep, the hidden state, the current timestep, and the sequence size;
- **loss** (output, data, hidden, timestep, seqlen): returns the loss
at the current timestep;
    - **get_initial_state** (self, batch): returns the initial state of the RNN.
Expected input on :meth:`~cogitare.Model.learn`:
- **dataset** : an iterator, that returns one batch of samples per
      iteration. Each batch is an iterator, containing data for each timestep.
      The batch can be of any type (list, numpy array, tensor, string, etc.).
It is recommended to wrap your dataset using
the :class:`~cogitare.data.SequentialDataSet` object,
that provides a high-performance data loading interface.
"""
def __init__(self):
self.valid_hooks = self.valid_hooks + ('on_start_timestep', 'on_end_timestep')
super(SequentialModel, self).__init__()
@abstractmethod
def get_initial_state(self, batch):
"""Returns the initial state of the RNN.
Args:
batch: the current batch.
Returns:
state (torch.Tensor): the initial state.
"""
pass
def forward_seq(self, sequence):
"""Forward a whole sequence in the model, and return a list of the output
at each timestep.
Args:
sequence (iterable): an iterable with each item being the data for
the current timestep.
        Returns:
output (iterable): a list with the :meth:`~cogitare.SequentialModel.forward` output for each timestep.
"""
outputs = []
hidden = self.get_initial_state([sequence])
seqlen = len(sequence)
for timestep, data in enumerate(sequence, 1):
output, hidden = self.forward(data, hidden, timestep, seqlen)
outputs.append(output)
return outputs
@abstractmethod
def forward(self, data, hidden, timestep, seqlen):
"""
.. note:: When developing a Model, the class must implement this method.
        The method receives four parameters: the data obtained by the timestep iterator,
        the hidden state at the current timestep, the timestep, and the length of the sequence.
It must return a tuple with the model output after forwarding the data and the new hidden state.
Args:
data: this is the data got from iterating over the timesteps, got from
iterating over the batches in the dataset provided in the
:meth:`~cogitare.Model.learn` method. Its type and shape depend exclusively on
the input dataset, no transformations or type checking are made during training.
For most models, this will be a tuple containing ``(x_data_t, y_data_t)``, but can be
anything.
hidden (torch.Tensor): the hidden state at the current timestep. If this is the first timestep,
the hidden state is got from :meth:`~cogitare.SequentialModel.get_initial_state`. Otherwise, it is got
from the :meth:`~cogitare.SequentialModel.forward` returned value.
            timestep (int): indicates the current timestep (from 1 to seqlen)
seqlen (int): the number of timesteps in the sequence.
Returns:
(output, hidden): the data after processing the input data, and the new hidden state.
Usually, these are :class:`torch.Tensor`.
"""
pass
@abstractmethod
def loss(self, output, data, hidden, timestep, seqlen):
"""
.. note:: When developing a Model, the class must implement this method.
        It will receive the output and the hidden state from the :meth:`~cogitare.Model.forward` method,
        along with the data obtained by the timestep iterator (the same data used in forward),
and must return the model loss considering the model output and expected output.
If the model is Many-to-Many, it should return a valid loss for each timestep.
If the model is Many-to-One, it should return a valid loss in the last timestep (
when timestep == seqlen), and return None otherwise.
Args:
output: the :meth:`~cogitare.SequentialModel.forward` output
data: this is the data got from iterating over the timesteps, got from
iterating over the batches in the dataset provided in the
:meth:`~cogitare.Model.learn` method. Its type and shape depend exclusively on
the input dataset, no transformations or type checking are made during training.
For most models, this will be a tuple containing ``(x_data_t, y_data_t)``, but can be
anything.
hidden (torch.Tensor): the hidden state at the current timestep. If this is the first timestep,
the hidden state is got from :meth:`~cogitare.SequentialModel.get_initial_state`. Otherwise, it is got
from the :meth:`~cogitare.SequentialModel.forward` returned value.
            timestep (int): indicates the current timestep (from 1 to seqlen)
seqlen (int): the number of timesteps in the sequence.
Returns:
loss (torch.Tensor, None): the model loss. The loss will be used to backpropagate the errors.
"""
pass
def _forward_batch(self, batch_num, batch, optimizer):
seqlen = len(batch)
losses = []
total_loss = 0
self.state['num_timesteps'] = seqlen
self.state['losses_timestep'] = losses
self.state['current_timestep'] = None
optimizer.zero_grad()
hidden = self.get_initial_state(batch)
for timestep, data in enumerate(batch, 1):
self.state['current_timestep'] = timestep
self.state['sample_at_timestep'] = data
self.hook('on_start_timestep')
output, hidden = self.forward(data, hidden, timestep, seqlen)
loss = self.loss(output, data, hidden, timestep, seqlen)
if loss is not None:
total_loss += loss
losses.append(loss.data.item())
self.state['output_at_timestep'] = output
self.hook('on_end_timestep')
self.state['output'] = output
self.hook('before_backward')
total_loss.backward()
self.hook('before_step')
optimizer.step()
return sum(losses) / len(losses)
def _start_learn_state(self, dataset, optimizer, validation_dataset, max_epochs):
super(SequentialModel, self)._start_learn_state(dataset, optimizer,
validation_dataset, max_epochs)
self.state.update({'num_timesteps': None,
'losses_timestep': None,
'current_timestep': None,
'sample_at_timestep': None,
'output_at_timestep': None})
@not_training
def evaluate(self, dataset, *args, **kwargs):
"""
        Iterates over batches in the dataset and returns a list with the loss of each batch.
        This method does not affect training variables and can be used to evaluate the
        model performance on different data (such as validation and test sets).
Args:
dataset: batch-timestep iterator
args/kwargs: :meth:`~cogitare.SequentialModel.forward` arguments. If provided, the
forward will receive these parameters.
Returns:
output (list): the losses in the provided batches, one loss per batch.
"""
losses = []
for batch in dataset:
hidden = self.get_initial_state(batch)
seqlen = len(batch)
losses_batch = []
for timestep, data in enumerate(batch, 1):
output, hidden = self.forward(data, hidden, timestep, seqlen)
loss = self.loss(output, data, hidden, timestep, seqlen)
if loss is not None:
losses_batch.append(loss.data.item())
losses.append(sum(losses_batch) / len(losses_batch))
return losses
@training
def learn(self, dataset, optimizer, validation_dataset=None, max_epochs=50):
"""
        Optimize the model parameters using the dataset. This function uses the algorithm::
for epoch in max_epochs:
try:
for batch in data:
# forward the data
hidden = get_initial_state(batch)
seqlen = len(batch[0])
for idx, timestep in enumerate(batch, 1):
output, hidden = forward(timestep, hidden, idx, seqlen)
error = loss(output, timestep, hidden, idx, seqlen)
if error is not None:
# optimize the parameters
backward(error)
optimizer.step()
if validation_dataset:
evaluate_model(validation_dataset)
except StopTraining:
                    # stop the training process if requested by a plugin
If the ``validation_dataset`` is present, it can be used by plugins to evaluate the
validation/test loss/error during training.
        To achieve better performance, and to have access to common dataset manipulation
        features, it's recommended to use the :class:`~cogitare.data.SequentialDataSet` class. It
        provides an interface that loads batches using multiple threads/processes
        and offers useful features such as data splitting, async data loading, shuffling, and more. For
sequential data with variable length, it can automatically pad the sequences such that all of them
have the same length.
Args:
dataset (iterator): an iterator that returns one batch per iteration.
                Each batch is an iterator, where each item is a sequence. To get better
                performance and an easy-to-use interface, it is recommended to
use the :class:`~cogitare.data.SequentialDataSet`.
optimizer (torch.optim): the instance of a :class:`torch.optim.Optimizer` object.
validation_dataset (iterator, optional): if provided, must have the same
                characteristics as the ``dataset``. This may be used by the model and
by plugins to evaluate the model performance during training.
max_epochs (int): the number of epochs before ending the training procedure.
Returns:
status (bool): False if stopped by :class:`~cogitare.utils.StopTraining`. True otherwise.
"""
return super(SequentialModel, self).learn(dataset, optimizer, validation_dataset, max_epochs)
|
1704283
|
from django.apps import AppConfig
class DjangoWebProfilerConfig(AppConfig):
name = 'django_web_profiler'
|
1704284
|
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import copy
import logging
import pandas as pd
import multiprocessing as mp
from ..orbit import TestOrbit
from ..utils import Timeout
from ..utils import _initWorker
from ..utils import _checkParallel
logger = logging.getLogger(__name__)
__all__ = [
"Backend"
]
TIMEOUT = 30
def propagation_worker(orbits, t1, backend):
with Timeout(seconds=TIMEOUT):
try:
propagated = backend._propagateOrbits(orbits, t1)
except TimeoutError:
logger.critical("Propagation timed out on orbit IDs (showing first 5): {}".format(orbits.ids[:5]))
propagated = pd.DataFrame()
return propagated
def ephemeris_worker(orbits, observers, backend):
with Timeout(seconds=TIMEOUT):
try:
ephemeris = backend._generateEphemeris(orbits, observers)
except TimeoutError:
logger.critical("Ephemeris generation timed out on orbit IDs (showing first 5): {}".format(orbits.ids[:5]))
ephemeris = pd.DataFrame()
return ephemeris
def orbitDetermination_worker(observations, backend):
with Timeout(seconds=TIMEOUT):
try:
orbits = backend._orbitDetermination(observations)
except TimeoutError:
logger.critical("Orbit determination timed out on observations (showing first 5): {}".format(observations["obs_id"].values[:5]))
orbits = pd.DataFrame()
return orbits
def projectEphemeris_worker(ephemeris, test_orbit_ephemeris):
assert len(ephemeris["mjd_utc"].unique()) == 1
assert len(test_orbit_ephemeris["mjd_utc"].unique()) == 1
assert ephemeris["mjd_utc"].unique()[0] == test_orbit_ephemeris["mjd_utc"].unique()[0]
observation_time = ephemeris["mjd_utc"].unique()[0]
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
test_orbit_ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToEphemeris(ephemeris)
return ephemeris
class Backend:
def __init__(self, name="Backend", **kwargs):
self.__dict__.update(kwargs)
self.name = name
self.is_setup = False
return
def setup(self):
return
def _propagateOrbits(self, orbits, t1):
"""
Propagate orbits from t0 to t1.
THIS FUNCTION SHOULD BE DEFINED BY THE USER.
"""
err = (
"This backend does not have orbit propagation implemented."
)
raise NotImplementedError(err)
def propagateOrbits(
self,
orbits,
t1,
chunk_size=100,
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate each orbit in orbits to each time in t1.
Parameters
----------
orbits : `~thor.orbits.orbits.Orbits`
Orbits to propagate.
t1 : `~astropy.time.core.Time`
Times to which to propagate each orbit.
chunk_size : int, optional
Number of orbits to send to each job.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
propagated : `~pandas.DataFrame`
Propagated orbits with at least the following columns:
orbit_id : Input orbit ID.
mjd_tdb : Time at which state is defined in MJD TDB.
x, y, z, vx, vy, vz : Orbit as cartesian state vector with units
of au and au per day.
"""
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
orbits_split = orbits.split(chunk_size)
t1_duplicated = [copy.deepcopy(t1) for i in range(len(orbits_split))]
backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
propagation_worker_ray = ray.remote(propagation_worker)
propagation_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for o, t, b in zip(orbits_split, t1_duplicated, backend_duplicated):
p.append(propagation_worker_ray.remote(o, t, b))
propagated_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
propagated_dfs = p.starmap(
propagation_worker,
zip(
orbits_split,
t1_duplicated,
backend_duplicated,
)
)
p.close()
propagated = pd.concat(propagated_dfs)
propagated.reset_index(
drop=True,
inplace=True
)
else:
propagated = self._propagateOrbits(
orbits,
t1
)
return propagated
def _generateEphemeris(self, orbits, observers):
"""
Generate ephemerides for the given orbits as observed by
the observers.
THIS FUNCTION SHOULD BE DEFINED BY THE USER.
"""
err = (
"This backend does not have ephemeris generation implemented."
)
raise NotImplementedError(err)
def generateEphemeris(
self,
orbits,
observers,
test_orbit=None,
chunk_size=100,
num_jobs=1,
parallel_backend="mp"
):
"""
Generate ephemerides for each orbit in orbits as observed by each observer
in observers.
Parameters
----------
orbits : `~thor.orbits.orbits.Orbits`
Orbits for which to generate ephemerides.
observers : dict or `~pandas.DataFrame`
A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values.
test_orbit : `~thor.orbits.orbits.Orbits`
Test orbit to use to generate projected coordinates.
chunk_size : int, optional
Number of orbits to send to each job.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
ephemeris : `~pandas.DataFrame`
Ephemerides with at least the following columns:
orbit_id : Input orbit ID
observatory_code : Observatory's MPC code.
mjd_utc : Observation time in MJD UTC.
RA : Right Ascension in decimal degrees.
Dec : Declination in decimal degrees.
"""
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
orbits_split = orbits.split(chunk_size)
observers_duplicated = [copy.deepcopy(observers) for i in range(len(orbits_split))]
backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
ephemeris_worker_ray = ray.remote(ephemeris_worker)
ephemeris_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for o, t, b in zip(orbits_split, observers_duplicated, backend_duplicated):
p.append(ephemeris_worker_ray.remote(o, t, b))
ephemeris_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
ephemeris_dfs = p.starmap(
ephemeris_worker,
zip(
orbits_split,
observers_duplicated,
backend_duplicated,
)
)
p.close()
ephemeris = pd.concat(ephemeris_dfs)
ephemeris.reset_index(
drop=True,
inplace=True
)
else:
ephemeris = self._generateEphemeris(
orbits,
observers
)
if test_orbit is not None:
test_orbit_ephemeris = self._generateEphemeris(
test_orbit,
observers
)
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g).copy() for g in ephemeris_grouped.groups]
test_orbit_ephemeris_grouped = test_orbit_ephemeris.groupby(by=["observatory_code", "mjd_utc"])
test_orbit_ephemeris_split = [test_orbit_ephemeris_grouped.get_group(g) for g in test_orbit_ephemeris_grouped.groups]
if num_jobs > 1:
if parallel_backend == "ray":
projectEphemeris_worker_ray = ray.remote(projectEphemeris_worker)
projectEphemeris_worker_ray = projectEphemeris_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for e, te in zip(ephemeris_split, test_orbit_ephemeris_split):
p.append(projectEphemeris_worker_ray.remote(e, te))
ephemeris_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
ephemeris_dfs = p.starmap(
projectEphemeris_worker,
zip(
ephemeris_split,
test_orbit_ephemeris_split
)
)
p.close()
else:
ephemeris_dfs = []
for e, te in zip(ephemeris_split, test_orbit_ephemeris_split):
ephemeris_df = projectEphemeris_worker(e, te)
ephemeris_dfs.append(ephemeris_df)
ephemeris = pd.concat(ephemeris_dfs)
ephemeris.reset_index(
drop=True,
inplace=True
)
ephemeris.sort_values(
by=["orbit_id", "observatory_code", "mjd_utc"],
inplace=True,
ignore_index=True
)
return ephemeris
def _orbitDetermination(self):
err = (
"This backend does not have orbit determination implemented."
)
raise NotImplementedError(err)
def orbitDetermination(
self,
observations,
chunk_size=10,
num_jobs=1,
parallel_backend="mp"
):
"""
Run orbit determination on the input observations. These observations
must at least contain the following columns:
obj_id : Object ID
mjd_utc : Observation time in MJD UTC.
RA_deg : Topocentric Right Ascension in decimal degrees.
Dec_deg : Topocentric Declination in decimal degrees.
sigma_RA_deg : 1-sigma uncertainty in RA.
sigma_Dec_deg : 1-sigma uncertainty in Dec.
observatory_code : MPC observatory code.
Parameters
----------
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
"""
unique_objs = observations["obj_id"].unique()
observations_split = [observations[observations["obj_id"].isin(unique_objs[i:i+chunk_size])].copy() for i in range(0, len(unique_objs), chunk_size)]
backend_duplicated = [copy.deepcopy(self) for i in range(len(observations_split))]
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
orbitDetermination_worker_ray = ray.remote(orbitDetermination_worker)
orbitDetermination_worker_ray = orbitDetermination_worker_ray.options(
num_returns=1,
num_cpus=1
)
od = []
for o, b in zip(observations_split, backend_duplicated):
od.append(orbitDetermination_worker_ray.remote(o, b))
od_orbits_dfs = ray.get(od)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
od_orbits_dfs = p.starmap(
orbitDetermination_worker,
zip(
observations_split,
backend_duplicated,
)
)
p.close()
od_orbits = pd.concat(od_orbits_dfs, ignore_index=True)
return od_orbits
def _getObserverState(self, observers, origin="heliocenter"):
err = (
"This backend does not have observer state calculations implemented."
)
raise NotImplementedError(err)
|
1704298
|
import functools
from typing import Callable, List, Optional
import click
from globus_cli.parsing.command_state import (
debug_option,
format_option,
map_http_status_option,
verbose_option,
)
def common_options(
f: Optional[Callable] = None, *, disable_options: Optional[List[str]] = None
) -> Callable:
"""
This is a multi-purpose decorator for applying a "base" set of options
shared by all commands.
It can be applied either directly, or given keyword arguments.
Usage:
>>> @common_options
>>> def mycommand(abc, xyz):
>>> ...
or
>>> @common_options(disable_options=["format"])
>>> def mycommand(abc, xyz):
>>> ...
to disable use of `--format`
"""
if disable_options is None:
disable_options = []
if f is None:
return functools.partial(common_options, disable_options=disable_options)
f = debug_option(f)
f = verbose_option(f)
f = click.help_option("-h", "--help")(f)
# if the format option is being allowed, it needs to be applied to `f`
if "format" not in disable_options:
f = format_option(f)
    # if the --map-http-status option is being allowed, it needs to be applied to `f`
if "map_http_status" not in disable_options:
f = map_http_status_option(f)
return f
def collection_id_arg(f: Optional[Callable] = None, *, metavar: str = "COLLECTION_ID"):
if f is None:
return functools.partial(collection_id_arg, metavar=metavar)
return click.argument("collection_id", metavar=metavar, type=click.UUID)(f)
def endpoint_id_arg(f: Optional[Callable] = None, *, metavar: str = "ENDPOINT_ID"):
"""
This is the `ENDPOINT_ID` argument consumed by many Transfer endpoint
related operations. It accepts alternate metavars for cases when another
    name is desirable (e.g. `SHARE_ID`, `HOST_ENDPOINT_ID`), but can also be
applied as a direct decorator if no specialized metavar is being passed.
Usage:
>>> @endpoint_id_arg
>>> def command_func(endpoint_id):
>>> ...
or
>>> @endpoint_id_arg(metavar='HOST_ENDPOINT_ID')
>>> def command_func(endpoint_id):
>>> ...
"""
if f is None:
return functools.partial(endpoint_id_arg, metavar=metavar)
return click.argument("endpoint_id", metavar=metavar, type=click.UUID)(f)
def task_submission_options(f):
"""
Options shared by both transfer and delete task submission
"""
def notify_opt_callback(ctx, param, value):
"""
Parse --notify
- "" is the same as "off"
- parse by lowercase, comma-split, strip spaces
- "off,x" is invalid for any x
- "on,x" is valid for any valid x (other than "off")
- "failed", "succeeded", "inactive" are normal vals
        In code, produces a dict of notify_on_* settings ({} means "use the API defaults")
"""
# if no value was set, don't set any explicit options
# the API default is "everything on"
if value is None:
return {}
value = value.lower()
value = [x.strip() for x in value.split(",")]
# [""] is what you'll get if value is "" to start with
# special-case it into "off", which helps avoid surprising scripts
        # which take notification settings as input and build --notify
if value == [""]:
value = ["off"]
off = "off" in value
on = "on" in value
# set-ize it -- duplicates are fine
vals = {x for x in value if x not in ("off", "on")}
if (vals or on) and off:
raise click.UsageError('--notify cannot accept "off" and another value')
allowed_vals = {"on", "succeeded", "failed", "inactive"}
if not vals <= allowed_vals:
raise click.UsageError(
"--notify received at least one invalid value among {}".format(
list(vals)
)
)
# return the notification options to send!
# on means don't set anything (default)
if on:
return {}
# off means turn off everything
if off:
return {
"notify_on_succeeded": False,
"notify_on_failed": False,
"notify_on_inactive": False,
}
# otherwise, return the exact set of values seen
else:
return {
"notify_on_succeeded": "succeeded" in vals,
"notify_on_failed": "failed" in vals,
"notify_on_inactive": "inactive" in vals,
}
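    # Illustrative mappings for the callback above (hypothetical invocations, shown
    # here only as documentation of the behaviour implemented above):
    #   --notify on                -> {}
    #   --notify "" / --notify off -> all three notify_on_* flags set to False
    #   --notify failed,inactive   -> {"notify_on_succeeded": False,
    #                                  "notify_on_failed": True,
    #                                  "notify_on_inactive": True}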
def format_deadline_callback(ctx, param, value):
if not value:
return None
return value.strftime("%Y-%m-%d %H:%M:%S")
f = click.option(
"--dry-run",
is_flag=True,
help="Don't actually submit the task, print submission data instead",
)(f)
f = click.option(
"--notify",
callback=notify_opt_callback,
help=(
"Comma separated list of task events which notify by email. "
"'on' and 'off' may be used to enable or disable notifications "
"for all event types. Otherwise, use 'succeeded', 'failed', or "
"'inactive'"
),
)(f)
f = click.option(
"--submission-id",
help=(
"Task submission ID, as generated by `globus task "
"generate-submission-id`. Used for safe resubmission in the "
"presence of network failures."
),
)(f)
f = click.option("--label", default=None, help="Set a label for this task.")(f)
f = click.option(
"--deadline",
default=None,
type=click.DateTime(),
callback=format_deadline_callback,
help="Set a deadline for this to be canceled if not completed by.",
)(f)
f = click.option(
"--skip-activation-check",
is_flag=True,
help="Submit the task even if the endpoint(s) aren't currently activated.",
)(f)
return f
def delete_and_rm_options(
f: Optional[Callable] = None,
*,
supports_batch: bool = True,
default_enable_globs: bool = False,
):
"""
Options which apply both to `globus delete` and `globus rm`
"""
if f is None:
return functools.partial(
delete_and_rm_options,
supports_batch=supports_batch,
default_enable_globs=default_enable_globs,
)
f = click.option("--recursive", "-r", is_flag=True, help="Recursively delete dirs")(
f
)
f = click.option(
"--ignore-missing",
"-f",
is_flag=True,
help="Don't throw errors if the file or dir is absent",
)(f)
f = click.option(
"--star-silent",
"--unsafe",
"star_silent",
is_flag=True,
help=(
'Don\'t prompt when the trailing character is a "*".'
+ (" Implicit in --batch" if supports_batch else "")
),
)(f)
f = click.option(
"--enable-globs/--no-enable-globs",
is_flag=True,
default=default_enable_globs,
show_default=True,
help=(
"Enable expansion of *, ?, and [ ] characters in the last "
"component of file paths, unless they are escaped with "
"a preceeding backslash, \\"
),
)(f)
if supports_batch:
f = click.option(
"--batch",
type=click.File("r"),
help=(
"Accept a batch of source/dest path pairs from a file. Use the "
" special `-` value to read from stdin; otherwise opens the file from "
"the argument and passes through lines from that file. Uses "
"SOURCE_ENDPOINT_ID and DEST_ENDPOINT_ID as passed on the commandline. "
"Commandline paths are still allowed and are used as prefixes to the "
"batchmode inputs. "
),
)(f)
return f
def synchronous_task_wait_options(f):
def polling_interval_callback(ctx, param, value):
if not value:
return None
if value < 1:
raise click.UsageError(
f"--polling-interval={value} was less than minimum of 1"
)
return value
def exit_code_callback(ctx, param, value):
if not value:
return None
exit_stat_set = [0, 1] + list(range(50, 100))
if value not in exit_stat_set:
raise click.UsageError("--timeout-exit-code must have a value in 0,1,50-99")
return value
f = click.option(
"--timeout",
type=int,
metavar="N",
help=(
"Wait N seconds. If the Task does not terminate by "
"then, or terminates with an unsuccessful status, "
"exit with status 1"
),
)(f)
f = click.option(
"--polling-interval",
default=1,
type=int,
show_default=True,
callback=polling_interval_callback,
help="Number of seconds between Task status checks.",
)(f)
f = click.option(
"--heartbeat",
"-H",
is_flag=True,
help=(
'Every polling interval, print "." to stdout to '
"indicate that task wait is still active"
),
)(f)
f = click.option(
"--timeout-exit-code",
type=int,
default=1,
show_default=True,
callback=exit_code_callback,
help=(
"If the task times out, exit with this status code. Must have "
"a value in 0,1,50-99"
),
)(f)
f = click.option("--meow", is_flag=True, hidden=True)(f)
return f
def security_principal_opts(
*,
allow_anonymous=False,
allow_all_authenticated=False,
allow_provision=False,
):
def preprocess_security_principals(f):
@functools.wraps(f)
def decorator(*args, **kwargs):
identity = kwargs.pop("identity", None)
group = kwargs.pop("group", None)
provision_identity = kwargs.pop("provision_identity", None)
has_identity = identity or provision_identity
if identity and provision_identity:
raise click.UsageError(
"Only one of --identity or --provision-identity allowed"
)
if kwargs.get("principal") is not None:
if has_identity or group:
raise click.UsageError("You may only pass one security principal")
else:
if has_identity and group:
raise click.UsageError(
"You have passed both an identity and a group. "
"Please only pass one principal type"
)
elif not has_identity and not group:
raise click.UsageError(
"You must provide at least one principal "
"(identity, group, etc.)"
)
if identity:
kwargs["principal"] = ("identity", identity)
elif provision_identity:
kwargs["principal"] = ("provision-identity", provision_identity)
else:
kwargs["principal"] = ("group", group)
return f(*args, **kwargs)
return decorator
def decorate(f: Callable) -> Callable:
# order matters here -- the preprocessor must run after option
# application, so it has to be applied first
if isinstance(f, click.Command):
# if we're decorating a command, put the preprocessor on its
# callback, not on `f` itself
f.callback = preprocess_security_principals(f.callback)
else:
# otherwise, we're applying to a function, but other decorators may
# have been applied to give it params
# so, copy __click_params__ to preserve those parameters
oldfun = f
f = preprocess_security_principals(f)
f.__click_params__ = getattr(oldfun, "__click_params__", []) # type: ignore
f = click.option(
"--identity",
metavar="IDENTITY_ID_OR_NAME",
help="Identity to use as a security principal",
)(f)
f = click.option(
"--group", metavar="GROUP_ID", help="Group to use as a security principal"
)(f)
if allow_anonymous:
f = click.option(
"--anonymous",
"principal",
flag_value=("anonymous", ""),
help="Allow anyone access, even without logging in "
"(treated as a security principal)",
)(f)
if allow_all_authenticated:
f = click.option(
"--all-authenticated",
"principal",
flag_value=("all_authenticated_users", ""),
help="Allow anyone access, as long as they login "
"(treated as a security principal)",
)(f)
if allow_provision:
f = click.option(
"--provision-identity",
metavar="IDENTITY_USERNAME",
help="Identity username to use as a security principal. "
"Identity will be provisioned if it does not exist.",
)(f)
return f
return decorate
def no_local_server_option(f):
"""
Option for commands that start auth flows and might need to disable
the default local server behavior
"""
return click.option(
"--no-local-server",
is_flag=True,
help=(
"Manual authorization by copying and pasting an auth code. "
"This option is implied if the CLI detects you are using a "
"remote connection."
),
)(f)
|
1704308
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
fg, bg="black", "white"
COLS = 5
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
flags = pd.read_csv("datasets/countries.csv").split_columns(('nationality', 'tld', 'country'), "|").explode('country').set_index('country')['flag']
flags["EU"] = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Flag_of_Europe.svg/1024px-Flag_of_Europe.svg.png"
flags["UN"] = 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/2f/Flag_of_the_United_Nations.svg/1024px-Flag_of_the_United_Nations.svg.png'
def half_and_half(img1, img2):
assert img1.size == img2.size
w, h = (wh - 1 for wh in img1.size)
mask_array = np.fromfunction(lambda y,x: h-(h*x)/w >= y, tuple(reversed(img1.size)))
mask = Image.fromarray(mask_array * 255).convert("1")
return img2.overlay(img1, mask=mask, copy=True)
def horizontal_combo(imgs):
assert all(img.size == imgs[0].size for img in imgs)
w, n = imgs[0].width, len(imgs)
combo = imgs[0]
for i, img in enumerate(imgs[1:], 1):
mask_array = 1 - np.fromfunction(lambda y,x: (i * (w / n) <= x) * (x < (i + 1) * (w / n)), tuple(reversed(imgs[0].size)))
mask = Image.fromarray(mask_array * 255).convert("1")
combo = img.overlay(combo, mask=mask, copy=True)
return combo
def make_flag(countries):
flagimgs = [Image.from_url_with_cache(get_non(flags, country, default_img)).convert("RGB").resize((300, 180)) for country in countries]
if len(flagimgs) == 2: flagimg = half_and_half(*flagimgs)
else: flagimg = horizontal_combo(flagimgs)
return flagimg.resize_fixed_aspect(width=60).trim(1).pad(1, "grey")
def split_rows(array, n):
pad = len(array[0])
split = list(generate_batches(array, n))
return list(map(artial(sum, []), zip_longest(*split, fillvalue=[None]*pad)))
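# Hedged illustration of split_rows (not executed here): split_rows([[1], [2], [3], [4]], 2)
# regroups the rows column-major, giving [[1, 3], [2, 4]]; short columns are padded with None.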
for source in ["eusites_moz", "eusites_alexa"]: # Alexa list from http://rpki.surfnet.nl/top500.php
df = pd.read_csv("datasets/{}.csv".format(source)).split_columns(('country'), "|")
array = [[
make_flag(get_non(d, 'country')),
Image.from_url_with_cache(get_non(d, 'logo', default_img)).pad_to_aspect(4, 3, bg="white").resize_fixed_aspect(width=100),
Image.from_column([
Image.from_text("{}. {}".format(d['rank'], d['domain'].upper()), arial(20, bold=True), fg=fg, bg=bg, padding=(0,1)),
Image.from_text(get_non(d, 'description', '??'), arial(16), fg=fg, bg=bg, padding=(0,1))
], xalign=0, bg="white").pad((0,0,10,0), "white")
]
for _, d in df.iterrows()]
array = split_rows(array, math.ceil(len(array)/COLS))
grid = Image.from_array(array, bg=bg, padding=(8,10), xalign=(0,0,0,0)*COLS).pad((5,0), bg=bg)
# TODO: titles/footers (based in; e.g. xhamster
if source == "eusites_moz":
title = Image.from_column([
Image.from_text("50 most important European websites according to Moz".upper(), arial(72, bold=True), fg=fg, bg=bg),
Image.from_text("European-founded or owned web domains in Moz's Top 500 list, based on number of linking root domains", arial(48, italics=True), fg=fg, bg=bg).pad(5,bg=bg)
], bg=bg)
footer = Image.from_text("(excludes foreign sites based in Europe such as google.de; sites marked with an * also appear further down the list under additional domain names)", arial(24), fg=fg, bg=bg, padding=2)
else:
title = Image.from_column([
Image.from_text("50 most popular European websites according to Alexa".upper(), arial(72, bold=True), fg=fg, bg=bg),
Image.from_text("European-founded or owned web domains in Alexa's Top 500 list; excludes foreign sites based in Europe", arial(48, italics=True), fg=fg, bg=bg).pad(5,bg=bg)
], bg=bg)
footer = Image.EMPTY_IMAGE
img = Image.from_column([title, grid, footer], bg=bg, padding=(0,10))
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/{}.png".format(source))
|
1704414
|
import logging
import os
import pdb
import socket
import ssl as ssllib
import sys
from threading import Thread
import traceback
from irc.connection import Factory
import six
from . import paste_backends
from .bot import IrcpdbBot
from .exceptions import NoAllowedNicknamesSelected, NoChannelSelected
from .parse import parse_irc_uri
logger = logging.getLogger(__name__)
DEFAULT_PARAMS = {
'channel': None,
'nickname': None,
'server': 'chat.freenode.net',
'port': 6697,
    'password': None,
'ssl': True,
'limit_access_to': None,
'message_wait_seconds': 0.8,
'paste_minimum_response_length': 20,
'activation_timeout': 60
}
class Ircpdb(pdb.Pdb):
def __init__(self, uri=None, **kwargs):
"""Initialize the socket and initialize pdb."""
params = DEFAULT_PARAMS.copy()
params.update(parse_irc_uri(uri))
params.update(kwargs)
# Backup stdin and stdout before replacing them by the socket handle
self.old_stdout = sys.stdout
self.old_stdin = sys.stdin
self.read_timeout = 0.1
if not params.get('limit_access_to'):
raise NoAllowedNicknamesSelected(
"You must specify a list of nicknames that are allowed "
"to interact with the debugger using the "
"`limit_access_to` keyword argument."
)
elif isinstance(params.get('limit_access_to'), six.string_types):
params['limit_access_to'] = [params.get('limit_access_to')]
connect_params = {}
if not params.get('nickname'):
params['nickname'] = socket.gethostname().split('.')[0]
if not params.get('channel'):
raise NoChannelSelected(
"You must specify a channel to connect to using the "
"`channel` keyword argument."
)
if params.get('ssl'):
connect_params['connect_factory'] = (
Factory(wrapper=ssllib.wrap_socket)
)
# Writes to stdout are forbidden in mod_wsgi environments
try:
logger.info(
"ircpdb has connected to %s:%s on %s\n",
params.get('server'),
params.get('port'),
params.get('channel')
)
except IOError:
pass
r_pipe, w_pipe = os.pipe()
# The A pipe is from the bot to pdb
self.p_A_pipe = os.fdopen(r_pipe, 'r')
self.b_A_pipe = os.fdopen(w_pipe, 'w')
r_pipe, w_pipe = os.pipe()
# The B pipe is from pdb to the bot
self.b_B_pipe = os.fdopen(r_pipe, 'r')
self.p_B_pipe = os.fdopen(w_pipe, 'w')
pdb.Pdb.__init__(
self,
stdin=self.p_A_pipe,
stdout=self.p_B_pipe,
)
paste_backend = paste_backends.GistBackend()
self.bot = IrcpdbBot(
channel=params.get('channel'),
nickname=params.get('nickname'),
server=params.get('server'),
port=params.get('port'),
            password=params.get('password'),
limit_access_to=params.get('limit_access_to'),
message_wait_seconds=params.get('message_wait_seconds'),
paste_minimum_response_length=(
params.get('paste_minimum_response_length')
),
paste_backend=paste_backend,
activation_timeout=params.get('activation_timeout'),
**connect_params
)
def shutdown(self):
"""Revert stdin and stdout, close the socket."""
sys.stdout = self.old_stdout
sys.stdin = self.old_stdin
pipes = [
self.p_A_pipe,
self.p_B_pipe,
self.b_A_pipe,
self.b_B_pipe
]
for pipe in pipes:
try:
pipe.close()
except IOError:
logger.warning(
"IOError encountered while closing a pipe; messages "
"may have been lost."
)
self.bot.disconnect()
def do_continue(self, arg):
"""Clean-up and do underlying continue."""
try:
return pdb.Pdb.do_continue(self, arg)
finally:
self.shutdown()
do_c = do_cont = do_continue
def do_quit(self, arg):
"""Clean-up and do underlying quit."""
try:
return pdb.Pdb.do_quit(self, arg)
finally:
self.shutdown()
do_q = do_exit = do_quit
def set_trace(*args, **kwargs):
"""Wrapper function to keep the same import x; x.set_trace() interface.
We catch all the possible exceptions from pdb and cleanup.
"""
if not args and 'DEFAULT_IRCPDB_URI' in os.environ:
args = (
os.environ['DEFAULT_IRCPDB_URI'],
)
debugger = Ircpdb(*args, **kwargs)
try:
irc_feeder = Thread(
target=debugger.bot.process_forever,
args=(debugger.b_B_pipe, debugger.b_A_pipe, ),
)
irc_feeder.daemon = True
irc_feeder.start()
debugger.set_trace(sys._getframe().f_back)
except Exception:
debugger.shutdown()
traceback.print_exc()
|
1704464
|
import pytest
from parameterized import parameterized
from api.saml.metadata.filter import SAMLSubjectFilter, SAMLSubjectFilterError
from api.saml.metadata.model import (
SAMLAttribute,
SAMLAttributeStatement,
SAMLAttributeType,
SAMLSubject,
)
from core.python_expression_dsl.evaluator import DSLEvaluationVisitor, DSLEvaluator
from core.python_expression_dsl.parser import DSLParser
class TestSAMLSubjectFilter(object):
@parameterized.expand(
[
(
"fails_in_the_case_of_syntax_error",
'subject.attribute_statement.attributes["eduPersonEntitlement"].values[0 == "urn:mace:nyu.edu:entl:lib:eresources"',
SAMLSubject(
None,
SAMLAttributeStatement(
[
SAMLAttribute(
name=SAMLAttributeType.eduPersonEntitlement.name,
values=["urn:mace:nyu.edu:entl:lib:eresources"],
)
]
),
),
None,
SAMLSubjectFilterError,
),
(
"fails_in_the_case_of_unknown_attribute",
'subject.attribute_statement.attributes["mail"].values[0] == "urn:mace:nyu.edu:entl:lib:eresources"',
SAMLSubject(
None,
SAMLAttributeStatement(
[
SAMLAttribute(
name=SAMLAttributeType.eduPersonEntitlement.name,
values=["urn:mace:nyu.edu:entl:lib:eresources"],
)
]
),
),
None,
SAMLSubjectFilterError,
),
(
"fails_when_subject_is_not_used",
'attributes["eduPersonEntitlement"].values[0] == "urn:mace:nyu.edu:entl:lib:eresources"',
SAMLSubject(
None,
SAMLAttributeStatement(
[
SAMLAttribute(
name=SAMLAttributeType.eduPersonEntitlement.name,
values=["urn:mace:nyu.edu:entl:lib:eresources"],
)
]
),
),
None,
SAMLSubjectFilterError,
),
(
"can_filter_when_attribute_has_one_value",
'"urn:mace:nyu.edu:entl:lib:eresources" == subject.attribute_statement.attributes["eduPersonEntitlement"].values[0]',
SAMLSubject(
None,
SAMLAttributeStatement(
[
SAMLAttribute(
name=SAMLAttributeType.eduPersonEntitlement.name,
values=["urn:mace:nyu.edu:entl:lib:eresources"],
)
]
),
),
True,
),
(
"can_filter_when_attribute_has_multiple_values",
'"urn:mace:nyu.edu:entl:lib:eresources" in subject.attribute_statement.attributes["eduPersonEntitlement"].values',
SAMLSubject(
None,
SAMLAttributeStatement(
[
SAMLAttribute(
name=SAMLAttributeType.eduPersonEntitlement.name,
values=[
"urn:mace:nyu.edu:entl:lib:eresources",
"urn:mace:nyu.edu:entl:lib:books",
],
)
]
),
),
True,
),
]
)
def test_execute(
self, _, expression, subject, expected_result, expected_exception=None
):
# Arrange
parser = DSLParser()
visitor = DSLEvaluationVisitor()
evaluator = DSLEvaluator(parser, visitor)
subject_filter = SAMLSubjectFilter(evaluator)
# Act
if expected_exception:
with pytest.raises(expected_exception):
subject_filter.execute(expression, subject)
else:
result = subject_filter.execute(expression, subject)
# Assert
assert expected_result == result
@parameterized.expand(
[
(
"fails_in_the_case_of_syntax_error",
'subject.attribute_statement.attributes["eduPersonEntitlement"].values[0 == "urn:mace:nyu.edu:entl:lib:eresources"',
SAMLSubjectFilterError,
),
(
"fails_when_subject_is_not_used",
'attributes["eduPersonEntitlement"].values[0] == "urn:mace:nyu.edu:entl:lib:eresources"',
SAMLSubjectFilterError,
),
(
"can_filter_by_attribute_oid",
'subject.attribute_statement.attributes["urn:oid:1.3.6.1.4.1.5923.1.8"].values[0] == "urn:mace:nyu.edu:entl:lib:eresources"',
None,
),
(
"can_filter_when_attribute_has_one_value",
'"urn:mace:nyu.edu:entl:lib:eresources" == subject.attribute_statement.attributes["eduPersonEntitlement"].values[0]',
None,
),
(
"can_filter_when_attribute_has_multiple_values",
'"urn:mace:nyu.edu:entl:lib:eresources" in subject.attribute_statement.attributes["eduPersonEntitlement"].values',
None,
),
]
)
def test_validate(self, _, expression, expected_exception):
# Arrange
parser = DSLParser()
visitor = DSLEvaluationVisitor()
evaluator = DSLEvaluator(parser, visitor)
subject_filter = SAMLSubjectFilter(evaluator)
# Act
if expected_exception:
with pytest.raises(expected_exception):
subject_filter.validate(expression)
else:
subject_filter.validate(expression)
|
1704510
|
import pytest
from dbt.tests.util import run_dbt
from tests.functional.configs.fixtures import BaseConfigProject
class TestConfigIndivTests(BaseConfigProject):
@pytest.fixture(scope="class")
def project_config_update(self):
return {
"seeds": {
"quote_columns": False,
},
"vars": {
"test": {
"seed_name": "seed",
}
},
"tests": {"test": {"enabled": True, "severity": "WARN"}},
}
def test_configuring_individual_tests(
self,
project,
):
assert len(run_dbt(["seed"])) == 1
assert len(run_dbt(["run"])) == 2
# all tests on (minus sleeper_agent) + WARN
assert len(run_dbt(["test"])) == 5
# turn off two of them directly
assert len(run_dbt(["test", "--vars", '{"enabled_direct": False}'])) == 3
# turn on sleeper_agent data test directly
assert (
len(
run_dbt(
["test", "--models", "sleeper_agent", "--vars", '{"enabled_direct": True}']
)
)
== 1
)
# set three to ERROR directly
results = run_dbt(
[
"test",
"--models",
"config.severity:error",
"--vars",
'{"enabled_direct": True, "severity_direct": "ERROR"}',
],
expect_pass=False,
)
assert len(results) == 2
assert results[0].status == "fail"
assert results[1].status == "fail"
|
1704550
|
from abc import ABCMeta, abstractmethod
from ctypes import c_void_p, c_char_p, c_int, c_double, byref
from .pyfluid import pyfluid as lib
from . import pybindings as pb
from . import method_decorators as decorators
from .vector3 import Vector3, Vector3_t
from .aabb import AABB, AABB_t
class FluidSource:
__metaclass__ = ABCMeta
@abstractmethod
def __init__():
pass
def __call__(self):
return self._obj
@property
def position(self):
libfunc = lib.FluidSource_get_position
pb.init_lib_func(libfunc, [c_void_p, c_void_p], Vector3_t)
cvect = pb.execute_lib_func(libfunc, [self()])
return Vector3.from_struct(cvect)
@position.setter
def position(self, pos):
libfunc = lib.FluidSource_set_position
pb.init_lib_func(libfunc, [c_void_p, Vector3_t, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), pos.to_struct()])
@property
def velocity(self):
libfunc = lib.FluidSource_get_velocity
pb.init_lib_func(libfunc, [c_void_p, c_void_p], Vector3_t)
cvect = pb.execute_lib_func(libfunc, [self()])
return Vector3.from_struct(cvect)
@velocity.setter
def velocity(self, vel):
libfunc = lib.FluidSource_set_velocity
pb.init_lib_func(libfunc, [c_void_p, Vector3_t, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), vel.to_struct()])
@property
def direction(self):
libfunc = lib.FluidSource_get_direction
pb.init_lib_func(libfunc, [c_void_p, c_void_p], Vector3_t)
cvect = pb.execute_lib_func(libfunc, [self()])
return Vector3.from_struct(cvect)
@direction.setter
def direction(self, direction):
libfunc = lib.FluidSource_set_direction
pb.init_lib_func(libfunc, [c_void_p, Vector3_t, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), direction.to_struct()])
@property
def is_inflow(self):
libfunc = lib.FluidSource_is_inflow
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self()]))
@is_inflow.setter
def is_inflow(self, boolval):
if boolval:
libfunc = lib.FluidSource_set_as_inflow
else:
libfunc = lib.FluidSource_set_as_outflow
pb.init_lib_func(libfunc, [c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self()])
@property
def is_outflow(self):
libfunc = lib.FluidSource_is_outflow
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self()]))
@is_outflow.setter
def is_outflow(self, boolval):
if boolval:
libfunc = lib.FluidSource_set_as_outflow
else:
libfunc = lib.FluidSource_set_as_inflow
pb.init_lib_func(libfunc, [c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self()])
@property
def is_active(self):
libfunc = lib.FluidSource_is_active
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self()]))
@is_active.setter
def is_active(self, boolval):
if boolval:
libfunc = lib.FluidSource_activate
else:
libfunc = lib.FluidSource_deactivate
pb.init_lib_func(libfunc, [c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self()])
@property
def id(self):
libfunc = lib.FluidSource_get_id
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return pb.execute_lib_func(libfunc, [self()])
def get_AABB(self):
libfunc = lib.FluidSource_get_AABB
pb.init_lib_func(libfunc, [c_void_p, c_void_p], AABB_t)
cbbox = pb.execute_lib_func(libfunc, [self()])
return AABB.from_struct(cbbox)
@decorators.xyz_or_vector
def contains_point(self, px, py, pz):
cpos = Vector3_t(px, py, pz)
libfunc = lib.FluidSource_contains_point
pb.init_lib_func(libfunc, [c_void_p, Vector3_t, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self(), cpos]))
class CuboidFluidSource(FluidSource):
def __init__(self, bbox = None, velocity = None):
if bbox is None:
bbox = AABB()
if velocity is None:
velocity = Vector3()
libfunc = lib.CuboidFluidSource_new
pb.init_lib_func(libfunc, [AABB_t, Vector3_t, c_void_p], c_void_p)
self._obj = pb.execute_lib_func(libfunc, [bbox.to_struct(),
velocity.to_struct()])
def __del__(self):
libfunc = lib.CuboidFluidSource_destroy
pb.init_lib_func(libfunc, [c_void_p], None)
try:
libfunc(self._obj)
except:
pass
@property
def width(self):
libfunc = lib.CuboidFluidSource_get_width
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_double)
return pb.execute_lib_func(libfunc, [self()])
@width.setter
def width(self, width):
libfunc = lib.CuboidFluidSource_set_width
pb.init_lib_func(libfunc, [c_void_p, c_double, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), width])
@property
def height(self):
libfunc = lib.CuboidFluidSource_get_height
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_double)
return pb.execute_lib_func(libfunc, [self()])
@height.setter
def height(self, height):
libfunc = lib.CuboidFluidSource_set_height
pb.init_lib_func(libfunc, [c_void_p, c_double, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), height])
@property
def depth(self):
libfunc = lib.CuboidFluidSource_get_depth
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_double)
return pb.execute_lib_func(libfunc, [self()])
@depth.setter
def depth(self, depth):
libfunc = lib.CuboidFluidSource_set_depth
pb.init_lib_func(libfunc, [c_void_p, c_double, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), depth])
def set_AABB(self, bbox):
libfunc = lib.CuboidFluidSource_set_AABB
pb.init_lib_func(libfunc, [c_void_p, AABB_t, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), bbox.to_struct()])
@property
def AABB(self):
return self.get_AABB()
@AABB.setter
def AABB(self, bbox):
self.set_AABB(bbox)
@property
def center(self):
libfunc = lib.CuboidFluidSource_get_center
pb.init_lib_func(libfunc, [c_void_p, c_void_p], Vector3_t)
cvect = pb.execute_lib_func(libfunc, [self()])
return Vector3.from_struct(cvect)
@center.setter
def center(self, center):
libfunc = lib.CuboidFluidSource_set_center
pb.init_lib_func(libfunc, [c_void_p, Vector3_t, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), center.to_struct()])
def expand(self, val):
libfunc = lib.CuboidFluidSource_expand
pb.init_lib_func(libfunc, [c_void_p, c_double, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), val])
class SphericalFluidSource(FluidSource):
def __init__(self, position = None, radius = None, velocity = None):
if position is None:
position = Vector3()
if radius is None:
radius = 0.0
if velocity is None:
velocity = Vector3()
libfunc = lib.SphericalFluidSource_new
pb.init_lib_func(libfunc,
[Vector3_t, c_double, Vector3_t, c_void_p], c_void_p)
self._obj = pb.execute_lib_func(libfunc, [position.to_struct(),
radius,
velocity.to_struct()])
def __del__(self):
libfunc = lib.SphericalFluidSource_destroy
pb.init_lib_func(libfunc, [c_void_p], None)
try:
libfunc(self._obj)
except:
pass
@property
def radius(self):
libfunc = lib.SphericalFluidSource_get_radius
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_double)
return pb.execute_lib_func(libfunc, [self()])
@radius.setter
def radius(self, radius):
libfunc = lib.SphericalFluidSource_set_radius
pb.init_lib_func(libfunc, [c_void_p, c_double, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), radius])
def expand(self, val):
libfunc = lib.SphericalFluidSource_expand
pb.init_lib_func(libfunc, [c_void_p, c_double, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), val])
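# Hypothetical usage sketch (illustration only; registering a source with a simulation
# object is handled elsewhere and is not part of this module):
#     source = CuboidFluidSource(AABB(), Vector3())  # default-constructed bounds and velocity
#     source.is_inflow = True                        # mark it as an inflow
#     source.expand(0.5)                             # grow the emission region slightly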
|
1704551
|
from cellrank.tl.estimators.mixins.decomposition import EigenMixin, SchurMixin
from cellrank.tl.estimators.mixins._lineage_drivers import LinDriversMixin
from cellrank.tl.estimators.mixins._absorption_probabilities import AbsProbsMixin
|
1704569
|
from .algorithmic import (
ParallelGroupBy,
Rolling,
nansum, masked_sum,
nanmean, masked_mean,
nanvar,
nanstd,
masked_last,
masked_first,
nanlast,
nanmax,
nanmin,
pad_2d,
covariance,
pearsonr,
linear_regression_1d,
quantile,
)
|
1704584
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.decorators.cache import cache_page
from .feeds import ArticleFeed
from .views import SourceSearchView, HomepageView, SlackMessageView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
from haystack.views import search_view_factory
from source.articles.views import ArticleList, ArticleDetail
from source.utils.caching import ClearCache
STANDARD_CACHE_TIME = getattr(settings, 'CACHE_MIDDLEWARE_SECONDS', 60*15)
FEED_CACHE_TIME = getattr(settings, 'FEED_CACHE_SECONDS', 60*15)
urlpatterns = patterns('',
url(
regex = '^$',
view = cache_page(HomepageView.as_view(template_name='homepage.html'), STANDARD_CACHE_TIME),
kwargs = {},
name = 'homepage',
),
(r'^articles/', include('source.articles.urls')),
(r'^code/', include('source.code.urls')),
(r'^guides/', include('source.guides.urls')),
(r'^jobs/', include('source.jobs.urls')),
(r'^organizations/', include('source.people.urls.organizations')),
(r'^people/', include('source.people.urls.people')),
(r'^api/1.0/', include('source.api.urls')),
url(
regex = '^search/$',
view = search_view_factory(view_class=SourceSearchView, form_class=SearchForm, searchqueryset=SearchQuerySet().order_by('django_ct')),
kwargs = {},
name = 'haystack_search',
),
url(
regex = '^clear-cache/$',
view = ClearCache.as_view(),
kwargs = {},
name = 'clear_cache',
),
url(
regex = '^send-to-slack/$',
view = SlackMessageView.as_view(),
kwargs = {},
name = 'send_to_slack',
),
url(
regex = '^rss/$',
view = cache_page(ArticleFeed(), FEED_CACHE_TIME),
kwargs = {},
name = 'homepage_feed',
),
url(
regex = '^category/(?P<category>[-\w]+)/$',
view = cache_page(ArticleList.as_view(), STANDARD_CACHE_TIME),
kwargs = {},
name = 'article_list_by_category',
),
url(
regex = '^category/(?P<category>[-\w]+)/rss/$',
view = cache_page(ArticleFeed(), FEED_CACHE_TIME),
kwargs = {},
name = 'article_list_by_category_feed',
),
url(
regex = '^(?P<section>[-\w]+)/$',
view = cache_page(ArticleList.as_view(), STANDARD_CACHE_TIME),
kwargs = {},
name = 'article_list_by_section',
),
url(
regex = '^(?P<section>[-\w]+)/rss/$',
view = cache_page(ArticleFeed(), FEED_CACHE_TIME),
kwargs = {},
name = 'article_list_by_section_feed',
),
url(
regex = '^(?P<section>[-\w]+)/(?P<slug>[-\w]+)/$',
view = cache_page(ArticleDetail.as_view(), STANDARD_CACHE_TIME),
kwargs = {},
name = 'article_detail',
),
)
|
1704626
|
import sys
__copyright__ = "Copyright 2021, AutoML.org Freiburg-Hannover"
__license__ = "3-clause BSD"
if __name__ == '__main__':
if sys.argv[1] == "1":
print("Result of this algorithm run: UNSAT,1,1,1,12354")
if sys.argv[1] == "2":
print("Result of this algorithm run: UNSAT,2,3,4,12354,additional info")
|
1704640
|
from core.constants import Visibility
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.feedgenerator import Rss201rev2Feed, rfc2822_date
from entry.models import TLocation
from indieweb.constants import MPostKinds, MPostStatuses
from post.models import TPost
from settings.models import MSiteSettings
from streams.models import MStream
class ExtendedRSSFeed(Rss201rev2Feed):
"""
Create a type of RSS feed that has content:encoded elements.
"""
def root_attributes(self):
attrs = super().root_attributes()
attrs["xmlns:content"] = "http://purl.org/rss/1.0/modules/content/"
return attrs
def add_item_elements(self, handler, item): # noqa: C901
"""
Overrides the base class because there's no hook around the title tag.
Excluding the title tag if no title is set to allow checkins and notes to display inline
on micro.blog
"""
if item["title"]:
handler.addQuickElement("title", item["title"])
# Django super start
handler.addQuickElement("link", item["link"])
if item["description"] is not None:
handler.addQuickElement("description", item["description"])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % (item["author_email"], item["author_name"]))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item["pubdate"] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item["pubdate"]))
if item["comments"] is not None:
handler.addQuickElement("comments", item["comments"])
if item["unique_id"] is not None:
guid_attrs = {}
if isinstance(item.get("unique_id_is_permalink"), bool):
guid_attrs["isPermaLink"] = str(item["unique_id_is_permalink"]).lower()
handler.addQuickElement("guid", item["unique_id"], guid_attrs)
if item["ttl"] is not None:
handler.addQuickElement("ttl", item["ttl"])
# Enclosure.
if item["enclosures"]:
enclosures = list(item["enclosures"])
if len(enclosures) > 1:
raise ValueError(
"RSS feed items may only have one enclosure, see "
"http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
)
enclosure = enclosures[0]
handler.addQuickElement(
"enclosure",
"",
{
"url": enclosure.url,
"length": enclosure.length,
"type": enclosure.mime_type,
},
)
# Categories.
for cat in item["categories"]:
handler.addQuickElement("category", cat)
# Django Superclass End
# Add our html content to the feed
handler.addQuickElement("content:encoded", item["content_encoded"])
class AllEntriesFeed(Feed):
feed_type = ExtendedRSSFeed
item_guid_is_permalink = False
def __call__(self, request, *args, **kwargs):
self.request = request
return super().__call__(request, *args, **kwargs)
def title(self):
title = MSiteSettings.objects.values_list("title", flat=True).first()
return title or "Tanzawa"
def link(self):
return reverse("feeds:feed")
def items(self):
return (
TPost.objects.visible_for_user(self.request.user.id)
.filter(m_post_status__key=MPostStatuses.published)
.exclude(visibility=Visibility.UNLISTED)
.select_related("m_post_kind")
.prefetch_related(
"ref_t_entry",
"ref_t_entry__t_reply",
"ref_t_entry__t_location",
"ref_t_entry__t_checkin",
)
.all()
.order_by("-dt_published")[:10]
)
def item_title(self, item: TPost):
if item.m_post_kind.key in (MPostKinds.checkin, MPostKinds.note):
return None
return item.post_title
def item_description(self, item: TPost):
return item.ref_t_entry.p_summary
def item_extra_kwargs(self, item: TPost):
t_entry = item.ref_t_entry
e_content = t_entry.e_content
if item.m_post_kind.key == MPostKinds.reply:
e_content = f"<blockquote>{t_entry.t_reply.quote}</blockquote>{e_content}"
elif item.m_post_kind.key == MPostKinds.bookmark:
t_bookmark = t_entry.t_bookmark
e_content = (
f"Bookmark: "
f'<a href="{t_bookmark.u_bookmark_of}"'
f">{t_bookmark.title or t_bookmark.u_bookmark_of}</a>"
f"<blockquote>{t_bookmark.quote}</blockquote>{e_content}"
)
elif item.m_post_kind.key == MPostKinds.checkin:
e_content = f"{item.post_title}<br/>{e_content}"
try:
e_content = f"{e_content}<br/>Location: {t_entry.t_location.summary}"
except TLocation.DoesNotExist:
pass
return {"content_encoded": e_content}
def item_guid(self, obj: TPost) -> str:
return obj.uuid
def item_author_name(self, item: TPost):
return item.p_author.get_full_name()
def item_author_link(self, item: TPost):
return reverse("public:author", args=[item.p_author.username])
def item_pubdate(self, item: TPost):
return item.dt_published
def item_updateddate(self, item: TPost):
return item.dt_updated
class StreamFeed(AllEntriesFeed):
def get_object(self, request, stream_slug: str):
return get_object_or_404(MStream.objects.visible(request.user), slug=stream_slug)
def items(self, obj):
return (
TPost.objects.visible_for_user(self.request.user.id)
.filter(streams=obj, m_post_status__key=MPostStatuses.published)
.exclude(visibility=Visibility.UNLISTED)
.select_related("ref_t_entry")
.all()
.order_by("-dt_published")[:10]
)
|
1704644
|
import socket
import json
import datetime
import time
currentStatusFileName = "currPhase.json"
currentSignalPlanFileName = "signalPlan.json"
# Read a config file into a json object:
configFile = open("/nojournal/bin/mmitss-phase3-master-config.json", 'r')
config = (json.load(configFile))
configFile.close()
hostIp = config["HostIp"]
port = config["PortNumber"]["TrafficControllerInterface"]
tciSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tciSocket.bind((hostIp, port))
priorityRequestSolverPort = config["PortNumber"]["PrioritySolver"]
priorityRequestSolverCommunicationInfo = (hostIp, priorityRequestSolverPort)
priorityRequestSolverToTCIInterfacePort = config["PortNumber"]["PrioritySolverToTCIInterface"]
priorityRequestSolverToTCIInterfaceCommunicationInfo = (
hostIp, priorityRequestSolverToTCIInterfacePort)
def getJsonString(fileName):
f = open(fileName, 'r')
data = f.read()
f.close()
return data
data = getJsonString(currentSignalPlanFileName)
tciSocket.sendto(data.encode(), priorityRequestSolverCommunicationInfo)
print("Sent Signal Plan to Solver at time: ", time.time())
while(True):
# Receive data on the TCI socket
data, address = tciSocket.recvfrom(10240)
data = data.decode()
# Load the received data into a json object
receivedMessage = json.loads(data)
if receivedMessage["MsgType"] == "Schedule":
if receivedMessage["Schedule"] == "Clear":
print("Received a clear request at time: ", time.time())
else:
print("Received a new schedule at time: ", time.time())
elif receivedMessage["MsgType"] == "CurrNextPhaseRequest":
# print("Received CurrNextPhaseRequest at time: ", time.time())
data = getJsonString(currentStatusFileName)
tciSocket.sendto(
data.encode(), priorityRequestSolverToTCIInterfaceCommunicationInfo)
print("Sent Current Phase Status to Solver at time: ", time.time())
elif receivedMessage["MsgType"] == "TimingPlanRequest":
# print("Received TimingPlanRequest at time ", time.time())
data = getJsonString(currentSignalPlanFileName)
tciSocket.sendto(data.encode(), priorityRequestSolverCommunicationInfo)
print("Sent Signal Plan to Solver at time: ", time.time())
tciSocket.close()
|
1704648
|
import json
import pymongo
from flask import request, abort, json, render_template, Response
from flask.ext import restful
from flask.ext.restful import reqparse
from flask_rest_service import app, api, mongo
from bson.objectid import ObjectId
from bson.code import Code
# ----- /stats -----
class Stats(restful.Resource):
# ----- GET Request -----
def get(self):
        return Response(render_template("index.html"), mimetype='text/html')
api.add_resource(Stats, '/stats')
|
1704664
|
from chainer.links.model.vision.resnet import ResNet50Layers
import collections
import chainer.functions as F
from chainer.links import BatchNormalization
class C4Backbone(ResNet50Layers):
def __init__(self, pretrained_model):
super().__init__(pretrained_model)
del self.res5
del self.fc6
for l in self.links():
if isinstance(l, BatchNormalization):
l.disable_update()
@property
def functions(self):
return collections.OrderedDict(
[('conv1', [self.conv1, self.bn1, F.relu]),
('pool1', [lambda x: F.max_pooling_2d(x, ksize=3, stride=2)]),
('res2', [self.res2]), ('res3', [self.res3]), ('res4',
[self.res4])])
def __call__(self, x, **kwargs):
return super().__call__(x, ['res4'], **kwargs)['res4'],
|
1704755
|
import torch.nn.init as init
import torch.nn as nn
import math
import os
def recursive_glob(rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
:param rootdir is the root directory
:param suffix is the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('BatchNorm') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('BatchNorm') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
# print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
def step_topk_scheduler(init_topk1, init_topk2, epoch, step_size, limit=(384, 384), ratio=0.875):
multiplier = ratio ** (epoch // step_size)
topk1 = int(init_topk1 * multiplier)
topk2 = int(init_topk2 * multiplier)
if topk1 <= limit[0] and topk2 <= limit[1]:
topk1, topk2 = limit[0], limit[1]
return topk1, topk2
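# Worked illustration (hedged; exact values depend on the chosen ratio): with
# init_topk1 = init_topk2 = 512, step_size = 10 and the default ratio of 0.875,
# epoch 20 yields (392, 392); by epoch 30 the raw value (343) falls below the
# (384, 384) limit and is floored to (384, 384).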
def freeze_bn(m, freeze_bn_affine=False):
if isinstance(m, nn.BatchNorm2d):
m.eval()
if freeze_bn_affine:
m.weight.requires_grad = False
m.bias.requires_grad = False
|
1704798
|
import requests
from flask import request
from flask_restful import Resource
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from ..models import User, Permission, login_manager
class SearchGithub(Resource):
@jwt_required()
def post(self):
# TODO: add ability to search by topic, size, # followers, etc..
req_data = request.get_json()
repo = req_data.get("repo")
current_user = get_jwt_identity()
_user = User.objects(username=current_user).first()
request_url = "https://api.github.com/search/repositories?q=%s" % repo
res = requests.get(
url=request_url,
headers={
"Accept": "application/json",
"Authorization": "token {}".format(_user.github_access_token),
},
)
if res.status_code != 200:
raise AssertionError
res_json = res.json()
top_ten_results = res_json["items"][:10]
return top_ten_results
|
1704841
|
from common_utils.bio_prf_eval import BioEval
class Task:
def __init__(self, args):
self.args = args
# set up eval tool for model selection
self.bio_eval = BioEval()
self.bio_eval.set_logger(self.args.logger)
# set up data loader
# init or reload model
def train(self):
pass
def _eval(self):
pass
def eval(self):
pass
def predict(self):
pass
|
1704857
|
from toee import *
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
triggerer.begin_dialog( attachee, 1 )
return SKIP_DEFAULT
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
game.global_flags[836] = 1
return RUN_DEFAULT
def san_enter_combat( attachee, triggerer ):
# if (not attachee.has_wielded(4082) or not attachee.has_wielded(4112)):
if (not attachee.has_wielded(4500) or not attachee.has_wielded(4112)):
attachee.item_wield_best_all()
# game.new_sid = 0
return RUN_DEFAULT
def san_start_combat( attachee, triggerer ):
while(attachee.item_find(8903) != OBJ_HANDLE_NULL):
attachee.item_find(8903).destroy()
#if (attachee.d20_query(Q_Is_BreakFree_Possible)): # workaround no longer necessary!
# create_item_in_inventory( 8903, attachee )
# if (not attachee.has_wielded(4082) or not attachee.has_wielded(4112)):
if (not attachee.has_wielded(4500) or not attachee.has_wielded(4112)):
attachee.item_wield_best_all()
# game.new_sid = 0
return RUN_DEFAULT
def san_resurrect( attachee, triggerer ):
game.global_flags[836] = 0
return RUN_DEFAULT
def san_heartbeat( attachee, triggerer ):
if (not attachee.has_wielded(4500) or not attachee.has_wielded(4112)):
attachee.item_wield_best_all()
attachee.item_wield_best_all()
if (attachee.map != 5085 and not game.combat_is_active()):
for obj in game.obj_list_vicinity(attachee.location,OLC_PC):
attachee.turn_towards(obj)
if (is_safe_to_talk(attachee,obj)):
obj.begin_dialog(attachee,1)
game.new_sid = 0
return RUN_DEFAULT
def run_off(npc, pc):
npc.item_transfer_to_by_proto( pc, 11002)
npc.runoff(npc.location-3)
return RUN_DEFAULT
|
1704875
|
from pytest_dash.utils import (
import_app,
wait_for_text_to_equal,
wait_for_element_by_css_selector
)
# Basic test for the component rendering.
def test_render_component(dash_threaded, selenium):
# Start a dash app contained in `usage.py`
# dash_threaded is a fixture by pytest-dash
# It will load a py file containing a Dash instance named `app`
# and start it in a thread.
app = import_app('usage')
dash_threaded(app)
# Get the generated component input with selenium
# The html input will be a children of the #input dash component
my_component = wait_for_element_by_css_selector(selenium, '#input > input')
assert 'my-value' == my_component.get_attribute('value')
# Clear the input
my_component.clear()
# Send keys to the custom input.
my_component.send_keys('Hello dash')
# Wait for the text to equal, if after the timeout (default 10 seconds)
# the text is not equal it will fail the test.
wait_for_text_to_equal(selenium, '#output', 'You have entered Hello dash')
|
1704916
|
from dlib import *
import numpy as np
import sys
sys.path = ['./superfast/build'] + sys.path
import superfast
import tools
# img = load_grayscale_image(sys.argv[1])
img = load_grayscale_image('./images/find_page/paper01.jpg')
# What about this image? Need to do something to fix it
# img = load_grayscale_image('./images/find_page/tissue_01.jpg')
ht = hough_transform(300)
img = sub_image(gaussian_blur(img,3))
img = resize_image(img, ht.size, ht.size)
win1 = image_window(img)
img = equalize_histogram(img)
x,y = sobel_edge_detector(img)
edges = suppress_non_maximum_edges(x,y)
win3 = image_window(edges)
normalize_image_gradients(x,y)
superfast.discard_wacky_edge_groups(edges, x, y)
edges = hysteresis_threshold(edges, partition_pixels(edges), 50)
win4 = image_window(edges)
himg = ht(edges)
hits = ht.find_strong_hough_points(himg, hough_count_thresh=ht.size/5, angle_nms_thresh=15, radius_nms_thresh=10)
print("hough hits found: ", len(hits))
# keep just the 50 best lines
if (len(hits) > 50):
hits = hits[0:50]
# NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Use tools.find_hough_boxes_less_simple(), which now also checks if all the corners are connected.
line_pixels = ht.find_pixels_voting_for_lines(edges, hits, 4,4)
boxes = tools.find_hough_boxes_less_simple(ht, hits, line_pixels)
if len(boxes) > 0:
c1,c2,c3,c4,area,idx1,idx2,idx3,idx4 = boxes[0]
win1.add_overlay(line(c1,c2))
win1.add_overlay(line(c2,c3))
win1.add_overlay(line(c3,c4))
win1.add_overlay(line(c4,c1))
page = extract_image_4points(img, [c1,c2,c3,c4], 200,200)
win_page = image_window(page)
# NEW !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# plot the pixels that make up this box.
cimg = convert_image(edges, dtype='rgb_pixel')
for l in [line_pixels[i] for i in [idx1, idx2, idx3, idx4]]:
for p in l:
cimg[p.y,p.x,:] = (255,0,0)
win4.set_image(cimg)
input("hit enter to exit")
|
1704923
|
import re, time, string
from datetime import datetime, date, timedelta
__all__ = ['Cookie']
class Cookie(dict):
def __init__(self, input=None):
if input:
self.load(input)
def load(self, data):
ckey = None
for key, val in _rx_cookie.findall(data):
if key.lower() in _c_keys:
if ckey:
self[ckey][key] = _unquote(val)
elif key[0] == '$':
# RFC2109: NAMEs that begin with $ are reserved for other uses
# and must not be used by applications.
continue
else:
self[key] = _unquote(val)
ckey = key
def __setitem__(self, key, val):
if needs_quoting(key):
return
dict.__setitem__(self, key, Morsel(key, val))
def serialize(self, full=True):
return '; '.join(m.serialize(full) for m in self.values())
def values(self):
return [m for _,m in sorted(self.items())]
__str__ = serialize
def __repr__(self):
return '<%s: [%s]>' % (self.__class__.__name__,
', '.join(map(repr, self.values())))
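# Hedged usage sketch (illustration only, not executed):
#     c = Cookie('name=value; Path=/')
#     c['name'].value    -> 'value'
#     c['name'].path     -> '/'
#     c.serialize()      -> 'name=value; Path=/'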
def cookie_property(key, serialize=lambda v: v):
def fset(self, v):
self[key] = serialize(v)
return property(lambda self: self[key], fset)
def serialize_max_age(v):
if isinstance(v, timedelta):
return str(v.seconds + v.days*24*60*60)
elif isinstance(v, int):
return str(v)
else:
return v
def serialize_cookie_date(v):
if v is None:
return None
elif isinstance(v, str):
return v
elif isinstance(v, int):
v = timedelta(seconds=v)
if isinstance(v, timedelta):
v = datetime.utcnow() + v
if isinstance(v, (datetime, date)):
v = v.timetuple()
r = time.strftime('%%s, %d-%%s-%Y %H:%M:%S GMT', v)
return r % (weekdays[v[6]], months[v[1]])
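# Illustration (hedged example): serialize_cookie_date(timedelta(days=1)) renders a date
# one day from now in the classic cookie format, e.g. "Sun, 06-Nov-1994 08:49:37 GMT";
# the %%s placeholders above are filled from the English weekday/month tuples defined
# below, so the output never depends on the current locale.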
class Morsel(dict):
__slots__ = ('name', 'value')
def __init__(self, name, value):
assert name.lower() not in _c_keys
assert not needs_quoting(name)
assert isinstance(value, str)
self.name = name
# we can encode the unicode value as UTF-8 here,
# but then the decoded cookie would still be str,
# so we don't do that
self.value = value
self.update(dict.fromkeys(_c_keys, None))
path = cookie_property('path')
domain = cookie_property('domain')
comment = cookie_property('comment')
expires = cookie_property('expires', serialize_cookie_date)
max_age = cookie_property('max-age', serialize_max_age)
httponly = cookie_property('httponly', bool)
secure = cookie_property('secure', bool)
def __setitem__(self, k, v):
k = k.lower()
if k in _c_keys:
dict.__setitem__(self, k, v)
def serialize(self, full=True):
result = []
add = result.append
add("%s=%s" % (self.name, _quote(self.value)))
if full:
for k in _c_valkeys:
v = self[k]
if v:
assert isinstance(v, str), v
add("%s=%s" % (_c_renames[k], _quote(v)))
expires = self['expires']
if expires:
add("expires=%s" % expires)
if self.secure:
add('secure')
if self.httponly:
add('HttpOnly')
return '; '.join(result)
__str__ = serialize
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.name, repr(self.value))
_c_renames = {
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
}
_c_valkeys = sorted(_c_renames)
_c_keys = set(_c_renames)
_c_keys.update(['expires', 'secure', 'httponly'])
#
# parsing
#
_re_quoted = r'"(?:\\"|.)*?"' # any doublequoted string
_legal_special_chars = "~!@#$%^&*()_+=-`.?|:/(){}<>'"
_re_legal_char = r"[\w\d%s]" % ''.join(map(r'\%s'.__mod__,
_legal_special_chars))
_re_expires_val = r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT"
_rx_cookie = re.compile(
# key
(r"(%s+?)" % _re_legal_char)
# =
+ r"\s*=\s*"
# val
+ r"(%s|%s|%s*)" % (_re_quoted, _re_expires_val, _re_legal_char)
)
_rx_unquote = re.compile(r'\\([0-3][0-7][0-7]|.)')
def _unquote(v):
if v and v[0] == v[-1] == '"':
v = v[1:-1]
def _ch_unquote(m):
v = m.group(1)
if v.isdigit():
return chr(int(v, 8))
return v
v = _rx_unquote.sub(_ch_unquote, v)
return v
#
# serializing
#
_trans_noop = ''.join(chr(x) for x in xrange(256))
# these chars can be in cookie value w/o causing it to be quoted
_no_escape_special_chars = "!#$%&'*+-.^_`|~/"
_no_escape_chars = string.ascii_letters + string.digits + \
_no_escape_special_chars
# these chars never need to be quoted
_escape_noop_chars = _no_escape_chars+': '
# this is a map used to escape the values
_escape_map = dict((chr(i), '\\%03o' % i) for i in xrange(256))
_escape_map.update(zip(_escape_noop_chars, _escape_noop_chars))
_escape_map['"'] = '\\"'
_escape_map['\\'] = '\\\\'
_escape_char = _escape_map.__getitem__
weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
months = (None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')
def needs_quoting(v):
return v.translate(_trans_noop, _no_escape_chars)
def _quote(v):
if needs_quoting(v):
return '"' + ''.join(map(_escape_char, v)) + '"'
return v
|
1704930
|
import json
import os
import shutil
import warnings
from typing import List, Tuple
from sklearn.model_selection import train_test_split
def read_json(fp: str) -> dict:
"""Helper. Reads a JSON file and returns content.
:param fp: The filepath to the JSON file.
:dtype fp: str
"""
with open(fp) as f:
f_data = json.load(f)
return f_data
def generate_folders(fp_rootdir: str = "./rs19_person") -> None:
""""Generates folders to save the new dataset in. Follows YOLOv5 standard.
:param fp_rootdir: Foldername of the new dataset.
:dtype fp_rootdir: str
"""
for folder in (os.path.join(fp_rootdir, "images/train"), os.path.join(fp_rootdir, "images/val"), os.path.join(fp_rootdir, "images/test"),
os.path.join(fp_rootdir, "labels/train"), os.path.join(fp_rootdir, "labels/val"), os.path.join(fp_rootdir, "labels/test")):
        if not os.path.exists(folder): # Only make a new folder if it doesn't already exist
os.makedirs(folder)
else:
warnings.warn(f"Folder {folder} already exists. Make sure there's no train/val/test contamination"
" from a previously generated subset with a (possible) different seed.", Warning, stacklevel=2)
def read_all_labels(fp_folder: str = "./rs19_val/jsons/rs19_val") -> List[dict]:
"""Loads the data of all JSON files from a provided folder and returns the contents in a list.
:param fp_folder: Filepath to the folder containing JSON files.
:dtype fp_folder: str
"""
return [read_json(os.path.join(fp_folder, file)) for file in os.listdir(fp_folder)]
def get_relevant_data(json_data: List[dict], classname: str = "person") -> List[str]:
"""Generates a list of all filenames with at least one instance of the provided class.
:param json_data: List of dicts, each dict is JSON data in RailSem19 format.
    :dtype json_data: List[dict], generally the output of `read_all_labels()`.
"""
return [label["frame"] for label in json_data if classname in (l["label"] for l in label["objects"])]
def train_val_test_split(relevant_data: List[str], seed: int = 42) -> Tuple[List[str], List[str], List[str]]:
"""Splits a list in seperate train, validate and test datasets.
TODO: add params for train / val / test sizes
    :param relevant_data: The list to be divided, generally a list of filenames.
:dtype relevant_data: List[str]
"""
relevant_data = sorted(relevant_data) # Ensures the input to the split is always the same
train, rest = train_test_split(relevant_data, test_size=0.3, shuffle=True, random_state=seed) # 70% to train
val, test = train_test_split(rest, test_size=0.5, shuffle=True, random_state=seed) # Divide the remaining 30% equally over val and test
return train, val, test
def copy_images(filenames_train_val_test: Tuple[List[str], List[str], List[str]], fp_in: str = "./rs19_val", fp_out: str = "./rs19_person") -> None:
"""Copies images from RailSem19 dataset to the correct folders. In YOLOv5 folder format.
:param filenames_train_val_test: Tuple of lists containing the filenames (without extension) of the images,
        generally the output of `train_val_test_split()`.
    :dtype filenames_train_val_test: Tuple[List[str], List[str], List[str]]
:param fp_in: Filepath of the folder where the images are currently stored.
:dtype fp_in: str
:param fp_out: Root directory to place the contents of the three datasets in.
Do not forget to first make these folders with `generate_folders()`.
:dtype fp_out: str
"""
folders_out = (os.path.join(fp_out, "images/train"), os.path.join(fp_out, "images/val"), os.path.join(fp_out, "images/test"))
for dataset, folder in zip(filenames_train_val_test, folders_out):
for filename in dataset:
path_in = os.path.join(fp_in, "jpgs", "rs19_val", filename + ".jpg") # Filepath to the image in the RailSem19 dataset
path_out = os.path.join(folder, filename + ".jpg") # Filepath to copy the image to
if not os.path.isfile(path_out): # Skip copying if file already exists, performance improvement
shutil.copyfile(path_in, path_out)
def poly2bbox(points: List[Tuple[int, int]], classnumber: int = 0,
              img_width: int = 1920, img_height: int = 1080) -> List[float]:
"""Converts N by 2 list to 4 coordinate bbox in YOLOv5 format.
Format: [classnumber, x_center, y_center, width, height]
Based on: https://stackoverflow.com/a/46336730
:param points: Polygon points of an object, in RailSem19 format.
:dtype points: List[Tuple[int, int]]
:param classnumber: The number of the class, to directly output as first variable in the output-list
:dtype classnumber: int
:param img_width: The width of the image containing the bbox
:dtype img_width: int
:param img_height: The height of the image containing the bbox
:dtype img_height: int
"""
x_coords, y_coords = zip(*points) # Transpose N by 2 list to a 2 by N list
x_min = min(x_coords) # Leftbound of bbox
y_min = min(y_coords) # Lowerbound of bbox
width = (max(x_coords) - x_min) / img_width # Width of bbox
height = (max(y_coords) - y_min) / img_height # Height of bbox
x_center = width / 2 + x_min / img_width
y_center = height / 2 + y_min / img_height
return [classnumber, x_center, y_center, width, height]
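# Worked example (hypothetical polygon, not part of the original module): for an axis-aligned
# box spanning (100, 200)-(300, 400) in a 1920x1080 image,
#   poly2bbox([(100, 200), (300, 400)]) -> [0, 0.1042, 0.2778, 0.1042, 0.1852]
# i.e. class 0 followed by the normalized center (x, y) and normalized width/height.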
def generate_all_labels(filenames_train_val_test: Tuple[List[str], List[str], List[str]], fp_in: str = "./rs19_val",
fp_out: str = "./rs19_person", relevant_classes: dict = {"person": 0}) -> None:
"""Converts all JSON labels in RailSem19 format to txt labels in YOLOv5 format. Writes txts to corresponding folders.
:param filenames_train_val_test: Tuple of lists containing the filenames (without extension) of the images,
        generally the output of `train_val_test_split()`.
    :dtype filenames_train_val_test: Tuple[List[str], List[str], List[str]]
:param fp_in: Filepath of the folder where the images are currently stored.
:dtype fp_in: str
:param fp_out: Root directory to place the contents of the three datasets in.
Do not forget to first make these folders with `generate_folders()`.
:dtype fp_out: str
:param relevant_classes: Dict of all classes that are worth saving in YOLOv5 format
:dtype relevant_classes: dict, keys are the classnames in RailSem19 format, values are the classnumbers in YOLOv5 format.
"""
folders_out = (os.path.join(fp_out, "labels/train"), os.path.join(fp_out, "labels/val"), os.path.join(fp_out, "labels/test"))
for dataset, folder in zip(filenames_train_val_test, folders_out):
for filename in dataset:
path_in = os.path.join(fp_in, "jsons", "rs19_val", filename + ".json") # Filepath to the label in the RailSem19 dataset
path_out = os.path.join(folder, filename + ".txt") # Filepath to copy the label to
data = read_json(path_in) # Read the JSON annotation
img_width, img_height = data["imgWidth"], data["imgHeight"]
data_txt = [] # List of all objects in a single image
for obj in data["objects"]:
if obj["label"] in relevant_classes.keys(): # Only save relevant classes to the txt
polygon = obj["polygon"] # TODO: Add support if bbox is known instead of polygon
bbox = poly2bbox(polygon, relevant_classes[obj["label"]], img_width, img_height)
data_txt.append(" ".join(str(i) for i in bbox)) # Add instance to list of instances of this image
data_txt = "\n".join(data_txt) # Combine all objects into single string
if not os.path.isfile(path_out): # Skip writing if file already exists, performance improvement
with open(path_out, "w") as file_out:
file_out.write(data_txt)
if __name__ == "__main__":
fp_railsem19_dataset = "./data/rs19_val"
fp_new_dataset = "./data/rs19_person"
generate_folders(fp_rootdir=fp_new_dataset) # Generate empty folders
json_data = read_all_labels(fp_folder=os.path.join(fp_railsem19_dataset, "jsons", "rs19_val")) # Load all JSON labels
# List of all filenames with the class `person`
data_person = get_relevant_data(json_data=json_data, classname="person")
# List of all filenames with the class `person-group`
data_person_group = get_relevant_data(json_data=json_data, classname="person-group")
relevant_data = set(data_person) - set(data_person_group) # Only keep images where `person` is present and `person-group` is not
filenames_train_val_test = train_val_test_split(relevant_data, seed=42) # Split filenames in train / val / test split
copy_images(filenames_train_val_test, fp_in=fp_railsem19_dataset,
fp_out=fp_new_dataset) # Copy relevant images to corresponding folders
generate_all_labels(filenames_train_val_test, fp_in=fp_railsem19_dataset, fp_out=fp_new_dataset, relevant_classes={"person": 0})
print([len(l) for l in filenames_train_val_test])
|
1704945
|
from .responder import Responder, ResponderType
from pepper.framework import *
from pepper.language import Utterance
from pepper.knowledge import animations
from random import choice
from typing import Optional, Union, Tuple, Callable
class BrexitResponder(Responder):
@property
def type(self):
return ResponderType.Topic
@property
def requirements(self):
return [TextToSpeechComponent]
def respond(self, utterance, app):
# type: (Utterance, Union[TextToSpeechComponent]) -> Optional[Tuple[float, Callable]]
transcript = utterance.transcript.lower()
if all(key in transcript for key in ['you', 'think', 'brexit']):
# -> What do you think about the brexit?
return 1.0, lambda: app.say("I just know what is written in the news and what people tell me. "
"I don't think I'm smart enough yet to make up my own mind about the Brexit.")
elif all(key in transcript for key in ['you', 'know', 'brexit']):
# -> What do you know about the brexit?
return 1.0, lambda: app.say("I know the Brexit is about politics! I don't know a lot about politics!")
elif all(key in transcript for key in ['brexit', 'you']):
# -> Brexit for Robots?
return 1.0, lambda: app.say("I just hope I can still talk to and learn from my British Robot friends!")
|
1704946
|
import copy
from os import path
from typing import Dict
import torch
from torch import nn
from torch.utils.data import TensorDataset
from torchvision.datasets import CIFAR10, MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
def transform_mnist():
return Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
def sample_from_class(data_set, k):
"""
function to sample data and their labels from a dataset in pytorch in
a stratified manner
Args
----
data_set
    k: the number of samples per class that will be accumulated in the first split
Returns
-----
train_dataset
val_dataset
"""
class_counts = {}
train_data = []
train_label = []
test_data = []
test_label = []
for data, label in data_set:
class_i = label.item() if isinstance(label, torch.Tensor) else label
class_counts[class_i] = class_counts.get(class_i, 0) + 1
if class_counts[class_i] <= k:
train_data.append(data)
train_label.append(label)
else:
test_data.append(data)
test_label.append(label)
train_data = torch.stack(train_data)
train_label = torch.tensor(train_label, dtype=torch.int64)
test_data = torch.stack(test_data)
test_label = torch.tensor(test_label, dtype=torch.int64)
return (
TensorDataset(train_data, train_label),
TensorDataset(test_data, test_label),
)
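# Illustrative behaviour (hypothetical dataset, not part of the original module): given a dataset
# yielding (tensor, label) pairs, sample_from_class(ds, k=500) returns two TensorDatasets; the
# first holds the first 500 examples encountered per class, the second holds everything else.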
def load_mnist():
train_set = MNIST(
root="./data/data_mnist/",
train=True,
transform=transform_mnist(),
download=True,
)
val_set, tr_set = sample_from_class(train_set, 500)
test_set = MNIST(
root="./data/data_mnist/",
train=False,
transform=transform_mnist(),
download=True,
)
return [train_set, val_set, test_set]
class SampleCNN(nn.Module):
def __init__(self, shape=(3, 32, 32), batch_size=4):
super().__init__()
self.input_shape = shape
self.batch_size = batch_size
self.conv1 = nn.Conv2d(in_channels=shape[0], out_channels=8, kernel_size=5)
self.pool1 = nn.MaxPool2d(2)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=8, out_channels=4, kernel_size=3)
self.pool2 = nn.MaxPool2d(2)
self.relu2 = nn.ReLU()
self.flatten = nn.Flatten()
self.interface_shape = self.get_shape()
self.interface = nn.Linear(in_features=self.interface_shape.numel(), out_features=32)
self.relu3 = nn.ReLU()
self.linear = nn.Linear(in_features=32, out_features=10)
def get_shape(self):
sample = torch.randn(size=(self.batch_size, *self.input_shape))
out = self.conv1(sample)
out = self.pool1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.pool2(out)
out = self.relu2(out)
return out.shape[1:]
def forward(self, x):
out = self.conv1(x)
out = self.pool1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.pool2(out)
out = self.relu2(out)
out = self.flatten(out)
out = self.interface(out)
out = self.relu3(out)
return self.linear(out)
class SimpleTrainer:
def __init__(
self, datasets=None, dataloaders=None, models_path=".", cuda="cuda:0",
):
super().__init__()
self.datasets = datasets
# TODO: choose GPU with less memory
self.devicy = torch.device(
cuda if torch.cuda.is_available() else "cpu"
)
self.datasizes = {
i: len(sett)
for i, sett in zip(["train", "val", "test"], self.datasets)
}
self.models_path = models_path
self.dataloaders = dataloaders
self.criterion = nn.CrossEntropyLoss()
def train(
self, net: nn.Module, parameters: Dict[str, float], name: str,
) -> nn.Module:
net.to(device=self.devicy) # pyre-ignore [28]
optimizer = torch.optim.Adam(
net.parameters(), lr=parameters.get("learning_rate")
)
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=parameters.get("learning_step"),
gamma=parameters.get("learning_gamma"),
)
# Train Network
net = self.train_loop(
net, optimizer, exp_lr_scheduler, name, parameters.get("epochs")
)
return net
def train_loop(self, model, optimizer, scheduler, name, epochs):
"""
Training loop
"""
best_loss = 10 ** 8
for _ in tqdm(range(epochs)):
for phase in ["train", "val"]:
if phase == "train":
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0.0
for inputs, labels in self.dataloaders[phase]:
inputs = inputs.to(self.devicy)
labels = labels.to(self.devicy)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "train"):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = self.criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == "train":
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * labels.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == "train":
scheduler.step()
epoch_acc = running_corrects / self.datasizes[phase]
epoch_loss = running_loss / self.datasizes[phase]
# deep copy the model
if phase == "val" and epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
model.load_state_dict(best_model_wts)
torch.save(
model.state_dict(), path.join(self.models_path, str(name) + ".pth")
)
return model
def evaluate(self, net: nn.Module) -> float:
correct = 0
total = 0
data_loader = self.dataloaders["test"]
net.eval()
with torch.no_grad():
for inputs, labels in data_loader:
inputs = inputs.to(device=self.devicy)
labels = labels.to(device=self.devicy)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct / total
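# Minimal wiring sketch (hyperparameters and batch size are illustrative, not from the original module):
#   train_set, val_set, test_set = load_mnist()
#   loaders = {name: torch.utils.data.DataLoader(ds, batch_size=64, shuffle=(name == "train"))
#              for name, ds in zip(["train", "val", "test"], [train_set, val_set, test_set])}
#   trainer = SimpleTrainer(datasets=[train_set, val_set, test_set], dataloaders=loaders)
#   net = trainer.train(SampleCNN(shape=(1, 28, 28)), {"learning_rate": 1e-3, "learning_step": 5,
#                                                      "learning_gamma": 0.5, "epochs": 10}, name="cnn")
#   print(trainer.evaluate(net))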
|
1704956
|
from TelegramBotAPI.client.requestsclient import RequestsClient
from TelegramBotAPI.types.methods import getUpdates
from twisted.application import service
from twisted.internet import reactor, threads, defer
from twisted.python import log
class TwistedClient(service.Service):
name = 'telegrambot_client'
_limit = 10
_timeout = 5
_lock = None
_poll = True
_offset = None
_poll_backoff = 0
def __init__(self, token, on_update, proxy=None, debug=False):
self._lock = defer.DeferredLock()
self._token = token
self._proxy = proxy
self._debug = debug
assert callable(on_update)
self._on_update = on_update
def startService(self):
self._client = RequestsClient(self._token, proxy=self._proxy, debug=self._debug)
reactor.callLater(0, self._poll_updates)
def stopService(self):
self._poll = False
def send_method(self, m):
d = self._lock.acquire()
def do_send(_):
return threads.deferToThread(self._send_thread, m)
def do_release(value):
self._lock.release()
return value
d.addCallback(do_send)
d.addBoth(do_release)
return d
def _send_thread(self, m):
resp = self._client.send_method(m)
return resp
@defer.inlineCallbacks
def _poll_updates(self, _=None):
while self._poll:
yield threads.deferToThread(self._poll_updates_thread)
if self._poll_backoff:
d = defer.Deferred()
reactor.callLater(self._poll_backoff, d.callback, None)
log.msg('Backing off update poll for %s' % self._poll_backoff)
self._poll_backoff = 0
yield d
def _poll_updates_thread(self):
m = getUpdates()
m.timeout = self._timeout
m.limit = self._limit
if self._offset is not None:
m.offset = self._offset
try:
updates = self._client.send_method(m)
reactor.callFromThread(self._handle_updates, updates)
except Exception as e:
reactor.callFromThread(self._handle_updates_error, e)
# import traceback
# log.msg(traceback.format_exc())
@defer.inlineCallbacks
def _handle_updates(self, updates):
if updates:
for update in updates:
self._offset = update.update_id + 1
try:
yield defer.maybeDeferred(self._on_update, update.message)
except Exception as e:
# import traceback
# log.msg(traceback.format_exc())
log.msg(e)
pass
    def _handle_updates_error(self, e):
        # Log the failure and back off the poll loop instead of re-raising into the reactor thread.
        log.msg(e)
        self._poll_backoff = 5
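# Minimal wiring sketch for a twistd .tac file (token and handler are placeholders):
#   def on_update(message):
#       print(message)
#   application = service.Application('telegram-bot')
#   TwistedClient('123456:TOKEN', on_update).setServiceParent(application)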
|
1705006
|
from .base import MIN_HEARTBEAT_CHECK_SPAN, MIN_HEARTBEAT_SPAN, DEFAULT_MASTER_PORT, DEFAULT_SLAVE_PORT, \
DEFAULT_CHANNEL, DEFAULT_HEARTBEAT_CHECK_SPAN, DEFAULT_HEARTBEAT_TOLERANCE, DEFAULT_HEARTBEAT_SPAN, LOCAL_HOST, \
GLOBAL_HOST, DEFAULT_REQUEST_RETRIES, DEFAULT_REQUEST_RETRY_WAITING
|
1705053
|
import torch.nn as nn # type: ignore
import torch # type: ignore
import torch.nn.init # type: ignore
import math # type: ignore
from collections import OrderedDict
#import models
use_relu = False
use_bn = True
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
global use_bn
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=not use_bn)
def calculate_scale(data):
if data.dim() == 2:
scale = math.sqrt(3 / data.size(1))
else:
scale = math.sqrt(3 / (data.size(1) * data.size(2) * data.size(3)))
return scale
class ChannelAttention(nn.Module):
def __init__(self, input_size, bottleneck_size):
super(ChannelAttention, self).__init__()
self.input_size = input_size
self.bottleneck_size = bottleneck_size
self.se_fc1 = nn.Conv2d(self.input_size, self.bottleneck_size, kernel_size = 1)
self.se_fc2 = nn.Conv2d(self.bottleneck_size, self.input_size, kernel_size = 1)
def forward(self, x):
w_max = nn.functional.max_pool2d(x, x.size(2))
w_max = nn.functional.relu(self.se_fc1(w_max))
w_avg = nn.functional.avg_pool2d(x, x.size(2))
w_avg = nn.functional.relu(self.se_fc1(w_avg))
w = w_max + w_avg
w = torch.sigmoid(self.se_fc2(w))
x = x * w
return x
class SpatialAttention(nn.Module):
def __init__(self, kernel_size):
super(SpatialAttention, self).__init__()
self.kernel_size = kernel_size
self.conv1 = nn.Conv2d(2, 1, kernel_size = self.kernel_size, padding=self.kernel_size//2)
def forward(self, x):
w_max, _ = torch.max(x, dim=1, keepdim=True)
w_avg = torch.mean(x, dim=1, keepdim=True)
w = torch.cat([w_avg, w_max], dim=1)
w = self.conv1(w)
w = torch.sigmoid(w)
x = x * w
return x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
global use_bn
self.use_bn = use_bn
self.conv1 = conv3x3(inplanes, planes, stride)
torch.nn.init.normal_(self.conv1.weight.data, 0, 0.01)
if self.conv1.bias is not None:
self.conv1.bias.data.zero_()
if self.use_bn:
self.bn1 = nn.BatchNorm2d(planes)
if use_relu:
self.relu1 = nn.ReLU(inplace=True)
else:
self.relu1 = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes)
torch.nn.init.normal_(self.conv2.weight.data, 0, 0.01)
if self.conv2.bias is not None:
self.conv2.bias.data.zero_()
if self.use_bn:
self.bn2 = nn.BatchNorm2d(planes)
if use_relu:
self.relu2 = nn.ReLU(inplace=True)
else:
self.relu2 = nn.PReLU(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.use_bn:
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
if self.use_bn:
out = self.bn2(out)
out = self.relu2(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
return out
class AggBlock(nn.Module):
def __init__(self, layer_lvl, layers, agg_type='concat', channel_attention=False):
super(AggBlock, self).__init__()
self.layer_lvl = layer_lvl
self.agg_type = agg_type
self.channel_attention = channel_attention
if self.layer_lvl == 1:
inplanes = 64
outplanes = 128
elif self.layer_lvl == 2:
inplanes = 128
outplanes = 256
elif self.layer_lvl == 3:
inplanes = 256
outplanes = 512
self.agg_layer = _make_layer(BasicBlock, inplanes, outplanes, layers, stride=2)
if self.channel_attention:
self.ch_att = ChannelAttention(outplanes, outplanes//4)
if self.agg_type == 'concat':
self.conv1 = nn.Conv2d(inplanes * 3, inplanes, kernel_size=1)
def forward(self, prev_x, rgb_x, depth_x, ir_x):
if self.agg_type == 'concat':
x = torch.cat((rgb_x,depth_x,ir_x), dim=1)
x = nn.functional.relu(self.conv1(x))
if self.layer_lvl in [2,3]:
x = prev_x + x
x = self.agg_layer(x)
if self.channel_attention:
x = self.ch_att(x)
return x
def _load_pretrained_weights(self, weights):
pretrained_weights = self.state_dict()
replace_k = 'layer{}'.format(self.layer_lvl + 1)
for k,v in pretrained_weights.items():
if 'num_batches_tracked' in k or k.startswith('conv1') or 'ch_att' in k:
continue
pretrained_weights[k] = weights[k.replace('agg_layer', replace_k)]
self.load_state_dict(pretrained_weights)
def _make_layer(block, inplanes, planes, blocks, stride=1):
layers = []
layers.append(nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=0,
bias=False))
scale = calculate_scale(layers[-1].weight.data)
torch.nn.init.uniform_(layers[-1].weight.data, -scale, scale)
if layers[-1].bias is not None:
layers[-1].bias.data.zero_()
layers.append(nn.BatchNorm2d(planes))
layers.append(nn.PReLU(planes))
layers.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
inplanes = planes
for i in range(0, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
class ResNetCaffe(nn.Module):
def __init__(self, layers,
block=None,
k=1,
use_bn_=True,
init='kaiming_normal',
channel_attention=False):
global use_relu
global use_bn
self.use_bn = use_bn
self.channel_attention = channel_attention
super(ResNetCaffe, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=0, bias=False)
scale = calculate_scale(self.conv1.weight.data)
torch.nn.init.uniform_(self.conv1.weight.data, -scale, scale)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.PReLU(32)
block = block if block is not None else BasicBlock
self.layer1 = _make_layer(block, 32, 64, layers[0])
self.layer2 = _make_layer(block, 64, 128, layers[1], stride=2)
self.layer3 = _make_layer(block, 128, 256, layers[2], stride=2)
if self.channel_attention:
self.ch_att1 = ChannelAttention(64, 64//4)
self.ch_att2 = ChannelAttention(128, 128//4)
self.ch_att3 = ChannelAttention(256, 256//4)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x_layer1 = self.layer1(x)
if self.channel_attention:
x_layer1 = self.ch_att1(x_layer1)
x_layer2 = self.layer2(x_layer1)
if self.channel_attention:
x_layer2 = self.ch_att2(x_layer2)
x_layer3 = self.layer3(x_layer2)
if self.channel_attention:
x_layer3 = self.ch_att3(x_layer3)
return x_layer1, x_layer2, x_layer3
def _load_pretrained_weights(self, weights):
pretrained_weights = self.state_dict()
for k,v in pretrained_weights.items():
if 'num_batches_tracked' in k or 'ch_att' in k:
continue
pretrained_weights[k] = weights[k]
self.load_state_dict(pretrained_weights)
class ResNetDLAS(nn.Module):
def __init__(self, block, layers, DLAS_type='A', pretrained=False):
super(ResNetDLAS, self).__init__()
global use_bn
self.use_bn = use_bn
self.DLAS_type = DLAS_type
self.channel_attention = False
if self.DLAS_type == 'A':
self.layer4 = _make_layer(block, 256 * 3, 512, layers[3], stride=2)
self.main_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.agg_avgpool = nn.AdaptiveAvgPool2d((1,1))
self.agg_layer1 = AggBlock(layer_lvl=1, layers=layers[1], agg_type='concat')
self.agg_layer2 = AggBlock(layer_lvl=2, layers=layers[2], agg_type='concat')
self.agg_layer3 = AggBlock(layer_lvl=3, layers=layers[3], agg_type='concat')
elif self.DLAS_type == 'B':
self.channel_attention = True
self.layer4 = _make_layer(block, 256 * 3, 512, layers[3], stride=2)
self.ch_att4 = ChannelAttention(512, 512//4)
self.main_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.agg_avgpool = nn.AdaptiveAvgPool2d((1,1))
self.agg_layer1 = AggBlock(layer_lvl=1, layers=layers[1],
agg_type='concat', channel_attention=self.channel_attention)
self.agg_layer2 = AggBlock(layer_lvl=2, layers=layers[2],
agg_type='concat', channel_attention=self.channel_attention)
self.agg_layer3 = AggBlock(layer_lvl=3, layers=layers[3],
agg_type='concat', channel_attention=self.channel_attention)
elif self.DLAS_type == 'C':
self.layer4 = _make_layer(block, 256 * 3, 512, layers[3], stride=2)
self.main_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.agg_avgpool = nn.AdaptiveAvgPool2d((1,1))
self.agg_layer1 = AggBlock(layer_lvl=1, layers=layers[1], agg_type='concat')
self.agg_layer2 = AggBlock(layer_lvl=2, layers=layers[2], agg_type='concat')
self.agg_layer3 = AggBlock(layer_lvl=3, layers=layers[3], agg_type='concat')
self.main_bottleneck = nn.Conv2d(512, 128, kernel_size=1)
self.agg_bottleneck = nn.Conv2d(512,128, kernel_size=1)
self.rgb_backbone = ResNetCaffe(layers[:3], channel_attention=self.channel_attention)
self.depth_backbone = ResNetCaffe(layers[:3], channel_attention=self.channel_attention)
self.ir_backbone = ResNetCaffe(layers[:3], channel_attention=self.channel_attention)
if pretrained:
self._load_pretrained_weights()
def forward(self, x, y, z):
x_layer1, x_layer2, x_layer3 = self.rgb_backbone(x)
y_layer1, y_layer2, y_layer3 = self.depth_backbone(y)
z_layer1, z_layer2, z_layer3 = self.ir_backbone(z)
x = torch.cat((x_layer3,y_layer3,z_layer3), dim=1)
x = self.layer4(x)
if self.channel_attention:
x = self.ch_att4(x)
x = self.main_avgpool(x)
agg_layer1 = self.agg_layer1(None, x_layer1, y_layer1, z_layer1)
agg_layer2 = self.agg_layer2(agg_layer1, x_layer2, y_layer2, z_layer2)
agg_layer3 = self.agg_layer3(agg_layer2, x_layer3, y_layer3, z_layer3)
agg_x = self.agg_avgpool(agg_layer3)
if self.DLAS_type == 'C':
x = self.main_bottleneck(x)
agg_x = self.agg_bottleneck(agg_x)
x = x + agg_x
x = x.view(x.size(0), -1)
return x
def _load_pretrained_weights(self):
weights = torch.load('/media2/a.parkin/codes/Liveness_challenge/models/pretrained/resnet_caffe_mcs_orgl.pth', map_location='cpu')
self.rgb_backbone._load_pretrained_weights(weights)
self.depth_backbone._load_pretrained_weights(weights)
self.ir_backbone._load_pretrained_weights(weights)
self.agg_layer1._load_pretrained_weights(weights)
self.agg_layer2._load_pretrained_weights(weights)
self.agg_layer3._load_pretrained_weights(weights)
pretrained_weights = self.layer4.state_dict()
for k,v in pretrained_weights.items():
if 'num_batches_tracked' in k:
continue
weight = weights['layer4.' + k]
if (self.DLAS_type in ['A', 'B', 'C']) and (k == '0.weight'):
weight = torch.cat((weight, weight, weight), dim=1)
pretrained_weights[k] = weight
self.layer4.load_state_dict(pretrained_weights)
def resnetDLAS_A(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetDLAS(BasicBlock, [1, 2, 5, 3], DLAS_type='A', pretrained=pretrained)
return model
def resnetDLAS_B(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetDLAS(BasicBlock, [1, 2, 5, 3], DLAS_type='B', pretrained=pretrained)
return model
def resnetDLAS_C(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetDLAS(BasicBlock, [1, 2, 5, 3], DLAS_type='C', pretrained=pretrained)
return model
def test():
model = resnetDLAS_C(pretrained=True)
rgb = torch.rand(2, 3, 112, 112)
ir = torch.rand(2, 3, 112, 112)
depth = torch.rand(2, 3, 112, 112)
print(model(rgb, depth, ir).size())
|
1705066
|
import os
import uuid
from django.db.models import CharField, FileField, BooleanField
from django.utils import timezone
from apps.vadmin.op_drf.models import CoreModel
def files_path(instance, filename):
return '/'.join(['system', timezone.now().strftime("%Y-%m-%d"), str(uuid.uuid4()) + os.path.splitext(filename)[-1]])
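# e.g. files_path(instance, "report.pdf") -> "system/<YYYY-MM-DD>/<uuid4>.pdf" (date is the upload day)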
class SaveFile(CoreModel):
name = CharField(max_length=128, verbose_name="文件名称", null=True, blank=True)
type = CharField(max_length=200, verbose_name="文件类型", null=True, blank=True)
size = CharField(max_length=64, verbose_name="文件大小", null=True, blank=True)
    address = CharField(max_length=16, verbose_name="存储位置", null=True, blank=True) # local, Alibaba Cloud, Tencent Cloud, ...
    source = CharField(max_length=16, verbose_name="文件来源", null=True, blank=True) # exported file, user upload, ...
oss_url = CharField(max_length=200, verbose_name="OSS地址", null=True, blank=True)
status = BooleanField(default=True, verbose_name="文件是否存在")
file = FileField(verbose_name="文件URL", upload_to=files_path, )
class Meta:
verbose_name = '文件管理'
verbose_name_plural = verbose_name
def __str__(self):
return f"{self.name}"
|
1705075
|
import magma as m
from loam.boards.tinyfpga import B2
b2 = B2()
b2.PIN4.input().on()
b2.PIN5.output().on()
main = b2.main()
m.wire( main.PIN4, main.PIN5 )
m.EndDefine()
|
1705091
|
import datetime
import os
from functools import wraps
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, SESSION_KEY
from django.contrib.sessions.backends.db import SessionStore
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase, TestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from xvfbwrapper import Xvfb
from orchestra.contrib.accounts.models import Account
from .python import random_ascii
class AppDependencyMixin(object):
DEPENDENCIES = ()
@classmethod
def setUpClass(cls):
current_app = cls.__module__.split('.tests.')[0]
INSTALLED_APPS = (
'orchestra',
'orchestra.contrib.accounts',
current_app
)
INSTALLED_APPS += cls.DEPENDENCIES
INSTALLED_APPS += (
# Third-party apps
'south',
'django_extensions',
'djcelery',
'djcelery_email',
'fluent_dashboard',
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'rest_framework',
# Django.contrib
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
settings.INSTALLED_APPS = INSTALLED_APPS
super(AppDependencyMixin, cls).setUpClass()
class BaseTestCase(TestCase, AppDependencyMixin):
def create_account(self, username='', superuser=False):
if not username:
username = '%s_superaccount' % random_ascii(5)
password = '<PASSWORD>'
if superuser:
return Account.objects.create_superuser(username, password=password, email='<EMAIL>')
return Account.objects.create_user(username, password=password, email='<EMAIL>')
class BaseLiveServerTestCase(AppDependencyMixin, LiveServerTestCase):
@classmethod
def setUpClass(cls):
        # Avoid problems with the overlapping menu when clicking
settings.ADMIN_TOOLS_MENU = 'admin_tools.menu.Menu'
cls.vdisplay = Xvfb()
cls.vdisplay.start()
cls.selenium = WebDriver()
super(BaseLiveServerTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
cls.vdisplay.stop()
super(BaseLiveServerTestCase, cls).tearDownClass()
def create_account(self, username='', superuser=False):
if not username:
username = '%s_superaccount' % random_ascii(5)
password = '<PASSWORD>'
self.account_password = password
if superuser:
return Account.objects.create_superuser(username, password=password, email='<EMAIL>')
return Account.objects.create_user(username, password=password, email='<EMAIL>')
def setUp(self):
from orm.api import Api
super(BaseLiveServerTestCase, self).setUp()
self.rest = Api(self.live_server_url + '/api/')
self.rest.enable_logging()
self.account = self.create_account(superuser=True)
def admin_login(self):
session = SessionStore()
        session[SESSION_KEY] = self.account.pk
session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
session.save()
## to set a cookie we need to first visit the domain.
self.selenium.get(self.live_server_url + '/admin/')
self.selenium.add_cookie(dict(
name=settings.SESSION_COOKIE_NAME,
value=session.session_key, #
path='/',
))
def rest_login(self):
self.rest.login(username=self.account.username, password=self.account_password)
def take_screenshot(self):
timestamp = datetime.datetime.now().isoformat().replace(':', '')
filename = 'screenshot_%s_%s.png' % (self.id(), timestamp)
path = '/home/orchestra/snapshots'
self.selenium.save_screenshot(os.path.join(path, filename))
def admin_delete(self, obj):
opts = obj._meta
app_label, model_name = opts.app_label, opts.model_name
delete = reverse('admin:%s_%s_delete' % (app_label, model_name), args=(obj.pk,))
url = self.live_server_url + delete
self.selenium.get(url)
confirmation = self.selenium.find_element_by_name('post')
confirmation.submit()
self.assertNotEqual(url, self.selenium.current_url)
def admin_disable(self, obj):
opts = obj._meta
app_label, model_name = opts.app_label, opts.model_name
change = reverse('admin:%s_%s_change' % (app_label, model_name), args=(obj.pk,))
url = self.live_server_url + change
self.selenium.get(url)
is_active = self.selenium.find_element_by_id('id_is_active')
is_active.click()
save = self.selenium.find_element_by_name('_save')
save.submit()
self.assertNotEqual(url, self.selenium.current_url)
def admin_change_password(self, obj, password):
opts = obj._meta
app_label, model_name = opts.app_label, opts.model_name
change_password = reverse('admin:%s_%s_change_password' % (app_label, model_name), args=(obj.pk,))
url = self.live_server_url + change_password
self.selenium.get(url)
password_field = self.selenium.find_element_by_id('id_password1')
password_field.send_keys(password)
password_field = self.selenium.find_element_by_id('id_password2')
password_field.send_keys(password)
password_field.submit()
self.assertNotEqual(url, self.selenium.current_url)
def snapshot_on_error(test):
@wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except:
self = args[0]
self.take_screenshot()
raise
return inner
def save_response_on_error(test):
@wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except:
self = args[0]
timestamp = datetime.datetime.now().isoformat().replace(':', '')
filename = '%s_%s.html' % (self.id(), timestamp)
path = '/home/orchestra/snapshots'
with open(os.path.join(path, filename), 'w') as dumpfile:
dumpfile.write(self.rest.last_response.content)
raise
return inner
|
1705107
|
from wagtail.admin.edit_handlers import FieldPanel
from wagtailleafletwidget.app_settings import (
LEAFLET_WIDGET_ZOOM
)
from wagtailleafletwidget.widgets import (
GeoField,
)
class GeoPanel(FieldPanel):
def __init__(self, *args, **kwargs):
self.classname = kwargs.pop('classname', "")
self.hide_latlng = kwargs.pop('hide_latlng', False)
self.zoom = kwargs.pop('zoom', LEAFLET_WIDGET_ZOOM)
super().__init__(*args, **kwargs)
def widget_overrides(self):
field = self.model._meta.get_field(self.field_name)
srid = getattr(field, 'srid', 4326)
return {
self.field_name: GeoField(
hide_latlng=self.hide_latlng,
zoom=self.zoom,
srid=srid,
id_prefix='id_',
used_in='GeoPanel',
)
}
def clone(self):
return self.__class__(
field_name=self.field_name,
classname=self.classname,
hide_latlng=self.hide_latlng,
zoom=self.zoom,
)
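# Illustrative usage on a Wagtail page model (the "location" field name is hypothetical):
#   content_panels = Page.content_panels + [
#       GeoPanel('location', zoom=12, hide_latlng=True),
#   ]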
|
1705159
|
from .utils import fix_unicode
from .utils import swap_kar_location
__all__ = ['from_unicode']
CHAR_TABLE = {
2437: 'A',
2438: 'Aw',
2439: 'B',
2440: 'C',
2441: 'D',
2442: 'E',
2443: 'F',
2447: 'G',
2448: 'H',
2451: 'I',
2452: 'J',
2453: 'K',
2454: 'L',
2455: 'M',
2456: 'N',
2457: 'O',
2458: 'P',
2459: 'Q',
2460: 'R',
2461: 'S',
2462: 'T',
2463: 'U',
2464: 'V',
2465: 'W',
2466: 'X',
2467: 'Y',
2468: 'Z',
2469: 'a',
2470: 'b',
2471: 'c',
2472: 'd',
2474: 'e',
2475: 'f',
2476: 'g',
2477: 'h',
2478: 'i',
2479: 'j',
2480: 'k',
2482: 'l',
2486: 'm',
2487: 'n',
2488: 'o',
2489: 'p',
2524: 'r',
2525: 's',
2527: 't',
2510: 'u',
2434: 'v',
2494: 'w',
2495: 'x',
2496: 'y',
2509: 'z',
2534: '0',
2535: '1',
2536: '2',
2537: '3',
2538: '4',
2539: '5',
2540: '6',
2541: '7',
2542: '8',
2543: '9'
}
def from_unicode(s: str) -> str:
s = fix_unicode(s)
# s = replace_conj(s)
s = swap_kar_location(s)
# s = handle_surrounding_char(s)
s = s.translate(CHAR_TABLE)
return s
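if __name__ == '__main__':
    # Quick sanity check of the raw table (kar reordering does not apply to digits):
    bengali_2021 = ''.join(chr(c) for c in (2536, 2534, 2536, 2535))  # Bengali digits 2, 0, 2, 1
    print(bengali_2021.translate(CHAR_TABLE))  # -> 2021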
|
1705171
|
from typing import Optional
from ruptures.base import BaseCost
from ruptures.detection import Binseg
from sklearn.linear_model import LinearRegression
from etna.transforms.decomposition.change_points_trend import ChangePointsTrendTransform
from etna.transforms.decomposition.change_points_trend import TDetrendModel
class BinsegTrendTransform(ChangePointsTrendTransform):
"""BinsegTrendTransform uses :py:class:`ruptures.detection.Binseg` model as a change point detection model.
Warning
-------
This transform can suffer from look-ahead bias. For transforming data at some timestamp
it uses information from the whole train part.
"""
def __init__(
self,
in_column: str,
detrend_model: TDetrendModel = LinearRegression(),
model: str = "ar",
custom_cost: Optional[BaseCost] = None,
min_size: int = 2,
jump: int = 1,
n_bkps: int = 5,
pen: Optional[float] = None,
epsilon: Optional[float] = None,
):
"""Init BinsegTrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
detrend_model:
model to get trend in data
model:
binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None.
custom_cost:
binseg custom cost function
min_size:
minimum segment length necessary to decide it is a stable trend segment
jump:
jump value can speed up computations: if ``jump==k``,
the algo will use every k-th value for change points search.
n_bkps:
number of change points to find
pen:
penalty value (>0)
epsilon:
reconstruction budget (>0)
"""
self.model = model
self.custom_cost = custom_cost
self.min_size = min_size
self.jump = jump
self.n_bkps = n_bkps
self.pen = pen
self.epsilon = epsilon
super().__init__(
in_column=in_column,
change_point_model=Binseg(
model=self.model, custom_cost=self.custom_cost, min_size=self.min_size, jump=self.jump
),
detrend_model=detrend_model,
n_bkps=self.n_bkps,
pen=self.pen,
epsilon=self.epsilon,
)
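# Minimal usage sketch (assumes an etna TSDataset `ts` built elsewhere with the same library version):
#   transform = BinsegTrendTransform(in_column="target", n_bkps=5)
#   ts.fit_transform([transform])  # detrends "target" piecewise between detected change points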
|
1705189
|
import numpy as np
import pandas as pd
import xarray as xr
import typing as tp
NdType = tp.Union[np.ndarray, pd.DataFrame, xr.DataArray, pd.Series]
NdTupleType = tp.Union[
tp.Tuple[NdType],
tp.Tuple[NdType, NdType],
tp.Tuple[NdType, NdType, NdType],
tp.Tuple[NdType, NdType, NdType, NdType],
]
XR_TIME_DIMENSION = "time"
def nd_universal_adapter(d1_function, nd_args: NdTupleType, plain_args: tuple) -> NdType:
if isinstance(nd_args[0], np.ndarray):
return nd_np_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.DataFrame):
return nd_pd_df_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.Series):
return nd_pd_s_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], xr.DataArray):
return nd_xr_da_adapter(d1_function, nd_args, plain_args)
raise Exception("unsupported")
def nd_np_adapter(d1_function, nd_args: tp.Tuple[np.ndarray], plain_args: tuple) -> np.ndarray:
shape = nd_args[0].shape
if len(shape) == 1:
args = nd_args + plain_args
return d1_function(*args)
nd_args_2d = tuple(a.reshape(-1, shape[-1]) for a in nd_args)
result2d = np.empty_like(nd_args_2d[0], )
for i in range(nd_args_2d[0].shape[0]):
slices = tuple(a[i] for a in nd_args_2d)
args = slices + plain_args
result2d[i] = d1_function(*args)
return result2d.reshape(shape)
def nd_pd_df_adapter(d1_function, nd_args: tp.Tuple[pd.DataFrame], plain_args: tuple) -> pd.DataFrame:
np_nd_args = tuple(a.to_numpy().transpose() for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.DataFrame(np_result, columns=nd_args[0].columns, index=nd_args[0].index)
def nd_pd_s_adapter(d1_function, nd_args: tp.Tuple[pd.Series], plain_args: tuple) -> pd.Series:
np_nd_args = tuple(a.to_numpy() for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.Series(np_result, nd_args[0].index)
def nd_xr_da_adapter(d1_function, nd_args: tp.Tuple[xr.DataArray], plain_args: tuple) -> xr.DataArray:
origin_dims = nd_args[0].dims
transpose_dims = tuple(i for i in origin_dims if i != XR_TIME_DIMENSION) + (XR_TIME_DIMENSION,)
np_nd_args = tuple(a.transpose(*transpose_dims).values for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
return xr.DataArray(np_result, dims=transpose_dims, coords=nd_args[0].coords).transpose(*origin_dims)
def nd_to_1d_universal_adapter(np_function, nd_args: NdTupleType, plain_args: tuple) -> NdType:
if isinstance(nd_args[0], np.ndarray):
        return nd_to_1d_np_adapter(np_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.DataFrame):
return nd_to_1d_pd_df_adapter(np_function, nd_args, plain_args)
if isinstance(nd_args[0], xr.DataArray):
return nd_to_1d_xr_da_adapter(np_function, nd_args, plain_args)
raise Exception("unsupported")
def nd_to_1d_np_adapter(np_function, nd_args: tp.Tuple[np.ndarray], plain_args: tuple) -> np.ndarray:
args = nd_args + plain_args
return np_function(*args)
def nd_to_1d_pd_df_adapter(np_function, nd_args: tp.Tuple[pd.DataFrame], plain_args: tuple) -> pd.Series:
np_nd_args = tuple(a.to_numpy().transpose() for a in nd_args)
np_result = nd_to_1d_np_adapter(np_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.Series(np_result, index=nd_args[0].index)
def nd_to_1d_xr_da_adapter(np_function, nd_args: tp.Tuple[xr.DataArray], plain_args: tuple) -> xr.DataArray:
origin_dims = nd_args[0].dims
transpose_dims = tuple(i for i in origin_dims if i != XR_TIME_DIMENSION) + (XR_TIME_DIMENSION,)
np_nd_args = tuple(a.transpose(*transpose_dims).values for a in nd_args)
np_result = nd_to_1d_np_adapter(np_function, np_nd_args, plain_args)
return xr.DataArray(
np_result,
dims=[XR_TIME_DIMENSION],
coords=[nd_args[0].coords[XR_TIME_DIMENSION]]
)
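if __name__ == "__main__":
    # Tiny self-check with a hypothetical 1-D function (not part of the original module): the
    # adapter lifts a function written for one 1-D series over every column of a DataFrame.
    def d1_cumsum(series: np.ndarray) -> np.ndarray:
        return np.cumsum(series)

    frame = pd.DataFrame(np.ones((4, 2)), columns=["a", "b"])
    print(nd_universal_adapter(d1_cumsum, (frame,), ()))  # every column becomes 1, 2, 3, 4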
|
1705216
|
import os
import tempfile
import unittest
import numpy as np
from keras_pos_embd.backend import keras
from keras_pos_embd import TrigPosEmbedding
class TestSinCosPosEmbd(unittest.TestCase):
def test_invalid_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=5,
)
def test_missing_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
)
def test_brute(self):
seq_len = np.random.randint(1, 10)
embd_dim = np.random.randint(1, 20) * 2
indices = np.expand_dims(np.arange(seq_len), 0)
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len,),
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=embd_dim,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(indices)[0].tolist()
for i in range(seq_len):
for j in range(embd_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = np.sin(i / 10000.0 ** (float(j) / embd_dim))
else:
expect = np.cos(i / 10000.0 ** ((j - 1.0) / embd_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embd_dim, i, j, expect, actual))
def test_add(self):
seq_len = np.random.randint(1, 10)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, embed_dim))
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len, embed_dim),
mode=TrigPosEmbedding.MODE_ADD,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(inputs)[0].tolist()
for i in range(seq_len):
for j in range(embed_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = 1.0 + np.sin(i / 10000.0 ** (float(j) / embed_dim))
else:
expect = 1.0 + np.cos(i / 10000.0 ** ((j - 1.0) / embed_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embed_dim, i, j, expect, actual))
def test_concat(self):
seq_len = np.random.randint(1, 10)
feature_dim = np.random.randint(1, 20)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, feature_dim))
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len, feature_dim),
output_dim=embed_dim,
mode=TrigPosEmbedding.MODE_CONCAT,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(inputs)[0].tolist()
for i in range(seq_len):
for j in range(embed_dim):
actual = predicts[i][feature_dim + j]
if j % 2 == 0:
expect = np.sin(i / 10000.0 ** (float(j) / embed_dim))
else:
expect = np.cos(i / 10000.0 ** ((j - 1.0) / embed_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embed_dim, i, j, expect, actual))
|
1705298
|
print(data_view_config)
# { 'dataset_ids': ['1160'],
# 'group_by': [],
# 'model_type': 'default',
# 'descriptors': [
# {
# 'category': 'Real',
# 'descriptor_key': 'Temperature (Property Band gap)',
# 'units': '',
# 'lower_bound': 0.0,
# 'upper_bound': 1946.0
# }, {
# 'category': 'Categorical',
# 'descriptor_key': 'Property Color',
# 'descriptor_values': [
# 'Yellow', 'Pale Yellow', 'Violet', 'Gray', 'Amber', 'Orange-Red',
# 'Dark Brown', 'Red', 'Blue', 'White', 'Red-Yellow', 'Brown',
# 'Black', 'Ocher', 'Bluish', 'Bronze', 'Light Gray', 'Dark Green',
# 'Yellow-White', 'Copper-Red', 'Brown-Black', 'Yellow-Orange',
# 'Orange', 'Dark Gray', 'Dark Red'
# ],
# 'finite_set': True
# }, {
# 'category': 'Real',
# 'descriptor_key': 'Temperature (Property Color)',
# 'units': '',
# 'lower_bound': 0.0,
# 'upper_bound': 1746.0
# }, {
# 'category': 'Real',
# 'descriptor_key': 'Property Band gap',
# 'units': '',
# 'lower_bound': 0.0,
# 'upper_bound': 29.0
# }, {
# 'category': 'Inorganic',
# 'descriptor_key': 'formula',
# 'threshold': 1.0
# }
# ],
# 'builder': 'simple',
# 'roles': {
# 'formula': 'input',
# 'Property Band gap': 'output',
# 'Property Color': 'output',
# 'Temperature (Property Band gap)': 'input',
# 'Temperature (Property Color)': 'input'
# }
# }
# Create a data view
data_view_id = client.create(
data_view_config, 'My dataview name', 'The data view description'
)
# Or update an existing data view
client.update(data_view_id, data_view_config)
|
1705327
|
from biplist import *
import os
from test_utils import *
import unittest
class TestInvalidPlistFile(unittest.TestCase):
def setUp(self):
pass
def testEmptyFile(self):
try:
readPlist(data_path('empty_file.plist'))
self.fail("Should not successfully read empty plist.")
except NotBinaryPlistException as e:
pass
except InvalidPlistException as e:
pass
def testTooShort(self):
try:
readPlistFromString(b"bplist0")
self.fail("Should not successfully read plist which is too short.")
except InvalidPlistException as e:
pass
def testInvalid(self):
try:
readPlistFromString(b"bplist0-------------------------------------")
self.fail("Should not successfully read invalid plist.")
except InvalidPlistException as e:
pass
if __name__ == '__main__':
unittest.main()
|
1705328
|
from xml.sax.saxutils import escape
class Node:
def __init__(self, start, end, tag, parent, attrs):
self.start = start
self.end = end
self.tag = tag
self.parent = parent
self.children = []
if attrs:
self.attrs = attrs
else:
self.attrs = []
def __str__(self):
s = '(%d, %d) tag: %s' % (self.start, self.end, self.tag)
if self.attrs:
s += ' attrs: %s' % self.attrs
if self.children:
s += ' children: %d' % len(self.children)
return '{%s}' % s
def __repr__(self):
return self.__str__()
def add_child(self, child):
#assert child.start >= self.start and child.end <= self.end
self.children.append(child)
def get_id(tag, idmap):
if tag in idmap:
idmap[tag] += 1
else:
idmap[tag] = 1
return idmap[tag]
def annotate_doc(doc, nodelist):
inserts = []
idmap = {}
segmentmap = {}
flatten_nodes(nodelist, inserts, idmap, segmentmap)
#print len(segmentmap.keys())
return insert_markers(doc, inserts), segmentmap
def insert_markers(text, inserts):
sections = []
lastpos = 0
for pos, marker in inserts:
if pos > lastpos:
sections.append(escape(text[lastpos:pos]))
elif pos < lastpos:
print ("INVALID MARKER", pos, marker, inserts)
assert 0
sections.append(marker)
lastpos = pos
if lastpos < len(text):
sections.append(escape(text[lastpos:]))
return u''.join(sections)
def flatten_nodes(nodelist, inserts, idmap, segmentmap):
for node in nodelist:
attrlist = node.attrs[:]
#id1 = '%s_%d' % (node.tag, get_id(node.tag, idmap))
#attrlist.append(('id', id1))
if attrlist:
attrs = ' '.join('%s="%s"' % (attr[0], attr[1]) for attr in attrlist)
start_tag = '<%s %s>' % (node.tag, attrs)
else:
start_tag = '<%s>' % node.tag
inserts.append((node.start, start_tag))
if node.tag not in segmentmap:
segmentmap[node.tag] = []
segmentmap[node.tag].append(node.start)
flatten_nodes(node.children, inserts, idmap, segmentmap)
inserts.append((node.end, '</%s>\n' % node.tag))
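if __name__ == '__main__':
    # Minimal illustration of insert_markers (hypothetical input, not part of the original module):
    # wrap the first word in a <b> element; text outside the markers is XML-escaped.
    print(insert_markers('hello world & co', [(0, '<b>'), (5, '</b>')]))
    # -> <b>hello</b> world &amp; co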
|
1705346
|
import logging
from pytorch_pretrained_bert import BertTokenizer
logger = logging.getLogger(__name__)
def get_tokenizer(tokenizer_name):
logger.info(f"Loading Tokenizer {tokenizer_name}")
if tokenizer_name.startswith("bert"):
do_lower_case = "uncased" in tokenizer_name
tokenizer = BertTokenizer.from_pretrained(
tokenizer_name, do_lower_case=do_lower_case
)
return tokenizer
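# Illustrative usage (downloads the published vocabulary on first call):
#   tokenizer = get_tokenizer("bert-base-uncased")
#   tokenizer.tokenize("Hello world")  # -> ['hello', 'world'] for the uncased vocabulary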
|
1705372
|
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess
class PabotArgumentsOutputsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_tests_with(self, testfile, arg1file, arg2file):
robot_file = open("{}/test.robot".format(self.tmpdir), "w")
robot_file.write(textwrap.dedent(testfile))
robot_file.close()
with open("{}/arg1.txt".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(arg1file))
with open("{}/arg2.txt".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(arg2file))
process = subprocess.Popen(
[
sys.executable,
"-m" "pabot.pabot",
"--processes",
"2",
"--argumentfile1",
"{}/arg1.txt".format(self.tmpdir),
"--argumentfile2",
"{}/arg2.txt".format(self.tmpdir),
"--outputdir",
self.tmpdir,
"--output",
"test.xml",
"{}/test.robot".format(self.tmpdir),
],
cwd=self.tmpdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return process.communicate(), process.returncode
def test_argumentfile_outputs(self):
(stdout, stderr), rc = self._run_tests_with(
"""
*** Test Cases ***
Test 1
Log ${VALUE}
Should Be True ${VALUE} == 2
""",
"""
--variable VALUE:1
""",
"""
--variable VALUE:2
""",
)
self.assertEqual(rc, 1)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertIn("failed", stdout, stderr)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertIn(b"failed", stdout, stderr)
|
1705385
|
import requests
import argparse
import datetime
from app import db
from models.cran_package import CranPackage
from models.pypi_package import PypiPackage
from models.github_repo import GithubRepo
from models.github_api import make_ratelimited_call
import update
from util import safe_commit
def add_all_new_packages(package_class):
all_current_package_id_rows = db.session.query(package_class.id).all()
all_current_package_ids = [row[0] for row in all_current_package_id_rows]
all_names = package_class.get_all_live_package_names()
for package_name in all_names:
new_package = package_class(project_name=package_name)
if new_package.id not in all_current_package_ids:
print "\n\nadded new package:", new_package.id
# new_package.refresh()
db.session.add(new_package)
safe_commit(db)
print len(all_names)
def add_all_new_github_repos(language):
all_current_github_repo_rows = db.session.query(GithubRepo.id).filter(GithubRepo.language==language).all()
all_current_github_repo_ids = [row[0] for row in all_current_github_repo_rows]
end_date = datetime.datetime(2015, 11, 01)
start_date = datetime.datetime.utcnow()
date = start_date
while date >= end_date:
prev_date = date - datetime.timedelta(days=1)
# The sort field. One of stars, forks, or updated.
# max of 100 returned
# authenticated rate limit: 30/min
sort_fragements = [
"sort=stars&order=desc",
"sort=updated&order=desc",
"sort=forks&order=desc",
"sort=stars&order=asc",
"sort=updated&order=asc",
"sort=forks&order=asc"
]
for sort_fragment in sort_fragements:
url_template = "https://api.github.com/search/repositories?q=created:%22{prev_date}%20..%20{date}%22%20language:{language}&per_page=1000&{sort_fragment}"
url = url_template.format(
language=language,
date=date.isoformat()[0:10],
prev_date=prev_date.isoformat()[0:10],
sort_fragment=sort_fragment)
print url
data = make_ratelimited_call(url)
print date.isoformat()[0:10], data["total_count"], data["incomplete_results"]
date = prev_date
for repo_dict in data["items"]:
new_repo = GithubRepo(login=repo_dict["owner"]["login"], repo_name=repo_dict["name"], language=language)
new_repo.api_raw = repo_dict
print "new_repo:", new_repo
if new_repo.id not in all_current_github_repo_ids:
print "added new repo from {}: {}\n".format(date.isoformat()[0:10], new_repo.id)
db.session.add(new_repo)
all_current_github_repo_ids.append(new_repo.id)
safe_commit(db)
def recalculate_everything(parsed_args):
if parsed_args.language=="r":
package_class = CranPackage
else:
package_class = PypiPackage
parsed_args.fn = u"{}.recalculate".format(package_class.__name__)
print "parsed_args.fn", parsed_args.fn
update.run_update(parsed_args)
def refresh(parsed_args):
if parsed_args.language=="r":
package_class = CranPackage
else:
package_class = PypiPackage
parsed_args.fn = u"{}.refresh".format(package_class.__name__)
print "parsed_args.fn", parsed_args.fn
update.run_update(parsed_args)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run stuff.")
parser.add_argument('language', help="r or python")
parsed_args = update.parse_update_optional_args(parser)
add_all_new_github_repos(parsed_args.language)
# add_all_new_packages(PypiPackage)
# add_all_new_packages(CranPackage)
# start_date = ""
# end_date = ""
# add_all_new_github_repos("R", start_date, end_date)
# call run_igraph.sh
# go through all
# recalculate everything
# recalculate_everything(parsed_args)
|
1705393
|
def Settings( **kwargs ):
assert kwargs[ 'language' ] == 'java'
return {
'ls': { 'java.rename.enabled' : False },
'formatting_options': { 'org.eclipse.jdt.core.formatter.lineSplit': 30, }
}
|
1705398
|
from src.alerter.alert_code.alert_code import AlertCode
from src.alerter.alert_code.github_alert_code import GithubAlertCode
from src.alerter.alert_code.internal_alert_code import InternalAlertCode
from src.alerter.alert_code.system_alert_code import SystemAlertCode
|
1705405
|
from apps.search_head_api.views.base import SearchHeadAPIView
from . import capturenode, search, tests
__all__ = ['capturenode', 'search', 'tests']
|
1705412
|
import pytest
from dbt.tests.util import run_dbt
from tests.functional.simple_snapshot.fixtures import models_slow__gen_sql
snapshots_slow__snapshot_sql = """
{% snapshot my_slow_snapshot %}
{{
config(
target_database=var('target_database', database),
target_schema=schema,
unique_key='id',
strategy='timestamp',
updated_at='updated_at'
)
}}
select
id,
updated_at,
seconds
from {{ ref('gen') }}
{% endsnapshot %}
"""
test_snapshots_slow__test_timestamps_sql = """
/*
Assert that the dbt_valid_from of the latest record
is equal to the dbt_valid_to of the previous record
*/
with snapshot as (
select * from {{ ref('my_slow_snapshot') }}
)
select
snap1.id,
snap1.dbt_valid_from as new_valid_from,
snap2.dbt_valid_from as old_valid_from,
snap2.dbt_valid_to as old_valid_to
from snapshot as snap1
join snapshot as snap2 on snap1.id = snap2.id
where snap1.dbt_valid_to is null
and snap2.dbt_valid_to is not null
and snap1.dbt_valid_from != snap2.dbt_valid_to
"""
@pytest.fixture(scope="class")
def models():
return {"gen.sql": models_slow__gen_sql}
@pytest.fixture(scope="class")
def snapshots():
return {"snapshot.sql": snapshots_slow__snapshot_sql}
@pytest.fixture(scope="class")
def tests():
return {"test_timestamps.sql": test_snapshots_slow__test_timestamps_sql}
def test_slow(project):
results = run_dbt(["snapshot"])
assert len(results) == 1
results = run_dbt(["snapshot"])
assert len(results) == 1
results = run_dbt(["test"])
assert len(results) == 1
|
1705465
|
def hanoi(disks, source, auxiliary, target):
if disks == 1:
print('Move disk 1 from peg {} to peg {}.'.format(source, target))
return
hanoi(disks - 1, source, target, auxiliary)
print('Move disk {} from peg {} to peg {}.'.format(disks, source, target))
hanoi(disks - 1, auxiliary, source, target)
disks = int(input('Enter number of disks: '))
hanoi(disks, 'A', 'B', 'C')
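# Example session for two disks (pegs labelled A, B, C):
#   Enter number of disks: 2
#   Move disk 1 from peg A to peg B.
#   Move disk 2 from peg A to peg C.
#   Move disk 1 from peg B to peg C.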
|
1705487
|
CACHE_KEY_ASSET_TASK_LOG_PREFIX = "asset:task-log:" # + task.id
CACHE_KEY_AUTODJ_CURRENT_STOPSET = "autodj:current-stopset"
CACHE_KEY_AUTODJ_NO_REPEAT_ARTISTS = "autodj:no-repeat-artists"
CACHE_KEY_AUTODJ_NO_REPEAT_IDS = "autodj:no-repeat-ids"
CACHE_KEY_AUTODJ_REQUESTS = "autodj:requests"
CACHE_KEY_AUTODJ_STOPSET_LAST_FINISHED_AT = "autodj:stopset-last-finished-at"
CACHE_KEY_GCAL_LAST_SYNC = "gcal:last-sync"
CACHE_KEY_HARBOR_BAN_PREFIX = "harbor:ban:" # + user.id
CACHE_KEY_HARBOR_CONFIG_CONTEXT = "harbor:config-context"
CACHE_KEY_YTDL_UP2DATE = "youtube-dl:up2date"
CACHE_KEY_SET_PASSWORD_PREFIX = "user:set-password:"
REDIS_KEY_ROOM_INFO = "zoom-runner:room-info"
REDIS_KEY_SERVICE_LOGS = "service:logs"
|
1705625
|
from config import *
import pickle
import numpy as np
def prepare_mano_model():
"""
Convert the official MANO model into compatible format with this project.
"""
with open(OFFICIAL_MANO_PATH, 'rb') as f:
data = pickle.load(f, encoding='latin1')
params = {
'pose_pca_basis': np.array(data['hands_components']),
'pose_pca_mean': np.array(data['hands_mean']),
'J_regressor': data['J_regressor'].toarray(),
'skinning_weights': np.array(data['weights']),
# pose blend shape
'mesh_pose_basis': np.array(data['posedirs']),
'mesh_shape_basis': np.array(data['shapedirs']),
'mesh_template': np.array(data['v_template']),
'faces': np.array(data['f']),
'parents': data['kintree_table'][0].tolist(),
}
params['parents'][0] = None
with open(MANO_MODEL_PATH, 'wb') as f:
pickle.dump(params, f)
def prepare_smpl_model():
"""
Convert the official SMPL model into a format compatible with this project.
"""
with open(OFFICIAL_SMPL_PATH, 'rb') as f:
data = pickle.load(f, encoding='latin1')
params = {
# SMPL does not provide pose PCA
'pose_pca_basis': np.eye(23 * 3),
'pose_pca_mean': np.zeros(23 * 3),
'J_regressor': data['J_regressor'].toarray(),
'skinning_weights': np.array(data['weights']),
# pose blend shape
'mesh_pose_basis': np.array(data['posedirs']),
'mesh_shape_basis': np.array(data['shapedirs']),
'mesh_template': np.array(data['v_template']),
'faces': np.array(data['f']),
'parents': data['kintree_table'][0].tolist(),
}
params['parents'][0] = None
with open(SMPL_MODEL_PATH, 'wb') as f:
pickle.dump(params, f)
def prepare_smplh_model():
"""
Convert the official SMPLH model into a format compatible with this project.
"""
data = np.load(OFFICIAL_SMPLH_PATH)
params = {
# SMPLH does not provide pose PCA
'pose_pca_basis': np.eye(51 * 3),
'pose_pca_mean': np.zeros(51 * 3),
'J_regressor': data['J_regressor'],
'skinning_weights': np.array(data['weights']),
# pose blend shape
'mesh_pose_basis': np.array(data['posedirs']),
'mesh_shape_basis': np.array(data['shapedirs']),
'mesh_template': np.array(data['v_template']),
'faces': np.array(data['f']),
'parents': data['kintree_table'][0].tolist(),
}
params['parents'][0] = None
with open(SMPLH_MODEL_PATH, 'wb') as f:
pickle.dump(params, f)
if __name__ == '__main__':
prepare_smplh_model()
|
1705626
|
import hashlib
def md5sum(file_url):
md5sum = hashlib.md5()
with open(file_url,'rb') as f:
while True:
data = f.read(2048)
if not data:
break
md5sum.update(data)
return md5sum.hexdigest()
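# A minimal usage sketch (the file path and reference digest below are hypothetical):
# compute the checksum of a local file and compare it against a known value.
digest = md5sum('example.iso')
expected = 'd41d8cd98f00b204e9800998ecf8427e'
print('match' if digest == expected else 'mismatch', digest)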
|
1705628
|
import os, time
import asyncio
from fastapi import FastAPI
from easyjobs.workers.worker import EasyJobsWorker
server = FastAPI()
@server.on_event('startup')
async def setup():
worker = await EasyJobsWorker.create(
server,
'/ws/jobs',
server_secret='<KEY>',
manager_host='192.168.1.18',
manager_port=8220,
manager_secret='<KEY>',
manager_path='/ws/jobs',
jobs_queue='DEFAULT',
#debug=True
)
@worker.task(run_after='worker_b')
async def worker_a(a, b, c):
await asyncio.sleep(5)
return {'a': a, 'b': b, 'c': c}
@worker.task(run_after='worker_c')
async def worker_b(a, b, c):
await asyncio.sleep(5)
return {'a': a + 2, 'b': b + 2, 'c': c +2}
@worker.task(on_failure='failure_notify')
async def worker_c(a, b, c):
await asyncio.sleep(5)
result = {'a': a + 2, 'b': b + 2, 'c': c +2}
print('worker_c ', result)
raise Exception(f"fake error: {result}")
@worker.task()
async def failure_notify(job_failed):
worker.log.error(job_failed)
return job_failed
# 'WORKER_PORT', 'WORKER_PATH', 'WORKER_TASK_DIR'
os.environ['WORKER_PORT'] = '8221'
os.environ['WORKER_PATH'] = '/ws/jobs'
os.environ['WORKER_TASK_DIR'] = '/home/tso/Documents/python/easyjobs/easyjobs/'
@worker.task(subprocess=True)
async def basic_blocking(a, b, c):
pass
|
1705692
|
import sys
import os
sys.path.append(os.getcwd())
import time
from abc import ABCMeta
from .utils.util import plot_scores, print_schedule, read_file
from .base_algorithm import FlightAlgorithm
import random
from .fitness import *
class RandomSearch(FlightAlgorithm, metaclass=ABCMeta):
def __init__(self, domain=domain['domain'], fitness_function=fitness_function, seed=random.randint(10, 100),
seed_init=True, init=None, max_time=1000, epochs=100) -> None:
super().__init__(domain, fitness_function, seed, seed_init, init, max_time)
self.epochs = epochs
self.best_cost = sys.maxsize
self.best_solution = 0.0
"""
Implements the Random Search algorithm.
References:
[1]<NAME>. (1963). "The convergence of the random search method in the extremal control of a many parameter system". Automation and Remote Control. 24 (10): 1337–1342.
[2]<NAME>.; <NAME>. (1968). "Adaptive step size random search". IEEE Transactions on Automatic Control. 13 (3): 270–276. CiteSeerX 10.1.1.118.9779. doi:10.1109/tac.1968.1098903.
[3]<NAME>.; <NAME>. (1976). "Optimized relative step size random searches". Mathematical Programming. 10 (1): 230–244. doi:10.1007/bf01580669.
Attributes:
domain (list): List containing the upper and lower bounds, i.e. the domain of the inputs.
fitness_function (function): Fitness function of the given optimization problem.
seed (int, optional): Seed value of the random number generator. Defaults to a random integer value.
seed_init (bool, optional): True seeds only the population init generator; False seeds all generators.
init (list, optional): List for initializing the initial solution. Defaults to [].
epochs (int, optional): Number of times the algorithm runs. Defaults to 100.
best_cost: Stores the best cost across all iterations. Defaults to sys.maxsize.
best_solution: Stores the best solution across all iterations.
Returns:
list: List containing the best solution.
int: The final cost after running the algorithm.
list: List containing the best cost at each epoch.
int: The number of function evaluations (NFE) after running the algorithm.
int: Seed value used by the random generators.
"""
def get_base(self) -> str:
pass
def get_name(self) -> str:
return self.__class__.__name__
def run(self,domain,fitness_function,seed):
self.__init__(domain,fitness_function,seed,self.seed_init, self.init,self.max_time)
scores = []
nfe = 0
if len(self.init) > 0:
solution = self.init
else:
solution = [self.r_init.randint(self.domain[i][0], self.domain[i][1])
for i in range(len(self.domain))]
self.start_time = time.time()
for i in range(self.epochs):
if i != 0:
solution = [random.randint(self.domain[i][0], self.domain[i][1])
for i in range(len(self.domain))]
if not self.fitness_function.__name__ == 'fitness_function':
cost = self.fitness_function(solution)
else:
cost = self.fitness_function(solution, 'FCO')
nfe += 1
if cost < self.best_cost:
self.best_cost = cost
self.best_solution = solution
scores.append(self.best_cost)
if time.time()-self.start_time>self.max_time:
return self.best_solution, self.best_cost, scores, nfe, self.seed
return self.best_solution, self.best_cost, scores, nfe, self.seed
if __name__ == '__main__':
read_file('flights.txt')
rs = RandomSearch(max_time=0.00001)
soln, cost, scores, nfe, seed = rs.run(domain=domain['domain'], fitness_function=fitness_function, seed=5)
#plot_scores(scores,rs.get_name(),fname='flight_scheduling',save_fig=False)
#print_schedule(soln,'FCO')
|
1705707
|
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
import getpass
host_name = 'smtp.gmail.com'
port = 465
sender = '<EMAIL>'
password = getpass.getpass()
receiver = '<EMAIL>'
text = MIMEMultipart()
text['Subject'] = 'Test Attachment'
text['From'] = sender
text['To'] = receiver
txt = MIMEText('Sending a sample image.')
text.attach(txt)
f_path = '/home/student/Desktop/mountain.jpg'
with open(f_path, 'rb') as f:
img = MIMEImage(f.read())
img.add_header('Content-Disposition',
'attachment',
filename=os.path.basename(f_path))
text.attach(img)
s = smtplib.SMTP_SSL(host_name, port)
s.login(sender, password)
s.sendmail(sender, receiver, text.as_string())
s.quit()
print("Attachment sent successfully!")
|
1705730
|
from sqlalchemy import create_engine
import time
# This is a basic test for both protocols. The result set from flight and odbc must match
# TODO Naren: Add unit tests for flight
db_uri = "dremio+flight://dremio:dremio123@localhost:32010/dremio;SSL=0"
engine = create_engine(db_uri)
sql = 'SELECT * FROM flight."1m" limit 100 -- SQL Alchemy Flight Test '
start = time.process_time()
result = engine.execute(sql)
print(time.process_time() - start)
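# A minimal follow-up sketch, assuming the SQLAlchemy 1.x ResultProxy API used above:
# materialize the rows so they can later be compared against the ODBC result set.
rows = result.fetchall()
print('fetched {} rows via Flight'.format(len(rows)))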
|
1705736
|
from __future__ import unicode_literals
import collections
import datetime
import os
import tempfile
from copy import deepcopy
from itertools import chain
from shutil import rmtree
from ags_utils import (
create_session,
get_service_manifest,
get_service_status,
list_data_stores,
list_service_folders,
list_service_workspaces,
list_services,
restart_service,
test_service
)
from config_io import get_config, default_config_dir
from datasources import open_mxd, list_layers_in_mxd, get_layer_fields, get_layer_properties
from extrafilters import superfilter
from helpers import asterisk_tuple, empty_tuple
from logging_io import setup_logger
log = setup_logger(__name__)
def generate_service_inventory(
included_services=asterisk_tuple, excluded_services=empty_tuple,
included_service_folders=asterisk_tuple, excluded_service_folders=empty_tuple,
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
config_dir=default_config_dir
):
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
if len(env_names) == 0:
raise RuntimeError('No environments specified!')
for env_name in env_names:
env = user_config['environments'][env_name]
ags_instances = superfilter(env['ags_instances'].keys(), included_instances, excluded_instances)
log.info('Listing services on ArcGIS Server instances {}'.format(', '.join(ags_instances)))
for ags_instance in ags_instances:
ags_instance_props = env['ags_instances'][ags_instance]
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
service_folders = list_service_folders(server_url, token, session=session)
for service_folder in superfilter(service_folders, included_service_folders, excluded_service_folders):
for service in list_services(server_url, token, service_folder, session=session):
service_name = service['serviceName']
service_type = service['type']
if superfilter((service_name,), included_services, excluded_services):
yield dict(
env_name=env_name,
ags_instance=ags_instance,
service_folder=service_folder,
service_name=service_name,
service_type=service_type
)
def generate_data_stores_inventory(
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
config_dir=default_config_dir
):
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
if len(env_names) == 0:
raise RuntimeError('No environments specified!')
for env_name in env_names:
env = user_config['environments'][env_name]
ags_instances = superfilter(env['ags_instances'].keys(), included_instances, excluded_instances)
log.info('Listing data stores on ArcGIS Server instances {}'.format(', '.join(ags_instances)))
for ags_instance in ags_instances:
ags_instance_props = env['ags_instances'][ags_instance]
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
data_stores = list_data_stores(server_url, token, session=session)
for data_store in data_stores:
yield dict(
env_name=env_name,
ags_instance=ags_instance,
**data_store
)
def analyze_services(
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
included_service_folders=asterisk_tuple, excluded_service_folders=empty_tuple,
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_services=asterisk_tuple, excluded_services=empty_tuple,
warn_on_errors=True,
config_dir=default_config_dir
):
import arcpy
arcpy.env.overwriteOutput = True
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
for env_name in env_names:
log.debug('Analyzing services for environment {}'.format(env_name))
env = user_config['environments'][env_name]
for ags_instance in superfilter(env['ags_instances'], included_instances, excluded_instances):
ags_instance_props = user_config['environments'][env_name]['ags_instances'][ags_instance]
ags_connection = ags_instance_props['ags_connection']
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
service_folders = list_service_folders(server_url, token, session=session)
for service_folder in superfilter(service_folders, included_service_folders, excluded_service_folders):
for service in list_services(server_url, token, service_folder, session=session):
service_name = service['serviceName']
service_type = service['type']
if (
service_type in ('MapServer', 'GeocodeServer') and
superfilter((service_name,), included_services, excluded_services)
):
service_props = dict(
env_name=env_name,
ags_instance=ags_instance,
service_folder=service_folder,
service_name=service_name,
service_type=service_type
)
try:
service_manifest = get_service_manifest(server_url, token, service_name, service_folder, service_type, session=session)
service_props['file_path'] = file_path = service_manifest['resources'][0]['onPremisePath']
file_type = {
'MapServer': 'MXD',
'GeocodeServer': 'Locator'
}[service_type]
log.info(
'Analyzing {} service {}/{} on ArcGIS Server instance {} (Connection File: {}, {} Path: {})'
.format(service_type, service_folder, service_name, ags_instance, ags_connection, file_type, file_path)
)
if not arcpy.Exists(file_path):
raise RuntimeError('{} {} does not exist!'.format(file_type, file_path))
try:
tempdir = tempfile.mkdtemp()
log.debug('Temporary directory created: {}'.format(tempdir))
sddraft = os.path.join(tempdir, service_name + '.sddraft')
log.debug('Creating SDDraft file: {}'.format(sddraft))
if service_type == 'MapServer':
mxd = open_mxd(file_path)
analysis = arcpy.mapping.CreateMapSDDraft(
mxd,
sddraft,
service_name,
'FROM_CONNECTION_FILE',
ags_connection,
False,
service_folder
)
elif service_type == 'GeocodeServer':
locator_path = file_path
analysis = arcpy.CreateGeocodeSDDraft(
locator_path,
sddraft,
service_name,
'FROM_CONNECTION_FILE',
ags_connection,
False,
service_folder
)
else:
raise RuntimeError('Unsupported service type {}!'.format(service_type))
for key, log_method in (('messages', log.info), ('warnings', log.warn), ('errors', log.error)):
items = analysis[key]
severity = key[:-1].title()
if items:
log.info('----' + key.upper() + '---')
for ((message, code), layerlist) in items.iteritems():
code = '{:05d}'.format(code)
log_method(' {} (CODE {})'.format(message, code))
code = '="{}"'.format(code)
issue_props = dict(
severity=severity,
code=code,
message=message
)
if not layerlist:
yield dict(chain(
service_props.iteritems(),
issue_props.iteritems()
))
else:
log_method(' applies to:')
for layer in layerlist:
layer_name = layer.longName if hasattr(layer, 'longName') else layer.name
layer_props = dict(
dataset_name=layer.datasetName,
workspace_path=layer.workspacePath,
layer_name=layer_name
)
log_method(' {}'.format(layer_name))
yield dict(chain(
service_props.iteritems(),
issue_props.iteritems(),
layer_props.iteritems()
))
log_method('')
if analysis['errors']:
error_message = 'Analysis failed for service {}/{} at {:%#m/%#d/%y %#I:%M:%S %p}' \
.format(service_folder, service_name, datetime.datetime.now())
log.error(error_message)
raise RuntimeError(error_message, analysis['errors'])
finally:
log.debug('Cleaning up temporary directory: {}'.format(tempdir))
rmtree(tempdir, ignore_errors=True)
except StandardError as e:
log.exception(
'An error occurred while analyzing {} service {}/{} on ArcGIS Server instance {}'
.format(service_type, service_folder, service_name, ags_instance)
)
if not warn_on_errors:
raise
else:
yield dict(
severity='Error',
message=e.message,
**service_props
)
def list_service_layer_fields(
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
included_service_folders=asterisk_tuple, excluded_service_folders=empty_tuple,
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_services=asterisk_tuple, excluded_services=empty_tuple,
warn_on_errors=False,
config_dir=default_config_dir
):
import arcpy
arcpy.env.overwriteOutput = True
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
for env_name in env_names:
log.debug('Listing service layers and fields for environment {}'.format(env_name))
env = user_config['environments'][env_name]
for ags_instance in superfilter(env['ags_instances'], included_instances, excluded_instances):
ags_instance_props = user_config['environments'][env_name]['ags_instances'][ags_instance]
ags_connection = ags_instance_props['ags_connection']
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
service_folders = list_service_folders(server_url, token, session=session)
for service_folder in superfilter(service_folders, included_service_folders, excluded_service_folders):
for service in list_services(server_url, token, service_folder, session=session):
service_name = service['serviceName']
service_type = service['type']
if (
service_type == 'MapServer' and
superfilter((service_name,), included_services, excluded_services)
):
service_props = dict(
env_name=env_name,
ags_instance=ags_instance,
service_folder=service_folder,
service_name=service_name,
service_type=service_type,
ags_connection=ags_connection
)
try:
service_manifest = get_service_manifest(server_url, token, service_name, service_folder, service_type, session=session)
service_props['mxd_path'] = mxd_path = service_manifest['resources'][0]['onPremisePath']
log.info(
'Listing layers and fields for {service_type} service {service_folder}/{service_name} '
'on ArcGIS Server instance {ags_instance} '
'(Connection File: {ags_connection}, MXD Path: {mxd_path})'
.format(**service_props)
)
if not arcpy.Exists(mxd_path):
raise RuntimeError('MXD {} does not exist!'.format(mxd_path))
mxd = open_mxd(mxd_path)
for layer in list_layers_in_mxd(mxd):
if not (
(hasattr(layer, 'isGroupLayer') and layer.isGroupLayer) or
(hasattr(layer, 'isRasterLayer') and layer.isRasterLayer)
):
layer_name = getattr(layer, 'longName', layer.name)
try:
layer_props = get_layer_properties(layer)
except StandardError as e:
log.exception(
'An error occurred while retrieving properties for layer {} in MXD {}'
.format(layer_name, mxd_path)
)
if not warn_on_errors:
raise
else:
yield dict(
error='Error retrieving layer properties: {}'.format(e.message),
layer_name=layer_name,
**service_props
)
continue
try:
if layer_props['is_broken']:
raise RuntimeError(
'Layer\'s data source is broken (Layer: {}, Data Source: {})'.format(
layer_name,
getattr(layer, 'dataSource', 'n/a')
)
)
for field_props in get_layer_fields(layer):
field_props['needs_index'] = not field_props['has_index'] and (
field_props['in_definition_query'] or
field_props['in_label_class_expression'] or
field_props['in_label_class_sql_query'] or
field_props['field_name'] == layer_props['symbology_field'] or
field_props['field_type'] == 'Geometry'
)
yield dict(chain(
service_props.iteritems(),
layer_props.iteritems(),
field_props.iteritems()
))
except StandardError as e:
log.exception(
'An error occurred while listing fields for layer {} in MXD {}'
.format(layer_name, mxd_path)
)
if not warn_on_errors:
raise
else:
yield dict(chain(
service_props.iteritems(),
layer_props.iteritems()
),
error='Error retrieving layer fields: {}'.format(e.message)
)
except StandardError as e:
log.exception(
'An error occurred while listing layers and fields for '
'{service_type} service {service_folder}/{service_name} on '
'ArcGIS Server instance {ags_instance} (Connection File: {ags_connection})'
.format(**service_props)
)
if not warn_on_errors:
raise
else:
yield dict(
error=e.message,
**service_props
)
def find_service_dataset_usages(
included_datasets=asterisk_tuple, excluded_datasets=empty_tuple,
included_users=asterisk_tuple, excluded_users=empty_tuple,
included_databases=asterisk_tuple, excluded_databases=empty_tuple,
included_versions=asterisk_tuple, excluded_versions=empty_tuple,
included_services=asterisk_tuple, excluded_services=empty_tuple,
included_service_folders=asterisk_tuple, excluded_service_folders=empty_tuple,
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
config_dir=default_config_dir
):
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
if len(env_names) == 0:
raise RuntimeError('No environments specified!')
for env_name in env_names:
env = user_config['environments'][env_name]
ags_instances = superfilter(env['ags_instances'].keys(), included_instances, excluded_instances)
log.info('Finding service dataset usages on ArcGIS Server instances {}'.format(', '.join(ags_instances)))
for ags_instance in ags_instances:
ags_instance_props = env['ags_instances'][ags_instance]
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
service_folders = list_service_folders(server_url, token, session=session)
for service_folder in superfilter(service_folders, included_service_folders, excluded_service_folders):
for service in list_services(server_url, token, service_folder, session=session):
service_name = service['serviceName']
service_type = service['type']
service_props = dict(
env_name=env_name,
ags_instance=ags_instance,
service_folder=service_folder,
service_name=service_name,
service_type=service_type
)
if superfilter((service_name,), included_services, excluded_services):
for dataset_props in list_service_workspaces(
server_url,
token,
service_name,
service_folder,
service_type,
session=session
):
if (
superfilter((dataset_props['dataset_name'],), included_datasets, excluded_datasets) and
superfilter((dataset_props['user'],), included_users, excluded_users) and
superfilter((dataset_props['database'],), included_databases, excluded_databases) and
superfilter((dataset_props['version'],), included_versions, excluded_versions)
):
yield dict(chain(
service_props.iteritems(),
dataset_props.iteritems()
))
def restart_services(
included_services=asterisk_tuple, excluded_services=empty_tuple,
included_service_folders=asterisk_tuple, excluded_service_folders=empty_tuple,
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
include_running_services=True,
delay=30,
max_retries=3,
test_after_restart=True,
config_dir=default_config_dir
):
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
if len(env_names) == 0:
raise RuntimeError('No environments specified!')
for env_name in env_names:
env = user_config['environments'][env_name]
ags_instances = superfilter(env['ags_instances'].keys(), included_instances, excluded_instances)
log.info('Restarting services on ArcGIS Server instances {}'.format(', '.join(ags_instances)))
for ags_instance in ags_instances:
ags_instance_props = env['ags_instances'][ags_instance]
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
service_folders = list_service_folders(server_url, token, session=session)
for service_folder in superfilter(service_folders, included_service_folders, excluded_service_folders):
for service in list_services(server_url, token, service_folder, session=session):
service_name = service['serviceName']
service_type = service['type']
if superfilter((service_name,), included_services, excluded_services):
if not include_running_services:
status = get_service_status(server_url, token, service_name, service_folder, service_type, session=session)
configured_state = status.get('configuredState')
if configured_state == 'STARTED':
log.debug(
'Skipping restart of service {}/{} ({}) because its configured state is {} and include_running_services is {}'
.format(service_folder, service_name, service_type, configured_state, include_running_services)
)
continue
restart_service(server_url, token, service_name, service_folder, service_type, delay, max_retries, test_after_restart, session=session)
def test_services(
included_services=asterisk_tuple, excluded_services=empty_tuple,
included_service_folders=asterisk_tuple, excluded_service_folders=empty_tuple,
included_instances=asterisk_tuple, excluded_instances=empty_tuple,
included_envs=asterisk_tuple, excluded_envs=empty_tuple,
warn_on_errors=False,
config_dir=default_config_dir
):
user_config = get_config('userconfig', config_dir)
env_names = superfilter(user_config['environments'].keys(), included_envs, excluded_envs)
if len(env_names) == 0:
raise RuntimeError('No environments specified!')
for env_name in env_names:
env = user_config['environments'][env_name]
ags_instances = superfilter(env['ags_instances'].keys(), included_instances, excluded_instances)
log.info('Testing services on ArcGIS Server instances {}'.format(', '.join(ags_instances)))
for ags_instance in ags_instances:
ags_instance_props = env['ags_instances'][ags_instance]
server_url = ags_instance_props['url']
token = ags_instance_props['token']
proxies = ags_instance_props.get('proxies') or user_config.get('proxies')
with create_session(server_url, proxies=proxies) as session:
service_folders = list_service_folders(server_url, token, session=session)
for service_folder in superfilter(service_folders, included_service_folders, excluded_service_folders):
for service in list_services(server_url, token, service_folder, session=session):
service_name = service['serviceName']
service_type = service['type']
if superfilter((service_name,), included_services, excluded_services):
test_data = test_service(server_url, token, service_name, service_folder, service_type, warn_on_errors, session=session)
yield dict(
env_name=env_name,
ags_instance=ags_instance,
service_folder=service_folder,
service_name=service_name,
service_type=service_type,
**test_data
)
def normalize_services(services, default_service_properties=None, env_service_properties=None):
for service in services:
yield normalize_service(service, default_service_properties, env_service_properties)
def normalize_service(service, default_service_properties=None, env_service_properties=None):
is_mapping = isinstance(service, collections.Mapping)
service_name = service.keys()[0] if is_mapping else service
merged_service_properties = deepcopy(default_service_properties) if default_service_properties else {}
if env_service_properties:
log.debug(
'Overriding default service properties with environment-level properties for service {}'
.format(service_name)
)
merged_service_properties.update(env_service_properties)
if is_mapping:
service_properties = service.items()[0][1]
if service_properties:
log.debug('Overriding default service properties with service-level properties for service {}'.format(service_name))
merged_service_properties.update(service_properties)
else:
log.warn(
'No service-level properties specified for service {} even though it was specified as a mapping'
.format(service_name)
)
else:
log.debug('No service-level properties specified for service {}'.format(service_name))
service_type = merged_service_properties.get('service_type', 'MapServer')
return service_name, service_type, merged_service_properties
def get_source_info(services, source_dir, staging_dir, default_service_properties, env_service_properties):
log.debug(
'Getting source info for services {}, source directory: {}, staging directory {}'
.format(services, source_dir, staging_dir)
)
source_info = {}
errors = []
for (
service_name,
service_type,
service_properties
) in normalize_services(
services,
default_service_properties,
env_service_properties
):
service_info = source_info[service_name] = {
'source_file': None,
'staging_files': []
}
if staging_dir:
staging_files = service_info['staging_files']
# If multiple staging folders are provided, look for the source item in each staging folder
staging_dirs = (staging_dir,) if isinstance(staging_dir, basestring) else staging_dir
for _staging_dir in staging_dirs:
log.debug('Finding staging items in directory: {}'.format(_staging_dir))
if service_type == 'MapServer':
staging_file = os.path.abspath(os.path.join(_staging_dir, service_name + '.mxd'))
elif service_type == 'GeocodeServer':
staging_file = os.path.abspath(os.path.join(_staging_dir, service_name + '.loc'))
else:
log.debug('Unsupported service type {} of service {} will be skipped'.format(service_type, service_name))
if os.path.isfile(staging_file):
log.debug('Staging file found: {}'.format(staging_file))
staging_files.append(staging_file)
else:
log.debug('Staging file missing: {}'.format(staging_file))
if len(staging_files) == 0:
errors.append('- No staging file found for service {}'.format(service_name))
elif len(staging_files) > 1:
errors.append(
'- More than one staging file found for service {}: \n{}'
.format(
service_name,
'\n'.join(' - {}'.format(staging_file) for staging_file in staging_files)
)
)
if source_dir:
log.debug('Finding source files in directory: {}'.format(source_dir))
if service_type == 'MapServer':
source_file = os.path.abspath(os.path.join(source_dir, service_name + '.mxd'))
elif service_type == 'GeocodeServer':
source_file = os.path.abspath(os.path.join(source_dir, service_name + '.loc'))
else:
log.debug('Unsupported service type {} of service {} will be skipped'.format(service_type, service_name))
if os.path.isfile(source_file):
log.debug('Source file found: {}'.format(source_file))
service_info['source_file'] = source_file
else:
log.debug('Source file missing: {}'.format(source_file))
errors.append('- Source file {} for service {} does not exist!'.format(source_file, service_name))
return source_info, errors
|
1705742
|
def check_parameter_value(parameter, allowed, name):
"""Raise a ValueError if a parameter value is not in the set of
allowed values.
"""
if parameter not in allowed:
raise ValueError(f"Parameter '{name}' must be one of {', '.join(allowed)}.")
|
1705748
|
import collections.abc  # Mapping lives in collections.abc on Python 3 (removed from collections in 3.10)
from importlib import import_module
def import_string(dotted_path):
"""
Copied from Django's django.utils.module_loading.import_string to be able
to work framework independently.
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit(".", 1)
except ValueError as err: # pragma: no cover
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err: # pragma: no cover
raise ImportError(
'Module "%s" does not define a "%s" attribute/class'
% (module_path, class_name)
) from err
def dict_merge(dct, merge_dct, add_keys=True):
"""
Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
This version will return a copy of the dictionary and leave the original
arguments untouched.
The optional argument ``add_keys``, determines whether keys which are
present in ``merge_dict`` but not ``dct`` should be included in the
new dict.
:param dict dct: Dict onto which the merge is executed
:param dict merge_dct: Dict which is merged into dct
:param bool add_keys: whether to add new keys
:returns: The updated dict
:rtype: dict
"""
dct = dct.copy()
if not add_keys: # pragma: no cover
merge_dct = {k: merge_dct[k] for k in set(dct).intersection(set(merge_dct))}
for k, v in merge_dct.items():
if (
k in dct
and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.abc.Mapping)
):
dct[k] = dict_merge(dct[k], v, add_keys=add_keys)
else:
dct[k] = v
return dct
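# A minimal usage sketch for dict_merge: nested keys from the second dict are merged
# recursively while both original arguments are left untouched.
defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 6543}, "debug": True}
merged = dict_merge(defaults, overrides)
# merged == {"db": {"host": "localhost", "port": 6543}, "debug": True}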
|
1705773
|
import decimal
import numpy as np
from collections import deque
import torch
from config import cfg
from utils.timer import Timer
from utils.logger import logger_info
import utils.distributed as dist
from utils.distributed import sum_tensor
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 1.0 for k in topk]
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def time_string(seconds):
"""Converts time in seconds to a fixed-width string format."""
days, rem = divmod(int(seconds), 24 * 3600)
hrs, rem = divmod(rem, 3600)
mins, secs = divmod(rem, 60)
return "{0:02},{1:02}:{2:02}:{3:02}".format(days, hrs, mins, secs)
def gpu_mem_usage():
"""Computes the GPU memory usage for the current device (MB)."""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / 1024 / 1024
def float_to_decimal(data, prec=4):
"""Convert floats to decimals which allows for fixed width json."""
if isinstance(data, dict):
return {k: float_to_decimal(v, prec) for k, v in data.items()}
if isinstance(data, float):
return decimal.Decimal(("{:." + str(prec) + "f}").format(data))
else:
return data
class ScalarMeter(object):
"""Measures a scalar value (adapted from Detectron)."""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def reset(self):
self.deque.clear()
self.total = 0.0
self.count = 0
def add_value(self, value):
self.deque.append(value)
self.count += 1
self.total += value
def get_win_median(self):
return np.median(self.deque)
def get_win_avg(self):
return np.mean(self.deque)
def get_global_avg(self):
return self.total / self.count
class TrainMeter(object):
"""Measures training stats."""
def __init__(self, start_epoch, num_epochs, epoch_iters):
self.epoch_iters = epoch_iters
self.max_iter = (num_epochs - start_epoch) * epoch_iters
self.iter_timer = Timer()
self.loss = ScalarMeter(cfg.solver.log_interval)
self.loss_total = 0.0
self.lr = None
self.num_samples = 0
self.max_epoch = num_epochs
self.start_epoch = start_epoch
def reset(self, timer=False):
if timer:
self.iter_timer.reset()
self.loss.reset()
self.loss_total = 0.0
self.lr = None
self.num_samples = 0
def iter_tic(self):
self.iter_timer.tic()
def iter_toc(self):
self.iter_timer.toc()
def update_stats(self, loss, lr, mb_size):
self.loss.add_value(loss)
self.lr = lr
self.loss_total += loss * mb_size
self.num_samples += mb_size
def get_iter_stats(self, cur_epoch, cur_iter):
cur_iter_total = (cur_epoch - self.start_epoch) * self.epoch_iters + cur_iter + 1
eta_sec = self.iter_timer.average_time * (self.max_iter - cur_iter_total)
mem_usage = gpu_mem_usage()
stats = {
"epoch": "{}/{}".format(cur_epoch + 1, self.max_epoch),
"iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
"time_avg": self.iter_timer.average_time,
"eta": time_string(eta_sec),
"loss": self.loss.get_win_avg(),
"lr": self.lr,
"mem": int(np.ceil(mem_usage)),
}
return stats
def log_iter_stats(self, cur_epoch, cur_iter):
if (cur_iter + 1) % cfg.solver.log_interval != 0:
return
stats = self.get_iter_stats(cur_epoch, cur_iter)
info = "Epoch: {:s}, Iter: {:s}, loss: {:.4f}, lr: {:s}, time_avg: {:.4f}, eta: {:s}, mem: {:d}".format(\
stats["epoch"], stats["iter"], stats["loss"], stats["lr"], stats["time_avg"], stats["eta"], stats["mem"])
logger_info(info)
class TestMeter(object):
def __init__(self):
self.num_top1 = 0
self.num_top5 = 0
self.num_samples = 0
def reset(self):
self.num_top1 = 0
self.num_top5 = 0
self.num_samples = 0
def update_stats(self, num_top1, num_top5, mb_size):
self.num_top1 += num_top1
self.num_top5 += num_top5
self.num_samples += mb_size
def log_iter_stats(self, cur_epoch):
if cfg.distributed:
tensor_reduce = torch.tensor([self.num_top1 * 1.0, self.num_top5 * 1.0, self.num_samples * 1.0], device="cuda")
tensor_reduce = sum_tensor(tensor_reduce)
tensor_reduce = tensor_reduce.data.cpu().numpy()
num_top1 = tensor_reduce[0]
num_top5 = tensor_reduce[1]
num_samples = tensor_reduce[2]
else:
num_top1 = self.num_top1
num_top5 = self.num_top5
num_samples = self.num_samples
top1_acc = num_top1 * 1.0 / num_samples
top5_acc = num_top5 * 1.0 / num_samples
info = "Epoch: {:d}, top1_acc = {:.2%}, top5_acc = {:.2%} in {:d}".format(cur_epoch + 1, top1_acc, top5_acc, int(num_samples))
logger_info(info)
return top1_acc, top5_acc
|
1705797
|
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
# read data
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv('ml-100k/u.user', sep='|', names=u_cols,encoding='latin-1')
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv('ml-100k/u.data', sep='\t', names=r_cols,encoding='latin-1')
m_cols = ['movie_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
movies = pd.read_csv('ml-100k/u.item', sep='|', names=m_cols, usecols=range(5),encoding='latin-1')
# data merge
movie_ratings = pd.merge(movies, ratings)
lens = pd.merge(movie_ratings, users)
movie_stats = lens.groupby('title').agg({'rating': [np.size, np.mean]})
atleast_100 = movie_stats['rating']['size'] >= 100
print( movie_stats[atleast_100].sort_values([('rating', 'mean')], ascending=False)[:10] )
'''The statement above is equivalent to the following SQL:
SELECT title, COUNT(1) size, AVG(rating) mean
FROM lens
GROUP BY title
HAVING COUNT(1) >= 100
ORDER BY 3 DESC
LIMIT 10;'''
|
1705846
|
import os
from typing import Any, Dict, Iterable, List, Optional, TYPE_CHECKING
import manta_lab as ml
import manta_lab.base.packet as pkt
if TYPE_CHECKING:
from multiprocessing.process import BaseProcess
from queue import Queue
from manta_lab.api import MantaAPI
from manta_lab.base.packet import Packet, RequestPacket
from ..manta_artifact import Artifact
from ..manta_run import Run
"""
The overall architecture will change in the next version:
Interface -> Handler -> Store (future implementation) -> Sender -> RecordStreamer
Interface: creates packets and passes them to the Handler
Handler: handles packets and executes the more complex logic
Sender: sends packets or requests to the server
"""
def _wrap_packet(packet):
p = pkt.Packet.init_from(packet)
return p
def _wrap_request_packet(packet):
rp = pkt.RequestPacket.init_from(packet)
return rp
class InterfaceBase(object):
_run: Optional["Run"]
_api: Optional["MantaAPI"]
_drop: bool
def __init__(self, api: "MantaAPI" = None) -> None:
self._api = api
self._run = None
self._drop = False
def set_api(self, api: "MantaAPI") -> None:
self._api = api
def hack_set_run(self, run: "Run") -> None:
self._run = run
def join(self) -> None:
if self._drop:
return
_ = self._publish_shutdown()
def _publish_shutdown(self) -> None:
raise NotImplementedError()
def _publish(self, record: "Packet", local: bool = None) -> None:
raise NotImplementedError()
""" publish functions, doesnt expect response. just let packet goes"""
def _publish_history(self, history: pkt.HistoryPacket) -> None:
raise NotImplementedError()
def publish_history(self, run, data: dict, step: int):
data = ml.dtypes.history_data_to_json(run, data, step=step)
items = {}
for k, v in data.items():
items[k], _ = ml.util.json_friendly(v)
run.summary.update(items)
history = pkt.HistoryPacket(item=items)
self._publish_history(history)
def _publish_stats(self, stats: pkt.StatsPacket) -> None:
raise NotImplementedError()
def publish_stats(self, data: dict):
# TODO: sync step with history
stats = pkt.StatsPacket(item=data)
self._publish_stats(stats)
def _publish_console(self, console: pkt.ConsolePacket) -> None:
raise NotImplementedError()
def publish_console(self, steam, lines):
console = pkt.ConsolePacket(steam=steam, lines=lines)
self._publish_console(console)
def _publish_summary(self, summary: pkt.SummaryPacket) -> None:
raise NotImplementedError()
def publish_summary(self, data: dict):
summary = pkt.SummaryPacket(summary=data)
self._publish_summary(summary)
def _publish_meta(self, meta: pkt.MetaPacket) -> None:
raise NotImplementedError()
def publish_meta(self, data: dict):
meta = pkt.MetaPacket(metadata=data)
self._publish_meta(meta)
def _publish_config(self, config: pkt.ConfigPacket) -> None:
raise NotImplementedError()
def publish_config(self, data: dict):
config = pkt.ConfigPacket(config=data)
self._publish_config(config)
def _publish_artifact(self, artifact: pkt.ArtifactRequestPacket) -> str:
raise NotImplementedError()
def publish_artifact(self, artifact: "Artifact") -> str:
packet = artifact.as_packet()
resp = self._publish_artifact(packet)
return resp
""" communication functions, expect response."""
def _communicate_artifact(self, artifact: pkt.ArtifactRequestPacket) -> str:
raise NotImplementedError()
def communicate_artifact(self, artifact: "Artifact") -> str:
packet = artifact.as_packet()
resp = self._communicate_artifact(packet)
return resp
|