| id | content |
|---|---|
168322
|
import networkx
import numpy
import chainer
from chainer_chemistry.dataset.graph_dataset.base_graph_dataset import PaddingGraphDataset, SparseGraphDataset # NOQA
from chainer_chemistry.dataset.graph_dataset.base_graph_data import PaddingGraphData, SparseGraphData # NOQA
from chainer_chemistry.dataset.graph_dataset.feature_converters import batch_without_padding # NOQA
class BaseNetworkxPreprocessor(object):
"""Base class to preprocess `Networkx::Graph` object"""
def __init__(self, *args, **kwargs):
pass
def get_x(self, graph):
if 'x' in graph.graph:
x = graph.graph['x']
else:
feature_dim, = graph.nodes[0]['x'].shape
x = numpy.empty((graph.number_of_nodes(), feature_dim),
dtype=numpy.float32)
for v, data in graph.nodes.data():
x[v] = data['x']
return x
def get_y(self, graph):
if 'y' in graph.graph:
y = graph.graph['y']
else:
y = numpy.empty(graph.number_of_nodes(), dtype=numpy.int32)
for v, data in graph.nodes.data():
y[v] = data['y']
return y
class BasePaddingNetworkxPreprocessor(BaseNetworkxPreprocessor):
"""Base class to preprocess `Networkx::Graph` into `PaddingGraphDataset`
""" # NOQA
def __init__(self, use_coo=False, *args, **kwargs):
self.use_coo = use_coo
def construct_data(self, graph):
"""Construct `PaddingGraphData` from `Networkx::Graph`
Args:
graph (Networkx::Graph): graph
Returns:
PaddingGraphData: graph data of padding pattern
"""
if not self.use_coo:
return PaddingGraphData(
x=self.get_x(graph),
adj=networkx.to_numpy_array(graph, dtype=numpy.float32),
y=self.get_y(graph),
label_num=graph.graph['label_num']
)
n_edges = graph.number_of_edges() * 2
        row = numpy.empty(n_edges, dtype=numpy.int32)
        col = numpy.empty(n_edges, dtype=numpy.int32)
        data = numpy.ones(n_edges, dtype=numpy.float32)
for i, edge in enumerate(graph.edges):
row[2 * i] = edge[0]
row[2 * i + 1] = edge[1]
col[2 * i] = edge[1]
col[2 * i + 1] = edge[0]
# ensure row is sorted
if not numpy.all(row[:-1] <= row[1:]):
order = numpy.argsort(row)
row = row[order]
col = col[order]
assert numpy.all(row[:-1] <= row[1:])
adj = chainer.utils.CooMatrix(
data=data, row=row, col=col,
shape=(graph.number_of_nodes(), graph.number_of_nodes()),
order='C')
return PaddingGraphData(
x=self.get_x(graph),
adj=adj,
y=self.get_y(graph),
label_num=graph.graph['label_num']
)
def create_dataset(self, graph_list):
"""Create `PaddingGraphDataset` from list of `Networkx::Graph`
Args:
graph_list (list[Networkx::Graph]): list of graphs
Returns:
PaddingGraphDataset: graph dataset of padding pattern
"""
data_list = [
self.construct_data(graph) for graph in graph_list
]
dataset = PaddingGraphDataset(data_list)
dataset.register_feature('label_num', batch_without_padding)
return dataset
class BaseSparseNetworkxPreprocessor(BaseNetworkxPreprocessor):
"""Base class to preprocess `Networkx::Graph` into `SparseGraphDataset`
"""
def construct_data(self, graph):
"""Construct `SparseGraphData` from `Networkx::Graph`
Args:
graph (Networkx::Graph): graph
Returns:
SparseGraphData: graph data of sparse pattern
"""
        edge_index = numpy.empty((2, graph.number_of_edges() * 2),
                                 dtype=numpy.int32)
for i, edge in enumerate(graph.edges):
edge_index[0][2 * i] = edge[0]
edge_index[0][2 * i + 1] = edge[1]
edge_index[1][2 * i] = edge[1]
edge_index[1][2 * i + 1] = edge[0]
return SparseGraphData(
x=self.get_x(graph),
            edge_index=edge_index,
y=self.get_y(graph),
label_num=graph.graph['label_num']
)
def add_self_loop(self, graph):
for v in range(graph.number_of_nodes()):
graph.add_edge(v, v)
return graph
def create_dataset(self, graph_list):
"""Create `SparseGraphDataset` from list of `Networkx::Graph`
Args:
graph_list (list[Networkx::Graph]): list of graphs
Returns:
SparseGraphDataset: graph dataset of sparse pattern
"""
data_list = [
self.construct_data(graph) for graph in graph_list
]
dataset = SparseGraphDataset(data_list)
dataset.register_feature('label_num', batch_without_padding)
return dataset
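# Hedged usage sketch (not part of the original module), assuming a tiny
# hypothetical graph: attach per-node features 'x' and labels 'y' plus a
# graph-level 'label_num', then convert through BasePaddingNetworkxPreprocessor.
if __name__ == '__main__':
    g = networkx.Graph()
    g.add_edges_from([(0, 1), (1, 2)])
    for v in g.nodes:
        g.nodes[v]['x'] = numpy.array([float(v), 1.0], dtype=numpy.float32)
        g.nodes[v]['y'] = v % 2
    g.graph['label_num'] = 2
    preprocessor = BasePaddingNetworkxPreprocessor(use_coo=True)
    dataset = preprocessor.create_dataset([g])  # one PaddingGraphData with a CooMatrix adjacency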
|
168478
|
from opentera.db.Base import db, BaseModel
from enum import Enum
import random
from datetime import datetime, timedelta
import uuid
class TeraSessionStatus(Enum):
STATUS_NOTSTARTED = 0
STATUS_INPROGRESS = 1
STATUS_COMPLETED = 2
STATUS_CANCELLED = 3
STATUS_TERMINATED = 4
class TeraSession(db.Model, BaseModel):
__tablename__ = 't_sessions'
id_session = db.Column(db.Integer, db.Sequence('id_session_sequence'), primary_key=True, autoincrement=True)
session_uuid = db.Column(db.String(36), nullable=False, unique=True)
id_session_type = db.Column(db.Integer, db.ForeignKey('t_sessions_types.id_session_type'), nullable=False)
id_creator_user = db.Column(db.Integer, db.ForeignKey('t_users.id_user'), nullable=True)
id_creator_device = db.Column(db.Integer, db.ForeignKey('t_devices.id_device'), nullable=True)
id_creator_participant = db.Column(db.Integer, db.ForeignKey('t_participants.id_participant'), nullable=True)
id_creator_service = db.Column(db.Integer, db.ForeignKey('t_services.id_service', ondelete='set null'),
nullable=True)
session_name = db.Column(db.String, nullable=False)
session_start_datetime = db.Column(db.TIMESTAMP(timezone=True), nullable=False)
session_duration = db.Column(db.Integer, nullable=False, default=0)
session_status = db.Column(db.Integer, nullable=False)
session_comments = db.Column(db.String, nullable=True)
session_parameters = db.Column(db.String, nullable=True)
session_participants = db.relationship("TeraParticipant", secondary="t_sessions_participants",
back_populates="participant_sessions")
session_users = db.relationship("TeraUser", secondary="t_sessions_users", back_populates="user_sessions")
session_devices = db.relationship("TeraDevice", secondary="t_sessions_devices",
back_populates="device_sessions")
session_creator_user = db.relationship('TeraUser')
session_creator_device = db.relationship('TeraDevice')
session_creator_participant = db.relationship('TeraParticipant')
session_creator_service = db.relationship('TeraService')
session_session_type = db.relationship('TeraSessionType')
session_events = db.relationship('TeraSessionEvent', cascade="delete")
session_assets = db.relationship('TeraAsset', cascade='delete')
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
ignore_fields.extend(['session_participants', 'session_creator_user', 'session_creator_device',
'session_creator_participant', 'session_creator_service', 'session_session_type',
'session_events', 'session_users', 'session_devices', 'session_assets'])
if minimal:
ignore_fields.extend(['session_comments', 'session_duration', 'session_start_datetime',
'session_parameters'])
rval = super().to_json(ignore_fields=ignore_fields)
if not minimal:
# Append list of participants ids and names
rval['session_participants'] = [{'id_participant': part.id_participant,
'participant_uuid': part.participant_uuid,
'participant_name': part.participant_name,
'id_project': part.id_project}
for part in self.session_participants]
# Append list of users ids and names
rval['session_users'] = [{'id_user': user.id_user,
'user_uuid': user.user_uuid,
'user_name': user.get_fullname()}
for user in self.session_users]
# Append list of devices ids and names
rval['session_devices'] = [{'id_device': device.id_device,
'device_uuid': device.device_uuid,
'device_name': device.device_name}
for device in self.session_devices]
# Append user name
if self.session_creator_user:
rval['session_creator_user'] = self.session_creator_user.get_fullname()
rval['session_creator_user_uuid'] = self.session_creator_user.user_uuid
elif self.session_creator_device:
rval['session_creator_device'] = self.session_creator_device.device_name
rval['session_creator_device_uuid'] = self.session_creator_device.device_uuid
elif self.session_creator_participant:
rval['session_creator_participant'] = self.session_creator_participant.participant_name
rval['session_creator_participant_uuid'] = self.session_creator_participant.participant_uuid
elif self.session_creator_service:
rval['session_creator_service'] = self.session_creator_service.service_name
rval['session_creator_service_uuid'] = self.session_creator_service.service_uuid
# Append session components
# from opentera.db.models.TeraDeviceData import TeraDeviceData
# rval['session_has_device_data'] = len(TeraDeviceData.get_data_for_session(self.id_session)) > 0
return rval
def to_json_create_event(self):
return self.to_json(minimal=True)
def to_json_update_event(self):
return self.to_json(minimal=True)
def to_json_delete_event(self):
# Minimal information, delete can not be filtered
return {'id_session': self.id_session, 'session_uuid': self.session_uuid}
@staticmethod
def create_defaults(test=False):
if test:
from opentera.db.models.TeraUser import TeraUser
from opentera.db.models.TeraDevice import TeraDevice
from opentera.db.models.TeraSessionType import TeraSessionType
from opentera.db.models.TeraParticipant import TeraParticipant
from opentera.db.models.TeraService import TeraService
session_user = TeraUser.get_user_by_id(1)
session_user2 = TeraUser.get_user_by_id(2)
session_part = TeraParticipant.get_participant_by_name('Participant #1')
session_part2 = TeraParticipant.get_participant_by_name('Participant #2')
session_service = TeraService.get_service_by_key('VideoRehabService')
session_device = TeraDevice.get_device_by_id(2)
# Create user sessions
for i in range(8):
base_session = TeraSession()
base_session.session_creator_user = session_user
ses_type = random.randint(1, 4)
base_session.session_session_type = TeraSessionType.get_session_type_by_id(ses_type)
base_session.session_name = "Séance #" + str(i + 1)
base_session.session_start_datetime = datetime.now() - timedelta(days=random.randint(0, 30))
base_session.session_duration = random.randint(60, 4800)
ses_status = random.randint(0, 4)
base_session.session_status = ses_status
if i < 7:
base_session.session_participants = [session_part]
else:
base_session.session_participants = [session_part, session_part2]
if i < 4:
base_session.session_users = [base_session.session_creator_user]
else:
base_session.session_users = [base_session.session_creator_user, session_user2]
if i == 3:
base_session.session_devices = [session_device]
base_session.session_uuid = str(uuid.uuid4())
db.session.add(base_session)
# Create device sessions
for i in range(8):
base_session = TeraSession()
base_session.session_creator_device = TeraDevice.get_device_by_id(1)
ses_type = random.randint(1, 4)
base_session.session_session_type = TeraSessionType.get_session_type_by_id(ses_type)
base_session.session_name = "Séance #" + str(i + 1)
base_session.session_start_datetime = datetime.now() - timedelta(days=random.randint(0, 30))
base_session.session_duration = random.randint(60, 4800)
ses_status = random.randint(0, 4)
base_session.session_status = ses_status
if i < 7:
base_session.session_participants = [session_part]
else:
base_session.session_participants = [session_part, session_part2]
base_session.session_uuid = str(uuid.uuid4())
db.session.add(base_session)
# Create participant sessions
for i in range(8):
base_session = TeraSession()
base_session.session_creator_participant = TeraParticipant.get_participant_by_id(1)
ses_type = random.randint(1, 4)
base_session.session_session_type = TeraSessionType.get_session_type_by_id(ses_type)
base_session.session_name = "Séance #" + str(i + 1)
base_session.session_start_datetime = datetime.now() - timedelta(days=random.randint(0, 30))
base_session.session_duration = random.randint(60, 4800)
ses_status = random.randint(0, 4)
base_session.session_status = ses_status
base_session.session_participants = [base_session.session_creator_participant]
base_session.session_uuid = str(uuid.uuid4())
db.session.add(base_session)
# Create service sessions
for i in range(4):
base_session = TeraSession()
base_session.session_creator_service = session_service
ses_type = random.randint(1, 4)
base_session.session_session_type = TeraSessionType.get_session_type_by_id(ses_type)
base_session.session_name = "Séance #" + str(i + 1)
base_session.session_start_datetime = datetime.now() - timedelta(days=random.randint(0, 30))
base_session.session_duration = random.randint(60, 4800)
ses_status = random.randint(0, 4)
base_session.session_status = ses_status
if i < 3:
base_session.session_participants = [session_part]
else:
base_session.session_participants = [session_part, session_part2]
base_session.session_uuid = str(uuid.uuid4())
db.session.add(base_session)
db.session.commit()
@staticmethod
def get_session_by_id(ses_id: int):
return TeraSession.query.filter_by(id_session=ses_id).first()
    @staticmethod
    def get_session_by_uuid(s_uuid):
        return TeraSession.query.filter_by(session_uuid=s_uuid).first()
@staticmethod
def get_session_by_name(name: str):
return TeraSession.query.filter_by(session_name=name).first()
@staticmethod
def get_sessions_for_participant(part_id: int):
from opentera.db.models.TeraParticipant import TeraParticipant
return TeraSession.query.join(TeraSession.session_participants).filter(TeraParticipant.id_participant ==
part_id) \
.order_by(TeraSession.session_start_datetime.desc()).all()
@staticmethod
def get_sessions_for_user(user_id: int):
from opentera.db.models.TeraUser import TeraUser
return TeraSession.query.join(TeraSession.session_users).filter(TeraUser.id_user == user_id) \
.order_by(TeraSession.session_start_datetime.desc()).all()
@staticmethod
def get_sessions_for_device(device_id: int):
from opentera.db.models.TeraDevice import TeraDevice
return TeraSession.query.join(TeraSession.session_devices).filter(TeraDevice.id_device == device_id) \
.order_by(TeraSession.session_start_datetime.desc()).all()
@staticmethod
def get_sessions_for_type(session_type_id: int):
return TeraSession.query.filter_by(id_session_type=session_type_id).all()
    @staticmethod
    def is_user_in_session(session_uuid: str, user_uuid: str) -> bool:
        session = TeraSession.get_session_by_uuid(session_uuid)
        if session is None:
            return False
        user_uuids = [user.user_uuid for user in session.session_users]
        return user_uuid in user_uuids
    @staticmethod
    def is_device_in_session(session_uuid: str, device_uuid: str) -> bool:
        session = TeraSession.get_session_by_uuid(session_uuid)
        if session is None:
            return False
        device_uuids = [device.device_uuid for device in session.session_devices]
        return device_uuid in device_uuids
    @staticmethod
    def is_participant_in_session(session_uuid: str, participant_uuid: str) -> bool:
        session = TeraSession.get_session_by_uuid(session_uuid)
        if session is None:
            return False
        participant_uuids = [participant.participant_uuid for participant in session.session_participants]
        return participant_uuid in participant_uuids
def has_user(self, id_user: int) -> bool:
user_ids = [user.id_user for user in self.session_users]
return id_user in user_ids
def has_device(self, id_device: int) -> bool:
device_ids = [device.id_device for device in self.session_devices]
return id_device in device_ids
def has_participant(self, id_participant: int) -> bool:
participant_ids = [participant.id_participant for participant in self.session_participants]
return id_participant in participant_ids
def get_associated_project_id(self):
project_id = None
if self.session_participants:
# Return project id for the first participant, since they should all be the same...
project_id = self.session_participants[0].id_project
return project_id
# THIS SHOULD NOT BE USED ANYMORE, AS DELETES CAN'T OCCUR IF THERE'S STILL ASSOCIATED SESSIONS
# @staticmethod
# def delete_orphaned_sessions(commit_changes=True):
# from opentera.db.models.TeraDeviceData import TeraDeviceData
# orphans_parts = TeraSession.query.outerjoin(TeraSession.session_participants).filter(
# TeraSession.session_participants == None).all()
#
# orphans_users = TeraSession.query.outerjoin(TeraSession.session_users).filter(
# TeraSession.session_users == None).all()
#
# orphans = list(set(orphans_parts + orphans_users)) # Keep unique sessions only!
#
# if orphans:
# for orphan in orphans:
# TeraDeviceData.delete_files_for_session(orphan.id_session)
# db.session.delete(orphan)
# # TeraSession.delete(orphan.id_session)
#
# if commit_changes:
# db.session.commit()
@classmethod
def delete(cls, id_todel):
# from opentera.db.models.TeraDeviceData import TeraDeviceData
# TeraDeviceData.delete_files_for_session(id_todel)
super().delete(id_todel)
@classmethod
def insert(cls, session):
session.session_uuid = str(uuid.uuid4())
super().insert(session)
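# Hedged usage sketch (not part of the original model): insert() assigns the
# session UUID before persisting. Requires an initialized database; the id
# values below are assumptions.
#
#   new_session = TeraSession()
#   new_session.session_name = 'Demo session'
#   new_session.session_start_datetime = datetime.now()
#   new_session.session_status = TeraSessionStatus.STATUS_NOTSTARTED.value
#   new_session.id_session_type = 1  # assumed existing session type
#   TeraSession.insert(new_session)  # sets session_uuid, then saves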
|
168480
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.minard_troops import minard_troops
def test_minard_troops():
"""Test module minard_troops.py by downloading
minard_troops.csv and testing shape of
extracted data has 51 rows and 5 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = minard_troops(test_path)
    try:
        assert x_train.shape == (51, 5)
    except Exception:
        shutil.rmtree(test_path)
        raise
|
168481
|
import sys
from decorator import decorator
from fabric.api import env, hide, parallel, run, settings
from fabric.tasks import execute
env.shell = '/bin/bash -l -c -o pipefail'
env.keepalive = 60
env.timeout = 60
def parallel_task(server_side=True):
@decorator
def _parallel_task(task, *args, **kargs):
self = args[0]
if server_side:
hosts = self.hosts
else:
hosts = self.workers
with settings(user=self.user, password=self.password, warn_only=True):
with hide("running", "output"):
return execute(parallel(task), *args, hosts=hosts, **kargs)
return _parallel_task
class RemoteStats:
def __init__(self, hosts, workers, user, password, interval=None):
self.hosts = hosts
self.user = user
self.password = password
self.workers = workers
self.interval = interval
def run(self, *args, **kwargs):
try:
return run(*args, **kwargs)
except KeyboardInterrupt:
sys.exit()
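# Hedged usage sketch (not part of the original module): a method wrapped with
# @parallel_task fans out over self.hosts via Fabric's execute(), returning a
# {host: result} dict. The 'uptime' command is an illustrative assumption.
class UptimeStats(RemoteStats):
    @parallel_task(server_side=True)
    def uptime(self):
        # executed once per host; self.run() exits cleanly on Ctrl-C
        return self.run('uptime')

# stats = UptimeStats(hosts=['host1', 'host2'], workers=[], user='deploy', password='secret')
# results = stats.uptime()  # {'host1': '... load average ...', 'host2': ...}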
|
168492
|
from __future__ import absolute_import
from django.db import models
from django.utils.translation import ugettext_lazy
from smsgateway.enums import (OPERATOR_CHOICES, OPERATOR_UNKNOWN, GATEWAY_CHOICES, DIRECTION_CHOICES, DIRECTION_INBOUND,
PRIORITIES, PRIORITY_MEDIUM, PRIORITY_DEFERRED)
from datetime import datetime
class SMS(models.Model):
sent = models.DateTimeField(default=datetime.now, verbose_name=ugettext_lazy(u'sent'))
content = models.TextField(verbose_name=ugettext_lazy(u'content'), help_text=ugettext_lazy(u'SMS content'))
sender = models.CharField(max_length=32, verbose_name=ugettext_lazy(u'sender'), db_index=True)
to = models.CharField(max_length=32, verbose_name=ugettext_lazy(u'receiver'), db_index=True)
operator = models.IntegerField(choices=OPERATOR_CHOICES, default=OPERATOR_UNKNOWN,
verbose_name=ugettext_lazy(u'Originating operator'))
gateway = models.IntegerField(choices=GATEWAY_CHOICES, default=0, verbose_name=ugettext_lazy(u'gateway'),
help_text=ugettext_lazy(u'By which provider the SMS was handled.'))
backend = models.CharField(max_length=32, db_index=True, default='unknown', verbose_name=ugettext_lazy(u'backend'))
gateway_ref = models.CharField(max_length=64, blank=True, verbose_name=ugettext_lazy(u'gateway reference'),
help_text=ugettext_lazy(u'A reference id for the gateway'))
direction = models.IntegerField(choices=DIRECTION_CHOICES, default=DIRECTION_INBOUND,
verbose_name=ugettext_lazy(u'direction'))
class Meta:
get_latest_by = 'sent'
ordering = ('sent',)
verbose_name = ugettext_lazy(u'SMS')
verbose_name_plural = ugettext_lazy(u'SMSes')
def __unicode__(self):
return u'SMS: "{}" from "{}"'.format(self.content, self.sender)
class QueuedSMS(models.Model):
to = models.CharField(max_length=32, verbose_name=ugettext_lazy(u'receiver'))
signature = models.CharField(max_length=32, verbose_name=ugettext_lazy(u'signature'))
content = models.TextField(verbose_name=ugettext_lazy(u'content'), help_text=ugettext_lazy(u'SMS content'))
created = models.DateTimeField(default=datetime.now)
using = models.CharField(blank=True, max_length=100, verbose_name=ugettext_lazy(u'gateway'),
help_text=ugettext_lazy(u'Via which provider the SMS will be sent.'))
priority = models.CharField(max_length=1, choices=PRIORITIES, default=PRIORITY_MEDIUM)
reliable = models.BooleanField(default=False, blank=True, verbose_name=ugettext_lazy(u'is reliable'))
class Meta:
get_latest_by = 'created'
ordering = ('priority', 'created',)
verbose_name = ugettext_lazy(u'Queued SMS')
verbose_name_plural = ugettext_lazy(u'Queued SMSes')
def defer(self):
self.priority = PRIORITY_DEFERRED
self.save()
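# Hedged usage sketch (not part of the original models): queue an outbound SMS,
# then defer it; because of Meta.ordering, deferred rows sort after all other
# priorities. Field values are assumptions; requires a configured Django project.
#
#   queued = QueuedSMS.objects.create(to='+15550001111', signature='demo',
#                                     content='Hello', priority=PRIORITY_MEDIUM)
#   queued.defer()  # bumps priority to PRIORITY_DEFERRED and saves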
|
168525
|
from tqdm import tqdm
import torch as tc
import pdb
import os , sys
import math
import fitlog
import re
from utils.scorer import get_f1
from utils.train_util import pad_sents , get_data_from_batch
from utils.write_keyfile import write_keyfile
def before_test(C , logger , dataset , models):
if isinstance(models , tc.nn.Module):
models = [models]
for i in range(len(models)):
models[i] = models[i].eval()
device = tc.device(C.device)
batch_size = 8
batch_numb = (len(dataset) // batch_size) + int((len(dataset) % batch_size) != 0)
return device , batch_size , batch_numb , models
def get_output(C , logger ,
models , device , loss_func , generator ,
sents , ents , anss , data_ent ,
):
preds = [0 for _ in range(len(models))]
for i , model in enumerate(models):
old_device = next(model.parameters()).device
model = model.to(device)
preds[i] = model(sents , ents)
        model = model.to(old_device)  # if the model was originally on the CPU, move it back there after inference
loss = loss_func(preds[i] , anss , ents)
ans_rels = [ [(u,v) for u,v,t in bat] for bat in anss] if C.gene_in_data else None
generated = generator(preds , data_ent , ans_rels = ans_rels)
#pred_map = pred.max(-1)[1] #(ne , ne)
return model , preds , loss , generated
def get_evaluate(C , logger , mode , generated , generator , test_data = None):
golden = write_keyfile(test_data , generator)
os.makedirs("analyze_res/debug" , exist_ok = True)
with open("analyze_res/debug/golden.txt" , "w") as fil:
fil.write(golden)
with open("analyze_res/debug/gene.txt" , "w") as fil:
fil.write(generated)
micro_f1 , macro_f1 = get_f1(golden , generated , is_file_content = True ,
no_rel_name = generator.get_no_rel_name() , logger = logger)
micro_f1 , macro_f1 = micro_f1 * 100 , macro_f1 * 100
return micro_f1 , macro_f1
def test(C , logger ,
dataset , models ,
loss_func , generator ,
mode = "valid" , epoch_id = 0 , run_name = "0" , need_generated = False ,
):
device , batch_size , batch_numb , models = before_test(C , logger , dataset , models)
pbar = tqdm(range(batch_numb) , ncols = 70)
avg_loss = 0
generated = ""
for batch_id in pbar:
data = dataset[batch_id * batch_size : (batch_id+1) * batch_size]
sents , ents , anss , data_ent = get_data_from_batch(data, device=tc.device(C.device))
with tc.no_grad():
model , preds , loss , partial_generated = get_output(
C,logger,models,device,loss_func,generator,sents,ents,anss,data_ent
)
generated += partial_generated
avg_loss += float(loss) / len(models)
pbar.set_description_str("(Test )Epoch {0}".format(epoch_id))
pbar.set_postfix_str("loss = %.4f (avg = %.4f)" % ( float(loss) , avg_loss / (batch_id+1)))
micro_f1 , macro_f1 = get_evaluate(C , logger , mode , generated , generator , dataset)
#print (result)
logger.log ("-----Epoch {} tested. Micro F1 = {:.2f}% , Macro F1 = {:.2f}% , loss = {:.4f}".
format(epoch_id , micro_f1, macro_f1, avg_loss / batch_numb))
logger.log("\n")
fitlog.add_metric(micro_f1 , step = epoch_id , name = "({0})micro f1".format(run_name))
fitlog.add_metric(macro_f1 , step = epoch_id , name = "({0})macro f1".format(run_name))
if need_generated:
return micro_f1 , macro_f1 , avg_loss , generated
return micro_f1 , macro_f1 , avg_loss
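# Hedged usage sketch (not part of the original module): passing a list of
# models makes get_output() run each one and hand all logits to the generator,
# giving simple ensembling. C, logger, loss_func and generator are assumed to
# come from the surrounding project.
#
#   micro_f1, macro_f1, avg_loss = test(
#       C, logger, valid_dataset, [model_a, model_b],
#       loss_func, generator, mode='valid', epoch_id=0,
#   )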
|
168533
|
from django.contrib import admin
from reversion.admin import VersionAdmin
from django.contrib.flatpages.models import FlatPage
from django.contrib.flatpages.admin import FlatPageAdmin
from .models import HostedPicture
admin.site.unregister(FlatPage)
@admin.register(FlatPage)
class FlatPageVersionedAdmin(VersionAdmin, FlatPageAdmin):
pass
@admin.register(HostedPicture)
class HostedPictureAdmin(admin.ModelAdmin):
list_display = ['title', 'url', 'html']
|
168535
|
import unittest
import subprocess
class TestPapermill(unittest.TestCase):
def test_papermill(self):
result = subprocess.run([
'papermill',
'/input/tests/data/notebook.ipynb',
'-',
], stdout=subprocess.PIPE)
self.assertEqual(0, result.returncode)
self.assertTrue(b'999' in result.stdout)
|
168596
|
from rest_framework import serializers
from .models import *
class ChickenSerializer(serializers.ModelSerializer):
class Meta:
model = Chicken
fields = '__all__'
class WorkerSerializer(serializers.ModelSerializer):
class Meta:
model = Worker
fields = '__all__'
class BreedSerializer(serializers.ModelSerializer):
    class Meta:
        model = Breed
        fields = '__all__'
class CageSerializer(serializers.ModelSerializer):
class Meta:
model = Cage
fields = '__all__'
class ReportSerializer(serializers.ModelSerializer):
class Meta:
model = Report
fields = '__all__'
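# Hedged usage sketch (not part of the original module): standard DRF
# round trip with one of the serializers above. The 'name' field is an
# assumption about the Chicken model.
#
#   serializer = ChickenSerializer(data={'name': 'Hen #1'})
#   if serializer.is_valid():
#       chicken = serializer.save()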
|
168611
|
from rest_framework import decorators, permissions, status, viewsets
from rest_framework.response import Response
from lego.apps.feeds.attr_cache import AttrCache
from .feed_manager import feed_manager
from .models import NotificationFeed, PersonalFeed, UserFeed
from .serializers import (
AggregatedFeedSerializer,
AggregatedMarkedFeedSerializer,
MarkSerializer,
)
class FeedViewSet(viewsets.GenericViewSet):
"""
Generic viewset base for all types of feeds.
"""
ordering = "-updated_at"
serializer_class = AggregatedFeedSerializer
def attach_metadata(self, data):
"""
Map over the feed here to attach more information to each element.
"""
content_strings = set()
for item in data:
activities = item.get("activities")
if activities:
# Aggregated Activity
for activity in activities:
target = activity.get("target")
object = activity.get("object")
actor = activity.get("actor")
                    if target:
                        content_strings.add(target)
                    if object:
                        content_strings.add(object)
                    if actor:
                        content_strings.add(actor)
if content_strings:
cache = AttrCache()
lookup = cache.bulk_lookup(content_strings)
for item in data:
context = {}
activities = item.get("activities")
if activities:
# Aggregated Activity
for activity in activities:
target = activity.get("target")
object = activity.get("object")
actor = activity.get("actor")
if target in lookup.keys():
context[target] = lookup[target]
if object in lookup.keys():
context[object] = lookup[object]
if actor in lookup.keys():
context[actor] = lookup[actor]
item["context"] = context
return data
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(self.attach_metadata(serializer.data))
serializer = self.get_serializer(queryset, many=True)
return Response(self.attach_metadata(serializer.data))
class FeedMarkerViewSet(viewsets.GenericViewSet):
"""
Feed class with marker support
"""
serializer_class = AggregatedMarkedFeedSerializer
@decorators.action(detail=False, serializer_class=MarkSerializer, methods=["POST"])
def mark_all(self, request):
"""
This function marks all activities in a NotificationFeed as seen or/and red.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
feed = self.get_queryset().model
seen = serializer.validated_data["seen"]
read = serializer.validated_data["read"]
feed.mark_all(self.request.user.id, seen, read)
return Response(serializer.data, status=status.HTTP_200_OK)
@decorators.action(detail=True, serializer_class=MarkSerializer, methods=["POST"])
def mark(self, request, pk):
"""
Mark a single notification as read or seen.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
feed = self.get_queryset().model
seen = serializer.validated_data["seen"]
read = serializer.validated_data["read"]
        feed.mark(self.request.user.id, str(pk), seen, read)
return Response(serializer.data, status=status.HTTP_200_OK)
@decorators.action(detail=False, methods=["GET"])
def notification_data(self, request):
feed = self.get_queryset().model
return Response(feed.get_notification_data(self.request.user.id))
class UserFeedViewSet(FeedViewSet):
"""
Public events produced by users. This feed should not contain private information! This feed
uses a url param to decide which feed to retrieve.
"""
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
user_id = self.kwargs["user_pk"]
return feed_manager.retrieve_feed(UserFeed, user_id)
class PersonalFeedViewSet(FeedViewSet):
"""
Personal user timeline, based on request.user
"""
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return feed_manager.retrieve_feed(PersonalFeed, self.request.user.id)
class NotificationsViewSet(FeedMarkerViewSet, FeedViewSet):
"""
Notifications feed based on request.user
"""
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return feed_manager.retrieve_feed(NotificationFeed, self.request.user.id)
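# Hedged sketch (not part of the original module) of the item shape
# attach_metadata() consumes: actor/object/target are content strings resolved
# through AttrCache.bulk_lookup() into a per-item "context" dict. The
# "users.user-1" identifier format is an assumption for illustration.
#
#   item = {"activities": [
#       {"actor": "users.user-1", "object": "events.event-2", "target": None},
#   ]}
#   # after attach_metadata([item]):
#   # item["context"] == {"users.user-1": {...}, "events.event-2": {...}}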
|
168632
|
import numpy as np
def flux(x):
return 0.5 * np.square(x)
def minf(a, b):
    # Vectorized form of the scalar rule below; valid when a <= b, where the
    # two indicator terms can only overlap at a = b = 0 (and both vanish there).
    # if b <= 0:
    #     return flux(b)
    # elif a >= 0:
    #     return flux(a)
    # else:
    #     return 0.0
    return (b <= 0) * flux(b) + (a >= 0) * flux(a)
def maxf(a,b):
return np.maximum(flux(a),flux(b))
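# Hedged sketch (not part of the original module): minf/maxf are the two
# branches of the Godunov flux for Burgers' equation f(u) = u^2 / 2. With left
# state a and right state b, the numerical flux takes the max of f over [b, a]
# in the shock case (a > b) and the min over [a, b] otherwise.
def godunov_flux(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return np.where(a > b, maxf(a, b), minf(a, b))

# godunov_flux(1.0, -1.0) -> 0.5 (shock)
# godunov_flux(-1.0, 1.0) -> 0.0 (rarefaction spanning the sonic point u = 0)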
|
168634
|
from .base import MetricSelector
class SemanticSimilarity(MetricSelector):
"""
:English: :py:class:`.UniversalSentenceEncoder`
"""
def _select(self, lang):
if lang.name == "english":
from ..algorithms.usencoder import UniversalSentenceEncoder
return UniversalSentenceEncoder()
|
168661
|
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
from torch.autograd import grad
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
#CPU or GPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#device = 'cpu'
parser = argparse.ArgumentParser()
parser.add_argument('--tau', default=0.005, type=float) # target smoothing coefficient
parser.add_argument('--target_update_interval', default=1, type=int)
parser.add_argument('--gradient_steps', default=1, type=int)
parser.add_argument('--learning_rate', default=3e-4, type=float)
parser.add_argument('--gamma', default=0.99, type=float)  # discount gamma
parser.add_argument('--capacity', default=400000, type=int) # replay buffer size
parser.add_argument('--iteration', default=100000, type=int) # num of games
parser.add_argument('--batch_size', default=512, type=int) # mini batch size
parser.add_argument('--seed', default=1, type=int)
# optional parameters
parser.add_argument('--num_hidden_layers', default=2, type=int)
parser.add_argument('--num_hidden_units_per_layer', default=256, type=int)
parser.add_argument('--sample_frequency', default=256, type=int)
parser.add_argument('--activation', default='Relu', type=str)
parser.add_argument('--render', default=False, type=lambda s: s.lower() in ('true', '1'))  # show UI or not; bare type=bool treats any non-empty string as True
parser.add_argument('--log_interval', default=2000, type=int) #
parser.add_argument('--load', default=True, type=lambda s: s.lower() in ('true', '1'))  # load model; same bool-parsing fix as --render
args = parser.parse_args()
min_Val = torch.tensor(1e-7).float().to(device)
class Replay_buffer():
def __init__(self, capacity,state_dim,action_dim):
self.capacity = capacity
self.state_pool = torch.zeros(self.capacity, state_dim).float().to(device)
self.action_pool = torch.zeros(self.capacity, action_dim).float().to(device)
self.reward_pool = torch.zeros(self.capacity, 1).float().to(device)
self.next_state_pool = torch.zeros(self.capacity, state_dim).float().to(device)
self.done_pool = torch.zeros(self.capacity, 1).float().to(device)
self.num_transition = 0
def push(self, s, a, r, s_, d):
index = self.num_transition % self.capacity
s = torch.tensor(s).float().to(device)
a = torch.tensor(a).float().to(device)
r = torch.tensor(r).float().to(device)
s_ = torch.tensor(s_).float().to(device)
d = torch.tensor(d).float().to(device)
for pool, ele in zip([self.state_pool, self.action_pool, self.reward_pool, self.next_state_pool, self.done_pool],
[s, a, r, s_, d]):
pool[index] = ele
self.num_transition += 1
def sample(self, batch_size):
        # sample only from slots that have actually been filled
        index = np.random.choice(min(self.num_transition, self.capacity), batch_size, replace=False)
bn_s, bn_a, bn_r, bn_s_, bn_d = self.state_pool[index], self.action_pool[index], self.reward_pool[index],\
self.next_state_pool[index], self.done_pool[index]
return bn_s, bn_a, bn_r, bn_s_, bn_d
class Actor(nn.Module):
def __init__(self, state_dim, action_dim ,min_log_std=-20, max_log_std=2):##max and min left to modify
super(Actor, self).__init__()
self.fc1 = nn.Linear(state_dim, 512)
self.fc2 = nn.Linear(512, 256)
self.mu_head = nn.Linear(256, action_dim)
self.log_std_head = nn.Linear(256, action_dim)
self.min_log_std = min_log_std
self.max_log_std = max_log_std
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
mu = self.mu_head(x)
log_std_head = self.log_std_head(x)
        log_std_head = torch.clamp(log_std_head, self.min_log_std, self.max_log_std)  # clamp log-std to a sane range
return mu, log_std_head
class Critic(nn.Module):
def __init__(self, state_dim):
super(Critic, self).__init__()
self.fc1 = nn.Linear(state_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class Q(nn.Module):
def __init__(self, state_dim, action_dim):
super(Q, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.fc1 = nn.Linear(state_dim + action_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, s, a):
s = s.reshape(-1, self.state_dim)
a = a.reshape(-1, self.action_dim)
        x = torch.cat((s, a), -1)  # concatenate state and action
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class SACAgent():
def __init__(self, state_dim = 45, action_dim=21):
super(SACAgent, self).__init__()
self.policy_net = Actor(state_dim=state_dim, action_dim = action_dim).to(device)
self.value_net = Critic(state_dim).to(device)
self.Target_value_net = Critic(state_dim).to(device)
self.Q_net1 = Q(state_dim, action_dim).to(device)
self.Q_net2 = Q(state_dim, action_dim).to(device)
self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=args.learning_rate)
self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=args.learning_rate)
self.Q1_optimizer = optim.Adam(self.Q_net1.parameters(), lr=args.learning_rate)
self.Q2_optimizer = optim.Adam(self.Q_net2.parameters(), lr=args.learning_rate)
self.replay_buffer = Replay_buffer(args.capacity,state_dim,action_dim)
self.num_transition = 0
self.num_training = 0
self.writer = SummaryWriter('./exp-SAC_dual_Q_network')
self.value_criterion = nn.MSELoss()
self.Q1_criterion = nn.MSELoss()
self.Q2_criterion = nn.MSELoss()
for target_param, param in zip(self.Target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(param.data)
self.steer_range = (-0.8,0.8)
self.throttle_range = (0.6,1.0)
def select_action(self, state):
state = torch.FloatTensor(state).to(device)
mu, log_sigma = self.policy_net(state)
sigma = torch.exp(log_sigma)
dist = Normal(mu, sigma)
z = dist.sample()
steer = float(torch.tanh(z[0,0]).detach().cpu().numpy())
throttle = float(torch.tanh(z[0,1]).detach().cpu().numpy())
steer = (steer + 1)/2 * (self.steer_range[1] - self.steer_range[0]) + self.steer_range[0]
throttle = (throttle + 1)/2 * (self.throttle_range[1] - self.throttle_range[0]) + self.throttle_range[0]
return np.array([steer, throttle])
def test(self, state):
state = torch.FloatTensor(state).to(device)
mu, log_sigma = self.policy_net(state)
action = mu
steer = float(torch.tanh(action[0,0]).detach().cpu().numpy())
throttle = float(torch.tanh(action[0,1]).detach().cpu().numpy())
steer = (steer + 1)/2 * (self.steer_range[1] - self.steer_range[0]) + self.steer_range[0]
throttle = (throttle + 1)/2 * (self.throttle_range[1] - self.throttle_range[0]) + self.throttle_range[0]
return np.array([steer, throttle])
def evaluate(self, state):
batch = state.size()[0]
batch_mu, batch_log_sigma = self.policy_net(state)
batch_sigma = torch.exp(batch_log_sigma)
dist = Normal(batch_mu, batch_sigma)
noise = Normal(0, 1)
z = noise.sample()
action = torch.tanh(batch_mu + batch_sigma * z.to(device))
log_prob = dist.log_prob(batch_mu + batch_sigma * z.to(device)) - torch.log(1 - action.pow(2) + min_Val)
log_prob_0 = log_prob[:,0].reshape(batch,1)
log_prob_1 = log_prob[:,1].reshape(batch,1)
log_prob = log_prob_0 + log_prob_1
return action, log_prob, z, batch_mu, batch_log_sigma
def update(self):
if self.num_training % 500 == 0:
print("**************************Train Start************************")
print("Training ... \t{} times ".format(self.num_training))
for _ in range(args.gradient_steps):
bn_s, bn_a, bn_r, bn_s_, bn_d = self.replay_buffer.sample(args.batch_size)
target_value = self.Target_value_net(bn_s_)
next_q_value = bn_r + (1 - bn_d) * args.gamma * target_value
            expected_value = self.value_net(bn_s)
            expected_Q1 = self.Q_net1(bn_s, bn_a)
            expected_Q2 = self.Q_net2(bn_s, bn_a)
            sample_action, log_prob, z, batch_mu, batch_log_sigma = self.evaluate(bn_s)
            expected_new_Q = torch.min(self.Q_net1(bn_s, sample_action), self.Q_net2(bn_s, sample_action))
            next_value = expected_new_Q - log_prob
            V_loss = self.value_criterion(expected_value, next_value.detach()).mean()  # J_V
            # Dual Q net
            Q1_loss = self.Q1_criterion(expected_Q1, next_q_value.detach()).mean()  # J_Q
            Q2_loss = self.Q2_criterion(expected_Q2, next_q_value.detach()).mean()
            pi_loss = (log_prob - expected_new_Q).mean()  # according to the original paper
self.writer.add_scalar('Loss/V_loss', V_loss, global_step=self.num_training)
self.writer.add_scalar('Loss/Q1_loss', Q1_loss, global_step=self.num_training)
self.writer.add_scalar('Loss/Q2_loss', Q2_loss, global_step=self.num_training)
self.writer.add_scalar('Loss/policy_loss', pi_loss, global_step=self.num_training)
# mini batch gradient descent
self.value_optimizer.zero_grad()
V_loss.backward(retain_graph=True)
nn.utils.clip_grad_norm_(self.value_net.parameters(), 0.5)
self.value_optimizer.step()
self.Q1_optimizer.zero_grad()
Q1_loss.backward(retain_graph = True)
nn.utils.clip_grad_norm_(self.Q_net1.parameters(), 0.5)
self.Q1_optimizer.step()
self.Q2_optimizer.zero_grad()
Q2_loss.backward(retain_graph = True)
nn.utils.clip_grad_norm_(self.Q_net2.parameters(), 0.5)
self.Q2_optimizer.step()
self.policy_optimizer.zero_grad()
pi_loss.backward(retain_graph = True)
nn.utils.clip_grad_norm_(self.policy_net.parameters(), 0.5)
self.policy_optimizer.step()
# update target v net update
for target_param, param in zip(self.Target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(target_param * (1 - args.tau) + param * args.tau)
self.num_training += 1
def save(self,epoch, capacity):
os.makedirs('./SAC_model_' +str(capacity) , exist_ok=True)
torch.save(self.policy_net.state_dict(), './SAC_model_' +str(capacity)+ '/policy_net_' + str(epoch) + '.pth')
torch.save(self.value_net.state_dict(), './SAC_model_' +str(capacity)+ '/value_net_'+ str(epoch) +'.pth')
torch.save(self.Q_net1.state_dict(), './SAC_model_' +str(capacity)+'/Q_net1_' + str(epoch) + '.pth')
torch.save(self.Q_net2.state_dict(), './SAC_model_' +str(capacity)+'/Q_net2_' + str(epoch) + '.pth')
print("====================================")
print("Model has been saved...")
print("====================================")
def load(self, epoch, capacity):
dir = './SAC_model_' + str(capacity) + '/'
self.policy_net.load_state_dict(torch.load( dir + 'policy_net_' + str(epoch) + '.pth'))
self.value_net.load_state_dict(torch.load( dir + 'value_net_'+ str(epoch) + '.pth'))
self.Q_net1.load_state_dict(torch.load( dir + 'Q_net1_' + str(epoch) + '.pth'))
self.Q_net2.load_state_dict(torch.load( dir + 'Q_net2_' + str(epoch) + '.pth'))
print("====================================")
print("model has been loaded...")
print("====================================")
|
168680
|
from manimlib.imports import *
class Scene_(Scene):
CONFIG = {
"camera_config": {
"background_color": WHITE
}
}
class NF24P3358(Scene_):
def construct(self):
line = Line(LEFT*3, RIGHT*3, color=BLACK)
nodes = VGroup(
*[
Circle(radius=0.25, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK)
for _ in range(7)
]
).arrange(RIGHT, buff=0.5)
names = VGroup(
*[
TextMobject(str(i), color=BLACK).scale(0.7)
for i in range(1, 8)
]
)
edges = VGroup(
*[
Arrow(nodes[i].get_center(), nodes[i + 1].get_center(), buff=0.25, color=BLACK)
for i in range(6)
]
)
infs = VGroup(
*[
TextMobject("inf", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(edges[i], UP, buff=0.1)
for i in range(6)
]
)
costs = VGroup(
*[
TextMobject("0", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(edges[i], DOWN, buff=0.1)
for i in range(6)
]
)
for i in range(7):
names[i].move_to(nodes[i])
s = VGroup(
Circle(radius=0.25, fill_color=WHITE, fill_opacity=1, stroke_color=RED),
TextMobject("s", color=RED, background_stroke_width=0).scale(0.75)
).move_to(np.array([-4.5, -1.5, 0]))
s.add(Arrow(s.get_center(), nodes[0].get_center(), color=RED, buff=0.25))
s.add(TextMobject("k", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(s[-1], UL, buff=-0.5))
s.add(TextMobject("0", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(s[-2], DR, buff=-0.5))
t = VGroup(
Circle(radius=0.25, fill_color=WHITE, fill_opacity=1, stroke_color=RED),
TextMobject("t", color=RED, background_stroke_width=0).scale(0.75)
).move_to(np.array([4.5, -1.5, 0]))
t.add(Arrow(nodes[-1].get_center(), t.get_center(), color=RED, buff=0.25))
t.add(TextMobject("inf", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(t[-1], UR, buff=-0.5))
t.add(TextMobject("0", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(t[-2], DL, buff=-0.5))
braces = VGroup(
Brace(VGroup(Dot(nodes[0].get_center(), radius=0.01), Dot(nodes[2].get_center(), radius=0.01)), UP, color=GRAY, buff=0.52),
Brace(VGroup(Dot(nodes[2].get_center(), radius=0.01), Dot(nodes[5].get_center(), radius=0.01)), UP, color=GRAY, buff=0.52),
Brace(VGroup(Dot(nodes[1].get_center(), radius=0.01), Dot(nodes[3].get_center(), radius=0.01)), DOWN, color=GRAY, buff=0.52),
Brace(VGroup(Dot(nodes[4].get_center(), radius=0.01), Dot(nodes[6].get_center(), radius=0.01)), DOWN, color=GRAY, buff=0.52),
)
edges2 = VGroup(
CurvedArrow(nodes[0].get_center(), nodes[2].get_center(), buff=0.25, color=BLACK, angle=-TAU / 4).shift(UP*1),
CurvedArrow(nodes[2].get_center(), nodes[5].get_center(), buff=0.25, color=BLACK, angle=-TAU / 4).shift(UP*1),
CurvedArrow(nodes[1].get_center(), nodes[3].get_center(), buff=0.25, color=BLACK, angle= TAU / 4).shift(DOWN*1),
CurvedArrow(nodes[4].get_center(), nodes[6].get_center(), buff=0.25, color=BLACK, angle= TAU / 4).shift(DOWN*1),
)
caps2 = VGroup(
TextMobject("1", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(edges2[0], UP, buff=0.1),
TextMobject("1", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(edges2[1], UP, buff=0.1),
TextMobject("1", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(edges2[2], UP, buff=-0.4),
TextMobject("1", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).next_to(edges2[3], UP, buff=-0.4)
)
costs2 = VGroup(
TextMobject("6", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(edges2[0], DOWN, buff=-0.4),
TextMobject("3", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(edges2[1], DOWN, buff=-0.6),
TextMobject("2", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(edges2[2], DOWN, buff=0.1),
TextMobject("4", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.6).next_to(edges2[3], DOWN, buff=0.1),
)
old_num = VGroup(
TextMobject("1", color=GRAY, background_stroke_color=GRAY).scale(0.45),
TextMobject("6", color=GRAY, background_stroke_color=GRAY).scale(0.45),
TextMobject("7", color=GRAY, background_stroke_color=GRAY).scale(0.45),
TextMobject("8", color=GRAY, background_stroke_color=GRAY).scale(0.45),
TextMobject("9", color=GRAY, background_stroke_color=GRAY).scale(0.45),
TextMobject("10", color=GRAY, background_stroke_color=GRAY).scale(0.45),
TextMobject("13", color=GRAY, background_stroke_color=GRAY).scale(0.45),
)
for i in range(7):
old_num[i].next_to(nodes[i], DOWN, buff=0.08)
comments = VGroup(
TextMobject("蓝-容量", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.7),
TextMobject("橙-费用", color=GOLD_D, background_stroke_color=GOLD_D).scale(0.7),
).arrange(DOWN, aligned_edge=LEFT, buff=0.2).next_to(nodes[0], DOWN, buff=1.3)
rec = SurroundingRectangle(comments, color=GRAY, buff=0.2)
problem = TextMobject("最长$k$可重区间集问题", color=WHITE, background_stroke_color=WHITE)
problem.add_background_rectangle(color=GOLD_D, opacity=1, buff=0.15).next_to(nodes[0], UP, buff=1.8).shift(RIGHT*0.2)
author = TextMobject("by @鹤翔万里", background_stroke_color=ORANGE, opacity=0.8).scale(0.7).set_color(ORANGE).next_to(t[0], DOWN, buff=0.4).shift(LEFT*0.8)
self.add(line, nodes, names, s, edges, infs, costs, t, braces, edges2, caps2, costs2, old_num, comments, rec, problem, author)
class NF24P2762(Scene_):
def construct(self):
rad = 0.3
s = VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=RED),
TextMobject("s", color=RED, background_stroke_width=0).scale(0.75)
).move_to(np.array([-4.5, 0, 0]))
t = VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=RED),
TextMobject("t", color=RED, background_stroke_width=0).scale(0.75)
).move_to(np.array([4.5, 0, 0]))
nodes = VGroup(
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK),
TextMobject("1\ 实验", color=BLACK, background_stroke_color=BLACK).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([-1.6, 1, 0])),
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK),
TextMobject("2\ 实验", color=BLACK, background_stroke_color=BLACK).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([-1.6, -1, 0])),
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK, fill_opacity=1),
TextMobject("1\ 仪器", color=WHITE, background_stroke_color=WHITE).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([1.5, 2, 0])),
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK, fill_opacity=1),
TextMobject("2\ 仪器", color=WHITE, background_stroke_color=WHITE).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([1.5, 0, 0])),
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK, fill_opacity=1),
TextMobject("3\ 仪器", color=WHITE, background_stroke_color=WHITE).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([1.5, -2, 0])),
)
edges_s = VGroup(
Arrow(s[0].get_center(), nodes[0][0].get_center(), color=RED, buff=rad),
Arrow(s[0].get_center(), nodes[1][0].get_center(), color=RED, buff=rad),
)
edges_s.add(TextMobject("10", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_s[0], UP, buff=-0.3).shift(LEFT*0.3))
edges_s.add(TextMobject("25", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_s[1], DOWN, buff=-0.3).shift(LEFT*0.3))
edges_n = VGroup(
Arrow(nodes[0][-1].get_center(), nodes[2][0].get_center(), color=BLACK, buff=rad),
Arrow(nodes[0][-1].get_center(), nodes[3][0].get_center(), color=BLACK, buff=rad),
Arrow(nodes[1][-1].get_center(), nodes[3][0].get_center(), color=BLACK, buff=rad),
Arrow(nodes[1][-1].get_center(), nodes[4][0].get_center(), color=BLACK, buff=rad),
)
edges_n.add(TextMobject("inf", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_n[0], UP, buff=-0.35).shift(LEFT*0.3))
edges_n.add(TextMobject("inf", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_n[1], UP, buff=-0.25).shift(LEFT*-0.1))
edges_n.add(TextMobject("inf", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_n[2], UP, buff=-0.35).shift(LEFT*0.3))
edges_n.add(TextMobject("inf", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_n[3], UP, buff=-0.25).shift(LEFT*-0.1))
edges_t = VGroup(
Arrow(nodes[2][-1].get_center(), t[0].get_center(), color=RED, buff=rad),
Arrow(nodes[3][-1].get_center(), t[0].get_center(), color=RED, buff=rad),
Arrow(nodes[4][-1].get_center(), t[0].get_center(), color=RED, buff=rad),
)
edges_t.add(TextMobject("5", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_t[0], UP, buff=-0.5).shift(LEFT*0.15))
edges_t.add(TextMobject("6", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_t[1], UP, buff=-0.1).shift(LEFT*0.2))
edges_t.add(TextMobject("7", color=BLUE_D, background_stroke_color=BLUE_D).next_to(edges_t[2], DOWN, buff=-0.5).shift(LEFT*0.15))
min_cut = DashedLine(np.array([2.5, 3, 0]), np.array([2.5, -3, 0]), color=DARK_GRAY)
comment = VGroup(
TextMobject("蓝-容量", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.7),
TextMobject("灰-最小割(最大流)", color=DARK_GRAY, background_stroke_color=DARK_GRAY).scale(0.7)
).arrange(DOWN, aligned_edge=LEFT).next_to(edges_s[3], DOWN, buff=0.8)
rec = SurroundingRectangle(comment, color=GRAY, buff=0.2)
problem = TextMobject("太空飞行计划问题", color=WHITE, background_stroke_color=WHITE)
problem.add_background_rectangle(color=GOLD_D, opacity=1, buff=0.15).next_to(edges_s[2], UP, buff=1)
author = TextMobject("by @鹤翔万里", background_stroke_color=ORANGE, opacity=0.8).scale(0.7).set_color(ORANGE).next_to(t[0], DOWN, buff=2)
label = VGroup(
TextMobject("报酬", color=GREEN, background_stroke_color=GREEN).scale(0.6).next_to(edges_s[2], UL, buff=0.1),
TextMobject("费用", color=GREEN, background_stroke_color=GREEN).scale(0.6).next_to(edges_t[3], UR, buff=0.1),
)
self.add(s, t, edges_s, edges_n, edges_t, nodes, min_cut, comment, rec, problem, author, label)
class NF24P3357(Scene_):
def construct(self):
axes = Axes(x_min=-0.5, x_max=8, y_min=-0.5, y_max=4, number_line_config={"color": BLACK}).center().shift(DOWN*0.5)
axes.add_coordinates(number_config={"color": BLACK})
line1 = Line(axes.c2p(1, 1), axes.c2p(1, 3), color=BLUE_D)
line1_ = Line(axes.c2p(2, 1), axes.c2p(3, 3), color=BLUE_D)
line2 = Line(axes.c2p(1, 1), axes.c2p(3, 3), color=GREEN_D)
line2_ = Line(axes.c2p(3, 1), axes.c2p(6, 3), color=GREEN_D)
dots = VGroup(
SmallDot(axes.c2p(1, 1), color=GRAY),
SmallDot(axes.c2p(1, 3), color=GRAY),
SmallDot(axes.c2p(2, 1), color=GRAY),
SmallDot(axes.c2p(3, 3), color=GRAY),
SmallDot(axes.c2p(3, 1), color=GRAY),
SmallDot(axes.c2p(6, 3), color=GRAY),
)
arrows = VGroup(
CurvedArrow(line1.get_center(), line1_.get_center(), color=RED, angle=-TAU/4).scale(0.9),
CurvedArrow(line2.get_center(), line2_.get_center(), color=RED).scale(0.9).shift(DL*0.2+LEFT*0.1),
)
changes = VGroup(
TexMobject("(x_1,x_1)\\rightarrow(2x_1, 2x_1+1)", color=MAROON, background_stroke_color=MAROON).scale(0.85),
TexMobject("(x_1,x_2)\\rightarrow(2x_1+1, 2x_2)", color=MAROON, background_stroke_color=MAROON).scale(0.85)
).arrange(DOWN, aligned_edge=LEFT).next_to(axes, UP, buff=-0.5)
dls = VGroup(
DashedLine(axes.c2p(2, 0), axes.c2p(2, 1), color=PURPLE),
DashedLine(axes.c2p(3, 0), axes.c2p(3, 3), color=PURPLE),
DashedLine(axes.c2p(6, 0), axes.c2p(6, 3), color=PURPLE),
)
braces = VGroup(
Brace(VGroup(Dot(axes.c2p(2, 0), radius=0.01), Dot(axes.c2p(3, 0), radius=0.01)), UP, color=DARK_GRAY),
Brace(VGroup(Dot(axes.c2p(3, 0), radius=0.01), Dot(axes.c2p(6, 0), radius=0.01)), UP, color=DARK_GRAY),
)
problem = TextMobject("最长$k$可重线段集问题", color=WHITE, background_stroke_color=WHITE)
problem.add_background_rectangle(color=GOLD_D, opacity=1, buff=0.15).move_to(np.array([-3, 3, 0]))
author = TextMobject("by @鹤翔万里", background_stroke_color=ORANGE, opacity=0.8).scale(0.7).set_color(ORANGE)
author.move_to(np.array([4, -1.5, 0]))
comment = TextMobject("化为开区间", color=DARK_GRAY, background_stroke_color=DARK_GRAY).scale(0.6).next_to(braces[-1], UP, buff=0.2)
dots2 = VGroup(
Dot(axes.c2p(2, 0), color=BLACK, radius=0.1),
Dot(axes.c2p(3, 0), color=BLACK, radius=0.1),
Dot(axes.c2p(6, 0), color=BLACK, radius=0.1),
)
self.add(axes, line1, line1_, line2, line2_, dls, dots, arrows, changes, braces, problem, author, comment, dots2)
class NF24P2754(Scene_):
def construct(self):
self.camera.set_frame_height(9)
self.camera.resize_frame_shape(1)
rad = 0.3
lis = [3, 1.8, 0.6, -0.6, -1.8, -3]
times = VGroup(
*[
VGroup(
TextMobject("time={}".format(i), color=GRAY, background_stroke_color=GRAY).scale(0.6),
DashedLine(np.array([-5, lis[i], 0]), np.array([5, lis[i], 0]), color=GRAY)
)
for i in range(6)
]
).shift(DL+UP*0.5)
for i in range(6):
times[i][0].next_to(times[i][1], LEFT)
nodes_0 = VGroup(
*[
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK, fill_color=WHITE, fill_opacity=1),
TextMobject("0\ 地", color=BLACK, background_stroke_color=BLACK).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([-3.6, i, 0]))
for i in lis
]
)
nodes_1 = VGroup(
*[
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=ORANGE, fill_color=WHITE, fill_opacity=1),
TextMobject("1\ 站", color=ORANGE, background_stroke_color=ORANGE).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([-1.2, i, 0]))
for i in lis
]
)
nodes_2 = VGroup(
*[
VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=ORANGE, fill_color=WHITE, fill_opacity=1),
TextMobject("2\ 站", color=ORANGE, background_stroke_color=ORANGE).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([1.2, i, 0]))
for i in lis
]
)
nodes_3 = VGroup(
*[
VGroup(
Circle(radius=rad, fill_color=BLACK, fill_opacity=1, stroke_color=BLACK).shift(LEFT*0.4).set_opacity(0),
RoundedRectangle(height=rad*2, width=1.4, corner_radius=rad, color=BLACK, fill_opacity=1),
TextMobject("-1\ 月", color=WHITE, background_stroke_color=WHITE).scale(0.75),
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=BLACK).shift(RIGHT*0.4).set_opacity(0)
).move_to(np.array([3.6, i, 0]))
for i in lis
]
)
nodes = VGroup(nodes_0, nodes_1, nodes_2, nodes_3).shift(DL+UP*0.5)
sw = 6
edges_0 = VGroup(
*[
Arrow(nodes_0[i][1].get_center(), nodes_0[i + 1][1].get_center(), color=BLACK, buff=rad, stroke_width=sw)
for i in range(5)
]
)
edges_1 = VGroup(
*[
Arrow(nodes_1[i][1].get_center(), nodes_1[i + 1][1].get_center(), color=ORANGE, buff=rad, stroke_width=sw)
for i in range(5)
]
)
edges_2 = VGroup(
*[
Arrow(nodes_2[i][1].get_center(), nodes_2[i + 1][1].get_center(), color=ORANGE, buff=rad, stroke_width=sw)
for i in range(5)
]
)
edges_3 = VGroup(
*[
Arrow(nodes_3[i + 1][1].get_center(), nodes_3[i][1].get_center(), color=BLACK, buff=rad, stroke_width=sw)
for i in range(5)
]
)
car_1 = VGroup(
Arrow(nodes[0][0][-1].get_center(), nodes[1][1][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
Arrow(nodes[1][1][-1].get_center(), nodes[2][2][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
Arrow(nodes[2][2][0].get_center(), nodes[0][3][-1].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.05).set_stroke(width=4),
Arrow(nodes[0][3][-1].get_center(), nodes[1][4][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
Arrow(nodes[1][4][-1].get_center(), nodes[2][5][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
)
car_2 = VGroup(
Arrow(nodes[1][0][-1].get_center(), nodes[2][1][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
Arrow(nodes[2][1][-1].get_center(), nodes[3][2][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
Arrow(nodes[3][2][0].get_center(), nodes[1][3][-1].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.05).set_stroke(width=4),
Arrow(nodes[1][3][-1].get_center(), nodes[2][4][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
Arrow(nodes[2][4][-1].get_center(), nodes[3][5][0].get_center(), buff=rad+0.03, color=BLUE_D, max_tip_length_to_length_ratio=0.1).set_stroke(width=4),
)
cars = VGroup(car_1, car_2)
labels = VGroup(
*[
TextMobject("1", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).add_background_rectangle(color=WHITE, opacity=0.8, buff=0.1)\
.move_to(car_1[i])
for i in range(5)
],
*[
TextMobject("1", color=BLUE_D, background_stroke_color=BLUE_D).scale(0.6).add_background_rectangle(color=WHITE, opacity=0.8, buff=0.1)\
.move_to(car_2[i])
for i in range(5)
],
)
s = VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=RED),
TextMobject("s", color=RED, background_stroke_width=0).scale(0.75)
).move_to(np.array([-6, 3.5, 0]))
t = VGroup(
Circle(radius=rad, fill_color=WHITE, fill_opacity=1, stroke_color=RED),
TextMobject("t", color=RED, background_stroke_width=0).scale(0.75)
).move_to(np.array([4, 3.5, 0]))
        edge_s = Arrow(s[0].get_center(), nodes[0][0][0].get_center(), color=RED, buff=rad)
edge_t = Arrow(nodes[3][0][-1].get_center(), t[0].get_center(), color=RED, buff=rad)
problem = TextMobject("星际转移问题", color=WHITE, background_stroke_color=WHITE).scale(1.3)
problem.add_background_rectangle(color=GOLD_D, opacity=1, buff=0.15).next_to(nodes[1][0], UP, buff=0.5)
comment = VGroup(
TextMobject("未标记的边", color=GRAY, background_stroke_color=GRAY).scale(0.75),
TextMobject("容量为inf", color=BLUE, background_stroke_color=BLUE).scale(0.75)
).arrange(DOWN, aligned_edge=LEFT).move_to(np.array([5.5, -0.5, 0]))
rec = SurroundingRectangle(comment, color=GRAY, buff=0.2)
author = TextMobject("by @鹤翔万里", background_stroke_color=ORANGE, opacity=0.8).scale(0.7).set_color(ORANGE)
author.move_to(np.array([5.5, -2.5, 0]))
vdots = VGroup(
*[TexMobject("\\vdots", color=BLACK).scale(0.8) for i in range(4)]
)
for i in range(4):
vdots[i].next_to(nodes[i][-1], DOWN, buff=0.2)
edges = VGroup(edges_0, edges_1, edges_2, edges_3)
self.add(times, edges, cars, labels, edge_s, edge_t, nodes, s, t, problem, comment, rec, vdots, author)
class MosAlgoCompare1(Scene_):
def construct(self):
line = [
[-3.8, [-3, -1.8, -0.2, 0.2, 1.8, 2.2, 3.8]],
[-1.8, [-1, 0.2, 1.8, 2.2, 3.8]],
[0.2, [1.8, 2.2, 3.8]],
[2.2, [3, 3.8]]
]
lines = VGroup(
*[
VGroup(
*[
VGroup(
Line(i * RIGHT, j * RIGHT, color=BLACK),
Dot(i * RIGHT, color=DARK_GRAY, radius=0.05),
Dot(j * RIGHT, color=DARK_GRAY, radius=0.05),
) for j in k
]
).arrange(DOWN, False, buff=0.3, coor_mask=np.array([0, 1, 0]))
for i, k in line
]
).arrange(DOWN, buff=0.45, coor_mask=np.array([0, 1, 0]))
back = VGroup(
*[
DashedLine(np.array([i, 3.8, 0]), np.array([i, -3.8, 0]), color=GRAY)
for i in [-4, -2, 0, 2, 4]
]
)
lines1 = VGroup()
for group in lines:
for i, j in enumerate(group):
if j != group[-1]:
lines1.add(Line(j[-1].get_center(), group[i+1][-1].get_center(), color=RED).add_tip(0.15))
lines2 = VGroup()
for i, j in enumerate(lines):
if j != lines[-1]:
lines2.add(Line(j[-1][-1].get_center(), lines[i+1][0][-1].get_center(), color=BLUE_D).add_tip(0.15))
self.add(back, lines, lines1, lines2)
class MosAlgoCompare2(Scene_):
def construct(self):
line = [
[-3.8, [-3, -1.8, -0.2, 0.2, 1.8, 2.2, 3.8]],
[-1.8, [3.8, 2.2, 1.8, 0.2, -1]],
[0.2, [1.8, 2.2, 3.8]],
[2.2, [3.8, 3]]
]
lines = VGroup(
*[
VGroup(
*[
VGroup(
Line(i * RIGHT, j * RIGHT, color=BLACK),
Dot(i * RIGHT, color=DARK_GRAY, radius=0.05),
Dot(j * RIGHT, color=DARK_GRAY, radius=0.05),
) for j in k
]
).arrange(DOWN, False, buff=0.3, coor_mask=np.array([0, 1, 0]))
for i, k in line
]
).arrange(DOWN, buff=0.45, coor_mask=np.array([0, 1, 0]))
back = VGroup(
*[
DashedLine(np.array([i, 3.8, 0]), np.array([i, -3.8, 0]), color=GRAY)
for i in [-4, -2, 0, 2, 4]
]
)
lines1 = VGroup()
for group in lines:
for i, j in enumerate(group):
if j != group[-1]:
lines1.add(Line(j[-1].get_center(), group[i+1][-1].get_center(), color=RED).add_tip(0.15))
lines2 = VGroup()
for i, j in enumerate(lines):
if j != lines[-1]:
lines2.add(Line(j[-1][-1].get_center(), lines[i+1][0][-1].get_center(), color=BLUE_D).add_tip(0.15))
self.add(back, lines, lines1, lines2)
class RollBackMosAlgo(Scene_):
def construct(self):
line = [
[-3.6, -0.4],
[-2.4, 0.4],
[-3.6, 2.4],
[-2.4, 3.6]
]
lines = VGroup(
*[
VGroup(
Line(i * RIGHT, j * RIGHT, color=BLACK),
Dot(i * RIGHT, color=DARK_GRAY, radius=0.05),
Dot(j * RIGHT, color=DARK_GRAY, radius=0.05),
)
for i, j in line
]
).arrange(DOWN, buff=0.5, coor_mask=np.array([0, 1, 0]))
back = VGroup(
*[
DashedLine(np.array([i, 3.8, 0]), np.array([i, -3.8, 0]), color=GRAY)
for i in [-4, -2, 0, 2, 4]
]
)
s = np.array([-2, 1.5, 0])
# self.add(Dot(s, color=BLACK))
l = VGroup(
Line(s, lines[0][1].get_center(), color=ORANGE),
Line(lines[0][1].get_center(), np.array([-2, lines[0][1].get_center()[1], 0]), color=ORANGE),
Line(np.array([-2, lines[0][1].get_center()[1], 0]), lines[1][1].get_center(), color=ORANGE),
Line(lines[1][1].get_center(), np.array([-2, lines[1][1].get_center()[1], 0]), color=ORANGE),
Line(np.array([-2, lines[1][1].get_center()[1], 0]), lines[2][1].get_center(), color=ORANGE),
Line(lines[2][1].get_center(), np.array([-2, lines[2][1].get_center()[1], 0]), color=ORANGE),
Line(np.array([-2, lines[2][1].get_center()[1], 0]), lines[3][1].get_center(), color=ORANGE),
Line(lines[3][1].get_center(), np.array([-2, lines[3][1].get_center()[1], 0]), color=ORANGE),
)
for each in l:
each.add_tip(0.2)
r = VGroup(
Line(s, lines[0][2].get_center(), color=BLUE_D),
Line(lines[0][2].get_center(), lines[1][2].get_center(), color=BLUE_D),
Line(lines[1][2].get_center(), lines[2][2].get_center(), color=BLUE_D),
Line(lines[2][2].get_center(), lines[3][2].get_center(), color=BLUE_D),
)
for each in r:
each.add_tip(0.2)
self.add(lines, back, l, r)
|
168681
|
from keanu.vertex import Gamma
from keanu import BayesNet
from keanu.network_io import ProtobufLoader, JsonLoader, ProtobufSaver, DotSaver, JsonSaver
def test_can_save_and_load(tmpdir) -> None:
PROTO_FILE_NAME = str(tmpdir.join("test.proto"))
JSON_FILE_NAME = str(tmpdir.join("test.json"))
DOT_FILE_NAME = str(tmpdir.join("test.dot"))
gamma = Gamma(1.0, 1.0)
gamma.set_value(2.5)
# %%SNIPPET_START%% PythonSaveSnippet
net = BayesNet(gamma.iter_connected_graph())
metadata = {"Author": "Documentation Team"}
protobuf_saver = ProtobufSaver(net)
protobuf_saver.save(PROTO_FILE_NAME, True, metadata)
json_saver = JsonSaver(net)
json_saver.save(JSON_FILE_NAME, True, metadata)
dot_saver = DotSaver(net)
dot_saver.save(DOT_FILE_NAME, True, metadata)
# %%SNIPPET_END%% PythonSaveSnippet
# %%SNIPPET_START%% PythonLoadSnippet
protobuf_loader = ProtobufLoader()
new_net_from_proto = protobuf_loader.load(PROTO_FILE_NAME)
json_loader = JsonLoader()
new_net_from_json = json_loader.load(JSON_FILE_NAME)
# %%SNIPPET_END%% PythonLoadSnippet
|
168688
|
import re
import six
from sqlalchemy import inspect
from jet_bridge_base.exceptions.validation_error import ValidationError
def serialize_validation_error(exc):
def process(e, root=False):
if isinstance(e.detail, dict):
return dict(map(lambda x: (x[0], process(x[1])), e.detail.items()))
elif isinstance(e.detail, list):
return list(map(lambda x: process(x), e.detail))
elif root:
return {'non_field_errors': [e.detail]}
else:
return e.detail
return process(exc, root=True)
def validation_error_from_database_error(e, model):
if hasattr(e, 'orig'):
if hasattr(e.orig, 'args') and hasattr(e.orig.args, '__getitem__'):
if len(e.orig.args) == 1:
message = e.orig.args[0]
elif len(e.orig.args) == 2:
message = e.orig.args[1]
else:
message = e.orig.args
message = six.text_type(message)
regex = [
[r'Key\s\((.+)\)=\((.+)\)\salready\sexists', 1, 2], # PostgreSQL
[r'Duplicate\sentry\s\'(.+)\'\sfor key\s\'(.+)\'', 2, 1], # MySQL
[r'UNIQUE\sconstraint\sfailed\:\s(.+)\.(.+)', 2, None] # SQLite
]
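            # Example messages these patterns are meant to match (values illustrative):
            #   PostgreSQL: Key (email)=(a@b.c) already exists
            #   MySQL:      Duplicate entry 'a@b.c' for key 'email'
            #   SQLite:     UNIQUE constraint failed: users.email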
for (r, field_index, value_index) in regex:
match = re.search(r, message, re.IGNORECASE | re.MULTILINE)
if match:
mapper = inspect(model)
columns = dict(map(lambda x: (x.key, x), mapper.columns))
column_name = match.group(field_index)
if column_name in columns:
error = dict()
error[column_name] = ValidationError('record with the same value already exists')
return ValidationError(error)
return ValidationError(message)
return ValidationError('Query failed')
|
168690
|
import random
import bintrees
import threading
from itertools import count
from collections import Counter
import tensorflow as tf
from joblib import Parallel, delayed
from lib.ops import get_available_gpus
from lib.trainer import SampleBasedTrainer
class MultiGPUTrainer:
def __init__(self, name, make_model,
devices=get_available_gpus(),
master_device=None,
TrainerClass=SampleBasedTrainer,
sess=None, *args, verbose=False, **kwargs):
""" A wrapper-class that performs batch-parallel training with some trainer. """
self.name = name
self.sess = sess = sess or tf.get_default_session() or tf.InteractiveSession()
self.master_device = master_device = master_device or next(iter(devices))
assert master_device in devices
self.verbose = verbose
class Worker(TrainerClass):
def get_optimizer(self, *args, **kwargs):
""" Worker does not update weights by itself. use sgd to avoid wasting memory """
return tf.train.GradientDescentOptimizer(learning_rate=0)
with tf.variable_scope(name):
self.workers_by_device = {}
for i, device in enumerate(devices):
with tf.device(device), tf.variable_scope('worker_%i' % i):
model = make_model()
if device == master_device:
worker = TrainerClass(model, *args, **kwargs)
else:
worker = Worker(model, *args, **kwargs)
self.workers_by_device[device] = worker
if verbose:
print("Created model {} weights and worker on device {}"
"".format(model.name, device))
self.master_model = self.workers_by_device[master_device].model
self.master_worker = self.workers_by_device[self.master_device]
assert isinstance(self.master_worker, TrainerClass)
# step 1: send main model's weights to all worker replicas
self.scatter_weights = []
for device, worker in self.workers_by_device.items():
if worker == self.master_worker:
continue
self.scatter_weights.extend(map(tf.assign,
worker.optimized_variables,
self.master_worker.optimized_variables))
# step 2: compute grads and counters at all workers
self.gather_grads, self.gather_counters = [], []
for device, worker in self.workers_by_device.items():
if worker == self.master_worker:
continue
self.gather_grads.extend(
map(tf.assign_add, self.master_worker.accumulated_grads, worker.accumulated_grads)
)
self.gather_grads.append(
tf.assign_add(self.master_worker.accumulated_num_batches, worker.accumulated_num_batches)
)
master_counters_flat = [self.master_worker.accumulated_counters[name]
for name in sorted(self.master_worker.accumulated_counters.keys())]
worker_counters_flat = [worker.accumulated_counters[name]
for name in sorted(self.master_worker.accumulated_counters.keys())]
self.gather_counters.extend(
map(tf.assign_add, master_counters_flat, worker_counters_flat)
)
# step 3: perform gradient step and reset all accumulated values
self.reset_slave_grads = [
worker.reset_gradients for worker in self.workers_by_device.values()
if worker != self.master_worker
]
self.reset_slave_counters = [
worker.reset_counters for worker in self.workers_by_device.values()
if worker != self.master_worker
]
def train_on_batches(self, batches, optimizer_step=True, reset_counters=None, **kwargs):
sess = self.sess
lock = threading.Lock()
available_devices = set(self.workers_by_device.keys())
def _thread(batch):
with lock:
                assert len(available_devices) != 0, "all devices busy. this shouldn't ever happen"
device = available_devices.pop()
if self.verbose:
print("thread {} acquired device {}".format(threading.get_ident(), device), flush=True)
result = self.workers_by_device[device].train_on_batch(batch, optimizer_step=False,
reset_counters=False, **kwargs)
with lock:
if self.verbose:
print("thread {} released device {}".format(threading.get_ident(), device), flush=True)
available_devices.add(device)
return result
tasks = [delayed(_thread)(batch) for batch in batches]
_ = Parallel(backend='threading', n_jobs=len(self.workers_by_device))(tasks)
sess.run(self.gather_counters)
metrics = sess.run(self.master_worker.compute_metrics)
if optimizer_step:
sess.run(self.gather_grads)
sess.run(self.master_worker.apply_gradients)
sess.run([self.master_worker.reset_gradients, self.reset_slave_grads, self.scatter_weights])
if reset_counters is None:
reset_counters = optimizer_step
if reset_counters:
sess.run(self.master_worker.reset_counters)
return metrics
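# Usage sketch (illustrative, not from the original code): assuming make_model
# builds a fresh model under the current variable scope and `batches` holds
# one batch per available GPU:
#   trainer = MultiGPUTrainer('mgpu_trainer', make_model)
#   metrics = trainer.train_on_batches(batches, optimizer_step=True)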
class ParallelBatchIterator:
def __init__(self, iterator, cost_func, n_buffers, random_state=42, **kwargs):
"""
groups iterator items (batches) by cost and returns tuples of batches with
approximtely the same cost. Uses cost buffer
:param iterator: iterator over batches
:param cost_func: lambda batch -> float cost,
typically it's approximate time it takes to process batch
:param n_buffers: number of batches to return in parallel.
"""
self.available_costs, self.cost_counts = set(), Counter()
self.cost_func = cost_func
def iterate_with_costs(iterator):
for batch in iterator:
cost = cost_func(batch)
self.available_costs.add(cost)
self.cost_counts[cost] += 1
yield batch, cost
self.cost_buffers = [
self.CostBuffer(iterate_with_costs(iterator)) for _ in range(n_buffers)
]
        self.rng = random.Random(random_state)
def __iter__(self):
return self
def __next__(self):
# sample random existing cost
        sample_cost = self.rng.choice(tuple(self.available_costs)) if len(self.available_costs) else 0
batches, costs = zip(*(cost_buffer.pop(sample_cost)
for cost_buffer in self.cost_buffers))
for cost in costs:
self.cost_counts[cost] -= 1
if self.cost_counts[cost] <= 0:
self.cost_counts.pop(cost)
self.available_costs.remove(cost)
return batches
class CostBuffer:
def __init__(self, iterator_with_costs, buf_size=1000):
"""
A tool for quickly finding batches with approximately specified cost.
Used for multi-gpu batch balancing
Credits: Yandex MT team
"""
self.iterator_with_costs = iterator_with_costs
self.current_size, self.max_size = 0, buf_size
self.tree = bintrees.FastRBTree()
def pop(self, target_cost):
""" Samples batch near target_cost. """
# warmup: read from iterator
for batch, cost in self.iterator_with_costs:
if cost in self.tree:
self.tree[cost].append(batch)
else:
self.tree[cost] = [batch]
self.current_size += 1
if self.current_size >= self.max_size:
break
if not len(self.tree):
raise StopIteration("No elements left in buffer/iterator")
# generate cost to choose and choose relevant batch
cost = self._find_nearest_cost(target_cost)
batch = self.tree[cost][0]
# remove selected items from structures
del self.tree[cost][0]
if len(self.tree[cost]) == 0:
del self.tree[cost]
self.current_size -= 1
return batch, cost
def _find_nearest_cost(self, cost):
cost = max(cost, self.tree.min_key())
cost = min(cost, self.tree.max_key())
floor_cost = self.tree.floor_key(cost)
ceil_cost = self.tree.ceiling_key(cost)
return floor_cost if abs(ceil_cost - cost) < abs(floor_cost - cost) else ceil_cost
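# Usage sketch (illustrative): balance two GPUs by using the number of
# examples as a cheap cost proxy; each __next__ yields one batch per buffer:
#   batch_iter = ParallelBatchIterator(iter(all_batches), cost_func=len, n_buffers=2)
#   for batch_a, batch_b in batch_iter:
#       ...  # hand one batch to each worker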
|
168735
|
import asyncio
def main():
print("Creating our event loop")
loop = asyncio.get_event_loop()
    loop.run_forever()
    # run_forever() blocks indefinitely, so the following line is never reached
    print("Our Loop will now run forever, this will never execute")
if __name__ == '__main__':
main()
|
168738
|
from functools import wraps
from django.http.response import HttpResponse
from atlassian_jwt_auth.frameworks.django.decorators import with_asap
def validate_asap(issuers=None, subjects=None, required=True):
"""Decorator to allow endpoint-specific ASAP authorization, assuming ASAP
authentication has already occurred.
:param list issuers: A list of issuers that are allowed to use the
endpoint.
:param list subjects: A list of subjects that are allowed to use the
endpoint.
:param boolean required: Whether or not to require ASAP on this endpoint.
        Note that requirements will still be verified if claims are present.
"""
def validate_asap_decorator(func):
@wraps(func)
def validate_asap_wrapper(request, *args, **kwargs):
asap_claims = getattr(request, 'asap_claims', None)
if required and not asap_claims:
message = 'Unauthorized: Invalid or missing token'
response = HttpResponse(message, status=401)
response['WWW-Authenticate'] = 'Bearer'
return response
if asap_claims:
iss = asap_claims['iss']
if issuers and iss not in issuers:
message = 'Forbidden: Invalid token issuer'
return HttpResponse(message, status=403)
sub = asap_claims.get('sub')
if subjects and sub not in subjects:
message = 'Forbidden: Invalid token subject'
return HttpResponse(message, status=403)
return func(request, *args, **kwargs)
return validate_asap_wrapper
return validate_asap_decorator
def requires_asap(issuers=None, subject_should_match_issuer=None, func=None):
"""Decorator for Django endpoints to require ASAP
    :param list issuers: (required) The 'iss' claims that this endpoint accepts.
"""
return with_asap(func=func,
required=True,
issuers=issuers,
subject_should_match_issuer=subject_should_match_issuer)
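# Minimal usage sketch (issuer/subject values and the view are illustrative;
# assumes the ASAP middleware has already populated request.asap_claims):
@requires_asap(issuers=['service-a'])
@validate_asap(subjects=['service-a'])
def example_view(request):
    return HttpResponse('ok')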
|
168758
|
import os
import typing
class MetadataToPsvTransformer:
"""
Abstract class for transforming DSS metadata to PSV rows.
"""
PSV_EXT = ".psv"
OUTPUT_DIRNAME = "output"
LOG_DIRNAME = "logs"
def __init__(self, staging_dir):
self.staging_dir = staging_dir
self.output_dir = os.path.join(staging_dir,
MetadataToPsvTransformer.OUTPUT_DIRNAME)
def transform(self, bundle_search_dir: str):
"""
Parse a directory containing one or more bundle directories to extract
metadata PSV rows and write them to a file.
:param bundle_search_dir: Local path to bundle contents
"""
self._write_rows_to_psvs(*self._parse_from_metadatas(bundle_search_dir))
def transform_bundle(self, bundle_dir: str, bundle_manifest_path: str):
"""
Parses one bundle's JSON metadata and writes rows to corresponding PSV file(s).
:param bundle_dir: Local path to bundle contents
:param bundle_manifest_path: Path to the bundle's manifest
"""
self._write_rows_to_psvs(*self._parse_from_metadatas(bundle_dir, bundle_manifest_path))
def _parse_from_metadatas(self, bundle_dir: str, bundle_manifest_path: typing.Optional[str] = None):
"""
Parses JSON metadata for a bundle into set(s) of PSV rows.
:param bundle_dir: Local path to bundle contents
"""
raise NotImplementedError
def _write_rows_to_psvs(self, *args: typing.Tuple):
"""
Writes row(s) to specified PSV file(s).
:param args: n Tuples (TableName, Set(str)) where
TableName: the table to write to and
Set(str): A str element represents a row to write
:return: None
"""
for arg in args:
table = arg[0]
rows = arg[1]
out_file = os.path.join(self.output_dir,
table.value + MetadataToPsvTransformer.PSV_EXT)
with open(out_file, 'a') as fh:
for row in rows:
fh.write(row + "\n")
@staticmethod
def _generate_psv_row(*args):
return '|'.join(args)
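# A minimal concrete transformer sketch (the Table enum and the parsed row
# below are illustrative assumptions, not part of the original module):
import enum

class Table(enum.Enum):
    CELL = "cell"

class CellMetadataTransformer(MetadataToPsvTransformer):
    def _parse_from_metadatas(self, bundle_dir: str, bundle_manifest_path: typing.Optional[str] = None):
        # one table, one row; the inherited _write_rows_to_psvs appends it to output/cell.psv
        row = MetadataToPsvTransformer._generate_psv_row("some-bundle-uuid", bundle_dir)
        return ((Table.CELL, {row}),)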
|
168767
|
str = "RahulShettyAcademy.com"
str1 = "Consulting firm"
str3 = "RahulShetty"
print(str[1]) #a
print(str[0:5]) # if you want substring in python
print(str+str1) # concatenation
print(str3 in str) # substring check
var = str.split(".")
print(var)
print(var[0])
str4 = " great "
print(str4.strip())
print(str4.lstrip())
print(str4.rstrip())
|
168801
|
from activfuncs import plot, x
import numpy as np
def softplus(x):
return np.log(1+np.exp(x))
plot(softplus, yaxis=(-0.4, 1.4))
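# Note (addition, not in the original snippet): np.log(1 + np.exp(x)) overflows
# for large positive x; an equivalent, numerically stable formulation is:
def softplus_stable(x):
    return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))

plot(softplus_stable, yaxis=(-0.4, 1.4))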
|
168834
|
import os
import json
from easydict import EasyDict
from pprint import pprint
from utils.dirs import create_dirs
def get_config_from_json(json_file):
"""
Get the config from a json file
:param json_file: the path of the config file
:return: config(namespace), config(dictionary)
"""
# parse the configurations from the config json file provided
with open(json_file, 'r') as config_file:
try:
config_dict = json.load(config_file)
        except ValueError as e:
            print("Invalid JSON file format ({}). Please provide a valid json file".format(e))
            exit(-1)
    # convert the dictionary to a namespace using EasyDict
config = EasyDict(config_dict)
return config, config_dict
def process_config(json_file):
"""
    Parse the json file, set the experiment folder paths in the config, create those dirs and return the config
:param json_file: the path of the config file
:return: config object(namespace)
"""
config, _ = get_config_from_json(json_file)
print(" THE Configuration of your experiment ..")
pprint(config)
print(" *************************************** ")
try:
config.summary_dir = os.path.join("experiments", config.exp_name, "summaries/")
config.checkpoint_dir = os.path.join("experiments", config.exp_name, "checkpoints/")
config.out_dir = os.path.join("experiments", config.exp_name, "out/")
create_dirs([config.summary_dir, config.checkpoint_dir, config.out_dir])
except AttributeError as e:
print("ERROR!!..Please provide the exp_name in json file..")
exit(-1)
return config
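# An illustrative config.json that process_config() can consume (only exp_name
# is required by this module; the other fields are hypothetical):
# {
#     "exp_name": "example_experiment",
#     "learning_rate": 0.001,
#     "batch_size": 32
# }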
|
168924
|
import unittest
M = None
MN = None
MN2 = None
class NestedTest(unittest.TestCase):
"""
Verifies we can create instance from nested proto file.
https://github.com/appnexus/pyrobuf/issues/55
"""
@classmethod
def setUpClass(cls):
global M
global MN
global MN2
from test_nested_issue55_proto import M, MN, MN2
def test_use_nested(self):
"""
        Simple test that just verifies we can create M, MN and MN2 objects.
"""
message_m = M()
message_n = MN()
message_n2 = MN2()
if __name__ == "__main__":
unittest.main()
|
168931
|
from datetime import datetime, date
import django.test
from contracts.models import Contract, Entity, ProcedureType
from contracts.views_data import *
from contracts.views_analysis import ANALYSIS
class TestAnalysis(django.test.TestCase):
def test_contracts_price_histogram(self):
Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2003, month=1, day=1),
signing_date=datetime(year=2003, month=1, day=1))
response = self.client.get(
reverse(analysis_selector, args=('contracts-price-histogram-json',)))
self.assertEqual(200, response.status_code)
self.assertEqual([{'values': [], 'key': 'histogram of contracts values'}],
json.loads(response.content.decode('utf-8')))
Contract.objects.all().delete()
for x in range(10):
Contract.objects.create(
base_id=x, contract_description='da', price=1000,
added_date=datetime(year=2003, month=1, day=1),
signing_date=datetime(year=2003, month=1, day=1))
response = self.client.get(
reverse(analysis_selector, args=('contracts-price-histogram-json',)))
self.assertEqual(200, response.status_code)
self.assertEqual([{'values': [{'min_value': 8, 'max_value': 16,
'count': 10}],
'key': 'histogram of contracts values'}],
json.loads(response.content.decode('utf-8')))
def test_entities_values_histogram(self):
c = Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2003, month=1, day=1),
signing_date=datetime(year=2003, month=1, day=1))
e1 = Entity.objects.create(name='test1', base_id=1, nif='nif')
e2 = Entity.objects.create(name='test2', base_id=2, nif='nif')
c.contractors.add(e1)
c.contracted.add(e2)
e1.compute_data()
e2.compute_data()
response = self.client.get(
reverse(analysis_selector, args=('entities-values-histogram-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual({'values': [{'value': 1, 'min_value': 8, 'max_value': 16}],
'key': 'entities earning'},
result[0])
self.assertEqual({'values': [{'value': 0, 'min_value': 8, 'max_value': 16}],
'key': 'entities expending'},
result[1])
def test_contracts_statistics(self):
Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2003, month=1, day=1),
signing_date=datetime(year=2003, month=1, day=1))
today = date.today()
Contract.objects.create(
base_id=2, contract_description='da', price=1000,
added_date=datetime(year=today.year, month=today.month, day=1),
signing_date=datetime(year=today.year, month=today.month, day=1))
data = analysis_manager.get_analysis("contracts_statistics")
self.assertEqual(data['total_sum'], 2000)
self.assertEqual(data['total_count'], 2)
self.assertEqual(data['year_sum'], 1000)
self.assertEqual(data['year_count'], 1)
self.assertEqual(data['month_sum'], 1000)
self.assertEqual(data['month_count'], 1)
def test_ProceduresTimeSeriesJsonView(self):
# e1 is a municipality
e1 = Entity.objects.create(name='test1', base_id=1, nif='506572218')
e2 = Entity.objects.create(name='test2', base_id=2, nif='nif')
p1 = ProcedureType.objects.create(name='Test1', base_id=1)
p2 = ProcedureType.objects.create(name='Test2', base_id=2)
c1 = Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2011, month=1, day=1),
signing_date=datetime(year=2011, month=1, day=1),
procedure_type=p1,
)
c1.contractors.add(e1)
c1.contracted.add(e2)
c2 = Contract.objects.create(
base_id=2, contract_description='da', price=2000,
added_date=datetime(year=2011, month=2, day=1),
signing_date=datetime(year=2011, month=2, day=1),
procedure_type=p2,
)
c2.contractors.add(e1)
c2.contracted.add(e2)
# contract without date should not count anywhere
Contract.objects.create(
base_id=3, contract_description='da', price=2000,
added_date=datetime(year=2011, month=2, day=1),
signing_date=None,
procedure_type=p2,
)
expected = [{'key': 'Test1', 'values':
[{'value': 10, 'month': '2011-01', 'count': 1},
{'value': 0, 'month': '2011-02', 'count': 0}
]},
{'key': 'Test2', 'values':
[{'value': 0, 'month': '2011-01', 'count': 0},
{'value': 20, 'month': '2011-02', 'count': 1},
]}
]
response = self.client.get(
reverse(analysis_selector, args=('procedure-types-time-series-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected, result)
# since e1 is municipality, the outcome should be the same
response = self.client.get(
reverse(analysis_selector, args=('municipalities-procedure-types-'
'time-series-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected, result)
def test_ContractsTimeSeriesJsonView(self):
# e1 is a municipality
e1 = Entity.objects.create(name='test1', base_id=1, nif='506572218')
e2 = Entity.objects.create(name='test2', base_id=2, nif='nif')
c1 = Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2011, month=1, day=1),
signing_date=datetime(year=2011, month=1, day=1),
)
c1.contractors.add(e1)
c1.contracted.add(e2)
c2 = Contract.objects.create(
base_id=2, contract_description='da', price=2000,
added_date=datetime(year=2011, month=2, day=1),
signing_date=datetime(year=2011, month=2, day=1),
)
c2.contractors.add(e1)
c2.contracted.add(e2)
response = self.client.get(
reverse(analysis_selector, args=('municipalities-contracts-time-'
'series-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
expected = [{'key': 'contracts', 'bar': True, 'values':
[{'value': 1, 'month': '2011-01'},
{'value': 1, 'month': '2011-02'},
]},
{'key': 'value', 'color': 'black', 'values':
[{'value': 10, 'month': '2011-01'},
{'value': 20, 'month': '2011-02'}
]}
]
self.assertEqual(expected, result)
response = self.client.get(
reverse(analysis_selector, args=('contracts-time-series-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected, result)
response = self.client.get(
reverse(analysis_selector, args=('excluding-municipalities-contracts-'
'time-series-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual([{'key': 'contracts', 'bar': True, 'values': []},
{'key': 'value', 'color': 'black', 'values': []}],
result)
# change e1 to a ministry
e1.name = 'Secretaria-Geral do Ministério da Educação'
e1.save()
response = self.client.get(
reverse(analysis_selector, args=('ministries-contracts-'
'time-series-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected, result)
def test_lorenz_curve(self):
# e1 always contracts; e2 and e3 receive
e1 = Entity.objects.create(name='test1', base_id=1, nif='nif')
e2 = Entity.objects.create(name='test2', base_id=2, nif='nif')
e3 = Entity.objects.create(name='test2', base_id=3, nif='nif')
c1 = Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2010, month=1, day=1),
signing_date=datetime(year=2010, month=1, day=1))
c1.contractors.add(e1)
c1.contracted.add(e2)
c2 = Contract.objects.create(
base_id=2, contract_description='da', price=2000,
added_date=datetime(year=2010, month=1, day=1),
signing_date=datetime(year=2010, month=1, day=1))
c2.contractors.add(e1)
c2.contracted.add(e3)
e1.compute_data()
e2.compute_data()
e3.compute_data()
response = self.client.get(
reverse(analysis_selector, args=('contracted-lorenz-curve-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual({'values': [{'cumulative': 0.0, 'rank': 0.0},
{'cumulative': 1.0, 'rank': 1.0}],
'key': 'Equality line'},
result[0])
self.assertEqual({'values': [{'cumulative': 0.3333333333333333, 'rank': 0.0},
{'cumulative': 1.0, 'rank': 1.0}],
'key': 'Lorenz curve of private entities'},
result[1])
def test_municipalities_ranking(self):
# e1 and e2 are municipalities
e1 = Entity.objects.create(name='test1', base_id=1, nif='506780902')
e2 = Entity.objects.create(name='test2', base_id=2, nif='506572218')
c1 = Contract.objects.create(
base_id=1, contract_description='da', price=1000,
added_date=datetime(year=2010, month=1, day=3),
signing_date=datetime(year=2010, month=1, day=1),
description='BlaBla')
c1.contractors.add(e1)
c2 = Contract.objects.create(
base_id=2, contract_description='da', price=2000,
added_date=datetime(year=2010, month=2, day=1),
signing_date=datetime(year=2010, month=2, day=1))
c2.contractors.add(e2)
c3 = Contract.objects.create(
base_id=3, contract_description='da', price=3000,
added_date=datetime(year=2011, month=2, day=1),
signing_date=datetime(year=2011, month=2, day=1))
c3.contractors.add(e2)
response = self.client.get(
reverse(analysis_selector, args=('municipalities-ranking-json',)))
self.assertEqual(200, response.status_code)
result = json.loads(response.content.decode('utf-8'))
self.assertEqual('Cartaxo', result[0]['key'])
self.assertEqual('/entity/id1/cartaxo', result[0]['url'])
self.assertEqual(2, len(result[0]['values']))
self.assertEqual({'avg_deltat_rank': 2,
'avg_specificity': 0.0,
'avg_good_text': 1.0,
'avg_good_text_rank': 1,
'avg_deltat': 2.0,
'year': '2010',
'avg_specificity_rank': 1,
'count': 1,
'value': 10.0}, result[0]['values'][0])
self.assertEqual({'avg_deltat_rank': None,
'avg_specificity': None,
'avg_good_text': None,
'avg_good_text_rank': None,
'avg_deltat': None,
'year': '2011',
'avg_specificity_rank': None,
'count': 0,
'value': 0}, result[0]['values'][1])
self.assertEqual('Lamego', result[1]['key'])
self.assertEqual('/entity/id2/lamego', result[1]['url'])
self.assertEqual(2, len(result[1]['values']))
self.assertEqual({'avg_deltat_rank': 1,
'avg_specificity': 0.0,
'avg_good_text': 0.0,
'avg_good_text_rank': 2,
'avg_deltat': 0.0,
'year': '2010',
'avg_specificity_rank': 1,
'count': 1,
'value': 20.0}, result[1]['values'][0])
self.assertEqual({'avg_deltat_rank': 1,
'avg_specificity': 0.0,
'avg_good_text': 0.0,
'avg_good_text_rank': 1,
'avg_deltat': 0,
'year': '2011',
'avg_specificity_rank': 1,
'count': 1,
'value': 30.0}, result[1]['values'][1])
def test_data_not_found(self):
response = self.client.get(reverse(analysis_selector,
args=('invalid-name',)))
self.assertEqual(404, response.status_code)
class TestAnalysisViews(django.test.TestCase):
def test_views(self):
for name in ANALYSIS:
identity = ANALYSIS[name]['id']
title = ANALYSIS[name]['title']
response = self.client.get(
reverse('contracts_analysis_selector',
args=(identity, slugify(title))))
self.assertEqual(200, response.status_code)
response = self.client.get(
reverse('contracts_analysis_selector', args=(10000, 'bla')))
self.assertEqual(302, response.status_code)
def test_analysis_list(self):
response = self.client.get(reverse('contracts_analysis'))
self.assertEqual(200, response.status_code)
self.assertEqual(len(ANALYSIS), len(response.context['analysis']))
|
168933
|
import operator
import unittest
import cyordereddict
class TestOrderedDict(unittest.TestCase):
def setUp(self):
data = [('x', 0), ('y', 1), ('z', 2)]
self.dicts = []
try:
import collections
self.dicts.append(collections.OrderedDict(data))
except AttributeError:
# python 2.6
pass
self.dicts.append(cyordereddict.OrderedDict(data))
def test_rich_cmp(self):
# verify rich comparison still works as expected on Python 2
ops = ['lt', 'le', 'gt', 'ge']
for op in ops:
for obj in self.dicts:
f = getattr(operator, op)
self.assertTrue(f(obj, {}) is not None)
self.assertTrue(f({}, obj) is not None)
|
168937
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'somesecretkey'
LAZYLOAD_LDA = False
ALLOW_ANON = True
DEFAULT_LDA_MODEL = 'Demo'
DEFAULT_DB = 'Demo Keyword-based Model'
# Flask-Uploads configs - used in /app/__init__.py
    # the names have the format UPLOADED_<NAME>_DEST, as defined by Flask-Uploads
UPLOADED_PAPERS_DEST = os.environ.get(
'UPLOADED_PAPERS_DEST') or basedir + '/app/static/upload/papers/'
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(Config):
# debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
DEBUG_TB_PROFILER_ENABLED = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
DEBUG = False
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
DEBUG = False
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
anon = {
"family_name": "",
"user_id": "0",
"name": "Anonymous",
"picture": "https://randomuser.me/api/portraits/lego/1.jpg",
"locale": "en",
"email_verified": True,
"nickname": "Anon",
"given_name": "",
"email": "<EMAIL>",
}
|
168945
|
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str)
parser.add_argument("--data_dir", type=str)
args = parser.parse_args()
for file in os.listdir(args.data_dir):
    # only test files yield a complete evaluation command
    if 'test' not in file:
        continue
    cmd = 'python scripts/evaluate_feats.py '
    cmd += ' --reference ' + os.path.join(args.data_dir, file)
    cmd += ' --output ' + os.path.join(args.model, file)
    cmd += ' > ' + os.path.join(args.model, file) + '.feat.json'
    print(cmd)
|
168951
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('smsbillables', '0010_gateway_fee_amount_null'),
]
operations = [
migrations.AlterField(
model_name='smsbillable',
name='date_created',
field=models.DateTimeField(auto_now_add=True),
preserve_default=True,
),
migrations.AlterField(
model_name='smsbillable',
name='date_sent',
field=models.DateTimeField(),
preserve_default=True,
),
]
|
168958
|
class Config:
def __init__(self):
self.DARKNET_PATH = "lib/pyyolo/darknet"
self.DATACFG = "data/obj.data"
self.CFGFILE = "cfg/yolo-obj.cfg"
self.WEIGHTFILE_SKETCH = "models/yolo-obj_final_sketch.weights"
self.WEIGHTFILE_TEMPLATES = "models/yolo-obj_45000.weights"
self.CLASSESFILE = "data/predefined_classes.txt"
self.THRESH = 0.45
self.HIER_THRESH = 0.5
self.OUT_DIR = "server/static/images"
self.ASSET_DIR = "assets"
# Snap to grid
self.gheight = 200
self.gwidth = 200
        # Color codes (BGR tuples, OpenCV convention)
self.cc_default = (0, 255, 0)
self.cc_blue = (255, 0, 0)
self.cc_green = (0, 255, 0)
self.cc_red = (0, 0, 255)
self.color_scheme = {
"red": self.cc_red,
"green": self.cc_green,
"blue": self.cc_blue,
"default": self.cc_default,
}
|
169009
|
from typing import Any, List
from boa3.builtin.interop.contract import create_multisig_account
from boa3.builtin.type import ECPoint, UInt160
def main(minimum_sigs: int, public_keys: List[ECPoint], arg: Any) -> UInt160:
return create_multisig_account(minimum_sigs, public_keys, arg)
|
169032
|
class RemoteDockerException(RuntimeError):
pass
class InstanceNotRunning(RemoteDockerException):
pass
|
169043
|
import math
import torch.nn as nn
from rls.nn.activations import Act_REGISTER, default_act
Vec_REGISTER = {}
class VectorIdentityNetwork(nn.Sequential):
def __init__(self, in_dim, *args, **kwargs):
super().__init__()
self.h_dim = self.in_dim = in_dim
        self.add_module('identity', nn.Identity())
class VectorAdaptiveNetwork(nn.Sequential):
def __init__(self, in_dim, h_dim=16, **kwargs):
super().__init__()
self.in_dim = in_dim
self.h_dim = h_dim
x = math.log2(self.h_dim)
y = math.log2(self.in_dim)
l = math.ceil(x) + 1 if math.ceil(x) == math.floor(x) else math.ceil(x)
r = math.floor(y) if math.ceil(y) == math.floor(y) else math.ceil(y)
outs = list(map(lambda x: 2 ** x, range(l, r)[::-1]))
ins = [self.in_dim] + outs[:-1]
for i, (_in, _out) in enumerate(zip(ins, outs)):
self.add_module(f'linear_{i}', nn.Linear(_in, _out))
self.add_module(f'{default_act}_{i}', Act_REGISTER[default_act]())
if outs:
ins = outs[-1]
else:
ins = self.in_dim
self.add_module('linear', nn.Linear(ins, self.h_dim))
self.add_module(f'{default_act}', Act_REGISTER[default_act]())
Vec_REGISTER['identity'] = VectorIdentityNetwork
Vec_REGISTER['adaptive'] = VectorAdaptiveNetwork
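# Worked sizing example (illustrative): with in_dim=100 and h_dim=16,
# x = log2(16) = 4.0 and y = log2(100) ~ 6.64, so l = 5, r = 7 and
# outs = [64, 32]; the resulting stack is
# Linear(100, 64) -> act -> Linear(64, 32) -> act -> Linear(32, 16) -> act.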
|
169050
|
import numpy as np
def run_optimizer(opt, cost_f, iterations, *args, **kwargs):
errors = [cost_f.eval(cost_f.x_start, cost_f.y_start)]
    xs, ys = [cost_f.x_start], [cost_f.y_start]
    for _ in range(iterations):
        x, y = opt.step(*args, **kwargs)
xs.append(x)
ys.append(y)
errors.append(cost_f.eval(x,y))
distance = np.sqrt((np.array(xs)-cost_f.x_optimum)**2 + (np.array(ys)-cost_f.y_optimum)**2)
return errors, distance, xs, ys
class Optimizer:
def __init__(self, cost_f, lr, x, y, **kwargs):
self.lr = lr
self.cost_f = cost_f
        if x is None or y is None:
self.x = self.cost_f.x_start
self.y = self.cost_f.y_start
else:
self.x = x
self.y = y
self.__dict__.update(kwargs)
def step(self, lr):
raise NotImplementedError()
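# A minimal concrete subclass sketch (df_dx / df_dy are assumed gradient
# methods on the cost function; the names are illustrative):
class GradientDescent(Optimizer):
    def step(self):
        gx = self.cost_f.df_dx(self.x, self.y)
        gy = self.cost_f.df_dy(self.x, self.y)
        self.x -= self.lr * gx
        self.y -= self.lr * gy
        return self.x, self.y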
|
169059
|
def run_api_workflow_with_assertions(workflow_specification, current_request, test_context):
current_request_result = current_request(test_context)
if current_request_result is not None and current_request_result["continue_workflow"]:
run_api_workflow_with_assertions(
workflow_specification,
workflow_specification[current_request_result["next_request"]],
current_request_result["test_context"]
)
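# Illustrative example (every name below is an assumption): each request
# callable receives the test context and returns None to stop, or a dict
# naming the next request and carrying the updated context forward.
def _login(ctx):
    return {"continue_workflow": True, "next_request": "profile",
            "test_context": {**ctx, "token": "abc"}}

def _profile(ctx):
    assert ctx["token"] == "abc"
    return None

if __name__ == "__main__":
    _workflow = {"login": _login, "profile": _profile}
    run_api_workflow_with_assertions(_workflow, _workflow["login"], {"base_url": "http://localhost"})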
|
169092
|
import torch
def check_vector(x, name):
if not torch.is_tensor(x):
raise RuntimeError('{} needs to be a Tensor'.format(name))
if x.dim() != 1:
raise RuntimeError('{} needs to be a vector (one-dimensional Tensor)'.format(name))
def check_scalar(x, name):
if not torch.is_tensor(x):
raise RuntimeError('{} needs to be a Tensor'.format(name))
if x.dim() != 0:
raise RuntimeError('{} needs to be a scalar (zero-dimensional Tensor)'.format(name))
def check_same_shape(x, xname, y, yname):
if x.shape != y.shape:
raise RuntimeError('Expecting {}.shape ({}) and {}.shape ({}) to be the same'.format(xname, x.shape, yname, y.shape))
def onehot_like(x, i):
ret = torch.zeros_like(x)
ret[i] = 1.
return ret
def unzip(lst):
return zip(*lst)
def check_tensor(value):
if not torch.is_tensor(value):
raise ValueError('Expecting value to be a tensor')
def check_list_or_tuple_of_tensors(value):
    if not isinstance(value, (list, tuple)):
raise ValueError('Expecting value to be a list or tuple')
for v in value:
check_tensor(v)
def flatten(tensors):
if torch.is_tensor(tensors):
return tensors.reshape(-1)
check_list_or_tuple_of_tensors(tensors)
return torch.cat([t.reshape(-1) for t in tensors])
def unflatten_as(tensor, tensors):
if torch.is_tensor(tensors):
return tensor.reshape(tensors.shape)
check_tensor(tensor)
check_list_or_tuple_of_tensors(tensors)
shapes = [t.shape for t in tensors]
nelements = [t.nelement() for t in tensors]
ts = torch.split(tensor.reshape(-1), nelements)
return [ts[i].reshape(shapes[i]) for i in range(len(tensors))]
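if __name__ == '__main__':
    # quick illustrative self-check (not part of the original module):
    # flatten / unflatten_as round-trip a list of tensors losslessly
    ts = [torch.randn(2, 3), torch.randn(4)]
    flat = flatten(ts)             # shape (10,)
    back = unflatten_as(flat, ts)  # shapes (2, 3) and (4,)
    assert all((a == b).all() for a, b in zip(ts, back))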
|
169150
|
import warnings
class LogWrapper(object):
def __init__(self, logger, context):
self.logger = logger
self.context = context
    def __call__(self, key, value):
        # deprecated no-op: the arguments are ignored, only a warning is emitted
        warnings.warn(
            "Calling context.iopipe.log() has been deprecated, use "
            "context.iopipe.metric() instead"
        )
def __getattr__(self, name):
self.context.iopipe.label("@iopipe/plugin-logger")
return getattr(self.logger, name)
|
169153
|
from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
class FusedOptimizer(optim.Optimizer):
def __init__(self, all_params, lr=None, weight_decay=None, momentum=None):
self.optimizers = []
        ops = {x['optimizer'] for x in all_params}
for op in ops:
params = [x for x in all_params if x['optimizer'] == op]
if op == 'adam':
optimizer = optim.Adam(params, lr=lr, weight_decay=weight_decay)
elif op == 'adamw':
optimizer = optim.AdamW(params, lr=lr, weight_decay=weight_decay)
elif op == 'sgd':
optimizer = optim.SGD(
params, lr=lr, weight_decay=weight_decay,
momentum=momentum
)
            else:
                raise ValueError('unsupported optimizer type: {}'.format(op))
self.optimizers.append(optimizer)
defaults = dict(lr=lr, weight_decay=weight_decay)
super(FusedOptimizer, self).__init__(all_params, defaults)
def zero_grad(self):
for op in self.optimizers:
op.zero_grad()
def step(self):
for op in self.optimizers:
op.step()
def build_optimizer(model, optim_cfg):
if getattr(optim_cfg, 'PER_PARAMETER_CFG', None) is None:
params = [x for x in model.parameters() if x.requires_grad]
else:
all_parameters = dict(model.named_parameters())
all_parameters = {k: v for k, v in all_parameters.items() if v.requires_grad}
params = []
for cur_cfg in optim_cfg.PER_PARAMETER_CFG:
cur_params = []
for k in list(all_parameters.keys()):
if cur_cfg.START_WITH == 'others':
check_ok = True
elif isinstance(cur_cfg.START_WITH, str):
check_ok = k.startswith(cur_cfg.START_WITH)
elif isinstance(cur_cfg.START_WITH, list):
check_ok = any([k.startswith(start_str) for start_str in cur_cfg.START_WITH])
else:
raise ValueError('wrong start_with config')
if check_ok:
cur_params.append(all_parameters[k])
all_parameters.pop(k)
assert len(cur_params) > 0, 'cannot find any parameter starting with {}'.format(cur_cfg.START_WITH)
print(f"find {len(cur_params)} parameters starting with {cur_cfg.START_WITH}")
params.append({
"params": cur_params,
"lr": optim_cfg.LR * cur_cfg.MUL_LR,
})
if 'optimizer' in cur_cfg:
params[-1]['optimizer'] = cur_cfg.optimizer
if len(all_parameters) > 0:
print(f"find {len(all_parameters)} parameters left")
print(list(all_parameters.keys()))
if optim_cfg.OPTIMIZER == 'adam':
optimizer = optim.Adam(params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
elif optim_cfg.OPTIMIZER == 'adamw':
optimizer = optim.AdamW(params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
elif optim_cfg.OPTIMIZER == 'sgd':
optimizer = optim.SGD(
params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
momentum=optim_cfg.MOMENTUM
)
elif optim_cfg.OPTIMIZER == 'fused':
optimizer = FusedOptimizer(params, lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY, momentum=optim_cfg.MOMENTUM)
elif optim_cfg.OPTIMIZER == 'adam_onecycle':
assert getattr(optim_cfg, 'PER_CHILD_CFG', None) is None
def children(m: nn.Module):
return list(m.children())
def num_children(m: nn.Module) -> int:
return len(children(m))
def flatten_model(m):
return sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
def get_layer_groups(m):
return [nn.Sequential(*flatten_model(m))]
optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
optimizer = OptimWrapper.create(
optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
)
else:
raise NotImplementedError
return optimizer
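# Illustrative PER_PARAMETER_CFG shape (EasyDict-style; the prefixes, LR
# multipliers and optimizer names below are assumptions): each entry selects
# parameters by name prefix ('others' catches whatever is left) and scales LR:
# PER_PARAMETER_CFG:
#   - {START_WITH: 'backbone', MUL_LR: 0.1, optimizer: 'sgd'}
#   - {START_WITH: ['head', 'neck'], MUL_LR: 1.0, optimizer: 'adam'}
#   - {START_WITH: 'others', MUL_LR: 1.0, optimizer: 'adam'}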
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]
def lr_lbmd(cur_epoch):
cur_decay = 1
for decay_step in decay_steps:
if cur_epoch >= decay_step:
cur_decay = cur_decay * optim_cfg.LR_DECAY
return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)
lr_warmup_scheduler = None
total_steps = total_iters_each_epoch * total_epochs
if optim_cfg.OPTIMIZER == 'adam_onecycle':
lr_scheduler = OneCycle(
optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
)
else:
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
if optim_cfg.LR_WARMUP:
lr_warmup_scheduler = CosineWarmupLR(
optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
)
return lr_scheduler, lr_warmup_scheduler
|
169166
|
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from data_process import data_process_utils
from data_process.census_process.census_data_creation_config import census_data_creation
from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, \
numericalize_census9495_data, standardize_census_data
from data_process.census_process.mapping_resource import cate_to_index_map, continuous_cols, categorical_cols, \
target_col_name
# The following link provides a description of the columns of the Census Income dataset:
# https://docs.1010data.com/Tutorials/MachineLearningExamples/CensusIncomeDataSet.html
def get_timestamp():
return int(datetime.utcnow().timestamp())
CENSUS_COLUMNS = ["age", "class_worker", "det_ind_code", "det_occ_code", "education",
"wage_per_hour", "hs_college", "marital_stat", "major_ind_code", "major_occ_code",
"race", "hisp_origin", "gender", "union_member", "unemp_reason", "full_or_part_emp",
"capital_gain", "capital_loss", "stock_dividends", "tax_filer_stat",
"region_prev_res", "state_prev_res", "det_hh_fam_stat", "det_hh_summ", "instance_weight",
"mig_chg_msa", "mig_chg_reg", "mig_move_reg", "mig_same", "mig_prev_sunbelt",
"num_emp", "fam_under_18", "country_father", "country_mother", "country_self",
"citizenship", "own_or_self", "vet_question", "vet_benefits", "weeks_worked",
"year", "income_label"]
RERANGED_CENSUS_COLUMNS_NEW = ["age", "gender_index", "age_index", "class_worker", "det_ind_code", "det_occ_code",
"education",
"education_year", "wage_per_hour", "hs_college", "marital_stat", "major_ind_code",
"major_occ_code", "race", "hisp_origin", "gender", "union_member", "unemp_reason",
"full_or_part_emp", "capital_gain", "capital_loss", "stock_dividends", "tax_filer_stat",
"region_prev_res", "state_prev_res", "det_hh_fam_stat", "det_hh_summ", "instance_weight",
"mig_chg_msa", "mig_chg_reg", "mig_move_reg", "mig_same", "mig_prev_sunbelt",
"num_emp", "fam_under_18", "country_father", "country_mother", "country_self",
"citizenship", "own_or_self", "vet_question", "vet_benefits", "weeks_worked",
"year", "income_label"]
def process(data_path, to_dir=None, train=True):
census = pd.read_csv(data_path, names=CENSUS_COLUMNS, skipinitialspace=True)
print("[INFO] load {} data".format("train" if train else "test"))
print("[INFO] load data with shape:", census.shape)
appendix = "_train" if train else "_test"
extension = ".csv"
appendix = appendix + extension
print("[INFO] consistentize original data")
c_census = consistentize_census9495_columns(census)
c_census.to_csv(to_dir + 'consistentized_census9495' + appendix, header=True, index=False)
print("[INFO] numericalize data")
p_census = numericalize_census9495_data(c_census, cate_to_index_map)
return p_census
def compute_instance_prob(data_frame):
weight_sum = data_frame["instance_weight"].sum()
data_frame["instance_weight"] = data_frame["instance_weight"] / weight_sum
def create_file_appendix(train):
appendix = "_train" if train else "_valid"
extension = ".csv"
return appendix + extension
def create_degree_src_tgt_data(p_census,
from_dir,
to_dir,
data_tag,
pos_ratio,
num_all,
train=True,
grad_train_scaler=None,
undergrad_train_scaler=None,
grad_census_test_values=None,
save_intermediate_tables=False):
appendix = create_file_appendix(train)
print("====================== create_degree_source_target_data for {} data ======================"
.format("train" if train else "valid"))
# form source and target domain data
doctorate_census = p_census[p_census['education'] == 11]
master_census = p_census[(p_census['education'] == 9) | (p_census['education'] == 10)]
undergrad_census = p_census[
(p_census['education'] != 9) & (p_census['education'] != 10) & (p_census['education'] != 11)]
columns = continuous_cols + categorical_cols + ['instance_weight', target_col_name]
doctorate_census = doctorate_census[columns]
master_census = master_census[columns]
undergrad_census = undergrad_census[columns]
print("[INFO] doctorate_census shape", doctorate_census.shape)
print("[INFO] master_census shape", master_census.shape)
print("[INFO] undergrad_census shape", undergrad_census.shape)
if save_intermediate_tables:
doctorate_census.to_csv(to_dir + 'doctorate_census9495' + appendix, header=True, index=False)
master_census.to_csv(to_dir + 'master_census9495' + appendix, header=True, index=False)
undergrad_census.to_csv(to_dir + 'undergrad_census9495' + appendix, header=True, index=False)
doctorate_census = pd.read_csv(from_dir + 'doctorate_census9495' + appendix, skipinitialspace=True)
master_census = pd.read_csv(from_dir + 'master_census9495' + appendix, skipinitialspace=True)
undergrad_census = pd.read_csv(from_dir + 'undergrad_census9495' + appendix, skipinitialspace=True)
doctorate_census_values = doctorate_census[columns].values
master_census_values = master_census[columns].values
undergrad_census_values = undergrad_census[columns].values
    # doctorate and master records form the target domain
grad_census_values = np.concatenate([doctorate_census_values, master_census_values], axis=0)
grad_census_values = shuffle(grad_census_values)
grad_census_df_for_da = pd.DataFrame(data=grad_census_values, columns=columns)
    # undergraduate records form the source domain
undergrad_census_values = shuffle(undergrad_census_values)
undergrad_census_df = pd.DataFrame(data=undergrad_census_values, columns=columns)
_, grad_train_scaler = standardize_census_data(grad_census_df_for_da, continuous_cols, grad_train_scaler)
_, udgrad_train_scaler = standardize_census_data(undergrad_census_df, continuous_cols, undergrad_train_scaler)
grad_census_df_1 = grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 1]
grad_census_df_0 = grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 0]
undergrad_census_df_1 = undergrad_census_df[undergrad_census_df[target_col_name] == 1]
undergrad_census_df_0 = undergrad_census_df[undergrad_census_df[target_col_name] == 0]
print("[INFO] (orig) (target) grad_census_df_1 shape:", grad_census_df_1.shape)
print("[INFO] (orig) (target) grad_census_df_0 shape:", grad_census_df_0.shape)
print("[INFO] (orig) (source) undergrad_census_df_1 shape:", undergrad_census_df_1.shape)
print("[INFO] (orig) (source) undergrad_census_df_0 shape:", undergrad_census_df_0.shape)
grad_census_for_test = None
test_pos_ratio = 0.5
if train:
num_pos = int(num_all * pos_ratio)
num_neg = int(num_all * (1 - pos_ratio))
print(f"[INFO] train num_pos:{num_pos}")
print(f"[INFO] train num_neg:{num_neg}")
# get labeled target data for supervised training
grad_census_values_1 = grad_census_df_1.values[0:num_pos]
grad_census_values_0 = grad_census_df_0.values[0:num_neg]
grad_census_values_for_supervise = shuffle(np.concatenate((grad_census_values_1, grad_census_values_0), axis=0))
print(f"[INFO] grad train positive samples range:[0:{num_pos}].")
print(f"[INFO] grad train negative samples range:[0:{num_neg}].")
print(f"[INFO] grad train all samples shape:{grad_census_values_for_supervise.shape}.")
num_pos_for_test = int((grad_census_df_0.shape[0] - num_all) * test_pos_ratio)
grad_census_test_values_1 = grad_census_df_1.values[num_pos:num_pos + num_pos_for_test]
grad_census_test_values_0 = grad_census_df_0.values[num_all:]
print(f"[INFO] => grad left_data for test # of positive samples:{num_pos_for_test}")
print(f"[INFO] => grad left-data for test pos samples range:[{num_pos}:{num_pos + num_pos_for_test}].")
print(f"[INFO] => grad left-data for test pos samples shape:{grad_census_test_values_1.shape}")
print(f"[INFO] => grad left-data for test neg samples range:[{num_all}:-1].")
print(f"[INFO] => grad left-data for test neg samples shape:{grad_census_test_values_0.shape}")
grad_census_for_test = np.concatenate([grad_census_test_values_1, grad_census_test_values_0], axis=0)
print(f"[INFO] => grad left-data for test shape: {grad_census_for_test.shape}")
else:
# num_pos = int((grad_census_df_0.shape[0] + grad_census_df_0.shape[1]) * test_pos_ratio)
# grad_census_values_1 = grad_census_df_1.values[:num_pos]
grad_census_values_1 = grad_census_df_1.values
grad_census_values_0 = grad_census_df_0.values
grad_census_values_for_supervise = shuffle(
np.concatenate((grad_census_values_1, grad_census_values_0, grad_census_test_values), axis=0))
print(f"[INFO] grad test pos samples shape:{grad_census_values_1.shape}.")
print(f"[INFO] grad test neg samples shape:{grad_census_values_0.shape}.")
print(f"[INFO] grad left-data for test samples shape:{grad_census_test_values.shape}.")
print(f"[INFO] grad test all samples shape: {grad_census_values_for_supervise.shape}")
# print("grad_census_values_1 shape:", grad_census_values_1.shape)
# print("grad_census_values_0 shape:", grad_census_values_0.shape)
# grad_census_values_for_supervise = shuffle(np.concatenate((grad_census_values_1, grad_census_values_0), axis=0))
grad_census_df_for_ft = pd.DataFrame(data=grad_census_values_for_supervise, columns=columns)
print("[INFO] (final) grad_census_df_for_ft (supervised) shape:", grad_census_df_for_ft.shape)
print("[INFO] grad_census_df_for_ft (supervised) pos:",
grad_census_df_for_ft[grad_census_df_for_ft[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ft (supervised) neg:",
grad_census_df_for_ft[grad_census_df_for_ft[target_col_name] == 0].shape)
# save data
if train:
grad_ft_file_full_path = from_dir + 'grad_census9495_ft_' + str(data_tag) + appendix
grad_census_df_for_ft.to_csv(grad_ft_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ft data to: {grad_ft_file_full_path}")
print("[INFO] (final) grad_census_df_for_ad shape:", grad_census_df_for_da.shape)
print("[INFO] grad_census_df_for_ad pos:",
grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ad neg:",
grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 0].shape)
grad_da_file_full_path = from_dir + 'grad_census9495_ad_' + str(data_tag) + appendix
grad_census_df_for_da.to_csv(grad_da_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ad data to: {grad_da_file_full_path}")
else:
# test
half_num = int(grad_census_df_for_ft.shape[0] / 2)
grad_census_df_for_ft_valid = grad_census_df_for_ft[:half_num]
grad_census_df_for_ft_test = grad_census_df_for_ft[half_num:]
print(f"[INFO] (final) grad_census_df_for_ft_valid shape:{grad_census_df_for_ft_valid.shape}")
print(f"[INFO] => grad_census_df_for_ft_valid shape range:[0:{half_num}].")
print("[INFO] grad_census_df_for_ft_valid pos:",
grad_census_df_for_ft_valid[grad_census_df_for_ft_valid[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ft_valid neg:",
grad_census_df_for_ft_valid[grad_census_df_for_ft_valid[target_col_name] == 0].shape)
grad_ft_file_full_path = from_dir + 'grad_census9495_ft_' + str(data_tag) + "_valid.csv"
grad_census_df_for_ft_valid.to_csv(grad_ft_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ft valid data to: {grad_ft_file_full_path}")
print(f"[INFO] (final) grad_census_df_for_ft_test shape:{grad_census_df_for_ft_test.shape}")
print(f"[INFO] => grad_census_df_for_ft_test range:[{half_num}:].")
print("[INFO] grad_census_df_for_ft_test pos:",
grad_census_df_for_ft_test[grad_census_df_for_ft_test[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ft_valid neg:",
grad_census_df_for_ft_test[grad_census_df_for_ft_test[target_col_name] == 0].shape)
grad_ft_file_full_path = from_dir + 'grad_census9495_ft_' + str(data_tag) + "_test.csv"
grad_census_df_for_ft_test.to_csv(grad_ft_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ft test data to: {grad_ft_file_full_path}")
undergrad_pos_num = undergrad_census_df_1.shape[0]
undergrad_census_values_all = shuffle(
np.concatenate((undergrad_census_df_1.values, undergrad_census_df_0[:undergrad_pos_num * 9].values), axis=0))
undergrad_census_df_all = pd.DataFrame(data=undergrad_census_values_all, columns=columns)
print("[INFO] (final) undergrad_census_df_all shape:", undergrad_census_df_all.shape)
print("[INFO] undergrad_census_df_all pos:",
undergrad_census_df_all[undergrad_census_df_all[target_col_name] == 1].shape)
print("[INFO] undergrad_census_df_all neg:",
undergrad_census_df_all[undergrad_census_df_all[target_col_name] == 0].shape)
undergrad_file_full_path = from_dir + 'undergrad_census9495_ad_' + str(data_tag) + appendix
undergrad_census_df_all.to_csv(undergrad_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved undergrad ad data to: {undergrad_file_full_path}")
return grad_train_scaler, udgrad_train_scaler, grad_census_for_test
def combine_src_tgt_data(from_dir, to_dir, data_tag):
print(f"========================= combine census source and target data ============================ ")
source_train_file_name = from_dir + f'undergrad_census9495_ad_{data_tag}_train.csv'
target_train_file_name = from_dir + f'grad_census9495_ft_{data_tag}_train.csv'
df_src_data = pd.read_csv(source_train_file_name, skipinitialspace=True)
df_tgt_data = pd.read_csv(target_train_file_name, skipinitialspace=True)
print("[INFO] df_src_data shape:", df_src_data.shape)
print("[INFO] df_tgt_data shape:", df_tgt_data.shape)
df_data = data_process_utils.combine_src_tgt_data(df_src_data, df_tgt_data)
print("[INFO] df_src_tgt_data shape:", df_data.shape)
file_full_name = "{}/degree_src_tgt_census9495_{}_train.csv".format(to_dir, data_tag)
data_process_utils.save_df_data(df_data, file_full_name)
if __name__ == "__main__":
data_dir = census_data_creation['original_data_dir']
output_data_dir = census_data_creation['processed_data_dir']
data_tag = census_data_creation['data_tag']
pos_ratio = census_data_creation['positive_sample_ratio']
num_all = census_data_creation['number_target_samples']
print("[INFO] ------ process data ------")
train_data_path = data_dir + census_data_creation['train_data_file_name']
test_data_path = data_dir + census_data_creation['test_data_file_name']
train_df = process(train_data_path, to_dir=output_data_dir, train=True)
test_df = process(test_data_path, to_dir=output_data_dir, train=False)
grad_train_scaler, udgrad_train_scaler, grad_census_for_test = create_degree_src_tgt_data(train_df,
from_dir=output_data_dir,
to_dir=output_data_dir,
train=True,
pos_ratio=pos_ratio,
num_all=num_all,
data_tag=data_tag)
create_degree_src_tgt_data(test_df,
from_dir=output_data_dir,
to_dir=output_data_dir,
train=False,
pos_ratio=pos_ratio,
grad_train_scaler=grad_train_scaler,
undergrad_train_scaler=udgrad_train_scaler,
grad_census_test_values=grad_census_for_test,
data_tag=data_tag,
num_all=num_all)
# NOTE: input dir and output dir are the same for src_tgt_data
combine_src_tgt_data(from_dir=output_data_dir, to_dir=output_data_dir, data_tag=data_tag)
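# Summary of the files written by this script (names derive from data_tag and
# the appendix variable; appendix is '_train.csv' for the train run):
#   grad_census9495_ft_<tag>...        target fine-tuning data (train, or valid/test halves)
#   grad_census9495_ad_<tag>...        target domain-adaptation data (train only)
#   undergrad_census9495_ad_<tag>...   source domain-adaptation data
#   degree_src_tgt_census9495_<tag>_train.csv   combined source + target training data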
|
169210
|
from singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods import UmiBarcodeDemuxMethod
# CEL-Seq 1 with an 8bp cell barcode, a 4bp UMI and a 6bp random primer
class CELSeq1_c8_u4(UmiBarcodeDemuxMethod):
def __init__(self, barcodeFileParser, **kwargs):
self.barcodeFileAlias = 'celseq1'
UmiBarcodeDemuxMethod.__init__(
self,
umiRead=0,
umiStart=8,
umiLength=4,
barcodeRead=0,
barcodeStart=0,
barcodeLength=8,
random_primer_read=1,
random_primer_length=6,
barcodeFileAlias=self.barcodeFileAlias,
barcodeFileParser=barcodeFileParser,
**kwargs)
self.shortName = 'CS1C8U4'
self.longName = 'CELSeq 1, CB: 8bp, UMI: 4bp'
self.autoDetectable = True
        self.description = 'R1 starts with an 8bp cell barcode followed by a 4bp UMI. R2 ends with a 6bp random primer'
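# Read layout implied by the parameters above:
#   R1: bases 0-7  -> 8bp cell barcode (barcodeStart=0, barcodeLength=8)
#       bases 8-11 -> 4bp UMI          (umiStart=8, umiLength=4)
#   R2: last 6 bases -> random primer  (random_primer_read=1, random_primer_length=6)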
|
169250
|
from soccerdepth.data.dataset_loader import get_set
import numpy as np
import utils.files as file_utils
from os.path import join
import argparse
from soccerdepth.models.hourglass import hg8
from soccerdepth.models.utils import weights_init
from soccerdepth.data.data_utils import image_logger_converter_visdom
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from soccerdepth.data.transforms import *
from torchvision import transforms
import warnings
from visdom import Visdom
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Depth AutoEncoder')
parser.add_argument('--dataset', default='', help='Dataset to train on')
parser.add_argument('--batchSize', type=int, default=6, help='training batch size')
parser.add_argument('--testBatchSize', type=int, default=2, help='testing batch size')
parser.add_argument('--nEpochs', type=int, default=1000, help='number of epochs to train for')
parser.add_argument('--input_nc', type=int, default=4, help='input image channels')
parser.add_argument('--output_nc', type=int, default=51, help='output image channels')
parser.add_argument('--nf', type=int, default=64, help='number of filters')
parser.add_argument('--lr', type=float, default=0.00003, help='learning rate. Default=0.00003')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. Default=0.9')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--lamb', type=int, default=100, help='weight on L1 term in objective')
parser.add_argument('--run', type=int, default=44, help='Run number for tensorboard')
parser.add_argument('--output_path', default='/home/krematas/Mountpoints/grail/CNN', help='Where the files WILL be stored')
parser.add_argument('--modeldir', default='/home/krematas/Mountpoints/grail/CNN', help='Where the files ARE being stored')
parser.add_argument('--additional_input', default='mask', help='filepostfix')
parser.add_argument('--postfix', default='hg_estmask', help='filepostfix')
parser.add_argument('--resume', type=int, default=0, help='Resume training')
parser.add_argument('--port', type=int, default=9876, help='Run number for tensorboard')
parser.add_argument('--additional_input_type', default='estmask', help='The type of additional input to load [estmap, trimap]')
opt, _ = parser.parse_known_args()
opt.cuda = True
print(opt)
# Initiate the visdom image windows and loss plots
viz = Visdom(env='FIFA CNN training', port=opt.port)
viz.close()
win0 = viz.images(np.ones((4, 3, 128, 128)))
win1 = viz.images(np.ones((4, 3, 128, 128)))
win2 = viz.images(np.ones((4, 3, 128, 128)))
win3 = viz.images(np.ones((4, 3, 128, 128)))
epoch_lot = viz.line(X=torch.zeros((1,)).cpu(), Y=torch.zeros((1,)).cpu(),
opts=dict(
xlabel='Epoch',
ylabel='Loss',
title='Epoch Training Loss',
legend=['Loss'])
)
lot = viz.line(
X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1,)).cpu(),
opts=dict(
xlabel='Iteration',
ylabel='Loss',
title='Current Training Loss',
legend=['Loss']
)
)
# writer = SummaryWriter("runs/run%d" % opt.run)
if opt.cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
cudnn.benchmark = True
torch.manual_seed(opt.seed)
if opt.cuda:
torch.cuda.manual_seed(opt.seed)
dataset = 'play_for_data'
path_to_data = join(file_utils.get_platform_datadir(dataset), 'cnn2')
print('===> Loading datasets')
composed = transforms.Compose([RandomRotation(), RandomCrop(), Rescale(256, 64), ColorOffset(), ToTensor(), NormalizeImage()])
train_set = get_set(join(path_to_data, 'train'), nbins=opt.output_nc, transform=composed, additional_input_type=opt.additional_input_type)
composed = transforms.Compose([Rescale(256, 64), ToTensor(), NormalizeImage()])
test_set = get_set(join(path_to_data, 'test'), nbins=opt.output_nc, transform=composed, additional_input_type=opt.additional_input_type)
training_data_loader = DataLoader(dataset=train_set, num_workers=8, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=8, batch_size=opt.testBatchSize, shuffle=False)
print('===> Building model')
model = hg8(input_nc=opt.input_nc, output_nc=opt.output_nc)
model.apply(weights_init)
print('===> The loss function is cross entropy loss')
class_weights = np.ones((opt.output_nc, ))
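# downweight bin 0 (presumably the dominant background bin) so it does not
# swamp the cross-entropy loss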
class_weights[0] = 0.1
weights = torch.from_numpy(class_weights)
weights = torch.FloatTensor(weights.size()).copy_(weights).cuda()
criterion = nn.NLLLoss2d(weight=weights)
logsoftmax = nn.LogSoftmax()
# Setup the Adam optimizer
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=0.003)
# Resume if available
if opt.resume > 0:
checkpoint = torch.load(join(opt.modeldir, 'model_epoch_%d_%s_%s.pth' % (opt.resume, opt.additional_input, opt.postfix)))
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if opt.cuda:
model = model.cuda()
criterion = criterion.cuda()
def train(epoch):
epoch_loss = 0
for iteration, batch in enumerate(training_data_loader, 1):
input, target, mask = Variable(batch['image']).float(), Variable(batch['target']).long(), Variable(batch['mask']).float()
if opt.input_nc > 3:
input = torch.cat((input, mask), 1)
if opt.cuda:
input = input.cuda()
target = target.cuda()
output = model(input)
loss = criterion(logsoftmax(output[0]), target.squeeze())
for j in range(1, len(output)):
loss += criterion(logsoftmax(output[j]), target.squeeze())
epoch_loss += loss.data[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.data[0]))
if iteration % 50 == 0:
prediction = logsoftmax(output[-1])
x, y, z, w = image_logger_converter_visdom(input, mask, target, prediction)
viz.images(
x,
win=win0,
)
viz.images(
y,
win=win1,
)
viz.images(
w,
win=win2,
)
viz.images(
z,
win=win3,
)
# writer.add_image('Train_image', x, epoch)
# writer.add_image('Train_prediction', y, epoch)
# writer.add_image('Train_label', z, epoch)
# print(torch.ones((1,)).cpu().size())
# print(torch.Tensor([loss.data[0]]).unsqueeze(0).cpu().size())
viz.line(
X=torch.ones((1, 1)).cpu() * ((epoch-1)*len(training_data_loader) + iteration),
Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
win=lot,
update='append'
)
# hacky fencepost solution for 0th epoch plot
if epoch == 1 and iteration == len(training_data_loader)-1:
viz.line(
X=torch.zeros((1, 1)).cpu(),
Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
win=epoch_lot,
update=True
)
viz.line(
X=torch.ones((1, 1)).cpu()*epoch,
Y=torch.Tensor([epoch_loss/len(training_data_loader)]).unsqueeze(0).cpu(),
win=epoch_lot,
update='append'
)
print("===> Epoch {} Complete: Avg. Loss: {:.6f}".format(epoch, epoch_loss / len(training_data_loader)))
return epoch_loss / len(training_data_loader)
def test():
epoch_loss = 0
for iteration, batch in enumerate(testing_data_loader):
input, target, mask = Variable(batch['image']).float(), Variable(batch['target']).long(), Variable(batch['mask']).float()
if opt.input_nc > 3:
input = torch.cat((input, mask), 1)
if opt.cuda:
input = input.cuda()
target = target.cuda()
output = model(input)
loss = 0
for o in output:
loss += criterion(logsoftmax(o), target.squeeze())
epoch_loss += loss.data[0]
if iteration == 1:
prediction = logsoftmax(output[-1])
x, y, z, w = image_logger_converter_visdom(input, mask, target, prediction)
viz.images(
x,
win=win0,
)
viz.images(
y,
win=win1,
)
viz.images(
w,
win=win2,
)
viz.images(
z,
win=win3,
)
print("===> Validation Complete: Avg. Loss: {:.6f}".format(epoch_loss / len(testing_data_loader)))
return epoch_loss / len(testing_data_loader)
def checkpoint(epoch):
model_out_path = "{0}/model_epoch_{1}_{2}_{3}.pth".format(opt.output_path, epoch, opt.additional_input, opt.postfix)
dict_to_save = {
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(dict_to_save, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
for epoch in range(opt.resume+1, opt.nEpochs + 1):
v1 = train(epoch)
v2 = test()
    # the SummaryWriter is disabled above, so keep its calls commented out too
    # writer.add_scalar('train_loss', v1, epoch)
    # writer.add_scalar('val_loss', v2, epoch)
    checkpoint(epoch)
# writer.close()
|
169330
|
from nose.tools import eq_, ok_
from django.core.urlresolvers import reverse
from .base import ManageTestCase
class TestTasksTester(ManageTestCase):
def test_dashboard(self):
url = reverse('manage:tasks_tester')
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.post(url, {'milliseconds': 84})
eq_(response.status_code, 302)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Waited 0 seconds :)' in response.content)
|
169342
|
import logging
import multiprocessing
import time
from radosgw_agent import worker
from radosgw_agent import client
from radosgw_agent.util import get_dev_logger
from radosgw_agent.exceptions import NotFound, HttpError
log = logging.getLogger(__name__)
dev_log = get_dev_logger(__name__)
# the replica log api only supports one entry, and updating it
# requires sending a daemon id that matches the existing one. This
# doesn't make a whole lot of sense with the current structure of
# radosgw-agent, so just use a constant value for the daemon id.
DAEMON_ID = 'radosgw-agent'
def prepare_sync(syncer, error_delay):
"""Attempt to prepare a syncer for running a sync.
:param error_delay: seconds to wait before retrying
This will retry forever so the sync agent continues if radosgws
are unavailable temporarily.
"""
while True:
try:
syncer.prepare()
break
except Exception:
log.warn('error preparing for sync, will retry. Traceback:',
exc_info=True)
time.sleep(error_delay)
def incremental_sync(meta_syncer, data_syncer, num_workers, lock_timeout,
incremental_sync_delay, metadata_only, error_delay):
"""Run a continuous incremental sync.
This will run forever, pausing between syncs by a
incremental_sync_delay seconds.
"""
while True:
try:
meta_syncer.sync(num_workers, lock_timeout)
if not metadata_only:
data_syncer.sync(num_workers, lock_timeout)
except Exception:
log.warn('error doing incremental sync, will try again. Traceback:',
exc_info=True)
# prepare data before sleeping due to rgw_log_bucket_window
if not metadata_only:
prepare_sync(data_syncer, error_delay)
log.info('waiting %d seconds until next sync',
incremental_sync_delay)
time.sleep(incremental_sync_delay)
prepare_sync(meta_syncer, error_delay)
class Syncer(object):
def __init__(self, src, dest, max_entries, *args, **kwargs):
self.src = src
self.dest = dest
self.src_conn = client.connection(src)
self.dest_conn = client.connection(dest)
self.daemon_id = DAEMON_ID
self.worker_cls = None # filled in by subclass constructor
self.num_shards = None
self.max_entries = max_entries
self.object_sync_timeout = kwargs.get('object_sync_timeout')
def init_num_shards(self):
if self.num_shards is not None:
return
try:
self.num_shards = client.num_log_shards(self.src_conn, self.type)
log.debug('%d shards to check', self.num_shards)
except Exception:
log.error('finding number of shards failed')
raise
def shard_num_for_key(self, key):
key = key.encode('utf8')
hash_val = 0
for char in key:
c = ord(char)
hash_val = (hash_val + (c << 4) + (c >> 4)) * 11
return hash_val % self.num_shards
def prepare(self):
"""Setup any state required before syncing starts.
This must be called before sync().
"""
pass
def generate_work(self):
"""Generate items to be place in a queue or processing"""
pass
def wait_until_ready(self):
pass
def complete_item(self, shard_num, retries):
"""Called when syncing a single item completes successfully"""
marker = self.shard_info.get(shard_num)
if not marker:
return
try:
data = [dict(name=retry, time=worker.DEFAULT_TIME)
for retry in retries]
client.set_worker_bound(self.dest_conn,
self.type,
marker,
worker.DEFAULT_TIME,
self.daemon_id,
shard_num,
data)
except Exception:
            log.warn('could not set worker bounds, may repeat some work. '
                     'Traceback:', exc_info=True)
def sync(self, num_workers, log_lock_time):
workQueue = multiprocessing.Queue()
resultQueue = multiprocessing.Queue()
processes = [self.worker_cls(workQueue,
resultQueue,
log_lock_time,
self.src,
self.dest,
daemon_id=self.daemon_id,
max_entries=self.max_entries,
object_sync_timeout=self.object_sync_timeout,
)
for i in xrange(num_workers)]
for process in processes:
process.daemon = True
process.start()
self.wait_until_ready()
log.info('Starting sync')
# enqueue the shards to be synced
num_items = 0
for item in self.generate_work():
num_items += 1
workQueue.put(item)
# add a poison pill for each worker
for i in xrange(num_workers):
workQueue.put(None)
        # pull the results out as they are produced, tracking any shards that
        # fail to sync (each item also carries its own per-item retry list)
        errored_shards = []
        for i in xrange(num_items):
            result, item = resultQueue.get()
            shard_num, retries = item
            if result == worker.RESULT_SUCCESS:
                log.debug('synced item %r successfully', item)
                self.complete_item(shard_num, retries)
            else:
                log.error('error syncing shard %d', shard_num)
                errored_shards.append(shard_num)
            log.info('%d/%d items processed', i + 1, num_items)
        if errored_shards:
            log.error('Encountered errors syncing these %d shards: %r',
                      len(errored_shards), errored_shards)
class IncrementalSyncer(Syncer):
def get_worker_bound(self, shard_num):
bound = client.get_worker_bound(
self.dest_conn,
self.type,
shard_num)
marker = bound['marker']
retries = bound['retries']
dev_log.debug('oldest marker and time for shard %d are: %r %r',
shard_num, marker, bound['oldest_time'])
dev_log.debug('%d items to retry are: %r', len(retries), retries)
return marker, retries
def get_log_entries(self, shard_num, marker):
try:
result = client.get_log(self.src_conn, self.type,
marker, self.max_entries,
shard_num)
last_marker = result['marker']
log_entries = result['entries']
if len(log_entries) == self.max_entries:
log.warn('shard %d log has fallen behind - log length >= %d',
shard_num, self.max_entries)
except NotFound:
            # no entries past this marker yet, but we may have retries
last_marker = ' '
log_entries = []
return last_marker, log_entries
def prepare(self):
self.init_num_shards()
self.shard_info = {}
self.shard_work = {}
for shard_num in xrange(self.num_shards):
marker, retries = self.get_worker_bound(shard_num)
last_marker, log_entries = self.get_log_entries(shard_num, marker)
self.shard_work[shard_num] = log_entries, retries
self.shard_info[shard_num] = last_marker
self.prepared_at = time.time()
def generate_work(self):
return self.shard_work.iteritems()
class MetaSyncerInc(IncrementalSyncer):
def __init__(self, *args, **kwargs):
super(MetaSyncerInc, self).__init__(*args, **kwargs)
self.worker_cls = worker.MetadataWorkerIncremental
self.type = 'metadata'
class DataSyncerInc(IncrementalSyncer):
def __init__(self, *args, **kwargs):
super(DataSyncerInc, self).__init__(*args, **kwargs)
self.worker_cls = worker.DataWorkerIncremental
self.type = 'data'
self.rgw_data_log_window = kwargs.get('rgw_data_log_window', 30)
def wait_until_ready(self):
log.info('waiting to make sure bucket log is consistent')
while time.time() < self.prepared_at + self.rgw_data_log_window:
time.sleep(1)
class DataSyncerFull(Syncer):
def __init__(self, *args, **kwargs):
super(DataSyncerFull, self).__init__(*args, **kwargs)
self.worker_cls = worker.DataWorkerFull
self.type = 'data'
self.rgw_data_log_window = kwargs.get('rgw_data_log_window', 30)
def prepare(self):
log.info('preparing to do a full data sync')
self.init_num_shards()
# save data log markers for each shard
self.shard_info = {}
for shard in xrange(self.num_shards):
info = client.get_log_info(self.src_conn, 'data', shard)
# setting an empty marker returns an error
if info['marker']:
self.shard_info[shard] = info['marker']
else:
self.shard_info[shard] = ' '
# get list of buckets after getting any markers to avoid skipping
# entries added before we got the marker info
log.debug('getting bucket list')
buckets = client.get_bucket_list(self.src_conn)
self.prepared_at = time.time()
self.buckets_by_shard = {}
for bucket in buckets:
shard = self.shard_num_for_key(bucket)
self.buckets_by_shard.setdefault(shard, [])
self.buckets_by_shard[shard].append(bucket)
def generate_work(self):
return self.buckets_by_shard.iteritems()
def wait_until_ready(self):
log.info('waiting to make sure bucket log is consistent')
while time.time() < self.prepared_at + self.rgw_data_log_window:
time.sleep(1)
class MetaSyncerFull(Syncer):
def __init__(self, *args, **kwargs):
super(MetaSyncerFull, self).__init__(*args, **kwargs)
self.worker_cls = worker.MetadataWorkerFull
self.type = 'metadata'
def prepare(self):
try:
self.sections = client.get_metadata_sections(self.src_conn)
except HttpError as e:
log.error('Error listing metadata sections: %s', e)
raise
        # grab the latest shard markers and timestamps before we sync
self.shard_info = {}
self.init_num_shards()
for shard_num in xrange(self.num_shards):
info = client.get_log_info(self.src_conn, 'metadata', shard_num)
# setting an empty marker returns an error
if info['marker']:
self.shard_info[shard_num] = info['marker']
else:
self.shard_info[shard_num] = ' '
self.metadata_by_shard = {}
for section in self.sections:
try:
for key in client.list_metadata_keys(self.src_conn, section):
shard = self.shard_num_for_key(section + ':' + key)
self.metadata_by_shard.setdefault(shard, [])
self.metadata_by_shard[shard].append((section, key))
except NotFound:
# no keys of this type exist
continue
except HttpError as e:
log.error('Error listing metadata for section %s: %s',
section, e)
raise
def generate_work(self):
return self.metadata_by_shard.iteritems()
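# Worked example of Syncer.shard_num_for_key (illustration only): for key
# 'ab' with num_shards = 8, the hash accumulates per character as
#   hash = (0 + (ord('a') << 4) + (ord('a') >> 4)) * 11     # 17138
#   hash = (hash + (ord('b') << 4) + (ord('b') >> 4)) * 11  # 205832
# and the resulting shard is 205832 % 8 == 0.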
|
169356
|
import math
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
import moderngl as mgl
import numpy as np
import vpype as vp
from ._utils import ColorType, load_program, load_texture_array
if TYPE_CHECKING: # pragma: no cover
from .engine import Engine
ResourceType = Union[mgl.Buffer, mgl.Texture, mgl.TextureArray]
class Painter:
def __init__(self, ctx: mgl.Context):
self._ctx = ctx
self._resources: List[ResourceType] = []
def __del__(self):
for resource in self._resources:
resource.release()
def register_resource(self, resource: ResourceType) -> ResourceType:
self._resources.append(resource)
return resource
def buffer(self, *args: Any, **kwargs: Any) -> mgl.Buffer:
buffer = self._ctx.buffer(*args, **kwargs)
self.register_resource(buffer)
return buffer
def render(self, engine: "Engine", projection: np.ndarray) -> None:
raise NotImplementedError
class PaperBoundsPainter(Painter):
def __init__(
self,
ctx: mgl.Context,
paper_size: Tuple[float, float],
color: ColorType = (0, 0, 0, 0.45),
shadow_size: float = 7.0,
):
super().__init__(ctx)
data = np.array(
[
(0, 0),
(paper_size[0], 0),
(paper_size[0], paper_size[1]),
(0, paper_size[1]),
(paper_size[0], shadow_size),
(paper_size[0] + shadow_size, shadow_size),
(paper_size[0] + shadow_size, paper_size[1] + shadow_size),
(shadow_size, paper_size[1] + shadow_size),
(shadow_size, paper_size[1]),
],
dtype="f4",
)
line_idx = np.array([0, 1, 2, 3], dtype="i4")
triangle_idx = np.array(
[
(0, 3, 1), # page background
(1, 3, 2),
(4, 2, 5), # shadow
(2, 6, 5),
(7, 6, 2),
(8, 7, 2),
],
dtype="i4",
).reshape(-1)
self._color = color
self._prog = load_program("fast_line_mono", ctx)
vbo = self.buffer(data.tobytes())
self._bounds_vao = ctx.vertex_array(
self._prog, [(vbo, "2f", "in_vert")], self.buffer(line_idx.tobytes())
)
self._shading_vao = ctx.vertex_array(
self._prog, [(vbo, "2f", "in_vert")], self.buffer(triangle_idx.tobytes())
)
def render(self, engine: "Engine", projection: np.ndarray) -> None:
self._prog["projection"].write(projection)
self._prog["color"].value = (0, 0, 0, 0.25)
self._shading_vao.render(mgl.TRIANGLES, first=6, vertices=12)
self._prog["color"].value = (1, 1, 1, 1)
self._shading_vao.render(mgl.TRIANGLES, first=0, vertices=6)
self._prog["color"].value = self._color
self._bounds_vao.render(mgl.LINE_LOOP)
class LineCollectionFastPainter(Painter):
def __init__(self, ctx: mgl.Context, lc: vp.LineCollection, color: ColorType):
super().__init__(ctx)
self._prog = load_program("fast_line_mono", ctx)
self._color = color
vertices, indices = self._build_buffers(lc)
vbo = self.buffer(vertices.tobytes())
ibo = self.buffer(indices.tobytes())
self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "in_vert")], index_buffer=ibo)
def render(self, engine: "Engine", projection: np.ndarray) -> None:
self._prog["projection"].write(projection)
self._prog["color"].value = self._color
self._vao.render(mgl.LINE_STRIP)
@staticmethod
def _build_buffers(lc: vp.LineCollection) -> Tuple[np.ndarray, np.ndarray]:
total_length = sum(len(line) for line in lc)
buffer = np.empty((total_length, 2), dtype="f4")
indices = np.empty(total_length + len(lc), dtype="i4")
indices.fill(-1)
# build index array
cur_index = 0
for i, line in enumerate(lc):
next_idx = cur_index + len(line)
indices[i + cur_index : i + next_idx] = np.arange(cur_index, next_idx)
buffer[cur_index:next_idx] = vp.as_vector(line)
cur_index = next_idx
return buffer, indices
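# Illustration of the index buffer built by _build_buffers above: for two
# lines of 3 and 2 points it yields [0, 1, 2, -1, 3, 4, -1], where -1 is the
# primitive restart index that splits the single LINE_STRIP draw into one
# strip per line.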
class LineCollectionFastColorfulPainter(Painter):
COLORS = [
np.array((0.0, 0.0, 1.0, 1.0)),
np.array((0.0, 0.5, 0.0, 1.0)),
np.array((1.0, 0.0, 0.0, 1.0)),
np.array((0.0, 0.75, 0.75, 1.0)),
np.array((0.0, 1.0, 0.0, 1.0)),
np.array((0.75, 0, 0.75, 1.0)),
np.array((0.75, 0.75, 0.0, 1.0)),
]
def __init__(self, ctx: mgl.Context, lc: vp.LineCollection, show_points: bool = False):
super().__init__(ctx)
self._show_points = show_points
self._prog = load_program("fast_line", ctx)
# TODO: hacked color table size is not ideal, this will need to be changed when
# implementing color themes
self._prog["colors"].write(np.concatenate(self.COLORS).astype("f4").tobytes())
vertices, indices = self._build_buffers(lc)
vbo = self.buffer(vertices.tobytes())
ibo = self.buffer(indices.tobytes())
self._vao = ctx.vertex_array(
self._prog,
[(vbo, "2f4 i1", "in_vert", "color_idx")],
ibo,
)
def render(self, engine: "Engine", projection: np.ndarray) -> None:
self._prog["projection"].write(projection)
self._vao.render(mgl.LINE_STRIP)
if self._show_points:
self._vao.render(mgl.POINTS)
@classmethod
def _build_buffers(cls, lc: vp.LineCollection) -> Tuple[np.ndarray, np.ndarray]:
total_length = sum(len(line) for line in lc)
buffer = np.empty(total_length, dtype=[("vertex", "2f4"), ("color", "i1")])
indices = np.empty(total_length + len(lc), dtype="i4")
indices.fill(-1)
# build index array
cur_index = 0
for i, line in enumerate(lc):
next_idx = cur_index + len(line)
indices[i + cur_index : i + next_idx] = np.arange(cur_index, next_idx)
buffer["vertex"][cur_index:next_idx] = vp.as_vector(line)
buffer["color"][cur_index:next_idx] = i % len(cls.COLORS)
cur_index = next_idx
return buffer, indices
class LineCollectionPointsPainter(Painter):
def __init__(
self, ctx: mgl.Context, lc: vp.LineCollection, color: ColorType = (0, 0, 0, 0.25)
):
super().__init__(ctx)
vertex = """
#version 330
uniform mat4 projection;
in vec2 position;
void main() {
gl_PointSize = 5.0;
gl_Position = projection * vec4(position, 0.0, 1.0);
}
"""
fragment = """
#version 330
uniform vec4 color;
out vec4 out_color;
void main() {
out_color = color;
}
"""
self._prog = ctx.program(vertex_shader=vertex, fragment_shader=fragment)
self._color = color
vertices = self._build_buffers(lc)
vbo = self.buffer(vertices.tobytes())
self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "position")])
def render(self, engine: "Engine", projection: np.ndarray) -> None:
self._prog["projection"].write(projection)
self._prog["color"].value = self._color
self._vao.render(mgl.POINTS)
@staticmethod
def _build_buffers(lc: vp.LineCollection) -> np.ndarray:
buffer = np.empty((sum(len(line) for line in lc), 2), dtype="f4")
# build index array
cur_index = 0
for i, line in enumerate(lc):
next_idx = cur_index + len(line)
buffer[cur_index:next_idx] = vp.as_vector(line)
cur_index = next_idx
return buffer
class LineCollectionPenUpPainter(Painter):
def __init__(
self, ctx: mgl.Context, lc: vp.LineCollection, color: ColorType = (0, 0, 0, 0.5)
):
super().__init__(ctx)
self._color = color
self._prog = load_program("fast_line_mono", ctx)
# build vertices
vertices: List[Tuple[float, float]] = []
for i in range(len(lc) - 1):
vertices.extend(
((lc[i][-1].real, lc[i][-1].imag), (lc[i + 1][0].real, lc[i + 1][0].imag))
)
if len(vertices) > 0:
vbo = self.buffer(np.array(vertices, dtype="f4").tobytes())
self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "in_vert")])
else:
self._vao = None
def render(self, engine: "Engine", projection: np.ndarray) -> None:
if self._vao is not None:
self._prog["color"].value = self._color
self._prog["projection"].write(projection)
self._vao.render(mgl.LINES)
class LineCollectionPreviewPainter(Painter):
def __init__(
self, ctx: mgl.Context, lc: vp.LineCollection, pen_width: float, color: ColorType
):
super().__init__(ctx)
self._color = color
self._pen_width = pen_width
self._prog = load_program("preview_line", ctx)
vertices, indices = self._build_buffers(lc)
vbo = self.buffer(vertices.tobytes())
ibo = self.buffer(indices.tobytes())
self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "position")], ibo)
def render(self, engine: "Engine", projection: np.ndarray) -> None:
self._prog["color"].value = self._color
self._prog["pen_width"].value = self._pen_width
self._prog["antialias"].value = 1.5 / engine.scale
self._prog["projection"].write(projection)
if engine.debug:
self._prog["kill_frag_shader"].value = False
self._prog["debug_view"].value = True
self._prog["color"].value = self._color[0:3] + (0.3,)
self._vao.render(mgl.LINE_STRIP_ADJACENCY)
self._prog["kill_frag_shader"].value = True
self._prog["debug_view"].value = False
self._prog["color"].value = (0, 1, 0, 1)
self._ctx.wireframe = True
self._vao.render(mgl.LINE_STRIP_ADJACENCY)
self._ctx.wireframe = False
else:
self._prog["kill_frag_shader"].value = False
self._prog["debug_view"].value = False
self._vao.render(mgl.LINE_STRIP_ADJACENCY)
@staticmethod
def _build_buffers(lc: vp.LineCollection):
"""Prepare the buffers for multi-polyline rendering. Closed polyline must have their
last point identical to their first point."""
indices = []
reset_index = [-1]
start_index = 0
for i, line in enumerate(lc):
if line[0] == line[-1]: # closed path
idx = np.arange(len(line) + 3) - 1
idx[0], idx[-2], idx[-1] = len(line) - 1, 0, 1
else:
idx = np.arange(len(line) + 2) - 1
idx[0], idx[-1] = 0, len(line) - 1
indices.append(idx + start_index)
start_index += len(line)
indices.append(reset_index)
return (
np.vstack([vp.as_vector(line).astype("f4") for line in lc]),
np.concatenate(indices).astype("i4"),
)
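# Illustration of the adjacency indices built above: an open 4-point line
# starting at index 0 yields [0, 0, 1, 2, 3, 3, -1] (first and last points
# duplicated as adjacency anchors), while a closed 4-point line (first ==
# last) yields [3, 0, 1, 2, 3, 0, 1, -1] so the adjacency wraps around; -1
# is the primitive restart index between polylines.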
class RulersPainter(Painter):
def __init__(self, ctx: mgl.Context):
super().__init__(ctx)
        # ruler thickness and font size in pixels (scaled by pixel_factor at render time)
self._thickness = 20.0
self._font_size = 7.0
self._prog = load_program("ruler_patch", ctx)
# vertices
vertices = self.buffer(
np.array(
[
(-1.0, 1.0),
(0.0, 1.0),
(1.0, 1.0),
(-1.0, 0.0),
(0, 0.0),
(1.0, 0.0),
(-1.0, -1.0),
(0.0, -1.0),
],
dtype="f4",
).tobytes()
)
# line strip for stroke
frame_indices = self.buffer(np.array([3, 5, 1, 7], dtype="i4").tobytes())
self._stroke_vao = ctx.vertex_array(
self._prog, [(vertices, "2f4", "in_vert")], frame_indices
)
# triangles for fill
        # first 6 indices form the small corner square where the rulers meet,
        # the next 12 indices form the rulers themselves
patch_indices = self.buffer(
np.array(
[0, 1, 3, 1, 3, 4, 1, 2, 4, 2, 4, 5, 3, 4, 6, 4, 6, 7], dtype="i4"
).tobytes()
)
self._fill_vao = ctx.vertex_array(
self._prog, [(vertices, "2f4", "in_vert")], patch_indices
)
# major ticks buffer
self._ticks_prog = load_program("ruler_ticks", ctx)
self._ticks_prog["color"] = (0.2, 0.2, 0.2, 1.0)
self._ticks_vao = ctx.vertex_array(self._ticks_prog, [])
# TEXT STUFF
# https://github.com/Contraz/demosys-py/blob/master/demosys/effects/text/resources/data/demosys/text/meta.json
# {
# "characters": 190,
# "character_ranges": [
# {
# "min": 32,
# "max": 126
# },
# {
# "min": 161,
# "max": 255
# }
# ],
# "character_height": 159,
# "character_width": 77,
# "atlas_height": 30210,
# "atlas_width": 77
# }
self._texture = load_texture_array("VeraMono.png", ctx, (77, 159, 190), 4)
self._text_prog = load_program("ruler_text", ctx)
self._aspect_ratio = 159.0 / 77.0
self._text_prog["color"].value = (0, 0, 0, 1.0)
self._text_vao = ctx.vertex_array(self._text_prog, [])
# unit label
self._unit_label = LabelPainter(ctx, "XX")
@property
def thickness(self) -> float:
return self._thickness
def render(self, engine: "Engine", projection: np.ndarray) -> None:
# ===========================
# render frame
self._prog["ruler_width"] = 2 * self._thickness * engine.pixel_factor / engine.width
self._prog["ruler_height"] = 2 * self._thickness * engine.pixel_factor / engine.height
self._prog["color"].value = (1.0, 1.0, 1.0, 1.0)
self._fill_vao.render(mode=mgl.TRIANGLES, first=6)
# ===========================
# render ticks
spec = engine.scale_spec
self._ticks_prog["scale"] = spec.scale_px * engine.scale
self._ticks_prog["divisions"] = list(spec.divisions)
self._ticks_prog["delta_number"] = spec.scale
# compute various stuff
horiz_tick_count = math.ceil(engine.width / engine.scale / spec.scale_px) + 1
vertical_tick_count = math.ceil(engine.height / engine.scale / spec.scale_px) + 1
doc_width, doc_height = (
engine.document.page_size
if engine.document is not None and engine.document.page_size is not None
else (-1.0, -1.0)
)
start_number_horiz = math.floor(engine.origin[0] / spec.scale_px) * spec.scale
start_number_vert = math.floor(engine.origin[1] / spec.scale_px) * spec.scale
thickness = self._thickness * engine.pixel_factor
font_size = self._font_size * engine.pixel_factor
# render vertical ruler
self._ticks_prog["vertical"] = True
self._ticks_prog["viewport_dim"] = engine.height
self._ticks_prog["document_dim"] = doc_height / spec.to_px
self._ticks_prog["offset"] = (engine.origin[1] % spec.scale_px) * engine.scale
self._ticks_prog["ruler_thickness"] = 2 * thickness / engine.width
self._ticks_prog["start_number"] = start_number_vert
self._ticks_vao.render(mode=mgl.POINTS, vertices=vertical_tick_count)
# render horizontal ruler
self._ticks_prog["vertical"] = False
self._ticks_prog["viewport_dim"] = engine.width
self._ticks_prog["document_dim"] = doc_width / spec.to_px
self._ticks_prog["offset"] = (engine.origin[0] % spec.scale_px) * engine.scale
self._ticks_prog["ruler_thickness"] = 2 * thickness / engine.height
self._ticks_prog["start_number"] = start_number_horiz
self._ticks_vao.render(mode=mgl.POINTS, vertices=horiz_tick_count)
# ===========================
# render glyph
self._texture.use(0)
self._text_prog["scale"] = spec.scale_px * engine.scale
self._text_prog["delta_number"] = spec.scale
# horizontal
self._text_prog["vertical"] = False
self._text_prog["viewport_dim"] = engine.width
self._text_prog["document_dim"] = doc_width / spec.to_px
self._text_prog["offset"] = (engine.origin[0] % spec.scale_px) * engine.scale
self._text_prog["glyph_size"].value = (
font_size * 2.0 / engine.width,
font_size * 2.0 * self._aspect_ratio / engine.height,
)
self._text_prog["start_number"] = start_number_horiz
self._text_vao.render(mode=mgl.POINTS, vertices=horiz_tick_count)
# vertical
self._text_prog["vertical"] = True
self._text_prog["viewport_dim"] = engine.height
self._text_prog["document_dim"] = doc_height / spec.to_px
self._text_prog["offset"] = (engine.origin[1] % spec.scale_px) * engine.scale
self._text_prog["glyph_size"].value = (
font_size * 2.0 * self._aspect_ratio / engine.width,
font_size * 2.0 / engine.height,
)
self._text_prog["start_number"] = start_number_vert
self._text_vao.render(mode=mgl.POINTS, vertices=vertical_tick_count)
# ===========================
# render units corner
self._prog["color"].value = (1.0, 1.0, 1.0, 1.0)
self._fill_vao.render(mode=mgl.TRIANGLES, vertices=6)
self._prog["color"].value = (0.0, 0.0, 0.0, 1.0)
self._stroke_vao.render(mode=mgl.LINES)
self._unit_label.font_size = font_size
self._unit_label.position = (thickness / 7.0, thickness / 8.0)
self._unit_label.label = spec.unit
self._unit_label.render(engine, projection)
class LabelPainter(Painter):
def __init__(
self,
ctx: mgl.Context,
label: str = "",
position: Tuple[float, float] = (0.0, 0.0),
font_size: float = 14.0,
max_size: Optional[int] = None,
color: ColorType = (0.0, 0.0, 0.0, 1.0),
):
super().__init__(ctx)
self.position = position
self.font_size = font_size
self._max_size = max_size or len(label)
self._buffer = self.buffer(reserve=self._max_size)
self.label = label
self._color = color
self._texture = load_texture_array("VeraMono.png", ctx, (77, 159, 190), 4)
self._aspect_ratio = 159.0 / 77.0
self._prog = load_program("label", ctx)
self._vao = ctx.vertex_array(self._prog, [(self._buffer, "u1", "in_char")])
@property
def label(self) -> str:
return self._label
@label.setter
def label(self, label: str) -> None:
self._label = label
self._size = min(len(label), self._max_size)
self._buffer.write(
np.array([ord(c) for c in label[: self._max_size]], dtype=np.uint8).tobytes()
)
def render(self, engine: "Engine", projection: np.ndarray) -> None:
self._texture.use(0)
self._prog["color"].value = self._color
self._prog["position"].value = (
-1.0 + 2.0 * self.position[0] / engine.width,
1.0 - 2.0 * self.position[1] / engine.height,
)
self._prog["glyph_size"].value = (
self.font_size * 2.0 / engine.width,
self.font_size * 2.0 * self._aspect_ratio / engine.height,
)
self._vao.render(mode=mgl.POINTS, vertices=self._size)
|
169383
|
from unittest import TestCase
from numpy import array, ndarray
from numpy.testing import assert_array_equal
from trigger import accel_value, trigger_time
class TriggerTimeTest(TestCase):
def test_estimates_when_function_exceeds(self):
function = 10
t = array([1599574034])
trig_level = 100
expected = ndarray([])
actual = trigger_time(function, t, trig_level)
assert_array_equal(expected, actual)
class TestAccelValue(TestCase):
def test_it_provides_the_right_value(self):
"""
[x,y,z,accel_value]
x,y and z values were randomly generated
accel_value = ((x**2 + y**2 + z**2)**0.5)
"""
testCases = [
[1, 2, 2, 3],
[2, 4, 4, 6],
[2, -1, 2, 3],
[-4, -4, 2, 6],
[4, -2, 4, 6],
[2, 2, -1, 3],
[-5.444444444, -10.33333333, -4.111111111, 12.38228524],
[6.555555556, 7.111111111, -5.111111111, 10.93922605],
[7.888888889, -5.222222222, 11, 14.50883086],
[7.111111111, 2.222222222, -8.111111111, 11.01345978],
[3.333333333, -6.666666667, 2.777777778, 7.954345035],
[-7.222222222, -8.666666667, -7.333333333, 13.45545922],
[2.555555556, 8.333333333, -3.444444444, 9.372273266],
[-10.88888889, 5.555555556, -10.77777778, 16.29701177],
[1.777777778, 2.888888889, 2.111111111, 3.995367688],
[-1.333333333, 3, -8.333333333, 8.956685895],
[-8.111111111, -10.55555556, -9.111111111, 16.13140484],
[9, -11, 10.66666667, 17.77013725],
[9.777777778, -5.555555556, 3, 11.63912092],
[-8.555555556, 5.777777778, -7.555555556, 12.79322737],
[1.222222222, 1.111111111, -3.777777778, 4.123105626],
[1.111111111, -11, 9.666666667, 14.68601417],
[-8.222222222, 3.222222222, 4.555555556, 9.936837562],
[9.555555556, -9.888888889, 7.555555556, 15.69028952],
[-10.44444444, -2.666666667, 0.222222222, 10.7817862],
[-3.666666667, -1.444444444, 11.11111111, 11.78930254],
[8.222222222, 0.888888889, 3.333333333, 8.916623399],
[1.555555556, -4.333333333, -7.888888889, 9.134117295],
[-0.555555556, 8.444444444, 7.111111111, 11.05374078],
[-7.222222222, 8, -10.33333333, 14.93111756],
[7.222222222, -3.222222222, 7.111111111, 10.63537076],
[-1.666666667, -2, 5.333333333, 5.934831272],
[-2.555555556, 5.111111111, -4.777777778, 7.448589228],
[3.111111111, 3.444444444, -9.666666667, 10.72322966],
[-9, 4.666666667, -5.444444444, 11.5073782],
[-0.666666667, 8.222222222, 10.11111111, 13.04928928],
[-8.111111111, -4.222222222, -4.888888889, 10.36911368],
[-2.555555556, 0.111111111, -9, 9.356452847],
[10, -1.222222222, -2.555555556, 10.39349274],
]
for test in testCases:
assert_array_equal(
round(accel_value(test[0], test[1], test[2]), 4), round(test[3], 4)
)
|
169482
|
from aetherling.helpers.nameCleanup import cleanName
from magma import *
from magma.frontend.coreir_ import GetCoreIRBackend
from aetherling.modules.hydrate import Dehydrate, Hydrate
from mantle.coreir.memory import DefineRAM, getRAMAddrWidth
__all__ = ['DefineRAMAnyType', 'RAMAnyType']
@cache_definition
def DefineRAMAnyType(t: Kind, n: int, read_latency = 0):
"""
    Generate a RAM that stores n entries of any type.
    RADDR : In(Bits[log_2(n)]), RDATA : Out(t), WADDR : In(Bits[log_2(n)]), WDATA : In(t), WE : In(Bit)
"""
class _RAM(Circuit):
name = 'RAM_{}t_{}n'.format(cleanName(str(t)), n)
addr_width = getRAMAddrWidth(n)
IO = ['RADDR', In(Bits[addr_width]),
'RDATA', Out(t),
'WADDR', In(Bits[addr_width]),
'WDATA', In(t),
'WE', In(Bit)
] + ClockInterface()
@classmethod
def definition(cls):
type_size_in_bits = GetCoreIRBackend().get_type(t).size
ram = DefineRAM(n, type_size_in_bits, read_latency=read_latency)()
type_to_bits = Dehydrate(t)
wire(cls.WDATA, type_to_bits.I)
wire(type_to_bits.out, ram.WDATA)
bits_to_type = Hydrate(t)
wire(ram.RDATA, bits_to_type.I)
wire(bits_to_type.out, cls.RDATA)
wire(cls.RADDR, ram.RADDR)
wire(ram.WADDR, cls.WADDR)
wire(cls.WE, ram.WE)
return _RAM
def RAMAnyType(t: Kind, n: int):
return DefineRAMAnyType(t, n)()
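# Usage sketch (hypothetical element type and depth; Bits comes from the
# magma star import above):
#   ram = RAMAnyType(Bits[8], 16)  # 16-entry RAM storing 8-bit values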
|
169483
|
from pySDC.projects.parallelSDC.newton_vs_sdc import main as main_newton_vs_sdc
from pySDC.projects.parallelSDC.newton_vs_sdc import plot_graphs as plot_graphs_newton_vs_sdc
from pySDC.projects.parallelSDC.nonlinear_playground import main, plot_graphs
def test_main():
main()
plot_graphs()
def test_newton_vs_sdc():
main_newton_vs_sdc()
plot_graphs_newton_vs_sdc()
|
169485
|
from __future__ import division
from __future__ import absolute_import
from builtins import object
from past.utils import old_div
from nose.tools import (assert_equal, assert_not_equal, raises,
assert_almost_equal)
from nose.plugins.skip import SkipTest
from .test_helpers import assert_items_almost_equal, assert_items_equal
import pandas as pd
import numpy as np
import openpathsampling as paths
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
class TestWHAM(object):
def setup(self):
self.exact = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625]
self.iface1 = [2.0, 1.0, 0.5, 0.25, 0.125, 0.0625, 0.0]
self.iface2 = [1.0, 1.0, 1.0, 0.5, 0.25, 0.125, 0.0625]
self.iface3 = [3.0, 3.0, 3.0, 3.0, 3.0, 1.5, 0.75]
# self.iface1 = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.0, 0.0]
# self.iface2 = [1.0, 1.0, 1.0, 0.5, 0.25, 0.125, 0.0625]
# self.iface3 = [1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.25]
# self.iface1 = [2.0, 0.5, 0.125, 0.0]
# self.iface2 = [1.0, 1.0, 0.25, 0.0625]
# self.iface3 = [3.0, 3.0, 3.0, 0.75]
# self.index = [0.0, 0.2, 0.4, 0.6]
self.columns = ["Interface 1", "Interface 2", "Interface 3"]
self.index = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
self.input_df = pd.DataFrame(
data=np.array([self.iface1, self.iface2, self.iface3]).T,
index=self.index,
columns=self.columns
)
self.expected_cleaned = np.array([[2.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.5, 1.0, 0.0],
[0.25, 0.5, 0.0],
[0.0, 0.25, 3.0],
[0.0, 0.125, 1.5],
[0.0, 0.0, 0.75]])
self.cleaned = pd.DataFrame(data=self.expected_cleaned,
index=self.index,
columns=self.columns)
self.wham = paths.numerics.WHAM(cutoff=0.1)
def test_prep_reverse_cumulative(self):
cleaned = self.wham.prep_reverse_cumulative(self.input_df)
np.testing.assert_allclose(cleaned.values,
self.expected_cleaned)
def test_prep_reverse_cumulative_with_interfaces(self):
wham = paths.numerics.WHAM(cutoff=0.1, interfaces=[0.0, 0.2, 0.3])
cleaned = wham.prep_reverse_cumulative(self.input_df)
np.testing.assert_allclose(cleaned.values,
np.array([[2.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.5, 1.0, 0.0],
[0.25, 0.5, 3.0],
[0.0, 0.25, 3.0],
[0.0, 0.125, 1.5],
[0.0, 0.0, 0.75]]))
def test_unweighting_tis(self):
unweighting = self.wham.unweighting_tis(self.cleaned)
expected = np.array([[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 1.0]])
np.testing.assert_allclose(unweighting.values, expected)
def test_sum_k_Hk_Q(self):
sum_k_Hk_Q = self.wham.sum_k_Hk_Q(self.cleaned)
expected = np.array([2.0, 1.0, 1.5, 0.75, 3.25, 1.625, 0.75])
np.testing.assert_allclose(sum_k_Hk_Q.values, expected)
def test_n_entries(self):
n_entries = self.wham.n_entries(self.cleaned)
expected = np.array([3.75, 1.875, 5.25])
np.testing.assert_allclose(n_entries.values, expected)
def test_weighted_counts_tis(self):
n_entries = self.wham.n_entries(self.cleaned)
unweighting = self.wham.unweighting_tis(self.cleaned)
weighted_counts = self.wham.weighted_counts_tis(unweighting,
n_entries)
expected = np.array([[3.75, 0.0, 0.0],
[3.75, 0.0, 0.0],
[3.75, 1.875, 0.0],
[3.75, 1.875, 0.0],
[0.0, 1.875, 5.25],
[0.0, 1.875, 5.25],
[0.0, 0.0, 5.25]])
np.testing.assert_allclose(weighted_counts.values, expected)
def test_generate_lnZ(self):
guess = [1.0, 1.0, 1.0]
expected_lnZ = np.log([1.0, old_div(1.0,4.0), old_div(7.0,120.0)])
# TODO: I'm not sure the last is log(7/120)
# however, I got the same result out of the old version, too, and
# this does combine into the correct result in the end (see
# test_output_histogram)
unweighting = self.wham.unweighting_tis(self.cleaned)
sum_k_Hk_Q = self.wham.sum_k_Hk_Q(self.cleaned)
weighted_counts = self.wham.weighted_counts_tis(
unweighting,
self.wham.n_entries(self.cleaned)
)
lnZ = self.wham.generate_lnZ(guess, unweighting, weighted_counts,
sum_k_Hk_Q)
np.testing.assert_allclose(lnZ.values, expected_lnZ)
def test_output_histogram(self):
sum_k_Hk_Q = self.wham.sum_k_Hk_Q(self.cleaned)
n_entries = self.wham.n_entries(self.cleaned)
unweighting = self.wham.unweighting_tis(self.cleaned)
weighted_counts = self.wham.weighted_counts_tis(unweighting,
n_entries)
lnZ = pd.Series(data=np.log([1.0, old_div(1.0,4.0), old_div(7.0,120.0)]),
index=n_entries.index)
wham_hist = self.wham.output_histogram(lnZ, sum_k_Hk_Q,
weighted_counts)
normed = self.wham.normalize_cumulative(wham_hist)
np.testing.assert_allclose(normed.values, np.array(self.exact))
def test_guess_lnZ_crossing_probability(self):
input_data = np.array([[2.0, 1.0, 5.0],
[1.0, 1.0, 5.0],
[0.5, 1.0, 5.0],
[0.1, 0.2, 5.0],
[0.0, 0.04, 1.0],
[0.0, 0.02, 0.2]])
input_df = pd.DataFrame(data=input_data,
index=self.index[0:6],
columns=self.columns)
cleaned = self.wham.prep_reverse_cumulative(input_df)
guess_lnZ = self.wham.guess_lnZ_crossing_probability(cleaned)
expected_Z = np.array([1.0, 0.25, 0.25*0.2])
np.testing.assert_allclose(guess_lnZ.values, np.log(expected_Z))
def test_wham_bam_histogram(self):
wham_hist = self.wham.wham_bam_histogram(self.input_df)
np.testing.assert_allclose(wham_hist.values, self.exact)
@raises(RuntimeError)
def test_check_overlaps_no_overlap_with_first(self):
bad_data = np.array([[1.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 1.0],
[0.0, 0.1, 0.2]])
bad_df = pd.DataFrame(data=bad_data,
index=self.index[0:5],
columns=self.columns)
self.wham.check_cleaned_overlaps(bad_df)
@raises(RuntimeError)
def test_check_overlaps_no_overlap_with_final(self):
bad_data = np.array([[1.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.2, 1.0, 0.0],
[0.1, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.5]])
bad_df = pd.DataFrame(data=bad_data,
index=self.index[0:6],
columns=self.columns)
self.wham.check_cleaned_overlaps(bad_df)
@raises(RuntimeError)
def test_check_overlaps_no_overlap_in_middle(self):
bad_data = np.array([[1.0, 0.0, 0.0, 0.0],
[0.5, 1.0, 0.0, 0.0],
[0.1, 0.2, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.1, 0.2]])
bad_df = pd.DataFrame(data=bad_data,
index=self.index[0:6],
columns=self.columns + ['Interface 4'])
self.wham.check_cleaned_overlaps(bad_df)
|
169499
|
from __future__ import with_statement # this is to work with python2.5
from validation import vworkspace
with vworkspace() as w:
w.props.flatten_code_unroll = False
w.all_functions.validate_phases("coarse_grain_parallelization","flatten_code","coarse_grain_parallelization","loop_fusion")
|
169510
|
from typing import List
from veniq.ast_framework import ASTNodeType, AST
from veniq.ast_framework.ast_node import ASTNode
class ClassicSetter:
"""
The method's name starts with set. There are attributes
assigning in the method. Also, asserts are ignored.
"""
suitable_nodes: List[ASTNodeType] = [
ASTNodeType.ASSERT_STATEMENT,
ASTNodeType.STATEMENT_EXPRESSION,
]
def _check_body_nodes(self, check_setter_body: List[ASTNode]) -> bool:
"""
Check whether nodes are agree with the following types
(in self.suitable_nodes) or not.
"""
for node in check_setter_body:
if node.node_type not in self.suitable_nodes:
return False
return True
def value(self, ast: AST) -> List[int]:
lines: List[int] = []
for node in ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION):
method_name = node.name
if node.return_type is None and method_name.startswith('set') and \
self._check_body_nodes(node.body):
lines.append(node.line)
return sorted(lines)
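# Example of a method this detector reports (illustrative Java source):
#   void setName(String name) { this.name = name; }
# It matches because the name starts with 'set', the return type is void
# (None), and the body contains only statement expressions, so its line
# number is included in the result of value().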
|
169517
|
from helpers import render
def aboutus(request):
return render(request, {}, 'news/aboutus.html')
def help(request):
return render(request, {}, 'news/help.html')
def buttons(request):
return render(request, {}, 'news/buttons.html')
|
169559
|
from __future__ import print_function
from pymel.core import hide, showHidden, selected, select
from .. import core
from ..nodeApi import fossilNodes
class QuickHideControls(object):
'''
Toggle the visibility of the selected rig controls.
'''
controlsToHide = []
hideMain = False
mainShapes = None
@staticmethod
@core.alt.name( 'Quick Hide Controls', 'Anim')
def act():
if not QuickHideControls.controlsToHide or all( [not o.exists() for o in QuickHideControls.controlsToHide] ):
QuickHideControls.start()
else:
QuickHideControls.end()
@classmethod
def start(cls):
temp = core.findNode.controllers()
ctrls = temp[:-2] # Cut out the main controller and root motion
# Artificially add the parents of the selected controls so they are hidden as a result
selectedControls = set(selected())
for ctrl in selected():
if type(ctrl) in (fossilNodes.SubController, fossilNodes.RigController):
selectedControls.update(ctrl.getAllParents())
        # Hide the spaces since the controls' visibility is locked and hidden to prevent them from accidentally being keyed hidden.
cls.controlsToHide = set( ctrls ).difference( selectedControls )
for obj in cls.controlsToHide:
hide(obj.getParent(), obj.getParent().getParent())
main = temp[-2]
cls.hideMain = main not in selectedControls
if cls.hideMain:
cls.mainShapes = core.shape.getShapes(main)
hide(cls.mainShapes)
print( 'hide main', cls.mainShapes[0].isVisible() )
if cls.mainShapes[0].isVisible():
plug = cls.mainShapes[0].visibility.listConnections(s=True, p=True)
if plug:
plug[0].set(0)
@classmethod
def end(cls):
for obj in cls.controlsToHide:
showHidden(obj.getParent(), obj.getParent().getParent())
cls.controlsToHide = []
if cls.hideMain:
showHidden(cls.mainShapes)
if not cls.mainShapes[0].isVisible():
plug = cls.mainShapes[0].visibility.listConnections(s=True, p=True)
if plug:
plug[0].set(1)
@core.alt.name('Select Related Controllers', 'Anim')
def selectRelatedControllers():
'''
If any controllers are selected, all siblings are also selected.
'''
for obj in selected():
main = core.findNode.leadController(obj)
if main:
for name, ctrl in main.subControl.items():
select(ctrl, add=True)
select(main, add=True)
@core.alt.name('Select Children Controllers', 'Anim')
def selectChildrenControllers():
'''
If any controllers are selected, any subsequent controllers are selected.
'''
for obj in selected():
main = core.findNode.leadController(obj)
if main == obj:
for name, ctrl in main.subControl.items():
select(ctrl, add=True)
select(main, add=True)
else:
doSelect = False
for name, ctrl in main.subControl.items():
if ctrl == obj:
doSelect = True
if doSelect:
select(ctrl, add=True)
|
169595
|
import os
import boto3
from common import AWSServiceCollector, AWS_REGIONS_SET
sns = boto3.client('sns')
sts = boto3.client('sts')
class ElasticBeanstalkCollector(AWSServiceCollector):
boto3_service_name = 'elasticbeanstalk'
def _collect_assets(self):
# collect Elastic Beanstalk domains and endpoints:
response = self.client.describe_environments()
environments = response['Environments']
for environment in environments:
# get the endpoint domain:
endpoint_domain = environment['EndpointURL']
self.domains.add(endpoint_domain)
# get custom domain, if any:
custom_domain = environment.get('CNAME')
if custom_domain:
self.domains.add(custom_domain)
def handler_fan_out(event, context):
"""
Publishes an SNS message for each region from which the assets are to be
collected.
"""
elasticbeanstalk_regions = AWS_REGIONS_SET
for region in elasticbeanstalk_regions:
sns.publish(
TopicArn=os.environ['SNSTopicCollectAWSElasticBeanstalkARN'],
Message=region,
)
def handler_regional(event, context):
region = event['Records'][0]['Sns']['Message']
response = sts.assume_role(
RoleArn=os.environ['AWSIAMRoleARN'],
RoleSessionName='CloudFrontierAssetCollector',
# ExternalId='...',
)
    print('Assumed IAM role')
credentials = response['Credentials']
client_session = boto3.Session(
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'],
region_name=region,
)
    print('Created session')
ElasticBeanstalkCollector(client_session).collect()
|
169613
|
from boa3.builtin import public
from boa3.builtin.interop.contract import CallFlags
@public
def main(flag: str) -> CallFlags:
call_flags: CallFlags
if flag == 'ALL':
call_flags = CallFlags.ALL
elif flag == 'READ_ONLY':
call_flags = CallFlags.READ_ONLY
elif flag == 'STATES':
call_flags = CallFlags.STATES
elif flag == 'ALLOW_NOTIFY':
call_flags = CallFlags.ALLOW_NOTIFY
elif flag == 'ALLOW_CALL':
call_flags = CallFlags.ALLOW_CALL
elif flag == 'WRITE_STATES':
call_flags = CallFlags.WRITE_STATES
elif flag == 'READ_STATES':
call_flags = CallFlags.READ_STATES
else:
call_flags = CallFlags.NONE
return call_flags
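# Behaviour sketch: main('READ_ONLY') returns CallFlags.READ_ONLY, and any
# flag string not handled above falls through to CallFlags.NONE.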
|
169628
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class DialogDemo(QMainWindow):
def __init__(self, parent=None):
super(DialogDemo, self).__init__(parent)
self.setWindowTitle("Dialog demo")
self.resize(600, 400)
self.button = QPushButton(self)
self.button.setText("点击弹出文件对话框")
self.button.move(50, 50)
self.button.clicked.connect(self.showDialog)
def showDialog(self):
filename, _ = QFileDialog.getOpenFileName(self, "打开文件", "D:\\", "Image Files (*.jpg *.png)")
if filename:
print(f"file: {filename}")
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = DialogDemo()
demo.show()
sys.exit(app.exec_())
|
169664
|
from .repos import ApiTokenRepo
from .responses import ApiTokenResponse, ApiTokensResponse, SensitiveApiTokenResponse
from .data import ApiTokenData, SensitiveApiTokenData
from .forms import ApiTokenCreateForm
|
169675
|
import os
import sys
import unittest
from nose.config import Config
from nose.plugins import doctests
from mock import Bucket
class TestDoctestErrorHandling(unittest.TestCase):
def setUp(self):
self._path = sys.path[:]
here = os.path.dirname(__file__)
testdir = os.path.join(here, 'support', 'doctest')
sys.path.insert(0, testdir)
p = doctests.Doctest()
p.can_configure = True
p.configure(Bucket(), Config())
self.p = p
def tearDown(self):
sys.path = self._path[:]
def test_no_doctests_in_file(self):
p = self.p
mod = __import__('no_doctests')
loaded = [ t for t in p.loadTestsFromModule(mod) ]
assert not loaded, "Loaded %s from empty module" % loaded
def test_err_doctests_raises_exception(self):
p = self.p
mod = __import__('err_doctests')
try:
loaded = [ t for t in p.loadTestsFromModule(mod) ]
except ValueError:
pass
else:
self.fail("Error doctests file did not raise ValueError")
if __name__ == '__main__':
unittest.main()
|
169683
|
import os
os.system("clear")
print("\nassembling crt0...\n")
if(os.system("sdasz80 -o crt0_fap.s") != 0):
exit()
print("\ncompiling hellofap.c...\n")
# code-loc is where main() is, data-loc is where RAM starts
if os.system("sdcc -mz80 --code-loc 0x200 --data-loc 0xc000 --no-std-crt0 crt0_fap.rel hellofap.c") != 0:
exit()
print("\nconverting to bin...\n")
os.system("hex2bin hellofap.ihx")
print("\ndone")
|
169694
|
from setuptools import setup
setup(
name='apple-mango',
version='0.2',
url='https://github.com/legshort/apple-mango/',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Light weight BDD Pattern',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
],
keywords=['tdd', 'bdd', 'test'],
packages=['apple_mango'],
install_requires=[
'wrapt>=1.10.0',
],
long_description=open('README.md').read(),
)
|
169700
|
import numpy as np
from forge.blade.action import action
from forge.blade.systems import skill, droptable
class Entity():
def __init__(self, pos):
self.pos = pos
self.alive = True
self.skills = skill.Skills()
        self.entityIndex = 0
self.health = -1
self.lastAttacker = None
def act(self, world):
pass
def decide(self, stimuli):
pass
def death(self):
pass
def registerHit(self, attacker, dmg):
self.lastAttacker = attacker
self.health -= dmg
def remove(self, ent):
r, c = self.pos
ent[r, c] = 0
def isAlive(self):
return self.health > 0
@property
def isPC(self):
return False
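# --- sketch (added): health defaults to -1 above, so isAlive() is False
# until a concrete subclass assigns it ---
# class Rock(Entity):
#     def __init__(self, pos):
#         super().__init__(pos)
#         self.health = 10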
|
169723
|
import time
import os
import sys
try:
    import requests
except ImportError:
    # requests is needed below; install it on first run, then retry the import
    os.system("pip install requests")
    import requests
COLORS = {
"black":"\u001b[30;1m",
"red": "\u001b[31;1m",
"green":"\u001b[32m",
"yellow":"\u001b[33;1m",
"blue":"\u001b[34;1m",
"magenta":"\u001b[35m",
"cyan": "\u001b[36m",
"white":"\u001b[37m",
"yellow-background":"\u001b[43m",
"black-background":"\u001b[40m",
"cyan-background":"\u001b[46;1m",
}
def colorText(text):
for color in COLORS:
text = text.replace("[[" + color + "]]", COLORS[color])
return text
os.system("pip install requests")
os.system("clear")
url = "https://google.com"
try:
    request = requests.get(url, timeout=5)
    print(colorText("\n[[green]]Successfully connected!\nSuccessfully connected! "))
    time.sleep(3)
    os.system("clear")
except (requests.ConnectionError, requests.Timeout):
    print(colorText("\n[[red]] You're not connected to the internet! "))
    print(colorText("\n[[red]] You're not connected to the internet! "))
    time.sleep(3)
    sys.exit()
print(colorText("[[green]] Installation in progress..."))
os.system("clear")
os.system("sudo bash cmd.sh")
|
169737
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Transformer(nn.Module):
def __init__(self, vocab_size: int, max_seq_len: int, embed_dim: int, hidden_dim: int, n_layer: int, n_head: int, ff_dim: int, embed_drop: float, hidden_drop: float):
super().__init__()
self.tok_embedding = nn.Embedding(vocab_size, embed_dim)
self.pos_embedding = nn.Embedding(max_seq_len, embed_dim)
layer = nn.TransformerEncoderLayer(
d_model=hidden_dim, nhead=n_head, dim_feedforward=ff_dim, dropout=hidden_drop)
self.encoder = nn.TransformerEncoder(layer, num_layers=n_layer)
self.embed_dropout = nn.Dropout(embed_drop)
self.linear1 = nn.Linear(embed_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, embed_dim)
def encode(self, x, mask):
x = x.transpose(0, 1)
x = self.encoder(x, src_key_padding_mask=mask)
x = x.transpose(0, 1)
return x
def forward(self, x, *args):
# (batch_size, max_seq_len, embed_dim)
mask = args[0] if len(args) > 0 else None
tok_emb = self.tok_embedding(x)
max_seq_len = x.shape[-1]
pos_emb = self.pos_embedding(torch.arange(max_seq_len).to(x.device))
x = tok_emb + pos_emb.unsqueeze(0)
x = self.embed_dropout(x)
x = self.linear1(x)
x = self.encode(x, mask)
x = self.linear2(x)
        # tied output projection onto the input embedding; values are unnormalized logits
        probs = torch.matmul(x, self.tok_embedding.weight.t())
return probs
class BiLSTM(nn.Module):
def __init__(self, vocab_size: int, embed_dim: int, hidden_dim: int, n_layer: int, embed_drop: float, rnn_drop: float):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.bilstm = nn.LSTM(embed_dim, hidden_dim // 2, num_layers=n_layer,
dropout=rnn_drop if n_layer > 1 else 0, batch_first=True, bidirectional=True)
self.embed_dropout = nn.Dropout(embed_drop)
self.linear = nn.Linear(hidden_dim, embed_dim)
def encode(self, x):
x = self.embedding(x)
x = self.embed_dropout(x)
x, _ = self.bilstm(x)
return x
def predict(self, x):
x = self.linear(x)
probs = torch.matmul(x, self.embedding.weight.t())
return probs
def forward(self, x, *args):
x = self.encode(x)
return self.predict(x)
class BiLSTMAttn(BiLSTM):
def __init__(self, vocab_size: int, embed_dim: int, hidden_dim: int, n_layer: int, embed_drop: float, rnn_drop: float, n_head: int):
super().__init__(vocab_size, embed_dim, hidden_dim, n_layer, embed_drop, rnn_drop)
self.attn = nn.MultiheadAttention(hidden_dim, n_head)
def forward(self, x, *args):
mask = args[0] if len(args) > 0 else None
x = self.encode(x)
x = x.transpose(0, 1)
x = self.attn(x, x, x, key_padding_mask=mask)[0].transpose(0, 1)
return self.predict(x)
class BiLSTMCNN(BiLSTM):
def __init__(self, vocab_size: int, embed_dim: int, hidden_dim: int, n_layer: int, embed_drop: float, rnn_drop: float):
super().__init__(vocab_size, embed_dim, hidden_dim, n_layer, embed_drop, rnn_drop)
self.conv = nn.Conv1d(in_channels=hidden_dim,
out_channels=hidden_dim, kernel_size=3, padding=1)
def forward(self, x, *args):
x = self.encode(x)
x = x.transpose(1, 2)
x = self.conv(x).transpose(1, 2).relu()
return self.predict(x)
class BiLSTMConvAttRes(BiLSTM):
def __init__(self, vocab_size: int, max_seq_len: int, embed_dim: int, hidden_dim: int, n_layer: int, embed_drop: float, rnn_drop: float, n_head: int):
super().__init__(vocab_size, embed_dim, hidden_dim, n_layer, embed_drop, rnn_drop)
self.attn = nn.MultiheadAttention(hidden_dim, n_head)
self.conv = nn.Conv1d(in_channels=hidden_dim,
out_channels=hidden_dim, kernel_size=3, padding=1)
self.norm = nn.LayerNorm(hidden_dim)
def forward(self, x, *args):
mask = args[0] if len(args) > 0 else None
x = self.encode(x)
res = x
x = self.conv(x.transpose(1, 2)).relu()
x = x.permute(2, 0, 1)
x = self.attn(x, x, x, key_padding_mask=mask)[0].transpose(0, 1)
x = self.norm(res + x)
return self.predict(x)
class CNN(nn.Module):
def __init__(self, vocab_size: int, embed_dim: int, hidden_dim: int, embed_drop: float):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.conv = nn.Conv1d(in_channels=embed_dim,
out_channels=hidden_dim, kernel_size=3, padding=1)
self.embed_dropout = nn.Dropout(embed_drop)
self.linear = nn.Linear(hidden_dim, embed_dim)
def forward(self, x, *args):
x = self.embedding(x)
x = self.embed_dropout(x)
x = x.transpose(1, 2)
x = self.conv(x).transpose(1, 2).relu()
x = self.linear(x)
probs = torch.matmul(x, self.embedding.weight.t())
return probs
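# --- minimal smoke test (added sketch; sizes are arbitrary assumptions,
# not values from the original training setup) ---
if __name__ == '__main__':
    _model = Transformer(vocab_size=100, max_seq_len=32, embed_dim=64,
                         hidden_dim=64, n_layer=2, n_head=4, ff_dim=128,
                         embed_drop=0.1, hidden_drop=0.1)
    _tokens = torch.randint(0, 100, (2, 16))  # (batch, seq_len)
    _logits = _model(_tokens)                 # no padding mask passed
    assert _logits.shape == (2, 16, 100)      # (batch, seq_len, vocab)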
|
169828
|
import os
from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import KRHF
from pyscf.pbc.tdscf import KTDHF
from pyscf.pbc.tdscf import krhf_slow_gamma as ktd
import unittest
from numpy import testing
import numpy
from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close, tdhf_frozen_mask
class DiamondTest(unittest.TestCase):
"""Compare this (krhf_slow_gamma) @2kp@Gamma vs reference (pyscf)."""
k = 2
k_c = (0, 0, 0)
@classmethod
def setUpClass(cls):
cls.cell = cell = Cell()
# Lift some degeneracies
cell.atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.67 1.68 1.69
'''
cell.basis = {'C': [[0, (0.8, 1.0)],
[1, (1.0, 1.0)]]}
# cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
k = cell.make_kpts([cls.k, 1, 1], scaled_center=cls.k_c)
# K-points
cls.model_krhf = model_krhf = KRHF(cell, k).density_fit()
model_krhf.kernel()
cls.td_model_krhf = td_model_krhf = KTDHF(model_krhf)
td_model_krhf.kernel()
cls.ref_m = retrieve_m(td_model_krhf)
cls.ref_e = td_model_krhf.e
@classmethod
def tearDownClass(cls):
# These are here to remove temporary files
del cls.td_model_krhf
del cls.model_krhf
del cls.cell
def test_eri(self):
"""Tests all ERI implementations: with and without symmetries."""
for eri in (ktd.PhysERI, ktd.PhysERI4, ktd.PhysERI8):
            # Note that this specific combination of k-points results in real orbitals and allows testing PhysERI8
try:
e = eri(self.model_krhf)
m = e.tdhf_full_form()
# Test matrix vs ref
testing.assert_allclose(m, retrieve_m_hf(e), atol=1e-11)
# Test matrix vs pyscf
testing.assert_allclose(self.ref_m, m, atol=1e-5)
except Exception:
print("When testing {} the following exception occurred:".format(eri))
raise
def test_class(self):
"""Tests container behavior."""
model = ktd.TDRHF(self.model_krhf)
model.nroots = self.td_model_krhf.nroots
assert not model.fast
model.kernel()
testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-5)
nocc = nvirt = 4
testing.assert_equal(model.xy.shape, (len(model.e), 2, self.k, nocc, nvirt))
assert_vectors_close(model.xy, numpy.array(self.td_model_krhf.xy), atol=1e-2)
class FrozenTest(unittest.TestCase):
"""Tests frozen behavior."""
k = 2
k_c = (0, 0, 0)
df_file = os.path.realpath(os.path.join(__file__, "..", "frozen_test_cderi.h5"))
@classmethod
def setUpClass(cls):
cls.cell = cell = Cell()
# Lift some degeneracies
cell.atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.67 1.68 1.69
'''
cell.basis = 'sto-3g'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
k = cell.make_kpts([cls.k, 1, 1], scaled_center=cls.k_c)
# K-points
cls.model_krhf = model_krhf = KRHF(cell, k).density_fit()
# model_krhf.with_df._cderi_to_save = cls.df_file
model_krhf.with_df._cderi = cls.df_file
model_krhf.conv_tol = 1e-14
model_krhf.kernel()
cls.td_model_krhf = model_ktd = ktd.TDRHF(model_krhf)
model_ktd.nroots = 5
model_ktd.kernel()
@classmethod
def tearDownClass(cls):
# These are here to remove temporary files
del cls.td_model_krhf
del cls.model_krhf
del cls.cell
def test_class(self):
"""Tests container behavior (frozen vs non-frozen)."""
for frozen in (1, [0, 1]):
try:
model = ktd.TDRHF(self.model_krhf, frozen=frozen)
model.nroots = self.td_model_krhf.nroots
model.kernel()
mask_o, mask_v = tdhf_frozen_mask(model.eri, kind="o,v")
testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-3)
assert_vectors_close(model.xy, numpy.array(self.td_model_krhf.xy)[..., mask_o, :][..., mask_v], atol=1e-2)
except Exception:
print("When testing class with frozen={} the following exception occurred:".format(repr(frozen)))
raise
|
169843
|
import numpy as np
from collections import namedtuple
from itertools import product
import pybullet as p
from pybullet_planning.utils import CLIENT, BASE_LINK, UNKNOWN_FILE, OBJ_MESH_CACHE
from pybullet_planning.utils import implies
#####################################
# Bounding box
AABB = namedtuple('AABB', ['lower', 'upper'])
"""axis-aligned bounding box: https://en.wikipedia.org/wiki/Bounding_volume
Notice that the world-axis is used here. We don't have support for OOBB (using the object's local coordinate system)?
"""
def aabb_from_points(points):
return AABB(np.min(points, axis=0), np.max(points, axis=0))
def aabb_union(aabbs):
return aabb_from_points(np.vstack([aabb for aabb in aabbs]))
def aabb_overlap(aabb1, aabb2):
lower1, upper1 = aabb1
lower2, upper2 = aabb2
return np.less_equal(lower1, upper2).all() and \
np.less_equal(lower2, upper1).all()
#####################################
# Bounding box from body
def get_subtree_aabb(body, root_link=BASE_LINK):
from pybullet_planning.interfaces.robots.link import get_link_subtree
return aabb_union(get_aabb(body, link) for link in get_link_subtree(body, root_link))
def get_aabbs(body):
from pybullet_planning.interfaces.robots.link import get_all_links
return [get_aabb(body, link=link) for link in get_all_links(body)]
def get_aabb(body, link=None):
# Note that the query is conservative and may return additional objects that don't have actual AABB overlap.
# This happens because the acceleration structures have some heuristic that enlarges the AABBs a bit
# (extra margin and extruded along the velocity vector).
# Contact points with distance exceeding this threshold are not processed by the LCP solver.
# AABBs are extended by this number. Defaults to 0.02 in Bullet 2.x
#p.setPhysicsEngineParameter(contactBreakingThreshold=0.0, physicsClientId=CLIENT)
if link is None:
aabb = aabb_union(get_aabbs(body))
else:
aabb = p.getAABB(body, linkIndex=link, physicsClientId=CLIENT)
return aabb
get_lower_upper = get_aabb
def get_aabb_center(aabb):
lower, upper = aabb
return (np.array(lower) + np.array(upper)) / 2.
def get_aabb_extent(aabb):
"""return the bounding box range in the x, y, z in the body's pose frame
Parameters
----------
aabb : AABB
[description]
Returns
-------
np array of three float
[width, length, height]
"""
lower, upper = aabb
return np.array(upper) - np.array(lower)
def get_center_extent(body, **kwargs):
aabb = get_aabb(body, **kwargs)
return get_aabb_center(aabb), get_aabb_extent(aabb)
def aabb2d_from_aabb(aabb):
(lower, upper) = aabb
return lower[:2], upper[:2]
def aabb_contains_aabb(contained, container):
lower1, upper1 = contained
lower2, upper2 = container
return np.less_equal(lower2, lower1).all() and \
np.less_equal(upper1, upper2).all()
#return np.all(lower2 <= lower1) and np.all(upper1 <= upper2)
def aabb_contains_point(point, container):
lower, upper = container
return np.less_equal(lower, point).all() and \
np.less_equal(point, upper).all()
#return np.all(lower <= point) and np.all(point <= upper)
def get_bodies_in_region(aabb):
"""This query will return all the unique ids of objects that have axis aligned bounding box overlap with a given axis aligned bounding box.
Note that the query is conservative and may return additional objects that don't have actual AABB overlap.
This happens because the acceleration structures have some heuristic that enlarges the AABBs a bit
(extra margin and extruded along the velocity vector).
Parameters
----------
    aabb : AABB
        (lower, upper) corners of the query volume
Returns
-------
a list of object unique ids.
"""
(lower, upper) = aabb
bodies = p.getOverlappingObjects(lower, upper, physicsClientId=CLIENT)
return [] if bodies is None else bodies
def get_aabb_volume(aabb):
return np.prod(get_aabb_extent(aabb))
def get_aabb_area(aabb):
return np.prod(get_aabb_extent(aabb2d_from_aabb(aabb)))
#####################################
# AABB approximation
def get_aabb_vertices(aabb):
d = len(aabb[0])
return [tuple(aabb[i[k]][k] for k in range(d))
for i in product(range(len(aabb)), repeat=d)]
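# --- usage sketch (added): exercises the pure-numpy helpers above;
# no simulator connection is required for these ---
if __name__ == '__main__':
    box_a = aabb_from_points(np.array([[0., 0., 0.], [1., 1., 1.]]))
    box_b = AABB(lower=np.array([.5, .5, .5]), upper=np.array([2., 2., 2.]))
    assert aabb_overlap(box_a, box_b)
    assert aabb_contains_point((.25, .25, .25), box_a)
    assert get_aabb_volume(box_a) == 1.0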
|
169854
|
import argparse
import tensorflow as tf
import environments
from agent import PPOAgent
from policy import *
def print_summary(ep_count, rew):
print("Episode: %s. Reward: %s" % (ep_count, rew))
def start(env):
MASTER_NAME = "master-0"
tf.reset_default_graph()
with tf.Session() as session:
with tf.variable_scope(MASTER_NAME) as scope:
env_opts = environments.get_env_options(env, False)
policy = get_policy(env_opts, session)
master_agent = PPOAgent(policy, session, MASTER_NAME, env_opts)
        saver = tf.train.Saver(max_to_keep=1)
        try:
            saver = tf.train.import_meta_graph(tf.train.latest_checkpoint("models/%s/" % env) + ".meta")
            saver.restore(session, tf.train.latest_checkpoint("models/%s/" % env))
        except Exception:
            print("Failed to restore model, starting from scratch")
            session.run(tf.global_variables_initializer())
producer = environments.EnvironmentProducer(env, False)
env = producer.get_new_environment()
episode_count = 0
cum_rew = 0
while True:
terminal = False
s0 = env.reset()
cur_hidden_state = master_agent.get_init_hidden_state()
episode_count += 1
cur_rew = 0
while not terminal:
env.render()
action, h_out = master_agent.get_strict_sample(s0, cur_hidden_state)
cur_hidden_state = h_out
s0, r, terminal, _ = env.step(action)
cum_rew += r
cur_rew += r
print("Ep: %s, cur_reward: %s reward: %s" % (episode_count, cur_rew, cum_rew / episode_count))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Parallel PPO'))
parser.add_argument('-env', type=str, help='Env name')
args = parser.parse_args()
start(**vars(args))
|
169864
|
def is_armstrong_number(number):
    """True if `number` equals the sum of its digits, each raised to the number of digits."""
    return sum(pow(int(digit), len(str(number))) for digit in str(number)) == number
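# quick sanity checks (added): 153 == 1**3 + 5**3 + 3**3
if __name__ == '__main__':
    assert is_armstrong_number(153)
    assert not is_armstrong_number(154)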
|
169890
|
from externals.moduleman.plugin import moduleman_plugin
import itertools
class piterator_void:
text="void"
def count(self):
return self.__count
def __init__(self, *i):
self._dic = i
self.__count = max(map(lambda x:x.count(), i))
self.it = self._dic[0]
def next(self):
return (self.it.next(),)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = self._dic[0]
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class zip:
name = "zip"
description = "Returns an iterator that aggregates elements from each of the iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.izip(*self._dic)
self.__count = max(map(lambda x:x.count(), i))
def count(self):
return self.__count
def restart(self):
for dic in self._dic:
dic.restart()
        self.it = itertools.izip(*self._dic)
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class product:
name = "product"
description = "Returns an iterator cartesian product of input iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.product(*self._dic)
self.__count = reduce(lambda x,y:x*y.count(), i[1:], i[0].count())
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.product(*self._dic)
def count(self):
return self.__count
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class chain:
name = "chain"
description = "Returns an iterator returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted."
category = ["default"]
priority = 99
def count(self):
return self.__count
def __init__(self, *i):
self.__count = sum(map(lambda x:x.count(), i))
self._dic = i
self.it = itertools.chain(*i)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.chain(*self._dic)
def next(self):
return (self.it.next(),)
def __iter__(self):
self.restart()
return self
|
169895
|
from collections import namedtuple
from utils import lerp
class RGB(namedtuple('RGB', 'r g b')):
""" stores color as a integer triple from range [0, 255] """
class Color(namedtuple('Color', 'r g b')):
""" stores color as a float triple from range [0.0, 1.0] """
def rgb12(self):
r = int(self.r * 255.0)
g = int(self.g * 255.0)
b = int(self.b * 255.0)
r = (r & ~15) | (r >> 4)
g = (g & ~15) | (g >> 4)
b = (b & ~15) | (b >> 4)
return RGB(r, g, b)
def rgb24(self):
r = int(self.r * 255.0)
g = int(self.g * 255.0)
b = int(self.b * 255.0)
return RGB(r, g, b)
@staticmethod
def lerp(lo, hi, step):
r = lerp(lo.r, hi.r, step)
g = lerp(lo.g, hi.g, step)
b = lerp(lo.b, hi.b, step)
return Color(r, g, b)
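# --- usage sketch (added; assumes utils.lerp(lo, hi, t) == lo + (hi - lo) * t) ---
# mid = Color.lerp(Color(1.0, 0.0, 0.0), Color(0.0, 0.0, 1.0), 0.5)
# mid.rgb24()  # -> RGB(r=127, g=0, b=127)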
|
169919
|
import pyos
def onStart(s, a):
global state, app, editor
state = s
app = a
editor = Editor()
def save():
editor.save()
class Editor(object):
def __init__(self):
self.path = ""
self.fobj = None
self.saved = False
self.textField = pyos.GUI.MultiLineTextEntryField((0, 0), width=app.ui.width, height=app.ui.height-40, border=0)
self.fnText = pyos.GUI.Text((2, app.ui.height-32), "new file", pyos.DEFAULT, 16)
self.openBtn = pyos.GUI.Image((app.ui.width-80, app.ui.height-40), surface=state.getIcons().getLoadedIcon("open"),
onClick=self.openAsk)
self.saveBtn = pyos.GUI.Image((app.ui.width-40, app.ui.height-40), surface=state.getIcons().getLoadedIcon("save"),
onClick=self.save, onClickData=(True,))
app.ui.addChildren(self.textField, self.fnText, self.openBtn, self.saveBtn)
        if app.file is not None:
self.open(app.file)
app.file = None
def setPath(self, path):
self.path = path
self.save(True)
def save(self, btn=False):
self.saved = btn
if not self.saved: return
if self.path == "":
state.getApplicationList().getApp("files").getModule().SaveAs("Enter a name for the file. A common extension is .txt",
onSelect=self.setPath).display()
else:
try:
self.fobj = open(self.path, "w")
self.fobj.write(self.textField.getText())
self.fobj.close()
                self.saved = True  # the buffer now matches the file on disk
                self.fnText.setText(self.path[max(self.path.rfind("/"), self.path.rfind("\\"))+1:])
            except Exception:
pyos.GUI.ErrorDialog("The file "+self.path+" could not be written to.").display()
def openAsk(self):
state.getApplicationList().getApp("files").getModule().FilePicker(("5%", "5%"), app, width="90%", height="90%",
onSelect=self.open).display()
def open(self, path):
self.path = path
ro = open(self.path, "rU")
self.textField.setText(str(unicode(ro.read(), errors="ignore")))
ro.close()
self.fnText.setText(path[max(path.rfind("/"), path.rfind("\\"))+1:])
|
169921
|
class EmptyDicomSeriesException(Exception):
"""
Exception that is raised when the given folder does not contain dcm-files.
"""
def __init__(self, *args):
if not args:
args = ('The specified path does not contain dcm-files. Please ensure that '
'the path points to a folder containing a DICOM-series.', )
Exception.__init__(self, *args)
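# --- usage sketch (added, hypothetical caller) ---
# import glob
# dcm_files = glob.glob(folder + "/*.dcm")
# if not dcm_files:
#     raise EmptyDicomSeriesException()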
|
169939
|
import numpy as np
import glob
cannon_teff = np.array([])
cannon_logg = np.array([])
cannon_feh = np.array([])
cannon_alpha = np.array([])
tr_teff = np.array([])
tr_logg = np.array([])
tr_feh = np.array([])
tr_alpha = np.array([])
a = glob.glob("./*tr_label.npz")
a.sort()
for filename in a:
labels = np.load(filename)['arr_0']
tr_teff = np.append(tr_teff, labels[:,0])
tr_logg = np.append(tr_logg, labels[:,1])
tr_feh = np.append(tr_feh, labels[:,2])
tr_alpha = np.append(tr_alpha, labels[:,3])
a = glob.glob("./*cannon_labels.npz")
a.sort()
for filename in a:
labels = np.load(filename)['arr_0']
cannon_teff = np.append(cannon_teff, labels[:,0])
cannon_logg = np.append(cannon_logg, labels[:,1])
cannon_feh = np.append(cannon_feh, labels[:,2])
cannon_alpha = np.append(cannon_alpha, labels[:,3])
a = glob.glob("./*_SNR.npz")
a.sort()
test_SNR = np.array([])
for filename in a:
SNRs = np.load(filename)['arr_0']
test_SNR = np.append(test_SNR, SNRs)
np.savez("test_SNR", test_SNR)
np.savez("tr_label", np.vstack((tr_teff, tr_logg, tr_feh, tr_alpha)))
np.savez("cannon_label", np.vstack((cannon_teff, cannon_logg, cannon_feh, cannon_alpha)))
|
169980
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from smart_settings import Namespace
from .literals import DEFAULT_MAXIMUM_TITLE_LENGTH
namespace = Namespace(name='appearance', label=_('Appearance'))
setting_max_title_length = namespace.add_setting(
global_name='APPEARANCE_MAXIMUM_TITLE_LENGTH',
default=DEFAULT_MAXIMUM_TITLE_LENGTH
)
|
169988
|
from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from exp import ex
from utils import jsonl_to_json, mean
from data.batcher import make_feature_lm_batch_with_keywords, ConvertToken
from .modules import Attention, GRU
from .scn_rnn import SCNLSTM
from .transformer_model import TransformerModel
from .keyword_classifier import KeywordClassifier
'''
currently, this implementation deviates from the original repo
in the following regards:
1. GRU instead of LSTM
2. A (deactivated) option to share the input/output embeddings
Aside from the above, I tried to closely follow the given details.
'''
class HybridDis(TransformerModel):
transformer_name = 'none' # assign transformer_name = 'bert' to use BPE
model_type = 'caption'
use_keyword = False
task = 1
@classmethod
def get_args(cls):
return {
'dim': 512,
'pretrained_embedding': False,
'keyword_classification_loss': 'bce',
'keyword_top_k': 20,
'use_gt_keywords': False,
'use_word_subset': False,
'share_in_out': False,
'keyword_num': 1000,
'dropout': 0.5,
'decoder_type': 'scn',
}
@ex.capture
def __init__(self, transformer, tokenizer,
dim, keyword_num, dropout, visual_dropout, feature_names, feature_dims,
share_in_out, use_gt_keywords, use_word_subset, max_target_len,
keyword_top_k, keyword_classification_loss, pretrained_embedding,
decoder_type, normalizer_sparsity):
super(HybridDis, self).__init__()
self.eps = 1e-09
self.normalizer_alpha = None
if normalizer_sparsity == 'adaptive':
self.normalizer_alpha = nn.Parameter(torch.ones(1) * 1.2, requires_grad=True)
self.dim = dim
self.keyword_num = keyword_num
self.dropout_ratio = dropout
self.visual_dropout_ratio = visual_dropout
self.feature_names = feature_names
self.feature_dims = {k: v for k, v in feature_dims.items() if k in self.feature_names}
self.share_in_out = share_in_out
self.use_gt_keywords = use_gt_keywords
self.use_word_subset = use_word_subset
self.max_target_len = max_target_len
self.tokenizer = tokenizer
self.vocab_size = len(tokenizer)
self.k = keyword_top_k
self.keyword_loss_type = keyword_classification_loss
for feature in self.feature_names:
setattr(self, feature, FeatureEncoder(self.feature_dims[feature], self.dim))
self.encoder = nn.Linear(len(self.feature_names) * self.dim, self.dim)
self.pretrained_embedding = pretrained_embedding
self.wte_dim = 300 if self.pretrained_embedding else self.dim
self.wte = nn.Embedding(self.vocab_size, self.wte_dim)
self.context_dim = self.dim // 4
num_layers = 1
self.rnn = {
'rnn': GRU(num_layers, self.wte_dim + self.dim + self.context_dim, self.dim, dropout=self.dropout_ratio),
'scn': SCNLSTM(self.wte_dim + self.dim + self.context_dim, self.keyword_num, self.dim,
num_layers, batch_first=True, dropout=self.dropout_ratio)
}[decoder_type]
self.context_encoder = PrevEncoder(self.dim, self.context_dim)
self.dropout = nn.Dropout(self.dropout_ratio)
if self.share_in_out:
self.out = self.out_shared
else:
self.out = nn.Linear(self.dim, self.vocab_size)
self.keyword_num = len(tokenizer) if self.use_word_subset else self.keyword_num
self.keyword_classifier = KeywordClassifier(
self.wte,
self.keyword_num, self.dim, self.feature_names,
self.feature_dims,
self.dropout_ratio,
recall_k=self.k,
loss_type=self.keyword_loss_type
)
self.init_weights()
self.use_context = False
def make_batch(self, *args, **kwargs):
return make_feature_lm_batch_with_keywords(*args, **kwargs)
    def epoch_update(self, epoch):
        # enable the context vector once past the warm-up epochs
        if epoch > 10:
            self.use_context = True
def get_keyword_map(self, ids):
# get NV
if not self.use_word_subset:
storage = torch.zeros(ids.shape[0], len(self.tokenizer)).float().to(ids.device)
storage.scatter_(-1, ids.unsqueeze(-1), 1)
else:
# storage = torch.eye(len(self.tokenizer)).float().to(ids.device)
storage = None
return storage
def get_keyword_freq(self, batch, device):
if not self.use_word_subset:
c = batch.keyword_counter
else:
c = batch.word_counter
convert_token = ConvertToken()
c = {convert_token(self.tokenizer, k): v for k, v in c.items()}
ids, freq = zip(*c.items())
ids = torch.LongTensor(list(ids)).to(device)
freq = torch.FloatTensor(list(freq)).to(device)
t = torch.zeros(self.vocab_size).float().to(device)
t.scatter_(0, ids, freq)
t = t / (t.sum(dim=-1) + self.eps) # normalize
t.requires_grad_(False)
return t
def init_weights(self):
init_range = 0.1
for feature in self.feature_names:
getattr(self, feature).linear.weight.data.uniform_(-init_range, init_range)
if not self.share_in_out:
self.out.bias.data.fill_(0)
self.out.weight.data.uniform_(-init_range, init_range)
if self.pretrained_embedding is not None and self.pretrained_embedding:
self.wte.load_state_dict({'weight': self.tokenizer.embedding})
def out_shared(self, x):
return torch.matmul(x, self.wte.weight.t())
def generate_token(self, hypo, features, c, h, keyword, group_mask=None):
s = hypo[:, -1] # get last token
s = self.wte(s).unsqueeze(1) # B1C
s = torch.cat((features, c, s), dim=-1)
o, h = self.rnn(s, h, keyword=keyword)
o = self.dropout(o)
logits = self.out(o) # BV
return logits, h
def run_token(self, features, hypo, h, c, keyword, group_mask):
features = OrderedDict(sorted(features.items())) # canonical ordering
for feature in self.feature_names:
features[feature] = getattr(self, feature)(features[feature], h)
features = self.encoder(torch.cat(list(features.values()), dim=-1))
logits, h = self.generate_token(hypo, features, c, h, keyword, group_mask)
return h, c, logits
def run_video(self, features, c, v, L, sentences=None, sampler=None,
keyword=None, reduce_hypo=True, group_mask=None):
video = features['video']
B = video.shape[0]
empty = torch.full((B, self.vocab_size), float('-inf')).to(video.device)
sent = []
eos_flags = torch.LongTensor([0] * B).bool().to(video.device)
h = self.rnn.init_h(B, device=video.device) if hasattr(self, 'rnn') else None
c = self.rnn.init_c(B, self.context_dim, device=video.device) if hasattr(self, 'rnn') else None
s0 = sentences[:, v, 0] if sentences is not None \
else torch.Tensor([self.tokenizer.cls_id]).long().to(video.device).expand(B)
s = s0
hypo = s0.unsqueeze(-1)
for w in range(L):
if eos_flags.all():
logits = empty.clone()
else:
h, c, logits = self.run_token(features, hypo, h, c, keyword=keyword)
if sentences is not None: # training
s = sentences[:, v, min(L - 1, w + 1)].clone()
eos_flags = eos_flags | (sentences[:, v, min(L - 1, w + 1)] == self.tokenizer.sep_id)
else:
s, probs = sampler(logits, self.normalizer_alpha)
eos_flags = eos_flags | (logits.argmax(dim=-1) == self.tokenizer.pad_id)
hypo = torch.cat((hypo, s.unsqueeze(-1)), dim=1)
sent.append(logits)
hypo = hypo[:, 1:]
if sentences is None and reduce_hypo:
hypo = hypo[probs.argmax(dim=-1)]
else:
sent = torch.stack(sent, 1).contiguous()
c = self.context_encoder(h)
if not self.use_context:
c = torch.full_like(c.detach(), 0)
c.requires_grad_(False)
return c, sent, hypo, None, {}
def get_keyword(self, batch, features):
keyword = None
if hasattr(batch, 'keyword_masks'):
keyword = batch.word_subsets if self.use_word_subset else batch.keyword_masks
return self.keyword_classifier(keyword, features)
def process_keyword(self, batch, features):
if (not hasattr(self, 'keyword_map')) and hasattr(batch, 'keyword_map') and batch.keyword_map is not None:
self.keyword_map = self.get_keyword_map(batch.keyword_map)
if (not hasattr(self, 'keyword_freq')) and hasattr(batch, 'word_counter') and batch.keyword_counter is not None:
self.keyword_freq = self.get_keyword_freq(batch, batch.video.device)
keywords, reg_loss, stats = self.get_keyword(batch, features)
keywords = keywords.detach()
if self.use_gt_keywords:
if not self.use_word_subset:
if hasattr(batch, 'keyword_masks'):
keywords = batch.keyword_masks.float()
else:
if hasattr(batch, 'word_subsets'):
keywords = batch.word_subsets.float()
return keywords, stats, reg_loss
def forward(self, batch, **kwargs):
# BVLC, BVL
sent_gt = batch.sentences if hasattr(batch, 'sentences') else None
        features = {f: getattr(batch, f) for f in self.feature_names}
keywords, stats, reg_loss = self.process_keyword(batch, features)
if hasattr(batch, 'sentences'):
stats = {**stats, 'sentence_len': (batch.sentences != self.tokenizer.pad_id).float().sum(dim=-1).mean().item()}
res = []
vid_stats = []
losses = []
B, V = batch.video.shape[:2]
L = batch.sentences.shape[2] if hasattr(batch, 'sentences') else self.max_target_len
for v in range(V):
feature = {k: val[:, v] for k, val in features.items()}
c = self.rnn.init_c(B, self.context_dim, device=batch.video.device) if hasattr(self, 'rnn') else None
keyword = keywords[:, v] if keywords is not None else None
c, sent, _, small_loss, vid_stat = self.run_video(feature, c, v, L,
sentences=sent_gt, keyword=keyword,
group_mask=batch.group_mask[:, v],
sampler=kwargs.get('sampler', None))
losses.append(small_loss)
vid_stats.append(vid_stat)
res.append(sent) # BLV
vid_stats = {k: mean(v) for k, v in jsonl_to_json(vid_stats).items()}
stats = {**stats, **vid_stats}
del batch.sentences # for generation
small_loss = None if losses[0] is None else mean(losses)
if reg_loss is None:
reg_loss = small_loss
elif small_loss is not None:
reg_loss = reg_loss + small_loss
return torch.stack(res, 1).contiguous(), batch.targets, reg_loss, stats, batch
class FeatureEncoder(nn.Module):
def __init__(self, video_dim, dim):
super(FeatureEncoder, self).__init__()
self.linear = nn.Linear(video_dim, dim)
self.attention = Attention(dim)
def forward(self, feature, h):
# BLC
if isinstance(h, tuple): # check LSTM/GRU
h = h[0]
feature = self.linear(feature)
h = h.mean(dim=1)
return self.attention(h, feature).unsqueeze(1)
class PrevEncoder(nn.Module):
def __init__(self, in_dim, out_dim):
super(PrevEncoder, self).__init__()
self.linear = nn.Linear(in_dim, out_dim)
def forward(self, h):
# BLC
if isinstance(h, tuple): # check LSTM/GRU
h = h[0]
return self.linear(h)
|
170044
|
import os
import typing
from contextlib import suppress
from pathlib import Path
from qtpy.QtWidgets import QDialog, QFileDialog, QGridLayout, QPushButton, QStackedWidget
from PartSegCore.io_utils import SaveBase
from .algorithms_description import FormWidget
from .custom_load_dialog import IORegister, LoadRegisterFileDialog
if typing.TYPE_CHECKING: # pragma: no cover
from PartSeg.common_backend.base_settings import BaseSettings
class SaveProperty(typing.NamedTuple):
save_destination: typing.Union[str, typing.List[str]]
selected_filter: str
save_class: SaveBase
parameters: dict
class FormDialog(QDialog):
@staticmethod
def widget_class() -> typing.Type[FormWidget]:
return FormWidget
def __init__(self, fields, values=None, image=None, settings=None, parent=None):
super().__init__(parent)
self.widget = self.widget_class()(fields, settings=settings)
if values is not None:
self.widget.set_values(values)
if image is not None:
self.widget.image_changed(image)
self.accept_btn = QPushButton("Save")
self.accept_btn.clicked.connect(self.accept)
self.reject_btn = QPushButton("Reject")
self.reject_btn.clicked.connect(self.reject)
layout = QGridLayout()
layout.addWidget(self.widget, 0, 0, 1, 2)
layout.addWidget(self.reject_btn, 1, 0)
layout.addWidget(self.accept_btn, 1, 1)
self.setLayout(layout)
def get_values(self):
return self.widget.get_values()
def set_values(self, values):
return self.widget.set_values(values)
class CustomSaveDialog(LoadRegisterFileDialog):
def __init__(
self,
save_register: IORegister,
system_widget=True,
base_values: typing.Optional[dict] = None,
parent=None,
caption="Save file",
history: typing.Optional[typing.List[str]] = None,
file_mode=QFileDialog.AnyFile,
):
super().__init__(save_register, caption, parent)
self.setFileMode(file_mode)
self.setOption(QFileDialog.DontUseNativeDialog, not system_widget)
self.setAcceptMode(QFileDialog.AcceptSave)
self.filterSelected.connect(self.change_filter)
self.accepted_native = False
self.values = {}
self.names = []
if history is not None:
history = self.history() + history
self.setHistory(history)
self.base_values = base_values if base_values is not None else {}
if not system_widget:
widget = QStackedWidget()
for name, val in self.io_register.items():
wi = FormWidget(val.get_fields())
if name in self.base_values:
wi.set_values(self.base_values[name])
widget.addWidget(wi)
self.names.append(name)
self.filterSelected.connect(self.change_parameters)
layout = self.layout()
if isinstance(layout, QGridLayout):
                # noinspection PyArgumentList
layout.addWidget(widget, 0, layout.columnCount(), layout.rowCount(), 1)
else:
layout.addWidget(widget)
self.stack_widget = widget
self.selectNameFilter(self.names[0])
def change_parameters(self, text):
if not hasattr(self, "stack_widget"):
return
with suppress(ValueError):
self.stack_widget.setCurrentIndex(self.names.index(text))
if not self.io_register[text].get_fields():
self.stack_widget.hide()
else:
self.stack_widget.show()
def selectNameFilter(self, filter_name: str):
with suppress(IndexError):
self.change_parameters(filter_name)
super().selectNameFilter(filter_name)
with suppress(KeyError):
ext = self.io_register[filter_name].get_default_extension()
self.setDefaultSuffix(ext)
def change_filter(self, current_filter):
if current_filter not in self.io_register:
return
ext = self.io_register[current_filter].get_default_extension()
self.setDefaultSuffix(ext)
def accept(self):
self.accepted_native = True
if hasattr(self, "stack_widget"):
self.values = self.stack_widget.currentWidget().get_values()
super().accept()
return
save_class = self.io_register[self.selectedNameFilter()]
fields = save_class.get_fields()
if len(fields) == 0:
super().accept()
return
dial = FormDialog(fields)
if self.selectedNameFilter() in self.base_values:
dial.set_values(self.base_values[self.selectedNameFilter()])
if dial.exec_():
self.values = dial.get_values()
super().accept()
else:
super().reject()
def get_result(self) -> SaveProperty:
files = self.selectedFiles()
return SaveProperty(
files[0] if len(files) == 1 else files,
self.selectedNameFilter(),
self.io_register[self.selectedNameFilter()],
self.values,
)
class PSaveDialog(CustomSaveDialog):
def __init__(
self,
        save_register: typing.Union[typing.Dict[str, typing.Type[SaveBase]], typing.Type[SaveBase]],
*,
settings: "BaseSettings",
path: str,
default_directory=str(Path.home()),
filter_path="",
system_widget=True,
base_values: typing.Optional[dict] = None,
parent=None,
caption="Save file",
file_mode=QFileDialog.AnyFile,
):
super().__init__(
save_register=save_register,
system_widget=system_widget,
base_values=base_values,
parent=parent,
caption=caption,
history=settings.get_path_history(),
file_mode=file_mode,
)
self.settings = settings
self.path_in_dict = path
self.filter_path = filter_path
self.setDirectory(self.settings.get(path, default_directory))
if self.filter_path:
self.selectNameFilter(self.settings.get(self.filter_path, ""))
def accept(self):
super().accept()
if self.result() != QDialog.Accepted:
return
directory = os.path.dirname(self.selectedFiles()[0])
self.settings.add_path_history(directory)
self.settings.set(self.path_in_dict, directory)
if self.filter_path:
self.settings.set(self.filter_path, self.selectedNameFilter())
SaveDialog = CustomSaveDialog
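# --- usage sketch (added; assumes a running QApplication, a BaseSettings
# instance, and a hypothetical "io.save_dir" settings key) ---
# dlg = PSaveDialog(save_register, settings=settings, path="io.save_dir")
# if dlg.exec_():
#     result = dlg.get_result()  # SaveProperty(save_destination, filter, save_class, parameters)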
|
170074
|
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey.auditor import Entity
from security_monkey.auditors.resource_policy_auditor import ResourcePolicyAuditor
from security_monkey import db
from security_monkey.watcher import ChangeItem
from security_monkey.datastore import Datastore
from security_monkey.datastore import Account, AccountType, ItemAudit
from collections import namedtuple
from policyuniverse.policy import Policy
from copy import deepcopy
Item = namedtuple('Item', 'config account')
# Example KMS Config
# Internet Accessible
# No Condition
# rotation Enabled
key0 = {
"Origin": "AWS_KMS",
"KeyId": "key_id",
"Description": "Description",
"Enabled": True,
"KeyUsage": "ENCRYPT_DECRYPT",
"Grants": [],
"Policy": [
{
"Version": "2012-10-17",
"Id": "key-consolepolicy-2",
"Statement": [
{
"Action": "kms:*",
"Sid": "Enable IAM User Permissions",
"Resource": "*",
"Effect": "Allow",
"Principal": {
"AWS": "*"
}
}
]
}
],
"KeyState": "Enabled",
"KeyRotationEnabled": True,
"CreationDate": "2017-01-05T20:39:18.960000+00:00",
"Arn": "arn:aws:kms:us-east-1:123456789123:key/key_id",
"AWSAccountId": "123456789123"
}
class ResourcePolicyTestCase(SecurityMonkeyTestCase):
def pre_test_setup(self):
ResourcePolicyAuditor(accounts=['TEST_ACCOUNT']).OBJECT_STORE.clear()
account_type_result = AccountType(name='AWS')
db.session.add(account_type_result)
db.session.commit()
# main
account = Account(identifier="012345678910", name="TEST_ACCOUNT",
account_type_id=account_type_result.id, notes="TEST_ACCOUNT",
third_party=False, active=True)
# friendly
account2 = Account(identifier="222222222222", name="TEST_ACCOUNT_TWO",
account_type_id=account_type_result.id, notes="TEST_ACCOUNT_TWO",
third_party=False, active=True)
# third party
account3 = Account(identifier="333333333333", name="TEST_ACCOUNT_THREE",
account_type_id=account_type_result.id, notes="TEST_ACCOUNT_THREE",
third_party=True, active=True)
db.session.add(account)
db.session.add(account2)
db.session.add(account3)
db.session.commit()
datastore = Datastore()
# S3
datastore.store('s3', 'us-east-1', 'TEST_ACCOUNT', 'my-test-s3-bucket',
True, dict(), arn='arn:aws:s3:::my-test-s3-bucket')
datastore.store('s3', 'us-east-1', 'TEST_ACCOUNT_TWO', 'my-test-s3-bucket-two',
True, dict(), arn='arn:aws:s3:::my-test-s3-bucket-two')
datastore.store('s3', 'us-east-1', 'TEST_ACCOUNT_THREE', 'my-test-s3-bucket-three',
True, dict(), arn='arn:aws:s3:::my-test-s3-bucket-three')
# IAM User
datastore.store('iamuser', 'us-east-1', 'TEST_ACCOUNT', 'my-test-iam-user',
True, dict(UserId='AIDA11111111111111111', UserName='my-test-iam-user'),
arn='arn:aws:iam::012345678910:user/my-test-iam-user')
datastore.store('iamuser', 'us-east-1', 'TEST_ACCOUNT_TWO', 'my-test-iam-user-two',
True, dict(UserId='AIDA22222222222222222', UserName='my-test-iam-user-two'),
arn='arn:aws:iam::222222222222:user/my-test-iam-user-two')
datastore.store('iamuser', 'us-east-1', 'TEST_ACCOUNT_THREE', 'my-test-iam-user-three',
True, dict(UserId='AIDA33333333333333333', UserName='my-test-iam-user-three'),
arn='arn:aws:iam::333333333333:user/my-test-iam-user-three')
# IAM Role
datastore.store('iamrole', 'us-east-1', 'TEST_ACCOUNT', 'my-test-iam-role',
True, dict(RoleId='AISA11111111111111111', RoleName='my-test-iam-role'),
arn='arn:aws:iam::012345678910:role/my-test-iam-role')
datastore.store('iamrole', 'us-east-1', 'TEST_ACCOUNT_TWO', 'my-test-iam-role-two',
True, dict(RoleId='AISA22222222222222222', RoleName='my-test-iam-role-two'),
arn='arn:aws:iam::222222222222:role/my-test-iam-role-two')
datastore.store('iamrole', 'us-east-1', 'TEST_ACCOUNT_THREE', 'my-test-iam-role-three',
True, dict(RoleId='AISA33333333333333333', RoleName='my-test-iam-role-three'),
arn='arn:aws:iam::333333333333:role/my-test-iam-role-three')
# NAT Gateway
datastore.store('natgateway', 'us-east-1', 'TEST_ACCOUNT', 'my-test-natgateway',
True, dict(nat_gateway_addresses=[dict(public_ip='172.16.17.32', private_ip='172.16.11.11')]),
arn=None) # natgateway has no ARN :(
datastore.store('natgateway', 'us-east-1', 'TEST_ACCOUNT_TWO', 'my-test-natgateway-two',
True, dict(nat_gateway_addresses=[dict(public_ip='192.168.3.11', private_ip='172.16.22.22')]),
arn=None) # natgateway has no ARN :(
datastore.store('natgateway', 'us-east-1', 'TEST_ACCOUNT_THREE', 'my-test-natgateway-three',
True, dict(nat_gateway_addresses=[dict(public_ip='172.16.17.32', private_ip='172.16.33.33')]),
arn=None) # natgateway has no ARN :(
# VPC
datastore.store('vpc', 'us-east-1', 'TEST_ACCOUNT', 'my-test-vpc', True,
dict(id='vpc-11111111', cidr_block='10.1.1.1/18'),
arn='arn:aws:ec2:us-east-1:012345678910:vpc/vpc-11111111')
datastore.store('vpc', 'us-east-1', 'TEST_ACCOUNT_TWO', 'my-test-vpc-two', True,
dict(id='vpc-22222222', cidr_block='10.2.2.2/18'),
arn='arn:aws:ec2:us-east-1:222222222222:vpc/vpc-22222222')
datastore.store('vpc', 'us-east-1', 'TEST_ACCOUNT_THREE', 'my-test-vpc-three', True,
dict(id='vpc-33333333', cidr_block='10.3.3.3/18'),
arn='arn:aws:ec2:us-east-1:333333333333:vpc/vpc-33333333')
# VPC Service Endpoint (For S3 and things)
datastore.store('endpoint', 'us-east-1', 'TEST_ACCOUNT', 'my-test-vpce',
True, dict(id='vpce-11111111'),
arn=None) # vpce has no ARN :(
datastore.store('endpoint', 'us-east-1', 'TEST_ACCOUNT_TWO', 'my-test-vpce-two',
True, dict(id='vpce-22222222'),
arn=None) # vpce has no ARN :(
datastore.store('endpoint', 'us-east-1', 'TEST_ACCOUNT_THREE', 'my-test-vpce-three',
True, dict(id='vpce-33333333'),
arn=None) # vpce has no ARN :(
def test_load_policies(self):
policy01 = dict(Version='2012-10-08', Statement=[])
test_item = Item(account=None, config=dict(Policy=policy01))
rpa = ResourcePolicyAuditor(accounts=["012345678910"])
        # The Policy class has no equivalence test at the moment,
        # so compare the underlying dicts instead.
policies = [policy.policy for policy in rpa.load_resource_policies(test_item)]
self.assertEqual([policy01], policies)
policy02 = dict(Version='2012-10-08', Statement=[
dict(
Effect='Allow',
Action='*',
Resource='*')])
policy03 = dict(Version='2012-10-08', Statement=[
dict(
Effect='Allow',
Action='lambda:*',
Resource='*')])
policy04 = dict(Version='2012-10-08', Statement=[
dict(
Effect='Allow',
Action='ec2:*',
Resource='*')])
# simulate a lambda function, which contains multiple policies
test_item = Item(
account=None,
config=dict(
Policies=dict(
Aliases=dict(
stable=policy01),
DEFAULT=policy02,
Versions={
"3": policy03,
"4": policy04
})))
rpa.policy_keys = ['Policies$Aliases$*', 'Policies$DEFAULT', 'Policies$Versions$*']
policies = [policy.policy for policy in rpa.load_resource_policies(test_item)]
self.assertEqual([policy01, policy02, policy03, policy04], policies)
def test_prep_for_audit(self):
rpa = ResourcePolicyAuditor(accounts=["012345678910"])
rpa.prep_for_audit()
self.assertEqual(rpa.OBJECT_STORE['s3']['my-test-s3-bucket'], set(['012345678910']))
self.assertEqual(rpa.OBJECT_STORE['ACCOUNTS']['FRIENDLY'], set(['012345678910', '222222222222']))
self.assertEqual(rpa.OBJECT_STORE['ACCOUNTS']['THIRDPARTY'], set(['333333333333']))
self.assertEqual(
set(rpa.OBJECT_STORE['userid'].keys()),
set(['AIDA11111111111111111', 'AISA11111111111111111',
'AIDA22222222222222222', 'AISA22222222222222222',
'AIDA33333333333333333', 'AISA33333333333333333']))
self.assertEqual(
set(rpa.OBJECT_STORE['cidr'].keys()),
set(['10.1.1.1/18', '172.16.11.11/32', '172.16.17.32/32',
'10.2.2.2/18', '172.16.22.22/32', '192.168.3.11/32',
'10.3.3.3/18', '172.16.33.33/32', '172.16.17.32/32']))
self.assertEqual(
set(rpa.OBJECT_STORE['vpc'].keys()),
set(['vpc-11111111', 'vpc-22222222', 'vpc-33333333']))
self.assertEqual(
set(rpa.OBJECT_STORE['vpce'].keys()),
set(['vpce-11111111', 'vpce-22222222', 'vpce-33333333']))
def test_inspect_entity(self):
rpa = ResourcePolicyAuditor(accounts=["012345678910"])
rpa.prep_for_audit()
# All conditions are SAME account.
policy01 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::012345678910:root',
Action=['ec2:*'],
Resource='*',
Condition={
'StringEquals': {
'AWS:SourceOwner': '012345678910',
'AWS:SourceARN': 'arn:aws:iam::012345678910:root',
'AWS:SourceVPC': 'vpc-11111111',
'AWS:Sourcevpce': 'vpce-11111111',
'AWS:username': 'my-test-iam-role'
}, 'StringLike': {
'AWS:userid': ['AIDA11111111111111111:*', 'AISA11111111111111111:*']
}, 'IpAddress': {
'AWS:SourceIP': ['172.16.17.32', '10.1.1.1/18', '172.16.11.11']
}})])
test_item = Item(account='TEST_ACCOUNT', config=None)
policy = Policy(policy01)
for who in policy.whos_allowed():
entity = Entity.from_tuple(who)
self.assertEqual(set(['SAME']), rpa.inspect_entity(entity, test_item))
# All conditions are FRIENDLY account.
policy02 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::222222222222:root',
Action=['ec2:*'],
Resource='*',
Condition={
'StringEquals': {
'AWS:SourceOwner': '222222222222',
'AWS:SourceARN': 'arn:aws:s3:::my-test-s3-bucket-two',
'AWS:SourceVPC': 'vpc-22222222',
'AWS:Sourcevpce': 'vpce-22222222',
'AWS:username': 'my-test-iam-role-two'
}, 'StringLike': {
'AWS:userid': ['AIDA22222222222222222:*', 'AISA22222222222222222:*']
}, 'IpAddress': {
'AWS:SourceIP': ['192.168.3.11', '10.2.2.2/18', '172.16.22.22']
}})])
test_item = Item(account='TEST_ACCOUNT', config=None)
policy = Policy(policy02)
for who in policy.whos_allowed():
entity = Entity.from_tuple(who)
self.assertEqual(set(['FRIENDLY']), rpa.inspect_entity(entity, test_item))
# All conditions are THIRDPARTY account.
policy03 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::333333333333:root',
Action=['ec2:*'],
Resource='*',
Condition={
'StringEquals': {
'AWS:SourceOwner': '333333333333',
'AWS:SourceARN': 'arn:aws:iam::333333333333:root',
'AWS:SourceVPC': 'vpc-33333333',
'AWS:Sourcevpce': 'vpce-33333333',
'AWS:username': 'my-test-iam-role-three'
}, 'StringLike': {
'AWS:userid': ['AIDA33333333333333333:*', 'AISA33333333333333333:*']
}, 'IpAddress': {
'AWS:SourceIP': ['172.16.17.32', '10.3.3.3/18', '172.16.33.33']
}})])
test_item = Item(account='TEST_ACCOUNT', config=None)
policy = Policy(policy03)
for who in policy.whos_allowed():
entity = Entity.from_tuple(who)
self.assertEqual(set(['THIRDPARTY']), rpa.inspect_entity(entity, test_item))
# All conditions are from an UNKNOWN account.
policy04 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::444444444444:root',
Action=['ec2:*'],
Resource='*',
Condition={
'StringEquals': {
'AWS:SourceOwner': '444444444444',
'AWS:SourceARN': 'arn:aws:iam::444444444444:root',
'AWS:SourceVPC': 'vpc-44444444',
'AWS:Sourcevpce': 'vpce-44444444',
'AWS:username': 'my-test-iam-role-four'
}, 'StringLike': {
'AWS:userid': ['AIDA44444444444444444:*', 'AISA44444444444444444:*']
}, 'IpAddress': {
'AWS:SourceIP': ['172.16.31.10', '10.4.4.4/18', '172.16.44.44']
}})])
test_item = Item(account='TEST_ACCOUNT', config=None)
policy = Policy(policy04)
for who in policy.whos_allowed():
entity = Entity.from_tuple(who)
self.assertEqual(set(['UNKNOWN']), rpa.inspect_entity(entity, test_item))
def test_check_internet_accessible(self):
rpa = ResourcePolicyAuditor(accounts=["012345678910"])
rpa.prep_for_audit()
policy01 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::*:root',
Action=['ec2:*'],
Resource='*')])
test_item = Item(account='TEST_ACCOUNT', config=dict(Policy=policy01))
def mock_add_issue(score, issue, item, notes=None, action_instructions=None):
self.assertEqual(10, score)
self.assertEqual('Internet Accessible', issue)
self.assertEqual('Entity: [principal:*] Actions: ["ec2:*"]', notes)
rpa.add_issue = lambda *args, **kwargs: mock_add_issue(*args, **kwargs)
rpa.check_internet_accessible(test_item)
policy02 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::012345678910:root',
Action=['ec2:*'],
Resource='*')])
test_item = Item(account='TEST_ACCOUNT', config=dict(Policy=policy02))
def mock_add_issue_two(score, issue, item, notes=None):
# should not get here
self.assertTrue(False)
rpa.add_issue = lambda *args, **kwargs: mock_add_issue_two(*args, **kwargs)
rpa.check_internet_accessible(test_item)
def test_check_friendly_cross_account(self):
rpa = ResourcePolicyAuditor(accounts=["012345678910"])
rpa.prep_for_audit()
policy01 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::222222222222:root',
Action=['ec2:*'],
Resource='*')])
test_item = Item(account='TEST_ACCOUNT', config=dict(Policy=policy01))
def mock_add_issue(score, issue, item, notes=None):
self.assertEqual(0, score)
self.assertEqual('Friendly Cross Account', issue)
self.assertEqual('Account: [222222222222/TEST_ACCOUNT_TWO] Entity: [principal:arn:aws:iam::222222222222:root] Actions: ["ec2:*"]', notes)
rpa.add_issue = lambda *args, **kwargs: mock_add_issue(*args, **kwargs)
rpa.check_friendly_cross_account(test_item)
def test_check_unknown_cross_account(self):
rpa = ResourcePolicyAuditor(accounts=["012345678910"])
rpa.prep_for_audit()
policy01 = dict(
Version='2010-08-14',
Statement=[
dict(
Effect='Allow',
Principal='arn:aws:iam::444444444444:root',
Action=['ec2:*'],
Resource='*')])
test_item = Item(account='TEST_ACCOUNT', config=dict(Policy=policy01))
def mock_add_issue(score, issue, item, notes=None):
self.assertEqual(10, score)
self.assertEqual('Unknown Access', issue)
self.assertEqual('Entity: [principal:arn:aws:iam::444444444444:root] Actions: ["ec2:*"]', notes)
rpa.add_issue = lambda *args, **kwargs: mock_add_issue(*args, **kwargs)
rpa.check_unknown_cross_account(test_item)
def test_check_thirdparty_cross_account(self):
rpa = ResourcePolicyAuditor(accounts=['TEST_ACCOUNT'])
rpa.prep_for_audit()
key0_friendly_cross_account = deepcopy(key0)
key0_friendly_cross_account['Policy'][0]['Statement'][0]['Principal']['AWS'] \
= 'arn:aws:iam::333333333333:role/SomeRole'
item = ChangeItem(
account='TEST_ACCOUNT',
arn='arn:aws:kms:us-east-1:012345678910:key/key_id',
new_config=key0_friendly_cross_account)
rpa.check_thirdparty_cross_account(item)
self.assertEqual(len(item.audit_issues), 1)
self.assertEqual(item.audit_issues[0].score, 0)
def test_check_root_cross_account(self):
rpa = ResourcePolicyAuditor(accounts=['TEST_ACCOUNT'])
rpa.prep_for_audit()
key0_friendly_cross_account = deepcopy(key0)
key0_friendly_cross_account['Policy'][0]['Statement'][0]['Principal']['AWS'] \
= 'arn:aws:iam::222222222222:root'
item = ChangeItem(
account='TEST_ACCOUNT',
arn='arn:aws:kms:us-east-1:012345678910:key/key_id',
new_config=key0_friendly_cross_account)
rpa.check_root_cross_account(item)
self.assertEqual(len(item.audit_issues), 1)
self.assertEqual(item.audit_issues[0].score, 6)
|
170094
|
import pytest
from botx import SystemEvents
pytest_plugins = ("tests.test_collecting.fixtures",)
def test_registration_handler_for_several_system_events(
handler_as_function,
extract_collector,
collector_cls,
):
system_events = {
SystemEvents.chat_created,
SystemEvents.file_transfer,
SystemEvents.added_to_chat,
SystemEvents.deleted_from_chat,
SystemEvents.left_from_chat,
SystemEvents.internal_bot_notification,
SystemEvents.cts_login,
SystemEvents.cts_logout,
}
collector = collector_cls()
collector.system_event(
handler=handler_as_function,
events=list(system_events),
)
handlers = [SystemEvents(handler.body) for handler in collector.handlers]
assert handlers
@pytest.mark.parametrize(
"event",
[
SystemEvents.added_to_chat,
SystemEvents.deleted_from_chat,
SystemEvents.chat_created,
SystemEvents.file_transfer,
SystemEvents.left_from_chat,
SystemEvents.cts_login,
SystemEvents.cts_logout,
],
)
def test_defining_system_handler_in_collector_as_decorator(
handler_as_function,
extract_collector,
collector_cls,
event,
):
collector = collector_cls()
getattr(collector, event.name)()(handler_as_function)
assert SystemEvents(collector.handlers[0].body) == event
def test_error_when_no_event_was_passed(
handler_as_function,
extract_collector,
collector_cls,
):
collector = collector_cls()
with pytest.raises(AssertionError):
collector.system_event(handler=handler_as_function)
|
170095
|
from typing import Optional, Tuple
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
class Rack:
def __init__(self, ownerComp):
self.ownerComp = ownerComp
@property
def RackTools(self): return self.ownerComp.op('rack_tools')
@property
def RackToolsPane(self) -> Optional['Pane']:
tools = self.RackTools
for pane in ui.panes:
if pane.owner == tools and pane.type == PaneType.PANEL:
return pane
@property
def RackToolsPaneSize(self) -> Tuple[int, int]:
pane = self.RackToolsPane
if not pane:
return 0, 0
w = pane.topRight.x - pane.bottomLeft.x
h = pane.topRight.y - pane.bottomLeft.y
return w, h
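# --- usage sketch (added): from another component's Python; `op`, `ui` and
# PaneType are TouchDesigner builtins, and the path below is hypothetical ---
# rack = Rack(op('/project1/rack'))
# w, h = rack.RackToolsPaneSize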
|
170104
|
from .workspaceStructure import WorkspaceStructure
class Bond(WorkspaceStructure):
# pylint: disable=too-many-arguments
def __init__(self, ctx, source, destination, bondCategory, bondFacet,
sourceDescriptor, destinationDescriptor):
WorkspaceStructure.__init__(self, ctx)
slipnet = self.ctx.slipnet
self.source = source
self.string = self.source.string
self.destination = destination
self.leftObject = self.source
self.rightObject = self.destination
self.directionCategory = slipnet.right
if self.source.leftIndex > self.destination.rightIndex:
self.leftObject = self.destination
self.rightObject = self.source
self.directionCategory = slipnet.left
self.facet = bondFacet
self.sourceDescriptor = sourceDescriptor
self.destinationDescriptor = destinationDescriptor
self.category = bondCategory
        if self.sourceDescriptor == self.destinationDescriptor:
self.directionCategory = None
def flippedVersion(self):
slipnet = self.ctx.slipnet
return Bond(
self.ctx,
self.destination, self.source,
self.category.getRelatedNode(slipnet.opposite),
self.facet, self.destinationDescriptor, self.sourceDescriptor
)
def __repr__(self):
return '<Bond: %s>' % self.__str__()
def __str__(self):
return '%s bond between %s and %s' % (
self.category.name, self.leftObject, self.rightObject,
)
def buildBond(self):
workspace = self.ctx.workspace
workspace.structures += [self]
self.string.bonds += [self]
self.category.buffer = 100.0
if self.directionCategory:
self.directionCategory.buffer = 100.0
self.leftObject.rightBond = self
self.rightObject.leftBond = self
self.leftObject.bonds += [self]
self.rightObject.bonds += [self]
def break_the_structure(self):
self.breakBond()
def breakBond(self):
workspace = self.ctx.workspace
if self in workspace.structures:
workspace.structures.remove(self)
if self in self.string.bonds:
self.string.bonds.remove(self)
self.leftObject.rightBond = None
self.rightObject.leftBond = None
if self in self.leftObject.bonds:
self.leftObject.bonds.remove(self)
if self in self.rightObject.bonds:
self.rightObject.bonds.remove(self)
def getIncompatibleCorrespondences(self):
        # returns a list of correspondences that are incompatible with
        # this bond
workspace = self.ctx.workspace
incompatibles = []
if self.leftObject.leftmost and self.leftObject.correspondence:
correspondence = self.leftObject.correspondence
if self.string == workspace.initial:
objekt = self.leftObject.correspondence.objectFromTarget
else:
objekt = self.leftObject.correspondence.objectFromInitial
if objekt.leftmost and objekt.rightBond:
if (
objekt.rightBond.directionCategory and
objekt.rightBond.directionCategory != self.directionCategory
):
incompatibles += [correspondence]
if self.rightObject.rightmost and self.rightObject.correspondence:
correspondence = self.rightObject.correspondence
if self.string == workspace.initial:
objekt = self.rightObject.correspondence.objectFromTarget
else:
objekt = self.rightObject.correspondence.objectFromInitial
if objekt.rightmost and objekt.leftBond:
if (
objekt.leftBond.directionCategory and
objekt.leftBond.directionCategory != self.directionCategory
):
incompatibles += [correspondence]
return incompatibles
def updateInternalStrength(self):
slipnet = self.ctx.slipnet
        # bonds between objects of the same type (i.e. letter or group) are
        # stronger than bonds between different types
sourceGap = self.source.leftIndex != self.source.rightIndex
destinationGap = (self.destination.leftIndex !=
self.destination.rightIndex)
if sourceGap == destinationGap:
memberCompatibility = 1.0
else:
memberCompatibility = 0.7
# letter category bonds are stronger
if self.facet == slipnet.letterCategory:
facetFactor = 1.0
else:
facetFactor = 0.7
strength = min(100.0, memberCompatibility * facetFactor *
self.category.bondDegreeOfAssociation())
self.internalStrength = strength
def updateExternalStrength(self):
self.externalStrength = 0.0
supporters = self.numberOfLocalSupportingBonds()
if supporters > 0.0:
density = self.localDensity() / 100.0
density = density ** 0.5 * 100.0
supportFactor = 0.6 ** (1.0 / supporters ** 3)
supportFactor = max(1.0, supportFactor)
strength = supportFactor * density
self.externalStrength = strength
def numberOfLocalSupportingBonds(self):
return sum(
1 for b in self.string.bonds if
b.string == self.source.string and
self.leftObject.letterDistance(b.leftObject) != 0 and
self.rightObject.letterDistance(b.rightObject) != 0 and
self.category == b.category and
self.directionCategory == b.directionCategory
)
def sameCategories(self, other):
return (self.category == other.category and
self.directionCategory == other.directionCategory)
def myEnds(self, object1, object2):
if self.source == object1 and self.destination == object2:
return True
return self.source == object2 and self.destination == object1
def localDensity(self):
        # returns a rough measure of the density, within the string, of
        # bonds with the same bond-category and direction-category as
        # this bond
workspace = self.ctx.workspace
slotSum = 0.0
supportSum = 0.0
for object1 in workspace.objects:
if object1.string == self.string:
for object2 in workspace.objects:
if object1.beside(object2):
slotSum += 1.0
for bond in self.string.bonds:
if (
bond != self and
self.sameCategories(bond) and
self.myEnds(object1, object2)
):
supportSum += 1.0
try:
return 100.0 * supportSum / slotSum
except ZeroDivisionError:
return 0.0
def sameNeighbors(self, other):
if self.leftObject == other.leftObject:
return True
return self.rightObject == other.rightObject
def getIncompatibleBonds(self):
return [b for b in self.string.bonds if self.sameNeighbors(b)]
def set_source(self, value):
self.source = value
def possibleGroupBonds(self, bonds):
result = []
slipnet = self.ctx.slipnet
for bond in bonds:
if (
bond.category == self.category and
bond.directionCategory == self.directionCategory
):
result += [bond]
else:
# a modified bond might be made
if bond.category == self.category:
return [] # a different bond cannot be made here
if bond.directionCategory == self.directionCategory:
return [] # a different bond cannot be made here
if slipnet.sameness in [self.category, bond.category]:
return []
bond = Bond(
bond.ctx, bond.destination, bond.source, self.category,
self.facet, bond.destinationDescriptor,
bond.sourceDescriptor
)
result += [bond]
return result
|
170173
|
from insights.parsers import sap_host_profile, SkipException
from insights.parsers.sap_host_profile import SAPHostProfile
from insights.tests import context_wrap
import doctest
import pytest
HOST_PROFILE_DOC = """
SAPSYSTEMNAME = SAP
SAPSYSTEM = 99
service/porttypes = SAPHostControl SAPOscol SAPCCMS
DIR_LIBRARY =
DIR_EXECUTABLE = /usr/sap/hostctrl/exe
DIR_PROFILE = /usr/sap/hostctrl/exe
DIR_GLOBAL = /usr/sap/hostctrl/exe
DIR_INSTANCE = /usr/sap/hostctrl/exe
DIR_HOME = /usr/sap/hostctrl/work
""".strip()
HOST_PROFILE_AB = """
SAPSYSTEMNAME = SAP
SAPSYSTEM = 99
service/porttypes = SAPHostControl SAPOscol SAPCCMS
DIR_LIBRARY = /usr/sap/hostctrl/exe
DIR_EXECUTABLE = /usr/sap/hostctrl/exe
DIR_PROFILE = /usr/sap/hostctrl/exe
DIR_GLOBAL
""".strip()
def test_sap_host_profile():
hpf = SAPHostProfile(context_wrap(HOST_PROFILE_DOC))
assert "SAPSYSTEM" in hpf
assert hpf["DIR_GLOBAL"] == "/usr/sap/hostctrl/exe"
assert hpf["DIR_LIBRARY"] == ""
def test_sap_host_profile_abnormal():
with pytest.raises(SkipException) as s:
SAPHostProfile(context_wrap(HOST_PROFILE_AB))
assert "Incorrect line: 'DIR_GLOBAL'" in str(s)
def test_doc_examples():
env = {
'hpf': SAPHostProfile(context_wrap(HOST_PROFILE_DOC)),
}
failed, total = doctest.testmod(sap_host_profile, globs=env)
assert failed == 0
|
170201
|
from __future__ import unicode_literals
import matplotlib.pyplot as plt
import fileinput
import sys
#
# Displays one or more CSV files in a graph. Intended to be used
# with the `bench_tables.rs` example.
#
# Accepts data from STDIN and additional files can be passed in as
# command line arguments. A use case would be to display the current
# benchmark results in STDIN and a reference benchmark as a file.
#
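#
# Illustrative invocation (the script and reference file names below are
# hypothetical; any CSV in the format produced by `bench_tables.rs` works):
#
#     <benchmark command> | python this_script.py reference.csv
#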
def process_file(ax, filename, lines):
    # `lines` is any iterable of CSV rows; naming it `lines` avoids
    # shadowing the `fileinput` module used below.
    first_line = True
    if filename == "STDIN":
        linestyle = "-"
    else:
        linestyle = "--"
    x_axis = [0, 1]
    for line in lines:
        cells = line.strip().split(",")  # Assume CSV string in English locale
        title = filename + " " + cells[0].strip()
        end = len(cells)
        # Ignore last cell if it's empty. That allows a trailing "," in
        # the CSV string.
        if not cells[-1].strip():
            end = end - 1
        # Materialize the values: under Python 3, `map` returns a one-shot
        # iterator, which would leave `x_axis` exhausted after its first use.
        values = list(map(int, cells[1:end]))
        if first_line:
            x_axis = values
            first_line = False
        else:
            ax.plot(x_axis, values, linestyle, label=title)
fig, ax = plt.subplots()
process_file(ax, "STDIN", fileinput.input("-", openhook=fileinput.hook_encoded("utf16")))
for filename in sys.argv[1:]:
    # fileinput opens the file itself, so a separate open() is not needed
    process_file(ax, filename, fileinput.input(filename, openhook=fileinput.hook_encoded("utf16")))
ax.legend(loc='lower right')
plt.show()
|
170248
|
import unittest
from collection.set import Set
class TestSet(unittest.TestCase):
def setUp(self):
self.set = Set()
def test_constructor(self):
self.assertTrue(self.set.is_empty())
        self.assertEqual(0, len(self.set))
def test_one_add(self):
element = 'foo'
self.set.add(element)
self.assertFalse(self.set.is_empty())
        self.assertEqual(1, len(self.set))
self.assertTrue(self.set.exists(element))
def test_multiple_adds(self):
elements = range(100)
for element in elements:
self.set.add(element)
for element in elements:
self.assertTrue(self.set.exists(element))
self.assertFalse(self.set.is_empty())
        self.assertEqual(len(elements), len(self.set))
def test_clear(self):
elements = range(100)
for element in elements:
self.set.add(element)
self.set.clear()
self.test_constructor()
def test_one_remove(self):
element = 'foo'
self.set.add(element)
self.set.remove(element)
self.assertTrue(self.set.is_empty())
        self.assertEqual(0, len(self.set))
self.assertFalse(self.set.exists(element))
def test_multiple_removes(self):
elements = range(100)
for element in elements:
self.set.add(element)
for element in elements:
self.set.remove(element)
for element in elements:
self.assertFalse(self.set.exists(element))
self.assertTrue(self.set.is_empty())
        self.assertEqual(0, len(self.set))
def test_remove_empty(self):
self.assertRaises(KeyError, self.set.remove, 'foo')
def test_remove_too_many(self):
element = 'foo'
self.set.add(element)
self.set.remove(element)
self.assertRaises(KeyError, self.set.remove, element)
def test_merge_empty_sets(self):
        self.assertEqual(Set().merge(Set()), Set())
def test_merge_multiple_elements(self):
lst0 = [1, 2, 3]
lst1 = [11, 12, 13]
lst2 = [13]
set0 = Set(lst0)
set1 = Set(lst1)
set2 = Set(lst2)
        self.assertEqual(set0.merge(set1), Set(lst0 + lst1))
        self.assertEqual(set1.merge(set2), set1)
def test_diff_empty_sets(self):
        self.assertEqual(Set().diff(Set()), Set())
def test_diff_multiple_elements(self):
lst0 = [1, 2, 3]
lst1 = [11, 12, 13]
lst2 = [13]
lst3 = [11, 12]
set0 = Set(lst0)
set1 = Set(lst1)
set2 = Set(lst2)
set3 = Set(lst3)
        self.assertEqual(set0.diff(set1), set0)
        self.assertEqual(set1.diff(set0), set1)
        self.assertEqual(set1.diff(set2), set3)
        self.assertEqual(set2.diff(set1), Set())
def test_eq_empty(self):
self.assertTrue(Set() == Set())
def test_eq_not_empty(self):
set0 = Set([1, 2, 3])
set1 = Set([1, 2, 3])
set2 = Set([4, 5, 6])
self.assertTrue(set0 == set0)
self.assertTrue(set1 == set1)
self.assertTrue(set2 == set2)
self.assertFalse(set0 == Set())
self.assertFalse(Set() == set0)
self.assertTrue(set0 == set1)
self.assertTrue(set1 == set0)
self.assertFalse(set0 == set2)
self.assertFalse(set2 == set0)
self.assertFalse(set1 == set2)
self.assertFalse(set2 == set1)
|
170250
|
import factory
from intake import models
from django.contrib.auth.models import User
from .status_notification_factory import StatusNotificationFactory
class StatusUpdateFactory(factory.DjangoModelFactory):
status_type = factory.Iterator(models.StatusType.objects.filter(
is_a_status_update_choice=True))
application = factory.Iterator(models.Application.objects.all())
author = factory.Iterator(
User.objects.filter(profile__organization__is_receiving_agency=True))
additional_information = "We may be able to get a fee waived for you"
other_next_step = "Come to our Walk-In Clinic"
class Meta:
model = models.StatusUpdate
@classmethod
def create(cls, *args, **kwargs):
next_steps = kwargs.pop('next_steps', [])
instance = super().create(*args, **kwargs)
if not next_steps:
step = models.NextStep.objects.first()
next_steps = [step]
instance.next_steps.add(*next_steps)
instance.save()
return instance
class StatusUpdateWithNotificationFactory(StatusUpdateFactory):
notification = factory.RelatedFactory(
StatusNotificationFactory, 'status_update')
|
170280
|
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
if len(sys.argv) < 2:
    print("Usage: %s <path>" % (sys.argv[0]))
    sys.exit(1)
path = sys.argv[1]
filename = os.path.basename(path)
arr = np.memmap(path, dtype='float64', mode='r')
plt.plot(arr)
plt.title(filename)
print("saving=" + path + ".png")
plt.savefig(path + ".png")
|
170282
|
import os
import discord
from discord.ext import commands
from discord.ext.commands import BucketType, cooldown
import motor.motor_asyncio
import nest_asyncio
import json
with open('./data.json') as f:
d1 = json.load(f)
with open('./market.json') as f:
d2 = json.load(f)
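# Assumed layout of market.json (illustrative, inferred from the indexing
# below rather than taken from the actual file): each category maps to a
# list of [display_name, price, item_key] triples, e.g.
#   {"IoT": [["Raspberry Pi", 50, "rpi"], ...], "Food": [...], "Cars": [...]}
# `items` then maps item_key -> [category, price, display_name].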
items = {}
for x in d2["IoT"]:
i = {x[2] : ["iot", x[1], x[0]]}
items.update(i)
for x in d2["Food"]:
i = {x[2] : ["food", x[1], x[0]]}
items.update(i)
for x in d2["Cars"]:
i = {x[2] : ["cars", x[1], x[0]]}
items.update(i)
#print(items)
nest_asyncio.apply()
mongo_url = d1['mongo']
cluster = motor.motor_asyncio.AsyncIOMotorClient(mongo_url)
ecomoney = cluster["eco"]["money"]
ecobag = cluster["eco"]["bag"]
class Shop(commands.Cog):
""" Commands related to market"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print("Shop Cog Loaded Succesfully")
async def open_account(self, id : int):
if id is not None:
newuser = {"id": id, "wallet": 0, "bank": 100}
# wallet = current money, bank = money in bank
await ecomoney.insert_one(newuser)
async def update_wallet(self, id : int, wallet : int):
if id is not None:
await ecomoney.update_one({"id": id}, {"$set": {"wallet": wallet}})
async def update_bank(self, id : int, bank : int):
if id is not None:
await ecomoney.update_one({"id": id}, {"$set": {"bank": bank}})
async def open_bag(self, id : int):
if id is not None:
newuser = {"id": id, "bag": []}
await ecobag.insert_one(newuser)
@commands.group(name="mkt", invoke_without_command=True)
@cooldown(1, 2, BucketType.user)
async def mkt(self,ctx):
""" Market Commands"""
embed = discord.Embed(
timestamp=ctx.message.created_at,
title="Market Categories",
color=0xFF0000,
)
embed.add_field(
name="IoT",
value="Buy items related to IoT/Technology | Use `.mkt iot`",
inline=False
)
embed.add_field(
name="Food",
value="Buy items related to Food | Use `.mkt food`",
inline=False
)
embed.add_field(
name="Cars",
value="Buy items related to Cars | Use `.mkt cars`",
inline=False
)
embed.set_footer(
text=f"Requested By: {ctx.author.name}", icon_url=f"{ctx.author.avatar_url}"
)
await ctx.send(embed=embed)
@mkt.command(name="iot")
@cooldown(1, 2, BucketType.user)
async def iot(self,ctx):
""" IoT/Technology Market"""
embed = discord.Embed(
timestamp=ctx.message.created_at,
title="IoT Market",
color=0xFF0000,
)
for x in d2["IoT"]:
embed.add_field(
name=x[0],
value=f"Name {x[2]} | Price: ${x[1]}",
inline=False
)
embed.set_footer(
text=f"Requested By: {ctx.author.name}", icon_url=f"{ctx.author.avatar_url}"
)
await ctx.send(embed=embed)
@mkt.command(name="food")
@cooldown(1, 2, BucketType.user)
async def food(self,ctx):
""" Food Market"""
embed = discord.Embed(
timestamp=ctx.message.created_at,
title="Food Market",
color=0xFF0000,
)
for x in d2["Food"]:
embed.add_field(
name=x[0],
value=f"Name {x[2]} | Price: ${x[1]}",
inline=False
)
embed.set_footer(
text=f"Requested By: {ctx.author.name}", icon_url=f"{ctx.author.avatar_url}"
)
await ctx.send(embed=embed)
@mkt.command(name="cars")
@cooldown(1, 2, BucketType.user)
async def cars(self,ctx):
""" Cars Market"""
embed = discord.Embed(
timestamp=ctx.message.created_at,
title="Automobile Market",
color=0xFF0000,
)
for x in d2["Cars"]:
embed.add_field(
name=x[0],
value=f"Name {x[2]} | Price: ${x[1]}",
inline=False
)
embed.set_footer(
text=f"Requested By: {ctx.author.name}", icon_url=f"{ctx.author.avatar_url}"
)
await ctx.send(embed=embed)
# function to add item in ecobag
async def add_item(self, id : int, item : str, amount : int):
if id is not None:
await ecobag.update_one({"id": id}, {"$push": {"bag": [item, amount]}})
# function to edit amount of item in ecobag
async def edit_item(self, id : int, index : int, amount : int):
if id is not None:
await ecobag.update_one({"id": id}, {"$set": {f"bag.{index}.1": amount}})
# function to remove item from ecobag
async def remove_item(self, id : int, name : str, amount : int):
if id is not None:
await ecobag.update_one({"id": id}, {"$pull": {"bag": [name, amount]}})
@commands.command(aliases=["b"])
@cooldown(1, 2, BucketType.user)
async def buy(self, ctx, item : str, amount : int = 1):
""" Buy an item from the market"""
if amount <= 0 or amount > 100:
await ctx.send("Amount must be greater than 0 or less than 100")
return
bal = await ecomoney.find_one({"id": ctx.author.id})
if bal is None:
await self.open_account(ctx.author.id)
bal = await ecomoney.find_one({"id": ctx.author.id})
bag = await ecobag.find_one({"id": ctx.author.id})
if bag is None:
await self.open_bag(ctx.author.id)
bag = await ecobag.find_one({"id": ctx.author.id})
fg = items.get(item)
if fg is None:
await ctx.send("Item not found")
return
price = fg[1] * amount
name = fg[2]
u_bal = bal["bank"]
if u_bal < price:
await ctx.send("You don't have enough money in your bank")
return
await self.update_bank(ctx.author.id, u_bal - price)
for x in bag['bag']:
if x[0] == item:
init_amount = x[1]
final_amount = amount + init_amount
index = bag['bag'].index(x)
await self.edit_item(ctx.author.id, index, final_amount)
await ctx.send(f"You bought {amount} {name} for ${price}")
return
await self.add_item(ctx.author.id, item, amount)
await ctx.send(f"You bought {amount} {name} for ${price}")
@commands.command(aliases=["s"])
@cooldown(1, 2, BucketType.user)
async def sell(self, ctx, item : str, amount : int = 1):
""" Sell items from your bag """
if amount <= 0 or amount > 100:
await ctx.send("Amount must be greater than 0 or less than 0")
return
bal = await ecomoney.find_one({"id": ctx.author.id})
if bal is None:
await self.open_account(ctx.author.id)
bal = await ecomoney.find_one({"id": ctx.author.id})
bag = await ecobag.find_one({"id": ctx.author.id})
if bag is None:
await self.open_bag(ctx.author.id)
bag = await ecobag.find_one({"id": ctx.author.id})
fg = items.get(item)
if fg is None:
await ctx.send("Item not found")
return
price = fg[1]
name = fg[2]
u_bal = bal["bank"]
for x in bag['bag']:
if x[0] == item:
init_amount = x[1]
if amount > init_amount:
await ctx.send("You don't have enough of this item")
return
elif amount == init_amount:
price = int(round(price * init_amount * 0.7,0))
index = bag['bag'].index(x)
await self.remove_item(ctx.author.id, item, init_amount)
await self.update_bank(ctx.author.id, u_bal + price)
await ctx.send(f"You sold {amount} {name} for ${price}")
return
else:
final_amount = init_amount - amount
price = int(round(price * amount * 0.7,0))
index = bag['bag'].index(x)
await self.edit_item(ctx.author.id, index, final_amount)
await self.update_bank(ctx.author.id, u_bal + price)
await ctx.send(f"You sold {amount} {name} for ${price}")
return
await ctx.send("You don't have this item")
@commands.command(aliases=["i"])
@cooldown(1, 2, BucketType.user)
async def inventory(self, ctx, page : int = 1):
""" Checkout your inventory. For more than one page, use the page number.
{1 : "0-9", 2 : "10-20", 3 : "20-30", 4 : "30-40", 5 : "40-50"} - Page and item number
"""
if page > 5 or page < 1:
await ctx.send("Page must be between 1 and 5")
return
bal = await ecomoney.find_one({"id": ctx.author.id})
if bal is None:
await self.open_account(ctx.author.id)
bal = await ecomoney.find_one({"id": ctx.author.id})
bag = await ecobag.find_one({"id": ctx.author.id})
if bag is None:
await self.open_bag(ctx.author.id)
bag = await ecobag.find_one({"id": ctx.author.id})
total = 0
        page_dict = {1: "0-9", 2: "10-19", 3: "20-29", 4: "30-39", 5: "40-49"}
        initial, final = page_dict[page].split('-')
for x in bag['bag']:
total += 1
if total == 0:
await ctx.send("Your bag is empty")
return
        page_items = bag['bag'][int(initial):int(final)+1]
embed = discord.Embed(
title=f"{ctx.author.name}'s Inventory",
description=f"Page {page} | Total Items In Inventory: {total}",
color=0xFF0000
)
for x in page_items:
fg = items.get(x[0])
embed.add_field(name=fg[2], value=f"{x[1]}", inline=False)
embed.set_footer(
text=f"Requested By: {ctx.author.name}", icon_url=f"{ctx.author.avatar_url}"
)
await ctx.send(embed=embed)
# leaderboard
@commands.command(aliases=["lb"])
@cooldown(1, 2, BucketType.user)
async def leaderboard(self, ctx):
""" Checkout the leaderboard."""
rankings = ecomoney.find().sort("bank", -1)
i = 1
embed = discord.Embed(
title=f"{ctx.guild.name}'s Leaderboard",
description=f"\u200b",
color=0xFF0000
)
async for x in rankings:
try:
temp = ctx.guild.get_member(x["id"])
tb = x["bank"]
embed.add_field(
name=f"{i} : {temp.name}", value=f"Money: ${tb}", inline=False
)
i += 1
            except (AttributeError, KeyError):
                # skip entries for users who left the guild or lack fields
                pass
if i == 11:
break
embed.set_footer(
text=f"Requested By: {ctx.author.name}", icon_url=f"{ctx.author.avatar_url}"
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Shop(bot))
|
170323
|
import time
from functools import partial
from operator import is_not
import requests
from lxml import html
from lxml.cssselect import CSSSelector
from reppy.cache import RobotsCache
from reppy.exceptions import ConnectionException
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
DEFAULT_HODOR_UA = 'Hodor'
DEFAULT_HODOR_MAX_PAGES = 100
DEFAULT_CRAWL_DELAY = 3
EMPTY_VALUES = (None, '', [], (), {})
class Hodor(object):
    def __init__(self, url, config=None, proxies=None,
auth=None, ua=DEFAULT_HODOR_UA,
pagination_max_limit=DEFAULT_HODOR_MAX_PAGES,
crawl_delay=DEFAULT_CRAWL_DELAY,
ssl_verify=False,
trim_values=True,
robots=True,
reppy_capacity=100):
self.content = None
self.url = url
self.domain = self._get_domain()
        self.proxies = proxies or {}
self.auth = auth
self.ua = ua
self.trim_values = trim_values
self.ssl_verify = ssl_verify
self.config = {}
self.extra_config = {}
self.robots = RobotsCache(capacity=reppy_capacity) if robots else None
self._pages = []
self._page_count = 0
self._pagination_max_limit = pagination_max_limit
self.crawl_delay = self._crawl_delay(crawl_delay)
        for k, v in (config or {}).items():
if k.startswith("_"):
self.extra_config[k.lstrip("_")] = v
else:
self.config[k] = v
def _get_domain(self):
parsed_uri = urlparse(self.url)
return '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
    def _crawl_delay(self, crawl_delay):
        if self.robots not in EMPTY_VALUES:
            try:
                # the robots.txt fetch is what can raise ConnectionException,
                # so it must sit inside the try block
                expiry, robots = self.robots.fetch('{}robots.txt'.format(self.domain))
                delay = robots.agent(self.ua).delay
                crawl_delay = max(filter(partial(is_not, None),
                                         [delay, crawl_delay]))
            except ConnectionException:
                pass
        return crawl_delay
def _fetch(self, url):
'''Does the requests fetching and stores result in self.content'''
if self.robots in EMPTY_VALUES or self.robots.allowed(url, self.ua):
session = requests.session()
headers = {'User-Agent': self.ua}
if len(self.proxies) > 0:
session.proxies = self.proxies
if self.auth:
r = session.get(url, headers=headers, auth=self.auth, verify=self.ssl_verify)
else:
r = session.get(url, headers=headers, verify=self.ssl_verify)
self.content = r.content
return self.content
@staticmethod
def _get_value(content, rule):
'''Returns result for a specific xpath'''
try:
tree = html.fromstring(content)
except TypeError:
tree = None
post_processing = rule.get('transform', lambda data: data)
data = ""
if tree not in EMPTY_VALUES:
if 'xpath' in rule:
data = tree.xpath(rule['xpath'])
elif 'css' in rule:
data = [node.text_content() for node in tree.cssselect(rule['css'])]
many = rule.get('many', True)
if not many:
if len(data) == 0:
data = None
else:
data = post_processing(data[0])
else:
data = [post_processing(d) for d in data]
return data
@staticmethod
def _group_data(data, groups, config):
del_fields = []
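        # e.g. groups={'items': ['title', 'price']} reshapes
        #   {'title': [t1, t2], 'price': [p1, p2]}
        # into
        #   {'items': [{'title': t1, 'price': p1}, {'title': t2, 'price': p2}]}
        # (field names here are illustrative)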
for dest, group_fields in groups.items():
if '__all__' in group_fields or group_fields == '__all__':
group_fields = [rule for rule in config.keys() if not rule.startswith('_')]
del_fields.extend(group_fields)
gdata = []
for field in group_fields:
gdata.append(data[field])
data[dest] = []
for gd in zip(*gdata):
d = {}
for i, field in enumerate(group_fields):
d[field] = gd[i]
data[dest].append(d)
if len(del_fields) == 0:
del_fields = [field for field_set in groups.values() for field in field_set]
for field in del_fields:
if field in data:
del data[field]
def _package_pages(self):
self._data = {}
if len(self._pages) == 1:
self._data = self._pages[0]
else:
self._data = {key: [] for key in self._pages[0].keys()}
for page in self._pages:
for k, v in page.items():
if hasattr(v, '__iter__'):
self._data[k].extend(v)
else:
self._data[k].append(v)
return self._data
    @classmethod
    def _parse(cls, content, config=None, extra_config=None, trim_values=True):
        '''Parses the content based on the config set'''
        config = config or {}
        extra_config = extra_config or {}
        if len(config) == 0:
_data = {'content': content}
else:
_data = {}
try:
str_class = basestring
except NameError:
str_class = str
for key, rule in config.items():
value = cls._get_value(content, rule)
if trim_values and value not in EMPTY_VALUES:
if 'many' in rule and rule['many']:
value = [v.strip() if isinstance(v, str_class) else v for v in value]
else:
value = value.strip() if isinstance(value, str_class) else value
_data[key] = value
paginate_by = extra_config.get('paginate_by')
if paginate_by:
paginate_by = cls._get_value(content, paginate_by)
groups = extra_config.get('groups', {})
if groups:
cls._group_data(_data, groups, config)
return _data, paginate_by
def _get(self, url):
self._fetch(url)
data, paginate_by = self._parse(self.content, self.config, self.extra_config, self.trim_values)
if paginate_by not in EMPTY_VALUES:
paginate_by = urljoin(self.domain, paginate_by)
return data, paginate_by
def get(self, url=None):
url = url if url else self.url
self._data, paginate_by = self._get(url)
self._pages.append(self._data)
self._page_count += 1
if paginate_by and self._page_count < self._pagination_max_limit:
time.sleep(self.crawl_delay)
self.get(paginate_by)
self._package_pages()
return self._data
@property
def data(self):
if not hasattr(self, '_data'):
self.get()
return self._data
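# Illustrative usage sketch (assumptions: the URL and CSS rule below are
# hypothetical and only demonstrate the config format; keys prefixed with
# an underscore, e.g. "_groups", are routed into extra_config by __init__).
if __name__ == '__main__':
    demo_config = {
        'headings': {'css': 'h2', 'many': True},
    }
    scraper = Hodor('https://example.com', config=demo_config, robots=False)
    print(scraper.data)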
|
170344
|
from flask_app import hello_world
SWAGGER_SETTINGS = {
'title': 'Flask Test Application API',
'version': '1.0.0',
'basePath': '/',
'host': '',
'consumes': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data',
],
    'produces': [
'application/json',
],
'enabled_methods': ['get', 'post', 'put', 'patch', 'delete'],
}
PLUGIN_SETTINGS = {
'endpoints': [
('/hello', 'GET', hello_world),
]
}
|
170352
|
import numpy as np
import os
import argparse
import torchvision.transforms as transforms
from PIL import Image
def loading_ucf_lists():
dataset_root = "/home/ubuntu/data/ucf101"
split = 'split_1'
# data frame root
dataset_frame_root = os.path.join(dataset_root, 'rawframes')
# data list file
train_list_file = os.path.join(dataset_root, 'ucfTrainTestlist',
'ucf101_' + 'train' + '_' + split + '_rawframes' + '.txt')
test_list_file = os.path.join(dataset_root, 'ucfTrainTestlist',
'ucf101_' + 'test' + '_' + split + '_rawframes' + '.txt')
# load vid samples
samples_train = _load_list(train_list_file, dataset_frame_root)
samples_test = _load_list(test_list_file, dataset_frame_root)
return samples_train, samples_test
def loading_hmdb_lists():
dataset_root = "/home/ubuntu/data/hmdb51/"
split = 'split_1'
# data frame root
dataset_frame_root = os.path.join(dataset_root, 'rawframes')
# data list file
train_list_file = os.path.join(dataset_root, 'testTrainMulti_7030_splits',
'hmdb51_' + 'train' + '_' + split + '_rawframes' + '.txt')
test_list_file = os.path.join(dataset_root, 'testTrainMulti_7030_splits',
'hmdb51_' + 'test' + '_' + split + '_rawframes' + '.txt')
# load vid samples
samples_train = _load_list(train_list_file, dataset_frame_root)
samples_test = _load_list(test_list_file, dataset_frame_root)
return samples_train, samples_test
def _load_list(list_root, dataset_frame_root):
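    # Each line of the list file is assumed to hold "relative/path num_frames label",
    # e.g. "ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01 121 0" (illustrative values).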
with open(list_root, 'r') as f:
lines = f.readlines()
vids = []
for k, l in enumerate(lines):
lsp = l.strip().split(' ')
# path, frame, label
vid_root = os.path.join(dataset_frame_root, lsp[0])
vid_root, _ = os.path.splitext(vid_root)
        # use splitext twice because some video roots look like: abseiling/9EnSwbXxu5g.mp4.webm
vid_root, _ = os.path.splitext(vid_root)
vids.append((vid_root, int(lsp[1]), int(lsp[2])))
return vids
def _get_imgs(frame_root, frame_idx, transform):
frame = Image.open(os.path.join(frame_root, 'img_{:05d}.jpg'.format(frame_idx)))
    frame = frame.convert('RGB')  # convert() returns a new image; keep the result
frame_aug = transform(frame)
return np.array(frame_aug)
def retrieval_imgs(samples, idx, transform):
frame_root, frame_num, cls = samples[idx]
frame_indices = np.round(np.linspace(1, frame_num, num=3)).astype(np.int64)
# get query images
imgs = []
for frame_idx in frame_indices:
imgs.append(_get_imgs(frame_root, frame_idx, transform))
out_img = Image.fromarray(np.concatenate(imgs, axis=1))
return frame_root.split('/')[7], out_img
if __name__ == '__main__':
parser = argparse.ArgumentParser('retrieval visualization')
parser.add_argument('--data-source', type=str)
args = parser.parse_args()
if args.data_source == "ucf":
samples_train, samples_query = loading_ucf_lists()
elif args.data_source == "hmdb":
samples_train, samples_query = loading_hmdb_lists()
else:
raise Exception("Please assigne the data-source argument!")
top_k_indices = np.load('./model/eval_retrieval/top_k_indices.npy')
transform_list = [transforms.CenterCrop(224)]
img_transform = transforms.Compose(transform_list)
save_folder = './model/eval_retrieval/imgs'
os.makedirs(save_folder, exist_ok=True)
label_dict = dict()
for idx, top_k in enumerate(top_k_indices):
query_label, query = retrieval_imgs(samples_query, idx, img_transform)
query_root = os.path.join(save_folder, query_label)
os.makedirs(query_root, exist_ok=True)
query.save(os.path.join(query_root, 'query.png'))
# top k images
top = 1
top_k_label = []
for topk_idx in top_k:
key_label, key = retrieval_imgs(samples_train, topk_idx, img_transform)
key.save(os.path.join(query_root, 'top_{}.png'.format(top)))
top_k_label.append(key_label)
top += 1
label_dict[query_label] = top_k_label
# save label
label_file = os.path.join(save_folder, 'label_dict.txt')
    with open(label_file, 'w') as f:
        for k, v in label_dict.items():
            print(k, ":", v)
            f.write(k + ':' + str(v))
            f.write('\n')
|
170360
|
import tensorflow as tf
slim = tf.contrib.slim
from helper_net.inception_v4 import *
import pickle
import numpy as np
def get_weights():
checkpoint_file = '../checkpoints/inception_v4.ckpt'
sess = tf.Session()
arg_scope = inception_v4_arg_scope()
input_tensor = tf.placeholder(tf.float32, (None, 299, 299, 3))
with slim.arg_scope(arg_scope):
logits, end_points = inception_v4(input_tensor, is_training=False)
saver = tf.train.Saver()
saver.restore(sess, checkpoint_file)
final_weights = []
current_bn = []
final_lr = []
vars_model = tf.global_variables()
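    # Assumption (not stated in the original): tf.global_variables() yields
    # each layer's variables in groups of four -- the conv/fc weight followed
    # by its three batch-norm variables (beta, moving_mean, moving_variance) --
    # which is what the stride-4 loop below relies on.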
for i in range(0, len(vars_model), 4):
for y in range(4):
key = vars_model[i+y]
if not "Aux" in key.name:
if y in [1, 2, 3] and not "Logits" in key.name:
value = sess.run(key)
if y == 1:
current_bn = []
current_bn.append(value)
elif y == 2:
current_bn.append(value)
elif y == 3:
current_bn.append(value)
final_weights.append(current_bn)
elif "Logits" in key.name:
value = sess.run(key)
if not "biases" in key.name:
final_lr.append(value)
else:
final_lr.append(value)
final_weights.append(final_lr)
else:
value = sess.run(key)
final_weights.append([value])
with open('weights.p', 'wb') as fp:
pickle.dump(final_weights, fp)
if __name__ == "__main__":
get_weights()
|
170407
|
import comet_ml # noqa: F401
import pytest
import numpy as np
import torch
from conftest import create_dataset, create_image
from traintool.image_classification.preprocessing import (
recognize_data_format,
torch_to_numpy,
numpy_to_torch,
files_to_numpy,
files_to_torch,
load_image,
recognize_image_format,
get_num_classes,
)
@pytest.fixture
def numpy_data():
return create_dataset(data_format="numpy", seed=0, grayscale=False)
@pytest.fixture
def torch_data():
return create_dataset(data_format="torch", seed=0, grayscale=False)
@pytest.fixture
def files_data(tmp_path):
return create_dataset(data_format="files", seed=0, tmp_path=tmp_path)
@pytest.fixture
def numpy_image():
return create_image(data_format="numpy", seed=0, grayscale=False)
@pytest.fixture
def torch_image():
return create_image(data_format="torch", seed=0, grayscale=False)
@pytest.fixture
def files_image(tmp_path):
return create_image(data_format="files", seed=0, tmp_path=tmp_path)
def test_recognize_data_format(numpy_data, torch_data, files_data):
# correct data formats
assert recognize_data_format(numpy_data) == "numpy"
assert recognize_data_format(torch_data) == "pytorch-dataset"
assert recognize_data_format(files_data) == "files"
# incorrect data formats
with pytest.raises(ValueError):
recognize_data_format(None)
with pytest.raises(ValueError):
recognize_data_format([1, 2, 3])
with pytest.raises(FileNotFoundError):
recognize_data_format("non/existent/dir/123")
def test_recognize_image_format(numpy_image, torch_image, files_image):
# correct image formats
assert recognize_image_format(numpy_image) == "numpy"
assert recognize_image_format(files_image) == "files"
# incorrect image formats
with pytest.raises(ValueError):
recognize_image_format(None)
with pytest.raises(ValueError):
recognize_image_format([1, 2, 3])
with pytest.raises(FileNotFoundError):
recognize_image_format("non/existent/file/123")
def test_torch_to_numpy(numpy_data, torch_data):
converted_data = torch_to_numpy(torch_data)
assert np.allclose(converted_data[0], numpy_data[0])
assert np.allclose(converted_data[1], numpy_data[1])
def test_numpy_to_torch(numpy_data, torch_data):
converted_data = numpy_to_torch(numpy_data)
# Note that we compare with tolerance of 0.1 here, because due to conversion to PIL,
# values are not exactly preserved.
assert torch.allclose(converted_data[0][0], torch_data[0][0], atol=0.1)
assert converted_data[0][1] == torch_data[0][1]
resized_converted_data = numpy_to_torch(
numpy_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=[0.1, 0.1, 0.1]
)
assert resized_converted_data[0][0].shape[1] == 224
assert resized_converted_data[0][0].shape[2] == 224
def test_files_to_numpy(files_data, numpy_data):
converted_data = files_to_numpy(files_data)
assert converted_data[0][0].shape == numpy_data[0][0].shape
resized_converted_data = files_to_numpy(
files_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=[0.1, 0.1, 0.1]
)
assert resized_converted_data[0][0].shape[1] == 224
assert resized_converted_data[0][0].shape[2] == 224
def test_files_to_torch(files_data, torch_data):
converted_data = files_to_torch(files_data)
assert converted_data[0][0].shape == torch_data[0][0].shape
    resized_converted_data = files_to_torch(
files_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=[0.1, 0.1, 0.1]
)
assert resized_converted_data[0][0].shape[1] == 224
assert resized_converted_data[0][0].shape[2] == 224
# TODO: Maybe add tests for to_numpy and to_torch, but note that these are kinda
# redundant to the tests above.
def test_load_image(tmp_path):
data = create_dataset(grayscale=False, data_format="files", tmp_path=tmp_path)
    # Select the first image found.
image_path = next(data.rglob("*.png"))
# torch
img = load_image(image_path, resize=50, crop=40)
assert isinstance(img, torch.Tensor)
assert img.shape == (3, 40, 40)
# numpy
img = load_image(image_path, resize=50, crop=40, to_numpy=True)
assert isinstance(img, np.ndarray)
assert img.shape == (3, 40, 40)
def test_get_num_classes(numpy_data, files_data):
assert get_num_classes(numpy_data) == 4
assert get_num_classes(files_data) == 4
|
170409
|
from cms.models.pluginmodel import CMSPlugin
from django.db import models
class VerticalSpacerPlugin(CMSPlugin):
smart_space = models.PositiveIntegerField(
"Default Space",
default=0,
help_text="in px, for desktop, height on other devices is calculated automatically",
)
space_xs = models.PositiveIntegerField(
"All screens (default value)",
default=0,
help_text="in px for extra small screens and above",
blank=True,
null=True,
)
space_sm = models.PositiveIntegerField(
"small screens and above",
help_text="in px for small screens and above",
blank=True,
null=True,
)
space_md = models.PositiveIntegerField(
"medium screens and above",
help_text="in px for medium screens and above",
blank=True,
null=True,
)
space_lg = models.PositiveIntegerField(
"large screens and above",
help_text="in px for large screens and above",
blank=True,
null=True,
)
space_xl = models.PositiveIntegerField(
"very large screens", help_text="in px for extra large screens", blank=True, null=True
)
def has_advanced_settings(self):
        # 0 doesn't count
return self.space_xs or self.space_sm or self.space_md or self.space_lg or self.space_xl
def __str__(self):
return "smart {}, xs {}, sm {}, md {}, lg {}, xl {}".format(
self.smart_space,
self.space_xs,
self.space_sm,
self.space_md,
self.space_lg,
self.space_xl,
)
|
170466
|
import pytest
from redis.exceptions import WatchError
def test_ok(redis):
client = redis.ext.client
pipeline = client.pipeline()
pipeline.set('test', 1)
pipeline.sadd('test2', 2)
pipeline.execute()
assert client.get('test') == b'1'
assert redis.dict == {b'test': b'1', b'test2': {b'2'}}
def test_ok_lowlevel(redis):
client = redis.ext.client
assert client.execute_command('MULTI') == b'OK'
assert client.get('test') == b'QUEUED'
    # the Redis client's response callbacks coerce the QUEUED reply to a bool
assert not client.set('test', 1)
assert redis.dict == {}
assert client.execute_command('EXEC') == [None, b'OK']
assert redis.dict == {b'test': b'1'}
def test_ok_response(redis):
client = redis.ext.client
client.sadd('test', 1, 2)
client.sadd('test2', 2, 3, 4)
assert client.pipeline().scard('test').scard('test2').execute() == [2, 3]
def test_back_to_normal_state(redis):
client = redis.ext.client
result = client.pipeline().set('test', 1).execute()
assert result
result = client.sadd('test2', 1, 2)
assert result == 2
def test_watch(redis):
client = redis.ext.client
client2 = redis.ext.new_client()
client.set('watched', 1)
pipeline = client.pipeline()
assert pipeline.watch('watched')
pipeline.multi()
pipeline.get('watched')
pipeline.set('key', 1)
assert client2.set('watched', 2)
with pytest.raises(WatchError, match='Watched variable changed'):
pipeline.execute()
assert redis.dict == {b'watched': b'2'}
|
170474
|
import torch
from torch import nn
from torch.nn import Parameter
jit_scripts = {}
class StochasticModule(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(StochasticModule, self).__init__(*args, **kwargs)
class BDropout(StochasticModule):
"""
Extends the base Dropout layer by adding a regularizer as derived by
    Gal and Ghahramani, "Dropout as a Bayesian Approximation" (2015)
"""
def __init__(self, rate=0.5, name=None, regularizer_scale=1.0, **kwargs):
super(BDropout, self).__init__(**kwargs)
self.name = name
self.register_buffer('regularizer_scale',
torch.tensor(0.5 * regularizer_scale))
self.register_buffer(
'rate',
rate if isinstance(rate, torch.Tensor) else torch.tensor(rate))
self.register_buffer('p', 1 - self.rate)
self.register_buffer('noise', torch.bernoulli(self.p))
def weights_regularizer(self, weights):
self.p = 1 - self.rate
return self.regularizer_scale * (self.p * (weights**2).sum(0)).sum()
def biases_regularizer(self, biases):
return self.regularizer_scale * ((biases**2).sum(0)).sum()
def resample(self, seed=None):
self.update_noise(self.noise, seed)
def update_noise(self, x, seed=None):
if seed is not None:
torch.manual_seed(seed)
self.p = 1 - self.rate
self.noise.data = torch.bernoulli(self.p.expand(x.shape))
def forward(self, x, resample=True, mask_dims=2, seed=None, **kwargs):
sample_shape = x.shape[-mask_dims:]
if (sample_shape[1:] != self.noise.shape[1:]
or sample_shape[0] > self.noise.shape[0]):
# resample if we can't re-use old numbers
# this happens when the incoming batch size is bigger than
# the noise batch size, or when the rest of the shape differs
sample = x.view(-1, *sample_shape)[0]
self.update_noise(sample, seed)
elif resample:
if seed is not None:
torch.manual_seed(seed)
return (x * torch.bernoulli(self.p.expand(x.shape))) / self.p
# we never need the noise gradients
return (x * self.noise[..., :x.shape[-mask_dims], :].detach()) / self.p
def extra_repr(self):
if self.rate.dim() >= 1 and len(self.rate) > 1:
desc = 'rate=[mean: {}, min: {}, max: {}], regularizer_scale={}'
return desc.format(self.rate.mean(), self.rate.min(),
self.rate.max(), self.regularizer_scale)
else:
return 'rate={}, regularizer_scale={}'.format(
self.rate, self.regularizer_scale)
class CDropout(BDropout):
def __init__(self,
rate=0.5,
name=None,
regularizer_scale=1.0,
dropout_regularizer=1.0,
temperature=0.1,
**kwargs):
super(CDropout, self).__init__(rate, name, regularizer_scale, **kwargs)
self.register_buffer('temp', torch.tensor(temperature))
self.register_buffer('dropout_regularizer',
torch.tensor(dropout_regularizer))
self.logit_p = Parameter(-torch.log(1.0 / self.p - 1.0))
self.register_buffer('concrete_noise', torch.bernoulli(self.p))
def weights_regularizer(self, weights):
p = self.p
reg = self.regularizer_scale * (p * (weights**2).sum(0))
reg += self.dropout_regularizer * (p * p.log() + (1 - p) *
(1 - p).log())
return reg.sum()
def update_noise(self, x, seed=None):
if seed is not None:
torch.manual_seed(seed)
self.noise.data = torch.rand_like(x)
if not self.training:
self.update_concrete_noise(self.noise)
def update_concrete_noise(self, noise):
"""Updates the concrete dropout masks.
Args:
noise (Tensor): Input.
"""
noise_p = noise + 1e-7
noise_m = noise - 1e-7
concrete_p = self.logit_p + (noise_p / (1 - noise_m)).log()
probs = (concrete_p / self.temp).sigmoid()
# forward pass uses bernoulli sampled noise, but backwards
# through concrete distribution
noise = torch.bernoulli(probs)
self.concrete_noise = (noise - probs).detach() + probs
self.p = self.logit_p.sigmoid()
def forward(self, x, resample=False, mask_dims=2, seed=None, **kwargs):
"""Computes the concrete dropout.
Args:
x (Tensor): Input.
resample (bool): Whether to force resample.
mask_dims (int): Number of dimensions to sample noise for
(0 for all).
Returns:
Output (Tensor).
"""
sample_shape = x.shape[-mask_dims:]
noise = self.noise
resampled = False
if resample:
if seed is not None:
torch.manual_seed(seed)
noise = torch.rand_like(x)
resampled = True
elif (sample_shape[1:] != self.noise.shape[1:]
or sample_shape[1:] != self.concrete_noise.shape[1:]
or sample_shape[0] > self.concrete_noise.shape[0]):
# resample if we can't re-use old numbers
# this happens when the incoming batch size is bigger than
# the noise batch size, or when the rest of the shape differs
sample = x.view(-1, *sample_shape)[0]
self.update_noise(sample, seed)
noise = self.noise
resampled = True
if self.training:
self.update_concrete_noise(noise)
concrete_noise = self.concrete_noise
else:
if resampled:
self.update_concrete_noise(noise)
# We never need these gradients in evaluation mode.
concrete_noise = self.concrete_noise.detach()
return x * concrete_noise[..., :x.shape[-mask_dims], :]
def extra_repr(self):
self.rate = 1 - self.logit_p.sigmoid().detach()
if self.rate.dim() >= 1 and len(self.rate) > 1:
            desc = 'rate=[mean: {}, min: {}, max: {}], temperature={}, regularizer_scale={}'
return desc.format(self.rate.mean(), self.rate.min(),
self.rate.max(), self.temp,
self.regularizer_scale)
else:
return 'rate={}, temperature={}, regularizer_scale={}'.format(
1 - self.logit_p.sigmoid(), self.temp, self.regularizer_scale)
class TLNDropout(BDropout):
    '''
    Implements truncated log-normal dropout (NIPS 2017)
    '''
    def __init__(self, interval=(-10, 0), rate=0.5, **kwargs):
        # the parent __init__ must run first: it sets up the nn.Module
        # machinery and registers the `rate` buffer used below
        super(TLNDropout, self).__init__(rate, **kwargs)
        self.register_buffer('interval', torch.tensor(interval))
        self.logit_posterior_mean = Parameter(
            -torch.log(1.0 / (1 - self.rate) - 1.0))
        # self.logit_posterior_std = logit_posterior_std
def weights_regularizer(self, weights):
'''
In this case the weights regularizer is actually independent of the
weights (only depends on the alpha parameter)
'''
return 0
def update_noise(self, x):
pass
    def forward(self, x):
        # left unimplemented in the original; fail loudly rather than
        # silently returning None
        raise NotImplementedError
class BSequential(nn.modules.Sequential):
" An extension to sequential that allows for controlling resampling"
def __init__(self, *args):
super(BSequential, self).__init__(*args)
self.modules_to_regularize = []
def resample(self, seed=None):
i = 0
for module in self._modules.values():
if isinstance(module, BDropout):
if seed is not None:
module.resample(seed + i)
else:
module.resample()
i += 1
def forward(self, input, resample=True, repeat_mask=False, **kwargs):
modules = list(self._modules.values())
for i, module in enumerate(modules):
if isinstance(module, BDropout) or isinstance(
module, torch.nn.Dropout):
                if i < len(modules) - 1:  # guard: dropout may be the last layer
next_module = modules[i + 1]
if isinstance(next_module, SpectralNorm):
# rescale lipschitz constant by dropout probability
pass
if isinstance(module, StochasticModule):
input = module(input,
resample=resample,
repeat_mask=repeat_mask,
**kwargs)
else:
input = module(input)
return input
def regularization_loss(self):
names, modules = [list(x) for x in zip(*self._modules.items())]
reg_loss = 0
if len(self.modules_to_regularize) > 0:
# use memoized list of parameters instead of doing the recursive
# calls in the loop below
for d in self.modules_to_regularize:
module = d['module']
if 'weight' in d:
reg_loss += module.weights_regularizer(d['weight'])
if 'bias' in d:
reg_loss += module.biases_regularizer(d['bias'])
return reg_loss
for i, (name, module) in enumerate(zip(names, modules)):
if hasattr(module, 'weights_regularizer'):
                # find the first subsequent module (starting from the
                # current one) that has a weight attribute
to_regularize = {'module': module}
for next_module in modules[i:]:
if isinstance(next_module, SpectralNorm):
next_module = next_module.module
if isinstance(next_module, nn.Linear)\
or isinstance(next_module,
nn.modules.conv._ConvNd):
to_regularize['weight'] = next_module.weight
reg_loss += module.weights_regularizer(
next_module.weight)
if hasattr(next_module, 'bias')\
and next_module.bias is not None\
and hasattr(module, 'biases_regularizer'):
to_regularize['bias'] = next_module.bias
reg_loss += module.biases_regularizer(
next_module.bias)
break
if len(to_regularize) > 1:
self.modules_to_regularize.append(to_regularize)
elif hasattr(module, 'regularization_loss'):
reg_loss += module.regularization_loss()
self.modules_to_regularize.extend(module.modules_to_regularize)
return reg_loss
class SpectralNorm(torch.nn.Module):
"""
Applies spectral normalization to the weights matrix, i.e.
W_sn = W/sigma(W), where sigma(W) is the largest eigenvalue
of W
"""
def __init__(self,
module,
power_iterations=1,
max_K=10,
param_name='weight',
train_scale=False):
assert hasattr(module, param_name)
super(SpectralNorm, self).__init__()
self.module = module
self.param_name = param_name
self.n_iter = power_iterations
self.max_K = max_K
self.init_params()
self.scale = torch.nn.Parameter(torch.zeros(1),
requires_grad=train_scale)
def extra_repr(self):
if hasattr(self.module, self.param_name):
w = getattr(self.module, self.param_name)
else:
w = getattr(self.module, self.param_name + '_bar')
return "scale={}, norm={}".format(
(self.max_K * self.scale.sigmoid()).data,
torch.svd(w)[1][0])
def init_params(self):
w = self.module._parameters[self.param_name]
w_sn = torch.nn.Parameter(w.data)
M = w.shape[0]
        N = w.view(M, -1).shape[1]  # v spans the column space of the flattened weight
u = torch.randn(M).to(w.device, w.dtype)
u.data = u / (u.norm() + 1e-12)
v = torch.randn(N).to(w.device, w.dtype)
v.data = v / (v.norm() + 1e-12)
self.module.register_parameter(self.param_name + "_bar", w_sn)
self.module.register_buffer(self.param_name + "_u", u)
self.module.register_buffer(self.param_name + "_v", v)
del self.module._parameters[self.param_name]
def power_iteration(self, n_iters=1):
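        # Power iteration estimates the largest singular value sigma(W) of the
        # flattened weight: u and v converge to the top left/right singular
        # vectors, and sigma is recovered as u . (W v) below.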
u = getattr(self.module, self.param_name + '_u')
v = getattr(self.module, self.param_name + '_v')
w = getattr(self.module, self.param_name + '_bar')
M = w.shape[0]
w_square = w.view(M, -1)
for i in range(n_iters):
v_ = torch.mv(w_square.data.transpose(0, 1), u.data)
v.data = v_ / (v_.norm() + 1e-12)
u_ = torch.mv(w_square.data, v.data)
u.data = u_ / (u_.norm() + 1e-12)
sigma_w = u.dot(w.view(M, -1).mv(v))
setattr(self.module, self.param_name,
self.max_K * self.scale.sigmoid() * (w / sigma_w.expand_as(w)))
def forward(self, *args, **kwargs):
if self.training:
self.power_iteration(self.n_iter)
return self.module(*args, **kwargs)
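# Minimal usage sketch (illustrative; the layer sizes are arbitrary). Two
# stochastic forward passes through a BSequential containing a BDropout layer
# should generally differ, and the dropout regularizer is exposed through
# regularization_loss().
if __name__ == '__main__':
    net = BSequential(
        nn.Linear(4, 16), nn.ReLU(), BDropout(rate=0.1), nn.Linear(16, 1))
    x = torch.randn(8, 4)
    y1 = net(x, resample=True)
    y2 = net(x, resample=True)
    print('stochastic outputs differ:', not torch.allclose(y1, y2))
    print('regularization loss:', float(net.regularization_loss()))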
|
170506
|
import sublime, sublime_plugin
import shlex, os
from ..libs import util
from ..libs import Terminal
from ..libs import javaScriptEnhancements
from ..libs.global_vars import *
class JavascriptEnhancementsExecuteOnTerminalCommand():
custom_name = ""
cli = ""
path_cli = ""
settings_name = ""
placeholders = {}
settings = None
command = []
working_directory = ""
is_node = False
is_npm = False
is_bin_path = False
also_non_project = False
def run(self, **kwargs):
self.settings = util.get_project_settings()
if self.settings:
if not self.settings_name:
self.working_directory = self.settings["project_dir_name"]
else:
self.working_directory = self.settings[self.settings_name]["working_directory"]
if self.is_node:
self.path_cli = self.settings["project_settings"]["node_js_custom_path"] or javaScriptEnhancements.get("node_js_custom_path") or NODE_JS_EXEC
elif self.is_npm:
if self.settings["project_settings"]["use_yarn"]:
self.path_cli = self.settings["project_settings"]["yarn_custom_path"] or javaScriptEnhancements.get("yarn_custom_path") or YARN_EXEC
else:
self.path_cli = self.settings["project_settings"]["npm_custom_path"] or javaScriptEnhancements.get("npm_custom_path") or NPM_EXEC
else:
self.path_cli = self.settings[self.settings_name]["cli_custom_path"] if self.settings[self.settings_name]["cli_custom_path"] else ( javaScriptEnhancements.get(self.custom_name+"_custom_path") if javaScriptEnhancements.get(self.custom_name+"_custom_path") else self.cli )
if sublime.platform() != "windows" and (self.settings["project_settings"]["node_js_custom_path"] or javaScriptEnhancements.get("node_js_custom_path")):
if os.path.isabs(self.path_cli) :
self.command = [shlex.quote(self.path_cli)]
else:
self.command = ["$(which "+shlex.quote(self.path_cli)+")"]
self.path_cli = self.settings["project_settings"]["node_js_custom_path"] or javaScriptEnhancements.get("node_js_custom_path")
if kwargs.get("command"):
if not self.command:
self.command = kwargs.get("command")
else:
self.command += kwargs.get("command")
self.prepare_command(**kwargs)
elif self.also_non_project:
self.working_directory = os.path.expanduser("~")
if self.is_node:
self.path_cli = javaScriptEnhancements.get("node_js_custom_path") or NODE_JS_EXEC
elif self.is_npm:
if self.settings["project_settings"]["use_yarn"]:
self.path_cli = javaScriptEnhancements.get("yarn_custom_path") or YARN_EXEC
else:
self.path_cli = javaScriptEnhancements.get("npm_custom_path") or NPM_EXEC
else:
self.path_cli = javaScriptEnhancements.get(self.custom_name+"_custom_path") if javaScriptEnhancements.get(self.custom_name+"_custom_path") else self.cli
if sublime.platform() != "windows" and javaScriptEnhancements.get("node_js_custom_path"):
if os.path.isabs(self.path_cli) :
self.command = [shlex.quote(self.path_cli)]
else:
self.command = ["$(which "+shlex.quote(self.path_cli)+")"]
self.path_cli = javaScriptEnhancements.get("node_js_custom_path")
if kwargs.get("command"):
if not self.command:
self.command = kwargs.get("command")
else:
self.command += kwargs.get("command")
self.prepare_command(**kwargs)
else :
sublime.error_message("Error: can't get project settings")
  def prepare_command(self, **kwargs):
    # overridden by subclasses; must accept the keyword arguments that
    # run() forwards via self.prepare_command(**kwargs)
    pass
def _run(self):
if self.is_node and self.is_bin_path:
self.command[0] = shlex.quote(os.path.join(NODE_MODULES_BIN_PATH, self.command[0])) if sublime.platform() != "windows" else os.path.join(NODE_MODULES_BIN_PATH, self.command[0]+".cmd")
self.working_directory = shlex.quote(self.working_directory) if sublime.platform() != "windows" else self.working_directory
self.path_cli = shlex.quote(self.path_cli) if sublime.platform() != "windows" else self.path_cli
if sublime.platform() != "windows":
views = self.window.views()
view_with_term = None
for view in views:
if view.name() == "JavaScript Enhancements Terminal (bash)":
view_with_term = view
if view_with_term:
self.window.focus_view(view_with_term)
self.window.run_command("terminal_view_send_string", args={"string": "cd "+self.working_directory+"\n"})
else :
self.window.run_command("set_layout", args={"cells": [[0, 0, 1, 1], [0, 1, 1, 2]], "cols": [0.0, 1.0], "rows": [0.0, 0.7, 1.0]})
self.window.focus_group(1)
view = self.window.new_file()
args = {"cmd": "/bin/bash -l", "title": "JavaScript Enhancements Terminal (bash)", "cwd": self.working_directory, "syntax": None, "keep_open": False}
view.run_command('terminal_view_activate', args=args)
# stop the current process with SIGINT and call the command
sublime.set_timeout_async(lambda: self.window.run_command("terminal_view_send_string", args={"string": "\x03"}) or
self.window.run_command("terminal_view_send_string", args={"string": self.path_cli+" "+(" ".join(self.command))+"\n"}), 500)
else:
terminal = Terminal(cwd=self.working_directory, title="JavaScript Enhancements Terminal (cmd.exe)")
terminal.run([self.path_cli]+self.command)
def substitute_placeholders(self, variable):
if isinstance(variable, list) :
for index in range(len(variable)):
for key, placeholder in self.placeholders.items():
variable[index] = variable[index].replace(key, placeholder)
return variable
elif isinstance(variable, str) :
for key, placeholder in self.placeholders.items():
variable = variable.replace(key, placeholder)
return variable
|
170513
|
from typing import Callable, List
from .header import Header
from .method import MethodType
from .responses import Response, no_content
class AllowHeader(Header, type=str, http_name='allow'):
...
def make_options_controller(
methods: List[MethodType],
) -> Callable[[], Response]:
def controller() -> Response:
return no_content(
headers=[AllowHeader(','.join([m.value for m in methods]))]
)
return controller
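# Illustrative usage (assumption: MethodType exposes members such as GET and
# POST whose .value is the HTTP method name, as the join above implies):
#
#     controller = make_options_controller([MethodType.GET, MethodType.POST])
#     response = controller()  # 204 No Content with an 'allow: GET,POST' header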
|
170558
|
from random import randint
import pytest
from ms.algo.mergesort_thread import sort as sort_thread
from ms.algo.mergesort_proc import sort as sort_proc
# the following helps when running $ pytest -vv tests
sort_thread.__name__ = 'Sort Thread'
sort_proc.__name__ = 'Sort Proc'
@pytest.fixture(params=[sort_thread, sort_proc])
def sort(request):
return request.param
@pytest.fixture(params=list(range(1, 25)) + [10000])
def vector(request):
yield [randint(-10**5, 10**5) for n in range(request.param)]
@pytest.fixture(params=[1, 2, 4, 8])
def workers(request):
yield request.param
def test_random_vector(sort, vector, workers):
assert sort(vector, workers=workers) == sorted(vector)
@pytest.mark.parametrize('v', [
[],
[1],
[1, 1],
[1, 2],
[2, 1],
])
def test_sort_edge_cases(v, sort):
sorted_v = sort(v)
assert sorted_v == sorted(v)
def test_pure_function(sort):
v = [2, 3, 1]
idv = id(v)
assert sort(v) == [1, 2, 3]
assert id(v) == idv
assert v == [2, 3, 1]
|
170634
|
import logging
import example_app
from jivago.jivago_application import JivagoApplication
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
app = JivagoApplication(example_app, debug=True)
app.run_dev()
|
170674
|
import unittest
import copy
from typing import Optional, List, Callable, Tuple
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.parallel as parallel
from torch import Tensor
import torchshard as ts
from testing import dist_worker, assertEqual, set_seed
from testing import LinearModel, LinearStackModel, ConvLinearModel
from testing import loss_reduction_type, threshold
# global test configurations
batch_size = 3
feats_size = 8
seed = 12357
class TestParallelCrossEntropy(unittest.TestCase):
@staticmethod
def run_test_parallel_cross_entropy(local_rank: int) -> None:
set_seed(seed + local_rank)
parallel_dim = -1
x = torch.randn(batch_size, feats_size).cuda(local_rank)
y = torch.randint(10, (batch_size,)).cuda(local_rank)
dist.broadcast(x, 0)
dist.broadcast(y, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
raw_model = model.module if hasattr(model, "module") else model
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
raw_model.layer2.weight,
a=0, mode='fan_in', nonlinearity='relu'
)
master_weight = ts.distributed.gather(raw_model.layer2.weight.data, dim=0)
raw_model.layer1.weight.data.copy_(master_weight)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
raw_model.layer2.bias,
val=0.5
)
master_bias = ts.distributed.gather(raw_model.layer2.bias.data, dim=0)
raw_model.layer1.bias.data.copy_(master_bias)
model.train()
criterion1 = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
criterion2 = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
y1, y2 = model(x)
# 1st assert: forward outputs
gathered_y2 = ts.distributed.gather(y2)
assertEqual(y1, gathered_y2, threshold=threshold)
loss1 = criterion1(y1, y)
loss2 = criterion2(y2, y)
if loss_reduction_type == 'none':
loss1 = loss1.sum()
loss2 = loss2.sum()
# 2nd assert: forward losses
assertEqual(loss1, loss2, threshold=threshold)
# 3rd assert: backward gradients
loss1.backward()
loss2.backward()
assertEqual(
raw_model.layer1.weight.grad,
ts.distributed.gather(raw_model.layer2.weight.grad, dim=0),
threshold=threshold
)
assertEqual(
raw_model.layer1.bias.grad,
ts.distributed.gather(raw_model.layer2.bias.grad, dim=0),
threshold=threshold
)
@staticmethod
def run_test_parallel_cross_entropy_within_ddp_mode(local_rank: int) -> None:
set_seed(seed + local_rank)
parallel_dim = None
bias = True
x = torch.randn(batch_size, 8, 1, 1).cuda(local_rank)
y = torch.randint(10, (batch_size,)).cuda(local_rank)
raw_model = ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank)
# convert nn.Linear -> nn.ParallelLinear
ts.nn.ParallelLinear.convert_parallel_linear(raw_model, dim=parallel_dim)
raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
ddp_model = parallel.DistributedDataParallel(
ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank),
device_ids=[local_rank]
)
raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
# align weight & bias
raw_model.module.conv.weight.data.copy_(ddp_model.module.conv.weight.data)
raw_model.module.conv.bias.data.copy_(ddp_model.module.conv.bias.data)
raw_model.module.fc.weight.data.copy_(ddp_model.module.fc.weight.data)
raw_model.module.fc.bias.data.copy_(ddp_model.module.fc.bias.data)
# assert weight
assertEqual(raw_model.module.conv.weight.data, ddp_model.module.conv.weight.data, threshold=threshold)
assertEqual(raw_model.module.conv.bias.data, ddp_model.module.conv.bias.data, threshold=threshold)
assertEqual(raw_model.module.fc.weight.data, ddp_model.module.fc.weight.data, threshold=threshold)
assertEqual(raw_model.module.fc.bias.data, ddp_model.module.fc.bias.data, threshold=threshold)
# switch mode
raw_model.train()
ddp_model.train()
# 1st assert: forward outputs
y1 = raw_model(x)
y2 = ddp_model(x)
assertEqual(y1, y2, threshold=threshold)
# 2nd assert: forward losses
raw_loss = raw_criterion(y1, y)
ddp_loss = ddp_criterion(y2, y)
assertEqual(raw_loss, ddp_loss, threshold=threshold)
if loss_reduction_type == 'none':
raw_loss = raw_loss.sum()
ddp_loss = ddp_loss.sum()
# 3rd assert: backward gradients
raw_loss.backward()
ddp_loss.backward()
assertEqual(raw_model.module.fc.weight.grad, ddp_model.module.fc.weight.grad, threshold=threshold)
assertEqual(raw_model.module.fc.bias.grad, ddp_model.module.fc.bias.grad, threshold=threshold)
assertEqual(raw_model.module.conv.weight.grad, ddp_model.module.conv.weight.grad, threshold=threshold)
assertEqual(raw_model.module.conv.bias.grad, ddp_model.module.conv.bias.grad, threshold=threshold)
@staticmethod
def run_test_parallel_cross_entropy_within_ddp_mode_and_row_parallel(local_rank: int) -> None:
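        # Row-parallel case (dim=0): the fc weight is sharded along the input
        # feature dimension, so the reference weight is scattered with dim=1.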
set_seed(seed + local_rank)
parallel_dim = 0
bias = True
x = torch.randn(batch_size, 8, 1, 1).cuda(local_rank)
y = torch.randint(10, (batch_size,)).cuda(local_rank)
raw_model = ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank)
# convert nn.Linear -> nn.ParallelLinear
ts.nn.ParallelLinear.convert_parallel_linear(raw_model, dim=parallel_dim)
raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
ddp_model = parallel.DistributedDataParallel(
ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank),
device_ids=[local_rank]
)
raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
# align weight & bias
raw_model.module.conv.weight.data.copy_(ddp_model.module.conv.weight.data)
raw_model.module.conv.bias.data.copy_(ddp_model.module.conv.bias.data)
_weight = ts.distributed.scatter(ddp_model.module.fc.weight.data, dim=1)
raw_model.module.fc.weight.data.copy_(_weight)
raw_model.module.fc.bias.data.copy_(ddp_model.module.fc.bias.data)
# assert weight
assertEqual(raw_model.module.conv.weight.data, ddp_model.module.conv.weight.data, threshold=threshold)
assertEqual(raw_model.module.conv.bias.data, ddp_model.module.conv.bias.data, threshold=threshold)
assertEqual(
ts.distributed.gather(raw_model.module.fc.weight.data, dim=1),
ddp_model.module.fc.weight.data, threshold=threshold
)
assertEqual(raw_model.module.fc.bias.data, ddp_model.module.fc.bias.data, threshold=threshold)
# switch mode
raw_model.train()
ddp_model.train()
x = ts.distributed.gather(x, dim=0)
y = ts.distributed.gather(y, dim=0)
y1 = raw_model(x)
y2 = ddp_model(x)
# 1st assert: forward outputs
assertEqual(y1, y2, threshold=threshold)
raw_loss = raw_criterion(y1, y)
ddp_loss = ddp_criterion(y2, y)
if loss_reduction_type == 'none':
raw_loss = raw_loss.sum()
ddp_loss = ddp_loss.sum()
# 2nd assert: forward losses
assertEqual(raw_loss, ddp_loss, threshold=threshold)
# 3rd assert: backward gradients
raw_loss.backward()
ddp_loss.backward()
assertEqual(ts.distributed.gather(raw_model.module.fc.weight.grad, dim=-1), ddp_model.module.fc.weight.grad, threshold=threshold)
assertEqual(raw_model.module.fc.bias.grad, ddp_model.module.fc.bias.grad, threshold=threshold)
assertEqual(raw_model.module.conv.weight.grad, ddp_model.module.conv.weight.grad, threshold=threshold)
assertEqual(raw_model.module.conv.bias.grad, ddp_model.module.conv.bias.grad, threshold=threshold)
@staticmethod
def run_test_parallel_cross_entropy_within_ddp_mode_and_col_parallel(local_rank: int) -> None:
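        # Column-parallel case (dim=-1): fc weight and bias are sharded along
        # the output dimension, and ParallelCrossEntropyLoss consumes the
        # sharded logits directly.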
set_seed(seed + local_rank)
parallel_dim = -1
bias = True
x = torch.randn(batch_size, 8, 1, 1).cuda(local_rank)
y = torch.randint(10, (batch_size,)).cuda(local_rank)
raw_model = ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank)
# convert nn.Linear -> nn.ParallelLinear
ts.nn.ParallelLinear.convert_parallel_linear(raw_model, dim=parallel_dim)
raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
ddp_model = parallel.DistributedDataParallel(
ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank),
device_ids=[local_rank]
)
raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
# align weight & bias
raw_model.module.conv.weight.data.copy_(ddp_model.module.conv.weight.data)
_weight = ts.distributed.scatter(ddp_model.module.fc.weight.data, dim=0)
raw_model.module.fc.weight.data.copy_(_weight)
raw_model.module.conv.bias.data.copy_(ddp_model.module.conv.bias.data)
_bias = ts.distributed.scatter(ddp_model.module.fc.bias.data, dim=0)
raw_model.module.fc.bias.data.copy_(_bias)
# assert weight
assertEqual(raw_model.module.conv.weight.data, ddp_model.module.conv.weight.data, threshold=threshold)
assertEqual(raw_model.module.conv.bias.data, ddp_model.module.conv.bias.data, threshold=threshold)
assertEqual(
ts.distributed.gather(raw_model.module.fc.weight.data, dim=0),
ddp_model.module.fc.weight.data,
threshold=threshold
)
assertEqual(
ts.distributed.gather(raw_model.module.fc.bias.data, dim=0),
ddp_model.module.fc.bias.data,
threshold=threshold
)
# switch mode
raw_model.train()
ddp_model.train()
y1 = raw_model(x)
y2 = ddp_model(x)
# 1st assert: forward outputs
gathered_y1 = ts.distributed.gather(y1, dim=1)
gathered_y2 = ts.distributed.gather(y2, dim=0)
assertEqual(gathered_y1, gathered_y2, threshold=threshold)
# 2nd assert: forward losses
gathered_y = ts.distributed.gather(y)
raw_loss = raw_criterion(y1, gathered_y)
ddp_loss = ddp_criterion(y2, y)
if loss_reduction_type == 'none':
raw_loss = raw_loss.sum()
ddp_loss = ddp_loss.sum()
# 3rd assert: backward gradients
raw_loss.backward()
ddp_loss.backward()
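        # Gradient comparison depends on the loss reduction: with 'mean', DDP
        # already averages gradients across ranks; with summed losses, the
        # per-rank gradients are combined via reduce before comparison.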
if loss_reduction_type == 'mean':
linear_w_grad = ddp_model.module.fc.weight.grad
linear_b_grad = ddp_model.module.fc.bias.grad
else:
linear_w_grad = ts.distributed.reduce(ddp_model.module.fc.weight.grad)
linear_b_grad = ts.distributed.reduce(ddp_model.module.fc.bias.grad)
assertEqual(
raw_model.module.fc.weight.grad,
ts.distributed.scatter(linear_w_grad, dim=0),
threshold=threshold
)
assertEqual(
raw_model.module.fc.bias.grad,
ts.distributed.scatter(linear_b_grad, dim=0),
threshold=threshold
)
if loss_reduction_type == 'mean':
conv_w_grad = ts.distributed.reduce(raw_model.module.conv.weight.grad)
conv_b_grad = ts.distributed.reduce(raw_model.module.conv.bias.grad)
else:
conv_w_grad = raw_model.module.conv.weight.grad
conv_b_grad = raw_model.module.conv.bias.grad
assertEqual(conv_w_grad, ddp_model.module.conv.weight.grad, threshold=threshold)
assertEqual(conv_b_grad, ddp_model.module.conv.bias.grad, threshold=threshold)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_cross_entropy(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_cross_entropy, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_cross_entropy_within_ddp_mode(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_cross_entropy_within_ddp_mode, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_cross_entropy_within_ddp_mode_and_row_parallel(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_cross_entropy_within_ddp_mode_and_row_parallel, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_cross_entropy_within_ddp_mode_and_col_parallel(self):
ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_parallel_cross_entropy_within_ddp_mode_and_col_parallel, ngpus),
            nprocs=ngpus
        )
        ts.distributed.destroy_process_group()

class TestParallelLinearStack(unittest.TestCase):
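    """Checks a stack of sharded linear layers (module2) against a dense reference stack (module1)."""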
@staticmethod
def run_test_parallel_linear_stack(local_rank: int) -> None:
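        # Each layer in module2 may be sharded along a different dim, so the
        # master weight/bias are reconstructed per layer before being copied
        # into the dense module1.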
set_seed(seed + local_rank)
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearStackModel(feats_size, feats_size*2, bias=True).cuda(local_rank)
raw_model = model.module if hasattr(model, "module") else model
        # align weight and bias of each layer pair
        for idx, (m1, m2) in enumerate(zip(raw_model.module1.modules(), raw_model.module2.modules())):
            if idx == 0:
                continue  # skip the container module itself
ts.nn.init.shard_init_helper_(
torch.nn.init.xavier_normal_,
m2.weight
)
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
m2.bias,
val=0.133
)
            parallel_dim = getattr(m2.weight, ts._PARALLEL_DIM)
            if parallel_dim is None:
                master_weight = m2.weight.data
                master_bias = m2.bias.data
            elif parallel_dim == 0:
                master_weight = ts.distributed.gather(m2.weight.data, dim=1)
                master_bias = m2.bias.data
            elif parallel_dim in (1, -1):
                master_weight = ts.distributed.gather(m2.weight.data, dim=0)
                master_bias = ts.distributed.gather(m2.bias.data, dim=0)
            else:
                raise ValueError(f'unexpected parallel dim: {parallel_dim}')
            m1.weight.data.copy_(master_weight)
            m1.bias.data.copy_(master_bias)
# forward
model.train()
y1, y2 = model(x)
assertEqual(y1, y2, threshold=threshold)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_linear_stack(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_linear_stack, ngpus),
nprocs=ngpus
)

class TestParallelLinear(unittest.TestCase):
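    """Checks single sharded linear layers: no, row, and column parallelism."""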
@staticmethod
def run_test_raw_parallel_linear(local_rank):
set_seed(seed + local_rank)
parallel_dim = None
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
model = model.module if hasattr(model, "module") else model
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
model.layer2.weight,
)
model.layer1.weight.data.copy_(model.layer2.weight.data)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
model.layer2.bias,
val=0.1
)
model.layer1.bias.data.copy_(model.layer2.bias.data)
# forward
model.train()
y1, y2 = model(x)
assertEqual(y1, y2, threshold=threshold)
@staticmethod
def run_test_row_parallel_linear(local_rank):
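        # Row-parallel (dim=0): weight shards are gathered along dim=-1 to
        # rebuild the full weight; the bias is not sharded and is copied directly.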
set_seed(seed + local_rank)
parallel_dim = 0
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
model.layer2.weight,
a=0, mode='fan_in', nonlinearity='leaky_relu'
)
master_weight = ts.distributed.gather(model.layer2.weight.data, dim=-1)
model.layer1.weight.data.copy_(master_weight)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
model.layer2.bias,
val=0.333
)
model.layer1.bias.data.copy_(model.layer2.bias.data)
# forward
model.train()
y1, y2 = model(x)
assertEqual(y1, y2, threshold=threshold)
@staticmethod
def run_test_col_parallel_linear(local_rank):
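        # Column-parallel (dim=-1): weight and bias shards are gathered along
        # dim=0, and the sharded output y2 is gathered before comparison.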
set_seed(seed + local_rank)
parallel_dim = -1
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
model = model.module if hasattr(model, "module") else model
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
model.layer2.weight,
a=0, mode='fan_in', nonlinearity='leaky_relu'
)
master_weight = ts.distributed.gather(model.layer2.weight.data, dim=0)
model.layer1.weight.data.copy_(master_weight)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
model.layer2.bias,
val=0.5
)
master_bias = ts.distributed.gather(model.layer2.bias.data, dim=0)
model.layer1.bias.data.copy_(master_bias)
# forward
model.train()
y1, y2 = model(x)
y2 = ts.distributed.gather(y2)
assertEqual(y1, y2, threshold=threshold)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_col_parallel_linear(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_col_parallel_linear, ngpus),
nprocs=ngpus
)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_raw_parallel_linear(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_raw_parallel_linear, ngpus),
nprocs=ngpus
)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_row_parallel_linear(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_row_parallel_linear, ngpus),
nprocs=ngpus
)

if __name__ == '__main__':
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
unittest.main()
# ---
class Chain:
    """Minimal example of method chaining: each mutator returns self."""
    def __init__(self, val):
        self.val = val

    def add(self, b):
        self.val += b
        return self

    def sub(self, b):
        self.val -= b
        return self

    def mul(self, b):
        self.val *= b
        return self

    def __repr__(self):
        # Without __repr__, print() would show the default object repr
        # rather than the accumulated value.
        return str(self.val)

print(Chain(5).add(5).sub(2).mul(10))  # (5 + 5 - 2) * 10 -> 80
# ---
import unittest
import tock
from tock.grammars import *
from tock.syntax import String
class TestGrammar(unittest.TestCase):
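    """Exercises construction, classification, and the nullable/first/follow computations of tock.grammars.Grammar."""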
def test_init(self):
g = Grammar()
g.set_start_nonterminal('S')
g.add_nonterminal('T')
g.add_rule('S', 'a S b')
g.add_rule('S', 'T')
g.add_rule('T', 'c T d')
g.add_rule('T', '&')
self.assertEqual(g.nonterminals, {'S', 'T'})
self.assertEqual(set(g.rules), {(String('S'), String('a S b')),
(String('S'), String('T')),
(String('T'), String('c T d')),
(String('T'), String('&'))})
self.assertEqual(str(g), 'nonterminals: {S,T}\nstart: S\nS → a S b\nS → T\nT → c T d\nT → ε')
def test_from_lines(self):
g = Grammar.from_lines([
'S -> a S b',
'S -> &'
])
self.assertEqual(g.nonterminals, {'S'})
self.assertEqual(set(g.rules), {(String('S'), String('a S b')),
(String('S'), String('&'))})
self.assertEqual(str(g), 'nonterminals: {S}\nstart: S\nS → a S b\nS → ε')
def test_is(self):
g = Grammar.from_lines([
'S -> a S',
'S -> &'
])
self.assertFalse(g.is_leftlinear())
self.assertTrue(g.is_rightlinear())
self.assertTrue(g.is_contextfree())
self.assertFalse(g.is_contextsensitive())
self.assertFalse(g.is_noncontracting())
self.assertTrue(g.is_unrestricted())
g = Grammar.from_lines([
'S -> S b',
'S -> b'
])
self.assertTrue(g.is_leftlinear())
self.assertFalse(g.is_rightlinear())
self.assertTrue(g.is_contextfree())
self.assertTrue(g.is_contextsensitive())
self.assertTrue(g.is_noncontracting())
self.assertTrue(g.is_unrestricted())
g = Grammar.from_lines([
"S' -> &",
"S' -> S",
'S -> a S b',
'S -> a b'
])
self.assertFalse(g.is_leftlinear())
self.assertFalse(g.is_rightlinear())
self.assertTrue(g.is_contextfree())
self.assertTrue(g.is_contextsensitive())
self.assertTrue(g.is_noncontracting())
self.assertTrue(g.is_unrestricted())
def test_ll(self):
self.maxDiff = None
g = Grammar.from_lines(["S -> a S c",
"S -> T",
"T -> b T",
"T -> &"])
nullable = g.compute_nullable()
self.assertEqual(nullable, set(map(String,
['S', 'T', '&'])))
first = g.compute_first(nullable)
first_correct = dict([(String(k), set(v)) for (k, v) in [
('S', ['a', 'b']),
('T', ['b']),
('a', ['a']),
('b', ['b']),
('c', ['c']),
('&', []),
('S c', ['a', 'b', 'c']),
('a S c', ['a']),
('b T', ['b']),
]])
self.assertEqual(first, first_correct)
follow = g.compute_follow(nullable, first)
follow_correct = {'S': {'c', '⊣'},
'T': {'c', '⊣'}}
self.assertEqual(follow, follow_correct)
# ---
# Driver-side PySpark snippet; assumes an existing SparkContext `sc`.
sc.addPyFile('magichour.zip')  # ship the magichour package to the executors

from magichour.api.dist.events.eventEval import event_eval_rdd
from magichour.api.local.util.namedtuples import DistributedLogLine

# Load the pickled RDD of parsed log lines.
logLineURI = 'hdfs://namenode/magichour/tbird.500.templateEvalRDD'
rddlogLines = sc.pickleFile(logLineURI)

# Load the event definitions and pull them onto the driver.
eventDefURI = 'hdfs://namenode/magichour/tbird.500.eventsRDD'
eventDefs = sc.pickleFile(eventDefURI).collect()

# Evaluate the event definitions against the log lines in a 500-second window.
windowSeconds = 500
test = event_eval_rdd(sc, rddlogLines, eventDefs, windowSeconds)
test.collect()