400014
import os
from win32com.client import Dispatch
import logging
logging.basicConfig()
logger = logging.getLogger('coreFunctions_PS')
logger.setLevel(logging.WARNING)
class PsCoreFunctions(object):
    def __init__(self):
        super(PsCoreFunctions, self).__init__()
        # The methods below rely on self.psApp, so establish the COM link here.
        self.psApp = Dispatch('Photoshop.Application')
def comLink(self):
return Dispatch('Photoshop.Application')
def _new(self, force=True, fps=None):
pass
def _save(self, *args, **kwargs):
pass
# not needed
    def _saveAs(self, filePath, format=None, *args, **kwargs):
        # The two branches only differed in the format type ID
        # ('Pht3' for psd, 'Pht8' otherwise), so pick it up front.
        formatID = 'Pht3' if format == "psd" else 'Pht8'
        desc19 = Dispatch("Photoshop.ActionDescriptor")
        desc20 = Dispatch("Photoshop.ActionDescriptor")
        desc20.PutBoolean(self.psApp.StringIDToTypeID('maximizeCompatibility'), True)
        desc19.PutObject(
            self.psApp.CharIDToTypeID('As '), self.psApp.CharIDToTypeID(formatID), desc20)
        desc19.PutPath(self.psApp.CharIDToTypeID('In '), filePath)
        desc19.PutBoolean(self.psApp.CharIDToTypeID('LwCs'), True)
        self.psApp.ExecuteAction(self.psApp.CharIDToTypeID('save'), desc19, 3)
def _load(self, filePath, force=True, *args, **kwargs):
self.psApp.Open(filePath)
def _reference(self, filePath):
pass
def _import(self, filePath, *args, **kwargs):
pass
def _importSequence(self, pySeq_sequence, *args, **kwargs):
logger.warning("This function is not yet implemented")
def _importObj(self, filePath, importSettings, *args, **kwargs):
pass
def _importAlembic(self, filePath, importSettings, *args, **kwargs):
pass
def _importFbx(self, filePath, importSettings, *args, **kwargs):
pass
def _exportObj(self, filePath, exportSettings, exportSelected=True):
pass
def _exportAlembic(self, filePath, exportSettings, exportSelected=True, timeRange=[0,10]):
pass
def _exportFbx(self, filePath, exportSettings, exportSelected=True, timeRange=[0,10]):
pass
def _getSceneFile(self):
try:
activeDocument = self.psApp.Application.ActiveDocument
docName = activeDocument.name
docPath = activeDocument.path
return os.path.join(docPath, docName)
        except Exception:
            # No document open, or the document has never been saved.
            return "Untitled"
def _getProject(self):
"""returns the project folder DEFINED BY THE HOST SOFTWARE, not the Tik Manager Project"""
homeDir = os.path.expanduser("~")
norm_p_path = os.path.normpath(homeDir)
return norm_p_path
def _getVersion(self):
pass
def _getCurrentFrame(self):
pass
def _getSelection(self):
pass
def _isSceneModified(self):
return False
def _exportJPG(self, filePath, quality=12):
activeDocument = self.psApp.Application.ActiveDocument
saveOPT = Dispatch("Photoshop.JPEGSaveOptions")
saveOPT.EmbedColorProfile = True
saveOPT.FormatOptions = 1 # => psStandardBaseline
saveOPT.Matte = 1 # => No Matte
saveOPT.Quality = quality
activeDocument.SaveAs(filePath, saveOPT, True)
return True
def _exportPNG(self, filePath):
activeDocument = self.psApp.Application.ActiveDocument
saveOPT = Dispatch("Photoshop.PNGSaveOptions")
activeDocument.SaveAs(filePath, saveOPT, True)
return True
def _exportBMP(self, filePath):
activeDocument = self.psApp.Application.ActiveDocument
saveOPT = Dispatch("Photoshop.BMPSaveOptions")
activeDocument.SaveAs(filePath, saveOPT, True)
return True
def _exportTGA(self, filePath):
activeDocument = self.psApp.Application.ActiveDocument
saveOPT = Dispatch("Photoshop.TargaSaveOptions")
saveOPT.Resolution = 32
saveOPT.AlphaChannels = True
saveOPT.RLECompression = True
activeDocument.SaveAs(filePath, saveOPT, True)
return True
def _exportPSD(self, filePath):
activeDocument = self.psApp.Application.ActiveDocument
saveOPT = Dispatch("Photoshop.PhotoshopSaveOptions")
saveOPT.AlphaChannels = True
saveOPT.Annotations = True
saveOPT.Layers = True
saveOPT.SpotColors = True
activeDocument.SaveAs(filePath, saveOPT, True)
return True
def _exportTIF(self, filePath):
activeDocument = self.psApp.Application.ActiveDocument
saveOPT = Dispatch("Photoshop.TiffSaveOptions")
saveOPT.AlphaChannels = True
saveOPT.EmbedColorProfile = True
saveOPT.Layers = False
activeDocument.SaveAs(filePath, saveOPT, True)
return True
def _exportEXR(self, filePath):
idsave = self.psApp.CharIDToTypeID("save")
desc182 = Dispatch("Photoshop.ActionDescriptor")
idAs = self.psApp.CharIDToTypeID("As ")
desc183 = Dispatch("Photoshop.ActionDescriptor")
idBtDp = self.psApp.CharIDToTypeID("BtDp")
        desc183.PutInteger(idBtDp, 16)
idCmpr = self.psApp.CharIDToTypeID("Cmpr")
desc183.PutInteger(idCmpr, 1)
idAChn = self.psApp.CharIDToTypeID("AChn")
desc183.PutInteger(idAChn, 0)
idEXRf = self.psApp.CharIDToTypeID("EXRf")
desc182.PutObject(idAs, idEXRf, desc183)
idIn = self.psApp.CharIDToTypeID("In ")
desc182.PutPath(idIn, (filePath))
idDocI = self.psApp.CharIDToTypeID("DocI")
desc182.PutInteger(idDocI, 340)
idCpy = self.psApp.CharIDToTypeID("Cpy ")
desc182.PutBoolean(idCpy, True)
idsaveStage = self.psApp.StringIDToTypeID("saveStage")
idsaveStageType = self.psApp.StringIDToTypeID("saveStageType")
idsaveSucceeded = self.psApp.StringIDToTypeID("saveSucceeded")
desc182.PutEnumerated(idsaveStage, idsaveStageType, idsaveSucceeded)
self.psApp.ExecuteAction(idsave, desc182, 3)
return True
def _exportHDR(self, filePath):
idsave = self.psApp.CharIDToTypeID("save")
desc419 = Dispatch("Photoshop.ActionDescriptor")
idAs = self.psApp.CharIDToTypeID("As ")
desc419.PutString(idAs, """Radiance""")
idIn = self.psApp.CharIDToTypeID("In ")
desc419.PutPath(idIn, (filePath))
idDocI = self.psApp.CharIDToTypeID("DocI")
desc419.PutInteger(idDocI, 333)
idCpy = self.psApp.CharIDToTypeID("Cpy ")
desc419.PutBoolean(idCpy, True)
idsaveStage = self.psApp.StringIDToTypeID("saveStage")
idsaveStageType = self.psApp.StringIDToTypeID("saveStageType")
idsaveSucceeded = self.psApp.StringIDToTypeID("saveSucceeded")
desc419.PutEnumerated(idsaveStage, idsaveStageType, idsaveSucceeded)
self.psApp.ExecuteAction(idsave, desc419, 3)
return True
def _setFPS(self, fps, *args, **kwargs):
pass
def _getFPS(self, *args, **kwargs):
pass
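# A minimal usage sketch (an assumption, not part of the original module):
# it exercises the helpers above, requires Photoshop on Windows with a
# document open, and the output path is hypothetical.
if __name__ == '__main__':
    ps = PsCoreFunctions()
    print(ps._getSceneFile())
    ps._exportJPG(r'C:\temp\preview.jpg', quality=10)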
400057
from typing import Optional
import numpy as np
from observer.base_observer import BaseObserver
from shape.point_2d import Point2D
class ClosestVertexSelector(BaseObserver):
def __init__(self, editor: 'MapBasedCalibrator'):
super().__init__(editor)
    def find_closest_vertex_to_mouse(
            self, mouse_position: np.ndarray) -> Optional[Point2D]:
        # mouse_position is expected to be a (2,) array of screen coordinates.
        # (The original annotation np.ndarray(shape=(2,)) constructed an
        # array at definition time instead of annotating the type.)
        if self.editor.layer_manager.vector_map_layer() is None:
            return None
        shapes = \
            self.editor.layer_manager.vector_map_layer().reprojected_shapes()
        if len(shapes) == 0:
            return None
shape_coords = np.vstack([shape.coords() for shape in shapes])
shape_origin_vertices = \
np.vstack([shape.origin_vertices() for shape in shapes])
closest_vertex_idx = np.argmin(np.linalg.norm(
mouse_position - shape_coords, axis=1))
closest_vertex = Point2D()
closest_vertex.set_position(shape_coords[closest_vertex_idx, :])
closest_vertex.set_origin_position(
shape_origin_vertices[closest_vertex_idx, :])
return closest_vertex
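# A minimal usage sketch (an assumption, not part of the original module):
# `editor` stands for the MapBasedCalibrator instance supplied by the host
# application.
#
#   selector = ClosestVertexSelector(editor)
#   vertex = selector.find_closest_vertex_to_mouse(np.array([120.0, 64.0]))
#   if vertex is not None:
#       print(vertex)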
400066
from setuptools import setup, find_namespace_packages
from pathlib import Path
long_description = (Path(__file__).parent / "README.md").read_text()
setup(
name="icolos",
maintainer="<NAME>, <NAME>",
version="1.9.0",
url="https://github.com/MolecularAI/Icolos",
packages=find_namespace_packages(where="src"),
package_dir={"": "src"},
description="Icolos Workflow Manager",
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.8",
entry_points={
"console_scripts": [
"icolos = icolos.scripts.executor:main",
"validator = icolos.scripts.validator:main",
"sdf2smi = icolos.scripts.sdf2smi:main",
]
},
)
400069
import os
from datetime import datetime, timedelta, timezone
import tempfile
from django.db import transaction
import boto3
import pytz
from google.transit import gtfs_realtime_pb2
from busshaming.models import Trip, TripDate, TripStop, RealtimeEntry, Route, Stop
from busshaming.enums import ScheduleRelationship
S3_BUCKET_NAME = os.environ.get('S3_BUCKET_NAME', 'busshaming-realtime-dumps')
DEBUG = False
global_stats = {}
SCHEDULE_RELATIONSHIP = dict(gtfs_realtime_pb2.TripDescriptor.ScheduleRelationship.items())
ROUTE_ID_SET = set()
upsert_log = {}
def to_schedule_relationship(proto):
if proto == SCHEDULE_RELATIONSHIP['SCHEDULED']:
return ScheduleRelationship.SCHEDULED.value
elif proto == SCHEDULE_RELATIONSHIP['ADDED']:
return ScheduleRelationship.ADDED.value
elif proto == SCHEDULE_RELATIONSHIP['UNSCHEDULED']:
return ScheduleRelationship.UNSCHEDULED.value
elif proto == SCHEDULE_RELATIONSHIP['CANCELED']:
return ScheduleRelationship.CANCELLED.value
return ScheduleRelationship.SCHEDULED.value
def add_missing_tripdate(feed, realtime_trip, vehicle_id):
gtfs_trip_id = realtime_trip.trip_id
start_date = realtime_trip.start_date
if DEBUG:
print(f'Adding missing trip date for gtfs id {gtfs_trip_id} on date {start_date}')
date = datetime.strptime(start_date, '%Y%m%d').date()
if not realtime_trip.route_id:
return None
with transaction.atomic():
        # .first() returns None rather than raising DoesNotExist, so no
        # try/except is needed here.
        trip = Trip.objects.filter(gtfs_trip_id=gtfs_trip_id, route__gtfs_route_id=realtime_trip.route_id).order_by('-version').first()
if trip is None:
trip = add_missing_trip(feed, realtime_trip)
if trip is None:
return None
if DEBUG:
print(f'Found trip: {trip}')
try:
return TripDate.objects.get(trip=trip, date=date)
        except TripDate.DoesNotExist:
pass
schedule_relationship = to_schedule_relationship(realtime_trip.schedule_relationship)
tripdate = TripDate(trip=trip, date=date, added_from_realtime=True, vehicle_id=vehicle_id, schedule_relationship=schedule_relationship)
tripdate.save()
if trip.scheduled:
global_stats['missing_tripdates'] += 1
else:
global_stats['unscheduled_tripdates'] += 1
if not trip.added_from_realtime:
global_stats['missing_tripdates_but_trip_existed'] += 1
print(f'Trip {trip} was in the timetable, just not for tripdate {tripdate}')
return tripdate
def add_missing_trip(feed, realtime_trip):
gtfs_trip_id = realtime_trip.trip_id
if realtime_trip.route_id not in ROUTE_ID_SET:
return None
if realtime_trip.schedule_relationship == SCHEDULE_RELATIONSHIP['ADDED'] and '_' in gtfs_trip_id:
original_trip_id = gtfs_trip_id.split('_')[0]
trip = Trip.objects.filter(gtfs_trip_id=original_trip_id).order_by('-version').first()
if trip is not None:
global_stats['unscheduled_trips'] += 1
new_trip = trip.clone_to_unscheduled(gtfs_trip_id)
return new_trip
try:
route = Route.objects.get(feed=feed, gtfs_route_id=realtime_trip.route_id)
    except Route.DoesNotExist:
global_stats['missing_routes'] += 1
print(f'Route did not exist: {realtime_trip.route_id}')
return None
print(f'Trip with gtfs id {gtfs_trip_id} (from route {route}) does not exist!!')
newtrip = Trip(
gtfs_trip_id=gtfs_trip_id,
active=True,
direction=0,
route=route,
added_from_realtime=True,
wheelchair_accessible=False,
bikes_allowed=False,
scheduled=(realtime_trip.schedule_relationship == SCHEDULE_RELATIONSHIP['SCHEDULED'])
)
newtrip.save()
global_stats['missing_trips'] += 1
if DEBUG:
print(f'Added new trip: {newtrip}')
return newtrip
def format_stop_time(time, plus_24h):
hour = time.hour
if plus_24h:
hour += 24
return f'{hour:02d}:{time.minute:02d}:{time.second:02d}'
def get_stop(feed, stop_id, stops):
with transaction.atomic():
try:
stop = Stop.objects.get(feed=feed, gtfs_stop_id=stop_id)
except Stop.DoesNotExist:
stop = Stop(feed=feed, gtfs_stop_id=stop_id, name='Unknown', position=None)
stop.save()
stops[stop.gtfs_stop_id] = stop
global_stats['missing_stops'] += 1
return stop
def process_trip_update(feed, trip_dates, stops, feed_tz, trip_update, threshold, start_date_str, start_date_str_after_midnight):
global_stats['trip_updates_found'] += 1
trip = trip_update.trip
plus_24h = False
if trip.start_time < '04:00:00':
if trip.start_date == start_date_str_after_midnight:
trip.start_date = start_date_str
plus_24h = True
else:
return
if trip.start_date != start_date_str:
return
# Some trips are missing ids altogether.
# Construct an id from 'unscheduled' and the vehicle id
if not trip.trip_id:
if not trip_update.vehicle.id:
return
global_stats['missing_trip_id'] += 1
trip.trip_id = 'unscheduled_' + trip_update.vehicle.id
key = (trip.trip_id, start_date_str)
if key not in trip_dates:
trip_date = add_missing_tripdate(feed, trip, trip_update.vehicle.id)
if trip_date is not None:
if DEBUG:
print("COULDN'T FIND IN SCHEDULE: {}".format(key))
print(trip)
trip_dates[key] = trip_date
else:
trip_date = trip_dates[key]
if trip_date is None:
return
if trip.schedule_relationship != SCHEDULE_RELATIONSHIP['SCHEDULED']:
trip_date.schedule_relationship = to_schedule_relationship(trip.schedule_relationship)
trip_date.vehicle_id = trip_update.vehicle.id
trip_date.save()
if DEBUG:
print(f'Upserting realtime entries for tripdate {trip_date.id}')
for stop_update in trip_update.stop_time_update:
global_stats['stop_updates_found'] += 1
if stop_update.arrival.time < threshold:
if stop_update.stop_id in stops:
stop = stops[stop_update.stop_id]
else:
stop = get_stop(feed, stop_update.stop_id, stops)
arrival_time = datetime.fromtimestamp(stop_update.arrival.time, feed_tz)
departure_time = datetime.fromtimestamp(stop_update.departure.time, feed_tz)
schedule_relationship = to_schedule_relationship(stop_update.schedule_relationship)
# Upsert RealtimeEntry
# RealtimeEntry.objects.upsert(trip_date.id, stop.id, stop_update.stop_sequence, arrival_time, stop_update.arrival.delay, departure_time, stop_update.departure.delay)
upsert_log[(trip_date.id, stop.id, stop_update.stop_sequence)] = (arrival_time, stop_update.arrival.delay, departure_time, stop_update.departure.delay, schedule_relationship)
global_stats['stop_updates_stored'] += 1
def process_dump_contents(feed, contents, trip_dates, stops, fetchtime, feed_tz, start_date_str, start_date_str_after_midnight):
global global_stats
global_stats = {
'trip_updates_found': 0,
'stop_updates_found': 0,
'stop_updates_stored': 0,
'missing_trips': 0,
'unscheduled_trips': 0,
'unscheduled_tripdates': 0,
'missing_stops': 0,
'missing_trip_id': 0,
'missing_tripdates': 0,
'missing_tripdates_but_trip_existed': 0,
'missing_routes': 0,
}
feed_message = gtfs_realtime_pb2.FeedMessage()
feed_message.ParseFromString(contents)
threshold = int((fetchtime + timedelta(minutes=5)).timestamp())
for entity in feed_message.entity:
if entity.HasField('trip_update'):
process_trip_update(feed, trip_dates, stops, feed_tz, entity.trip_update, threshold, start_date_str, start_date_str_after_midnight)
for stat in global_stats:
print(f'{stat}: {global_stats[stat]}')
def fetch_next_dumps(realtime_progress, num_dumps, temp_dir):
print(f'Processing next {num_dumps} realtime dumps in {realtime_progress}')
client = boto3.client('s3')
file_prefix = f'{realtime_progress.feed.slug}/'
    last_processed_file = realtime_progress.last_processed_dump
    if last_processed_file is None:
        last_processed_file = file_prefix + realtime_progress.start_time().strftime('%Y-%m-%dT%H:%M:%S.%f')
    # last_processed_file is always set by this point, so one call suffices.
    response = client.list_objects_v2(Bucket=S3_BUCKET_NAME, Prefix=file_prefix, StartAfter=last_processed_file, MaxKeys=num_dumps)
results = []
    if response['KeyCount'] != 0:
        # Create the resource once instead of on every loop iteration.
        s3 = boto3.resource('s3')
        for content in response['Contents']:
            key = content['Key']
            tmp_path = os.path.join(temp_dir, key.split('/')[1])
            if DEBUG:
                print(f'Fetching {key}...')
            s3.Object(S3_BUCKET_NAME, key).download_file(tmp_path)
            results.append((key, tmp_path))
else:
print(f'No new realtime dump data for {realtime_progress.feed}')
return results
def refresh_route_list():
    # The global statement must name ROUTE_ID_SET; declaring ROUTE_LIST left
    # the assignment below creating a discarded local variable.
    global ROUTE_ID_SET
    ROUTE_ID_SET = set(Route.objects.values_list('gtfs_route_id', flat=True))
def clear_upsert_log():
global upsert_log
upsert_log = {}
def write_upsert_log():
global upsert_log
print(f'Upsert log contains {len(upsert_log)} entries.')
list_batch = []
for realtime_key, value in upsert_log.items():
#RealtimeEntry.objects.upsert(trip_date.id, stop.id, stop_update.stop_sequence, arrival_time, stop_update.arrival.delay, departure_time, stop_update.departure.delay)
# RealtimeEntry.objects.upsert(*realtime_key, *value)
list_batch.append((*realtime_key, *value))
start = 0
while start < len(list_batch):
batch = list_batch[start : start + 500]
start += 500
if len(batch) != 0:
RealtimeEntry.objects.upsert_bulk(batch)
def process_next(realtime_progress, num_dumps):
feed = realtime_progress.feed
succeed = realtime_progress.take_processing_lock()
clear_upsert_log()
if not succeed:
# It'll try again with another progress
return
try:
with tempfile.TemporaryDirectory() as temp_dir:
cached_dumps = fetch_next_dumps(realtime_progress, num_dumps, temp_dir)
feed_tz = pytz.timezone(feed.timezone)
start_date_str = realtime_progress.start_date.strftime('%Y%m%d')
start_date_str_after_midnight = (realtime_progress.start_date + timedelta(days=1)).strftime('%Y%m%d')
if len(cached_dumps) != 0:
# Prefetch stops
stops = {}
for stop in Stop.objects.filter(feed=feed):
stops[stop.gtfs_stop_id] = stop
# Prefetch Routes
refresh_route_list()
# Prefetch Trip Dates
                trip_dates = {}
for trip_date in TripDate.objects.filter(date=realtime_progress.start_date).prefetch_related('trip'):
datestr = trip_date.date.strftime('%Y%m%d')
trip_dates[(trip_date.trip.gtfs_trip_id, datestr)] = trip_date
                for key, tmp_file in cached_dumps:
                    # Slice off the '.pb' extension; rstrip('.pb') would also
                    # strip trailing 'p'/'b' characters from the name.
                    datestr = os.path.split(key)[1][:-len('.pb')]
                    fetchtime = datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=timezone.utc)
                    # Assume no bus runs longer than 48h
                    fetchtime = fetchtime.astimezone(feed_tz)
with open(tmp_file, 'rb') as f:
contents = f.read()
print(f'Processing {key}')
process_dump_contents(feed, contents, trip_dates, stops, fetchtime, feed_tz, start_date_str, start_date_str_after_midnight)
if fetchtime > realtime_progress.end_time():
break
# Update where we're up to.
write_upsert_log()
if fetchtime > realtime_progress.end_time():
realtime_progress.update_progress(key, True)
else:
realtime_progress.update_progress(key, False)
finally:
realtime_progress.release_processing_lock()
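# A minimal usage sketch (an assumption, not part of the original module):
# `realtime_progress` stands for a RealtimeProgress-like model instance
# exposing the feed, lock, and progress methods used above.
#
#   process_next(realtime_progress, num_dumps=50)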
400074
from collections import defaultdict
from itertools import groupby, product
import numpy as np
import pandas as pd
from scipy.stats import hmean, spearmanr
from statsmodels.stats.proportion import proportion_confint
import wordfreq
from conceptnet5.util import get_support_data_filename
from conceptnet5.vectors import standardized_uri
from conceptnet5.vectors.evaluation.wordsim import (
confidence_interval,
empty_comparison_table,
)
from conceptnet5.vectors.query import VectorSpaceWrapper
def read_google_analogies(filename):
"""
Read the 'questions-words.txt' file that comes with the word2vec package.
"""
quads = [
[standardized_uri('en', term) for term in line.rstrip().split(' ')]
for line in open(filename, encoding='utf-8')
if not line.startswith(':')
]
return quads
def read_turney_analogies(filename):
"""
Read Turney and Littman's dataset of SAT analogy questions. This data
requires permission to redistribute, so you have to ask <NAME>
for the file.
"""
questions = []
question_lines = []
with open(filename, encoding='utf-8') as file:
for line in file:
line = line.rstrip()
if line and not line.startswith('#'):
if len(line) == 1:
# A single letter on a line indicates the answer to a question.
answer_index = ord(line) - ord('a')
# Line 0 is a header we can discard.
raw_pairs = [qline.split(' ')[:2] for qline in question_lines[1:]]
concept_pairs = [
tuple(standardized_uri('en', term) for term in pair)
for pair in raw_pairs
]
# The first of the pairs we got is the prompt pair. The others are
# answers (a) through (e).
questions.append(
(concept_pairs[0], concept_pairs[1:], answer_index)
)
question_lines.clear()
else:
question_lines.append(line)
return questions
def read_train_pairs_semeval2012(subset, subclass):
"""
Read a set of three training pairs for a given subclass. These pairs are
used as prototypical examples of a given relation to which test pairs are compared.
"""
filename = 'semeval12-2/{}/Phase1Questions-{}.txt'.format(subset, subclass)
with open(get_support_data_filename(filename)) as file:
train_pairs = []
for i, line in enumerate(file):
if i in [4, 5, 6]:
pair = line.strip().split(':')
pair = tuple(pair)
train_pairs.append(pair)
return train_pairs
def read_turk_answers_semeval2012(subset, subclass, test_questions):
"""
A line represents one turker's answer to a given question. An answer has the
following format:
pair1, pair2, pair3, pair4, least_prototypical_pair, most_prototypical_pair, relation_name
    This function returns two dictionaries:
    * pairqnum2least - maps (question number, pair) to the number of times
      turkers judged that pair the least prototypical
    * pairqnum2most - the same count for the most prototypical judgments
    """
filename = 'semeval12-2/{}/Phase2Answers-{}.txt'.format(subset, subclass)
with open(get_support_data_filename(filename)) as file:
answers = []
for i, line in enumerate(file):
if i == 0:
continue
pairs = tuple(line.split('\t'))
answers.append(pairs)
pairqnum2least = defaultdict(int)
pairqnum2most = defaultdict(int)
    # Use a distinct name for each group to avoid shadowing the answers list.
    for question, group in groupby(answers, key=lambda x: x[:4]):
        question_num = test_questions.index(question)
        for answer in group:
pairqnum2least[(question_num, answer[4])] += 1
pairqnum2most[(question_num, answer[5])] += 1
return pairqnum2least, pairqnum2most
def read_test_questions_semeval2012(subset, subclass):
"""
Read test questions for a specific subclass. A test question has the following format:
pair1,pair2,pair3,pair4
"""
filename = 'semeval12-2/{}/Phase2Questions-{}.txt'.format(subset, subclass)
with open(get_support_data_filename(filename)) as file:
test_questions = []
for line in file:
pairs = tuple(line.strip().split(','))
test_questions.append(pairs)
return test_questions
def read_turk_ranks_semeval2012(subset, subclass):
"""
Read gold rankings of prototypicality, as computed using turkers answers to MaxDiff
questions.
A score is defined as the difference between the number of times the turkers judged
a pair the most prototypical and the number of times they judged it as the least
prototypical.
"""
filename = 'semeval12-2/{}/GoldRatings-{}.txt'.format(subset, subclass)
with open(get_support_data_filename(filename)) as file:
gold_ranks = []
for line in file:
if line.startswith('#'):
continue
gold_score, pair = line.split()
gold_score = float(gold_score)
gold_ranks.append((pair, gold_score))
return sorted(gold_ranks)
def read_bats(category):
"""
Read BATS dataset pairs for a specific category. Turn them into questions.
For some questions, BATS contains multiple answers. For example, the answer to an
analogy question Nicaragua:Spanish::Switzerland:? could be German, French, or Italian. These
will all be supplied as a list if they are an answer (b2). However, if they are a part of a
question (b1), only the first one will be used.
"""
filename = 'bats/{}.txt'.format(category)
pairs = []
with open(get_support_data_filename(filename)) as file:
for line in file:
if '\t' in line:
left, right = line.lower().split('\t')
else:
left, right = line.lower().split()
right = right.strip()
if '/' in right:
right = [i.strip() for i in right.split('/')]
else:
right = [i.strip() for i in right.split(',')]
pairs.append([left, right])
quads = []
for i in range(len(pairs)):
first_pair = pairs[i]
first_pair[1] = first_pair[1][
0
] # select only one term for b1, even if more may be available
second_pairs = [pair for j, pair in enumerate(pairs) if j != i]
for second_pair in second_pairs:
quad = []
# the first three elements of a quad are the two terms in first_pair and the first
# term of the second_pair
quad.extend(
[standardized_uri('en', term) for term in first_pair + second_pair[:1]]
)
# if the second element of the second pair (b2) is a list, it means there are multiple
# correct answers for b2. We want to keep all of them.
if isinstance(second_pair[1], list):
quad.append([standardized_uri('en', term) for term in second_pair[1]])
else:
quad.append(standardized_uri('en', second_pair[1]))
quads.append(quad)
return quads
def analogy_func(wrap, a1, b1, a2, weight_direct=2 / 3, weight_transpose=1 / 3):
"""
Find the vector representing the best b2 to complete the analogy
a1 : b1 :: a2 : b2, according to `pairwise_analogy_func`.
This is the partial derivative of `pairwise_analogy_func` with respect
to b2.
"""
va1 = wrap.get_vector(a1)
vb1 = wrap.get_vector(b1)
va2 = wrap.get_vector(a2)
return (vb1 - va1) * weight_direct + (va2 - va1) * weight_transpose + vb1
def best_analogy_3cosmul(wrap, subframe, a1, b1, a2):
"""
Find the best b2 to complete the analogy a1 : b1 :: a2 : b2, according
to the 3CosMul metric.
"""
va1 = wrap.get_vector(a1)
vb1 = wrap.get_vector(b1)
va2 = wrap.get_vector(a2)
sa1 = subframe.dot(va1)
sb1 = subframe.dot(vb1)
sa2 = subframe.dot(va2)
eps = 1e-6
mul3cos = (sb1 + 1 + eps) * (sa2 + 1 + eps) / (sa1 + 1 + eps)
best = mul3cos.dropna().nlargest(4)
prompt = (a1, b1, a2)
for term in best.index:
if term not in prompt:
return term
def pairwise_analogy_func(wrap, a1, b1, a2, b2, weight_direct, weight_transpose):
"""
Rate the quality of the analogy a1 : b1 :: a2 : b2.
"""
va1 = wrap.get_vector(a1)
vb1 = wrap.get_vector(b1)
va2 = wrap.get_vector(a2)
vb2 = wrap.get_vector(b2)
value = (
weight_direct * (vb2 - va2).dot(vb1 - va1)
+ weight_transpose * (vb2 - vb1).dot(va2 - va1)
+ vb2.dot(vb1)
+ va2.dot(va1)
)
return value
def eval_pairwise_analogies(
vectors, eval_filename, weight_direct, weight_transpose, subset='all'
):
total = 0
correct = 0
for idx, (prompt, choices, answer) in enumerate(
read_turney_analogies(eval_filename)
):
# Enable an artificial training/test split
if subset == 'all' or (subset == 'dev') == (idx % 2 == 0):
a1, b1 = prompt
choice_values = []
for choice in choices:
a2, b2 = choice
choice_values.append(
pairwise_analogy_func(
vectors, a1, b1, a2, b2, weight_direct, weight_transpose
)
)
our_answer = np.argmax(choice_values)
if our_answer == answer:
correct += 1
total += 1
low, high = proportion_confint(correct, total)
return pd.Series([correct / total, low, high], index=['acc', 'low', 'high'])
def optimize_weights(func, *args):
"""
Both eval_pairwise_analogies() and eval_semeval2012_analogies() have three
weights that can be tuned (and therefore two free parameters, as the total
weight does not matter):
- The *direct weight*, comparing (b2 - a2) to (b1 - a1)
- The *transpose weight*, comparing (b2 - b1) to (a2 - a1)
- The *similarity weight*, comparing b2 to b1 and a2 to a1
This function takes a function for which to optimize the weights as an
argument and returns the optimal weights, `weight_direct` and
`weight_transpose`.
"""
print('Tuning analogy weights')
weights = [
0.,
0.05,
0.1,
0.15,
0.2,
0.3,
0.35,
0.4,
0.5,
0.6,
0.65,
0.7,
0.8,
0.9,
1.0,
1.5,
2.0,
2.5,
3.0,
]
best_weights = None
best_acc = 0.
for weight_direct in weights:
for weight_transpose in weights:
scores = func(
*args,
weight_direct=weight_direct,
weight_transpose=weight_transpose,
subset='dev'
)
if isinstance(scores, list):
# If a function to optimize returns two results, like eval_semeval2012_analogies(),
# take their harmonic mean to compute the weights optimal for both results
acc = hmean([scores[0].loc['acc'], scores[1].loc['acc']])
else:
acc = scores.loc['acc']
if acc > best_acc:
print(weight_direct, weight_transpose, acc)
best_weights = (weight_direct, weight_transpose)
best_acc = acc
elif acc == best_acc:
print(weight_direct, weight_transpose, acc)
weight_direct, weight_transpose = best_weights
print()
return weight_direct, weight_transpose
def eval_google_analogies(vectors, subset='semantic', vocab_size=200000, verbose=False):
"""
Evaluate the Google Research analogies, released by Mikolov et al. along
with word2vec.
These analogies come in two flavors: semantic and syntactic. Numberbatch
is intended to be a semantic space, so we focus on semantic analogies.
The syntactic analogies are about whether you can inflect or conjugate a
particular word. The semantic analogies are about whether you can sort
words by their gender, and about geographic trivia.
I (Rob) think this data set is not very representative, but evaluating
against it is all the rage.
"""
filename = get_support_data_filename('google-analogies/{}-words.txt'.format(subset))
quads = read_google_analogies(filename)
return eval_open_vocab_analogies(vectors, quads, vocab_size, verbose)
def eval_open_vocab_analogies(vectors, quads, vocab_size=200000, verbose=False):
"""
Solve open vocabulary analogies, using 3CosMul function. This is used by Google and Bats
test sets.
"""
vocab = choose_vocab(quads, vocab_size)
vecs = np.vstack([vectors.get_vector(word) for word in vocab])
tframe = pd.DataFrame(vecs, index=vocab)
total = 0
correct = 0
seen_mistakes = set()
for quad in quads:
prompt = quad[:3]
answer = quad[3]
result = best_analogy_3cosmul(vectors, tframe, *prompt)
is_correct = (isinstance(answer, list) and result in answer) or (
result == answer
)
if is_correct:
correct += 1
else:
if verbose and result not in seen_mistakes:
print(
"%s : %s :: %s : [%s] (should be %s)"
% (quad[0], quad[1], quad[2], result, answer)
)
seen_mistakes.add(result)
total += 1
low, high = proportion_confint(correct, total)
result = pd.Series([correct / total, low, high], index=['acc', 'low', 'high'])
if verbose:
print(result)
return result
def choose_vocab(quads, vocab_size):
"""
Google and Bats analogies are not multiple-choice; instead, you're supposed to pick
the best match out of your vector space's entire vocabulary, excluding the
three words used in the prompt. The vocabulary size can matter a lot: Set
it too high and you'll get low-frequency words that the data set wasn't
looking for as answers. Set it too low and the correct answers won't be
in the vocabulary.
Set vocab_size='cheat' to see the results for an unrealistically optimal
vocabulary (the vocabulary of the set of answer words).
"""
if vocab_size == 'cheat':
vocab = [
standardized_uri('en', word)
for word in sorted(set([quad[3] for quad in quads]))
]
else:
vocab = [
standardized_uri('en', word)
for word in wordfreq.top_n_list('en', vocab_size)
]
return vocab
def eval_semeval2012_analogies(
vectors, weight_direct, weight_transpose, subset, subclass
):
"""
For a set of test pairs:
* Compute a Spearman correlation coefficient between the ranks produced by vectors and
gold ranks.
* Compute an accuracy score of answering MaxDiff questions.
"""
train_pairs = read_train_pairs_semeval2012(subset, subclass)
test_questions = read_test_questions_semeval2012(subset, subclass)
pairqnum2least, pairqnum2most = read_turk_answers_semeval2012(
subset, subclass, test_questions
)
turk_rank = read_turk_ranks_semeval2012(subset, subclass)
pairs_to_rank = [pair for pair, score in turk_rank]
# Assign a score to each pair, according to pairwise_analogy_func
our_pair_scores = {}
for pair in pairs_to_rank:
rank_pair_scores = []
for train_pair in train_pairs:
pair_to_rank = pair.strip().replace('"', '').split(':')
score = pairwise_analogy_func(
vectors,
standardized_uri('en', train_pair[0]),
standardized_uri('en', train_pair[1]),
standardized_uri('en', pair_to_rank[0]),
standardized_uri('en', pair_to_rank[1]),
weight_direct,
weight_transpose,
)
rank_pair_scores.append(score)
our_pair_scores[pair] = np.mean(rank_pair_scores)
# Answer MaxDiff questions using the ranks from the previous step
correct_most = 0
correct_least = 0
total = 0
for i, question in enumerate(test_questions):
question_pairs_scores = []
for question_pair in question:
score = our_pair_scores[question_pair]
question_pairs_scores.append(score)
our_answer_most = question[np.argmax(question_pairs_scores)]
our_answer_least = question[np.argmin(question_pairs_scores)]
votes_guess_least = pairqnum2least[(i, our_answer_least)]
votes_guess_most = pairqnum2most[(i, our_answer_most)]
max_votes_least = 0
max_votes_most = 0
for question_pair in question:
num_votes_least = pairqnum2least[(i, question_pair)]
num_votes_most = pairqnum2most[(i, question_pair)]
if num_votes_least > max_votes_least:
max_votes_least = num_votes_least
if num_votes_most > max_votes_most:
max_votes_most = num_votes_most
# a guess is correct if it got the same number of votes as the most frequent turkers' answer
if votes_guess_least == max_votes_least:
correct_least += 1
if votes_guess_most == max_votes_most:
correct_most += 1
total += 1
# Compute Spearman correlation of our ranks and MT ranks
our_semeval_scores = [score for pair, score in sorted(our_pair_scores.items())]
turk_semeval_scores = [score for pair, score in turk_rank]
spearman = spearmanr(our_semeval_scores, turk_semeval_scores)[0]
spearman_results = confidence_interval(spearman, total)
# Compute an accuracy score on MaxDiff questions
maxdiff = (correct_least + correct_most) / (2 * total)
low_maxdiff, high_maxdiff = proportion_confint(
(correct_least + correct_most), (2 * total)
)
maxdiff_results = pd.Series(
[maxdiff, low_maxdiff, high_maxdiff], index=['acc', 'low', 'high']
)
return [maxdiff_results, spearman_results]
def eval_semeval2012_global(vectors, weight_direct, weight_transpose, subset):
"""
Return the average Spearman score and MaxDiff accuracy score for the entire test set.
"""
spearman_scores = []
maxdiff_scores = []
    for subclass in product(range(1, 11), 'abcdefghij'):
subclass = ''.join([str(element) for element in subclass])
try:
maxdiff, spearman = eval_semeval2012_analogies(
vectors, weight_direct, weight_transpose, subset, subclass
)
spearman_scores.append(spearman)
maxdiff_scores.append(maxdiff)
except FileNotFoundError:
continue
spearman_output = []
maxdiff_output = []
for interval in ['acc', 'low', 'high']:
average_maxdiff_score = np.mean([score[interval] for score in maxdiff_scores])
average_spearman_score = np.mean([score[interval] for score in spearman_scores])
spearman_output.append(average_spearman_score)
maxdiff_output.append(average_maxdiff_score)
return [
pd.Series(maxdiff_output, index=['acc', 'low', 'high']),
pd.Series(spearman_output, index=['acc', 'low', 'high']),
]
def eval_bats_category(vectors, category, vocab_size=200000, verbose=False):
"""
Evaluate a single category of BATS dataset.
"""
quads = read_bats(category)
category_results = eval_open_vocab_analogies(vectors, quads, vocab_size, verbose)
return category_results
def evaluate(
frame,
analogy_filename,
subset='test',
tune_analogies=True,
scope='global',
google_vocab_size=200000,
):
"""
Run SAT and Semeval12-2 evaluations.
Required parameters:
frame
a DataFrame containing term vectors
analogy_filename
the filename of Turney's SAT evaluation data
Optional parameters:
subset (string, default 'test')
a subset of a data to evaluate on, either 'test' or 'dev'
tune_analogies (boolean, default True)
tune the weights in eval_pairwise_analogies()
semeval_scope (string, default 'global')
'global' to get the average of the results across all subclasses of semeval12-2,
or another string to get the results broken down by a subclass (1a, 1b, etc.)
"""
vectors = VectorSpaceWrapper(frame=frame)
results = empty_comparison_table()
if tune_analogies:
sat_weights = optimize_weights(
eval_pairwise_analogies, vectors, analogy_filename
)
semeval_weights = optimize_weights(eval_semeval2012_global, vectors)
else:
sat_weights = (0.35, 0.65)
semeval_weights = (0.3, 0.35)
sat_results = eval_pairwise_analogies(
vectors, analogy_filename, sat_weights[0], sat_weights[1], subset
)
results.loc['sat-analogies'] = sat_results
for gsubset in ['semantic', 'syntactic']:
google_results = eval_google_analogies(
vectors, subset=gsubset, vocab_size=google_vocab_size
)
results.loc['google-%s' % gsubset] = google_results
# There's no meaningful "all" subset for semeval12, because the dev and
# test data are stored entirely separately. Just use "test".
if subset == 'dev':
semeval12_subset = 'dev'
else:
semeval12_subset = 'test'
if scope == 'global':
maxdiff_score, spearman_score = eval_semeval2012_global(
vectors, semeval_weights[0], semeval_weights[1], semeval12_subset
)
results.loc['semeval12-spearman'] = spearman_score
results.loc['semeval12-maxdiff'] = maxdiff_score
else:
        for subclass in product(range(1, 11), 'abcdefghij'):
subclass = ''.join([str(element) for element in subclass])
try:
maxdiff_score, spearman_score = eval_semeval2012_analogies(
vectors,
semeval_weights[0],
semeval_weights[1],
semeval12_subset,
subclass,
)
results.loc['semeval12-{}-spearman'.format(subclass)] = spearman_score
results.loc['semeval12-{}-maxdiff'.format(subclass)] = maxdiff_score
except FileNotFoundError:
continue
bats_results = []
for category in product('DEIL', range(1, 11)):
category = ''.join([str(element) for element in category])
quads = read_bats(category)
category_results = eval_open_vocab_analogies(vectors, quads)
bats_results.append((category, category_results))
if scope == 'global':
average_scores = []
for interval in ['acc', 'low', 'high']:
average_scores.append(
np.mean([result[interval] for name, result in bats_results])
)
results.loc['bats'] = pd.Series(average_scores, index=['acc', 'low', 'high'])
else:
for name, result in bats_results:
results.loc['bats-{}'.format(''.join(name))] = result
return results
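# A minimal usage sketch (an assumption, not part of the original module).
# `frame` is a DataFrame of term vectors; the SAT data path is hypothetical
# and must be obtained separately (see read_turney_analogies).
#
#   results = evaluate(frame, 'data/raw/analogy/SAT-package-V3.txt',
#                      subset='test', tune_analogies=False)
#   print(results)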
400079
import numpy as np
import minimal_pytorch_rasterizer
import torch
from torch import nn
import nvdiffrast.torch as dr
class UVRenderer(torch.nn.Module):
def __init__(self, H, W, faces_path='data/uv_renderer/face_tex.npy',
vertice_values_path='data/uv_renderer/uv.npy'):
super().__init__()
faces_cpu = np.load(faces_path)
uv_cpu = np.load(vertice_values_path)
self.faces = torch.nn.Parameter(torch.tensor(faces_cpu, dtype=torch.int32).contiguous(), requires_grad=False)
self.vertice_values = torch.nn.Parameter(torch.tensor(uv_cpu, dtype=torch.float32).contiguous(),
requires_grad=False)
self.pinhole = minimal_pytorch_rasterizer.Pinhole2D(
fx=1, fy=1,
cx=0, cy=0,
h=H, w=W
)
    def set_vertice_values(self, vertice_values):
        self.vertice_values = torch.nn.Parameter(
            torch.tensor(vertice_values, dtype=torch.float32).to(self.vertice_values.device), requires_grad=False)
def forward(self, verts, norm=True, negbg=True, return_mask=False):
N = verts.shape[0]
uvs = []
for i in range(N):
v = verts[i]
uv = minimal_pytorch_rasterizer.project_mesh(v, self.faces, self.vertice_values, self.pinhole)
uvs.append(uv)
uvs = torch.stack(uvs, dim=0).permute(0, 3, 1, 2)
mask = (uvs > 0).sum(dim=1, keepdim=True).float().clamp(0., 1.)
if norm:
uvs = (uvs * 2 - 1.)
if negbg:
uvs = uvs * mask - 10 * torch.logical_not(mask)
if return_mask:
return uvs, mask
else:
return uvs
class NVDiffRastUVRenderer(torch.nn.Module):
def __init__(self, faces_path='data/uv_renderer/face_tex.npy',
uv_vert_values_path='data/uv_renderer/uv.npy'):
super().__init__()
self.glctx = dr.RasterizeGLContext()
# load faces
self.faces = nn.Parameter(
torch.tensor(np.load(faces_path), dtype=torch.int32).contiguous(),
requires_grad=False
)
# load uv vert values
self.uv_vert_values = nn.Parameter(
torch.tensor(np.load(uv_vert_values_path), dtype=torch.float32).contiguous(),
requires_grad=False
)
def convert_to_ndc(self, verts, calibration_matrix, orig_w, orig_h, near=0.0001, far=10.0, invert_verts=True):
device = verts.device
# unproject verts
if invert_verts:
calibration_matrix_inv = torch.inverse(calibration_matrix)
verts_3d = torch.bmm(verts, calibration_matrix_inv.transpose(1, 2))
else:
verts_3d = verts
# build ndc projection matrix
matrix_ndc = []
for batch_i in range(calibration_matrix.shape[0]):
fx, fy = calibration_matrix[batch_i, 0, 0], calibration_matrix[batch_i, 1, 1]
cx, cy = calibration_matrix[batch_i, 0, 2], calibration_matrix[batch_i, 1, 2]
matrix_ndc.append(torch.tensor([
[2*fx/orig_w, 0.0, (orig_w - 2*cx)/orig_w, 0.0],
[0.0, -2*fy/orig_h, -(orig_h - 2*cy)/orig_h, 0.0],
[0.0, 0.0, (-far - near) / (far - near), -2.0*far*near/(far-near)],
[0.0, 0.0, -1.0, 0.0]
], device=device))
matrix_ndc = torch.stack(matrix_ndc, dim=0)
# convert verts to verts ndc
verts_3d_homo = torch.cat([verts_3d, torch.ones(*verts_3d.shape[:2], 1, device=device)], dim=-1)
verts_3d_homo[:, :, 2] *= -1 # invert z-axis
verts_ndc = torch.bmm(verts_3d_homo, matrix_ndc.transpose(1, 2))
return verts_ndc, matrix_ndc
def render(self, verts_ndc, matrix_ndc, render_h=256, render_w=256):
device = verts_ndc.device
rast, rast_db = dr.rasterize(self.glctx, verts_ndc, self.faces, resolution=[render_h, render_w])
mask = (rast[:, :, :, 2] > 0.0).unsqueeze(-1).type(torch.float32)
uv, uv_da = dr.interpolate(self.uv_vert_values, rast, self.faces, rast_db=rast_db, diff_attrs='all')
# invert y-axis
inv_idx = torch.arange(uv.shape[1] - 1, -1, -1).long().to(device)
uv = uv.index_select(1, inv_idx)
uv_da = uv_da.index_select(1, inv_idx)
mask = mask.index_select(1, inv_idx)
# make channel dim second
uv = uv.permute(0, 3, 1, 2)
uv_da = uv_da.permute(0, 3, 1, 2)
mask = mask.permute(0, 3, 1, 2)
rast = rast.permute(0, 3, 1, 2)
# norm uv to [-1.0, 1.0]
uv = 2 * uv - 1
# set empty pixels to -10.0
uv = uv * mask + (-10.0) * (1 - mask)
return uv, uv_da, mask, rast
def texture(self, texture, uv, mask=None, uv_da=None, mip=None, filter_mode='auto', boundary_mode='wrap', max_mip_level=None):
texture = texture.permute(0, 2, 3, 1).contiguous()
uv = (uv.permute(0, 2, 3, 1).contiguous() + 1) / 2 # norm to [0.0, 1.0]
if uv_da is not None:
uv_da = uv_da.permute(0, 2, 3, 1).contiguous()
sampled_texture = dr.texture(
texture,
uv,
uv_da=uv_da,
mip=mip,
filter_mode=filter_mode,
boundary_mode=boundary_mode,
max_mip_level=max_mip_level,
)
sampled_texture = sampled_texture.permute(0, 3, 1, 2)
if mask is not None:
sampled_texture = sampled_texture * mask
return sampled_texture
def antialias(self, color, rast, verts_ndc, topology_hash=None, pos_gradient_boost=1.0):
color = color.permute(0, 2, 3, 1).contiguous()
rast = rast.permute(0, 2, 3, 1).contiguous()
color = dr.antialias(
color,
rast,
verts_ndc,
self.faces,
topology_hash=topology_hash,
pos_gradient_boost=pos_gradient_boost
)
color = color.permute(0, 3, 1, 2)
return color
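# A minimal usage sketch (an assumption, not part of the original module):
# `verts` is an (N, V, 3) float tensor on the same device as the renderer.
#
#   renderer = UVRenderer(H=256, W=256)
#   uv, mask = renderer(verts, return_mask=True)  # uv: (N, 2, H, W)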
400133
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = "\n" + f.read()
if __name__ == "__main__":
setup(
name="paragraph",
use_scm_version=True,
author="<NAME>",
author_email="<EMAIL>",
description="A computation graph micro-framework providing seamless lazy and concurrent evaluation.",
long_description=long_description,
long_description_content_type="text/x-rst",
keywords=["computation graph", "concurrent", "lazy"],
url="https://github.com/Othoz/paragraph",
project_urls={
"Bug Tracker": "https://github.com/Othoz/paragraph/issues",
"Documentation": "http://paragraph.readthedocs.io/en/latest/",
},
packages=["paragraph"],
license="MIT",
python_requires=">=3.6",
setup_requires=['setuptools_scm'],
install_requires=[
"attrs>=18.1.0",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation",
],
)
400157
from compressor.conf import settings
from compressor.filters import CompilerFilter
class ClosureCompilerFilter(CompilerFilter):
command = "{binary} {args}"
options = (
("binary", settings.COMPRESS_CLOSURE_COMPILER_BINARY),
("args", settings.COMPRESS_CLOSURE_COMPILER_ARGUMENTS),
)
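# A minimal settings sketch (an assumption; the values are hypothetical and
# only the setting names come from the options tuple above):
#
#   COMPRESS_CLOSURE_COMPILER_BINARY = 'closure-compiler'
#   COMPRESS_CLOSURE_COMPILER_ARGUMENTS = '--compilation_level SIMPLE_OPTIMIZATIONS'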
400179
import copy
import typing
from commercetools.platform import models
class Paginator:
"""This paginator uses the offset kwarg for retrieving the pages
Example::
paginator = Paginator(client.products.query, sort=["id asc"])
for product in paginator:
print(product)
paginator = Paginator(client.products.query, sort=["id asc"])
for product in paginator[:-20]:
print(product)
"""
def __init__(
self, operation: typing.Callable, **kwargs: typing.Dict[str, typing.Any]
):
if not callable(operation):
raise ValueError("Expected a callable as first argument")
if "offset" in kwargs or "limit" in kwargs:
raise ValueError(
"It is not possible to supply either the offset or limit "
"keyword arguments when using the paginator."
)
self.page_size = 20
self._operation = operation
self._kwargs = kwargs
self._offset = 0
self._limit = None
def __iter__(self) -> typing.Generator[models.BaseResource, None, None]:
offset = self._offset or 0
limit = self._limit
num = 0
while True:
response = self._operation(
**self._kwargs, offset=offset, limit=self.page_size
)
if limit is not None and limit < 0:
limit = (response.total + limit) - offset
for item in response.results:
yield item
if limit is not None:
num += 1
if num >= limit:
return
offset += response.count
if offset >= response.total:
break
def _clone(self):
clone = self.__class__(operation=self._operation, **self._kwargs)
clone.page_size = self.page_size
return clone
def __getitem__(self, item):
if isinstance(item, slice):
clone = self._clone()
clone._offset = item.start
clone._limit = item.stop
return clone
raise IndexError
class CursorPaginator(Paginator):
"""This paginator uses a cursor (where clause) for pagination.
See https://docs.commercetools.com/http-api.html#paging
"""
def __iter__(self) -> typing.Generator[models.BaseResource, None, None]:
last_created_at = None
limit = self._limit
num = 0
kwargs = copy.deepcopy(self._kwargs)
where_clause = kwargs.setdefault("where", [])
if where_clause and not isinstance(where_clause, list):
where_clause = [where_clause]
kwargs["sort"] = "createdAt asc"
while True:
if last_created_at:
# copy list since we want to append the createdAt filter for
# every request with an other value.
kwargs["where"] = list(where_clause)
kwargs["where"].append(f'createdAt > "{last_created_at.isoformat()}"')
response = self._operation(**kwargs, limit=self.page_size)
for item in response.results:
last_created_at = item.created_at
yield item
if limit is not None:
num += 1
if num >= limit:
return
if response.count < self.page_size:
break
def __getitem__(self, item):
if isinstance(item, slice):
if item.start:
raise ValueError("Start slice is not supported")
clone = self._clone()
clone._limit = item.stop
return clone
raise IndexError
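# A minimal usage sketch for the cursor-based variant (an assumption, not
# part of the original module): `client` stands for a configured
# commercetools client.
#
#   paginator = CursorPaginator(client.products.query)
#   for product in paginator[:100]:
#       print(product.id)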
400199
from __future__ import print_function, division, absolute_import
import json
from collections import OrderedDict
from functools import partial
from os.path import basename
from future import standard_library
from littleutils import DecentJSONEncoder, withattrs, group_by_attr
standard_library.install_aliases()
import argparse
import os
import sys
from flask import Flask, request, jsonify, url_for
from flask.templating import render_template
from flask_humanize import Humanize
from werkzeug.routing import PathConverter
import sqlalchemy
from birdseye.db import Database
from birdseye.utils import short_path, IPYTHON_FILE_PATH, fix_abs_path, is_ipython_cell
app = Flask('birdseye')
app.jinja_env.auto_reload = True
Humanize(app)
class FileConverter(PathConverter):
regex = '.*?'
app.url_map.converters['file'] = FileConverter
db = Database()
Session = db.Session
Function = db.Function
Call = db.Call
@app.route('/')
@db.provide_session
def index(session):
all_paths = db.all_file_paths()
recent_calls = (session.query(*(Call.basic_columns + Function.basic_columns))
.join(Function)
.order_by(Call.start_time.desc())[:100])
files = OrderedDict()
for row in recent_calls:
if is_ipython_cell(row.file):
continue
files.setdefault(
row.file, OrderedDict()
).setdefault(
row.name, row
)
for path in all_paths:
files.setdefault(
path, OrderedDict()
)
short = partial(short_path, all_paths=all_paths)
return render_template('index.html',
short=short,
files=files)
@app.route('/file/<file:path>')
@db.provide_session
def file_view(session, path):
path = fix_abs_path(path)
# Get all calls and functions in this file
filtered_calls = (session.query(*(Call.basic_columns + Function.basic_columns))
.join(Function)
.filter_by(file=path)
.subquery('filtered_calls'))
# Get the latest call *time* for each function in the file
latest_calls = session.query(
filtered_calls.c.name,
sqlalchemy.func.max(filtered_calls.c.start_time).label('maxtime')
).group_by(
filtered_calls.c.name,
).subquery('latest_calls')
# Get the latest call for each function
query = session.query(filtered_calls).join(
latest_calls,
sqlalchemy.and_(
filtered_calls.c.name == latest_calls.c.name,
filtered_calls.c.start_time == latest_calls.c.maxtime,
)
).order_by(filtered_calls.c.start_time.desc())
funcs = group_by_attr(query, 'type')
# Add any functions which were never called
all_funcs = sorted(session.query(Function.name, Function.type)
.filter_by(file=path)
.distinct())
func_names = {row.name for row in query}
for func in all_funcs:
if func.name not in func_names:
funcs[func.type].append(func)
return render_template('file.html',
funcs=funcs,
is_ipython=path == IPYTHON_FILE_PATH,
full_path=path,
short_path=basename(path))
@app.route('/file/<file:path>/__function__/<func_name>')
@db.provide_session
def func_view(session, path, func_name):
path = fix_abs_path(path)
query = get_calls(session, path, func_name, 200)
if query:
func = query[0]
calls = [withattrs(Call(), **row._asdict()) for row in query]
else:
func = session.query(Function).filter_by(file=path, name=func_name)[0]
calls = None
return render_template('function.html',
func=func,
short_path=basename(path),
calls=calls)
@app.route('/api/file/<file:path>/__function__/<func_name>/latest_call/')
@db.provide_session
def latest_call(session, path, func_name):
path = fix_abs_path(path)
call = get_calls(session, path, func_name, 1)[0]
return jsonify(dict(
id=call.id,
url=url_for(call_view.__name__,
call_id=call.id),
))
def get_calls(session, path, func_name, limit):
return (session.query(*(Call.basic_columns + Function.basic_columns))
.join(Function)
.filter_by(file=path, name=func_name)
.order_by(Call.start_time.desc())[:limit])
@db.provide_session
def base_call_view(session, call_id, template):
call = session.query(Call).filter_by(id=call_id).one()
func = call.function
return render_template(template,
short_path=basename(func.file),
call=call,
func=func)
@app.route('/call/<call_id>')
def call_view(call_id):
return base_call_view(call_id, 'call.html')
@app.route('/ipython_call/<call_id>')
def ipython_call_view(call_id):
return base_call_view(call_id, 'ipython_call.html')
@app.route('/ipython_iframe/<call_id>')
def ipython_iframe_view(call_id):
"""
This view isn't generally used, it's just an easy way to play with the template
without a notebook.
"""
return render_template('ipython_iframe.html',
container_id='1234',
port=7777,
call_id=call_id)
@app.route('/kill', methods=['POST'])
def kill():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...'
@app.route('/api/call/<call_id>')
@db.provide_session
def api_call_view(session, call_id):
call = session.query(Call).filter_by(id=call_id).one()
func = call.function
return DecentJSONEncoder().encode(dict(
call=dict(data=call.parsed_data, **Call.basic_dict(call)),
function=dict(data=func.parsed_data, **Function.basic_dict(func))))
@app.route('/api/calls_by_body_hash/<body_hash>')
@db.provide_session
def calls_by_body_hash(session, body_hash):
query = (session.query(*Call.basic_columns + (Function.data,))
.join(Function)
.filter_by(body_hash=body_hash)
.order_by(Call.start_time.desc())[:200])
calls = [Call.basic_dict(withattrs(Call(), **row._asdict()))
for row in query]
function_data_set = {row.data for row in query}
ranges = set()
loop_ranges = set()
for function_data in function_data_set:
function_data = json.loads(function_data)
def add(key, ranges_set):
for node in function_data[key]:
ranges_set.add((node['start'], node['end']))
add('node_ranges', ranges)
# All functions are expected to have the same set
# of loop nodes
current_loop_ranges = set()
add('loop_ranges', current_loop_ranges)
assert loop_ranges in (set(), current_loop_ranges)
loop_ranges = current_loop_ranges
ranges = [dict(start=start, end=end) for start, end in ranges]
loop_ranges = [dict(start=start, end=end) for start, end in loop_ranges]
return DecentJSONEncoder().encode(dict(
calls=calls, ranges=ranges, loop_ranges=loop_ranges))
@app.route('/api/body_hashes_present/', methods=['POST'])
@db.provide_session
def body_hashes_present(session):
hashes = request.get_json()
query = (session.query(Function.body_hash, sqlalchemy.func.count(Call.id))
.outerjoin(Call)
.filter(Function.body_hash.in_(hashes))
.group_by(Function.body_hash))
return DecentJSONEncoder().encode([
dict(hash=h, count=count)
for h, count in query
])
def main(argv=sys.argv[1:]):
# Support legacy CLI where there was just one positional argument: the port
if len(argv) == 1 and argv[0].isdigit():
argv.insert(0, '--port')
parser = argparse.ArgumentParser(description="Bird's Eye: A graphical Python debugger")
parser.add_argument('-p', '--port', help='HTTP port, default is 7777', default=7777, type=int)
parser.add_argument('--host', help="HTTP host, default is 'localhost'", default='localhost')
args = parser.parse_args(argv)
app.run(
port=args.port,
host=args.host,
use_reloader=os.environ.get('BIRDSEYE_RELOADER') == '1',
)
if __name__ == '__main__':
main()
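# A minimal invocation sketch (an assumption; the flags come from the
# argparse setup above and the module path is hypothetical):
#
#   python -m birdseye.server --port 7777 --host localhost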
400245
from django.core.management import BaseCommand
from importer import models
from importer.tasks import find_and_run_next_batch_chunks
from workbaskets.validators import WorkflowStatus
def run_batch(batch: str, status: str, username: str):
import_batch = models.ImportBatch.objects.get(name=batch)
find_and_run_next_batch_chunks(import_batch, status, username)
class Command(BaseCommand):
help = "Import data from a TARIC XML file into TaMaTo"
def add_arguments(self, parser):
        parser.add_argument(
            "batch",
            # run_batch() looks the batch up by name, not by id.
            help="The name of the batch to be imported",
            type=str,
        )
parser.add_argument(
"-s",
"--status",
choices=[
WorkflowStatus.EDITING.value,
WorkflowStatus.PROPOSED.value,
WorkflowStatus.APPROVED.value,
WorkflowStatus.PUBLISHED.value,
],
help="The status of the workbaskets containing the import changes.",
type=str,
)
parser.add_argument(
"-u",
"--username",
help="The username to use for the owner of the workbaskets created.",
type=str,
)
def handle(self, *args, **options):
run_batch(
batch=options["batch"],
status=options["status"],
username=options["username"],
)
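# A minimal invocation sketch (an assumption; the command name comes from
# this module's filename, shown here as a hypothetical `run_import_batch`,
# and the -s value is illustrative):
#
#   python manage.py run_import_batch my_batch -s EDITING -u importer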
400285
from icevision.all import *
def _test_dl(x, y, recs):
assert len(recs) == 1
assert recs[0].img is None
assert x.shape == torch.Size([1, 3, 64, 64])
assert recs[0].segmentation.class_map is not None
assert recs[0].segmentation.class_map.num_classes == 32
if y is not None:
assert y.shape == torch.Size([1, 64, 64])
# check maximum value in y is not higher than 31, given we have 32 classes in total
assert y.max() < 32
assert y.min() >= 0
def test_train_dataloader(camvid_ds):
train_ds, _ = camvid_ds
    dl = models.fastai.unet.train_dl(
train_ds, batch_size=1, num_workers=0, shuffle=False
)
(x, y), records = first(dl)
assert records[0].record_id == "0006R0_f02340"
_test_dl(x, y, records)
def test_valid_dataloader(camvid_ds):
_, valid_ds = camvid_ds
dl = models.fastai.unet.valid_dl(
valid_ds, batch_size=1, num_workers=0, shuffle=False
)
(x, y), records = first(dl)
assert records[0].record_id == "0006R0_f02400"
_test_dl(x, y, records)
def test_infer_dataloader(camvid_ds):
_, valid_ds = camvid_ds
dl = models.fastai.unet.infer_dl(
valid_ds, batch_size=1, num_workers=0, shuffle=False
)
(x,), records = first(dl)
assert records[0].record_id == "0006R0_f02400"
_test_dl(x, None, records)
400304
from .state import *
from .target_selection import *
from .gpu_status import *
from .download_button import *
from .ps_process import *
from .run_proteinsolver import *
from .structure import *
400309
from typing import Tuple, Callable, Optional, cast
from ..model import Model
from ..config import registry
from ..types import Floats1d, Floats2d
from ..initializers import glorot_uniform_init, zero_init
from ..util import get_width, partial
InT = Floats2d
OutT = Floats2d
@registry.layers("Linear.v1")
def Linear(
nO: Optional[int] = None,
nI: Optional[int] = None,
*,
init_W: Callable = glorot_uniform_init,
init_b: Callable = zero_init,
) -> Model[InT, OutT]:
"""Multiply inputs by a weights matrix and adds a bias vector."""
return Model(
"linear",
forward,
init=partial(init, init_W, init_b),
dims={"nO": nO, "nI": nI},
params={"W": None, "b": None},
)
def forward(model: Model[InT, OutT], X: InT, is_train: bool) -> Tuple[OutT, Callable]:
W = cast(Floats2d, model.get_param("W"))
b = cast(Floats1d, model.get_param("b"))
Y = model.ops.gemm(X, W, trans2=True)
Y += b
def backprop(dY: OutT) -> InT:
model.inc_grad("b", dY.sum(axis=0))
model.inc_grad("W", model.ops.gemm(dY, X, trans1=True))
return model.ops.gemm(dY, W)
return Y, backprop
def init(
init_W: Callable,
init_b: Callable,
model: Model[InT, OutT],
X: Optional[InT] = None,
Y: Optional[OutT] = None,
) -> Model[InT, OutT]:
if X is not None:
model.set_dim("nI", get_width(X))
if Y is not None:
model.set_dim("nO", get_width(Y))
model.set_param("W", init_W(model.ops, (model.get_dim("nO"), model.get_dim("nI"))))
model.set_param("b", init_b(model.ops, (model.get_dim("nO"),)))
return model
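# Hedged usage sketch (assumes thinc's standard Model API -- `initialize`
# and `predict`; shapes are illustrative only):
#
#   import numpy
#   model = Linear(nO=2, nI=4)
#   X = numpy.zeros((8, 4), dtype="f")
#   model.initialize(X=X)   # runs `init` above, allocating W (2, 4) and b (2,)
#   Y = model.predict(X)    # calls `forward` with is_train=False; Y has shape (8, 2)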
|
400320
|
import math
import sys
import coloredlogs
from pyvox.parser import VoxParser
coloredlogs.install(level='DEBUG')
m = VoxParser(sys.argv[1]).parse()
img = m.to_dense()
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
cm = ListedColormap(np.array(m.palette, dtype='f')/256)
s = math.ceil(math.sqrt(img.shape[0]))
print('size', img.shape, s)
f, arr = plt.subplots(s,s)
for i, slc in enumerate(img):
    arr[i//s, i%s].imshow(slc, cmap=cm)
for a in range(i+1, s*s):
arr[a//s, a%s].imshow(np.zeros(img.shape[1:3]))
plt.show()
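# Usage (assumed invocation): python <this script> model.vox
# The first CLI argument is parsed by VoxParser above; each z-slice of the
# dense voxel grid is drawn on an s-by-s grid of subplots.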
|
400390
|
import pytest
from tasrif.processing_pipeline import SplitOperator
from tasrif.processing_pipeline.processing_operator import ProcessingOperator
class NotProcessingOperator:
pass
def test_error_is_raised_when_split_operators_are_not_ProcessingOperators(mocker):
with pytest.raises(ValueError) as exc:
SplitOperator(
[NotProcessingOperator(), NotProcessingOperator(), NotProcessingOperator()]
)
def test_error_is_raised_when_bind_list_does_not_match_operators(mocker):
split_operators = [
mocker.Mock(spec=ProcessingOperator),
mocker.Mock(spec=ProcessingOperator),
mocker.Mock(spec=ProcessingOperator),
]
bind_list = [0, 0]
with pytest.raises(ValueError) as exc:
SplitOperator(split_operators, bind_list=bind_list)
def test_args_are_split_correctly_when_no_bind_list_is_passed(mocker):
split_operators = []
mock_inputs = []
for i in range(3):
split_operator = mocker.Mock(spec=ProcessingOperator)
split_operators.append(split_operator)
mock_input = mocker.Mock()
mock_inputs.append(mock_input)
operator = SplitOperator(split_operators)
operator.process(*mock_inputs)
# Assert that the operators were called with the correct inputs.
for i in range(len(split_operators)):
correct_input = mock_inputs[i]
split_operators[i].process.assert_called_once_with(correct_input)
def test_args_are_split_correctly_when_bind_list_is_passed(mocker):
split_operators = []
mock_inputs = []
for i in range(3):
split_operator = mocker.Mock(spec=ProcessingOperator)
split_operators.append(split_operator)
mock_input = mocker.Mock()
mock_inputs.append(mock_input)
bind_list = [2, 1, 0]
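    # bind_list[i] names the input index routed to operator i, so with
    # [2, 1, 0] the three operators receive the inputs in reverse order.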
operator = SplitOperator(split_operators, bind_list=bind_list)
operator.process(*mock_inputs)
# Assert that the operators were called with the correct inputs.
for i in range(len(split_operators)):
correct_input = mock_inputs[bind_list[i]]
split_operators[i].process.assert_called_once_with(correct_input)
|
400470
|
import plotly
import plotly.graph_objects as go
from plotly.graph_objs.scatter import Line
from plotly.subplots import make_subplots
import plotly.express as px
import ipywidgets
from ipywidgets.widgets import Layout, HBox, VBox
from ipywidgets.embed import embed_minimal_html
import pandas as pd
import os,sys
import constants
import config
from config import args
def convert_3dpose_to_line_figs(poses, bones, pred_color='goldenrod', gt_color='red'):
figs = []
items_name = ["x","y","z",'class','joint_name']
if bones.max()==13:
joint_names = constants.LSP_14_names
elif bones.max()==23:
joint_names = constants.SMPL_24_names
for batch_inds, (pred, real) in enumerate(zip(*poses)):
pose_dict, color_maps = {}, {}
for bone_inds in bones:
si, ei = bone_inds
bone_name = '{}-{}'.format(joint_names[si], joint_names[ei])
pose_dict['pred_'+bone_name+'_start'] = [*pred[si],'pred_'+bone_name, joint_names[si]]
pose_dict['pred_'+bone_name+'_end'] = [*pred[ei],'pred_'+bone_name, joint_names[ei]]
color_maps['pred_'+bone_name] = pred_color
pose_dict['real_'+bone_name+'_start'] = [*real[si],'real_'+bone_name, joint_names[si]]
pose_dict['real_'+bone_name+'_end'] = [*real[ei],'real_'+bone_name, joint_names[ei]]
color_maps['real_'+bone_name] = gt_color
pred_real_pose_df = pd.DataFrame.from_dict(pose_dict,orient='index',columns=items_name)
pose3d_fig = px.line_3d(pred_real_pose_df, x="x", y="y", z="z", color='class', color_discrete_map=color_maps)#, text='joint_name'
figs.append(pose3d_fig)
return figs
def write_to_html(img_names, plot_dict, vis_cfg):
containers = []
raw_layout = Layout(overflow_x='scroll',border='2px solid black',width='1800px',height='',
flex_direction='row',display='flex')
for inds, img_name in enumerate(img_names):
Hboxes = []
for item in list(plot_dict.keys()):
fig = plot_dict[item]['figs'][inds]
fig['layout'] = {"title":{"text":img_name.replace(args().dataset_rootdir, '')}}
Hboxes.append(go.FigureWidget(fig))
containers.append(HBox(Hboxes,layout=raw_layout))
all_figs = VBox(containers)
save_name = os.path.join(vis_cfg['save_dir'],vis_cfg['save_name']+'.html')
embed_minimal_html(save_name, views=[all_figs], title=vis_cfg['save_name'], drop_defaults=True)
ipywidgets.Widget.close_all()
del all_figs, containers, Hboxes
def convert_image_list(images):
figs = []
for img in images:
figs.append(px.imshow(img))
return figs
if __name__ == '__main__':
import numpy as np
convert_3dpose_to_line_figs([np.random.rand(18).reshape((2,3,3)),np.random.rand(18).reshape((2,3,3))],np.array([[0,1],[1,2]]))
# import cv2
# imgs = [cv2.imread('/home/yusun/ROMP/demo/images/3dpw_sit_on_street.jpg') for i in range(3)]
# plot_dict = {'ds':{'figs':convert_image_list(imgs)}, 'd2':{'figs':convert_image_list(imgs)}, 'd3':{'figs':convert_image_list(imgs)}}
# vis_cfg = {'save_dir':'~'}
# write_to_html(['/asfd/safasdf.jpg','/asfd/asdf.jpg','/asfd/asdfasfd.jpg'], plot_dict, vis_cfg)
|
400539
|
import os, shutil
dir_path = os.path.dirname(os.path.realpath(__file__))
source = '/readme.md'
destinations = [
'/js/logipar/readme.md',
# '/python/pip/readme.md'
]
for d in destinations:
shutil.copyfile("{}{}".format(dir_path, source), "{}{}".format(dir_path, d))
|
400556
|
import os
import re
import time
from itertools import chain
from errbot import BotPlugin, re_botcmd
from errbot.core import ErrBot
from slack_sdk.errors import SlackApiError
import config_template
from lib import ApproveHelper, create_sdm_service, MSTeamsPlatform, PollerHelper, \
ShowResourcesHelper, ShowRolesHelper, SlackBoltPlatform, SlackRTMPlatform, \
ResourceGrantHelper, RoleGrantHelper
ACCESS_REGEX = r"\*{0,2}access to (.+)"
APPROVE_REGEX = r"\*{0,2}yes (.+)"
ASSIGN_ROLE_REGEX = r"\*{0,2}access to role (.+)"
SHOW_RESOURCES_REGEX = r"\*{0,2}show available resources\*{0,2}"
SHOW_ROLES_REGEX = r"\*{0,2}show available roles\*{0,2}"
FIVE_SECONDS = 5
ONE_MINUTE = 60
def get_callback_message_fn(bot):
def callback_message(msg):
msg.body = bot.plugin_manager.plugins['AccessBot'].clean_up_message(msg.body)
ErrBot.callback_message(bot, msg)
return callback_message
def get_platform(bot):
platform = bot.bot_config.BOT_PLATFORM if hasattr(bot.bot_config, 'BOT_PLATFORM') else None
if platform == 'ms-teams':
return MSTeamsPlatform(bot)
elif platform == 'slack-classic':
return SlackRTMPlatform(bot)
return SlackBoltPlatform(bot)
# pylint: disable=too-many-ancestors
class AccessBot(BotPlugin):
__grant_requests = {}
_platform = None
def activate(self):
super().activate()
self._platform = get_platform(self)
self._bot.MSG_ERROR_OCCURRED = 'An error occurred, please contact your SDM admin'
self._bot.callback_message = get_callback_message_fn(self._bot)
self['auto_approve_uses'] = {}
poller_helper = self.get_poller_helper()
self.start_poller(FIVE_SECONDS, poller_helper.stale_grant_requests_cleaner)
self.start_poller(ONE_MINUTE, poller_helper.stale_max_auto_approve_cleaner)
self._platform.activate()
def deactivate(self):
self._platform.deactivate()
super().deactivate()
def get_configuration_template(self):
return config_template.get()
def configure(self, configuration):
if configuration is not None and configuration != {}:
config = dict(chain(config_template.get().items(), configuration.items()))
else:
config = config_template.get()
super(AccessBot, self).configure(config)
def check_configuration(self, configuration):
pass
@re_botcmd(pattern=ACCESS_REGEX, flags=re.IGNORECASE, prefixed=False, re_cmd_name_help="access to resource-name")
def access_resource(self, message, match):
"""
Grant access to a resource (using the requester's email address)
"""
resource_name = re.sub(ACCESS_REGEX, "\\1", match.string.replace("*", ""))
if re.match("^role (.*)", resource_name):
self.log.debug("##SDM## AccessBot.access better match for assign_role")
return
if not self._platform.can_access_resource(message):
return
yield from self.get_resource_grant_helper().request_access(message, resource_name)
@re_botcmd(pattern=ASSIGN_ROLE_REGEX, flags=re.IGNORECASE, prefixed=False, re_cmd_name_help="access to role role-name")
def assign_role(self, message, match):
"""
Grant access to all resources in a role (using the requester's email address)
"""
if not self._platform.can_assign_role(message):
return
role_name = re.sub(ASSIGN_ROLE_REGEX, "\\1", match.string.replace("*", ""))
yield from self.get_role_grant_helper().request_access(message, role_name)
@re_botcmd(pattern=APPROVE_REGEX, flags=re.IGNORECASE, prefixed=False, hidden=True)
def approve(self, message, match):
"""
Approve a grant (resource or role)
"""
access_request_id = re.sub(APPROVE_REGEX, r"\1", match.string.replace("*", ""))
approver = message.frm
yield from self.get_approve_helper().execute(approver, access_request_id)
#pylint: disable=unused-argument
@re_botcmd(pattern=SHOW_RESOURCES_REGEX, flags=re.IGNORECASE, prefixed=False, re_cmd_name_help="show available resources [--filter expression]")
def show_resources(self, message, match):
"""
Show all available resources
"""
if not self._platform.can_show_resources(message):
return
filter = self.extract_filter(message.body)
yield from self.get_show_resources_helper().execute(message, filter=filter)
#pylint: disable=unused-argument
@re_botcmd(pattern=SHOW_ROLES_REGEX, flags=re.IGNORECASE, prefixed=False, re_cmd_name_help="show available roles")
def show_roles(self, message, match):
"""
Show all available roles
"""
if not self._platform.can_show_roles(message):
return
yield from self.get_show_roles_helper().execute(message)
@staticmethod
def get_admins():
return os.getenv("SDM_ADMINS", "").split(" ")
@staticmethod
def get_api_access_key():
return os.getenv("SDM_API_ACCESS_KEY")
@staticmethod
def get_api_secret_key():
return os.getenv("SDM_API_SECRET_KEY")
def get_sdm_service(self):
return create_sdm_service(self.get_api_access_key(), self.get_api_secret_key(), self.log)
def get_resource_grant_helper(self):
return ResourceGrantHelper(self)
def get_role_grant_helper(self):
return RoleGrantHelper(self)
def get_approve_helper(self):
return ApproveHelper(self)
def get_poller_helper(self):
return PollerHelper(self)
def get_show_resources_helper(self):
return ShowResourcesHelper(self)
def get_show_roles_helper(self):
return ShowRolesHelper(self)
def get_admin_ids(self):
return self._platform.get_admin_ids()
def is_valid_grant_request_id(self, request_id):
return request_id in self.__grant_requests
def enter_grant_request(self, request_id, message, sdm_object, sdm_account, grant_request_type):
self.__grant_requests[request_id] = {
'id': request_id,
'status': 'PENDING', # TODO Remove?
'timestamp': time.time(),
'message': message, # cannot be persisted in errbot state
'sdm_object': sdm_object,
'sdm_account': sdm_account,
'type': grant_request_type
}
def remove_grant_request(self, request_id):
self.__grant_requests.pop(request_id, None)
def get_grant_request(self, request_id):
return self.__grant_requests[request_id]
def get_grant_request_ids(self):
return list(self.__grant_requests.keys())
def add_thumbsup_reaction(self, message):
if self._bot.mode != 'test':
self._bot.add_reaction(message, "thumbsup")
def get_sender_nick(self, sender):
override = self.config['SENDER_NICK_OVERRIDE']
return override if override else f"@{sender.nick}"
def get_sender_id(self, sender):
return self._platform.get_sender_id(sender)
def get_sender_email(self, sender):
override = self.config['SENDER_EMAIL_OVERRIDE']
if override:
return override
sender_email = self._platform.get_sender_email(sender)
sdm_email_subaddress = self.config['EMAIL_SUBADDRESS']
if sdm_email_subaddress:
return sender_email.replace('@', f'+{sdm_email_subaddress}@')
return sender_email
def get_user_nick(self, user):
return self._platform.get_user_nick(user)
def increment_auto_approve_use(self, requester_id):
prev = 0
if requester_id in self['auto_approve_uses']:
prev = self['auto_approve_uses'][requester_id]
with self.mutable('auto_approve_uses') as aau:
aau[requester_id] = prev + 1
return self['auto_approve_uses'][requester_id]
def get_auto_approve_use(self, requester_id):
if requester_id not in self['auto_approve_uses']:
return 0
return self['auto_approve_uses'][requester_id]
def increase_auto_approve_uses_counter(self):
prev = 0
if 'poller_counter' in self['auto_approve_uses']:
prev = self['auto_approve_uses']['poller_counter']
with self.mutable('auto_approve_uses') as aau:
aau['poller_counter'] = prev + ONE_MINUTE # same value used for poller
return self['auto_approve_uses']['poller_counter']
def clean_auto_approve_uses(self):
self['auto_approve_uses'] = {}
def get_sdm_email_from_profile(self, sender, email_field):
try:
user_profile = self._bot.find_user_profile(sender.userid)
if user_profile['fields'] is None:
return None
for field in user_profile['fields'].values():
if field['label'] == email_field:
return field['value']
except SlackApiError as e:
if e.response['error'] == 'ratelimited':
self.log.error(
f"Slack throwed a ratelimited error. Too many requests were made\n{str(e)}"
)
raise Exception("Too many requests were made. Please, try again in 1 minute") from e
self.log.error(
f"I got an error when trying to get the user profile\n{str(e)}"
)
raise e
return None
def clean_up_message(self, message):
return self._platform.clean_up_message(message)
def format_access_request_params(self, resource_name, sender_nick):
return self._platform.format_access_request_params(resource_name, sender_nick)
def format_strikethrough(self, text):
return self._platform.format_strikethrough(text)
def get_rich_identifier(self, identifier, message):
return self._platform.get_rich_identifier(identifier, message)
def extract_filter(self, message):
if '--filter' in message:
filter = re.search(r'(?<=--filter ).+', message)
if not filter:
raise Exception('You must pass the filter arguments after the "--filter" tag.')
return filter.group()
return ''
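# Illustrative messages matched by the command regexes above (resource,
# role, request-id and filter values are made up):
#   "access to prod-db"                   -> access_resource
#   "access to role devops"               -> assign_role (access_resource defers via its "^role" check)
#   "yes 1a2b3c"                          -> approve
#   "show available resources --filter x" -> show_resources (extract_filter returns "x")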
|
400559
|
import pytest
from dbt.tests.util import run_dbt
from tests.functional.simple_snapshot.fixtures import models_slow__gen_sql
test_snapshots_changing_strategy__test_snapshot_sql = """
{# /*
Given the repro case for the snapshot build, we'd
expect to see both records have color='pink'
in their most recent rows.
*/ #}
with expected as (
select 1 as id, 'pink' as color union all
select 2 as id, 'pink' as color
),
actual as (
select id, color
from {{ ref('my_snapshot') }}
where color = 'pink'
and dbt_valid_to is null
)
select * from expected
except
select * from actual
union all
select * from actual
except
select * from expected
"""
snapshots_changing_strategy__snapshot_sql = """
{#
REPRO:
1. Run with check strategy
2. Add a new ts column and run with check strategy
3. Run with timestamp strategy on new ts column
Expect: new entry is added for changed rows in (3)
#}
{% snapshot my_snapshot %}
{#--------------- Configuration ------------ #}
{{ config(
target_schema=schema,
unique_key='id'
) }}
{% if var('strategy') == 'timestamp' %}
{{ config(strategy='timestamp', updated_at='updated_at') }}
{% else %}
{{ config(strategy='check', check_cols=['color']) }}
{% endif %}
{#--------------- Test setup ------------ #}
{% if var('step') == 1 %}
select 1 as id, 'blue' as color
union all
select 2 as id, 'red' as color
{% elif var('step') == 2 %}
-- change id=1 color from blue to green
-- id=2 is unchanged when using the check strategy
select 1 as id, 'green' as color, '2020-01-01'::date as updated_at
union all
select 2 as id, 'red' as color, '2020-01-01'::date as updated_at
{% elif var('step') == 3 %}
-- bump timestamp for both records. Expect that after this runs
-- using the timestamp strategy, both ids should have the color
-- 'pink' in the database. This should be in the future b/c we're
-- going to compare to the check timestamp, which will be _now_
select 1 as id, 'pink' as color, (now() + interval '1 day')::date as updated_at
union all
select 2 as id, 'pink' as color, (now() + interval '1 day')::date as updated_at
{% endif %}
{% endsnapshot %}
"""
@pytest.fixture(scope="class")
def models():
return {"gen.sql": models_slow__gen_sql}
@pytest.fixture(scope="class")
def snapshots():
return {"snapshot.sql": snapshots_changing_strategy__snapshot_sql}
@pytest.fixture(scope="class")
def tests():
return {"test_snapshot.sql": test_snapshots_changing_strategy__test_snapshot_sql}
def test_changing_strategy(project):
results = run_dbt(["snapshot", "--vars", "{strategy: check, step: 1}"])
assert len(results) == 1
results = run_dbt(["snapshot", "--vars", "{strategy: check, step: 2}"])
assert len(results) == 1
results = run_dbt(["snapshot", "--vars", "{strategy: timestamp, step: 3}"])
assert len(results) == 1
results = run_dbt(["test"])
assert len(results) == 1
|
400614
|
import unittest
from craft_ai import Client, errors as craft_err
from . import settings
from .utils import generate_entity_id
from .data import valid_data, invalid_data
class TestGetContextStateSuccess(unittest.TestCase):
"""Checks that the client succeeds when retrieving an agent's current state
with OK input.
"""
@classmethod
def setUpClass(cls):
cls.client = Client(settings.CRAFT_CFG)
cls.agent_id = generate_entity_id("test_get_agent_state")
def setUp(self):
self.client.delete_agent(self.agent_id)
self.client.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id)
self.client.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET)
def tearDown(self):
self.client.delete_agent(self.agent_id)
def test_get_agent_state_with_correct_input(self):
"""get_agent_state should succeed when given proper ID and timestamp.
It should give a proper JSON response with `timestamp` field being an
integer and equal to the requested one, and a `diff` field containing
a dict.
"""
context_state = self.client.get_agent_state(
self.agent_id, valid_data.VALID_TIMESTAMP
)
self.assertIsInstance(context_state, dict)
context_state_keys = context_state.keys()
self.assertTrue("timestamp" in context_state_keys)
self.assertIsInstance(context_state["timestamp"], int)
self.assertEqual(context_state["timestamp"], valid_data.VALID_TIMESTAMP)
class TestGetContextStateFailure(unittest.TestCase):
"""Checks that the client fails properly when getting an agent's context
with bad input"""
@classmethod
def setUpClass(cls):
cls.client = Client(settings.CRAFT_CFG)
cls.agent_id = generate_entity_id("test_get_state")
def setUp(self):
self.client.delete_agent(self.agent_id)
self.client.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id)
self.client.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET)
def tearDown(self):
self.client.delete_agent(self.agent_id)
def test_get_agent_state_with_invalid_id(self):
"""get_agent_state should fail when given a non-string/empty string ID
It should raise an error upon request for retrieval of
an agent with an ID that is not of type string, since agent IDs
should always be strings.
"""
for empty_id in invalid_data.UNDEFINED_KEY:
self.assertRaises(
craft_err.CraftAiBadRequestError,
self.client.get_agent_state,
invalid_data.UNDEFINED_KEY[empty_id],
valid_data.VALID_TIMESTAMP,
)
def test_get_agent_state_with_unknown_id(self):
"""get_agent_state should fail when given an unknown agent ID
It should raise an error upon request for the retrieval of an agent
that doesn't exist.
"""
self.assertRaises(
craft_err.CraftAiNotFoundError,
self.client.get_agent_state,
invalid_data.UNKNOWN_ID,
valid_data.VALID_TIMESTAMP,
)
def test_get_agent_state_with_invalid_timestamp(self):
for inv_ts in invalid_data.INVALID_TIMESTAMPS:
if inv_ts is not None:
self.assertRaises(
craft_err.CraftAiBadRequestError,
self.client.get_agent_state,
self.agent_id,
invalid_data.INVALID_TIMESTAMPS[inv_ts],
)
|
400697
|
#!/usr/bin/env python
from __future__ import with_statement # this is to work with python2.5
# import everything so that a session looks like tpips one
from pyps import workspace
with workspace("properties2.f",deleteOnClose=True) as w:
#Get foo function
foo1 = w.fun.FOO1
foo2 = w.fun.FOO2
foo3 = w.fun.FOO3
foo4 = w.fun.FOO4
foo5 = w.fun.FOO5
foo6 = w.fun.FOO6
# the return type of this one should be void
foo1.display (rc="c_printed_file")
# the return type of this one should be plouch
foo2.display (rc="c_printed_file",
DO_RETURN_TYPE_AS_TYPEDEF=True,
SET_RETURN_TYPE_AS_TYPEDEF_NEW_TYPE="plouch")
# the return type of this one should be void if the context
# has been restored
foo3.display (rc="c_printed_file")
# the return type of this one should be void
foo4.display (rc="c_printed_file",
DO_RETURN_TYPE_AS_TYPEDEF=False)
# the return type of this one should be void
foo5.display (rc="c_printed_file")
# the return type of this one should be the default value
# i.e p4a_smth
foo6.display (rc="c_printed_file",
DO_RETURN_TYPE_AS_TYPEDEF=True)
|
400728
|
import hashlib
import re
import time
from urllib import parse
from app.thirdparty.oneforall.common.query import Query
class NetCraft(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = domain
self.module = 'Dataset'
self.source = 'NetCraftQuery'
self.addr = 'https://searchdns.netcraft.com/?restriction=site+contains&position=limited'
self.page_num = 1
self.per_page_num = 20
def query(self):
"""
        Query the interface for subdomains and match them against the target domain
"""
        self.header = self.get_header()  # NetCraft checks the User-Agent header
self.proxy = self.get_proxy(self.source)
last = ''
while True:
time.sleep(self.delay)
self.proxy = self.get_proxy(self.source)
params = {'host': '*.' + self.domain,
'from': self.page_num}
resp = self.get(self.addr + last, params)
subdomains = self.match_subdomains(resp)
            if not subdomains:  # stop searching once no subdomains are found
break
self.subdomains.update(subdomains)
            if 'Next Page' not in resp.text:  # stop when the results page has no next page
break
last = re.search(r'&last=.*' + self.domain, resp.text).group(0)
self.page_num += self.per_page_num
if self.page_num > 500:
break
def run(self):
"""
        Execution entry point of the class
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def run(domain):
"""
    Unified invocation entry point
    :param str domain: the target domain
"""
query = NetCraft(domain)
query.run()
if __name__ == '__main__':
run('example.com')
|
400754
|
import torch
import numpy as np
from torch.utils.data import Dataset
import os, glob
import re
import cv2
import math
from random import shuffle
import torch.nn.functional as F
from torchvision import transforms
from tqdm import tqdm
from PIL import Image
import scipy.io as io
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from mpl_toolkits.mplot3d import Axes3D
import time
import open3d as o3d
from queue import Queue
class Standardize(object):
""" Standardizes a 'PIL Image' such that each channel
gets zero mean and unit variance. """
def __call__(self, img):
return (img - img.mean(dim=(1,2), keepdim=True)) \
/ torch.clamp(img.std(dim=(1,2), keepdim=True), min=1e-8)
def __repr__(self):
return self.__class__.__name__ + '()'
def rotate(xyz):
def dotproduct(v1, v2):
return sum((a * b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
num = dotproduct(v1, v2)
den = (length(v1) * length(v2))
if den == 0:
print('den = 0')
print(length(v1))
print(length(v2))
print(num)
ratio = num/den
ratio = np.minimum(1, ratio)
ratio = np.maximum(-1, ratio)
return math.acos(ratio)
p1 = np.float32(xyz[1, :])
p2 = np.float32(xyz[6, :])
v1 = np.subtract(p2, p1)
mod_v1 = np.sqrt(np.sum(v1 ** 2))
x = np.float32([1., 0., 0.])
y = np.float32([0., 1., 0.])
z = np.float32([0., 0., 1.])
theta = math.acos(np.sum(v1 * z) / (mod_v1 * 1)) * 360 / (2 * math.pi)
# M = cv2.getAffineTransform()
p = np.cross(v1, z)
# if sum(p)==0:
# p = np.cross(v1,y)
p[2] = 0.
# ang = -np.minimum(np.abs(angle(p, x)), 2 * math.pi - np.abs(angle(p, x)))
ang = angle(x, p)
if p[1] < 0:
ang = -ang
M = [[np.cos(ang), np.sin(ang), 0.],
[-np.sin(ang), np.cos(ang), 0.], [0., 0., 1.]]
M = np.reshape(M, [3, 3])
xyz = np.transpose(xyz)
xyz_ = np.matmul(M, xyz)
xyz_ = np.transpose(xyz_)
return xyz_
def flip_3d(msk):
msk[:, 1] = -msk[:, 1]
return msk
def compute_distances(FLAGS, labels3D, predictions3D, labels2D, predictions2D, labelsD, predictionsD):
ED_list_3d = torch.sum(torch.square(predictions3D - labels3D), dim=2)
ED_3d = torch.mean(ED_list_3d)
EDs_3d = torch.mean(torch.sqrt(ED_list_3d))
ED_list_2d = torch.sum(torch.square(predictions2D - labels2D), dim=2)
ED_2d = torch.mean(ED_list_2d)
EDs_2d = torch.mean(torch.sqrt(ED_list_2d))
# print("P3D: ", predictions3D.shape)
# print("L3D: ", labels3D.shape)
# print("P2D: ", predictions2D.shape)
# print("L2D: ", labels2D.shape)
# print(torch.max(labelsD))
# print(torch.min(labelsD))
# print(torch.max(predictionsD))
# print(torch.min(predictionsD))
valid_mask = (labelsD > 0).detach()
diff = (labelsD - predictionsD).abs()
diff_masked = diff[valid_mask]
ED_D = (diff_masked.mean() + diff.mean()) / 2.
# cv2.imshow("Predicted", predictionsD.clone()[0].permute(1,2,0).cpu().detach().numpy())
# cv2.imshow("Real", labelsD.clone()[0].permute(1,2,0).cpu().detach().numpy())
# cv2.imshow("Diff", diff.clone()[0].permute(1,2,0).cpu().detach().numpy())
# cv2.waitKey(1)
return ED_3d, ED_2d, EDs_3d, EDs_2d, ED_D
def procrustes(X, Y, scaling=True, reflection='best'):
"""
A port of MATLAB's `procrustes` function to Numpy.
Procrustes analysis determines a linear transformation (translation,
reflection, orthogonal rotation and scaling) of the points in Y to best
conform them to the points in matrix X, using the sum of squared errors
as the goodness of fit criterion.
d, Z, [tform] = procrustes(X, Y)
Inputs:
------------
X, Y
matrices of target and input coordinates. they must have equal
numbers of points (rows), but Y may have fewer dimensions
(columns) than X.
scaling
if False, the scaling component of the transformation is forced
to 1
reflection
if 'best' (default), the transformation solution may or may not
include a reflection component, depending on which fits the data
best. setting reflection to True or False forces a solution with
reflection or no reflection respectively.
Outputs
------------
d
the residual sum of squared errors, normalized according to a
measure of the scale of X, ((X - X.mean(0))**2).sum()
Z
the matrix of transformed Y-values
tform
a dict specifying the rotation, translation and scaling that
        maps Y --> X
"""
n, m = X.shape
ny, my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0 ** 2.).sum()
ssY = (Y0 ** 2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U, s, Vt = np.linalg.svd(A, full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
    if reflection != 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:, -1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
        # standardised distance between X and b*Y*T + c
d = 1 - traceTA ** 2
# transformed coords
Z = normX * traceTA * np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY / ssX - 2 * traceTA * normY / normX
Z = normY * np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my, :]
c = muX - b * np.dot(muY, T)
# transformation values
tform = {'rotation': T, 'scale': b, 'translation': c}
return d, Z, tform
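# Hedged usage sketch for `procrustes` (shapes illustrative):
#   X = np.random.rand(15, 3)       # target joints
#   Y = rotate(X.copy())            # input joints to align
#   d, Z, tform = procrustes(X, Y)  # Z is Y mapped onto X; d is the residual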
def plot_skeletons(FLAGS, fig, images_orig, links, preds_2D, gts_2D, preds_3D, gts_3D, preds_D, gts_D, writer, angle):
plt.rcParams.update({'axes.labelsize': 'small'})
for index in range(0, FLAGS.batch_size):
plt.clf()
angle = (angle + 1) % 360
ax_bb = fig.add_subplot(331)
ax_bb.set_title('Input image')
ax_hat_3D = fig.add_subplot(338, projection='3d')
ax_hat_3D.set_title('3D prediction')
ax_hat_3D.set_xlabel('X')
ax_hat_3D.set_ylabel('Y')
ax_hat_3D.set_zlabel('Z')
ax_hat_3D.set_xlim([-100, 100])
ax_hat_3D.set_ylim([-100, 100])
ax_hat_3D.set_zlim([-100, 100])
ax_hat_3D.view_init(15, angle)
ax_hat_3D.labelsize = 10
ax_gt_3D = fig.add_subplot(339, projection='3d')
ax_gt_3D.set_title('3D ground truth')
ax_gt_3D.set_xlabel('X')
ax_gt_3D.set_ylabel('Y')
ax_gt_3D.set_zlabel('Z')
ax_gt_3D.set_xlim([-100, 100])
ax_gt_3D.set_ylim([-100, 100])
ax_gt_3D.set_zlim([-100, 100])
ax_gt_3D.view_init(15, angle)
ax_hat_2D = fig.add_subplot(335)
ax_hat_2D.set_title('2D prediction')
ax_hat_2D.set_xlabel('X')
ax_hat_2D.set_ylabel('Y')
ax_hat_2D.set_xlim([0, 1])
ax_hat_2D.set_ylim([0, 1])
ax_gt_2D = fig.add_subplot(336)
ax_gt_2D.set_title('2D ground truth')
ax_gt_2D.set_xlabel('X')
ax_gt_2D.set_ylabel('Y')
ax_gt_2D.set_xlim([0, 1])
ax_gt_2D.set_ylim([0, 1])
ax_hat_D = fig.add_subplot(332)
ax_hat_D.set_title('Depth prediction')
ax_gt_D = fig.add_subplot(333)
ax_gt_D.set_title('Depth ground truth')
ax_bb.imshow(np.reshape(
images_orig[index], (FLAGS.input_height, FLAGS.input_width, FLAGS.n_channels)))
colormaps = [
'Greys_r', 'Purples_r', 'Blues_r', 'Greens_r', 'Oranges_r', 'Reds_r',
'YlOrBr_r', 'YlOrRd_r', 'OrRd_r', 'PuRd_r', 'RdPu_r', 'BuPu_r',
'GnBu_r', 'PuBu_r', 'YlGnBu_r', 'PuBuGn_r', 'BuGn_r', 'YlGn_r']
for i in range(len(links)):
link = links[i]
for j in range(len(link)):
P2_hat_3D = preds_3D[index][i, :]
P1_hat_3D = preds_3D[index][link[j], :]
link_hat_3D = [list(x)
for x in list(zip(P1_hat_3D, P2_hat_3D))]
ax_hat_3D.plot(
link_hat_3D[0], link_hat_3D[2], zs=[ -x for x in link_hat_3D[1]])
P2_gt_3D = gts_3D[index][i, :]
P1_gt_3D = gts_3D[index][link[j], :]
link_gt_3D = [list(x) for x in list(zip(P1_gt_3D, P2_gt_3D))]
ax_gt_3D.plot(link_gt_3D[0], link_gt_3D[2], zs=[ -x for x in link_gt_3D[1]])
P2_hat_2D = preds_2D[index][i, :]
P1_hat_2D = preds_2D[index][link[j], :]
link_hat_2D = [list(x)
for x in list(zip(P1_hat_2D, P2_hat_2D))]
ax_hat_2D.plot(
link_hat_2D[0], link_hat_2D[1])
P2_gt_2D = gts_2D[index][i, :]
P1_gt_2D = gts_2D[index][link[j], :]
link_gt_2D = [list(x) for x in list(zip(P1_gt_2D, P2_gt_2D))]
ax_gt_2D.plot(link_gt_2D[0], link_gt_2D[1])
ax_gt_D.imshow(gts_D[index])
# ax_hat_D.imshow(preds_D[index].cpu())
ax_hat_D.imshow(preds_D[index])
plt.draw()
fig.canvas.flush_events()
plt.show(block=False)
writer.grab_frame()
return angle
def eval_image(model):
viewpoint = "top"
sample = "05_00000000_rear"
image = cv2.imread("/media/disi/New Volume/Datasets/PANOPTIC_CAPS/"+viewpoint+"/train/"+ sample +".png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transform = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
Standardize()])
image = transform(image)
image_tensor = image.unsqueeze(0)
# image_tensor = image_tensor.permute(0,3,1,2)
input = torch.autograd.Variable(image_tensor)
input = input.cuda()
input = torch.cat(128*[input])
print("INPUT SHAPE: ", input.shape)
yhat2D, yhat3D, yhatD, W_reg, _ = model(input)
itop_labels = ['Head','Neck','LShould','RShould',"LElbow","RElbow","LHand","RHand","Torso","LHip","RHip","LKnee","RKnee","LFoot","RFoot"]
msk3D = np.load("/media/disi/New Volume/Datasets/PANOPTIC_CAPS/"+viewpoint+"/train/"+sample+".npy")
msk3D = torch.from_numpy(msk3D).float().unsqueeze(0).unsqueeze(-1)
msk3D = torch.cat(128*[msk3D]) / 100.
msk3D = center_skeleton(msk3D)
msk3D = discretize(msk3D, 0, 1)
print(msk3D.shape)
pred = yhat3D.cpu().detach().numpy().squeeze(-1)
gt = msk3D.cpu().detach().numpy().squeeze(-1)
assert(pred.shape == gt.shape)
assert(len(pred.shape) == 3)
msk3D = msk3D.squeeze(3)
yhat3D = yhat3D.squeeze(3)
for i, p in enumerate(pred):
d, Z, tform = procrustes(
gt[i], pred[i])
pred[i] = Z
print(yhat3D.shape)
print(pred.shape)
yhat3D = torch.from_numpy(pred).float()
# if(viewpoint=="top"):
msk3D = msk3D[:,:,[2,0,1]]
yhat3D = yhat3D[:,:,[2,0,1]]
print("GT: ", msk3D.shape)
print("PRED: ", yhat3D.shape)
print("ERROR: ", np.mean(np.sqrt(np.sum((yhat3D.cpu().detach().numpy() - msk3D.cpu().detach().numpy())**2, axis=2))))
save_3d_plot(msk3D, "gt_depth", display_labels=True, viewpoint=viewpoint)
save_3d_plot(yhat3D.cpu().detach().numpy(), "pred_depth", viewpoint=viewpoint)
index = 10
image_2d = input[index].permute(1,2,0).cpu().detach().numpy()
# # img_kps = np.zeros((256,256,3), np.uint8)
# img_kps = cv2. cvtColor(image_2d, cv2.COLOR_GRAY2BGR)#.astype(np.uint8)
# for i, kps in enumerate(yhat2D[index]): # (15,2,1)
# if(i == 8):
# color = (255,0,0)
# else:
# color = (0,255,0)
# cv2.circle(img_kps, (int(256*kps[0].cpu()), int(256*kps[1].cpu())), 2, color, 8, 0)
# # cv2.putText(img_kps, itop_labels[i], (int(256*kps[0].cpu()) + 10, int(256*kps[1].cpu())), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0))
# cv2.imshow("Kps", img_kps)
cv2.imshow("Input", image_2d)
cv2.waitKey(0)
def save_3d_plot(itop, name, azim=None, elev=None, gt=None, display_labels=False, viewpoint="top"):
# itop_labels = ['Head','Neck','RShould','LShould',"RElbow","LElbow","RHand","LHand","Torso","RHip","LHip","RKnee","LKnee","RFoot","LFoot"]
itop_labels = ['Head','Neck','LShould','RShould',"LElbow","RElbow","LHand","RHand","Torso","LHip","RHip","LKnee","RKnee","LFoot","RFoot"]
itop_labels = ['0','1','2','3',"4","5","6","7","8","9","10","11","12","13","14"]
itop_connections = [[0,1],[1,2],[1,3],[2,3],[2,4],[3,5],[4,6],[5,7],[1,8],[8,9],[8,10],[9,10],[9,11],[10,12],[11,13],[12,14]]
fig = plt.figure()
ax = plt.axes(projection='3d')
index = 10
itop_newjoints = change_format_from_19_joints_to_15_joints(itop[0])
itop_newjoints = np.expand_dims(itop_newjoints, 0)
itop = np.repeat(itop_newjoints, 128, axis=0)
# print(itop.shape)
xdata = itop[index,:,0].flatten()
ydata = itop[index,:,1].flatten()
zdata = itop[index,:,2].flatten()
for i in itop_connections:
x1,x2,y1,y2,z1,z2 = connect(xdata,ydata,zdata,i[0],i[1])
ax.plot([x1,x2],[y1,y2],[z1,z2],'k-')
ax.scatter3D(xdata, ydata, zdata, c=zdata)
if(gt is not None):
pred = undiscretize(itop, 0, 1)[index]
gt = undiscretize(gt, 0, 1)[index]
pred = pred.squeeze()
gt = gt.squeeze()
assert(pred.shape == gt.shape)
assert(len(pred.shape) == 2)
err_dist = np.sqrt(np.sum((pred - gt)**2, axis=1)) # (N, K)
errors = (err_dist < 0.1)
for i, (x, y, z, label) in enumerate(zip(xdata,ydata,zdata, itop_labels)):
error_color='black'
if(gt is not None and not errors[i]):
error_color='red'
if(display_labels):
ax.text(x, y, z, label, color=error_color)
# ax.text2D(0.05, 0.95, "ITOP", transform=ax.transAxes)
if(azim):
ax.view_init(elev=elev, azim=azim)
# ax.set_xlabel('x', rotation=0, fontsize=20, labelpad=20)
# ax.set_ylabel('y', rotation=0, fontsize=20, labelpad=20)
# ax.set_zlabel('z', rotation=0, fontsize=20, labelpad=20)
# ax.set_xlim3d(-1,1)
# ax.set_ylim3d(-2,2)
# ax.set_zlim3d(0,2)
ax.set_xlim3d(0.2,1)
ax.set_ylim3d(0,0.6)
ax.set_zlim3d(0.8,0.2)
# plt.show(block=False)
# redraw the canvas
fig.canvas.draw()
# convert canvas to image
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# img is rgb, convert to opencv's default bgr
img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
# display image with opencv or any operation you like
cv2.imshow(name, img)
cv2.imwrite(name+".png", img)
if(name=="True side"):
cv2.waitKey(1)
else:
cv2.waitKey(1)
def connect(x,y,z,p1,p2):
x1, x2 = x[p1], x[p2]
y1, y2 = y[p1], y[p2]
z1,z2 = z[p1],z[p2]
return x1,x2,y1,y2,z1,z2
def center_skeleton(skeletons):
for b, batch in enumerate(skeletons):
skeletons[b,:,:] = skeletons[b,:,:] - skeletons[b,2,:]
return skeletons
def change_format_from_19_joints_to_15_joints(joints):
xdata = joints[:,0]
ydata = joints[:,1]
zdata = joints[:,2]
panoptic_head = [(xdata[16]+xdata[18])/2,(ydata[16]+ydata[18])/2,(zdata[16]+zdata[18])/2]
panoptic_torso = [(xdata[0]+xdata[2])/2,(ydata[0]+ydata[2])/2,(zdata[0]+zdata[2])/2]
# head neck r shoulder l shoulder r elbow l elbow r hand l hand torso r hip l hip r knee l knee r foot l foot
#xdata_new = np.array([panoptic_head[0], xdata[0], xdata[9], xdata[3], xdata[10], xdata[4], xdata[11], xdata[5], panoptic_torso[0], xdata[12], xdata[6], xdata[13], xdata[7], xdata[14], xdata[8]])
#ydata_new = np.array([panoptic_head[1], ydata[0], ydata[9], ydata[3], ydata[10], ydata[4], ydata[11], ydata[5], panoptic_torso[1], ydata[12], ydata[6], ydata[13], ydata[7], ydata[14], ydata[8]])
#zdata_new = np.array([panoptic_head[2], zdata[0], zdata[9], zdata[3], zdata[10], zdata[4], zdata[11], zdata[5], panoptic_torso[2], zdata[12], zdata[6], zdata[13], zdata[7], zdata[14], zdata[8]])
xdata_new = np.array([panoptic_head[0], xdata[0], xdata[3], xdata[9], xdata[4], xdata[10], xdata[5], xdata[11], panoptic_torso[0], xdata[6], xdata[12], xdata[7], xdata[13], xdata[8], xdata[14]])
ydata_new = np.array([panoptic_head[1], ydata[0], ydata[3], ydata[9], ydata[4], ydata[10], ydata[5], ydata[11], panoptic_torso[1], ydata[6], ydata[12], ydata[7], ydata[13], ydata[8], ydata[14]])
zdata_new = np.array([panoptic_head[2], zdata[0], zdata[3], zdata[9], zdata[4], zdata[10], zdata[5], zdata[11], panoptic_torso[2], zdata[6], zdata[12], zdata[7], zdata[13], zdata[8], zdata[14]])
panoptic_converted = np.empty(shape=(15, 3), dtype=float)
for index in range(len(panoptic_converted)):
panoptic_converted[index,0] = xdata_new[index]
panoptic_converted[index,1] = ydata_new[index]
panoptic_converted[index,2] = zdata_new[index]
return panoptic_converted
def discretize(coord, a, b):
normalizers_3D = [[-0.927149999999999, 1.4176299999999982], [-1.1949180000000008, 0.991252999999999], [-0.8993889999999993, 0.8777908000000015]]
for i in range(3):
coord[:,:,i] = (b - a) * (coord[:,:,i] - normalizers_3D[i][0]) / (normalizers_3D[i][1] - normalizers_3D[i][0]) + a
return coord
def undiscretize(coord, a, b):
normalizers_3D = [[-0.927149999999999, 1.4176299999999982], [-1.1949180000000008, 0.991252999999999], [-0.8993889999999993, 0.8777908000000015]]
for i in range(3):
coord[:,:,i] = ( (coord[:,:,i] - a) * (normalizers_3D[i][1] - normalizers_3D[i][0]) / (b - a) ) + normalizers_3D[i][0]
return coord
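# Hedged round-trip sanity check (illustrative): undiscretize should invert
# discretize up to floating point error. Note both functions modify `coord`
# in place.
#   c = torch.rand(2, 15, 3)
#   r = undiscretize(discretize(c.clone(), 0, 1), 0, 1)
#   assert torch.allclose(r, c, atol=1e-5)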
|
400781
|
from __future__ import absolute_import
import struct
import random
from . import *
_len = len
_type = type
try:
long(0)
except NameError:
long = int
default_xid = lambda: long(random.random()*0xFFFFFFFF)
def _obj(obj):
if isinstance(obj, bytes):
return obj
elif isinstance(obj, tuple):
return eval(obj.__class__.__name__)(*obj)
else:
raise ValueError(obj)
def _pack(fmt, *args):
if fmt[0] != "!":
fmt = "!"+fmt
return struct.pack(fmt, *args)
def _unpack(fmt, message, offset):
if fmt[0] != "!":
fmt = "!"+fmt
return struct.unpack_from(fmt, message, offset)
def _align(length):
return (length+7)//8*8
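# _align rounds a byte length up to the next multiple of 8, the padding
# granularity OpenFlow uses throughout, e.g. _align(12) == 16 and
# _align(16) == 16.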
# 7.1
def ofp_header(version, type, length, xid):
if version is None:
version = 5
assert version==5
if length is None:
length = 8
if xid is None:
xid = default_xid()
return _pack("BBHI", version, type, length, xid)
def ofp_(header, data, type=None):
if isinstance(header, bytes):
(version, oftype, length, xid) = _unpack("BBHI", header, 0)
if isinstance(type, int):
assert oftype == type
elif isinstance(type, (list,tuple)):
assert oftype in type
elif type is None:
pass
else:
raise ValueError(type)
elif isinstance(header, tuple):
(version, oftype, length, xid) = header
if isinstance(type, int):
if oftype is None:
oftype = type
else:
assert oftype == type
elif isinstance(type, (list,tuple)):
assert oftype in type
elif type is None:
assert isinstance(oftype, int)
else:
raise ValueError(type)
elif header is None:
version = 5
if isinstance(type, int):
oftype = type
else:
raise ValueError(type)
xid = default_xid()
else:
raise ValueError(header)
data = _obj(data)
length = 8 + _len(data)
return ofp_header(version, oftype, length, xid)+data
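# Hedged example (illustrative): wrap an empty body in a full OpenFlow
# message. 5 is OFPT_FEATURES_REQUEST in OpenFlow 1.4, assumed to be
# defined by the star import above.
#   msg = ofp_(None, b"", type=5)   # 8-byte header + empty body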
# 7.2.1.1
def ofp_port(port_no, length, hw_addr, name, config, state, properties):
assert isinstance(hw_addr, str) and _len(hw_addr)==6
assert isinstance(name, str) and _len(name)<=16
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
length = 40 + _len(properties)
msg = _pack("IH2x6s2x16sII",
port_no,
length,
hw_addr,
name,
config,
state
)
    return msg + properties
# 7.2.1.2
def ofp_port_desc_prop_header(type, length):
# XXX: You won't need this function
return _pack("HH", type, length)
def ofp_port_desc_prop_ethernet(type, length,
curr, advertised, supported, peer, curr_speed, max_speed):
if type is None:
type = OFPPDPT_ETHERNET
assert type == OFPPDPT_ETHERNET
length = 32
return _pack("HH4x6I", type, length,
curr, advertised, supported, peer, curr_speed, max_speed)
def ofp_port_desc_prop_optical(type, length, supported,
tx_min_freq_lmda, tx_max_freq_lmda, tx_grid_freq_lmda,
rx_min_freq_lmda, rx_max_freq_lmda, rx_grid_freq_lmda,
tx_pwr_min, tx_pwr_max):
if type is None:
type = OFPPDPT_OPTICAL
assert type == OFPPDPT_OPTICAL
length = 40
return _pack("HH4x7I2H", type, length, supported,
tx_min_freq_lmda, tx_max_freq_lmda, tx_grid_freq_lmda,
rx_min_freq_lmda, rx_max_freq_lmda, rx_grid_freq_lmda,
tx_pwr_min, tx_pwr_max)
def ofp_port_desc_prop_experimenter(type, length,
experimenter, exp_type, experimenter_data):
if type is None:
type = OFPPDPT_EXPERIMENTER
assert type == OFPPDPT_EXPERIMENTER
if experimenter_data is None:
experimenter_data = b""
length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type
) + experimenter_data + b"\x00" * (align(length) - length)
# 7.2.2.1
def ofp_match(type, length, oxm_fields):
'''type: OFPMT_STANDARD/OXM
'''
if type is None:
type = OFPMT_OXM
if isinstance(oxm_fields, str):
pass
elif isinstance(oxm_fields, (list, tuple)):
oxm_fields = b"".join([_obj(f) for f in oxm_fields])
elif oxm_fields is None:
oxm_fields = b""
else:
        raise ValueError(oxm_fields)
length = 4 + _len(oxm_fields)
msg = _pack("HH", type, length) + oxm_fields + b"\0"*(_align(length)-length)
assert _len(msg) % 8 == 0
return msg
# 7.2.4
def ofp_instruction_header(type, len):
return _pack("HH", type, len)
def ofp_instruction_goto_table(type, len, table_id):
if type is None:
type = OFPIT_GOTO_TABLE
assert type==OFPIT_GOTO_TABLE
len = 8
msg = _pack("HHB3x",
type,
len,
table_id)
assert _len(msg)==8
return msg
def ofp_instruction_write_metadata(type, len, metadata, metadata_mask):
if type is None:
type = OFPIT_WRITE_METADATA
assert type==OFPIT_WRITE_METADATA
len = 24
msg = _pack("HH4xQQ",
type,
len,
metadata,
metadata_mask)
assert _len(msg)==24
return msg
def ofp_instruction_actions(type, len, actions):
'''type: OFPIT_WRITE_ACTIONS/APPLY_ACTIONS/CLEAR_ACTIONS
'''
if isinstance(actions, str):
pass
elif isinstance(actions, (tuple, list)):
actions = b"".join([_obj(a) for a in actions])
elif actions is None:
actions = b""
else:
raise ValueError(actions)
assert type in (OFPIT_WRITE_ACTIONS, OFPIT_APPLY_ACTIONS, OFPIT_CLEAR_ACTIONS)
len = _align(8 + _len(actions))
msg = _pack("HH4x",
type,
len) + actions
return msg
def ofp_instruction_meter(type, len, meter_id):
if type is None:
type = OFPIT_METER
assert type==OFPIT_METER
len = 8
msg = _pack("HHI",
type,
len,
meter_id)
return msg
def ofp_instruction_experimenter_(type, len, experimenter, data):
if type is None:
type = OFPIT_EXPERIMENTER
assert type == OFPIT_EXPERIMENTER
data = _obj(data)
len = 8 + _len(data)
msg = _pack("HHI",
type,
len,
experimenter) + data
return msg
# 7.2.4
def ofp_action_header(type, len):
return _pack("HH", type, len)
def ofp_action_output(type, len, port, max_len):
'''max_len: OFP_CML_MAX/NO_BUFFER
'''
if type is None:
type = OFPAT_OUTPUT
assert type == OFPAT_OUTPUT
len = 16
msg = _pack("HHIH6x",
type,
len,
port,
max_len)
assert _len(msg) == 16
return msg
def ofp_action_group(type, len, group_id):
if type is None:
type = OFPAT_GROUP
assert type == OFPAT_GROUP
len = 8
msg = _pack("HHI",
type,
len,
group_id)
assert _len(msg) == 8
return msg
def ofp_action_set_queue(type, len, queue_id):
if type is None:
type = OFPAT_SET_QUEUE
assert type == OFPAT_SET_QUEUE
len = 8
msg = _pack("HHI",
type,
len,
queue_id)
assert _len(msg) == 8
return msg
def ofp_action_mpls_ttl(type, len, mpls_ttl):
if type is None:
type = OFPAT_SET_MPLS_TTL
assert type == OFPAT_SET_MPLS_TTL
len = 8
msg = _pack("HHB3x",
type,
len,
mpls_ttl)
assert _len(msg) == 8
return msg
def ofp_action_generic(type, len):
assert type in (OFPAT_COPY_TTL_OUT, OFPAT_COPY_TTL_IN,
OFPAT_DEC_MPLS_TTL, OFPAT_DEC_NW_TTL, OFPAT_POP_VLAN, OFPAT_POP_PBB)
len = 8
return _pack("HH4x", type, len)
def ofp_action_nw_ttl(type, len, nw_ttl):
if type is None:
type = OFPAT_SET_NW_TTL
assert type == OFPAT_SET_NW_TTL
len = 8
msg = _pack("HHB3x",
type,
len,
nw_ttl)
assert _len(msg) == 8
return msg
def ofp_action_push(type, len, ethertype):
'''type: OFPAT_PUSH_VLAN/PUSH_MPLS/PUSH_PBB
'''
assert type in (OFPAT_PUSH_VLAN, OFPAT_PUSH_MPLS, OFPAT_PUSH_PBB)
len = 8
msg = _pack("3H2x",
type,
len,
ethertype)
assert _len(msg) == 8
return msg
def ofp_action_pop_mpls(type, len, ethertype):
if type is None:
type = OFPAT_POP_MPLS
assert type == OFPAT_POP_MPLS
len = 8
msg = _pack("3H2x",
type,
len,
ethertype)
assert _len(msg) == 8
return msg
def ofp_action_set_field(type, len, field):
if type is None:
type = OFPAT_SET_FIELD
assert type == OFPAT_SET_FIELD
assert isinstance(field, bytes)
filled_len = 4 + _len(field)
len = _align(filled_len)
return _pack("HH", type, len) + field + b'\0'*(len-filled_len)
def ofp_action_experimenter_header(type, len, experimenter):
return _pack("HHI", type, len, experimenter)
def ofp_action_experimenter_(type, len, experimenter, data):
if type is None:
type = OFPAT_EXPERIMENTER
assert type == OFPAT_EXPERIMENTER
assert isinstance(data, str)
filled_len = 8 + _len(data)
len = _align(filled_len)
return _pack("HHI", type, len, experimenter) + data + b'\0'*(len-filled_len)
# 7.3.1
def ofp_switch_features(header, datapath_id, n_buffers, n_tables, auxiliary_id, capabilities):
msg = ofp_(header, _pack("QIBB2xII",
datapath_id,
n_buffers,
n_tables,
auxiliary_id,
capabilities,
0), OFPT_FEATURES_REPLY)
assert _len(msg) == 32
return msg
# 7.3.2
def ofp_switch_config(header, flags, miss_send_len):
if miss_send_len is None:
miss_send_len = OFPCML_NO_BUFFER
msg = ofp_(header, _pack("HH",
flags,
miss_send_len), (OFPT_SET_CONFIG, OFPT_GET_CONFIG_REPLY))
assert _len(msg) == 12
return msg
# 7.3.3
def ofp_table_mod(header, table_id, config, properties):
msg = ofp_(header, _pack("B3xI",
table_id,
config)+properties, OFPT_TABLE_MOD)
return msg
def ofp_table_mod_prop_header(type, length):
return _pack("HH", type, length)
def ofp_table_mod_prop_eviction(type, length, flags):
    type = OFPTMPT_EVICTION
    length = 8
    return _pack("HHI", type, length, flags)
def ofp_table_mod_prop_vacancy(type, length, vacancy_down, vacancy_up, vacancy):
    type = OFPTMPT_VACANCY
length = 8
return _pack("HH3Bx", type, length, vacancy_down, vacancy_up, vacancy)
def ofp_table_mod_prop_experimenter(type, length, experimenter, exp_type, experimenter_data):
type = OFPTMPT_EXPERIMENTER
    if experimenter_data is None:
        experimenter_data = b""
    length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type
) + experimenter_data + b'\0'*(_align(length)-length)
# 7.3.4.1
def ofp_flow_mod(header, cookie, cookie_mask, table_id, command,
idle_timeout, hard_timeout, priority, buffer_id, out_port, out_group, flags, importance,
match, instructions):
'''command=OFPFC_ADD/MODIFY/MODIFY_STRICT/DELETE/DELETE_STRICT
'''
if isinstance(instructions, str):
pass
elif isinstance(instructions, (tuple,list)):
instructions = b"".join([_obj(i) for i in instructions])
elif instructions is None:
instructions = b""
else:
raise ValueError(instructions)
if buffer_id is None:
buffer_id = 0xffffffff # OFP_NO_BUFFER
if out_port is None:
out_port = OFPP_ANY
if out_group is None:
out_group = OFPG_ANY
msg = ofp_(header, _pack("QQBB3H3IHH",
cookie, cookie_mask,
table_id, command,
idle_timeout, hard_timeout,
priority, buffer_id,
out_port, out_group,
flags, importance)+_obj(match)+instructions,
OFPT_FLOW_MOD)
return msg
# 7.3.4.2
def ofp_group_mod(header, command, type, group_id, buckets):
'''
command = OFPGC_ADD/MODIFY/DELETE
type = OFPGT_ALL/SELECT/INDIRECT/FF
'''
if isinstance(buckets, str):
pass
elif isinstance(buckets, (list, tuple)):
buckets = b"".join([_obj(b) for b in buckets])
elif buckets is None:
buckets = b""
else:
raise ValueError(buckets)
msg = ofp_(header,
_pack("HBxI", command, type, group_id)+buckets,
OFPT_GROUP_MOD)
return msg
def ofp_bucket(len, weight, watch_port, watch_group, actions):
if isinstance(actions, str):
pass
elif isinstance(actions, (list, tuple)):
actions = b"".join([_obj(a) for a in actions])
elif actions is None:
actions = b""
else:
raise ValueError(actions)
filled_len = 16 + _len(actions)
len = _align(filled_len)
msg = _pack("HHII4x",
len,
weight,
watch_port,
watch_group) + actions + b'\0'*(len-filled_len)
return msg
# 7.3.4.3
def ofp_port_mod(header, port_no, hw_addr, config, mask, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
msg = ofp_(header,
_pack("I4x6s2xII", port_no, hw_addr, config, mask)+properties,
OFPT_PORT_MOD)
return msg
def ofp_port_mod_prop_header(type, length):
return _pack("HH", type, length)
def ofp_port_mod_prop_ethernet(type, length, advertise):
type = OFPPMPT_ETHERNET
length = 8
return _pack("HHI", type, length, advertise)
def ofp_port_mod_prop_optical(type, length, configure, freq_lmda, fl_offset, grid_span, tx_pwr):
type = OFPPMPT_OPTICAL
length = 24
return _pack("HHIIiII", type, length, configure, freq_lmda, fl_offset, grid_span, tx_pwr)
def ofp_port_mod_prop_experimenter(type, length, experimenter, exp_type, experimenter_data):
    type = OFPPMPT_EXPERIMENTER
    if experimenter_data is None:
        experimenter_data = b""
    length = 12 + _len(experimenter_data)
    return _pack("HHII", type, length, experimenter, exp_type
        )+experimenter_data+b'\0'*(_align(length)-length)
# 7.3.4.5
def ofp_meter_mod(header, command, flags, meter_id, bands):
if isinstance(bands, str):
pass
elif isinstance(bands, (list, tuple)):
bands = b"".join([_obj(b) for b in bands])
elif bands is None:
bands = b""
else:
raise ValueError(bands)
msg = _pack("8sHHI",
_obj(header),
command,
flags,
meter_id) + bands
return msg
def ofp_meter_band_header(type, len, rate, burst_size):
msg = _pack("HHII", type, len, rate, burst_size)
assert _len(msg) == 12
return msg
def ofp_meter_band_drop(type, len, rate, burst_size):
if type is None:
type = OFPMBT_DROP
assert type == OFPMBT_DROP
len = 16
msg = _pack("HHII4x",
type,
len,
rate,
burst_size)
assert _len(msg) == 16
return msg
def ofp_meter_band_dscp_remark(type, len, rate, burst_size, prec_level):
if type is None:
type = OFPMBT_DSCP_REMARK
assert type == OFPMBT_DSCP_REMARK
len = 16
msg = _pack("HHIIB3x",
type,
len,
rate,
burst_size,
prec_level)
assert _len(msg) == 16
return msg
def ofp_meter_band_experimenter(type, len, rate, burst_size, experimenter, data):
if type is None:
type = OFPMBT_EXPERIMENTER
assert type == OFPMBT_EXPERIMENTER
assert isinstance(data, str)
len = 16 + _len(data)
# XXX: no _align here in spec
msg = _pack("HHIII",
type,
len,
rate,
burst_size,
experimenter) + data
return msg
# 7.3.5
def ofp_multipart_request(header, type, flags, body=None):
if type in (OFPMP_DESC, OFPMP_TABLE, OFPMP_GROUP_DESC,
OFPMP_GROUP_FEATURES, OFPMP_METER_FEATURES, OFPMP_PORT_DESC):
body = b""
elif type in (OFPMP_FLOW, OFPMP_AGGREGATE, OFPMP_PORT_STATS,
OFPMP_QUEUE_STATS, OFPMP_GROUP, OFPMP_METER, OFPMP_METER_CONFIG):
if body is None:
body = b""
else:
body = _obj(body)
elif type == OFPMP_TABLE_FEATURES:
if isinstance(body, str):
pass
elif isinstance(body, (list, tuple)):
body = b"".join([_obj(b) for b in body])
elif body is None:
            body = b""
msg = ofp_(header,
_pack("HH4x", type, flags) + body,
OFPT_MULTIPART_REQUEST)
return msg
def ofp_multipart_reply(header, type, flags, body):
if type in (OFPMP_DESC, OFPMP_AGGREGATE, OFPMP_GROUP_FEATURES,
OFPMP_METER_FEATURES):
if isinstance(body, (tuple,str)):
body = _obj(body)
elif body is None:
body = b""
else:
raise ValueError(body)
elif type in (OFPMP_FLOW, OFPMP_TABLE, OFPMP_PORT_STATS, OFPMP_QUEUE_STATS,
OFPMP_GROUP, OFPMP_GROUP_DESC, OFPMP_METER, OFPMP_METER_CONFIG,
OFPMP_TABLE_FEATURES, OFPMP_PORT_DESC):
if isinstance(body, (list,tuple)):
body = b"".join([_obj(b) for b in body])
elif body is None:
body = b""
else:
raise ValueError(body)
elif type == OFPMP_EXPERIMENTER:
if isinstance(body, str):
pass
else:
raise ValueError(body)
else:
raise ValueError(type)
msg = ofp_(header,
_pack("HH4x", type, flags) + body,
OFPT_MULTIPART_REPLY)
return msg
# 7.3.5.1
def ofp_desc(mfr_desc, hw_desc, sw_desc, serial_num, dp_desc):
if mfr_desc is None:
mfr_desc = b""
if hw_desc is None:
hw_desc = b""
if sw_desc is None:
sw_desc = b""
if serial_num is None:
serial_num = b""
if dp_desc is None:
dp_desc = b""
msg = _pack("256s256s256s32s256s",
mfr_desc,
hw_desc,
sw_desc,
serial_num,
dp_desc)
assert _len(msg) == 1056
return msg
# 7.3.5.2
def ofp_flow_stats_request(table_id, out_port, out_group, cookie, cookie_mask, match):
if table_id is None:
table_id = OFPTT_ALL
if out_port is None:
out_port = OFPP_ANY
if out_group is None:
out_group = OFPG_ANY
if cookie is None:
cookie = 0
if cookie_mask is None:
cookie_mask = 0
if match is None:
match = ofp_match(None, None, [])
desc = _pack("B3xII4xQQ", table_id, out_port, out_group, cookie, cookie_mask)
assert _len(desc)==32
return desc+_obj(match)
def ofp_flow_stats(length, table_id, duration_sec, duration_nsec, priority,
idle_timeout, hard_timeout, flags, cookie, packet_count, byte_count,
match, instructions):
if instructions is None:
instructions = b""
elif isinstance(instructions, (list, tuple)):
instructions = b"".join([_obj(i) for i in instructions])
elif isinstance(instructions, str):
pass
else:
raise ValueError(instructions)
match = _obj(match)
length = 48 + _len(match) + _len(instructions)
msg = _pack("HBxII4H4x3Q", length, table_id, duration_sec, duration_nsec,
priority, idle_timeout, hard_timeout, flags,
cookie, packet_count, byte_count)+match+instructions
assert _len(msg) == length
return msg
# 7.3.5.3
def ofp_aggregate_stats_request(table_id, out_port, out_group, cookie, cookie_mask, match):
if table_id is None:
table_id = OFPTT_ALL
if out_port is None:
out_port = OFPP_ANY
if out_group is None:
out_group = OFPG_ANY
if cookie is None:
cookie = 0
if cookie_mask is None:
cookie_mask = 0
desc = _pack("B3xII4xQQ", table_id, out_port, out_group, cookie, cookie_mask)
    assert _len(desc) == 32
return desc + _obj(match)
def ofp_aggregate_stats_reply(packet_count, byte_count, flow_count):
return _pack("QQI4x", packet_count, byte_count, flow_count)
# 7.3.5.4
def ofp_table_stats(table_id, active_count, lookup_count, matched_count):
msg = _pack("B3xIQQ", table_id, active_count, lookup_count, matched_count)
assert _len(msg) == 24
return msg
# 7.3.5.5
def ofp_table_desc(length, table_id, config, properties):
if isinstance(properties, (list,tuple)):
properties = b"".join([_obj(p) for p in properties])
elif isinstance(properties, str):
pass
elif properties is None:
properties = b""
else:
raise ValueError(properties)
length = 8 + _len(properties)
return _pack("HBxI", length, table_id, config) + properties
# 7.3.5.5.1
def ofp_table_features(length, table_id, name, metadata_match, metadata_write, capabilities, max_entries, properties):
if isinstance(properties, (list,tuple)):
properties = b"".join([_obj(p) for p in properties])
elif isinstance(properties, str):
pass
elif properties is None:
properties = b""
else:
raise ValueError(properties)
length = 64 + _len(properties)
msg = _pack("HB5x32sQQII", length, table_id, name, metadata_match, metadata_write,
capabilities, max_entries) + properties
assert _len(msg) == length
return msg
# 7.3.5.5.2
def ofp_table_feature_prop_header(type, length):
return _pack("HH", type, length)
def ofp_table_feature_prop_instructions(type, length, instruction_ids):
if isinstance(instruction_ids, (list,tuple)):
instruction_ids = b"".join([_obj(i) for i in instruction_ids])
elif isinstance(instruction_ids, str):
pass
elif instruction_ids is None:
instruction_ids = b""
else:
raise ValueError(instruction_ids)
assert type in (OFPTFPT_INSTRUCTIONS, OFPTFPT_INSTRUCTIONS_MISS)
length = 4 + _len(instruction_ids)
return _pack("HH", type, length) + instruction_ids + b'\0'*(_align(length)-length)
def ofp_instruction_id(type, len, exp_data):
    if type == OFPIT_EXPERIMENTER:
if exp_data is None:
exp_data = b""
else:
exp_data = b""
len = 4 + _len(exp_data)
return _pack("HH", type, len) + exp_data
def ofp_table_feature_prop_tables(type, length, table_ids):
if isinstance(table_ids, (list,tuple)):
table_ids = b"".join([_obj(n) for n in table_ids])
elif isinstance(table_ids, str):
pass
elif table_ids is None:
table_ids = b""
else:
raise ValueError(table_ids)
assert type in (OFPTFPT_NEXT_TABLES, OFPTFPT_NEXT_TABLES_MISS)
length = 4 + _len(table_ids)
return _pack("HH", type, length) + table_ids + b'\0'*(_align(length)-length)
def ofp_table_feature_prop_actions(type, length, action_ids):
if isinstance(action_ids, (list,tuple)):
action_ids = b"".join([_obj(a) for a in action_ids])
elif isinstance(action_ids, str):
pass
elif action_ids is None:
action_ids = b""
else:
raise ValueError(action_ids)
assert type in (OFPTFPT_WRITE_ACTIONS, OFPTFPT_WRITE_ACTIONS_MISS,
OFPTFPT_APPLY_ACTIONS, OFPTFPT_APPLY_ACTIONS_MISS)
length = 4 + _len(action_ids)
return _pack("HH", type, length) + action_ids + b'\0'*(_align(length)-length)
def ofp_action_id(type, len, exp_data):
if type == OFPAT_EXPERIMENTER:
if exp_data is None:
exp_data = b""
else:
exp_data = b""
len = 4 + _len(exp_data)
return _pack("HH", type, len) + exp_data
def ofp_table_feature_prop_oxm(type, length, oxm_ids):
if isinstance(oxm_ids, (list,tuple)):
oxm_ids = _pack("%dI" % len(oxm_ids), *oxm_ids)
elif isinstance(oxm_ids, str):
pass
elif oxm_ids is None:
oxm_ids = b""
else:
raise ValueError(oxm_ids)
assert type in (OFPTFPT_MATCH, OFPTFPT_WILDCARDS, OFPTFPT_WRITE_SETFIELD, OFPTFPT_WRITE_SETFIELD_MISS,
OFPTFPT_APPLY_SETFIELD, OFPTFPT_APPLY_SETFIELD_MISS)
length = 4 + _len(oxm_ids)
return _pack("HH", type, length) + oxm_ids + b'\0'*(_align(length)-length)
def ofp_table_feature_prop_experimenter(type, length, experimenter, exp_type, data):
    assert isinstance(data, str)
length = 12 + _len(data)
assert type in (OFPTFPT_EXPERIMENTER, OFPTFPT_EXPERIMENTER_MISS)
return _pack("HHII", type, length, experimenter, exp_type) + data
# 7.3.5.6
def ofp_port_stats_request(port_no):
if port_no is None:
port_no = OFPP_ANY
return _pack("I4x", port_no)
def ofp_port_stats(length, port_no, duration_sec, duration_nsec,
rx_packets, tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped,
rx_errors, tx_errors, properties):
if isinstance(properties, (list,tuple)):
properties = b"".join([_obj(p) for p in properties])
elif isinstance(properties, str):
pass
elif properties is None:
properties = b""
else:
raise ValueError(properties)
length = 80 + _len(properties)
return _pack("H2x3I8Q", length, port_no, duration_sec, duration_nsec,
rx_packets, tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped,
rx_errors, tx_errors) + properties
def ofp_port_stats_prop_header(type, length):
return _pack("HH", type, length)
def ofp_port_stats_prop_ethernet(type, length,
rx_frame_err, rx_over_err, rx_crc_err, collisions):
type = OFPPSPT_ETHERNET
length = 40
return _pack("HH4x4Q", type, length, rx_frame_err, rx_over_err, rx_crc_err, collisions)
def ofp_port_stats_prop_optical(type, length,
flags,
tx_freq_lmda, tx_offset, tx_grid_span,
rx_freq_lmda, rx_offset, rx_grid_span,
tx_pwr, rx_pwr, bias_current, temperature):
type = OFPPSPT_OPTICAL
length = 44
return _pack("HH4x7I4H", type, length,
flags,
tx_freq_lmda, tx_offset, tx_grid_span,
rx_freq_lmda, rx_offset, rx_grid_span,
tx_pwr, rx_pwr, bias_current, temperature)
def ofp_port_stats_prop_experimenter(type, length,
experimenter, exp_type, experimenter_data):
type = OFPPSPT_EXPERIMENTER
if experimenter_data is None:
experimenter_data = b""
length = 12 + _len(experimenter_data)
return _pack("HHII", type, length,
experimenter, exp_type) + experimenter_data
# 7.3.5.9
def ofp_queue_stats_request(port_no, queue_id):
if port_no is None:
port_no = OFPP_ANY
if queue_id is None:
queue_id = OFPQ_ALL
return _pack("II", port_no, queue_id)
def ofp_queue_stats(length, port_no, queue_id, tx_bytes, tx_packets, tx_errors, duration_sec, duration_nsec, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
length = 48 + _len(properties)
return _pack("H6x2I3Q2I", length, port_no, queue_id, tx_bytes, tx_packets, tx_errors, duration_sec, duration_nsec
)+properties
def ofp_queue_stats_prop_header(type, length):
return _pack("HH", type, length)
def ofp_queue_stats_prop_experimenter(type, length, experimenter, exp_type, experimenter_data):
type = OFPQSPT_EXPERIMENTER
length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type) + experimenter_data + b'\0'*(_align(length)-length)
# 7.3.5.10
def ofp_queue_desc_request(port_no, queue_id):
if port_no is None:
port_no = OFPP_ANY
if queue_id is None:
queue_id = OFPQ_ALL
return _pack("II", port_no, queue_id)
def ofp_queue_desc(port_no, queue_id, len, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
len = 16 + _len(properties)
desc = _pack("IIH6x",
port_no,
queue_id,
len)
assert _len(desc)==16
return desc + properties
def ofp_queue_desc_prop_header(type, len):
# XXX: You won't need this function
return _pack("HH4x", type, len)
def ofp_queue_desc_prop_min_rate(type, length, rate):
type = OFPQDPT_MIN_RATE
length = 8
return _pack("HHH2x", type, length, rate)
def ofp_queue_desc_prop_max_rate(type, length, rate):
type = OFPQDPT_MAX_RATE
length = 8
return _pack("HHH2x", type, length, rate)
def ofp_queue_desc_prop_experimenter(type, length, experimenter, exp_type, experimenter_data):
    type = OFPQDPT_EXPERIMENTER
    if experimenter_data is None:
        experimenter_data = b""
    length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type
) + experimenter_data + b'\0'*(_align(length)-length)
# 7.3.5.9
def ofp_group_stats_request(group_id):
if group_id is None:
group_id = OFPG_ALL
return _pack("I4x", group_id)
def ofp_group_stats(length, group_id, ref_count, packet_count, byte_count,
duration_sec, duration_nsec, bucket_stats):
if isinstance(bucket_stats, (list,tuple)):
bucket_stats = b"".join([_obj(b) for b in bucket_stats])
elif isinstance(bucket_stats, str):
pass
elif bucket_stats is None:
bucket_stats = b""
else:
raise ValueError(bucket_stats)
length = 40 + _len(bucket_stats)
return _pack("H2xII4xQQII", length, group_id, ref_count, packet_count, byte_count,
duration_sec, duration_nsec) + bucket_stats
def ofp_bucket_counter(packet_count, byte_count):
return _pack("QQ", packet_count, byte_count)
# 7.3.5.10
def ofp_group_desc(length, type, group_id, buckets):
if isinstance(buckets, (list,tuple)):
buckets = b"".join([_obj(b) for b in buckets])
elif isinstance(buckets, str):
pass
elif buckets is None:
buckets = b""
else:
raise ValueError(buckets)
length = 8 + _len(buckets)
return _pack("HBxI", length, type, group_id) + buckets
# 7.3.5.11
def ofp_group_features(types, capabilities, max_groups, actions):
if isinstance(max_groups, (list,tuple)):
max_groups = _pack("4I", *max_groups)
elif isinstance(max_groups, str):
assert len(max_groups) == 16
elif max_groups is None:
max_groups = b'\0'*16
else:
raise ValueError(max_groups)
if isinstance(actions, (list,tuple)):
actions = _pack("4I", *actions)
elif isinstance(actions, str):
assert len(actions) == 16
elif actions is None:
actions = b'\0'*16
else:
raise ValueError(actions)
return _pack("II", types, capabilities) + max_groups + actions
# 7.3.5.14
def ofp_meter_multipart_request(meter_id):
return _pack("I4x", meter_id)
def ofp_meter_stats(meter_id, len, flow_count, packet_in_count, byte_in_count,
duration_sec, duration_nsec, band_stats):
if isinstance(band_stats, (list, tuple)):
band_stats = b"".join([_obj(b) for b in band_stats])
elif isinstance(band_stats, str):
pass
elif band_stats is None:
band_stats = b""
else:
raise ValueError(band_stats)
return _pack("IH6xIQQII", meter_id, len, flow_count, packet_in_count, byte_in_count,
duration_sec, duration_nsec) + band_stats
def ofp_meter_band_stats(packet_band_count, byte_band_count):
return _pack("QQ", packet_band_count, byte_band_count)
# 7.3.5.13
def ofp_meter_config(length, flags, meter_id, bands):
if isinstance(bands, (list,tuple)):
bands = b"".join([_obj(b) for b in bands])
elif isinstance(bands, str):
pass
elif bands is None:
bands = b""
else:
raise ValueError(bands)
length = 8 + _len(bands)
return _pack("HHI", length, flags, meter_id) + bands
# 7.3.5.16
def ofp_meter_features(max_meter, band_types, capabilities, max_bands, max_color):
return _pack("IIIBB2x", max_meter, band_types, capabilities, max_bands, max_color)
# 7.3.5.17
def ofp_flow_monitor_request(monitor_id, out_port, out_group,
flags, table_id, command, match):
return _pack("3IHBB", monitor_id, out_port, out_group,
flags, table_id, command)+_obj(match)
# 7.3.5.17.2
def ofp_flow_update_full(length, event, table_id, reason,
idle_timeout, hard_timeout, priority, cookie, match, instructions):
if isinstance(instructions, str):
pass
elif isinstance(instructions, (list, tuple)):
instructions = b"".join([_obj(p) for p in instructions])
elif instructions is None:
instructions = b""
else:
raise ValueError(instructions)
match = _obj(match)
    length = 24+_len(match)+_len(instructions)
return _pack("HHBB3H4xQ", length, event, table_id, reason,
idle_timeout, hard_timeout, priority, cookie)+match+instructions
def ofp_flow_update_abbrev(length, event, xid):
length = 8
event = OFPFME_ABBREV
return _pack("HHI", length, event, xid)
def ofp_flow_update_paused(length, event):
length = 8
return _pack("HH4x", length, event)
# 7.3.5.15
def ofp_experimenter_multipart_header(experimenter, exp_type):
return _pack("II", experimenter, exp_type)
# 7.3.7
def ofp_packet_out(header, buffer_id, in_port, actions_len, actions, data):
if isinstance(actions, (list,tuple)):
actions = b"".join([_obj(a) for a in actions])
elif isinstance(actions, str):
pass
elif actions is None:
actions = b""
else:
raise ValueError(actions)
if isinstance(data, str):
pass
elif data is None:
data = b""
else:
raise ValueError(data)
if buffer_id is None:
buffer_id = OFP_NO_BUFFER
actions_len = _len(actions)
return ofp_(header,
_pack("IIH6x", buffer_id, in_port, actions_len) + actions + data,
OFPT_PACKET_OUT)
# 7.3.8
def ofp_role_request(header, role, generation_id):
return ofp_(header,
_pack("I4xQ", role, generation_id),
(OFPT_ROLE_REQUEST, OFPT_ROLE_REPLY))
# 7.3.9.1
def ofp_bundle_ctrl_msg(header, bundle_id, type, flags, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
return ofp_(header,
_pack("IHH", bundle_id, type, flags)+properties,
OFPT_BUNDLE_CONTROL)
# 7.3.9.2
def ofp_bundle_add_msg(header, bundle_id, flags, message, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
return ofp_(header,
_pack("I2xH", bundle_id, flags)+_obj(message)+properties,
OFPT_BUNDLE_ADD_MESSAGE)
# 7.3.9.4
def ofp_bundle_prop_header(type, length):
return _pack("HH", type, length)
def ofp_bundle_prop_experimenter(type, length,
experimenter, exp_type, experimenter_data):
type = OFPBPT_EXPERIMENTER
length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type
)+experimenter_data+b'\0'*(_align(length)-length)
# 7.3.10
def ofp_async_config(header, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
return ofp_(header,
properties,
(OFPT_GET_ASYNC_REPLY, OFPT_SET_ASYNC))
def ofp_async_config_prop_header(type, length):
return _pack("HH", type, length)
def ofp_async_config_prop_reasons(type, length, mask):
assert type in (OFPACPT_PACKET_IN_SLAVE, OFPACPT_PACKET_IN_MASTER,
OFPACPT_PORT_STATUS_SLAVE, OFPACPT_PORT_STATUS_MASTER,
OFPACPT_FLOW_REMOVED_SLAVE, OFPACPT_FLOW_REMOVED_MASTER,
OFPACPT_ROLE_STATUS_SLAVE, OFPACPT_ROLE_STATUS_MASTER,
OFPACPT_TABLE_STATUS_SLAVE, OFPACPT_TABLE_STATUS_MASTER,
OFPACPT_REQUESTFORWARD_SLAVE, OFPACPT_REQUESTFORWARD_MASTER)
length = 8
return _pack("HHI", type, length, mask)
def ofp_async_config_prop_experimenter(type, length,
experimenter, exp_type, experimenter_data):
    assert type in (OFPACPT_EXPERIMENTER_SLAVE, OFPACPT_EXPERIMENTER_MASTER)
length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type
)+experimenter_data+b'\0'*(_align(length)-length)
# 7.4.1
def ofp_packet_in(header, buffer_id, total_len, reason, table_id, cookie, match, data):
return ofp_(header,
_pack("IHBBQ", buffer_id, total_len, reason, table_id, cookie) + _obj(match) + b"\0"*2 + data,
OFPT_PACKET_IN)
# 7.4.2
def ofp_flow_removed(header, cookie, priority, reason, table_id,
duration_sec, duration_nsec, idle_timeout, hard_timeout,
packet_count, byte_count, match):
return ofp_(header,
_pack("QHBBIIHHQQ", cookie, priority, reason, table_id,
duration_sec, duration_nsec, idle_timeout, hard_timeout,
packet_count, byte_count) + _obj(match),
OFPT_FLOW_REMOVED)
# 7.4.3
def ofp_port_status(header, reason, desc):
return ofp_(header,
_pack("B7x", reason) + _obj(desc),
        OFPT_PORT_STATUS)
# 7.4.4
def ofp_role_status(header, role, reason, generation_id, properties):
if isinstance(properties, str):
pass
elif isinstance(properties, (list, tuple)):
properties = b"".join([_obj(p) for p in properties])
elif properties is None:
properties = b""
else:
raise ValueError(properties)
return ofp_(header,
_pack("IB3xQ", role, reason, generation_id)+properties,
        OFPT_ROLE_STATUS)
def ofp_role_prop_header(type, length):
return _pack("HH", type, length)
def ofp_role_prop_experimenter(type, length, experimenter, exp_type, experimenter_data):
type = OFPRPT_EXPERIMENTER
if experimenter_data is None:
experimenter_data = b""
length = 12 + _len(experimenter_data)
return _pack("HHII", type, length, experimenter, exp_type
) + experimenter_data + b'\0'*(_align(length)-length)
# 7.4.5
def ofp_table_status(header, reason, table):
return ofp_(header,
_pack("H7x", reason)+_obj(table),
OFPT_TABLE_STATUS)
# 7.4.6
def ofp_requestforward_header(header, request):
return ofp_(header,
_obj(request),
OFPT_REQUESTFORWARD)
# 7.5.1
def ofp_hello(header, elements):
if isinstance(elements, str):
pass
elif isinstance(elements, (tuple, list)):
elements = b"".join([_obj(e) for e in elements])
elif elements is None:
elements = b""
else:
raise ValueError(elements)
return ofp_(header, elements, OFPT_HELLO)
def ofp_hello_elem_header(type, length):
return _pack("HH", type, length)
def ofp_hello_elem_versionbitmap(type, length, bitmaps):
if type is None:
type = 1
assert type == 1 # VERSIONBITMAP
if isinstance(bitmaps, str):
pass
elif isinstance(bitmaps, (tuple, list)):
bitmaps = b"".join([_pack("I",e) for e in bitmaps])
elif bitmaps is None:
bitmaps = b""
else:
raise ValueError("%s" % bitmaps)
length = 4 + _len(bitmaps)
return struct.pack("!HH", type, length) + bitmaps + b'\0'*(_align(length)-length)
# 7.5.4
def ofp_error_msg(header, type, code, data):
if data is None:
data = b""
return ofp_(header,
_pack("HH", type, code)+data,
OFPT_ERROR)
def ofp_error_experimenter_msg(header, type, exp_code, experimenter, data):
type = OFPET_EXPERIMENTER
return ofp_(header,
_pack("HHI", type, exp_code, experimenter)+data,
OFPT_ERROR)
# 7.5.5
def ofp_experimenter_msg(header, experimenter, exp_type, experimenter_data):
if experimenter_data is None:
experimenter_data = b""
return ofp_(header,
_pack("II", experimenter, exp_type) + experimenter_data,
OFPT_EXPERIMENTER)
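# Illustrative usage sketch (added for demonstration; not part of the original
# module). It relies only on behaviour visible in this file: ofp_desc pads its
# five string fields to a fixed 1056-byte body, and ofp_hello_elem_versionbitmap
# computes its own length and pads to an 8-byte boundary.
if __name__ == "__main__":
    body = ofp_desc(b"ACME", b"hw-1", b"sw-1", b"serial-42", b"test datapath")
    assert _len(body) == 1056
    # type=None defaults to 1 (VERSIONBITMAP); bit 4 advertises wire version 0x04
    elem = ofp_hello_elem_versionbitmap(None, None, [1 << 4])
    assert _len(elem) % 8 == 0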
|
400787
|
import ezc3d
import xarray as xr
from ._constants import EXPECTED_VALUES, MARKERS_ANALOGS_C3D
from .utils import is_expected_array
def test_ezc3d():
c3d = ezc3d.c3d(f"{MARKERS_ANALOGS_C3D}")
is_expected_array(xr.DataArray(c3d["data"]["points"]), **EXPECTED_VALUES[65])
is_expected_array(xr.DataArray(c3d["data"]["analogs"]), **EXPECTED_VALUES[66])
|
400872
|
import praw
from prawcore.exceptions import ServerError
from logger import log
import json
import Config
import os
from pathlib import Path
logger = log('reddit')
def get_user_subscriptions():
users = []
empty_users = []
subs = {}
erroredUsers = []
i = 1
with open('../shared/nsfw_subs.json', 'r') as f:
nsfw_subs = set(json.load(f)['subs'])
with open('db.json') as f:
db_entries = json.load(f)
n = len(db_entries)
for db_entry in db_entries:
print(f'Processing user {str(i)} of {n}', end='\r')
logger.info(f'Processing user {str(i)} of {n} with username {db_entry["name"]}')
refresh_token = db_entry['refreshToken']
if not refresh_token:
raise Exception('null refreshToken')
reddit_user = praw.Reddit(client_id=Config.webapp_client_id,
client_secret=Config.webapp_client_secret,
refresh_token=refresh_token,
user_agent=Config.user_agent)
user_subs = []
while True: # loop to keep trying if response 500 is received
try:
# suspended users cannot be messaged, and therefore cannot take part in matching
if reddit_user.user.me().is_suspended:
logger.info('user is suspended')
break
subreddit_subscriptions = list(reddit_user.user.subreddits(limit=None))
for subreddit in subreddit_subscriptions:
subreddit_name = subreddit.display_name
if subreddit_name[0:2] != 'u_' and subreddit_name not in nsfw_subs:
user_subs.append(subreddit_name)
if subreddit_name not in subs:
subs[subreddit_name] = subreddit.subscribers
if not user_subs:
empty_users.append(db_entry['name'])
else:
users.append({'name': db_entry['name'], 'subscriptions': user_subs})
logger.debug(user_subs)
break
except ServerError as e:
print(e)
print(db_entry)
print('Retrying...')
except Exception as e:
print(e)
print(db_entry['name'])
erroredUsers.append(db_entry['name'])
break
i += 1
logger.info(f'Number of failed user info retrieval attempts: {len(erroredUsers)}')
logger.debug(erroredUsers)
logger.debug(empty_users)
# dump in case something goes wrong during matching
cur_dir = os.getcwd()
Path(f'{cur_dir}/dump').mkdir(parents=True, exist_ok=True)
with open(f'{cur_dir}/dump/subs.json', 'w', encoding='utf-8') as f:
json.dump({ 'subs': subs }, f, ensure_ascii=False, indent=4)
with open(f'{cur_dir}/dump/users.json', 'w', encoding='utf-8') as f:
json.dump({ 'users': users }, f, ensure_ascii=False, indent=4)
with open(f'{cur_dir}/dump/empty_users.json', 'w', encoding='utf-8') as f:
json.dump({ 'empty_users': empty_users }, f, ensure_ascii=False, indent=4)
return users, empty_users, subs
def message_users(matches, unmatched_users, empty_users, round_number):
submatch_bot = praw.Reddit(user_agent=Config.user_agent,
username=Config.username, password=Config.password,
client_id=Config.script_client_id, client_secret=Config.script_client_secret,
refresh_token=Config.script_refresh_token)
messageSubject = f'Submatch Matching Round {round_number} Results'
messageFooter = '-----\n\n^I ^do ^not ^reply ^to ^messages ^| ^[Code](https://github.com/LucasAnderson07/RedditSubMatch) ^| ^Problems ^with ^your ^match ^or ^have ^questions? ^[Message](https://www.reddit.com/message/compose/?to=r/submatch) ^the ^mods'
deleted_matches = []
print('messaging empty users...')
for user in empty_users:
message = f'Hey {user},\n\n'
message += 'Unfortunately, you were not matched this round because you currently aren\'t subscribed to any subreddits that are SFW!\n\n'
message += 'If you would like to participate in the next round of matching, please subscribe to subreddits that align with your interests.\n\n'
message += messageFooter
try:
submatch_bot.redditor(user).message(messageSubject, message)
logger.info(f'messaged empty user {user}')
except Exception as e:
logger.error(f'received error when attempting to message {user}')
logger.error(e)
print('messaging unmatched users...')
for user in unmatched_users:
message = f'Hey {user},\n\n'
message += 'Unfortunately, a good match was unable to be found for you this round.\n\n'
message += 'However, every round of matching always prioritizes unmatched users from the round before, so you are sure to get a match next round!\n\n'
message += 'Also, chances of getting a better match can always be increased by subscribing to more subreddits that align with your interests.\n\n'
message += messageFooter
try:
submatch_bot.redditor(user).message(messageSubject, message)
logger.info(f'messaged unmatched user {user}')
except Exception as e:
logger.error(f'received error when attempting to message {user}')
logger.error(e)
print('messaging matched users...')
for match in matches:
user1 = match[0]
user2 = match[1]
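        # the same notification goes to both sides of the match: after the
        # first send, user1 and user2 are swapped at the bottom of this loop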
for _ in range(2):
message = f'Hey {user1},\n\n'
message += f'You have been matched with u/{user2}! Here is the list of {"" if len(match[2]) < 100 else "the 100 smallest "}subreddits the two of you have in common:\n\n'
i = 1
for subreddit in match[2]:
message += f'- r/{subreddit[1]} - {subreddit[0]} subscribers\n'
if i == 100:
break
i += 1
message += f'\nWanna send u/{user2} a message? [Click this link!](https://www.reddit.com/message/compose/?to={user2})\n\n'
message += messageFooter
try:
submatch_bot.redditor(user1).message(messageSubject, message)
logger.info(f'messaged matched user {user1} about match with {user2}')
except Exception as e:
logger.error(f'received error when attempting to message {user1}')
logger.error(e)
deleted_matches.append(user2)
user1, user2 = user2, user1
return deleted_matches
|
400888
|
import os
import pandas as pd
import calendar
import datetime as dt
import requests
URL = "https://earthquake.usgs.gov/fdsnws/event/1/query.csv?starttime={start}&endtime={end}&minmagnitude=2.0&orderby=time"
for yr in range(2000, 2019):
for m in range(1, 13):
if os.path.isfile('{yr}_{m}.csv'.format(yr=yr, m=m)):
continue
_, ed = calendar.monthrange(yr, m)
start = dt.datetime(yr, m, 1)
end = dt.datetime(yr, m, ed, 23, 59, 59)
with open('{yr}_{m}.csv'.format(yr=yr, m=m), 'w', encoding='utf-8') as f:
            f.write(requests.get(URL.format(start=start.isoformat(), end=end.isoformat())).content.decode('utf-8'))
dfs = []
for i in range(2000, 2019):
for m in range(1, 13):
if not os.path.isfile('%d_%d.csv' % (i, m)):
continue
df = pd.read_csv('%d_%d.csv' % (i, m), dtype={'nst': 'float64'})
dfs.append(df)
df = pd.concat(dfs, sort=True)
df.to_parquet('../earthquakes.parq', 'fastparquet')
# Reprojected, cleaned and gzip (not snappy)
# import numpy as np
# import pandas as pd
# from holoviews.util.transform import lon_lat_to_easting_northing
# df = pd.read_parquet('../data/earthquakes.parq')
# #df.time = df.time.astype('datetime64[ns]')
# cleaned_df = df.copy()
# cleaned_df['mag'] = df.mag.where(df.mag > 0)
# cleaned_df = cleaned_df.reset_index()
# x, y = lon_lat_to_easting_northing(cleaned_df.longitude, cleaned_df.latitude)
# cleaned_projected = cleaned_df.join([pd.DataFrame({'easting': x}), pd.DataFrame({'northing': y})])
# cleaned_projected.to_parquet('../data/earthquakes-projected.parq', 'fastparquet', compression='gzip', file_scheme='simple')
|
400894
|
import os
import yaml
from .test_utils import CliTestCase, skip_if_environ
class BuildAndLintTestCase(CliTestCase):
def test_build_and_lint(self):
with self._isolate():
self._check_exit_code(_init_command())
self._check_lint(exit_code=0)
def test_build_and_lint_with_macros(self):
with self._isolate() as f:
self._check_exit_code(_init_command(macros=True))
self._check_lint(exit_code=0)
macros_file = os.path.join(f, "macros.xml")
assert os.path.exists(macros_file)
def test_lint_fails_if_no_help(self):
with self._isolate():
self._check_exit_code(_init_command(help_text=False))
self._check_lint(exit_code=1)
def test_lint_fails_if_no_test(self):
with self._isolate():
self._check_exit_code(_init_command(test_case=False))
self._check_lint(exit_code=1)
def test_lint_fails_if_no_doi(self):
with self._isolate():
self._check_exit_code(_init_command(doi=False))
self._check_lint(exit_code=1)
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl(self):
with self._isolate() as f:
self._check_exit_code(_cwl_init_command())
self._check_lint(filename="seqtk_seq.cwl", exit_code=0)
with open(os.path.join(f, "seqtk_seq.cwl")) as stream:
process_dict = yaml.safe_load(stream)
assert process_dict["id"] == "seqtk_seq"
assert process_dict["label"] == "Convert to FASTA (seqtk)"
assert process_dict["baseCommand"] == ["seqtk", "seq"]
input0 = process_dict["inputs"]["input1"]
assert input0["inputBinding"]["position"] == 1
assert input0["inputBinding"]["prefix"] == "-a"
assert input0["type"] == "File"
output = process_dict["outputs"]["output1"]
assert output["type"] == "File"
assert output["outputBinding"]["glob"] == "out"
assert process_dict["stdout"] == "out"
with open(os.path.join(f, "seqtk_seq_tests.yml")) as stream:
test_dict = yaml.safe_load(stream)
assert test_dict
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl_fail_on_empty_help(self):
with self._isolate():
self._check_exit_code(_cwl_init_command(help_text=False))
self._check_lint(filename="seqtk_seq.cwl", exit_code=1)
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl_fail_on_no_docker(self):
with self._isolate():
            self._check_exit_code(_cwl_init_command(container=False))
self._check_lint(filename="seqtk_seq.cwl", exit_code=1)
def _check_lint(self, filename="seqtk_seq.xml", exit_code=0):
lint_cmd = ["lint", "--fail_level", "warn", filename]
try:
self._check_exit_code(lint_cmd, exit_code=exit_code)
except Exception:
with open(filename, "r") as f:
print("Failing file contents are [%s]." % f.read())
raise
def _cwl_init_command(help_text=True, container=True, test_case=True):
command = [
"tool_init", "--force", "--cwl",
"--id", "seqtk_seq",
"--name", "Convert to FASTA (seqtk)",
"--name", "Convert to FASTA (seqtk)",
"--example_command", "seqtk seq -a 2.fastq > 2.fasta",
"--example_input", "2.fastq",
"--example_output", "2.fasta"
]
if container:
command.extend(["--container", "quay.io/biocontainers/seqtk:1.2--0"])
if help_text:
command.extend(["--help_text", "The help text."])
if test_case:
command.append("--test_case")
return command
def _init_command(test_case=True, help_text=True, doi=True, macros=False):
command = [
"tool_init", "--force",
"--id", "seqtk_seq",
"--name", "Convert to FASTA (seqtk)",
"--requirement", "seqtk@1.0-r68",
"--example_command", "seqtk seq -a 2.fastq > 2.fasta",
"--example_input", "2.fastq",
"--example_output", "2.fasta"
]
if test_case:
command.append("--test_case")
if help_text:
command.extend(["--help_text", "The help text."])
if doi:
command.extend(["--doi", "10.1101/014043"])
command.extend(["--cite_url", "https://github.com/ekg/vcflib"])
command.extend(["--cite_url", "http://wiki.hpc.ufl.edu/doc/Seqtk"])
if macros:
command.append("--macros")
return command
|
400935
|
from typing import List
class Solution:
def subsetsWithDup(self, nums: List[int], sorted: bool = False) -> List[List[int]]:
if not nums: return [[]]
if len(nums) == 1: return [[], nums]
if not sorted: nums.sort()
pre_lists = self.subsetsWithDup(nums[:-1], sorted=True)
all_lists = [i + [nums[-1]] for i in pre_lists] + pre_lists
result = []
for i in all_lists:
if i not in result:
result.append(i)
return result
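# Quick sanity check (illustrative; not part of the LeetCode harness): the six
# unique subsets of [1, 2, 2] are [], [1], [2], [1, 2], [2, 2] and [1, 2, 2];
# duplicates in the input must not yield duplicate subsets.
if __name__ == "__main__":
    print(Solution().subsetsWithDup([1, 2, 2]))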
|
400951
|
import FWCore.ParameterSet.Config as cms
pfAllMuons = cms.EDFilter("PFCandidateFwdPtrCollectionPdgIdFilter",
src = cms.InputTag("pfNoPileUp"),
pdgId = cms.vint32( -13, 13),
makeClones = cms.bool(True)
)
pfAllMuonsClones = cms.EDProducer("PFCandidateProductFromFwdPtrProducer",
src = cms.InputTag("pfAllMuons")
)
|
400966
|
import os,random,glob
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.description='split jpg scans into per-person train and validation lists'
parser.add_argument("-p", "--path", help="root path of the jpgs to separate",
type=str,
default= '/mnt/data9/independent_raw/')
parser.add_argument("-t", "--train_txt",
help="train list output path",
type=str,
default='txt/ind_train_jpg.txt')
parser.add_argument("-v", "--val_txt",
help="validation list output path",
type=str,
default='lists/ind_list.list')
args = parser.parse_args()
#path=['/mnt/data7/slice_test_seg/jpgs2']
f1 = open(args.train_txt, 'w')
f2 = open(args.val_txt, 'w')
path=args.path
c=0
train_jpg=[]
test_jpg=[]
for type in os.listdir(path):
All = []
for person in os.listdir(os.path.join(path,type)):
for scan in os.listdir(os.path.join(path,type,person)):
jpgs=os.listdir(os.path.join(path,type,person,scan))
id=[int(j.split('.')[0]) for j in jpgs]
try:
mm=np.max(id)
valid=np.arange(int(mm//4),int(mm*3//4)).tolist()
jpgs=[os.path.join(path,type,person,scan,j) for j in jpgs if int(j.split('.')[0]) in valid]
All+=jpgs
except:
continue
persons=[allone.split('/')[-3] for allone in All]
persons=list(set(persons))
random.shuffle(persons)
ptrain=persons[:len(persons)//2]
ptest = persons[len(persons) // 2:]
train_jpg+=[a for a in All if a.split('/')[-3] in ptrain]
for item in ptest:
ap=os.listdir(os.path.join(path, type, item))
test_jpg+=[os.path.join(path,type,item,p)for p in ap]
for p in train_jpg:
f1.writelines(p+'\n')
for p in test_jpg:
f2.writelines(p+'\n')
|
400970
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import mxnet as mx
from mxnet import ndarray as nd
import random
import argparse
import cv2
import time
import sklearn
from sklearn.decomposition import PCA
from easydict import EasyDict as edict
from sklearn.cluster import DBSCAN
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__),'..', 'common'))
import face_image
def do_clean(args):
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd)>0:
for i in xrange(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx)==0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
ctx_num = len(ctx)
path_imgrec = os.path.join(args.input, 'train.rec')
path_imgidx = os.path.join(args.input, 'train.idx')
imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
s = imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
assert header.flag>0
print('header0 label', header.label)
header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
imgidx = range(1, int(header.label[0]))
id2range = {}
seq_identity = range(int(header.label[0]), int(header.label[1]))
for identity in seq_identity:
s = imgrec.read_idx(identity)
header, _ = mx.recordio.unpack(s)
id2range[identity] = (int(header.label[0]), int(header.label[1]))
print('id2range', len(id2range))
prop = face_image.load_property(args.input)
image_size = prop.image_size
print('image_size', image_size)
vec = args.model.split(',')
prefix = vec[0]
epoch = int(vec[1])
print('loading',prefix, epoch)
model = mx.mod.Module.load(prefix, epoch, context = ctx)
model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
if args.test==0:
if not os.path.exists(args.output):
os.makedirs(args.output)
writer = mx.recordio.MXIndexedRecordIO(os.path.join(args.output, 'train.idx'), os.path.join(args.output, 'train.rec'), 'w')
nrof_images = 0
nrof_removed = 0
idx = 1
id2label = {}
pp = 0
for _id, v in id2range.iteritems():
pp+=1
if pp%100==0:
print('stat', nrof_images, nrof_removed)
_list = range(*v)
ocontents = []
for i in xrange(len(_list)):
_idx = _list[i]
s = imgrec.read_idx(_idx)
ocontents.append(s)
if len(ocontents)>15:
nrof_removed+=len(ocontents)
continue
embeddings = None
#print(len(ocontents))
ba = 0
while True:
bb = min(ba+args.batch_size, len(ocontents))
if ba>=bb:
break
_batch_size = bb-ba
_batch_size2 = max(_batch_size, ctx_num)
data = nd.zeros( (_batch_size2,3, image_size[0], image_size[1]) )
label = nd.zeros( (_batch_size2,) )
count = bb-ba
ii=0
for i in xrange(ba, bb):
header, img = mx.recordio.unpack(ocontents[i])
img = mx.image.imdecode(img)
img = nd.transpose(img, axes=(2, 0, 1))
data[ii][:] = img
label[ii][:] = header.label
ii+=1
while ii<_batch_size2:
data[ii][:] = data[0][:]
label[ii][:] = label[0][:]
ii+=1
db = mx.io.DataBatch(data=(data,), label=(label,))
model.forward(db, is_train=False)
net_out = model.get_outputs()
net_out = net_out[0].asnumpy()
if embeddings is None:
embeddings = np.zeros( (len(ocontents), net_out.shape[1]))
embeddings[ba:bb,:] = net_out[0:_batch_size,:]
ba = bb
embeddings = sklearn.preprocessing.normalize(embeddings)
contents = []
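        # mode 1 keeps images whose cosine similarity to the identity's mean
        # embedding is at least args.threshold; any other mode clusters the
        # embeddings with DBSCAN and keeps the largest cluster (noise excluded)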
if args.mode==1:
emb_mean = np.mean(embeddings, axis=0, keepdims=True)
emb_mean = sklearn.preprocessing.normalize(emb_mean)
sim = np.dot(embeddings, emb_mean.T)
#print(sim.shape)
sim = sim.flatten()
#print(sim.flatten())
x = np.argsort(sim)
for ix in xrange(len(x)):
_idx = x[ix]
_sim = sim[_idx]
#if ix<int(len(x)*0.3) and _sim<args.threshold:
if _sim<args.threshold:
continue
contents.append(ocontents[_idx])
else:
y_pred = DBSCAN(eps = args.threshold, min_samples = 2).fit_predict(embeddings)
#print(y_pred)
gmap = {}
for _idx in xrange(embeddings.shape[0]):
label = int(y_pred[_idx])
if label not in gmap:
gmap[label] = []
gmap[label].append(_idx)
assert len(gmap)>0
_max = [0, 0]
for label in xrange(10):
if not label in gmap:
break
glist = gmap[label]
if len(glist)>_max[1]:
_max[0] = label
_max[1] = len(glist)
if _max[1]>0:
glist = gmap[_max[0]]
for _idx in glist:
contents.append(ocontents[_idx])
nrof_removed+=(len(ocontents)-len(contents))
if len(contents)==0:
continue
#assert len(contents)>0
id2label[_id] = (idx, idx+len(contents))
nrof_images += len(contents)
for content in contents:
if args.test==0:
writer.write_idx(idx, content)
idx+=1
id_idx = idx
if args.test==0:
for _id, _label in id2label.iteritems():
_header = mx.recordio.IRHeader(1, _label, idx, 0)
s = mx.recordio.pack(_header, '')
writer.write_idx(idx, s)
idx+=1
_header = mx.recordio.IRHeader(1, (id_idx, idx), 0, 0)
s = mx.recordio.pack(_header, '')
writer.write_idx(0, s)
print(nrof_images, nrof_removed)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='do data clean')
# general
parser.add_argument('--input', default='', type=str, help='')
parser.add_argument('--output', default='', type=str, help='')
parser.add_argument('--model', default='../model/softmax,50', help='path to load model.')
parser.add_argument('--batch-size', default=32, type=int, help='')
parser.add_argument('--threshold', default=0.6, type=float, help='')
parser.add_argument('--mode', default=1, type=int, help='')
parser.add_argument('--test', default=0, type=int, help='')
args = parser.parse_args()
do_clean(args)
|
400975
|
from datetime import datetime
from typing import Dict, List, Optional
from pydantic import BaseModel
from common_osint_model.models import ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger
from common_osint_model.models.http import HTTPComponent
from common_osint_model.models.ssh import SSHComponent
from common_osint_model.models.tls import TLSComponent
from common_osint_model.models.dns import DNSComponent
from common_osint_model.utils import hash_all
class Service(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger):
"""Represents a single service answering connections on specific ports."""
port: int
# Banner is optional as not every scanning service offers complete banners as response. Banners might be
# reconstructed from the data, but some attributes might have the wrong order then (e.g. HTTP headers).
# The according hashes are also not reliable because of this.
banner: Optional[str]
md5: Optional[str]
sha1: Optional[str]
sha256: Optional[str]
murmur: Optional[str]
# Every service object should include these timestamps. "timestamp" can be used for tracking the observation
# timestamp from scanning services (e.g. Shodan)
first_seen: Optional[datetime] = datetime.utcnow()
last_seen: Optional[datetime] = datetime.utcnow()
timestamp: Optional[datetime]
# We need to include every possible service component here. In order to not export empty dictionary keys, the class
# object can be exported with dict(exclude_none=True), so e.g. empty tls keys are skipped.
http: Optional[HTTPComponent]
tls: Optional[TLSComponent]
ssh: Optional[SSHComponent]
dns: Optional[DNSComponent]
# Typically hosts consist of different services which might be discovered by different scanning services, so
# remarking which service was observed by which scanner might be a good idea.
source: str
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class using a dictionary with typical shodan data."""
if isinstance(d, List):
cls.info("The dictionary given is a list. Typically this list represents multiple services. Iterate over "
"the list to create Service objects for every item available. "
"This method just uses the first item.")
d = d[0]
port = d["port"]
sshobj = None
if "ssh" in d:
sshobj = SSHComponent.from_shodan(d)
httpobj = None
if "http" in d:
httpobj = HTTPComponent.from_shodan(d)
tlsobj = None
if "ssl" in d:
tlsobj = TLSComponent.from_shodan(d)
dnsobj = None
if "dns" in d:
dnsobj = DNSComponent.from_shodan(d)
banner = d["data"]
md5, sha1, sha256, murmur = hash_all(banner.encode("utf-8"))
return Service(
port=port,
banner=d["data"],
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
ssh=sshobj,
http=httpobj,
tls=tlsobj,
dns=dnsobj,
timestamp=datetime.fromisoformat(d["timestamp"]),
source="shodan"
)
@classmethod
def from_censys(cls, d: Dict):
"""Creates an instance of this class using a dictionary with typical Censys data."""
port = d["port"]
banner = d.get("banner", None)
md5, sha1, sha256, murmur = None, None, None, None
if banner:
md5, sha1, sha256, murmur = hash_all(banner.encode("utf-8"))
httpobj = None
if "http" in d:
httpobj = HTTPComponent.from_censys(d)
tlsobj = None
if "tls" in d:
tlsobj = TLSComponent.from_censys(d)
sshobj = None
if "ssh" in d:
sshobj = SSHComponent.from_censys(d)
dnsobj = None
if "dns" in d:
dnsobj = DNSComponent.from_censys(d)
return Service(
port=port,
banner=banner,
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
http=httpobj,
tls=tlsobj,
ssh=sshobj,
dns=dnsobj,
timestamp=datetime.fromisoformat(d["observed_at"][:-4]),
source="censys"
)
@classmethod
def from_binaryedge(cls, d: List):
"""Creates an instance of this class using a dictionary with typical BinaryEdge data. Contrary to the other
scanning services, binaryedge provides multiple entries per port."""
port = d[0]["target"]["port"]
type_index: Dict[str, int] = {service["origin"]["type"]: idx for idx, service in enumerate(d)}
httpobj = None
if "webv2" in type_index:
httpobj = HTTPComponent.from_binaryedge(d[type_index["webv2"]])
tlsobj = None
if "ssl-simple" in type_index:
tlsobj = TLSComponent.from_binaryedge(d[type_index["ssl-simple"]])
sshobj = None
if "ssh" in type_index:
sshobj = SSHComponent.from_binaryedge(d[type_index["ssh"]])
banner = None
md5, sha1, sha256, murmur = None, None, None, None
if "service-simple" in type_index:
banner = d[type_index["service-simple"]]["result"]["data"]["service"].get("banner", None)
if banner:
md5, sha1, sha256, murmur = hash_all(banner.encode("utf-8"))
return Service(
port=port,
http=httpobj,
tls=tlsobj,
ssh=sshobj,
banner=banner,
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
source="binaryedge"
)
|
400976
|
import numpy as np
import matplotlib.pyplot as plt
from FEM.Torsion2D import Torsion2D
from FEM.Mesh.Geometry import Geometry
G = 1
phi = 1
geometria = Geometry.loadmsh('Mesh_tests/Web_test.msh')
O = Torsion2D(geometria, G, phi)
O.solve()
plt.show()
|
400992
|
from kadot.models import CRFExtractor
# Find the city in a weather related query
train = {
"What is the weather like in Paris ?": ('Paris',),
"What kind of weather will it do in London ?": ('London',),
"Give me the weather forecast for Berlin please.": ('Berlin',),
"Tell me the forecast in New York !": ('New', 'York'),
"Give me the weather in San Francisco...": ('San', 'Francisco'),
"I want the forecast in Dublin.": ('Dublin',)
}
test = [
"the forecast for Barcelona",
"will it rain in Los Angeles ?",
"Give me the weather !"
]
city_recognizer = CRFExtractor(train)
for test_sample in test:
city = city_recognizer.predict(test_sample)
print('"{}" -> {}'.format(test_sample, city))
|
400997
|
from __future__ import absolute_import, division, print_function
from cctbx.xray import ext
from cctbx.array_family import flex
from libtbx.test_utils import approx_equal
from libtbx.math_utils import iceil
from itertools import count
import sys
from six.moves import range
from six.moves import zip
class random_inputs(object):
def __init__(O, mt, n_refl, target_type, obs_type):
O.target_type = target_type
O.obs_type = obs_type
O.obs = mt.random_double(size=n_refl)
O.weights = mt.random_double(size=n_refl)
rff = flex.bool(max(1,iceil(n_refl*0.6)), False)
rff.resize(n_refl, True)
O.r_free_flags = rff.select(mt.random_permutation(size=n_refl))
O.scale_factor = 1 + mt.random_double()
O.a = mt.random_double(size=n_refl)
O.b = mt.random_double(size=n_refl)
def get(O, derivatives_depth=0):
if (O.target_type == "ls"):
return ext.targets_least_squares(
compute_scale_using_all_data=False,
obs_type=O.obs_type,
obs=O.obs,
weights=O.weights,
r_free_flags=O.r_free_flags,
f_calc=flex.complex_double(O.a, O.b),
derivatives_depth=derivatives_depth,
scale_factor=O.scale_factor)
if (O.target_type == "cc"):
return ext.targets_correlation(
obs_type=O.obs_type,
obs=O.obs,
weights=O.weights,
r_free_flags=O.r_free_flags,
f_calc=flex.complex_double(O.a, O.b),
derivatives_depth=derivatives_depth)
raise RuntimeError("Unknown target_type.")
def gradients_work_fd(O, eps=1.e-6):
result = flex.complex_double()
for ih,a,b,f in zip(count(), O.a, O.b, O.r_free_flags):
if (f): continue
def fd(x, Ox):
fs = []
for signed_eps in [eps, -eps]:
Ox[ih] = x + signed_eps
fs.append(O.get(derivatives_depth=0).target_work())
Ox[ih] = x
return (fs[0]-fs[1])/(2*eps)
result.append(complex(fd(a, O.a), fd(b, O.b)))
return result
def hessians_work_fd(O, eps=1.e-6):
result = flex.vec3_double()
for ih,a,b,f in zip(count(), O.a, O.b, O.r_free_flags):
if (f): continue
def fd(x, Ox, ri):
fs = []
for signed_eps in [eps, -eps]:
Ox[ih] = x + signed_eps
ga = O.get(derivatives_depth=1).gradients_work()
fs.append(getattr(ga[len(result)], ri))
Ox[ih] = x
return (fs[0]-fs[1])/(2*eps)
daa = fd(a, O.a, "real")
dbb = fd(b, O.b, "imag")
dab = fd(a, O.a, "imag")
dba = fd(b, O.b, "real")
assert approx_equal(dab, dba)
result.append((daa, dbb, dab))
return result
def exercise_random(n_trials=10, n_refl=30):
mt = flex.mersenne_twister(seed=0)
for target_type in ["ls", "cc"]:
for i_trial in range(n_trials):
for obs_type in ["F", "I"]:
ri = random_inputs(
mt=mt, n_refl=n_refl, target_type=target_type, obs_type=obs_type)
tg = ri.get(derivatives_depth=2)
ga = tg.gradients_work()
gf = ri.gradients_work_fd()
assert approx_equal(ga, gf)
ca = tg.hessians_work()
cf = ri.hessians_work_fd()
assert approx_equal(ca, cf)
def exercise_singular_least_squares():
obs = flex.double([1.234])
weights_2345 = flex.double([2.345])
weights_zero = flex.double([0])
r_free_flags = flex.bool([False])
a = flex.double([0])
b = flex.double([0])
for obs_type in ["F", "I"]:
for weights,scale_factor in [
(weights_2345, 3.456),
(weights_zero, 0)]:
tg = ext.targets_least_squares(
compute_scale_using_all_data=False,
obs_type=obs_type,
obs=obs,
weights=weights,
r_free_flags=r_free_flags,
f_calc=flex.complex_double(a, b),
derivatives_depth=2,
scale_factor=scale_factor)
if (weights is weights_2345):
assert approx_equal(tg.scale_factor(), scale_factor)
assert list(tg.gradients_work()) == [0j]
assert list(tg.hessians_work()) == [(1,1,1)]
else:
assert tg.scale_factor() is None
assert tg.target_work() is None
assert tg.target_test() is None
assert tg.gradients_work().size() == 0
assert tg.hessians_work().size() == 0
def exercise_singular_correlation():
def check():
for obs_type in ["F", "I"]:
tg = ext.targets_correlation(
obs_type=obs_type,
obs=obs,
weights=weights,
r_free_flags=None,
f_calc=flex.complex_double(a, b),
derivatives_depth=2)
assert tg.cc() is None
assert tg.target_work() is None
assert tg.target_test() is None
assert tg.gradients_work().size() == 0
assert tg.hessians_work().size() == 0
obs = flex.double([1.234])
weights = None
a = flex.double([0])
b = flex.double([0])
check()
obs = flex.double([1.234, 2.345])
a = flex.double([1, 1])
b = flex.double([2, 2])
check()
weights = flex.double([0,0])
a = flex.double([1, 2])
b = flex.double([3, 4])
check()
def run(args):
assert len(args) == 0
exercise_random()
exercise_singular_least_squares()
exercise_singular_correlation()
print("OK")
if (__name__ == "__main__"):
run(args=sys.argv[1:])
|
401007
|
from instructionexecutor import InstructionExecutor
from optimizer import Optimizer
from tracereader import EffectReader, TraceReader
from ceptions import TimeoutException
import signal, sys
def handler(signum, frame):
raise TimeoutException("timeout")
class OptimizerTester:
def __init__(self, line, debug):
reader = EffectReader(line)
# reader = TraceReader(line)
reader.parse_trace()
self.code_size = len(reader.code)
signal.signal(signal.SIGALRM, handler)
signal.alarm(15)
# print(reader.signature)
optimizer = Optimizer(reader.code)
InstructionExecutor(reader, optimizer, debug)
signal.alarm(0)
def get_code_size(self):
return self.code_size
if __name__ == "__main__":
line = open(sys.argv[1]).readline()
debug = "-d" in sys.argv
OptimizerTester(line, debug)
|
401037
|
from elasticsearch import Elasticsearch
import json
import re
from utils.constant import WIKIPEDIA_INDEX_NAME
es = Elasticsearch(timeout=300)
core_title_matcher = re.compile('([^()]+[^\s()])(?:\s*\(.+\))?')
core_title_filter = lambda x: core_title_matcher.match(x).group(1) if core_title_matcher.match(x) else x
def _extract_one(item, lazy=False):
res = {k: item['_source'][k] for k in ['id', 'url', 'title', 'text', 'title_unescape']}
res['_score'] = item['_score']
res['data_object'] = item['_source']['original_json'] if lazy else json.loads(item['_source']['original_json'])
return res
def _single_query_constructor(query, topn=50):
return {
"query": {
"multi_match": {
"query": query,
"fields": ["title^1.25", "title_unescape^1.25", "text", "title_bigram^1.25", "title_unescape_bigram^1.25", "text_bigram"]
}
},
"size": topn
}
def single_text_query(query, topn=10, lazy=False, rerank_topn=50):
body = _single_query_constructor(query, topn=max(topn, rerank_topn))
res = es.search(index=WIKIPEDIA_INDEX_NAME, doc_type='doc', body=json.dumps(body))
res = [_extract_one(x, lazy=lazy) for x in res['hits']['hits']]
res = rerank_with_query(query, res)[:topn]
return res
def bulk_text_query(queries, topn=10, lazy=False, rerank_topn=50):
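    # msearch expects newline-delimited alternating header/body lines; the "{}"
    # header leaves per-query options empty (index and doc_type are supplied on
    # the msearch call itself)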
body = ["{}\n" + json.dumps(_single_query_constructor(query, topn=max(topn, rerank_topn))) for query in queries]
res = es.msearch(index=WIKIPEDIA_INDEX_NAME, doc_type='doc', body='\n'.join(body))
res = [[_extract_one(x, lazy=lazy) for x in r['hits']['hits']] for r in res['responses']]
res = [rerank_with_query(query, results)[:topn] for query, results in zip(queries, res)]
return res
def rerank_with_query(query, results):
def score_boost(item, query):
score = item['_score']
core_title = core_title_filter(item['title_unescape'])
if query.startswith('The ') or query.startswith('the '):
query1 = query[4:]
else:
query1 = query
if query == item['title_unescape'] or query1 == item['title_unescape']:
score *= 1.5
elif query.lower() == item['title_unescape'].lower() or query1.lower() == item['title_unescape'].lower():
score *= 1.2
elif item['title'].lower() in query:
score *= 1.1
elif query == core_title or query1 == core_title:
score *= 1.2
elif query.lower() == core_title.lower() or query1.lower() == core_title.lower():
score *= 1.1
elif core_title.lower() in query.lower():
score *= 1.05
item['_score'] = score
return item
return list(sorted([score_boost(item, query) for item in results], key=lambda item: -item['_score']))
if __name__ == "__main__":
print([x['title'] for x in single_text_query("In which city did <NAME> go to college?")])
print([[y['title'] for y in x] for x in bulk_text_query(["In which city did <NAME> go to college?"])])
|
401064
|
from flask import Flask, render_template, request, jsonify
from urlparse import urljoin
from werkzeug.contrib.atom import AtomFeed
import os
from bs4 import BeautifulSoup
import datetime
import html2text
app = Flask(__name__)
app.debug=True
def make_external(url):
return urljoin(request.url_root, url)
def get_articles():
file_names = os.listdir('static/newsletters/')
file_names.remove("example.html")
newsletter_objects = []
for x in range(0,len(file_names)):
file_object = open('static/newsletters/' + str(file_names[x]), "r")
newsletter_objects.append(file_object)
return newsletter_objects
def modification_date(filename):
t = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(t)
@app.route("/")
def index():
get_articles()
return render_template("index.html")
@app.route("/unsubscribe")
def unsub_view():
return render_template("unsubscribe.html")
@app.route('/recent.atom')
def recent_feed():
try:
feed = AtomFeed('Recent Websecweekly Releases',
feed_url=request.url, url=request.url_root)
articles = get_articles()
for article in articles:
            content = article.read()
            soup = BeautifulSoup(content)
            rendered_text = html2text.html2text(content)
            time_pub = modification_date(os.path.dirname(os.path.realpath(__file__)) + "/" + article.name)
title = "Websecweekly " + time_pub.strftime("%d-%m-%Y")
feed.add(title, unicode(rendered_text),
content_type='html',
generator=("Websecweekly", "https://websecweekly.org", None),
author="Websecweekly",
url=make_external(article.name),
updated=time_pub,
published=time_pub)
return feed.get_response()
except Exception as e:
return str(e)
if __name__ == "__main__":
app.run()
|
401072
|
import time
from datetime import date
from unittest import mock, skip
from django.test import override_settings
from selenium.webdriver.common.keys import Keys
from mainapp.models import Person
from mainapp.tests.live.chromedriver_test_case import ChromeDriverTestCase
from mainapp.tests.live.helper import (
MockMainappSearch,
MockMainappSearchEndlessScroll,
mock_search_autocomplete,
)
from meine_stadt_transparent import settings
class FacettedSearchTest(ChromeDriverTestCase):
fixtures = ["initdata"]
def get_querystring(self):
"""The js writes this value"""
return self.browser.find_by_css("input[name=searchterm]").first[
"data-querystring"
]
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch("mainapp.functions.search.Search.execute", new=mock_search_autocomplete)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_landing_page_redirect(self):
"""There was a case where the redirect would lead to the wrong page"""
self.visit("/")
self.browser.fill("search-query", "word")
self.browser.find_by_name("search-query").first._element.send_keys(Keys.ENTER)
# semi-busy waiting because the test is otherwise broken on travis
for i in range(200):
if "word" == self.get_querystring():
break
time.sleep(0.01)
self.assertEqual("word", self.get_querystring())
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_word(self):
self.visit("/search/query/word/")
self.assertTrue(self.browser.is_text_present("Highlight"))
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_document_type(self):
self.visit("/search/query/word/")
self.assertTextIsPresent("Document Type")
self.assertTextIsNotPresent("Meeting")
self.browser.click_link_by_id("documentTypeButton")
self.assertTextIsPresent("Meeting")
self.browser.check("document-type[person]")
self.browser.check("document-type[file]")
self.browser.check("document-type[meeting]")
self.assertEqual(
"document-type:file,meeting,person word", self.get_querystring()
)
self.browser.uncheck("document-type[meeting]")
self.assertEqual("document-type:file,person word", self.get_querystring())
self.click_by_css("#filter-document-type-list .remove-filter")
self.assertEqual("word", self.get_querystring())
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_time_range(self):
self.visit("/search/query/word/")
self.click_by_id("timeRangeButton")
self.click_by_text("This year")
first_day = date(date.today().year, 1, 1)
last_day = date(date.today().year, 12, 31)
self.assertEqual(
"after:{} before:{} word".format(first_day, last_day),
self.get_querystring(),
)
self.click_by_id("timeRangeButton")
self.click_by_css(".daterangepicker .remove-filter")
self.assertEqual("word", self.get_querystring())
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_person_filter(self):
self.visit("/search/query/word/")
self.click_by_id("personButton")
self.click_by_text("<NAME>")
self.assertEqual("person:1 word", self.get_querystring())
self.click_by_id("personButton")
self.browser.find_by_css(".show .remove-filter").first.click()
self.assertEqual("word", self.get_querystring())
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_sorting(self):
self.visit("/search/query/word/")
self.click_by_id("btnSortDropdown")
self.click_by_text("Newest first")
self.assertEqual("sort:date_newest word", self.get_querystring())
self.click_by_id("btnSortDropdown")
self.click_by_text("Relevance")
self.assertEqual("word", self.get_querystring())
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_dropdown_filter(self):
self.visit("/search/query/word/")
self.click_by_id("personButton")
count = len(self.browser.find_by_css("[data-filter-key='person'] .filter-item"))
org = settings.SITE_DEFAULT_ORGANIZATION
persons = Person.objects.filter(membership__organization=org).distinct().count()
self.assertEqual(count, persons)
self.browser.fill("filter-person", "Frank")
count = len(self.browser.find_by_css("[data-filter-key='person'] .filter-item"))
self.assertEqual(count, 1)
# Todo: This test is flaky
@skip
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute", new=MockMainappSearch.execute
)
def test_dropdown_filter_preseted(self):
self.visit("/search/query/organization:1 word/")
self.click_by_id("organizationButton")
self.assertTextIsPresent("Cancel Selection")
self.click_by_css('#filter-organization-list a[data-id="2"]')
self.assertEqual(self.get_querystring(), "organization:2 word")
self.click_by_id("organizationButton")
self.click_by_css("#filter-organization-list .remove-filter")
self.assertEqual(self.get_querystring(), "word")
@override_settings(ELASTICSEARCH_ENABLED=True)
@mock.patch(
"mainapp.functions.search.MainappSearch.execute",
new=MockMainappSearchEndlessScroll.execute,
)
def test_endless_scroll(self):
self.visit("/search/query/word/")
single_length = settings.SEARCH_PAGINATION_LENGTH
self.assertEqual(
single_length, len(self.browser.find_by_css(".results-list > li"))
)
numbers = [
int(i.text) for i in self.browser.find_by_css(".results-list > li .lead")
]
numbers.sort()
self.assertEqual(numbers, list(range(0, single_length)))
self.click_by_id("start-endless-scroll")
# semi-busy waiting
# (it does work without wating on my machine, but I won't risk having any timing based test failures)
for i in range(200):
if single_length != len(self.browser.find_by_css(".results-list > li")):
break
time.sleep(0.01)
self.assertEqual(
single_length * 2, len(self.browser.find_by_css(".results-list > li"))
)
numbers = [
int(i.text) for i in self.browser.find_by_css(".results-list > li .lead")
]
numbers.sort()
self.assertEqual(numbers, list(range(0, single_length * 2)))
|
401082
|
from imutils import face_utils
import dlib
import cv2
import numpy as np
pre_trained_model = 'classifier/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(pre_trained_model)
video = cv2.VideoCapture('video/somi.mp4')
while True:
    ret, image_input = video.read()
    if not ret:
        break
resize = cv2.resize(image_input, (1050,600))
image = resize
out_face = np.zeros_like(image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
for (i, rect) in enumerate(rects):
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
for (x, y) in shape:
            cv2.circle(image, (x, y), 1, (0, 255, 5), -1)  # negative thickness draws a filled circle
#face extraction
remapped_shape = np.zeros_like(shape)
feature_mask = np.zeros((image.shape[0], image.shape[1]))
remapped_shape = cv2.convexHull(shape)
cv2.fillConvexPoly(feature_mask, remapped_shape[0:27], 1)
        feature_mask = feature_mask.astype(bool)
out_face[feature_mask] = image[feature_mask]
#output window
    cv2.namedWindow("Output", cv2.WINDOW_NORMAL)  # resizable window, so resizeWindow takes effect
    cv2.imshow("Output", out_face)
    cv2.resizeWindow('Output', 30, 30)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
video.release()
|
401112
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from unittest.mock import Mock, patch
import DWF
class FakeResponse:
def __init__(self):
pass
def raise_for_status(self):
pass
def json(self):
return []
# This method will be used by the mock to replace requests.get
def mocked_requests_get(*args, **kwargs):
    # Always return an empty, successful response.
return FakeResponse()
class TestDWFRepo(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch('DWF.DWFGithub.requests.get', side_effect=mocked_requests_get)
def testGetNewIssues(self, mock_get):
issues = DWF.get_new_issues('http://example.com')
self.assertEqual(len(issues), 0)
@patch('DWF.DWFGithub.requests.get', side_effect=mocked_requests_get)
def testGetApprovedCan(self, mock_get):
issues = DWF.get_approved_can_issues('http://example.com')
self.assertEqual(len(issues), 0)
|
401115
|
import traceback
def agency_slug(agency_name):
return '#' + ''.join(agency_name.split()) + '#'
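# e.g. agency_slug("Department of Justice") -> "#DepartmentofJustice#"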
def user_input(prompt):
try:
return raw_input(prompt)
except NameError:
return input(prompt)
def error_info(e):
return "%s: %s" % (e, traceback.format_exc().replace("\n", "\\n "))
|
401175
|
import numpy as np
import pytest
from robogym.envs.rearrange.ycb import make_env
@pytest.mark.parametrize("mesh_scale", [0.5, 1.0, 1.5])
def test_mesh_centering(mesh_scale):
    # We know the STL meshes for these objects are not centered properly.
for mesh_name in ["005_tomato_soup_can", "073-b_lego_duplo", "062_dice"]:
env = make_env(
parameters={
"mesh_names": mesh_name,
"simulation_params": {"mesh_scale": mesh_scale},
}
).unwrapped
obj_pos = env.mujoco_simulation.get_object_pos()
bounding_pos = env.mujoco_simulation.get_object_bounding_boxes()[:, 0, :]
assert np.allclose(obj_pos, bounding_pos, atol=5e-3)
|
401176
|
import cv2
import glob as gb
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--img_file', default='results', type=str)
parser.add_argument('--video_name', default='dancetrack.avi', type=str)
parser.add_argument('--suffix', default='png', type=str)
parser.add_argument('--show_height', default=540, type=int)
parser.add_argument('--show_width', default=960, type=int)
parser.add_argument('--show_fps', default=20, type=int)
args = parser.parse_args()
saved_img_paths = gb.glob(args.img_file + "/*." + args.suffix)
fps = args.show_fps
size = (args.show_width, args.show_height)
video_path = args.video_name
videowriter = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)
print('Loading images...')
for saved_img_path in sorted(saved_img_paths):
img = cv2.imread(saved_img_path)
img = cv2.resize(img, size)
videowriter.write(img)
videowriter.release()
print('Video is finished.')
|
401212
|
import logging
from typing import List
from homeassistant.helpers.entity import Entity
from gehomesdk import ErdCode, ErdApplianceType
from .washer import WasherApi
from .dryer import DryerApi
from ..entities import GeErdSensor, GeErdBinarySensor
_LOGGER = logging.getLogger(__name__)
class WasherDryerApi(WasherApi, DryerApi):
"""API class for washer/dryer objects"""
APPLIANCE_TYPE = ErdApplianceType.COMBINATION_WASHER_DRYER
def get_all_entities(self) -> List[Entity]:
base_entities = self.get_base_entities()
common_entities = [
GeErdSensor(self, ErdCode.LAUNDRY_MACHINE_STATE),
GeErdSensor(self, ErdCode.LAUNDRY_CYCLE),
GeErdSensor(self, ErdCode.LAUNDRY_SUB_CYCLE),
GeErdBinarySensor(self, ErdCode.LAUNDRY_END_OF_CYCLE),
GeErdSensor(self, ErdCode.LAUNDRY_TIME_REMAINING),
GeErdSensor(self, ErdCode.LAUNDRY_DELAY_TIME_REMAINING),
GeErdBinarySensor(self, ErdCode.LAUNDRY_DOOR),
GeErdBinarySensor(self, ErdCode.LAUNDRY_REMOTE_STATUS),
]
washer_entities = self.get_washer_entities()
dryer_entities = self.get_dryer_entities()
entities = base_entities + common_entities + washer_entities + dryer_entities
return entities
|
401278
|
import os
from fnmatch import fnmatch
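# Contract (as inferred from the code below): returning None tells the caller to
# skip ``source_file``; returning the config keeps it. ``config['ignore']`` may be
# True (ignore everything) or a list of fnmatch patterns.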
def ignore(source_file, config):
file_name = os.path.basename(source_file)
    if config['ignore'] is True or \
            (config['ignore'] and any(fnmatch(file_name, pattern) for pattern in config['ignore'])):
        return
return config
ignore.defaults = {
'ignore': [
u'.*',
u'config.yaml',
],
}
|
401323
|
import pytest
from geomdl import ray
from geomdl.ray import Ray, RayIntersection
def test_ray_intersect():
r2 = Ray((5.0, 181.34), (13.659999999999997, 176.34))
r3 = Ray((19.999779996773235, 189.9998729810778), (19.999652977851035, 180.00009298430456))
t0, t1, res = ray.intersect(r2, r3)
assert res != RayIntersection.SKEW
|
401383
|
r"""
Unique factorization domains
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> (CNRS) <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.lazy_attribute import lazy_class_attribute
from sage.misc.misc_c import prod
from sage.categories.category_singleton import Category_singleton
from sage.categories.category_singleton import Category_contains_method_by_parent_class
from sage.categories.gcd_domains import GcdDomains
class UniqueFactorizationDomains(Category_singleton):
"""
    The category of (constructive) unique factorization domains, i.e.
    where one can constructively factor members into a product of a
    finite number of irreducible elements.
EXAMPLES::
sage: UniqueFactorizationDomains()
Category of unique factorization domains
sage: UniqueFactorizationDomains().super_categories()
[Category of gcd domains]
TESTS::
sage: TestSuite(UniqueFactorizationDomains()).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: UniqueFactorizationDomains().super_categories()
[Category of gcd domains]
"""
return [GcdDomains()]
def additional_structure(self):
"""
Return whether ``self`` is a structure category.
.. SEEALSO:: :meth:`Category.additional_structure`
The category of unique factorization domains does not define
additional structure: a ring morphism between unique factorization
domains is a unique factorization domain morphism.
EXAMPLES::
sage: UniqueFactorizationDomains().additional_structure()
"""
return None
def __contains__(self, x):
"""
EXAMPLES::
sage: GF(4, "a") in UniqueFactorizationDomains()
True
sage: QQ in UniqueFactorizationDomains()
True
sage: ZZ in UniqueFactorizationDomains()
True
sage: IntegerModRing(4) in UniqueFactorizationDomains()
False
sage: IntegerModRing(5) in UniqueFactorizationDomains()
True
This implementation will not be needed anymore once every
field in Sage will be properly declared in the category
:class:`UniqueFactorizationDomains`().
"""
try:
return self._contains_helper(x) or x.is_unique_factorization_domain()
except Exception:
return False
@lazy_class_attribute
def _contains_helper(cls):
"""
Helper for containment tests in the category of unique
factorization domains.
This helper just tests whether the given object's category
is already known to be a sub-category of the category of
unique factorization domains. There are, however, rings that
are initialised as plain commutative rings and found out to be
unique factorization domains only afterwards. Hence, this helper
alone is not enough for a proper containment test.
TESTS::
sage: R = Zmod(7)
sage: R.category()
Join of Category of finite commutative rings
and Category of subquotients of monoids
and Category of quotients of semigroups
and Category of finite enumerated sets
sage: ID = UniqueFactorizationDomains()
sage: ID._contains_helper(R)
False
sage: R in ID # This changes the category!
True
sage: ID._contains_helper(R)
True
"""
return Category_contains_method_by_parent_class(cls())
class ParentMethods:
def is_unique_factorization_domain(self, proof=True):
"""
            Return True, since this is an object of the category of unique factorization domains.
EXAMPLES::
sage: Parent(QQ,category=UniqueFactorizationDomains()).is_unique_factorization_domain()
True
"""
return True
def _gcd_univariate_polynomial(self, f, g):
"""
Return the greatest common divisor of ``f`` and ``g``.
INPUT:
- ``f``, ``g`` -- two polynomials defined over this UFD.
.. NOTE::
This is a helper method for
:meth:`sage.rings.polynomial.polynomial_element.Polynomial.gcd`.
ALGORITHM:
Algorithm 3.3.1 in [Coh1993]_, based on pseudo-division.
EXAMPLES::
sage: R.<x> = PolynomialRing(ZZ, sparse=True)
sage: S.<T> = R[]
sage: p = (-3*x^2 - x)*T^3 - 3*x*T^2 + (x^2 - x)*T + 2*x^2 + 3*x - 2
sage: q = (-x^2 - 4*x - 5)*T^2 + (6*x^2 + x + 1)*T + 2*x^2 - x
sage: quo,rem=p.pseudo_quo_rem(q); quo,rem
((3*x^4 + 13*x^3 + 19*x^2 + 5*x)*T + 18*x^4 + 12*x^3 + 16*x^2 + 16*x,
(-113*x^6 - 106*x^5 - 133*x^4 - 101*x^3 - 42*x^2 - 41*x)*T - 34*x^6 + 13*x^5 + 54*x^4 + 126*x^3 + 134*x^2 - 5*x - 50)
sage: (-x^2 - 4*x - 5)^(3-2+1) * p == quo*q + rem
True
Check that :trac:`23620` has been resolved::
sage: R.<x> = ZpFM(2)[]
sage: f = 2*x + 2
sage: g = 4*x + 2
sage: f.gcd(g).parent() is R
True
"""
if f.degree() < g.degree():
A,B = g, f
else:
A,B = f, g
if B.is_zero():
return A
a = b = self.zero()
for c in A.coefficients():
a = a.gcd(c)
if a.is_one():
break
for c in B.coefficients():
b = b.gcd(c)
if b.is_one():
break
d = a.gcd(b)
A = A // a
B = B // b
g = h = 1
delta = A.degree()-B.degree()
_,R = A.pseudo_quo_rem(B)
while R.degree() > 0:
A = B
B = R // (g*h**delta)
g = A.leading_coefficient()
h = h*g**delta // h**delta
delta = A.degree() - B.degree()
_, R = A.pseudo_quo_rem(B)
if R.is_zero():
b = self.zero()
for c in B.coefficients():
b = b.gcd(c)
if b.is_one():
break
return d*B // b
return f.parent()(d)
class ElementMethods:
# prime?
# squareFree
# factor
def radical(self, *args, **kwds):
r"""
Return the radical of this element, i.e. the product of its
irreducible factors.
This default implementation calls ``squarefree_decomposition`` if
available, and ``factor`` otherwise.
.. seealso:: :meth:`squarefree_part`
EXAMPLES::
sage: Pol.<x> = QQ[]
sage: (x^2*(x-1)^3).radical()
x^2 - x
sage: pol = 37 * (x-1)^3 * (x-2)^2 * (x-1/3)^7 * (x-3/7)
sage: pol.radical()
37*x^4 - 2923/21*x^3 + 1147/7*x^2 - 1517/21*x + 74/7
sage: Integer(10).radical()
10
sage: Integer(-100).radical()
10
sage: Integer(0).radical()
Traceback (most recent call last):
...
ArithmeticError: Radical of 0 not defined.
The next example shows how to compute the radical of a number,
assuming no prime > 100000 has exponent > 1 in the factorization::
sage: n = 2^1000-1; n / radical(n, limit=100000)
125
TESTS::
sage: radical(pol)
37*x^4 - 2923/21*x^3 + 1147/7*x^2 - 1517/21*x + 74/7
sage: Integer(20).radical()
10
"""
if self.is_zero():
raise ArithmeticError("Radical of 0 not defined.")
try:
decomp = self.squarefree_decomposition()
except AttributeError:
return self.factor(*args, **kwds).radical_value()
else:
return prod(fac for fac, mult in decomp)
def squarefree_part(self):
r"""
Return the square-free part of this element, i.e. the product
of its irreducible factors appearing with odd multiplicity.
This default implementation calls ``squarefree_decomposition``.
.. seealso:: :meth:`radical`
EXAMPLES::
sage: Pol.<x> = QQ[]
sage: (x^2*(x-1)^3).squarefree_part()
x - 1
sage: pol = 37 * (x-1)^3 * (x-2)^2 * (x-1/3)^7 * (x-3/7)
sage: pol.squarefree_part()
37*x^3 - 1369/21*x^2 + 703/21*x - 37/7
TESTS::
sage: squarefree_part(pol)
37*x^3 - 1369/21*x^2 + 703/21*x - 37/7
"""
decomp = self.squarefree_decomposition()
return prod(fac for fac, mult in decomp if mult%2 == 1)
|
401483
|
from __future__ import print_function
from __future__ import division
from skvideo.io import VideoCapture, VideoWriter
import sys
cap_filename, wr_filename = sys.argv[1], sys.argv[2]
cap = VideoCapture(cap_filename)
cap.open()
print(str(cap.get_info()))
retval, image = cap.read()
assert retval, "could not read the first frame"
# note: this first frame is only used to determine the output frame size; it is not written
wr = VideoWriter(wr_filename, 'H264', 30, (image.shape[1], image.shape[0]))
wr.open()
frame_num = 0
while True:
retval, image = cap.read()
if not retval:
break
wr.write(image)
print("frame %d" % (frame_num))
frame_num += 1
wr.release()
cap.release()
print("done")
|
401517
|
from django.utils.translation import gettext_lazy as _
from rest_framework import decorators, response
from rest_framework import serializers as rf_serializers
from rest_framework import status, viewsets
from waldur_core.core import exceptions as core_exceptions
from waldur_core.core import validators as core_validators
from waldur_core.structure import views as structure_views
from . import executors, filters, models, serializers
class RegionViewSet(structure_views.BaseServicePropertyViewSet):
queryset = models.Region.objects.all()
serializer_class = serializers.RegionSerializer
filterset_class = filters.RegionFilter
lookup_field = 'uuid'
class ImageViewSet(structure_views.BaseServicePropertyViewSet):
queryset = models.Image.objects.all()
serializer_class = serializers.ImageSerializer
filterset_class = filters.ImageFilter
lookup_field = 'uuid'
class SizeViewSet(viewsets.ReadOnlyModelViewSet):
queryset = models.Size.objects.all()
serializer_class = serializers.SizeSerializer
filterset_class = filters.SizeFilter
lookup_field = 'uuid'
class InstanceViewSet(structure_views.ResourceViewSet):
queryset = models.Instance.objects.all().order_by('name')
filterset_class = filters.InstanceFilter
serializer_class = serializers.InstanceSerializer
create_executor = executors.InstanceCreateExecutor
delete_executor = executors.InstanceDeleteExecutor
destroy_validators = [
core_validators.StateValidator(
models.Instance.States.OK, models.Instance.States.ERRED
)
]
def perform_create(self, serializer):
instance = serializer.save()
volume = instance.volume_set.first()
self.create_executor.execute(
instance,
image=serializer.validated_data.get('image'),
size=serializer.validated_data.get('size'),
ssh_key=serializer.validated_data.get('ssh_public_key'),
volume=volume,
)
@decorators.action(detail=True, methods=['post'])
def start(self, request, uuid=None):
instance = self.get_object()
executors.InstanceStartExecutor().execute(instance)
return response.Response(
{'status': _('start was scheduled')}, status=status.HTTP_202_ACCEPTED
)
start_validators = [
core_validators.StateValidator(models.Instance.States.OK),
core_validators.RuntimeStateValidator('stopped'),
]
start_serializer_class = rf_serializers.Serializer
@decorators.action(detail=True, methods=['post'])
def stop(self, request, uuid=None):
instance = self.get_object()
executors.InstanceStopExecutor().execute(instance)
return response.Response(
{'status': _('stop was scheduled')}, status=status.HTTP_202_ACCEPTED
)
stop_validators = [
core_validators.StateValidator(models.Instance.States.OK),
core_validators.RuntimeStateValidator('running'),
]
stop_serializer_class = rf_serializers.Serializer
@decorators.action(detail=True, methods=['post'])
def restart(self, request, uuid=None):
instance = self.get_object()
executors.InstanceRestartExecutor().execute(instance)
return response.Response(
{'status': _('restart was scheduled')}, status=status.HTTP_202_ACCEPTED
)
restart_validators = [
core_validators.StateValidator(models.Instance.States.OK),
core_validators.RuntimeStateValidator('running'),
]
restart_serializer_class = rf_serializers.Serializer
@decorators.action(detail=True, methods=['post'])
def resize(self, request, uuid=None):
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
new_size = serializer.validated_data.get('size')
executors.InstanceResizeExecutor().execute(instance, size=new_size)
return response.Response(
{'status': _('resize was scheduled')}, status=status.HTTP_202_ACCEPTED
)
resize_validators = [core_validators.StateValidator(models.Instance.States.OK)]
resize_serializer_class = serializers.InstanceResizeSerializer
class VolumeViewSet(structure_views.ResourceViewSet):
queryset = models.Volume.objects.all().order_by('name')
serializer_class = serializers.VolumeSerializer
create_executor = executors.VolumeCreateExecutor
delete_executor = executors.VolumeDeleteExecutor
def _has_instance(volume):
if not volume.instance:
raise core_exceptions.IncorrectStateException(
_('Volume is already detached.')
)
@decorators.action(detail=True, methods=['post'])
def detach(self, request, uuid=None):
        executors.VolumeDetachExecutor.execute(self.get_object())
        return response.Response(
            {'status': _('detach was scheduled')}, status=status.HTTP_202_ACCEPTED
        )
detach_validators = [
core_validators.StateValidator(models.Volume.States.OK),
_has_instance,
]
detach_serializer_class = rf_serializers.Serializer
@decorators.action(detail=True, methods=['post'])
    def attach(self, request, uuid=None):
        volume = self.get_object()
        serializer = self.get_serializer(volume, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        executors.VolumeAttachExecutor.execute(volume)
        return response.Response(
            {'status': _('attach was scheduled')}, status=status.HTTP_202_ACCEPTED
        )
attach_validators = [core_validators.StateValidator(models.Volume.States.OK)]
attach_serializer_class = serializers.VolumeAttachSerializer
|
401571
|
from pathlib import Path
def relative_to_abs_path(relative_path):
dirname = Path(__file__).parent
try:
return str((dirname / relative_path).resolve())
except FileNotFoundError:
return None
prefix = relative_to_abs_path('../resources/')+"/"
device_cmd_fpath = relative_to_abs_path('../depthai.cmd')
device_usb2_cmd_fpath = relative_to_abs_path('../depthai_usb2.cmd')
boards_dir_path = relative_to_abs_path('../resources/boards') + "/"
custom_calib_fpath = relative_to_abs_path('../resources/depthai.calib')
left_mesh_fpath = relative_to_abs_path('../resources/mesh_left.calib')
right_mesh_fpath = relative_to_abs_path('../resources/mesh_right.calib')
right_map_x_fpath = relative_to_abs_path('../resources/map_x_right.calib')
right_map_y_fpath = relative_to_abs_path('../resources/map_y_right.calib')
left_map_x_fpath = relative_to_abs_path('../resources/map_x_left.calib')
left_map_y_fpath = relative_to_abs_path('../resources/map_y_left.calib')
nn_resource_path = relative_to_abs_path('../resources/nn')+"/"
blob_fpath = relative_to_abs_path('../resources/nn/mobilenet-ssd/mobilenet-ssd.blob')
blob_config_fpath = relative_to_abs_path('../resources/nn/mobilenet-ssd/mobilenet-ssd.json')
tests_functional_path = relative_to_abs_path('../testsFunctional/') + "/"
if custom_calib_fpath is not None and Path(custom_calib_fpath).exists():
calib_fpath = custom_calib_fpath
print("Using Custom Calibration File: depthai.calib")
else:
calib_fpath = ''
print("No calibration file. Using Calibration Defaults.")
|
401603
|
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spin_basis_1d
from quspin.basis import spin_basis_general
from quspin.basis import basis_int_to_python_int
import numpy as np
from itertools import product
try:
S_dict = {(str(i)+"/2" if i%2==1 else str(i//2)):(i+1,i/2.0) for i in xrange(1,10001)}
except NameError:
S_dict = {(str(i)+"/2" if i%2==1 else str(i//2)):(i+1,i/2.0) for i in range(1,10001)}
def check_ME(basis_1d,basis_gen,opstr,indx,dtype,err_msg):
ME1,row1,col1=basis_1d.Op(opstr,indx,1.0,dtype)
ME2,row2,col2=basis_gen.Op(opstr,indx,1.0,dtype)
if len(ME1) != len(ME2):
print(opstr,list(indx))
print(basis_1d)
print("spin_basis_1d:")
print(ME1)
print(row1)
print(col1)
print()
print("spin_basis_general")
print(ME2)
print(row2)
print(col2)
raise Exception("number of matrix elements do not match.")
if len(ME1)>0 and len(ME2)>0:
row1 = row1.astype(np.min_scalar_type(row1.max()))
row2 = row2.astype(np.min_scalar_type(row2.max()))
        col1 = col1.astype(np.min_scalar_type(col1.max()))
        col2 = col2.astype(np.min_scalar_type(col2.max()))
try:
np.testing.assert_allclose(row1,row2,atol=1e-6,err_msg=err_msg)
np.testing.assert_allclose(col1,col2,atol=1e-6,err_msg=err_msg)
np.testing.assert_allclose(ME1,ME2,atol=1e-6,err_msg=err_msg)
except AssertionError:
print(err_msg)
print(basis_1d)
print("difference:")
print(ME1-ME2)
print(row1-row2)
print(col1-col2)
print("spin_basis_1d:")
print(ME1)
print(row1)
print(col1)
print("spin_basis_general")
print(ME2)
print(row2)
print(col2)
raise Exception
def test_gen_basis_spin(l_max,S="1/2"):
L=6
kblocks = [None]
kblocks.extend(range(L))
pblocks = [None,0,1]
zblocks = [None,0,1]
if S=="1/2":
ops = ["x","y","z","+","-","I"]
else:
ops = ["z","+","-","I"]
sps,s=S_dict[S]
Nups = [None,int(s*L)]
t = np.array([(i+1)%L for i in range(L)])
p = np.array([L-i-1 for i in range(L)])
z = np.array([-(i+1) for i in range(L)])
for Nup,kblock,pblock,zblock in product(Nups,kblocks,pblocks,zblocks):
gen_blocks = {"pauli":False,"S":S}
basis_blocks = {"pauli":False,"S":S}
if kblock==0 or kblock==L//2:
if pblock is not None:
basis_blocks["pblock"] = (-1)**pblock
gen_blocks["pblock"] = (p,pblock)
else:
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
else:
if pblock is not None:
continue
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
if zblock is not None:
basis_blocks["zblock"] = (-1)**zblock
gen_blocks["zblock"] = (z,zblock)
else:
basis_blocks["zblock"] = None
gen_blocks["zblock"] = None
if kblock is not None:
basis_blocks["kblock"] = kblock
gen_blocks["kblock"] = (t,kblock)
else:
basis_blocks["kblock"] = None
gen_blocks["kblock"] = None
print("checking S={S:} Nup={Nup:} kblock={kblock:} pblock={pblock:} zblock={zblock:}".format(Nup=Nup,**basis_blocks))
basis_1d = spin_basis_1d(L,Nup=Nup,**basis_blocks)
gen_basis = spin_basis_general(L,Nup=Nup,**gen_blocks)
n = basis_1d._get_norms(np.float64)**2
n_gen = (gen_basis._n.astype(np.float64))*gen_basis._pers.prod()
if basis_1d.Ns != gen_basis.Ns:
print(L,basis_blocks)
print(basis_1d)
print(gen_basis)
raise ValueError("basis size mismatch")
try:
np.testing.assert_allclose(basis_1d.states-gen_basis.states,0,atol=1e-6)
np.testing.assert_allclose(n , n_gen,atol=1e-6)
except:
print(basis_1d.states)
print(gen_basis.states)
print(n)
print(n_gen)
raise Exception
for l in range(1,l_max+1):
for i0 in range(0,L-l+1,1):
indx = range(i0,i0+l,1)
for opstr in product(*[ops for i in range(l)]):
opstr = "".join(list(opstr))
printing = dict(basis_blocks)
printing["opstr"]=opstr
printing["indx"]=indx
printing["Nup"]=Nup
printing["S"]=S
err_msg="testing: {opstr:} {indx:} S={S:} Nup={Nup:} kblock={kblock:} pblock={pblock:} zblock={zblock:}".format(**printing)
check_ME(basis_1d,gen_basis,opstr,indx,np.complex128,err_msg)
def test_gen_basis_spin_boost(L,Nups,l_max,S="1/2"):
kblocks = [None]
kblocks.extend(range(0,L,(L//4)))
pblocks = [None,0,1]
if S=="1/2":
ops = ["x","y","z","+","-","I"]
else:
ops = ["z","+","-","I"]
sps,s=S_dict[S]
t = np.array([(i+1)%L for i in range(L)])
p = np.array([L-i-1 for i in range(L)])
for Nup,kblock,pblock in product(Nups,kblocks,pblocks):
gen_blocks = {"pauli":False,"S":S}
basis_blocks = {"pauli":False,"S":S}
if kblock==0 or kblock==L//2:
if pblock is not None:
basis_blocks["pblock"] = (-1)**pblock
gen_blocks["pblock"] = (p,pblock)
else:
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
else:
if pblock is not None:
continue
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
if kblock is not None:
basis_blocks["kblock"] = kblock
gen_blocks["kblock"] = (t,kblock)
else:
basis_blocks["kblock"] = None
gen_blocks["kblock"] = None
basis_1d = spin_basis_1d(L,Nup=Nup,**basis_blocks)
gen_basis = spin_basis_general(L,Nup=Nup,**gen_blocks)
n = basis_1d._get_norms(np.float64)**2
n_gen = (gen_basis._n.astype(np.float64))*gen_basis._pers.prod()
print("checking S={S:} Nup={Nup:} kblock={kblock:} pblock={pblock:}".format(Nup=Nup,**basis_blocks))
if basis_1d.Ns != gen_basis.Ns:
print(L,basis_blocks)
print(basis_1d)
print(gen_basis)
raise ValueError("basis size mismatch")
try:
for s_general,s_1d in zip(gen_basis.states,basis_1d.states):
assert(basis_int_to_python_int(s_general)==s_1d)
np.testing.assert_allclose(n-n_gen ,0,atol=1e-6)
except:
print(basis_1d)
print(n)
print(gen_basis)
print(n_gen)
raise Exception
for l in range(1,l_max+1):
for i0 in range(0,L-l+1,1):
indx = list(range(i0,i0+l,1))
for opstr in product(*[ops for i in range(l)]):
opstr = "".join(list(opstr))
printing = dict(basis_blocks)
printing["opstr"]=opstr
printing["indx"]=indx
printing["Nup"]=Nup
printing["S"]=S
err_msg="testing: {opstr:} {indx:} S={S:} Nup={Nup:} kblock={kblock:} pblock={pblock:}".format(**printing)
check_ME(basis_1d,gen_basis,opstr,indx,np.complex128,err_msg)
print("testing S=1/2")
test_gen_basis_spin(3,S="1/2")
test_gen_basis_spin_boost(66,[1,-2],2,S="1/2")
print("testing S=1")
test_gen_basis_spin(3,S="1")
test_gen_basis_spin_boost(40,[1,-2],2,S="1")
print("testing S=3/2")
test_gen_basis_spin(3,S="3/2")
test_gen_basis_spin_boost(34,[1,-2],2,S="3/2")
print("testing S=2")
test_gen_basis_spin(3,S="2")
test_gen_basis_spin_boost(30,[1,-2],2,S="2")
|
401606
|
import pytest
# integration tests requires nomad Vagrant VM or Binary running
def test_initiate_garbage_collection(nomad_setup):
nomad_setup.system.initiate_garbage_collection()
def test_dunder_str(nomad_setup):
assert isinstance(str(nomad_setup.system), str)
def test_dunder_repr(nomad_setup):
assert isinstance(repr(nomad_setup.system), str)
def test_dunder_getattr(nomad_setup):
with pytest.raises(AttributeError):
d = nomad_setup.system.does_not_exist
|
401608
|
import matplotlib
#
import numpy
import math
import pylab as plt
import h5py
import itertools
#
#plt.ion()
default_events = 'vq_output_hattonsenvy_3k/events_3000_d.h5'
events_2 = 'ca_model_hattonsenvy_105yrs_3km/events_3000.hdf5'
def quick_figs(vc_data_file=default_events, fnum_0=0, events_start=0, events_end=None, m0=7.0):
# make some quick figures for preliminary analysis.
with h5py.File(vc_data_file, 'r') as vc_data:
#
events = vc_data['events']
#
if events_start==None: events_start=0
if events_end==None: events_end=len(events)-1
events = events[events_start:events_end]
#
        print("get magnitudes and then sort...")
        mags = sorted(events['event_magnitude'].tolist())
        #
        print("get delta_ts...")
        T=events['event_year']
        #dts = [[t, t - f['events'][j]['event_year']] for j,t in enumerate(f['events']['event_year'])]
        dts = [[t, t - T[j]] for j,t in enumerate(T[1:])]
        #
        print("... and bigmags")
        big_mags = [[rw['event_year'], rw['event_magnitude']] for rw in events if rw['event_magnitude']>=m0]
        big_mag_dts = [[rw[0], rw[0]-big_mags[j][0]] for j, rw in enumerate(big_mags[1:])]
        #
        print("Some summary stats:")
        big_mag_intervals = list(zip(*big_mag_dts))[1]
        mean_dt_m0 = numpy.mean(big_mag_intervals)
        std_dt_m0 = numpy.std(big_mag_intervals)
        print("mean interval (N=%d) for m>%f: %f +/- %f" % (len(big_mags), m0, mean_dt_m0, std_dt_m0))
        #
        print("and now plot...")
#
figs=[]
figs+=[plt.figure(len(figs)+fnum_0)]
plt.clf()
#
# first: magnitude distributions
f=figs[-1]
ax = plt.gca()
ax.set_yscale('log')
#ax.plot(mags, reversed(xrange(1, len(mags)+1)), '.-')
ax.plot(*zip(*[[m,len(mags)-j] for j,m in enumerate(mags)]), color='b', marker='.', ls='-', zorder=4, label='Cumulative $N(>m)$')
# and the pdf...
dolog=True
ax.hist(mags,bins=200, range=[min(mags), max(mags)], log=dolog, histtype='step', label='Prob. Density')
plt.legend(loc=0, numpoints=1)
plt.title('Magnitudes')
#
# magnitudes PDF only.
'''
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
dolog=True
ax.hist(mags,bins=200, range=[min(mags), max(mags)], log=dolog)
plt.title('Magnitudes (pdf)')
'''
#
# intervals, magnitudes time series:
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
        ldT = numpy.log10(list(zip(*dts))[1])
ax.set_yscale('log')
#ax.plot(T[1:], ldT, marker='.', ls='-', color='b', label='dt(t)')
        ax.plot(T[1:], list(zip(*dts))[1], marker='.', ls='-', color='b', zorder=8, label='$dt(t)$')
ave_len = 100
        print("plot mean intervals over %d intervals(%d events).(%d)" % (ave_len, ave_len+1, len(figs)))
ax.plot(T[ave_len:], [(t-T[j])/float(ave_len) for j,t in enumerate(T[ave_len:])], color = 'c', lw=2,zorder=11, label='$<dt(t)>_{%d}$' % ave_len)
# set up dt range:
        dts_sorted = sorted(list(zip(*dts))[1])
#
#print "dt_max at: %f (%d)" % (dt_max, int(.9*len(dts_sorted)))
        ax.set_ylim(.9*dts_sorted[0], 1.1*dts_sorted[-1])
ax.set_ylabel('Intervals $\\Delta t$')
#ax.draw()
ax_mags = ax.twinx()
#ax.vlines(*(zip(*big_mags)),[3.0 for x in big_mags], color='r')
ax_mags.vlines(*(zip(*big_mags)), ymax=[3.0 for x in big_mags], color='r', lw=1.25, zorder=2, label='m>%.2f' % m0)
ax_mags.vlines(T,[3.0 for m in mags], events['event_magnitude'], color='g', zorder=3, label='magnitudes')
ax_mags.set_ylim(2.0, 9.5)
ax_mags.set_ylabel('magnitude')
plt.legend(loc=0, numpoints=1)
#
# big-mag intervals:
# big_mag_dts
        print("... big-mag time-series:")
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
ax.set_yscale('log')
ax.set_ylabel('interval $\\Delta t_{m%.2f}$' % m0)
        ax.plot(list(zip(*big_mag_dts))[0], list(zip(*big_mag_dts))[1], 'g.-', zorder=7, lw=1.5, label='$m>%.2f$ intervals' % m0)
ax_mags = ax.twinx()
ax_mags.vlines(*(zip(*big_mags)), ymax=[3.0 for x in big_mags], color='m', lw=1, zorder=1, label='m>%.2f' % m0, alpha=.5)
#plt.legend(loc=0, numpoints=1)
plt.title('big-mag and intervals')
#
# interval distributions:
#
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
dolog=True
normed = False
X = numpy.log10(dts_sorted)
ax.hist(X, bins=200, range=[min(X), max(X)], log=dolog, histtype='stepfilled', normed=normed)
h_cum = ax.hist(X, bins=200, range=[min(X), max(X)], log=dolog, histtype='step', cumulative=True, normed=normed)
N = float(len(X))
if normed: N=1.0
ax.plot([.5*(x+h_cum[1][j]) for j,x in enumerate(h_cum[1][1:])], [N-x for x in h_cum[0]], 'c-')
#ax.plot([x for j,x in enumerate(h_cum[1][:-1])], h_cum[0], 'c-')
        plt.title('intervals distribution (hist)')
        plt.xlabel('log intervals $\\log \\left( \\Delta t \\right)$')
plt.ylabel('N(dt)')
return h_cum
#
#def plot_recurrence(
class Sweep(object):
def __init__(self, event_number=0, vc_data_file=default_events, block_id=None):
self.sweep_sequences=sweep_sequence(event_number=event_number, block_id=block_id, vc_data_file=vc_data_file)
self.shear_stress_sequences = shear_stress_sequence(sweepses=self.sweep_sequences, do_print=False)
#
b_id_list = self.sweep_sequences['block_id'].tolist()
self.block_ids = {x:b_id_list.count(x) for x in b_id_list}
#self.block_ids = list(set(self.sweep_sequences['block_id'].tolist()))
#
# we could also, at this point, parse out the individual block sequences, maybe make a class Block().
#
def plot_slips(self, block_ids=None, fignum=0):
#if block_ids==None: block_ids=self.block_ids.keys()
#if isinstance(block_ids, float): block_ids=[int(block_ids)]
#if isinstance(block_ids, int): block_ids = [block_ids]
if block_ids==None: block_ids = self.check_block_ids_list(block_ids)
#
plt.figure(fignum)
plt.clf()
#
for block_id in block_ids:
rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
plt.plot(rws['sweep_number'], rws['block_slip'], '.-', label='block_id: %d' % block_id)
plt.legend(loc=0, numpoints=1)
plt.title('Block slip sequences')
plt.xlabel('sweep number')
plt.ylabel('slip')
#
def plot_stress_drop(self, block_ids=None, fignum=0):
block_ids = self.check_block_ids_list(block_ids)
#
plt.figure(fignum)
plt.clf()
#
for block_id in block_ids:
rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
plt.plot(rws['sweep_number'], rws['shear_diff'], '.-', label='block_id: %d' % block_id)
plt.plot([min(self.shear_stress_sequences['sweep_number']), max(self.shear_stress_sequences['sweep_number'])], [0., 0.], 'k-')
plt.legend(loc=0, numpoints=1)
plt.title('Block shear_stress drop sequences')
plt.xlabel('sweep number')
plt.ylabel('shear stress drop')
#
def plot_stress(self, block_ids=None, fignum=0):
block_ids = self.check_block_ids_list(block_ids)
#
plt.figure(fignum)
ax1=plt.gca()
plt.clf()
plt.figure(fignum)
plt.clf()
ax0=plt.gca()
#
for block_id in block_ids:
rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
stress_seq = []
for rw in rws:
stress_seq += [[rw['sweep_number'], rw['shear_init']]]
stress_seq += [[rw['sweep_number'], rw['shear_final']]]
X,Y = zip(*stress_seq)
#
ax0.plot(X,Y, '.-', label='block_id: %d' % block_id)
#
plt.figure(fignum+1)
plt.plot(rws['sweep_number'], rws['shear_init'], '.-', label='block_id: %d' % block_id)
plt.plot(rws['sweep_number'], rws['shear_final'], '.-', label='block_id: %d' % block_id)
plt.figure(fignum)
ax0.plot([min(self.shear_stress_sequences['sweep_number']), max(self.shear_stress_sequences['sweep_number'])], [0., 0.], 'k-')
ax0.legend(loc=0, numpoints=1)
plt.figure(fignum)
plt.title('Block shear_stress sequences')
plt.xlabel('sweep number')
plt.ylabel('shear stress')
#
def check_block_ids_list(self, block_ids):
if block_ids==None: block_ids=self.block_ids.keys()
if isinstance(block_ids, float): block_ids=[int(block_ids)]
if isinstance(block_ids, int): block_ids = [block_ids]
#
return block_ids
#
def shear_stress_sequence(block_id=None, event_number=0, vc_data_file=default_events, do_print=True, sweepses=None):
if sweepses==None: sweepses = sweep_sequence(block_id=block_id, event_number=event_number, vc_data_file=vc_data_file)
#
outsies = [[rw['sweep_number'], rw['block_id'], rw['block_slip'], rw['shear_init'], rw['shear_final'], rw['shear_init']-rw['shear_final'], (rw['shear_init']-rw['shear_final'])/rw['shear_final']] for rw in sweepses]
#
if do_print:
        for rw in outsies:
            print(rw)
#
cols = ['sweep_number', 'block_id', 'block_slip', 'shear_init', 'shear_final', 'shear_diff', 'shear_diff_norm']
#outsies = numpy.core.records.fromarrays(zip(*outsies), names=cols, formats = [type(x).__name__ for x in outsies[0]])
#return outsies
return numpy.core.records.fromarrays(zip(*outsies), names=cols, formats = [type(x).__name__ for x in outsies[0]])
#
def sweep_sequence(event_number=0, block_id=None, vc_data_file=default_events):
# sweep sequence for a single block in a single event.
#
with h5py.File(vc_data_file) as vc_data:
sweep_range = [vc_data['events'][event_number]['start_sweep_rec'], vc_data['events'][event_number]['end_sweep_rec']]
sweeps = vc_data['sweeps'][sweep_range[0]:sweep_range[1]][()]
#
# so we could filter out all the blocks != block_id, but let's just assume that we (might) want all the blocks (for default None value).
#if block_id==None or block_id not in (sweeps['block_id']): block_id=sweeps['block_id'][0]
if block_id!=None:
d_type = sweeps.dtype
#sweeps = filter(lambda x: x['block_id']==block_id, sweeps)
sweeps = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, sweeps)), dtype=d_type)
#
return sweeps
def get_h5_col(col_name, vc_data_file=default_events):
#
if isinstance(col_name, str): col_name=[col_name]
if col_name[0] not in ('events', 'sweeps'): col_name.insert(0,'events')
#
with h5py.File(vc_data_file) as vc_data:
vc1 = vc_data[col_name[0]]
#
col = vc_data
for cl in col_name:
#
col=col[cl]
#
#
#
return col
|
401729
|
from builtins import object
import factory
from bluebottle.statistics.models import Statistic
class StatisticFactory(factory.DjangoModelFactory):
class Meta(object):
model = Statistic
type = 'manual'
title = factory.Sequence(lambda n: 'Metric {0}'.format(n))
value = None
sequence = factory.Sequence(lambda n: n)
active = True
language = 'en'
|
401750
|
from dataclasses import dataclass
@dataclass(frozen=True)
class VertexData:
__slots__ = ["lat", "lon"]
lat: float
lon: float
def __repr__(self) -> str:
return "{} {}".format(self.lat, self.lon)
@dataclass(frozen=True)
class Vertex:
__slots__ = ["id", "data"]
id: int
data: VertexData
@property
def description(self) -> str:
return "{} {}".format(self.id, self.data)
@dataclass(frozen=True)
class EdgeData:
__slots__ = ["length", "highway", "max_v", "name"]
length: float
highway: str
max_v: int
name: str
def __repr__(self) -> str:
return "{} {} {}".format(self.length, self.highway, self.max_v)
@dataclass(frozen=True)
class Edge:
__slots__ = ["s", "t", "forward", "backward", "data"]
s: int
t: int
forward: bool
backward: bool
data: EdgeData
@property
def description(self) -> str:
both_directions = "1" if self.forward and self.backward else "0"
return "{} {} {} {}".format(self.s, self.t, self.data, both_directions)
|
401751
|
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, UpSampling2D
def autoencoder():
input_shape=(784,)
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=input_shape))
model.add(Dense(784, activation='sigmoid'))
return model
def deep_autoencoder():
input_shape=(784,)
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=input_shape))
model.add(Dense(64, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(784, activation='sigmoid'))
return model
def convolutional_autoencoder():
input_shape=(28,28,1)
n_channels = input_shape[-1]
model = Sequential()
model.add(Conv2D(32, (3,3), activation='relu', padding='same', input_shape=input_shape))
model.add(MaxPool2D(padding='same'))
model.add(Conv2D(16, (3,3), activation='relu', padding='same'))
model.add(MaxPool2D(padding='same'))
model.add(Conv2D(8, (3,3), activation='relu', padding='same'))
model.add(UpSampling2D())
model.add(Conv2D(16, (3,3), activation='relu', padding='same'))
model.add(UpSampling2D())
model.add(Conv2D(32, (3,3), activation='relu', padding='same'))
model.add(Conv2D(n_channels, (3,3), activation='sigmoid', padding='same'))
return model
def load_model(name):
if name=='autoencoder':
return autoencoder()
elif name=='deep_autoencoder':
return deep_autoencoder()
elif name=='convolutional_autoencoder':
return convolutional_autoencoder()
else:
raise ValueError('Unknown model name %s was given' % name)
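# A quick smoke-test sketch, assuming only the keras imports above: build each
# model, compile it with a standard autoencoder loss, and print its layer summary.
if __name__ == '__main__':
    for model_name in ('autoencoder', 'deep_autoencoder', 'convolutional_autoencoder'):
        m = load_model(model_name)
        m.compile(optimizer='adam', loss='binary_crossentropy')
        print(model_name)
        m.summary()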
|
401789
|
import torch
class BaseMatcher:
def __init__(self, matcher_cfg):
self.matcher_cfg = matcher_cfg
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
self.n_fails = 0
def match(self, s1: torch.Tensor, s2: torch.Tensor):
raise NotImplementedError
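# A hypothetical concrete matcher (illustrative only, not part of the original
# code): match each row of s1 to its nearest row of s2 under the L2 distance.
class L2Matcher(BaseMatcher):
    def match(self, s1: torch.Tensor, s2: torch.Tensor):
        # cdist gives the (n, m) pairwise distance matrix; argmin picks, for
        # every row of s1, the index of the closest row of s2.
        return torch.cdist(s1.to(self.device), s2.to(self.device)).argmin(dim=1)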
|
401795
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
ext_modules = [
Extension(
name='toolkit.utils.region',
sources=[
'toolkit/utils/region.pyx',
'toolkit/utils/src/region.c',
],
include_dirs=[
'toolkit/utils/src'
]
)
]
setup(
name='toolkit',
packages=['toolkit'],
ext_modules=cythonize(ext_modules)
)
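# Typical build command, assuming Cython and a C toolchain are installed:
#   python setup.py build_ext --inplace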
|
401810
|
import torch
from torch.utils import data
from transformers import AutoTokenizer
from .augment import Augmenter
# map lm name to huggingface's pre-trained model names
lm_mp = {'roberta': 'roberta-base',
'distilbert': 'distilbert-base-uncased'}
def get_tokenizer(lm):
if lm in lm_mp:
return AutoTokenizer.from_pretrained(lm_mp[lm])
else:
return AutoTokenizer.from_pretrained(lm)
class DittoDataset(data.Dataset):
"""EM dataset"""
def __init__(self,
path,
max_len=256,
size=None,
lm='roberta',
da=None):
self.tokenizer = get_tokenizer(lm)
self.pairs = []
self.labels = []
self.max_len = max_len
self.size = size
if isinstance(path, list):
lines = path
        else:
            with open(path) as fp:
                lines = fp.readlines()
for line in lines:
s1, s2, label = line.strip().split('\t')
self.pairs.append((s1, s2))
self.labels.append(int(label))
self.pairs = self.pairs[:size]
self.labels = self.labels[:size]
self.da = da
if da is not None:
self.augmenter = Augmenter()
else:
self.augmenter = None
def __len__(self):
"""Return the size of the dataset."""
return len(self.pairs)
def __getitem__(self, idx):
"""Return a tokenized item of the dataset.
Args:
idx (int): the index of the item
Returns:
            List of int: token IDs of the two entities
            List of int: token IDs of the two entities, augmented (if da is set)
            int: the label of the pair (0: no match, 1: match)
"""
left = self.pairs[idx][0]
right = self.pairs[idx][1]
# left + right
x = self.tokenizer.encode(text=left,
text_pair=right,
max_length=self.max_len,
truncation=True)
# augment if da is set
if self.da is not None:
combined = self.augmenter.augment_sent(left + ' [SEP] ' + right, self.da)
left, right = combined.split(' [SEP] ')
x_aug = self.tokenizer.encode(text=left,
text_pair=right,
max_length=self.max_len,
truncation=True)
return x, x_aug, self.labels[idx]
else:
return x, self.labels[idx]
@staticmethod
def pad(batch):
"""Merge a list of dataset items into a train/test batch
Args:
batch (list of tuple): a list of dataset items
Returns:
LongTensor: x1 of shape (batch_size, seq_len)
LongTensor: x2 of shape (batch_size, seq_len).
Elements of x1 and x2 are padded to the same length
LongTensor: a batch of labels, (batch_size,)
"""
if len(batch[0]) == 3:
x1, x2, y = zip(*batch)
maxlen = max([len(x) for x in x1+x2])
x1 = [xi + [0]*(maxlen - len(xi)) for xi in x1]
x2 = [xi + [0]*(maxlen - len(xi)) for xi in x2]
return torch.LongTensor(x1), \
torch.LongTensor(x2), \
torch.LongTensor(y)
else:
x12, y = zip(*batch)
maxlen = max([len(x) for x in x12])
x12 = [xi + [0]*(maxlen - len(xi)) for xi in x12]
return torch.LongTensor(x12), \
torch.LongTensor(y)
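# An illustrative usage sketch (run within the package so the relative import of
# Augmenter resolves; the first run downloads the roberta-base tokenizer). The
# two tab-separated lines are made-up examples, not real data.
if __name__ == '__main__':
    demo_lines = ["apple iphone 11\tiphone 11 by apple\t1",
                  "apple iphone 11\tsamsung galaxy s10\t0"]
    dataset = DittoDataset(demo_lines)
    loader = data.DataLoader(dataset, batch_size=2, collate_fn=DittoDataset.pad)
    for x, y in loader:
        print(x.shape, y)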
|
401818
|
import bpy
import json
from bpy.props import PointerProperty
from ...nodes.BASE.node_base import RenderNodeBase
def update_node(self, context):
self.execute_tree()
class RSNodeTaskInfoInputsNode(RenderNodeBase):
'''A simple input node'''
bl_idname = 'RSNodeTaskInfoInputsNode'
bl_label = 'Task Info(Experiment)'
file: PointerProperty(type=bpy.types.Text, name="Task Info File", update=update_node)
def init(self, context):
self.outputs.new('RSNodeSocketTaskSettings', "Settings")
self.width = 200
def draw_buttons(self, context, layout):
layout.prop(self, "file", text="")
def get_data(self):
task_data = {}
try:
data = json.loads(self.file.as_string())
task_data.update(data)
except Exception:
self.use_custom_color = 1
self.color = (1, 0, 0)
return task_data
def register():
bpy.utils.register_class(RSNodeTaskInfoInputsNode)
def unregister():
bpy.utils.unregister_class(RSNodeTaskInfoInputsNode)
|
401819
|
import os
import time
# glob and librosa are used directly below; import them explicitly rather than
# relying on the star import.
import glob
import librosa
from speech_tools import *
import numpy as np
from multiprocessing import Pool
datasets_dir = "datasets"
output_root_dir = "datasets_splitted"
divs = 64
def process(folder):
sampling_rate = 22050
num_mcep = 36
frame_period = 5.0
n_frames = 128
X=[]
for file in glob.glob(folder + '/*.wav'):
wav, _ = librosa.load(file, sr=sampling_rate, mono=True)
wav *= 1. / max(0.01, np.max(np.abs(wav)))
wav_splitted = librosa.effects.split(wav,top_db=48)
export_dir = folder
os.makedirs(os.path.join(output_root_dir,export_dir), exist_ok=True)
for s in range(wav_splitted.shape[0]):
x = wav[wav_splitted[s][0]:wav_splitted[s][1]]
X = np.concatenate([X,x],axis=0)
X *= 1. / max(0.01, np.max(np.abs(X)))
wavlen = X.shape[0]
crop_size = wavlen // divs
start = 0
for i in range(divs):
sub = 0
if(i==divs-1):
sub = X[start:]
else:
sub = X[start:start+crop_size]
start += crop_size
sub = sub.astype(np.float32)
librosa.output.write_wav(os.path.join(output_root_dir,export_dir,"{}_".format(i)+os.path.basename(folder)+".wav"), sub, sampling_rate)
if __name__ == '__main__':
folders = glob.glob(datasets_dir+"/*")
TIME= time.time()
cores = min(len(folders), 4)
parallel = True
print(folders)
if parallel:
p = Pool(cores)
p.map(process, folders)
p.close()
else:
for f in folders:
process(f)
print(time.time()-TIME)
|
401842
|
import numpy as np
from scipy.special import gammaln, psi
# TODO: define distribution base class
class Discrete(object):
def __init__(self, p=0.5*np.ones(2)):
assert np.all(p >= 0) and p.ndim == 1 and np.allclose(p.sum(), 1.0), \
"p must be a probability vector that sums to 1.0"
self.p = p
self.D = p.size
def _is_one_hot(self, x):
        return x.shape == (self.D,) and np.issubdtype(x.dtype, np.integer) and x.sum() == 1
def _isindex(self, x):
return isinstance(x, int) and x >= 0 and x < self.D
def log_probability(self, x):
# TODO: Handle broadcasting
assert self._is_one_hot(x) or self._isindex(x)
if self._is_one_hot(x):
lp = x.dot(np.log(self.p))
elif self._isindex(x):
lp = np.log(self.p[x])
else:
raise Exception("x must be a one-hot vector or an index")
return lp
def expected_x(self):
return self.p
def negentropy(self, E_x=None, E_ln_p=None):
"""
Compute the negative entropy of the discrete distribution.
:param E_x: Expected observation
:param E_ln_p: Expected log probability
:return:
"""
if E_x is None:
E_x = self.expected_x()
if E_ln_p is None:
E_ln_p = np.log(self.p)
H = E_x.dot(E_ln_p)
return np.nan_to_num(H)
class Bernoulli:
#TODO: Subclass Discrete distribution
def __init__(self, p=0.5):
assert np.all(p >= 0) and np.all(p <= 1.0)
self.p = p
def log_probability(self, x):
"""
Log probability of x given p
:param x:
:return:
"""
lp = x * np.log(self.p) + (1-x) * np.log(1.0-self.p)
lp = np.nan_to_num(lp)
return lp
def expected_x(self):
return self.p
def expected_notx(self):
return 1 - self.p
def negentropy(self, E_x=None, E_notx=None, E_ln_p=None, E_ln_notp=None):
"""
        Compute the negative entropy of the Bernoulli distribution.
:param E_x: If given, use this in place of expectation wrt p
:param E_notx: If given, use this in place of expectation wrt p
:param E_ln_p: If given, use this in place of expectation wrt p
:param E_ln_notp: If given, use this in place of expectation wrt p
:return: E[ ln p(x | p)]
"""
if E_x is None:
E_x = self.expected_x()
if E_notx is None:
E_notx = self.expected_notx()
if E_ln_p is None:
E_ln_p = np.log(self.p)
if E_ln_notp is None:
E_ln_notp = np.log(1.0 - self.p)
H = E_x * E_ln_p + E_notx * E_ln_notp
return H
class Gamma:
def __init__(self, alpha, beta=1.0):
        assert np.all(alpha >= 0)
        assert np.all(beta >= 0)
self.alpha = alpha
self.beta = beta
def log_probability(self, lmbda):
"""
Log probability of x given p
:param x:
:return:
"""
lp = self.alpha * np.log(self.beta) - gammaln(self.alpha) \
+ (self.alpha-1) * np.log(lmbda) - self.beta * lmbda
lp = np.nan_to_num(lp)
return lp
def expected_lambda(self):
return self.alpha / self.beta
def expected_log_lambda(self):
return psi(self.alpha) - np.log(self.beta)
def negentropy(self, E_ln_lambda=None, E_lambda=None, E_beta=None, E_ln_beta=None):
"""
        Compute the negative entropy of the gamma distribution.
:param E_ln_lambda: If given, use this in place of expectation wrt alpha and beta
:param E_lambda: If given, use this in place of expectation wrt alpha and beta
:param E_ln_beta: If given, use this in place of expectation wrt alpha and beta
:param E_beta: If given, use this in place of expectation wrt alpha and beta
:return: E[ ln p(\lambda | \alpha, \beta)]
"""
if E_ln_lambda is None:
E_ln_lambda = self.expected_log_lambda()
if E_lambda is None:
E_lambda = self.expected_lambda()
if E_ln_beta is None:
E_ln_beta = np.log(self.beta) * np.ones_like(E_ln_lambda)
if E_beta is None:
E_beta = self.beta * np.ones_like(E_lambda)
# Make sure everything is the same shape
alpha = self.alpha * np.ones_like(E_ln_lambda)
H = alpha * E_ln_beta
H += -gammaln(alpha)
H += (alpha - 1.0) * E_ln_lambda
H += -E_beta * E_lambda
return H
class Dirichlet(object):
def __init__(self, gamma):
        assert np.all(gamma >= 0) and gamma.shape[-1] >= 1
self.gamma = gamma
def log_probability(self, x):
assert np.allclose(x.sum(axis=-1), 1.0) and np.amin(x) >= 0.0
return gammaln(self.gamma.sum()) - gammaln(self.gamma).sum() \
+ ((self.gamma-1) * np.log(x)).sum(axis=-1)
def expected_g(self):
return self.gamma / self.gamma.sum(axis=-1, keepdims=True)
def expected_log_g(self):
return psi(self.gamma) - psi(self.gamma.sum(axis=-1, keepdims=True))
def negentropy(self, E_ln_g=None):
"""
        Compute the negative entropy of the Dirichlet distribution.
        :param E_ln_g: If given, use this in place of the expectation wrt gamma
:return: E[ ln p(g | gamma)]
"""
if E_ln_g is None:
E_ln_g = self.expected_log_g()
H = gammaln(self.gamma.sum(axis=-1, keepdims=True)).sum()
H += -gammaln(self.gamma).sum()
H += ((self.gamma - 1) * E_ln_g).sum()
return H
class Beta(Dirichlet):
def __init__(self, tau1, tau0):
tau1 = np.atleast_1d(tau1)
tau0 = np.atleast_1d(tau0)
gamma = np.concatenate((tau1[...,None], tau0[...,None]), axis=-1)
super(Beta, self).__init__(gamma)
def log_probability(self, p):
x = np.concatenate((p[...,None], 1-p[...,None]), axis=-1)
return super(Beta, self).log_probability(x)
def expected_p(self):
E_g = self.expected_g()
return E_g[...,0]
def expected_log_p(self):
E_logg = self.expected_log_g()
return E_logg[...,0]
def expected_log_notp(self):
E_logg = self.expected_log_g()
return E_logg[...,1]
def negentropy(self, E_ln_p=None, E_ln_notp=None):
if E_ln_p is not None and E_ln_notp is not None:
E_ln_g = np.concatenate((E_ln_p[...,None], E_ln_notp[...,None]), axis=-1)
else:
E_ln_g = None
return super(Beta, self).negentropy(E_ln_g=E_ln_g)
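# A small sanity-check sketch (assumes scipy, as imported above): the Bernoulli
# negentropy should equal minus the Shannon entropy reported by scipy.stats.
if __name__ == "__main__":
    from scipy.stats import bernoulli
    b = Bernoulli(0.3)
    assert np.isclose(b.negentropy(), -bernoulli(0.3).entropy())
    print("Bernoulli negentropy check passed")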
|
401843
|
from django.utils.duration import duration_string
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from resources.api.base import TranslatedModelSerializer
from ..models import Order, OrderLine, Product
class ProductSerializer(TranslatedModelSerializer):
id = serializers.CharField(source='product_id')
price = serializers.SerializerMethodField()
class Meta:
model = Product
fields = (
'id', 'type', 'name', 'description', 'price', 'max_quantity'
)
def get_price(self, obj):
if obj.price_type not in (Product.PRICE_FIXED, Product.PRICE_PER_PERIOD):
raise ValueError('{} has invalid price type "{}"'.format(obj, obj.price_type))
ret = {
'type': obj.price_type,
'tax_percentage': str(obj.tax_percentage),
'amount': str(obj.price)
}
if obj.price_type == Product.PRICE_PER_PERIOD:
ret.update({'period': duration_string(obj.price_period)})
return ret
class OrderLineSerializer(serializers.ModelSerializer):
product = serializers.SlugRelatedField(queryset=Product.objects.current(), slug_field='product_id')
price = serializers.CharField(source='get_price', read_only=True)
unit_price = serializers.CharField(source='get_unit_price', read_only=True)
class Meta:
model = OrderLine
fields = ('product', 'quantity', 'unit_price', 'price')
def to_representation(self, instance):
data = super().to_representation(instance)
data['product'] = ProductSerializer(instance.product).data
return data
def validate(self, order_line):
if order_line.get('quantity', 1) > order_line['product'].max_quantity:
raise serializers.ValidationError({'quantity': _('Cannot exceed max product quantity')})
return order_line
def validate_product(self, product):
available_products = self.context['available_products']
        # available_products None means "all".
        # The price check endpoint uses that, because available products don't
        # make sense in its context (there is no resource).
if available_products is not None:
if product not in available_products:
raise serializers.ValidationError(_("This product isn't available on the resource."))
return product
class OrderSerializerBase(serializers.ModelSerializer):
order_lines = OrderLineSerializer(many=True)
price = serializers.CharField(source='get_price', read_only=True)
class Meta:
model = Order
fields = ('state', 'order_lines', 'price')
|
401858
|
import argparse
import time
# terminal run: python test.py 3 4
parser = argparse.ArgumentParser(description = 'This is a summation method.')
parser.add_argument('a')
parser.add_argument('b')
args = parser.parse_args()
a = int(args.a)
b = int(args.b)
# print('begin 1s:')
# time.sleep(1)
def add():  # avoid shadowing the builtin sum()
    return a + b
result = add()
# print('begin 1.1s:')
# time.sleep(1.1)
print('result = ',result)
|
401865
|
from . import links
from .allocator import use_mempool_in_cupy_malloc, use_torch_in_cupy_malloc
from .datasets import TransformDataset
from .links import TorchModule
from .parameter import ChainerParameter, LinkAsTorchModel, Optimizer
from .tensor import asarray, astensor, to_numpy_dtype
from .device import to_chainer_device, to_torch_device
|
401867
|
from corehq.util.validation import is_url_or_host_banned
from corehq.util.urlvalidate.ip_resolver import CannotResolveHost
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
def form_clean_url(url):
try:
if is_url_or_host_banned(url):
raise ValidationError(_("Invalid URL"))
except CannotResolveHost:
raise ValidationError(_("Cannot Resolve URL"))
return url
|
401870
|
from FreeTAKServer.model.FTSModel.fts_protocol_object import FTSProtocolObject
class Archive(FTSProtocolObject):
@staticmethod
def drop_point():
        # although the tag exists in the CoT structure, it has no known content
pass
|
401874
|
def palindrome(s):
    # ignore spaces; compare the string with its reverse
    s = s.replace(" ", "")
    return s == s[::-1]
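# e.g. palindrome("never odd or even") -> True, palindrome("abc") -> False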
|
401890
|
import contextlib
import time
_timing_db = []
def record(timing_information):
"""
Add a timing record to a global database.
:param timing_information: a dictionary in the format
{"command": "command line string",
"time_start": unix epoch timestamp,
"time_end": unix epoch timestamp}
"""
_timing_db.append(timing_information)
@contextlib.contextmanager
def record_step(name):
"""
Record time spent in this context handler as running $name.
Usage:
with record_step("my_program argument argument"):
do_stuff()
:param name: section name for timing purposes, will usually be
shortened to the first word.
"""
timing = {"command": name, "time_start": time.time()}
try:
yield
finally:
timing["time_end"] = time.time()
record(timing)
def report():
"""
Visualise all recorded program executions in a flow diagram
:return: A list of strings
"""
return visualise_db(_timing_db)
def reset():
"""
Remove all records from the global database
"""
global _timing_db
_timing_db = []
def visualise_db(timing_db):
"""
Visualises program execution in a flow diagram given a list of timestamps.
:param timing_db: A list of dictionaries, each in the format of
{"command": "command line string",
"time_start": unix epoch timestamp,
"time_end": unix epoch timestamp}
:return: A list of strings
"""
if not timing_db:
return []
# prepare a few helper data structures
ordered_by_start = list(
sorted((t["time_start"], n) for n, t in enumerate(timing_db))
)
start_order = tuple(n for _, n in ordered_by_start)
ordered_by_end = list(sorted((t["time_end"], n) for n, t in enumerate(timing_db)))
relative_start_time = ordered_by_start[0][0]
total_runtime = ordered_by_end[-1][0] - relative_start_time
index_width = len(str(len(timing_db))) + 1
time_width = len("%.1f" % total_runtime)
output = []
running_tasks = []
# annotate the dictionaries with useful information
for n, t in enumerate(timing_db):
t["index"] = start_order.index(n) + 1
t["index_readable"] = "%d." % t["index"]
t["runtime"] = t["time_end"] - t["time_start"]
t["short_command"] = t["command"].split(" ")[0]
if t["runtime"] <= 90:
t["runtime_readable"] = "%.1fs" % t["runtime"]
else:
t["runtime_readable"] = "%.1fm" % (t["runtime"] / 60)
# highlight any significant unaccounted periods which either take more
# than 0.5% of the total runtime or would be featured in the top 10
thinking_breaks = []
top_10_runtime = sorted((t["runtime"] for t in timing_db), reverse=True)[0:10][-1]
significant_thinking_break = min(total_runtime * 0.005, top_10_runtime)
while ordered_by_start:
timestamp, n = ordered_by_start.pop(0)
t = timing_db[n]
tree_view = [" " if task is None else "\u2502" for task in running_tasks]
if ordered_by_end[0][1] == n and (
not ordered_by_start or ordered_by_start[0][0] >= ordered_by_end[0][0]
):
tree_view.append("\u25EF")
ordered_by_end.pop(0)
end_time = t["time_end"]
else:
tree_view.append("\u252C")
running_tasks.append(n)
output.append(
"{timestamp:{time_width}.1f}s {t[index_readable]:>{index_width}} {tree_view:<5} {t[short_command]} ({t[runtime_readable]})".format(
t=t,
tree_view=" ".join(tree_view),
index_width=index_width,
time_width=time_width,
timestamp=timestamp - relative_start_time,
)
)
# to debug:
# output[-1] += " {t[time_start]} {t[time_end]} ({n})".format(t=t, n=n)
# check for any finishing tasks before the next one starts
while running_tasks and (
not ordered_by_start or ordered_by_end[0][0] < ordered_by_start[0][0]
):
timestamp, finishing_task = ordered_by_end.pop(0)
output_line = (
"{timestamp:{time_width}.1f}s {nothing:{index_width}} ".format(
nothing="",
index_width=index_width,
time_width=time_width,
timestamp=timestamp - relative_start_time,
)
)
for n, task in enumerate(running_tasks):
if task is None:
output_line += " "
elif task == finishing_task:
output_line += "\u2534 "
running_tasks[n] = None
else:
output_line += "\u2502 "
output.append(output_line)
while running_tasks and running_tasks[-1] is None:
running_tasks.pop()
end_time = timestamp
if not running_tasks and ordered_by_start:
# There are no more running tasks, but another task is due to start soon.
# This is a xia2 thinking time break.
next_task_start = ordered_by_start[0][0]
thinking_time = next_task_start - end_time
timestamp = end_time
# Highlight thinking time if it is significant.
if thinking_time >= significant_thinking_break:
tbreak = {
"runtime": thinking_time,
"index": len(thinking_breaks) + 1,
"index_readable": "T%d " % (len(thinking_breaks) + 1),
}
if tbreak["runtime"] <= 90:
tbreak["runtime_readable"] = "%.1fs" % tbreak["runtime"]
else:
tbreak["runtime_readable"] = "%.1fm" % (tbreak["runtime"] / 60)
tbreak[
"command"
] = "xia2 thinking time ({tbreak[runtime_readable]})".format(
tbreak=tbreak
)
thinking_breaks.append(tbreak)
output.append(
"{timestamp:{time_width}.1f}s {t[index_readable]:>{index_width}} {nothing:<5} {t[command]}".format(
timestamp=timestamp - relative_start_time,
nothing="\U0001F914",
time_width=time_width,
index_width=index_width,
t=tbreak,
)
)
output.append("")
output.append("Longest times:")
timing_by_time = sorted(
timing_db + thinking_breaks, key=lambda x: x["runtime"], reverse=True
)
for t in timing_by_time[0:10]:
output.append(
"{t[runtime]:{time_width}.1f}s: {t[index_readable]:>{index_width_add}} {t[command]}".format(
t=t, index_width_add=index_width + 1, time_width=time_width
)
)
return output
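# A minimal sketch of what visualise_db() renders, with two overlapping
# commands (hypothetical command names and timestamps; the glyphs come from
# the box-drawing characters used above):
if __name__ == "__main__":
    demo_db = [
        {"command": "step_one --fast", "time_start": 0.0, "time_end": 4.0},
        {"command": "step_two", "time_start": 1.0, "time_end": 3.0},
    ]
    print("\n".join(visualise_db(demo_db)))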
|
401923
|
import asyncio
from typing import Any
from behave import given, then, when # type: ignore
from behave.api.async_step import async_run_until_complete # type: ignore
from mqtt_io.modules.gpio import InterruptEdge, PinDirection
from mqtt_io.server import MqttIo
# pylint: disable=function-redefined,protected-access
# TODO: Tasks pending completion -@flyte at 22/02/2021, 16:56:52
# Add a test to go through all of the modules in the gpio dir and test them for compliance
def get_coro(task: "asyncio.Task[Any]") -> Any:
"""
Get a task's coroutine.
"""
# pylint: disable=protected-access
if hasattr(task, "get_coro"):
return task.get_coro() # type: ignore[attr-defined]
if hasattr(task, "_coro"):
return task._coro # type: ignore[attr-defined]
raise AttributeError("Unable to get task's coro")
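# Quick illustration (a sketch): Python 3.8+ exposes Task.get_coro(), while
# older interpreters only have the private _coro attribute, hence the fallback:
#     task = asyncio.get_event_loop().create_task(asyncio.sleep(0))
#     assert get_coro(task).__name__ == "sleep"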
@then("GPIO module {module_name} should have a pin config for {pin_name}")
def step(context: Any, module_name: str, pin_name: str) -> None:
mqttio = context.data["mqttio"]
module = mqttio.gpio_modules[module_name]
assert pin_name in {x["name"] for x in module.pin_configs.values()}
@then("GPIO module {module_name} should have a setup_pin() call for {pin_name}") # type: ignore[no-redef]
def step(context: Any, module_name: str, pin_name: str) -> None:
mqttio = context.data["mqttio"]
module = mqttio.gpio_modules[module_name]
call_pin_names = {
kwargs["pin_config"]["name"] for _, kwargs in module.setup_pin.call_args_list
}
assert pin_name in call_pin_names
@then("{pin_name} pin should have been set up as an {io_dir}") # type: ignore[no-redef]
def step(context: Any, pin_name: str, io_dir: str) -> None:
assert io_dir in ("input", "output")
mqttio = context.data["mqttio"]
io_conf = getattr(mqttio, f"digital_{io_dir}_configs")[pin_name]
module = mqttio.gpio_modules[io_conf["module"]]
pin_dirs = {
kwargs["pin_config"]["name"]: args[1]
for args, kwargs in module.setup_pin.call_args_list
}
if io_dir == "input":
assert pin_dirs[pin_name] == PinDirection.INPUT
else:
assert pin_dirs[pin_name] == PinDirection.OUTPUT
@then("GPIO module {module_name} {should_shouldnt} have a {setup_func_name}() call for {pin_name}") # type: ignore[no-redef]
def step(
context: Any,
module_name: str,
should_shouldnt: str,
setup_func_name: str,
pin_name: str,
) -> None:
assert should_shouldnt in ("should", "shouldn't")
mqttio = context.data["mqttio"]
module = mqttio.gpio_modules[module_name]
relevant_call_args = None
for call_args, _ in getattr(module, setup_func_name).call_args_list: # type: ignore[attr-defined]
if call_args[2]["name"] == pin_name:
relevant_call_args = call_args
if should_shouldnt == "should":
assert relevant_call_args is not None
else:
assert relevant_call_args is None
@then("a digital input poller task {is_isnt} added for {pin_name}") # type: ignore[no-redef]
def step(context: Any, is_isnt: str, pin_name: str) -> None:
assert is_isnt in ("is", "isn't")
mqttio = context.data["mqttio"]
poller_task_pin_names = {
get_coro(task).cr_frame.f_locals["in_conf"]["name"]
for task in mqttio.transient_tasks
if isinstance(task, asyncio.Task) # concurrent.Future doesn't have get_coro()
and get_coro(task).__name__ == "digital_input_poller"
}
if is_isnt == "is":
assert (
pin_name in poller_task_pin_names
), "Should have a digital input poller task added to transient_tasks"
else:
assert (
pin_name not in poller_task_pin_names
), "Shouldn't have a digital input poller task added to transient_tasks"
poller_task_pin_names = {
get_coro(task).cr_frame.f_locals["in_conf"]["name"]
for task in asyncio.Task.all_tasks(loop=mqttio.loop)
if get_coro(task).__name__ == "digital_input_poller"
}
if is_isnt == "is":
assert (
pin_name in poller_task_pin_names
), "Should have a digital input poller task added to the event loop"
else:
assert (
pin_name not in poller_task_pin_names
), "Shouldn't have a digital input poller task added to the event loop"
@then("a digital output loop task {is_isnt} added for GPIO module {module_name}") # type: ignore[no-redef]
def step(context: Any, is_isnt: str, module_name: str) -> None:
assert is_isnt in ("is", "isn't")
mqttio = context.data["mqttio"]
module = mqttio.gpio_modules[module_name]
task_modules = {
get_coro(task).cr_frame.f_locals["module"]
for task in mqttio.transient_tasks
if isinstance(task, asyncio.Task) # concurrent.Future doesn't have get_coro()
and get_coro(task).__name__ == "digital_output_loop"
}
if is_isnt == "is":
assert (
module in task_modules
), "Should have a digital output loop task added to transient_tasks"
else:
assert (
module not in task_modules
), "Shouldn't have a digital output loop task added to transient_tasks"
task_modules = {
get_coro(task).cr_frame.f_locals["module"]
for task in asyncio.Task.all_tasks(loop=mqttio.loop)
if get_coro(task).__name__ == "digital_output_loop"
}
if is_isnt == "is":
assert (
module in task_modules
), "Should have a digital output loop task added to the event loop"
else:
assert (
module not in task_modules
), "Shouldn't have a digital output loop task added to the event loop"
@then("{pin_name} {should_shouldnt} be configured as a remote interrupt") # type: ignore[no-redef]
def step(context: Any, pin_name: str, should_shouldnt: str) -> None:
assert should_shouldnt in ("should", "shouldn't")
mqttio = context.data["mqttio"]
in_conf = mqttio.digital_input_configs[pin_name]
module = mqttio.gpio_modules[in_conf["module"]]
is_remote_interrupt = module.remote_interrupt_for(in_conf["pin"])
if should_shouldnt == "should":
assert is_remote_interrupt
else:
assert not is_remote_interrupt
@then("{pin_name} should be configured as a {direction_str} interrupt") # type: ignore[no-redef]
def step(context: Any, pin_name: str, direction_str: str) -> None:
mqttio = context.data["mqttio"]
in_conf = mqttio.digital_input_configs[pin_name]
module = mqttio.gpio_modules[in_conf["module"]]
direction = module.interrupt_edges[in_conf["pin"]]
assert direction == getattr(InterruptEdge, direction_str.upper())
@when("{pin_name} reads a value of {value_str} with a last value of {last_value_str}") # type: ignore[no-redef]
@async_run_until_complete(loop="loop")
async def step(context: Any, pin_name: str, value_str: str, last_value_str: str) -> None:
assert value_str in ("true", "false")
assert last_value_str in ("null", "true", "false")
value_map = dict(true=True, false=False, null=None)
mqttio: MqttIo = context.data["mqttio"]
in_conf = mqttio.digital_input_configs[pin_name]
value = value_map[value_str]
last_value = value_map[last_value_str]
await mqttio._handle_digital_input_value(in_conf, value, last_value)
@when("we set digital output {pin_name} to {on_off}") # type: ignore[no-redef]
@async_run_until_complete(loop="loop")
async def step(context: Any, pin_name: str, on_off: str) -> None:
assert on_off in ("on", "off")
mqttio: MqttIo = context.data["mqttio"]
out_conf = mqttio.digital_output_configs[pin_name]
module = mqttio.gpio_modules[out_conf["module"]]
await mqttio.set_digital_output(module, out_conf, on_off == "on")
|
401980
|
from track import Track
import pandas as pd
import tempfile
import pkg_resources
import re
import os.path
#hg19_track_path = pkg_resources.resource_filename('deepmosaic', 'resources/hg19_seq.h5')
#hg38_track_path = pkg_resources.resource_filename('deepmosaic', 'resources/hg38_seq.h5')
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
hg19_track_path = os.path.join(HERE, "resources", "hg19_seq.h5")
hg38_track_path = os.path.join(HERE, "resources", "hg38_seq.h5")
def homopolymer_dinucleotide_annotation(chrom, pos, build):
def check_if_in_homopolymer(seq_str):
pattern = re.compile(r'([ACGT])\1{3,}')
matches = [m.group() for m in re.finditer(pattern, seq_str)]
if len(matches) > 0:
is_homopolymer = 1
else:
is_homopolymer = 0
return is_homopolymer
def check_if_in_dinucleotide_repeat(seq_str):
pattern = re.compile(r'([ACGT]{2})\1{3,}')
matches = [m.group() for m in re.finditer(pattern, seq_str)]
if len(matches) > 0:
is_dinucleotide = 1
else:
is_dinucleotide = 0
return is_dinucleotide
if build == "hg19":
track_path = hg19_track_path
elif build == "hg38":
track_path = hg38_track_path
seq_track = Track("seq", track_path)
pos = int(pos)
chrom = "chr" + chrom
seq_str_9bp = seq_track.get_seq_str(chrom, pos-4, pos+4)
seq_str_17bp = seq_track.get_seq_str(chrom, pos-8, pos+8)
return check_if_in_homopolymer(seq_str_9bp), check_if_in_dinucleotide_repeat(seq_str_17bp)
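# Quick illustration of the two repeat detectors on plain strings (no genome
# track needed): 4+ identical bases trip the homopolymer pattern, 4+ copies
# of a 2-mer trip the dinucleotide pattern.
if __name__ == "__main__":
    assert re.search(r'([ACGT])\1{3,}', "TTAAAAGC") is not None
    assert re.search(r'([ACGT]{2})\1{3,}', "ACACACACG") is not None
    assert re.search(r'([ACGT])\1{3,}', "ACGTACGT") is None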
|
401995
|
import unittest
from grip.model import Package, Version
class TestPackage(unittest.TestCase):
def test_name_sanitizer(self):
pkg = Package('Django_module', '1.0')
self.assertEqual(pkg.name, 'django-module')
def test_version(self):
pkg = Package('pkg', '1.0')
self.assertEqual(pkg.version, Version('1.0'))
pkg = Package('pkg', Version('1.0'))
self.assertEqual(pkg.version, Version('1.0'))
def test_comparison(self):
a = Package('a', '1.0')
a_1 = Package('a', '1.1')
b = Package('b', '1.0')
self.assertEqual(a, a_1)
self.assertLess(a, b)
def test_str(self):
pkg = Package('Django', '1.0')
self.assertEqual(str(pkg), 'django@1.0')
|
402010
|
import random
from processor import CharacterProcessor
class Race(CharacterProcessor):
def process(self):
""" Pick the character's race, randomly. """
self.character.race = random.choice(['Dwarf', 'Elf', 'Halfling', 'Human'])
if self.character.race == 'Dwarf':
self.character.scores['CON'] += 2
self.character.speed = 25
            self.character.proficiencies.update(['battleaxe', 'handaxe', 'throwing hammer', 'warhammer'])
            self.character.tool_proficiencies.add(random.choice(["smith's tools", "brewer's supplies", "mason's tools"]))
self.character.languages.add("Dwarvish")
self.character.race = random.choice(['Hill Dwarf', 'Mountain Dwarf'])
if self.character.race == 'Hill Dwarf':
self.character.scores['WIS'] += 1
self.character.hp += 1
else:
self.character.scores['STR'] += 2
                self.character.proficiencies.update(['light armour', 'medium armour'])
elif self.character.race == 'Elf':
self.character.scores['DEX'] += 2
self.character.speed = 30
            self.character.proficiencies.add('Perception')
self.character.languages.add('Elvish')
self.character.race = random.choice(['High Elf', 'Wood Elf'])
elif self.character.race == 'Halfling':
self.character.scores['DEX'] += 2
self.character.speed = 25
self.character.proficiencies.add('Perception')
self.character.languages.add('Halfling')
else:
for a in self.character.attributes:
self.character.scores[a] += 1
self.character.speed = 30
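# Why update()/add() instead of union() above: set.union() returns a new set
# and leaves the original untouched, and union('Perception') would even merge
# the string's individual characters rather than the whole name. A quick check:
if __name__ == "__main__":
    s = set()
    s.union(['battleaxe'])   # result discarded -- s is unchanged
    assert s == set()
    s.update(['battleaxe'])  # mutates in place
    s.add('Perception')      # adds the whole string as one element
    assert s == {'battleaxe', 'Perception'}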
|
402030
|
import unittest
from slack_sdk.oauth.state_store import FileOAuthStateStore
class TestFile(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_instance(self):
store = FileOAuthStateStore(expiration_seconds=10)
self.assertIsNotNone(store)
def test_issue_and_consume(self):
store = FileOAuthStateStore(expiration_seconds=10)
state = store.issue()
result = store.consume(state)
self.assertTrue(result)
result = store.consume(state)
self.assertFalse(result)
def test_kwargs(self):
store = FileOAuthStateStore(expiration_seconds=10)
store.issue(foo=123, bar="baz")
|
402037
|
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
def load_mnist():
dir_path = "mnist_datas"
files = ["train-images-idx3-ubyte.gz",
"train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz"]
# download mnist datas
if not os.path.exists(dir_path):
os.makedirs(dir_path)
data_url = "http://yann.lecun.com/exdb/mnist/"
    for file_url in files:
        if os.path.exists(dir_path + '/' + file_url):
            continue
        os.system("wget {}{}".format(data_url, file_url))
        os.system("mv {} {}".format(file_url, dir_path))
# load mnist data
# load train data
with gzip.open(dir_path + '/' + files[0], 'rb') as f:
train_x = np.frombuffer(f.read(), np.uint8, offset=16)
train_x = train_x.astype(np.float32) / 255
train_x = train_x.reshape((-1, 28, 28))
print("train images >>", train_x.shape)
with gzip.open(dir_path + '/' + files[1], 'rb') as f:
train_y = np.frombuffer(f.read(), np.uint8, offset=8)
print("train labels >>", train_y.shape)
# load test data
with gzip.open(dir_path + '/' + files[2], 'rb') as f:
test_x = np.frombuffer(f.read(), np.uint8, offset=16)
test_x = test_x.astype(np.float32) / 255
test_x = test_x.reshape((-1, 28, 28))
print("test images >>", test_x.shape)
with gzip.open(dir_path + '/' + files[3], 'rb') as f:
test_y = np.frombuffer(f.read(), np.uint8, offset=8)
print("test labels >>", test_y.shape)
"""
with open(dir_path + '/' + f_name, 'rb') as f:
#print(struct.unpack("b", f.read(1)))
a = f.readlines()
#print(struct.unpack("b", a[0]))
print(len(a))
for _a in a[:1]:
print(int.from_bytes(_a, 'little'))
print(_a)
"""
    return train_x, train_y, test_x, test_y
if __name__ == "__main__":
    train_x, train_y, test_x, test_y = load_mnist()
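    # A quick sanity check using the matplotlib import above (a sketch --
    # assumes an interactive display is available):
    plt.imshow(train_x[0], cmap="gray")
    plt.title("label: {}".format(train_y[0]))
    plt.show()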
|
402054
|
import torch
import numpy as np
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def mel_to_hertz(mel_values):
"""Converts frequencies in `mel_values` from the mel scale to linear scale."""
return _MEL_BREAK_FREQUENCY_HERTZ * (
np.exp(np.array(mel_values) / _MEL_HIGH_FREQUENCY_Q) - 1.0)
def hertz_to_mel(frequencies_hertz):
"""Converts frequencies in `frequencies_hertz` in Hertz to the mel scale."""
return _MEL_HIGH_FREQUENCY_Q * np.log(
1.0 + (np.array(frequencies_hertz) / _MEL_BREAK_FREQUENCY_HERTZ))
def linear_to_mel_weight_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
sample_rate=16000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0):
"""Returns a matrix to warp linear scale spectrograms to the mel scale.
Adapted from tf.contrib.signal.linear_to_mel_weight_matrix with a minimum
band width (in Hz scale) of 1.5 * freq_bin. To preserve accuracy,
we compute the matrix at float64 precision and then cast to `dtype`
at the end. This function can be constant folded by graph optimization
since there are no Tensor inputs.
Args:
num_mel_bins: Int, number of output frequency dimensions.
num_spectrogram_bins: Int, number of input frequency dimensions.
sample_rate: Int, sample rate of the audio.
lower_edge_hertz: Float, lowest frequency to consider.
upper_edge_hertz: Float, highest frequency to consider.
Returns:
Numpy float32 matrix of shape [num_spectrogram_bins, num_mel_bins].
Raises:
ValueError: Input argument in the wrong range.
"""
# Validate input arguments
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if num_spectrogram_bins <= 0:
raise ValueError(
'num_spectrogram_bins must be positive. Got: %s' % num_spectrogram_bins)
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if lower_edge_hertz < 0.0:
raise ValueError(
'lower_edge_hertz must be non-negative. Got: %s' % lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got: %s for sample_rate: %s'
% (upper_edge_hertz, sample_rate))
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = np.linspace(
0.0, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:, np.newaxis]
# spectrogram_bins_mel = hertz_to_mel(linear_frequencies)
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = np.linspace(
hertz_to_mel(lower_edge_hertz), hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2)
lower_edge_mel = band_edges_mel[0:-2]
center_mel = band_edges_mel[1:-1]
upper_edge_mel = band_edges_mel[2:]
freq_res = nyquist_hertz / float(num_spectrogram_bins)
freq_th = 1.5 * freq_res
for i in range(0, num_mel_bins):
center_hz = mel_to_hertz(center_mel[i])
lower_hz = mel_to_hertz(lower_edge_mel[i])
upper_hz = mel_to_hertz(upper_edge_mel[i])
if upper_hz - lower_hz < freq_th:
rhs = 0.5 * freq_th / (center_hz + _MEL_BREAK_FREQUENCY_HERTZ)
dm = _MEL_HIGH_FREQUENCY_Q * np.log(rhs + np.sqrt(1.0 + rhs**2))
lower_edge_mel[i] = center_mel[i] - dm
upper_edge_mel[i] = center_mel[i] + dm
lower_edge_hz = mel_to_hertz(lower_edge_mel)[np.newaxis, :]
center_hz = mel_to_hertz(center_mel)[np.newaxis, :]
upper_edge_hz = mel_to_hertz(upper_edge_mel)[np.newaxis, :]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (linear_frequencies - lower_edge_hz) / (
center_hz - lower_edge_hz)
upper_slopes = (upper_edge_hz - linear_frequencies) / (
upper_edge_hz - center_hz)
# Intersect the line segments with each other and zero.
mel_weights_matrix = np.maximum(0.0, np.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
# [freq, mel]
mel_weights_matrix = np.pad(mel_weights_matrix, [[bands_to_zero, 0], [0, 0]],
'constant')
return mel_weights_matrix
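# Shape contract sketch: the returned matrix is [num_spectrogram_bins, num_mel_bins], e.g.
#     linear_to_mel_weight_matrix(num_mel_bins=40, num_spectrogram_bins=257).shape == (257, 40)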
class Gabor:
"""This class creates gabor filters designed to match mel-filterbanks.
Attributes:
n_filters: number of filters
min_freq: minimum frequency spanned by the filters
max_freq: maximum frequency spanned by the filters
sample_rate: samplerate (samples/s)
window_len: window length in samples
n_fft: number of frequency bins to compute mel-filters
normalize_energy: boolean, True means that all filters have the same energy,
False means that the higher the center frequency of a filter, the higher
its energy
"""
def __init__(self,
n_filters: int = 40,
min_freq: float = 0.,
max_freq: float = 8000.,
sample_rate: int = 16000,
window_len: int = 401,
n_fft: int = 512,
normalize_energy: bool = False):
self.n_filters = n_filters
self.min_freq = min_freq
self.max_freq = max_freq
self.sample_rate = sample_rate
self.window_len = window_len
self.n_fft = n_fft
self.normalize_energy = normalize_energy
@property
def gabor_params_from_mels(self):
"""Retrieves center frequencies and standard deviations of gabor filters."""
coeff = np.sqrt(2. * np.log(2.)) * self.n_fft
sqrt_filters = torch.sqrt(self.mel_filters)
center_frequencies = torch.argmax(sqrt_filters, dim=1).type(torch.float32)
peaks, indices = torch.max(sqrt_filters, dim=1)
half_magnitudes = torch.div(peaks, 2.)
fwhms = torch.sum((sqrt_filters >= half_magnitudes.unsqueeze(1)).type(torch.float32), dim=1)
return torch.stack(
[center_frequencies * 2 * np.pi / self.n_fft, coeff / (np.pi * fwhms)],
dim=1)
def _mel_filters_areas(self, filters):
"""Area under each mel-filter."""
peaks, indices = torch.max(filters, dim=1)
return peaks * (torch.sum((filters > 0).type(torch.float32), dim=1) + 2) * np.pi / self.n_fft
@property
def mel_filters(self):
"""Creates a bank of mel-filters."""
# build mel filter matrix
mel_filters = linear_to_mel_weight_matrix(
num_mel_bins=self.n_filters,
num_spectrogram_bins=self.n_fft // 2 + 1,
sample_rate=self.sample_rate,
lower_edge_hertz=self.min_freq,
upper_edge_hertz=self.max_freq)
        mel_filters = torch.from_numpy(np.transpose(mel_filters))
        if self.normalize_energy:
            mel_filters = mel_filters / self._mel_filters_areas(mel_filters).unsqueeze(1)
        return mel_filters
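# A minimal usage sketch: derive per-filter Gabor parameters from the
# mel-filterbank. Only the shape is asserted; values depend on the defaults.
if __name__ == "__main__":
    g = Gabor(n_filters=40, sample_rate=16000, n_fft=512)
    # column 0: center frequency (rad/sample), column 1: inverse-bandwidth term
    params = g.gabor_params_from_mels
    assert tuple(params.shape) == (40, 2)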
|
402086
|
LONG_TIMEOUT = 30.0 # For wifi scan
SHORT_TIMEOUT = 10.0 # For any other command
DEFAULT_MEROSS_HTTP_API = "https://iot.meross.com"
DEFAULT_MQTT_HOST = "mqtt.meross.com"
DEFAULT_MQTT_PORT = 443
DEFAULT_COMMAND_TIMEOUT = 10.0
|
402118
|
from pymtl import *
from lizard.bitutil import clog2, bit_enum
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.interface import Interface, UseInterface
CMPFunc = bit_enum(
    'CMPFunc',
None,
'CMP_EQ',
'CMP_NE',
'CMP_LT',
'CMP_GE',
)
class ComparatorInterface(Interface):
def __init__(s, xlen):
s.Xlen = xlen
super(ComparatorInterface, s).__init__([
MethodSpec(
'exec',
args={
'func': CMPFunc.bits,
'src0': Bits(xlen),
'src1': Bits(xlen),
'unsigned': Bits(1),
},
rets={
'res': Bits(1),
},
call=True,
rdy=True,
),
])
class Comparator(Model):
def __init__(s, alu_interface):
UseInterface(s, alu_interface)
xlen = s.interface.Xlen
# PYMTL BROKEN:
XLEN_M1 = xlen - 1
# Input
s.s0_ = Wire(xlen)
s.s1_ = Wire(xlen)
s.func_ = Wire(CMPFunc.bits)
# Flags
s.eq_ = Wire(1)
s.lt_ = Wire(1)
# Output
s.res_ = Wire(1)
# Since single cycle, always ready
s.connect(s.exec_rdy, 1)
s.connect(s.exec_res, s.res_)
s.connect(s.func_, s.exec_func)
    # All workarounds due to issues with slicing inside concat():
s.s0_lower_ = Wire(XLEN_M1)
s.s0_up_ = Wire(1)
s.s1_lower_ = Wire(XLEN_M1)
s.s1_up_ = Wire(1)
@s.combinational
def set_flags():
s.eq_.v = s.s0_ == s.s1_
s.lt_.v = s.s0_ < s.s1_
@s.combinational
def set_signed():
      # Flip the uppermost bit when signed, so an unsigned compare yields signed order
s.s0_up_.v = s.exec_src0[
XLEN_M1] if s.exec_unsigned else not s.exec_src0[XLEN_M1]
s.s1_up_.v = s.exec_src1[
XLEN_M1] if s.exec_unsigned else not s.exec_src1[XLEN_M1]
s.s0_lower_.v = s.exec_src0[0:XLEN_M1]
s.s1_lower_.v = s.exec_src1[0:XLEN_M1]
# Now we can concat and compare
s.s0_.v = concat(s.s0_up_, s.s0_lower_)
s.s1_.v = concat(s.s1_up_, s.s1_lower_)
@s.combinational
def eval_comb():
s.res_.v = 0
if s.func_ == CMPFunc.CMP_EQ:
s.res_.v = s.eq_
elif s.func_ == CMPFunc.CMP_NE:
s.res_.v = not s.eq_
elif s.func_ == CMPFunc.CMP_LT:
s.res_.v = s.lt_
elif s.func_ == CMPFunc.CMP_GE:
s.res_.v = not s.lt_ or s.eq_
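# The MSB trick above, in plain Python (a sketch, not part of the RTL): flipping
# the sign bit maps two's-complement order onto unsigned order, so a single
# unsigned comparator serves both signed and unsigned compares.
if __name__ == "__main__":
  xlen = 8
  bias = 1 << (xlen - 1)
  # -1 is 0xFF in 8-bit two's complement: unsigned it is the largest value,
  # but with the MSB flipped it sorts below +1, as signed order requires.
  assert (0xFF ^ bias) < (0x01 ^ bias)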
|
402139
|
import moviepy.editor as mp
import speech_recognition as sr
import wave
import contextlib
from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
import sys
def get_audio(filename):
video = mp.VideoFileClip(filename)
video.audio.write_audiofile("audio.wav")
def get_file_length(audiofile):
with contextlib.closing(wave.open(audiofile, 'r')) as f:
frames = f.getnframes()
rate = f.getframerate()
duration = frames / float(rate)
return duration
def get_text(audiofile="audio.wav"):
r = sr.Recognizer()
audio_length = get_file_length(audiofile)
texts = []
with sr.AudioFile(audiofile) as source:
for i in range(0, int(round(audio_length / 3))):
try:
text = r.recognize_google(r.record(source, duration=3))
except sr.UnknownValueError:
text = "error, no sutitles could be distinguished"
texts.append(text)
return texts
def write_text(text_chunks, original_video, output, font_size=15):
txt_clips = []
for i in range(len(text_chunks)):
txt_clips.append(TextClip(text_chunks[i], fontsize=font_size, color="yellow")
.set_position('bottom')
.set_duration(3)
.set_start(i * 3))
clips = [original_video]
clips.extend(txt_clips)
result = CompositeVideoClip(clips)
result.write_videofile(output)
if __name__ == "__main__":
get_audio(sys.argv[1])
    if len(sys.argv) > 3:
        write_text(get_text(), VideoFileClip(sys.argv[1]), sys.argv[2], int(sys.argv[3]))
    else:
        write_text(get_text(), VideoFileClip(sys.argv[1]), sys.argv[2])
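# Usage sketch (argument order per the main block above; the optional fourth
# argument is the subtitle font size):
#   python <this_script>.py input.mp4 output.mp4 [font_size]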
|
402150
|
import os
import sys
import torch
import torch.nn as nn
import math
from models.common import conv3x3, conv3x3_bn_relu, inconv, up, down, outconv, weights_init
from torch.nn import functional as F
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
__all__ = ['ResNetU50Backbone', 'UNetBackbone']
model_urls = {
'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
}
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 128
super(ResNet, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(64, 64)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = conv3x3(64, 128)
self.bn3 = nn.BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResnetDilated(nn.Module):
def __init__(self, orig_resnet, dilate_scale=8):
super(ResnetDilated, self).__init__()
from functools import partial
if dilate_scale == 8:
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=4))
elif dilate_scale == 16:
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=2))
# take pretrained resnet, except AvgPool and FC
self.conv1 = orig_resnet.conv1
self.bn1 = orig_resnet.bn1
self.relu1 = orig_resnet.relu1
self.conv2 = orig_resnet.conv2
self.bn2 = orig_resnet.bn2
self.relu2 = orig_resnet.relu2
self.conv3 = orig_resnet.conv3
self.bn3 = orig_resnet.bn3
self.relu3 = orig_resnet.relu3
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
@staticmethod
def _nostride_dilate(m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate // 2, dilate // 2)
m.padding = (dilate // 2, dilate // 2)
            # other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x):
conv_out = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
conv_out.append(x)
x = self.layer2(x)
conv_out.append(x)
x = self.layer3(x)
conv_out.append(x)
x = self.layer4(x)
conv_out.append(x)
return conv_out
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on Places
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
return model
def load_url(url, model_dir='./pretrained', map_location=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = url.split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
class UPerNet(nn.Module):
def __init__(self, num_class=150, fc_dim=4096, pool_scales=(1, 2, 3, 6),
fpn_inplanes=(256, 512, 1024, 2048), fpn_dim=256):
super(UPerNet, self).__init__()
# PPM Module
self.ppm_pooling = []
self.ppm_conv = []
for scale in pool_scales:
self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
self.ppm_conv.append(nn.Sequential(
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
self.ppm_conv = nn.ModuleList(self.ppm_conv)
self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales) * 512, fpn_dim, 1)
# FPN Module
self.fpn_in = []
for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer
self.fpn_in.append(nn.Sequential(
nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(fpn_dim),
nn.ReLU(inplace=True)
))
self.fpn_in = nn.ModuleList(self.fpn_in)
self.fpn_out = []
for i in range(len(fpn_inplanes) - 1): # skip the top layer
self.fpn_out.append(nn.Sequential(
conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
))
self.fpn_out = nn.ModuleList(self.fpn_out)
self.conv_last = nn.Sequential(
conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
nn.Conv2d(fpn_dim, num_class, kernel_size=1)
)
def forward(self, conv_out, segSize=None):
conv5 = conv_out[-1]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
ppm_out.append(pool_conv(F.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False)))
ppm_out = torch.cat(ppm_out, 1)
f = self.ppm_last_conv(ppm_out)
fpn_feature_list = [f]
for i in reversed(range(len(conv_out) - 1)):
conv_x = conv_out[i]
conv_x = self.fpn_in[i](conv_x) # lateral branch
f = F.interpolate(
f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch
f = conv_x + f
fpn_feature_list.append(self.fpn_out[i](f))
fpn_feature_list.reverse() # [P2 - P5]
output_size = fpn_feature_list[0].size()[2:]
fusion_list = [fpn_feature_list[0]]
for i in range(1, len(fpn_feature_list)):
fusion_list.append(F.interpolate(
fpn_feature_list[i],
output_size,
mode='bilinear', align_corners=False))
fusion_out = torch.cat(fusion_list, 1)
x = self.conv_last(fusion_out)
x = F.interpolate(
x, size=segSize, mode='bilinear', align_corners=False)
return x
class ResNetU50Backbone(nn.Module):
def __init__(self, dim_embedding=256, encoder_weights="", decoder_weights=""):
super(ResNetU50Backbone, self).__init__()
self.encoder = self.build_encoder(encoder_weights)
self.decoder = self.build_decoder(decoder_weights, dim_embedding)
def forward(self, img):
out = self.encoder(img)
out = self.decoder(out, segSize=(img.size(2) // 4, img.size(3) // 4))
return out
@staticmethod
def build_encoder(weights=''):
pretrained = True if len(weights) == 0 else False
orig_resnet = resnet50(pretrained=pretrained)
net_encoder = ResnetDilated(orig_resnet,
dilate_scale=8)
if len(weights) > 0 and os.path.isfile(weights):
print(f'Loading weights for net_encoder @ {weights}')
net_encoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage),
strict=False
)
return net_encoder
@staticmethod
def build_decoder(weights='', dim_embedding=64):
net_decoder = UPerNet(
num_class=150,
fc_dim=2048,
fpn_dim=512
)
net_decoder.conv_last[1] = conv3x3_bn_relu(net_decoder.conv_last[1].in_channels, dim_embedding, 1)
net_decoder.apply(weights_init)
if len(weights) > 0 and os.path.isfile(weights):
print(f'Loading weights for net_decoder @ {weights}')
net_decoder.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage),
strict=False
)
return net_decoder
class UNetBackbone(nn.Module):
def __init__(self, dim_embedding=256, n_downs=5, n_ups=3, weights=""):
super(UNetBackbone, self).__init__()
assert n_downs > 0 and 0 < n_ups <= n_downs
self.n_downs = n_downs
self.n_ups = n_ups
self.inc = inconv(3, 64)
down_channels = []
for i in range(n_downs):
down_channel = 64 * 2**min(n_downs - 1, i + 1)
self.add_module(
f"down{i+1}",
down(64 * 2**i, down_channel)
)
down_channels.append(down_channel)
for i in range(n_ups):
down_channels.pop()
self.add_module(
f"up{i+1}",
up(64 * 2**(n_downs - i), 64 * 2**max(0, n_downs - i - 2))
)
self.outc = outconv(64 * 2**max(0, n_downs - n_ups - 1) + sum(down_channels) // 2, dim_embedding)
self.apply(weights_init)
if len(weights) > 0 and os.path.isfile(weights):
print(f'Loading weights for unet @ {weights}')
self.load_state_dict(
torch.load(weights, map_location=lambda storage, loc: storage),
strict=False
)
def forward(self, x):
downs = [self.inc(x)]
for i in range(self.n_downs):
downs.append(getattr(self, f"down{i+1}")(downs[i]))
out = downs.pop()
for i in range(self.n_ups):
out = getattr(self, f"up{i+1}")(out, downs.pop())
for i in range(len(downs)):
downs[i] = F.interpolate(downs[i], size=out.shape[2:], mode="bilinear", align_corners=False)
downs.append(out)
out = self.outc(torch.cat(downs, dim=1))
return out
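# Usage sketch for the UNet backbone (shapes with the default arguments; the
# weights here are randomly initialised, so the features are untrained):
#     net = UNetBackbone(dim_embedding=256)
#     feats = net(torch.randn(1, 3, 224, 224))  # -> [1, 256, 56, 56], i.e. 1/4 resolution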
|
402151
|
from tensorboardX import SummaryWriter
import os
class Logger:
def __init__(self, *, log_dir, config):
self.writer = SummaryWriter(log_dir, write_to_disk=True)
self.config = config
def log_training(
self,
*,
training_metric_translation,
training_metric_orientation,
epoch,
):
"""
Write the training translation and orientation error in the writer object to
print it out on tensorboard for each epoch
Args:
training_metric_translation (float): error on the translation calculated on one training epoch
training_metric_orientation (float): error on the orientation calculated on one training epoch
epoch (int): number of the current epoch
"""
        # loss training
        self.writer.add_scalar(
            "training/loss_translation",
            training_metric_translation,
            epoch,
        )
        if not self.config.dataset.symmetric:
            self.writer.add_scalar(
                "training/loss_orientation",
                training_metric_orientation,
                epoch,
            )
def log_evaluation(
self,
*,
evaluation_metric_translation,
evaluation_metric_orientation,
epoch,
test,
):
"""
        Write the evaluation translation and orientation error in the writer object to
        print it out on tensorboard for each epoch: the validation translation and
        orientation error when evaluating on the validation dataset, or the test
        translation and orientation error when evaluating on the test set.
Args:
evaluation_metric_translation (float): error on the translation calculated on one validation epoch
evaluation_metric_orientation (float): error on the orientation calculated on one validation epoch
epoch (int): number of the current epoch
test (bool): specify if it is an evaluation in the training framework or in the test one.
"""
        if test:
            # loss test
            self.writer.add_scalar(
                "test/loss_translation", evaluation_metric_translation
            )
            if not self.config.dataset.symmetric:
                self.writer.add_scalar(
                    "test/loss_orientation",
                    evaluation_metric_orientation,
                )
        else:
            # loss validation
            self.writer.add_scalar(
                "val/loss_translation",
                evaluation_metric_translation,
                epoch,
            )
            if not self.config.dataset.symmetric:
                self.writer.add_scalar(
                    "val/loss_orientation",
                    evaluation_metric_orientation,
                    epoch,
                )
def done(self):
"""
Close the writer after the whole training + evaluation
"""
self.writer.close()
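# Usage sketch (the config shape is an assumption inferred from the attribute
# accesses above -- `config.dataset.symmetric` must exist):
#     logger = Logger(log_dir="runs/exp1", config=config)
#     logger.log_training(training_metric_translation=0.12,
#                         training_metric_orientation=0.034, epoch=3)
#     logger.done()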
# HELPER
def is_master():
rank = int(os.getenv("RANK", 0))
return rank == 0
|
402166
|
import sys
import numpy as np
import lutorpy as lua
try:
import torchfile
except ImportError:
torchfile = None
if torchfile is None:
raise ImportError("Please, do pip install torchfile.")
import os
import time
import cv2
TFEAT_PATCH_SIZE = 32
TFEAT_DESC_SIZE = 128
TFEAT_BATCH_SIZE = 1000
STATS_FNAME = 'nets/stats.liberty.t7'
MODEL_FNAME = 'nets/tfeat_liberty_margin_star.t7'
try:
stats = torchfile.load(STATS_FNAME)
MEAN = stats['mi']
STD = stats['sigma']
except Exception:
    print("Please, ensure that there is nets/stats.liberty.t7 file")
    sys.exit(1)
def preprocess_patch(patch):
    out = cv2.resize(patch, (TFEAT_PATCH_SIZE, TFEAT_PATCH_SIZE)).astype(np.float32) / 255
out = (out - MEAN) / STD
return out.reshape(1, TFEAT_PATCH_SIZE, TFEAT_PATCH_SIZE)
def extract_tfeats(net,patches):
num,channels,h,w = patches.shape
patches_t = torch.fromNumpyArray(patches)
patches_t._view(num, 1, TFEAT_PATCH_SIZE, TFEAT_PATCH_SIZE)
patches_t = patches_t._split(TFEAT_BATCH_SIZE)
descriptors = []
for i in range(int(np.ceil(float(num) / TFEAT_BATCH_SIZE))):
prediction_t = net._forward(patches_t[i]._cuda())
prediction = prediction_t.asNumpyArray()
descriptors.append(prediction)
out = np.concatenate(descriptors)
return out.reshape(num, TFEAT_DESC_SIZE)
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Wrong input format. Try ./extract_desciptors_from_hpatch_file.py imgs/ref.png ref.TFEAT')
        sys.exit(1)
input_img_fname = sys.argv[1]
output_fname = sys.argv[2]
t = time.time()
image = cv2.imread(input_img_fname,0) #hpatch image is patch column 65*n x 65
h,w = image.shape
    n_patches = h // w
print('{0} patches to describe in {1}'.format(n_patches, input_img_fname))
patches = np.zeros((n_patches,1,TFEAT_PATCH_SIZE, TFEAT_PATCH_SIZE))
for i in range(n_patches):
patches[i,:,:,:] = preprocess_patch(image[i*(w): (i+1)*(w), 0:w])
require('nn')
require('cunn')
require('cudnn')
net = torch.load(MODEL_FNAME)
    print('Initialization and preprocessing time: {}'.format(time.time() - t))
t = time.time()
out_descs = extract_tfeats(net,patches)
    print('extraction time: {}'.format(time.time() - t))
np.savetxt(output_fname, out_descs, delimiter=' ', fmt='%10.7f')
|
402170
|
from django.test import TestCase
from .models import Board, Post, Comment
class BoardsTests(TestCase):
def test_no_boards(self):
"""
Tests if the 'boards' endpoint is responding and
if the response initially contains no boards
"""
resp = self.client.get('/nchan/boards/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data['count'], 0)
def test_returns_all_existing_boards(self):
"""
Tests if 'boards' endpoint returns created boards
"""
Board.objects.create(board='01', title='test-board-01')
Board.objects.create(board='02', title='test-board-02')
resp = self.client.get('/nchan/boards/')
self.assertEqual(resp.data['count'], 2)
self.assertEqual(resp.data['results'][0]['board'], '01')
self.assertEqual(resp.data['results'][0]['title'], 'test-board-01')
self.assertEqual(resp.data['results'][1]['board'], '02')
self.assertEqual(resp.data['results'][1]['title'], 'test-board-02')
def test_returns_single_board(self):
"""
Tests if 'boards/<board>/' endpoint returns the correct board
"""
Board.objects.create(board='01', title='test-board-01')
Board.objects.create(board='02', title='test-board-02')
resp1 = self.client.get('/nchan/boards/01/')
self.assertEqual(resp1.data['board'], '01')
self.assertEqual(resp1.data['title'], 'test-board-01')
resp2 = self.client.get('/nchan/boards/02/')
self.assertEqual(resp2.data['board'], '02')
self.assertEqual(resp2.data['title'], 'test-board-02')
def test_returns_board_posts(self):
"""
Tests if 'boards/<board>/posts/' returns only
that boards posts
"""
b1 = Board.objects.create(board='01', title='test-board-01')
b2 = Board.objects.create(board='02', title='test-board-02')
Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret',
text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')
Post.objects.create(title='second post', board=Board.objects.get(pk=b2.id), poster='friendly-frogs',
text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')
resp = self.client.get('/nchan/boards/01/posts/')
self.assertEqual(resp.data['count'], 1)
self.assertIn('first post', str(resp.data['results']))
self.assertNotIn('second post', str(resp.data['results']))
class PostsTests(TestCase):
def test_no_posts(self):
"""
Tests if the 'posts' endpoint is responding and
if the response initially contains no posts
"""
resp = self.client.get('/nchan/posts/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data['count'], 0)
def test_returns_all_existing_posts(self):
"""
Tests if 'posts' endpoint returns created posts
"""
b1 = Board.objects.create(board='01', title='test-board-01')
Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret',
text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')
Post.objects.create(title='second post', board=Board.objects.get(pk=b1.id), poster='friendly-frogs',
text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')
resp = self.client.get('/nchan/posts/')
self.assertEqual(resp.data['count'], 2)
self.assertIn('first post', str(resp.data['results']))
self.assertIn('second post', str(resp.data['results']))
def test_returns_single_post(self):
"""
Tests if 'posts/<id>/' endpoint returns the correct post
"""
b1 = Board.objects.create(board='01', title='test-board-01')
p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret',
text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')
resp = self.client.get(f'/nchan/posts/{p1.id}/')
self.assertEqual(resp.data['title'], 'first post')
def test_returns_post_comments(self):
"""
Tests if 'posts/<id>/comments/' endpoint returns correct comments
"""
b1 = Board.objects.create(board='01', title='test-board-01')
p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret',
text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')
Comment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas',
                               text='URL namespace "admin" isn"t unique. You may not be '
                                    'able to reverse all URLs in this namespace')
resp = self.client.get(f'/nchan/posts/{p1.id}/comments/')
self.assertIn('glossy-gorillas', str(resp.data))
class CommentsTests(TestCase):
def test_returns_comment(self):
"""
Tests if 'comments/' endpoint returns comments
"""
b1 = Board.objects.create(board='01', title='test-board-01')
p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret',
text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')
Comment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas',
                               text='URL namespace "admin" isn"t unique. You may '
                                    'not be able to reverse all URLs in this namespace')
resp = self.client.get('/nchan/comments/')
self.assertIn('glossy-gorillas', str(resp.data))
|
402191
|
import json
import math
import os
import shutil
import torch
from . import Callback
__all__ = ['ModelCheckpoint']
class ModelCheckpoint(Callback):
"""
Model Checkpoint to save model weights during training. 'Best' is determined by minimizing (or maximizing) the value found under monitored_log_key in the logs
Saved checkpoints contain these keys by default:
'run_id'
'epoch'
'loss_type'
'loss_val'
'best_epoch'
- plus any additional key/value pairs produced by custom_func
Additionally saves a .json file with statistics about the run such as:
'run_id'
'num_epochs'
'best_epoch'
'best_loss_or_gain'
'metric_name'
- plus any additional key/value pairs produced by custom_func
:param run_id: (string):
Uniquely identifies the run
:param monitored_log_key: (string):
Name of the key in the logs that will contain the value we want to minimize (and thus that will dictate whether the model is 'best')
:param save_dir: (string):
Path indicating where to save the checkpoint
:param addl_k_v: (dict):
dictionary of additional key/value pairs to save with the model. Typically these include some initialization parameters, name of the model etc.
(e.g. from the initialization dictionary 'opt'), as well as other useful params (e.g. mean, std, proc_type: gpu/cpu etc)
:param epoch_log_keys: (list):
list of keys to save from the epoch log dictionary (Note: the logs dictionary is automatically provided by the learning framework)
:param save_interval: (int):
How often to save the model (if none then will default to every 5 iterations)
:param save_best_only: (bool):
Whether only to save the best result (and overwrite all previous)
Default: False
:param max_saves: (integer > 0 or -1):
the max number of models to save. Older model checkpoints will be overwritten if necessary.
Set equal to -1 to have no limit.
Default: 5
:param custom_func: func(k_v_dict, logs, out_dict, monitored_log_key, is_end_training):
Custom function for performing any additional logic (to add values to the model). The function will be passed the addl_k_v dictionary,
the event logs dictionary, an output dictionary to process, the monitored_log_key and a bool indicating whether the training is finished.
The function is expected to modify the output dictionary in order to preserve values across epochs. The function will be called at the
        end of each epoch and at the end of the training (with is_end_training = True)
:param do_minimize: (bool):
whether to minimize or maximize the 'monitored_log_key' value
:param verbose: (bool):
verbosity of the console output
Default: False
"""
def __init__(self, run_id, monitored_log_key, save_dir, addl_k_v=None, epoch_log_keys=None, save_interval=5, save_best_only=False, max_saves=5,
custom_func=None, do_minimize=True, verbose=False):
if addl_k_v is None:
addl_k_v = {}
if epoch_log_keys is None:
epoch_log_keys = []
self.run_id = run_id
self.addl_k_v = addl_k_v
self.save_dir = os.path.expanduser(save_dir)
self.save_interval = save_interval
self.epoch_log_keys = epoch_log_keys
self.save_best_only = save_best_only
self.max_saves = max_saves
self.custom_func = custom_func
self.custom_func_dict = {} # this is expected to be filled by the custom_func
self.verbose = verbose
self.monitored_log_key = monitored_log_key # 'e.g. dice_coeff'
self.do_minimize = do_minimize
self.last_saved_ep = 0
self.last_epoch_logs = None
self.last_epoch = -1
self.best_epoch = -1
# keep track of old files if necessary
if self.max_saves > 0:
self.old_files = []
        # initialise 'best' so that the first monitored value always counts as an improvement
        if do_minimize:
            self.best_loss = math.inf
        else:
            self.best_loss = -math.inf
super().__init__()
def on_epoch_end(self, epoch, logs=None):
self.last_epoch_logs = logs
self.last_epoch = epoch
        if (epoch + 1) % self.save_interval == 0:  # only save with given frequency
            current_loss = logs.get(self.monitored_log_key)
            improved = current_loss is not None and (
                (self.do_minimize and current_loss < self.best_loss)
                or (not self.do_minimize and current_loss > self.best_loss))
            if not self.save_best_only or improved:
                if current_loss is None:
                    if self.verbose:
                        print(f'ModelCheckpoint could not find monitored_log_key (loss variable) in logs: {self.monitored_log_key}')
                else:
# Call custom function (if set) to process things like best-N results etc
                    if self.custom_func is not None:
                        self.custom_func(self.addl_k_v, logs, self.custom_func_dict, self.monitored_log_key, False)
checkpt_name = generate_checkpoint_name(self.run_id, self.addl_k_v, epoch, False)
if self.verbose:
print('\nEpoch %i: loss metric changed from %0.4f to %0.4f saving model to %s' % (
epoch + 1, self.best_loss, current_loss, os.path.join(self.save_dir, checkpt_name)))
                    if improved:
self.best_loss = current_loss
self.best_epoch = epoch
# print('Best Loss of {} saved at epoch: {}'.format(self.best_loss, epoch + 1))
save_dict = {
'run_id': self.run_id,
'epoch': epoch + 1,
'metric_type': self.monitored_log_key,
'metric_value': current_loss,
'best_epoch': self.best_epoch + 1
}
# correctly handle saving parallelized models (https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-torch-nn-dataparallel-models)
if isinstance(self.trainer.model, torch.nn.DataParallel):
save_dict['state_dict'] = self.trainer.model.module.state_dict()
else:
save_dict['state_dict'] = self.trainer.model.state_dict()
# add values from other dictionaries
save_dict.update(self.addl_k_v)
save_dict.update(self.custom_func_dict)
for key in self.epoch_log_keys:
save_dict[key] = logs.get(key) # this is not guaranteed to be found so may return 'None'
save_checkpoint(save_dict, is_best=(self.best_epoch == epoch), save_path=self.save_dir, filename=checkpt_name)
self.last_saved_ep = epoch
if self.max_saves > 0:
if len(self.old_files) >= self.max_saves:
try:
os.remove(self.old_files[0])
if self.verbose:
print(f'ModelCheckpoint removing old model snapshot: {self.old_files[0]}')
                            except OSError:
                                pass
self.old_files = self.old_files[1:]
self.old_files.append(os.path.join(self.save_dir, checkpt_name))
def on_train_end(self, logs=None):
final_epoch = self.last_epoch
current_loss = self.last_epoch_logs[self.monitored_log_key]
        # Save the model if it hasn't been previously saved and it has the best loss value
if self.last_saved_ep < final_epoch and ((self.do_minimize and current_loss < self.best_loss) or (not self.do_minimize and current_loss > self.best_loss)):
# Call custom function (if set) to process things like best-N results etc
if self.custom_func is not None:
                self.custom_func(self.addl_k_v, self.last_epoch_logs, self.custom_func_dict, self.monitored_log_key, True)
self.best_loss = current_loss
self.best_epoch = final_epoch
save_dict = {
'run_id': self.run_id,
'epoch': final_epoch + 1,
'state_dict': self.trainer.model.state_dict(),
'metric_type': self.monitored_log_key,
'metric_value': current_loss,
                'best_epoch': self.best_epoch + 1
}
# add values from other dictionaries
save_dict.update(self.addl_k_v)
save_dict.update(self.custom_func_dict)
for key in self.epoch_log_keys:
save_dict[key] = self.last_epoch_logs[key]
save_checkpoint(save_dict, is_best=True, save_path=self.save_dir, filename=generate_checkpoint_name(self.run_id, self.addl_k_v, final_epoch, False))
self.last_saved_ep = final_epoch
stats = {'run_id': self.run_id,
'num_epochs': final_epoch + 1,
'best_epoch': self.best_epoch + 1,
'best_loss_or_gain': self.best_loss,
'metric_type': self.monitored_log_key
}
stats.update(self.addl_k_v)
stats.update(self.custom_func_dict)
statsfile_path = generate_statsfile_name(self.run_id, self.save_dir)
with open(statsfile_path, 'a') as statsfile:
json.dump(stats, statsfile)
def generate_statsfile_name(run_id, save_dir):
save_dir1 = os.path.expanduser(save_dir)
return os.path.join(save_dir1, str(run_id) + "_stats.json")
def generate_checkpoint_name(run_id, kv_dict, epoch, is_best):
model_name = kv_dict.get('model_name', 'model')
optimizer_name = kv_dict.get('optimizer', 'o')
if is_best:
return str(run_id) + "_" + model_name + "_" + optimizer_name + "_ep_best.pth.tar"
else:
return str(run_id) + "_" + model_name + "_" + optimizer_name + "_ep_" + str(epoch + 1) + ".pth.tar"
def save_checkpoint(state, is_best=False, save_path=".", filename=None):
"""
Saves checkpoint to file.
:param state: (dict): the dictionary to save. Can have other values besides just model weights.
:param is_best: (bool): whether this is the best result we've seen thus far
:param save_path: (string): local dir to save to
:param filename: (string): name of the file to save under `save_path`
:return:
"""
    if not filename:
        print("ERROR: No filename defined. Checkpoint is NOT saved.")
        return
    save_path1 = os.path.expanduser(save_path)
    if not os.path.exists(save_path1):
        os.makedirs(save_path1)
torch.save(state, os.path.join(save_path1, filename))
if is_best:
pos = filename.find("_ep_")
        if pos > 0:
bestname = filename[:pos] + "_best.pth.tar"
shutil.copyfile(os.path.join(save_path1, filename), os.path.join(save_path1, bestname))
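# Usage sketch (hypothetical wiring -- the surrounding training framework is
# expected to attach itself as `self.trainer` with a `.model` attribute):
#     cb = ModelCheckpoint(run_id="exp42", monitored_log_key="val_loss",
#                          save_dir="~/checkpoints", save_interval=1,
#                          save_best_only=True, max_saves=3, verbose=True)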
|
402198
|
import numpy as np
from np_ml import Perceptron
if __name__ == '__main__':
print("--------------------------------------------------------")
print("Perceptron simple example!")
print("example in Statistical Learning Method(《统计学习方法》)")
print("--------------------------------------------------------")
p = Perceptron()
x = np.array([[3, 3],
[4, 3],
[1, 1]])
y = np.array([1, 1, -1])
print("x: ")
print(x)
print("y: ")
print(y)
print("")
p.fit(x, y, detailed=True)
print("y_pred: ")
print(p.predict(np.array([[3, 3],
[4, 3],
[1, 1]])))
|
402231
|
from django.db import models
from busshaming.enums import ScheduleRelationship
class TripDate(models.Model):
trip = models.ForeignKey('Trip')
date = models.DateField(db_index=True)
added_from_realtime = models.BooleanField(default=False)
# Has it had the stats calculation script run.
is_stats_calculation_done = models.BooleanField(default=False)
# Denormalized start time from the first tripstop and other data
# Must be filled if is_stats_calculation_done is set
start_time = models.CharField(max_length=8, null=True, blank=True)
num_scheduled_stops = models.PositiveSmallIntegerField(null=True, blank=True)
num_realtime_stops = models.PositiveSmallIntegerField(null=True, blank=True)
# Stats about this trip
has_realtime_stats = models.BooleanField(default=False)
# Realtime coverage/accuracy
realtime_coverage = models.FloatField(null=True, blank=True)
realtime_accuracy = models.FloatField(null=True, blank=True)
early_count = models.SmallIntegerField(null=True, blank=True)
ontime_count = models.SmallIntegerField(null=True, blank=True)
late_count = models.SmallIntegerField(null=True, blank=True)
verylate_count = models.SmallIntegerField(null=True, blank=True)
has_start_middle_end_stats = models.BooleanField(default=False)
start_delay = models.SmallIntegerField(null=True, blank=True)
middle_delay = models.SmallIntegerField(null=True, blank=True)
end_delay = models.SmallIntegerField(null=True, blank=True)
num_delay_stops = models.PositiveSmallIntegerField(null=True, blank=True)
avg_delay = models.SmallIntegerField(null=True, blank=True)
variance_delay = models.IntegerField(null=True, blank=True)
max_delay = models.SmallIntegerField(null=True, blank=True)
upper_quartile_delay = models.SmallIntegerField(null=True, blank=True)
median_delay = models.SmallIntegerField(null=True, blank=True)
lower_quartile_delay = models.SmallIntegerField(null=True, blank=True)
min_delay = models.SmallIntegerField(null=True, blank=True)
sum_delay = models.IntegerField(null=True, blank=True)
sum_delay_squared = models.BigIntegerField(null=True, blank=True)
# New stuff
schedule_relationship = models.SmallIntegerField(choices=ScheduleRelationship.choices(), null=True, blank=True)
vehicle_id = models.CharField(max_length=100, null=True, blank=True)
class Meta:
unique_together = ('trip', 'date')
def __str__(self):
return f'Trip {self.trip.gtfs_trip_id} on {self.date}'
|
402241
|
from btcproxy import ProxiedBitcoinD, BtcD  # BtcD assumed to live alongside ProxiedBitcoinD; it was used below but never imported
from ephemeral_port_reserve import reserve
from concurrent import futures
import os
import pytest
import tempfile
import logging
import shutil
TEST_DIR = tempfile.mkdtemp(prefix='lightning-')
TEST_DEBUG = os.getenv("TEST_DEBUG", "0") == "1"
# A dict in which we count how often a particular test has run so far. Used to
# give each attempt its own numbered directory, and avoid clashes.
__attempts = {}
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, executor, bitcoind, btcd):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.btcd = btcd
def get_node(self, implementation):
node_id = self.next_id
self.next_id += 1
lightning_dir = os.path.join(
TEST_DIR, self.testname, "node-{}/".format(node_id))
port = reserve()
node = implementation(lightning_dir, port, self.bitcoind,
executor=self.executor, node_id=node_id)
self.nodes.append(node)
node.btcd = self.btcd
node.daemon.start()
return node
def killall(self):
for n in self.nodes:
n.daemon.stop()
@pytest.fixture
def directory(request, test_base_dir, test_name):
"""Return a per-test specific directory.
This makes a unique test-directory even if a test is rerun multiple times.
"""
global __attempts
# Auto set value if it isn't in the dict yet
__attempts[test_name] = __attempts.get(test_name, 0) + 1
directory = os.path.join(test_base_dir, "{}_{}".format(test_name, __attempts[test_name]))
request.node.has_errors = False
yield directory
# This uses the status set in conftest.pytest_runtest_makereport to
# determine whether we succeeded or failed.
if not request.node.has_errors and request.node.rep_call.outcome == 'passed':
shutil.rmtree(directory)
else:
logging.debug("Test execution failed, leaving the test directory {} intact.".format(directory))
@pytest.fixture(scope="session")
def test_base_dir():
directory = tempfile.mkdtemp(prefix='ltests-')
print("Running tests in {}".format(directory))
yield directory
if not os.listdir(directory):
shutil.rmtree(directory)
@pytest.fixture
def test_name(request):
yield request.function.__name__
@pytest.fixture()
def bitcoind(directory):
proxyport = reserve()
btc = ProxiedBitcoinD(bitcoin_dir=os.path.join(directory, "bitcoind"), proxyport=proxyport)
btc.start()
bch_info = btc.rpc.getblockchaininfo()
w_info = btc.rpc.getwalletinfo()
addr = btc.rpc.getnewaddress()
# Make sure we have segwit and some funds
if bch_info['blocks'] < 120:
logging.debug("SegWit not active, generating some more blocks")
btc.rpc.generatetoaddress(120 - bch_info['blocks'], addr)
elif w_info['balance'] < 1:
logging.debug("Insufficient balance, generating 1 block")
btc.rpc.generatetoaddress(1, addr)
# Mock `estimatesmartfee` to make c-lightning happy
def mock_estimatesmartfee(r):
return {"id": r['id'], "error": None, "result": {"feerate": 0.00100001, "blocks": r['params'][0]}}
btc.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
yield btc
try:
btc.rpc.stop()
except Exception:
btc.proc.kill()
btc.proc.wait()
@pytest.fixture(scope="module")
def btcd():
btcd = BtcD()
btcd.start()
yield btcd
try:
btcd.rpc.stop()
except Exception:
btcd.proc.kill()
btcd.proc.wait()
@pytest.fixture
def node_factory(request, bitcoind):
executor = futures.ThreadPoolExecutor(max_workers=20)
node_factory = NodeFactory(request._pyfuncitem.name, executor, bitcoind, None)
yield node_factory
node_factory.killall()
executor.shutdown(wait=False)
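# A minimal usage sketch (assumption: `LightningNode` stands in for whatever
# concrete implementation class the suite passes to NodeFactory.get_node):
# def test_single_node_starts(node_factory):
#     node = node_factory.get_node(LightningNode)
#     assert node.daemon is not None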
|
402254
|
import time
from typing import Optional
import pyperclip
from platypush.backend import Backend
from platypush.message.event.clipboard import ClipboardEvent
class ClipboardBackend(Backend):
"""
This backend monitors the clipboard for changes and generates an event when the user copies new text.
Requires:
- **pyperclip** (``pip install pyperclip``)
Triggers:
- :class:`platypush.message.event.clipboard.ClipboardEvent` on clipboard update.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._last_text: Optional[str] = None
def run(self):
self.logger.info('Started clipboard monitor backend')
while not self.should_stop():
text = pyperclip.paste()
if text and text != self._last_text:
self.bus.post(ClipboardEvent(text=text))
self._last_text = text
time.sleep(0.1)
self.logger.info('Stopped clipboard monitor backend')
# vim:sw=4:ts=4:et:
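# Standalone sketch of the same change-detection pattern, mirroring the
# run() loop above (run as a script, copy some text, stop with Ctrl-C):
if __name__ == '__main__':
    last = None
    while True:
        current = pyperclip.paste()
        if current and current != last:
            print('clipboard changed:', current)
            last = current
        time.sleep(0.1)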
|
402261
|
from flask import Flask, jsonify
from flask_cors import CORS, cross_origin
from aci import aci
from deploy import deploy
from host import hosts
from iso import isos
from monitor import monitor
from network import networks
from server import servers
from setting import setting
from disks import disks
app = Flask(__name__)
app.register_blueprint(aci)
app.register_blueprint(deploy)
app.register_blueprint(disks)
app.register_blueprint(hosts)
app.register_blueprint(isos)
app.register_blueprint(monitor)
app.register_blueprint(networks)
app.register_blueprint(servers)
app.register_blueprint(setting)
CORS(app)
@app.route('/')
@cross_origin()
def index():
"""
Basic health check to see if the site is up.
Should return {'status': 'ok'}
"""
return jsonify({'status': 'ok'})
if __name__ == '__main__':
app.run(debug=True)
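# Quick smoke test against a running instance (a sketch; assumes the
# default Flask dev-server address):
#   $ curl http://127.0.0.1:5000/
#   {"status": "ok"}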
|
402291
|
import re
import copy
import numpy as np
import joblib
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.base import BaseEstimator, TransformerMixin
from chariot.transformer.text.base import TextNormalizer, TextFilter
from chariot.transformer.token.base import TokenFilter, TokenNormalizer
from chariot.transformer.vocabulary import Vocabulary
from chariot.transformer.tokenizer import Tokenizer
class Preprocessor(_BaseComposition, BaseEstimator, TransformerMixin):
def __init__(self, tokenizer=None,
text_transformers=(), token_transformers=(),
vocabulary=None, other_transformers=()):
self.tokenizer = tokenizer
if isinstance(self.tokenizer, str):
self.tokenizer = Tokenizer(self.tokenizer)
self.text_transformers = list(text_transformers)
self.token_transformers = list(token_transformers)
self.vocabulary = vocabulary
self.other_transformers = list(other_transformers)
def stack(self, transformer):
if isinstance(transformer, Tokenizer):
self.tokenizer = transformer
elif isinstance(transformer, (TextNormalizer, TextFilter)):
self.text_transformers.append(transformer)
elif isinstance(transformer, (TokenFilter, TokenNormalizer)):
self.token_transformers.append(transformer)
elif isinstance(transformer, Vocabulary):
self.vocabulary = transformer
elif isinstance(transformer, (BaseEstimator, TransformerMixin)):
self.other_transformers.append(transformer)
else:
raise Exception("Can't append transformer to the Preprocessor")
return self
def _to_snake(self, name):
_name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
_name = re.sub("([a-z0-9])([A-Z])", r"\1_\2", _name).lower()
return _name
@property
def _transformers(self):
transformers = list(self.text_transformers)
if self.tokenizer:
transformers += [self.tokenizer]
transformers += self.token_transformers
if self.vocabulary:
transformers += [self.vocabulary]
transformers += self.other_transformers
return (
(self._to_snake(t.__class__.__name__) + "_at_{}".format(i), t)
for i, t in enumerate(transformers)
)
def _validate_transformers(self):
names, transformers = zip(*self._transformers)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t is None:
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All transformer should implement fit and "
"transform. '%s' (type %s) doesn't" %
(t, type(t)))
def transform(self, X):
self._validate_transformers()
Xt = self.check_array(X, True)
for name, t in self._transformers:
Xt = t.transform(Xt)
return Xt
def inverse_transform(self, X):
self._validate_transformers()
Xt = X
for name, t in list(self._transformers)[::-1]:
if hasattr(t, "inverse_transform"):
Xt = t.inverse_transform(Xt)
return Xt
def fit(self, X, y=None):
self._validate_transformers()
Xt = X
copied = False
for name, t in self._transformers:
t.fit(Xt)
if not isinstance(t, Vocabulary):
original_copy_setting = t.copy
if not copied:
    # Copy on the first transform so the original X stays untouched
    t.copy = True
    copied = True
else:
    t.copy = False
Xt = t.transform(Xt)
t.copy = original_copy_setting
return self
def fit_transform(self, X, y=None):
self._validate_transformers()
Xt = X
for name, t in self._transformers:
Xt = t.fit_transform(Xt)
return Xt
def check_array(self, X, copy_obj=True):
if not copy_obj:
return X
else:
if isinstance(X, (np.ndarray, np.generic)):
return np.array(X)
else:
return copy.deepcopy(X)
def save(self, path):
joblib.dump(self, path)
@classmethod
def load(cls, path):
instance = joblib.load(path)
return instance
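# A minimal usage sketch. A plain sklearn-style transformer is routed to
# `other_transformers` by stack(); `Lowercase` here is hypothetical:
class Lowercase(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return [x.lower() for x in X]

# Preprocessor().stack(Lowercase()).fit_transform(["Hello World"])
# -> ['hello world']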
|
402298
|
from sqlalchemy import Column, DateTime, Enum, Integer, String
from sqlalchemy.sql.schema import ForeignKey, UniqueConstraint
from virtool.pg.base import Base
from virtool.pg.utils import SQLEnum
class ArtifactType(str, SQLEnum):
"""
Enumerated type for possible artifact types
"""
sam = "sam"
bam = "bam"
fasta = "fasta"
fastq = "fastq"
csv = "csv"
tsv = "tsv"
json = "json"
class SampleArtifact(Base):
"""
SQL model to store sample artifacts
"""
__tablename__ = "sample_artifacts"
__table_args__ = (UniqueConstraint("sample", "name"),)
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String, nullable=False)
name_on_disk = Column(String)
size = Column(Integer)
type = Column(Enum(ArtifactType), nullable=False)
uploaded_at = Column(DateTime)
class SampleReads(Base):
"""
SQL model to store new sample reads files
"""
__tablename__ = "sample_reads"
__table_args__ = (UniqueConstraint("sample", "name"),)
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String(length=13), nullable=False)
name_on_disk = Column(String, nullable=False)
size = Column(Integer)
upload = Column(Integer, ForeignKey("uploads.id"))
uploaded_at = Column(DateTime)
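# A minimal construction sketch (session handling is assumed to happen
# elsewhere; this only shows the column values a reads row needs):
def example_reads_row() -> SampleReads:
    return SampleReads(sample="sample_1", name="reads_1.fq.gz",
                       name_on_disk="1-reads_1.fq.gz", size=1024)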
|
402319
|
from modify_host_origin_request_header import app
def test_event():
return {
"Records": [
{
"cf": {
"request": {
"headers": {
"host": [
{
"key": "Host",
"value": "xxx.agilewing-demo.net"
}
]
}
}
}
}
]
}
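# The expected value 'ORIGIN_DOMAIN' below is presumably a deploy-time
# placeholder substituted into the Lambda source (assumption).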
def test_lambda_handler():
ret = app.lambda_handler(test_event(), "")
assert ret['headers']['host'][0]['value'] == 'ORIGIN_DOMAIN'
|
402321
|
import os
import numpy as np
import matplotlib.pyplot as plt
import pylab
def get_data(filename):
#print("retrieving data from " + filename)
unit, runtime = None, None
loss_save = []
with open(filename, 'r') as f:
for line in f:
line = line.rstrip()
if line.startswith("unit:"):
unit = line.split(':')[1]
elif line.startswith("run time:"):
runtime = line.split(':')[1].split()
assert len(runtime) == 2
elif len(line) > 0:
loss_save.append(float(line))
return [unit, runtime, loss_save]
def getLabelFromFileName(filename):
return filename.split('.')[0].split('_')[1]
COLOR_BY_LABEL = {
    'Numpy': 'g', 'Lantern': 'b', 'PyTorch': 'r', 'TensorFlow': 'y',
    'TensorFold': 'm', 'TensorFold20': 'm',
    'DyNet': 'g', 'DyNetB': 'g', 'DyNetNB': 'c',
}

def getColor(label):
    if label in COLOR_BY_LABEL:
        return COLOR_BY_LABEL[label]
    print("NOTE: color not defined for label: %s" % label)
def plot(files, model):
# save dir
save_dir = '../save_fig/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
datas = {}
labels = []
for file1 in files:
label = getLabelFromFileName(file1)
datas[label] = get_data(file1)
labels.append(label)
print(labels)
# accumulate data of loss
losses = []
for label in labels:
losses.append(datas[label][2])
# accumulate data of runtime
prepareTimes = []
loopTimes = []
for label in labels:
# prepareTimes.append(datas[label][1][0])
loopTimes.append(float(datas[label][1][1]))
print(loopTimes)
# get unit and other description
unit = datas[labels[0]][0]
print(unit)
if (unit == ' 1 epoch'):
steps = len(losses[0])
step_desc = "1 epoch"
else:
steps = len(losses[0]) - 1
temp = unit.split()
step_desc = str(int(temp[0]) * steps) + " " + temp[1] + "s"
# plot
fig, ax = plt.subplots()
N = len(labels)
ind = np.arange(1, N+1)
ps = plt.bar(ind, loopTimes, width = 0.55)
for i in range(N):
ps[i].set_facecolor(getColor(labels[i]))
ax.set_xticks(ind)
ax.set_xticklabels(labels)
ax.set_ylim([0, max(loopTimes) * 1.2])
ax.set_ylabel("seconds")
if step_desc == "1 epoch":
ax.set_title("Training Time per Epoch in {}".format(model))
else:
ax.set_title("Training Time of {} in {}".format(step_desc, model))
pylab.savefig(save_dir + model + '.png')
if __name__ == "__main__":
import sys
#print(sys.argv)
model = sys.argv[1]
n_files = len(sys.argv) - 2
files = []
for i in range(n_files):
files.append(sys.argv[i+2])
plot(files, model)
|
402326
|
import torch
import torch.nn as nn
from ..registry import LOSSES
@LOSSES.register_module
class HausdorffLoss(nn.Module):
def __init__(self, loss_weight=1.0):
super(HausdorffLoss, self).__init__()
self.weight = loss_weight
def forward(self, set1, set2):
"""
Compute the Averaged Hausdorff Distance function
between two unordered sets of points (the function is symmetric).
Batches are not supported, so squeeze your inputs first!
:param set1: Tensor where each row is an N-dimensional point.
:param set2: Tensor where each row is an N-dimensional point.
:return: The Averaged Hausdorff Distance between set1 and set2.
"""
assert set1.ndimension() == 3, 'got %s' % set1.ndimension()
assert set2.ndimension() == 3, 'got %s' % set2.ndimension()
if set1.shape[0] > 0:
assert set1.size()[1] == set2.size()[1], \
'The points in both sets must have the same number of dimensions, got %s and %s.'\
% (set1.size()[1], set2.size()[1])
# Second implementation, which avoids running CUDA out of memory;
# expects set1: (N, 40, 2) and set2: (N, 40, 2).
d2_matrix = torch.stack([self.cdist(item1, item2) for item1, item2 in zip(set1, set2)], dim=0)
# Modified Chamfer Loss
# term_1 = torch.mean(torch.min(d2_matrix, 2)[0].reshape(-1))
# term_2 = torch.mean(torch.min(d2_matrix, 1)[0].reshape(-1))
term_1 = torch.mean(torch.min(d2_matrix, 2)[0], -1)
term_2 = torch.mean(torch.min(d2_matrix, 1)[0], -1)
res = term_1 + term_2
else:
res = set1.sum() * 0
# print(res)
return res*self.weight
def cdist(self, x, y):
"""
Compute distance between each pair of the two collections of inputs.
:param x: Nxd Tensor
:param y: Mxd Tensor
:res: NxM matrix where dist[i,j] is the norm between x[i,:] and y[j,:],
i.e. dist[i,j] = ||x[i,:]-y[j,:]||
"""
differences = x.unsqueeze(1) - y.unsqueeze(0)
distances = torch.sum(differences**2, -1).sqrt()
return distances
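# Shape sanity check (a sketch matching the (N, 40, 2) sizes noted above):
#   >>> loss = HausdorffLoss(loss_weight=1.0)
#   >>> set1, set2 = torch.rand(4, 40, 2), torch.rand(4, 40, 2)
#   >>> loss(set1, set2).shape
#   torch.Size([4])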
|
402332
|
import json
import math
import os
import numpy as np
# Constants ---
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Functions ---
def __json_to_dict(fn):
    with open(fn) as f:
        return json.load(f)
def stride(num, desired):
'''Computes the stride to be used in array slicing in order to subsample a list'''
return max(1, int(math.ceil(num / desired)))
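# e.g. stride(1000, 100) == 10, so arr[::10] keeps roughly 100 samples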
def read_metadata(path, indices):
'''
Reads metadata for the specified indices and returns a dictionary
mapping the index to the metadata dictionary.
'''
if isinstance(indices, int):
indices = [indices]
return {idx : __json_to_dict(os.path.join(path, '{}.json'.format(idx))) \
for idx in indices}
def sort_by_scene_metadata(path, indices, key):
'''
Returns an array of indices that is sorted by the specified key in the scene metadata.
'''
metadata = read_metadata(path, indices)
return sorted(indices, key=lambda index: metadata[index]['scene'][key])
def filter_by_scene_metadata(path, indices, key, value_min, value_max):
'''
Filters a set of indices by metadata value range
'''
metadata = read_metadata(path, indices)
return sorted([x for x in indices if metadata[x]['scene'][key] >= value_min and \
metadata[x]['scene'][key] <= value_max])
def metadata_values(path, indices, key):
'''
Extracts the metadata values for the specified indices
'''
metadata = read_metadata(path, indices)
return [metadata[index]['scene'][key] for index in indices]
def euler_angles_to_rotation_matrix(theta):
Rx = np.array([[1, 0, 0 ],
[0, math.cos(theta[0]), -math.sin(theta[0]) ],
[0, math.sin(theta[0]), math.cos(theta[0]) ]
])
Ry = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],
[0, 1, 0 ],
[-math.sin(theta[1]), 0, math.cos(theta[1]) ]
])
Rz = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(Rz, np.dot(Ry, Rx))
R4 = np.identity(4)
R4[:3,:3] = R
return np.asmatrix(R4)
def translation_matrix(t):
M = np.identity(4)
M[:3, 3] = t[:3]
return np.asmatrix(M)
def vehicle_to_sensor_transform(metadata):
rpy = (math.radians(metadata['camera']['extrinsic']['roll']),
math.radians(metadata['camera']['extrinsic']['pitch']),
math.radians(metadata['camera']['extrinsic']['yaw']))
t = (metadata['camera']['extrinsic']['x'],
metadata['camera']['extrinsic']['y'],
metadata['camera']['extrinsic']['z'])
R = euler_angles_to_rotation_matrix(rpy)
T = translation_matrix(t)
TR = np.dot(T, R)
return np.linalg.inv(TR)
def vehicle_space_to_image_space(vs_p, vehicle_to_sensor_xform, fx, fy, u0, v0):
# Sensor coordinates
scm = np.dot(vehicle_to_sensor_xform, (vs_p[0], vs_p[1], vs_p[2], 1.0))
sc = (scm[0,0], scm[0,1], scm[0,2])
# Image coordinates
u = -sc[1] * fx / sc[0] + u0
v = -sc[2] * fy / sc[0] + v0
d = sc[0]
return (u, v, d)
def vec_sum(a, b):
return (a[0] + b[0], a[1] + b[1], a[2] + b[2])
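# A minimal end-to-end sketch of the projection helpers above (all numbers
# are hypothetical): a camera mounted 1.5 m up with no rotation, projecting
# a vehicle-space point 10 m ahead.
if __name__ == '__main__':
    meta = {'camera': {'extrinsic': {'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
                                     'x': 0.0, 'y': 0.0, 'z': 1.5}}}
    xform = vehicle_to_sensor_transform(meta)
    print(vehicle_space_to_image_space((10.0, 0.0, 0.0), xform,
                                       fx=1000.0, fy=1000.0,
                                       u0=640.0, v0=480.0))
    # -> approximately (640.0, 630.0, 10.0)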
|
402378
|
import sqlite3
try:
DBCONN = sqlite3.connect("..\\WSUS_Update_Data.db",
check_same_thread=False, isolation_level=None)
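# WAL journaling lets readers proceed concurrently with a writer;
# synchronous=NORMAL trades a little durability for write speed.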
DBCONN.execute("pragma journal_mode=wal")
DBCONN.execute("pragma synchronous=NORMAL")
DBCONN.commit()
DBCONN.close()
except sqlite3.Error as error:
print("Error caught: ", error.args[0])
except Exception as error:
    print("Error caught:", error)
|
402394
|
import copy
import json
import logging
from collections import OrderedDict
from typing import Dict, Tuple, List
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from conllu.parser import parse_line, DEFAULT_FIELDS
from overrides import overrides
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
label_prior = ['ARG1', 'ARG2', 'ARG3', 'R-HNDL', 'L-HNDL']
label_prior_dict = {label_prior[idx]: idx for idx in range(len(label_prior))}
class Relation(object):
type = None
def __init__(self, node, rel, remote=False):
self.node = node
self.rel = rel
self.remote = remote
def show(self):
print("Node:{},Rel:{},Is remote:{} || ".format(self.node, self.rel, self.remote), )
class Head(Relation): type = 'HEAD'
class Child(Relation): type = 'CHILD'
class Node(object):
def __init__(self, info):
self.id = info["id"]
self.anchored = False
self.anchors = []
self.label = info["label"]
if "properties" in info:
assert len(info["properties"]) == 1
self.properties = info["properties"][0] if "properties" in info else None
self.values = info["values"][0] if "values" in info else None
if "anchors" in info:
self.anchors = [(anc["from"], anc["to"]) for anc in info["anchors"]]
self.anchored = True
self.heads, self.childs = [], []
self.head_ids, self.child_ids = [], []
return
def add_head(self, edge):
assert edge["target"] == self.id
remote = False
if "properties" in edge and "remote" in edge["properties"]:
remote = True
if edge["source"] in self.head_ids:
self.heads.append(Head(edge["source"], edge["label"], remote))
# print("Multiple arcs between two nodes!")
return True
self.heads.append(Head(edge["source"], edge["label"], remote))
self.head_ids.append(edge["source"])
return False
def add_child(self, edge):
assert edge["source"] == self.id
remote = False
if "properties" in edge and "remote" in edge["properties"]:
remote = True
if edge["target"] in self.child_ids:
self.childs.append(Child(edge["target"], edge["label"], remote))
# print("Multiple arcs between two nodes!")
return True
self.childs.append(Child(edge["target"], edge["label"], remote))
self.child_ids.append(edge["target"])
return False
class Graph(object):
def __init__(self, js):
self.id = js["id"]
self.input = js["input"]
self.top = js["tops"] if "tops" in js else None
self.companion = js["companion"]
self.nodes = {}
if 'nodes' in js:
for node in js["nodes"]:
self.nodes[node["id"]] = Node(node)
self.edges = {}
self.multi_arc = False
if 'edges' in js:
for edge in js["edges"]:
multi_arc_child = self.nodes[edge["source"]].add_child(edge)
multi_arc_head = self.nodes[edge["target"]].add_head(edge)
if multi_arc_child or multi_arc_head:
self.multi_arc = True
self.meta_info = json.dumps(js)
self.gold_mrps = copy.deepcopy(js)
self.gold_mrps.pop('companion')
self.prediction = "prediction" in js
def get_childs(self, id):
childs = self.nodes[id].childs
child_ids = [c.node for c in childs]
return childs, child_ids
def extract_token_info_from_companion_data(self):
annotation = []
for line in self.companion:
line = '\t'.join(line)
annotation.append(parse_line(line, DEFAULT_FIELDS))
tokens = [x["form"] for x in annotation if x["form"] is not None]
lemmas = [x["lemma"] for x in annotation if x["lemma"] is not None]
pos_tags = [x["upostag"] for x in annotation if x["upostag"] is not None]
token_range = [tuple([int(i) for i in list(x["misc"].values())[0].split(':')]) for x in annotation]
return {"tokens": tokens,
"lemmas": lemmas,
"pos_tags": pos_tags,
"token_range": token_range}
def has_cross_arc(self):
tokens_range = []
for node_id, node_info in self.nodes.items():
tokens_range.append(node_info.anchors[0])
for i in range(len(tokens_range)):
for j in range(i + 1, len(tokens_range)):
if (tokens_range[i][1] > tokens_range[j][0] \
and tokens_range[i][1] < tokens_range[j][1] \
and tokens_range[i][0] < tokens_range[j][0]) or \
(tokens_range[j][1] > tokens_range[i][0] \
and tokens_range[j][1] < tokens_range[i][1] \
and tokens_range[j][0] < tokens_range[i][0]):
return True
return False
def get_arc_info(self):
tokens, arc_indices, arc_tags = [], [], []
concept_node = []
token_info = self.extract_token_info_from_companion_data()
tokens = token_info["tokens"]
lemmas = token_info["lemmas"]
pos_tags = token_info["pos_tags"]
token_range = token_info["token_range"]
# Step1: Construct the alignment between token and node
# Attention: multiple nodes can have overlapping anchors
alignment_dict = {}
node_label_dict = {}
for node_id, node_info in self.nodes.items():
concept_node.append(node_id + len(tokens))
alignment_dict[node_id + len(tokens)] = []
node_label_dict[node_id + len(tokens)] = node_info.label
assert len(node_info.anchors) == 1
node_anchored_begin, node_anchored_end = node_info.anchors[0][0], node_info.anchors[0][1]
for token_idx in range(len(token_range)):
token_anchored_begin, token_anchored_end = token_range[token_idx][0], token_range[token_idx][1]
if node_anchored_begin > token_anchored_end or node_anchored_end < token_anchored_begin:
continue
if token_anchored_begin >= node_anchored_begin and token_anchored_end <= node_anchored_end:
alignment_dict[node_id + len(tokens)].append(token_idx)
# check if suffix alignment exists
# Example case:
# Node anchor: 'of child'
# Sentence: 'Take of children'
if (node_anchored_end > token_anchored_begin and node_anchored_end < token_anchored_end) or \
(node_anchored_begin < token_anchored_end and node_anchored_begin > token_anchored_begin):
print((node_anchored_begin, node_anchored_end), '-->',
self.input[node_anchored_begin:node_anchored_end], \
(token_anchored_begin, token_anchored_end), '-->',
self.input[token_anchored_begin:token_anchored_end])
# Step2: Link node and its align token(s) via alignment_dict
# Add Terminal Edge
# for node_id,alignment_tokens in alignment_dict.items():
# for token_idx in alignment_tokens:
# arc_indices.append((token_idx,node_id))
# arc_tags.append('Terminal')
# Step3: Multi-Label Arc
childs_dict = {node_id: {} for node_id in self.nodes.keys()}
for node_id, node_info in self.nodes.items():
for child_of_node_info in node_info.childs:
child_node = child_of_node_info.node
arc_tag = child_of_node_info.rel
# the arc with one label
if child_node not in childs_dict[node_id]:
childs_dict[node_id][child_node] = arc_tag
# the arc with multi label
# aggregate the multi-label by label prior, defined in the start in this file
else:
# expand label_prior_dict; this only happens when an n-label arc (n > 2) occurs
if childs_dict[node_id][child_node] not in label_prior_dict:
label_prior_dict[childs_dict[node_id][child_node]] = len(label_prior_dict)
if label_prior_dict[arc_tag] < label_prior_dict[childs_dict[node_id][child_node]]:
arc_tag = arc_tag + '+' + childs_dict[node_id][child_node]
else:
arc_tag = childs_dict[node_id][child_node] + '+' + arc_tag
childs_dict[node_id][child_node] = arc_tag
# Step4: Add Label between node
for node_id, node_info in self.nodes.items():
for child_node in childs_dict[node_id]:
arc_tag = childs_dict[node_id][child_node]
arc_indices.append((child_node + len(tokens), node_id + len(tokens)))
arc_tags.append(arc_tag)
# Step 5: rank node by interval
node_range_dict = {}
for node_id, node_info in self.nodes.items():
node_anchored_begin, node_anchored_end = node_info.anchors[0][0], node_info.anchors[0][1]
node_range_dict[node_id + len(tokens)] = (node_anchored_begin, node_anchored_end)
node_range_dict = OrderedDict(sorted(node_range_dict.items(), key=lambda x: (x[1][0], -x[1][1])))
node_info_dict = {"alignment_dict": alignment_dict,
"node_range_dict": node_range_dict,
"node_label_dict": node_label_dict,
"graph_id": self.id}
ret = {"tokens": tokens,
"arc_indices": arc_indices,
"arc_tags": arc_tags,
"concept_node": concept_node,
"root_id": self.top[0] + len(tokens) if self.top is not None else None,
"lemmas": lemmas,
"pos_tags": pos_tags,
"node_info_dict": node_info_dict,
"graph_id": self.id,
"meta_info": self.meta_info,
"tokens_range": token_range,
"gold_mrps": self.gold_mrps}
return ret
def parse_sentence(sentence_blob: str):
graph = Graph(json.loads(sentence_blob))
if graph.has_cross_arc() and not graph.prediction:
return False
ret = graph.get_arc_info()
return ret
def lazy_parse(text: str):
for sentence in text.split("\n"):
if sentence:
ret = parse_sentence(sentence)
if ret is False:
continue
yield ret
@DatasetReader.register("eds_reader_conll2019")
class EDSDatasetReaderConll2019(DatasetReader):
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
lemma_indexers: Dict[str, TokenIndexer] = None,
action_indexers: Dict[str, TokenIndexer] = None,
arc_tag_indexers: Dict[str, TokenIndexer] = None,
concept_label_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self._lemma_indexers = None
if lemma_indexers is not None and len(lemma_indexers) > 0:
self._lemma_indexers = lemma_indexers
self._action_indexers = None
if action_indexers is not None and len(action_indexers) > 0:
self._action_indexers = action_indexers
self._arc_tag_indexers = None
if arc_tag_indexers is not None and len(arc_tag_indexers) > 0:
self._arc_tag_indexers = arc_tag_indexers
self._concept_label_indexers = concept_label_indexers or {
'concept_label': SingleIdTokenIndexer(namespace='concept_label')}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, 'r', encoding='utf8') as eds_file:
logger.info("Reading EDS instances from conllu dataset at: %s", file_path)
for ret in lazy_parse(eds_file.read()):
tokens = ret["tokens"]
arc_indices = ret["arc_indices"]
arc_tags = ret["arc_tags"]
root_id = ret["root_id"]
lemmas = ret["lemmas"]
pos_tags = ret["pos_tags"]
meta_info = ret["meta_info"]
node_info_dict = ret["node_info_dict"]
tokens_range = ret["tokens_range"]
gold_mrps = ret["gold_mrps"]
concept_node = ret["concept_node"]
gold_actions = get_oracle_actions(tokens, arc_indices, arc_tags, root_id, concept_node, node_info_dict) if arc_indices else None
# if len(gold_actions) / len(tokens) > 20:
# print(len(gold_actions) / len(tokens))
if gold_actions and gold_actions[-1] == '-E-':
print('-E-', ret["graph_id"])
continue
concept_label_list = list(node_info_dict["node_label_dict"].values())
yield self.text_to_instance(tokens, lemmas, pos_tags, arc_indices, arc_tags, gold_actions,
[root_id], [meta_info], concept_label_list, tokens_range, [gold_mrps])
@overrides
def text_to_instance(self, # type: ignore
tokens: List[str],
lemmas: List[str] = None,
pos_tags: List[str] = None,
arc_indices: List[Tuple[int, int]] = None,
arc_tags: List[str] = None,
gold_actions: List[str] = None,
root_id: List[int] = None,
meta_info: List[str] = None,
concept_label: List[int] = None,
tokens_range: List[Tuple[int, int]] = None,
gold_mrps: List[str] = None) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
token_field = TextField([Token(t) for t in tokens], self._token_indexers)
fields["tokens"] = token_field
meta_dict = {"tokens": tokens}
if lemmas is not None and self._lemma_indexers is not None:
fields["lemmas"] = TextField([Token(l) for l in lemmas], self._lemma_indexers)
if pos_tags is not None:
fields["pos_tags"] = SequenceLabelField(pos_tags, token_field, label_namespace="pos")
if arc_indices is not None and arc_tags is not None:
meta_dict["arc_indices"] = arc_indices
meta_dict["arc_tags"] = arc_tags
fields["arc_tags"] = TextField([Token(a) for a in arc_tags], self._arc_tag_indexers)
if gold_actions is not None:
meta_dict["gold_actions"] = gold_actions
fields["gold_actions"] = TextField([Token(a) for a in gold_actions], self._action_indexers)
if meta_info is not None:
meta_dict["meta_info"] = meta_info[0]
if gold_mrps is not None:
meta_dict["gold_mrps"] = gold_mrps[0]
if tokens_range is not None:
meta_dict["tokens_range"] = tokens_range
if concept_label is not None:
meta_dict["concept_label"] = concept_label
fields["concept_label"] = TextField([Token(a) for a in concept_label], self._concept_label_indexers)
if root_id is not None:
meta_dict["root_id"] = root_id[0]
fields["metadata"] = MetadataField(meta_dict)
return Instance(fields)
def get_oracle_actions(tokens, arc_indices, arc_tags, root_id, concept_node, node_info_dict):
actions = []
stack = []
buffer = []
deque = []
generated_order = {-1: -1}
total_node_num = len(tokens) + len(concept_node)
N = len(tokens)
for i in range(N - 1, -1, -1):
buffer.append(i)
graph = {}
for token_idx in range(total_node_num):
graph[token_idx] = []
# construct graph given directed_arc_indices and arc_tags
# key: id_of_point
# value: a list of tuples -> [(id_of_head1, label),(id_of_head2, label),...]
whole_graph = [[False for i in range(total_node_num)] for j in range(total_node_num)]
for arc, arc_tag in zip(arc_indices, arc_tags):
graph[arc[0]].append((arc[1], arc_tag))
whole_graph[arc[0]][arc[1]] = True
# i:head_point j:child_point
top_down_graph = [[] for i in range(total_node_num)] # N real point, 1 root point, concept_node
# i:child_point j:head_point ->Bool
# partial graph during construction
sub_graph = [[False for i in range(total_node_num)] for j in range(total_node_num)]
sub_graph_arc_list = []
for i in range(total_node_num):
for head_tuple_of_point_i in graph[i]:
head = head_tuple_of_point_i[0]
top_down_graph[head].append(i)
# auxiliary list for START and END op
alignment_dict = node_info_dict["alignment_dict"]
node_range_dict = node_info_dict["node_range_dict"]
node_label_dict = node_info_dict["node_label_dict"]
begin_dict = {}
end_dict = {}
# key:token id, value: list of node_id
node_begin_dict = {}
node_end_dict = {}
for token_id in range(len(tokens)):
node_begin_dict[token_id] = {}
node_end_dict[token_id] = {}
for order_node_id in node_range_dict.keys():
begin_dict[order_node_id] = alignment_dict[order_node_id][0]
end_dict[order_node_id] = alignment_dict[order_node_id][-1]
node_begin_dict[begin_dict[order_node_id]][order_node_id] = False
node_end_dict[end_dict[order_node_id]][order_node_id] = False
node_align_begin_flag = {}
node_align_end_flag = {}
for node_id in range(len(concept_node)):
node_align_begin_flag[node_id + len(tokens)] = False
node_align_end_flag[node_id + len(tokens)] = False
# return whether w1 is a head of w0
def has_head(w0, w1):
if w0 < 0 or w1 < 0:
return False
for w in graph[w0]:
if w[0] == w1:
return True
return False
def has_unfound_child(w):
for child in top_down_graph[w]:
if not sub_graph[child][w]:
return True
return False
# return whether w has any unfound head
def lack_head(w):
if w < 0:
return False
head_num = 0
for h in sub_graph[w]:
if h:
head_num += 1
if head_num < len(graph[w]):
return True
return False
# return the relation between child: w0, head: w1
def get_arc_label(w0, w1):
for h in graph[w0]:
if h[0] == w1:
return h[1]
def get_node_label(w0):
return node_label_dict[w0]
def check_graph_finish():
return whole_graph == sub_graph
def check_sub_graph(w0, w1):
if w0 < 0 or w1 < 0:
return False
else:
return not sub_graph[w0][w1]
def is_surface_token(token):
return token < len(tokens) and token >= 0
def is_concept_node(token):
return token >= len(tokens)
def start_generate_node(token):
if is_surface_token(token):
for concept_node_id, concept_node_status in node_begin_dict[token].items():
if concept_node_status == False:
return concept_node_id
return -1
def end_generate_node(token):
if is_surface_token(token):
concept_node_id_list = []
for concept_node_id, concept_node_status in node_end_dict[token].items():
if concept_node_status == False:
concept_node_id_list.append(concept_node_id)
if len(concept_node_id_list) > 0:
return concept_node_id_list
return [-1]
def finish_alignment_token(token):
if not is_surface_token(token):
return False
return start_generate_node(token) == -1 and end_generate_node(token) == [-1]
def finish_alignment_node(node):
if not is_concept_node(node):
return False
begin_align_flag = node_align_begin_flag[node]
end_align_flag = node_align_end_flag[node]
return begin_align_flag and end_align_flag
def lack_end_align(node):
if not is_concept_node(node):
return False
return node_align_end_flag[node] == False
def generate_all_concept_node():
for node in concept_node:
if node_align_end_flag[node] == False:
return False
if node_align_begin_flag[node] == False:
return False
return True
def find_end_align_of_node(node):
if not is_concept_node(node):
return -1, -1
buffer_token = alignment_dict[node][-1]
buffer_position = buffer.index(buffer_token)
return buffer_position, buffer_token
def find_end_align_of_token(token):
if not is_surface_token(token) or end_generate_node(token) == [-1]:
return False
end_generate_node_list = end_generate_node(token)
for node in stack:
if node in end_generate_node_list:
stack_token = node
stack_position = stack.index(stack_token)
return stack_token, stack_position
return False
def find_all_greater_edge(node):
for node_id, node_order in generated_order.items():
# skip self-node and symbol-node in generate_order dict, i.e. -1
if node_id == node or node_id == -1:
continue
if (has_head(node_id, node) and check_sub_graph(node_id, node)) or \
(has_head(node, node_id) and check_sub_graph(node, node_id)):
return False
return True
def get_oracle_actions_onestep(sub_graph, stack, buffer, actions, root_id):
s0 = stack[-1] if len(stack) > 0 else -1
s1 = stack[-2] if len(stack) > 1 else -1
b0 = buffer[-1] if len(buffer) > 0 else -1
# LEFT
if has_head(s0, b0) and check_sub_graph(s0, b0) and is_concept_node(b0):
actions.append("LEFT-EDGE#SPLIT_TAG#" + get_arc_label(s0, b0))
sub_graph[s0][b0] = True
sub_graph_arc_list.append((s0, b0))
return
# RIGHT_EDGE
elif has_head(b0, s0) and check_sub_graph(b0, s0) and is_concept_node(b0):
actions.append("RIGHT-EDGE#SPLIT_TAG#" + get_arc_label(b0, s0))
sub_graph[b0][s0] = True
sub_graph_arc_list.append((b0, s0))
return
# SELF-EDGE
elif has_head(s0, s0) and check_sub_graph(s0, s0) and is_concept_node(s0):
actions.append("SELF-EDGE#SPLIT_TAG#" + get_arc_label(s0, s0))
sub_graph[s0][s0] = True
sub_graph_arc_list.append((s0, s0))
return
# TOP
elif b0 == root_id and "TOP" not in actions:
actions.append("TOP")
# REDUCE
elif not has_unfound_child(s0) and not lack_head(s0) and is_concept_node(s0) and finish_alignment_node(s0):
actions.append("REDUCE")
stack.pop()
return
# DROP
elif finish_alignment_token(b0) and is_surface_token(b0):
actions.append("DROP")
buffer.pop()
while len(deque) != 0:
stack.append(deque.pop())
return
# SHIFT
elif len(buffer) != 0 and is_concept_node(b0) and find_all_greater_edge(b0):
while len(deque) != 0:
stack.append(deque.pop())
if buffer[-1] not in generated_order:
num_of_generated_node = len(generated_order)
generated_order[buffer[-1]] = num_of_generated_node
stack.append(buffer.pop())
actions.append("SHIFT")
# START
elif start_generate_node(b0) != -1 and is_surface_token(b0):
node_id = start_generate_node(b0)
buffer.append(node_id)
node_begin_dict[b0][node_id] = True
node_align_begin_flag[node_id] = True
actions.append("START#SPLIT_TAG#" + get_node_label(node_id))
# END
elif s0 in end_generate_node(b0) and s0 != -1 and is_surface_token(b0):
node_end_dict[b0][s0] = True
node_align_end_flag[s0] = True
actions.append("END")
# PASS
elif len(stack) != 0:
deque.append(stack.pop())
actions.append("PASS")
# ERROR
else:
actions.append('-E-')
cnt = 0
while not (len(stack) == 0 and len(buffer) == 0):
get_oracle_actions_onestep(sub_graph, stack, buffer, actions, root_id)
remain_unfound_arc = sorted(list(set(arc_indices) - set(sub_graph_arc_list)), key=lambda x: x[0])
cnt += 1
if actions[-1] == '-E-' or cnt > 10000:
print(node_info_dict["graph_id"])
break
if not check_graph_finish():
print(node_info_dict["graph_id"])
# actions.append('FINISH')
return actions
def check_cross_arc(file_path):
# check if cross arc exists
cross_arc_num = 0
err_sentence = []
err_range = []
with open(file_path, 'r', encoding='utf8') as eds_file:
for sentence in eds_file.read().split("\n"):
graph = Graph(json.loads(sentence))
tokens_range = []
for node_id, node_info in graph.nodes.items():
tokens_range.append(node_info.anchors[0])
for i in range(len(tokens_range)):
for j in range(i + 1, len(tokens_range)):
if (tokens_range[i][1] > tokens_range[j][0] \
and tokens_range[i][1] < tokens_range[j][1] \
and tokens_range[i][0] < tokens_range[j][0]) or \
(tokens_range[j][1] > tokens_range[i][0] \
and tokens_range[j][1] < tokens_range[i][1] \
and tokens_range[j][0] < tokens_range[i][0]):
cross_arc_num += 1
err_sentence.append(sentence)
tmp = [tokens_range[i][0], tokens_range[i][1], tokens_range[j][0], tokens_range[j][1]]
tmp = list(map(lambda x: str(x), tmp))
tmp = ','.join(tmp) + '\n' + graph.input[tokens_range[i][0]:tokens_range[i][1]] + \
'\n' + graph.input[tokens_range[j][0]:tokens_range[j][1]] + '\n'
err_range.append(tmp)
return cross_arc_num
def check_uncontinuous(file_path):
# check if un-continuous exists
with open(file_path, 'r', encoding='utf8') as eds_file:
for sentence in eds_file.read().split("\n"):
graph = Graph(json.loads(sentence))
uncontinuous_num = 0
total_num = 0
for node_id, node_info in graph.nodes.items():
if len(node_info.anchors) > 1:
uncontinuous_num += 1
total_num += 1
if uncontinuous_num > 0:
print(uncontinuous_num, total_num)
def check_top_nodes(file_path):
# check how many graphs have no top node or multiple top nodes
err_none = 0
err_multi = 0
with open(file_path, 'r', encoding='utf8') as eds_file:
for sentence in eds_file.read().split("\n"):
graph = Graph(json.loads(sentence))
if graph.top is None:
err_none += 1
print(graph.id)
continue
if len(graph.top) > 1:
print(graph.id)
err_multi += 1
continue
print(err_multi, err_none)
def check_carg(file_path, output_file_path):
# check carg property of node in EDS
value_label_dict = {}
triple_list = []
with open(file_path, 'r', encoding='utf8') as eds_file:
for sentence in eds_file.read().split("\n"):
graph = Graph(json.loads(sentence))
for node_id, node_info in graph.nodes.items():
if node_info.values is not None:
if node_info.label not in value_label_dict:
value_label_dict[node_info.label] = []
info_tuple = '---'.join(
[node_info.values, graph.input[node_info.anchors[0][0]:node_info.anchors[0][1]]])
if info_tuple not in value_label_dict[node_info.label]:
value_label_dict[node_info.label].append(info_tuple)
triple_list.append('\t'.join([graph.input[node_info.anchors[0][0]:node_info.anchors[0][1]], \
node_info.label, \
node_info.values, \
]))
print(list(value_label_dict.keys()))
def check_longest_sentence(file_path):
max_len = -1
cnt = 0
triple_list = []
with open(file_path, 'r', encoding='utf8') as eds_file:
for sentence in eds_file.read().split("\n"):
graph = Graph(json.loads(sentence))
max_len = max(len(graph.extract_token_info_from_companion_data()["tokens"]), max_len)
cnt += len(graph.extract_token_info_from_companion_data()["tokens"])
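# 35656 appears to be the number of sentences in the corpus (assumption),
# making `cnt` the average token count per sentence.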
cnt = cnt / 35656.0
print(max_len, cnt)