| id (stringlengths 3-8) | content (stringlengths 100-981k) |
|---|---|
151235
|
import dash
from dash.dependencies import Input, Output
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminder2007.csv')
# add an id column and set it as the index
# in this case the unique ID is just the country name, so we could have just
# renamed 'country' to 'id' (but given it the display name 'country'), but
# here it's duplicated just to show the more general pattern.
df['id'] = df['country']
df.set_index('id', inplace=True, drop=False)
app = dash.Dash(__name__)
app.layout = html.Div([
dash_table.DataTable(
id='datatable-row-ids',
columns=[
{'name': i, 'id': i, 'deletable': True} for i in df.columns
# omit the id column
if i != 'id'
],
data=df.to_dict('records'),
editable=True,
filter_action="native",
sort_action="native",
sort_mode='multi',
row_selectable='multi',
row_deletable=True,
selected_rows=[],
page_action='native',
        page_current=0,
        page_size=10,
),
html.Div(id='datatable-row-ids-container')
])
@app.callback(
Output('datatable-row-ids-container', 'children'),
Input('datatable-row-ids', 'derived_virtual_row_ids'),
Input('datatable-row-ids', 'selected_row_ids'),
Input('datatable-row-ids', 'active_cell'))
def update_graphs(row_ids, selected_row_ids, active_cell):
# When the table is first rendered, `derived_virtual_data` and
# `derived_virtual_selected_rows` will be `None`. This is due to an
# idiosyncrasy in Dash (unsupplied properties are always None and Dash
# calls the dependent callbacks when the component is first rendered).
    # So, if `row_ids` is `None`, then the component was just rendered
    # and its value will be the same as the component's dataframe.
    # Instead of handling `None` in here, you could also set
    # `derived_virtual_data=df.to_dict('records')` when you initialize
    # the component.
selected_id_set = set(selected_row_ids or [])
if row_ids is None:
dff = df
# pandas Series works enough like a list for this to be OK
row_ids = df['id']
else:
dff = df.loc[row_ids]
active_row_id = active_cell['row_id'] if active_cell else None
colors = ['#FF69B4' if id == active_row_id
else '#7FDBFF' if id in selected_id_set
else '#0074D9'
for id in row_ids]
return [
dcc.Graph(
id=column + '--row-ids',
figure={
'data': [
{
'x': dff['country'],
'y': dff[column],
'type': 'bar',
'marker': {'color': colors},
}
],
'layout': {
'xaxis': {'automargin': True},
'yaxis': {
'automargin': True,
'title': {'text': column}
},
'height': 250,
'margin': {'t': 10, 'l': 10, 'r': 10},
},
},
)
# check if column exists - user may have deleted it
# If `column.deletable=False`, then you don't
# need to do this check.
for column in ['pop', 'lifeExp', 'gdpPercap'] if column in dff
]
if __name__ == '__main__':
app.run_server(debug=True)
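# A hedged sketch of the alternative mentioned in the callback comment above:
# pre-populating derived_virtual_data at initialization so the callback never
# receives None (illustrative only, not part of the original app).
#
# dash_table.DataTable(
#     id='datatable-row-ids',
#     data=df.to_dict('records'),
#     derived_virtual_data=df.to_dict('records'),
#     ...
# )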
|
151252
|
import logging
from collections import deque
from PyQt6.QtWidgets import QWidget, QLabel
from PyQt6.QtCore import pyqtSignal
from core.utils.win32.utilities import get_monitor_hwnd
from core.event_service import EventService
from core.event_enums import KomorebiEvent
from core.widgets.base import BaseWidget
from core.utils.komorebi.client import KomorebiClient
from core.validation.widgets.komorebi.active_layout import VALIDATION_SCHEMA
try:
from core.utils.komorebi.event_listener import KomorebiEventListener
except ImportError:
KomorebiEventListener = None
logging.warning("Failed to load Komorebi Event Listener")
layout_cmds = {
"BSP": "bsp",
"Columns": "columns",
"Rows": "rows",
"VerticalStack": "vertical-stack",
"HorizontalStack": "horizontal-stack",
"UltrawideVerticalStack": "ultrawide-vertical-stack"
}
layout_snake_case = {
"BSP": "bsp",
"Columns": "columns",
"Rows": "rows",
"VerticalStack": "vertical_stack",
"HorizontalStack": "horizontal_stack",
"UltrawideVerticalStack": "ultrawide_vertical_stack"
}
class ActiveLayoutWidget(BaseWidget):
k_signal_connect = pyqtSignal(dict)
k_signal_disconnect = pyqtSignal()
k_signal_layout_change = pyqtSignal(dict, dict)
validation_schema = VALIDATION_SCHEMA
event_listener = KomorebiEventListener
def __init__(self, label: str, layout_icons: dict[str, str], hide_if_offline: bool, callbacks: dict[str, str]):
super().__init__(class_name="komorebi-active-layout")
self._label = label
self._layout_icons = layout_icons
self._layouts = deque([
'bsp', 'columns', 'rows', 'vertical-stack', 'horizontal-stack', 'ultrawide-vertical-stack'
])
self._hide_if_offline = hide_if_offline
self._event_service = EventService()
self._komorebic = KomorebiClient()
self._komorebi_screen = None
self._komorebi_workspaces = []
self._focused_workspace = {}
self._active_layout_text = QLabel()
self._active_layout_text.setProperty("class", "label")
self._active_layout_text.hide()
self.widget_layout.addWidget(self._active_layout_text)
self.callback_left = callbacks['on_left']
self.callback_right = callbacks['on_right']
self.callback_middle = callbacks['on_middle']
self.register_callback("next_layout", self._next_layout)
self.register_callback("prev_layout", self._prev_layout)
self.register_callback("flip_layout", self._komorebic.flip_layout)
self.register_callback("toggle_tiling", lambda: self._komorebic.toggle("tiling"))
self.register_callback("toggle_float", lambda: self._komorebic.toggle("float"))
self.register_callback("toggle_monocle", lambda: self._komorebic.toggle("monocle"))
self.register_callback("toggle_maximise", lambda: self._komorebic.toggle("maximise"))
self.register_callback("toggle_pause", lambda: self._komorebic.toggle("pause"))
self._register_signals_and_events()
def _next_layout(self):
if self._is_shift_layout_allowed():
self._layouts.rotate(1)
self._komorebic.change_layout(self._layouts[0])
def _prev_layout(self):
if self._is_shift_layout_allowed():
self._layouts.rotate(-1)
self._komorebic.change_layout(self._layouts[0])
def _is_shift_layout_allowed(self):
        return bool(
            self._focused_workspace.get('tile', False) and
            not self._focused_workspace.get('monocle_container', None) and
            not self._focused_workspace.get('maximized_window', None) and
            not self._komorebi_state.get('is_paused', False)
        )
def _register_signals_and_events(self):
active_layout_change_event_watchlist = [
KomorebiEvent.ChangeLayout,
KomorebiEvent.TogglePause,
KomorebiEvent.ToggleTiling,
KomorebiEvent.ToggleMonocle,
KomorebiEvent.ToggleMaximise
]
self.k_signal_connect.connect(self._on_komorebi_connect_event)
self.k_signal_disconnect.connect(self._on_komorebi_disconnect_event)
self.k_signal_layout_change.connect(self._on_komorebi_layout_change_event)
self._event_service.register_event(KomorebiEvent.KomorebiConnect, self.k_signal_connect)
self._event_service.register_event(KomorebiEvent.KomorebiDisconnect, self.k_signal_disconnect)
for event_type in active_layout_change_event_watchlist:
self._event_service.register_event(event_type, self.k_signal_layout_change)
def _on_komorebi_connect_event(self, state: dict) -> None:
self._update_active_layout(state, is_connect_event=True)
def _on_komorebi_layout_change_event(self, _event: dict, state: dict) -> None:
self._update_active_layout(state)
def _on_komorebi_disconnect_event(self) -> None:
if self._hide_if_offline:
self._active_layout_text.hide()
def _update_active_layout(self, state: dict, is_connect_event=False):
try:
if self._update_komorebi_state(state):
self._focused_workspace = self._komorebic.get_focused_workspace(self._komorebi_screen)
if not self._focused_workspace:
return
layout_name, layout_icon = self._get_layout_label_info()
if is_connect_event:
conn_layout_name = self._focused_workspace['layout']['Default']
conn_layout_cmd = layout_cmds.get(conn_layout_name, 'bsp')
while self._layouts[0] != conn_layout_cmd:
self._layouts.rotate(1)
self._active_layout_text.setText(
self._label.replace("{icon}", layout_icon).replace("{layout_name}", layout_name)
)
if self._active_layout_text.isHidden():
self._active_layout_text.show()
except Exception:
logging.exception("Failed to update komorebi status and widget button state")
def _get_layout_label_info(self):
if self._komorebi_state.get('is_paused', False):
layout_name = 'Paused'
layout_icon = self._layout_icons['paused']
elif not self._focused_workspace.get('tile', False):
layout_name = 'Floating'
layout_icon = self._layout_icons['floating']
elif self._focused_workspace.get('maximized_window', None):
layout_name = 'Maximised'
layout_icon = self._layout_icons['maximised']
elif self._focused_workspace.get('monocle_container', None):
layout_name = 'Monocle'
layout_icon = self._layout_icons['monocle']
else:
layout_name = self._focused_workspace['layout']['Default']
layout_icon = self._layout_icons.get(layout_snake_case[layout_name], 'unknown layout')
return layout_name, layout_icon
def _update_komorebi_state(self, komorebi_state: dict):
try:
self._screen_hwnd = get_monitor_hwnd(int(QWidget.winId(self)))
self._komorebi_state = komorebi_state
if self._komorebi_state:
self._komorebi_screen = self._komorebic.get_screen_by_hwnd(self._komorebi_state, self._screen_hwnd)
self._komorebi_workspaces = self._komorebic.get_workspaces(self._komorebi_screen)
return True
except TypeError:
return False
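# A hedged construction sketch (values assumed, not from this file): the label
# template uses the {icon} and {layout_name} placeholders substituted in
# _update_active_layout, and the icon keys mirror layout_snake_case plus the
# special states handled in _get_layout_label_info.
#
# widget = ActiveLayoutWidget(
#     label="{icon} {layout_name}",
#     layout_icons={
#         "bsp": "BSP", "columns": "COL", "rows": "ROW",
#         "vertical_stack": "V-STK", "horizontal_stack": "H-STK",
#         "ultrawide_vertical_stack": "W-STK", "monocle": "MONO",
#         "maximised": "MAX", "floating": "FLOAT", "paused": "PAUSED",
#     },
#     hide_if_offline=True,
#     callbacks={"on_left": "next_layout", "on_middle": "toggle_monocle",
#                "on_right": "prev_layout"},
# )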
|
151288
|
import inspect
import unittest
from config.database import DATABASES
from src.masoniteorm.models import Model
from src.masoniteorm.query import QueryBuilder
from src.masoniteorm.query.grammars import MySQLGrammar
from src.masoniteorm.relationships import has_many
from src.masoniteorm.scopes import SoftDeleteScope
from tests.utils import MockConnectionFactory
class BaseTestQueryBuilderScopes(unittest.TestCase):
grammar = "mysql"
def get_builder(self, table="users"):
connection = MockConnectionFactory().make("default")
return QueryBuilder(
grammar=MySQLGrammar,
connection_class=connection,
connection="mysql",
table=table,
connection_details=DATABASES,
)
def test_scopes(self):
builder = self.get_builder().set_scope(
"gender", lambda model, q: q.where("gender", "w")
)
self.assertEqual(
builder.gender().where("id", 1).to_sql(),
"SELECT * FROM `users` WHERE `users`.`gender` = 'w' AND `users`.`id` = '1'",
)
def test_global_scopes(self):
builder = self.get_builder().set_global_scope(
"where_not_null", lambda q: q.where_not_null("deleted_at"), action="select"
)
self.assertEqual(
builder.where("id", 1).to_sql(),
"SELECT * FROM `users` WHERE `users`.`id` = '1' AND `users`.`deleted_at` IS NOT NULL",
)
def test_global_scope_from_class(self):
builder = self.get_builder().set_global_scope(SoftDeleteScope())
self.assertEqual(
builder.where("id", 1).to_sql(),
"SELECT * FROM `users` WHERE `users`.`id` = '1' AND `users`.`deleted_at` IS NULL",
)
def test_global_scope_remove_from_class(self):
builder = (
self.get_builder()
.set_global_scope(SoftDeleteScope())
.remove_global_scope(SoftDeleteScope())
)
self.assertEqual(
builder.where("id", 1).to_sql(),
"SELECT * FROM `users` WHERE `users`.`id` = '1'",
)
def test_global_scope_adds_method(self):
builder = self.get_builder().set_global_scope(SoftDeleteScope())
self.assertEqual(builder.with_trashed().to_sql(), "SELECT * FROM `users`")
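# A hedged sketch (not part of the test suite) of the scope API these tests
# exercise: set_scope registers a lambda under a name that can then be chained
# like a native builder method.
#
# builder = QueryBuilder(grammar=MySQLGrammar, table="users",
#                        connection_details=DATABASES)
# builder.set_scope("active", lambda model, q: q.where("active", 1))
# sql = builder.active().where("id", 1).to_sql()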
|
151297
|
import horovod.tensorflow as hvd
import os
import tensorflow as tf
from preprocessing import resnet_preprocessing, imagenet_preprocessing, darknet_preprocessing
import functools
def create_dataset(data_dir, batch_size, preprocessing='resnet', validation=False):
filenames = [os.path.join(data_dir, i) for i in os.listdir(data_dir)]
data = tf.data.TFRecordDataset(filenames).shard(hvd.size(), hvd.rank())
if not validation:
parse_fn = functools.partial(parse_train, preprocessing=preprocessing)
data = data.shuffle(buffer_size=1000)
data = data.map(parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
parse_fn = functools.partial(parse_validation, preprocessing=preprocessing)
data = data.map(parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # We drop the remainder because XLA needs same-sized batches and because allreduce is used to
    # compute accuracy - validation accuracy may therefore differ slightly from computing on all of
    # the validation data.
data = data.batch(batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)
return data
@tf.function
def parse(record, is_training, preprocessing):
features = {'image/encoded': tf.io.FixedLenFeature((), tf.string),
'image/class/label': tf.io.FixedLenFeature((), tf.int64),
'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32),
}
parsed = tf.io.parse_single_example(record, features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
# bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
bbox = tf.stack([parsed['image/object/bbox/%s' % x].values for x in ['ymin', 'xmin', 'ymax', 'xmax']])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
if preprocessing == 'resnet':
augmenter = None # augment.AutoAugment()
image = resnet_preprocessing.preprocess_image(image_bytes, bbox, 224, 224, 3, is_training=is_training)
elif preprocessing == 'imagenet': # used by hrnet
image = imagenet_preprocessing.preprocess_image(image_bytes, bbox, 224, 224, 3, is_training=is_training)
    elif preprocessing == 'darknet':
        image = darknet_preprocessing.preprocess_image(image_bytes, bbox, 256, 256, 3, is_training=is_training)
    else:
        raise ValueError('Unknown preprocessing: %s' % preprocessing)
label = tf.cast(parsed['image/class/label'] - 1, tf.int32)
one_hot_label = tf.one_hot(label, depth=1000, dtype=tf.float32)
return image, one_hot_label
def parse_train(record, preprocessing):
return parse(record, is_training=True, preprocessing=preprocessing)
def parse_validation(record, preprocessing):
return parse(record, is_training=False, preprocessing=preprocessing)
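# A minimal usage sketch (paths and batch size assumed, not from the original
# file): Horovod must be initialized before create_dataset is called, because
# the dataset is sharded by hvd.size() and hvd.rank().
#
# hvd.init()
# train_data = create_dataset('/data/imagenet/train', batch_size=256,
#                             preprocessing='resnet', validation=False)
# val_data = create_dataset('/data/imagenet/validation', batch_size=256,
#                           preprocessing='resnet', validation=True)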
|
151371
|
from __future__ import unicode_literals
import yaml
BLOG_AUTHOR = "Stay Static" # (translatable)
BLOG_TITLE = "Nikola Stay Static Sample" # (translatable)
SITE_URL = "http://staystatic.github.io/sites/nikola/"
BLOG_EMAIL = "<EMAIL>"
BLOG_DESCRIPTION = "Nikola demo for Stay Static" # (translatable)
COMMENT_SYSTEM = None
DEFAULT_LANG = "en"
TRANSLATIONS = {
DEFAULT_LANG: "",
}
NAVIGATION_LINKS = {}
THEME = "base"
THEME_COLOR = '#5670d4'
POSTS = (
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.md", "", "story.tmpl"),
)
TIMEZONE = "UTC"
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
}
INDEX_PATH = "posts"
COPY_SOURCES = False
SHOW_SOURCELINK = False
PRETTY_URLS = False
DISABLED_PLUGINS = ["robots"]
GLOBAL_CONTEXT = {}
WRITE_TAG_CLOUD = False
GENERATE_RSS = False
DISABLED_PLUGINS = ['classify_page_index', 'classify_sections', 'classify_indexes', 'classify_archive', 'tags', 'sitemap', 'robots', 'create_bundles']
|
151382
|
import vcs
import sys
import argparse
import vcs.testing.regression as regression
import os
p = argparse.ArgumentParser()
p.add_argument("-H", "--fitToHeight", default=True, action="store_false")
p.add_argument("-u", "--units", default="percent")
p.add_argument("-x", "--xoffset", default=0, type=float)
p.add_argument("-y", "--yoffset", default=0, type=float)
p.add_argument("-z", "--zoom", default=1., type=float)
p.add_argument("-s", "--source", default="./somefile.png")
args = p.parse_args(sys.argv[1:])
print(args)
bg = True
x = vcs.init(bg=bg, geometry=(1200, 800))
x.open()
png = os.path.join(sys.prefix,"share","uvcdat","sample_data","BlueMarble.ppm")
x.put_png_on_canvas(png,args.zoom,args.xoffset,args.yoffset,args.units,args.fitToHeight)
fnm = "test_vcs_put_png_on_canvas_%s_%s_%s_%s_%s" % (args.zoom,args.xoffset,args.yoffset,args.units,args.fitToHeight)
x.png(fnm)
ret = regression.check_result_image(fnm+'.png',args.source,20.)
|
151411
|
import logging
import os
import datetime
import random
import string
from nhd.NHDCommon import NHDCommon
from enum import Enum
from colorlog import ColoredFormatter
from kubernetes import client, config, watch
from kubernetes.client.rest import ApiException
from nhd.Node import Node
from typing import Dict, List, Set, Tuple
import magicattr
class K8SEventType(Enum):
EVENT_TYPE_NORMAL = 0
EVENT_TYPE_WARNING = 1
class K8SMgr:
"""
Helper class to communicate with Kubernetes API server.
Assumes NHD is either running in a pod with proper permissions, or
there is a KUBECONFIG file with cluster information.
"""
__instance = None
@staticmethod
def GetInstance():
if K8SMgr.__instance is None:
K8SMgr()
return K8SMgr.__instance
def __init__(self):
"""
Initializes the logger and loads Kubernetes configuration
"""
self.logger = NHDCommon.GetLogger(__name__)
if K8SMgr.__instance is None:
try:
config.load_incluster_config()
except:
config.load_kube_config()
self.v1 = client.CoreV1Api()
self.last_seen_ver = None
K8SMgr.__instance = self
else:
raise Exception("Cannot create more than one K8SMgr!")
def GetNodes(self):
"""
Get the list of all currently-ready nodes
"""
nodes = []
try:
nl = self.v1.list_node(watch=False)
for node in nl.items:
for status in node.status.conditions:
if status.reason == "KubeletReady" and status.type == "Ready" and status.status == "True":
nodes.append(node.metadata.name)
except ApiException as e:
self.logger.error(f"Exception when calling CoreV1Api->list_node:\n {e}")
return nodes
def GetNodeHugepageResources(self, node: str):
"""
Pulls the hugepage resource information from a node (requests/allocatable)
"""
try:
a = self.v1.read_node(name=node)
alloc = int(a.status.allocatable['hugepages-1Gi'][:a.status.allocatable['hugepages-1Gi'].find('G')])
free = alloc
# Don't tabulate hugepage resources that are used here since we'll get them when we subtract off pods
return (alloc, free)
except ApiException as e:
self.logger.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
except Exception as e:
self.logger.error("Non-API exception when getting hugepage information")
return (0, 0)
def GetNodeAttr(self, name, attr):
"""
Get an attribute from a node. Useful for pulling things like nested data structures.
"""
try:
n = self.v1.read_node(name=name)
for status in n.status.conditions:
if status.reason == "KubeletReady" and status.type == "Ready" and status.status == "True":
return magicattr.get(n, attr)
except ApiException as e:
self.logger.error(f"Exception when calling CoreV1Api->list_node:\n {e}")
def GetNodeAddr(self, name):
return self.GetNodeAttr(name, 'status.addresses[0].address')
def GetNodeLabels(self, name):
return self.GetNodeAttr(name, 'metadata.labels')
def GetPodNode(self, pod, ns):
"""
Get the node where the pod resides
"""
try:
ret = self.v1.read_namespaced_pod(pod, ns)
            if ret is None:
return ''
return ret.spec.node_name
except ApiException as e:
self.logger.error("Exception when calling CoreV1Api->read_namespaced_pod: %s\n" % e)
return ''
def GetPodObj(self, pod, ns):
try:
pobj = self.v1.read_namespaced_pod(pod, ns)
return pobj
except ApiException as e:
self.logger.error("Exception when calling CoreV1Api->read_namespaced_pod: %s\n" % e)
return None
def GetCfgAnnotations(self, pod, ns):
""" Get the config annotations from the pod """
k = 'sigproc.viasat.io/nhd_config'
try:
annot = self.GetPodAnnotations(ns, pod)
            if annot is None:
self.logger.error(f'Couldn\'t find pod annotations for pod {ns}.{pod}')
return False
return annot[k]
except KeyError as e:
self.logger.error(f'Key [{e}] not found in pod annotations for {ns}.{pod}')
return False
    def GetPodNodeGroups(self, pod, ns) -> List[str]:
""" Returns the node group name of the pod, or "default" if it doesn't exist. """
try:
p = self.v1.read_namespaced_pod(pod, ns)
        except Exception:
self.logger.warning(f"Failed to get pod annotations for pod {pod} in namespace {ns}")
return ["default"]
if 'sigproc.viasat.io/nhd_groups' in p.metadata.annotations:
groups = p.metadata.annotations["sigproc.viasat.io/nhd_groups"].split(',')
self.logger.info(f'Pod is using NHD group {groups}')
return groups
else:
return ["default"]
def IsNodeActive(self, node):
"""
Find out if the node is tainted for NHD.
Only tainted nodes will be used by NHD for scheduling, and
will also be ignored by the default scheduler.
"""
        candidate = False
        try:
            a = self.v1.read_node(name=node)
            taints = a.spec.taints
            for t in taints:
                if t.key == 'sigproc.viasat.io/nhd_scheduler' and t.effect == 'NoSchedule':
                    candidate = True
                if t.key == 'node.kubernetes.io/unschedulable':
                    self.logger.warning(f'Node {node} disabled scheduling. Removing from list')
                    candidate = False
                    break
            # A tainted node is only usable when the kubelet also reports Ready
            if candidate and not (a.status.conditions[0].reason == "KubeletReady" and a.status.conditions[0].type == "Ready" and a.status.conditions[0].status == "True"):
                return False
        except Exception:
            return False
        return candidate
def GetPodAnnotations(self, ns, podname):
try:
p = self.v1.read_namespaced_pod(podname, ns)
return p.metadata.annotations
except ApiException as e:
self.logger.error("Exception when calling CoreV1Api->read_namespaced_pod: %s\n" % e)
return None
def GetScheduledPods(self, sched_name):
"""
Get all scheduled pods for a given scheduler
"""
ret = self.v1.list_pod_for_all_namespaces()
pods = []
for i in ret.items:
if i.spec.scheduler_name == sched_name:
pods.append((i.metadata.name, i.metadata.namespace, i.metadata.uid, i.status.phase))
return pods
def GetRequestedPodResources(self, pod: str, ns: str) -> Dict[str, str]:
"""
Get the pod resources in dict format
"""
try:
p = self.v1.read_namespaced_pod(pod, ns)
# Only support one container per pod for now
return p.spec.containers[0].resources.requests
except ApiException as e:
self.logger.error("Exception when calling CoreV1Api->read_namespaced_pod: %s\n" % e)
return {}
def ServicePods(self, sched_name):
""" Check if a pod is waiting to be scheduled with the NHD scheduler """
pods = {}
try:
ret = self.v1.list_pod_for_all_namespaces()
except ApiException as e:
self.logger.error("Failed to connect to Kubernetes")
return pods
for i in ret.items:
if i.spec.scheduler_name != sched_name:
continue
pods[(i.metadata.namespace, i.metadata.name, i.metadata.uid)] = (i.status.phase, i.spec.node_name)
return pods
def FlushWatchQueue(self):
self.logger.info('Flushing watch queue')
w = watch.Watch()
        if self.last_seen_ver is None:
e = w.stream(self.v1.list_pod_for_all_namespaces)
else:
e = w.stream(self.v1.list_pod_for_all_namespaces, resource_version=self.last_seen_ver)
for event in e:
self.last_seen_ver = event['object'].metadata.resource_version
# def ServicePods(self, sched_name):
""" Switched to using a list of pods instead of watching """
# w = watch.Watch()
# if self.last_seen_ver == None:
# e = w.stream(self.v1.list_pod_for_all_namespaces)
# else:
# e = w.stream(self.v1.list_pod_for_all_namespaces, resource_version=self.last_seen_ver)
#
# for event in e:
# self.last_seen_ver = event['object'].metadata.resource_version
# if event['object'].spec.scheduler_name != sched_name:
# continue
#
# return (event['object'].metadata.name,
# event['object'].metadata.namespace,
# event['object'].status.phase,
# event['object'].spec.node_name,
# event['type'])
#
# return None
# print(event['object'].status.phase, event['object'].metadata.name)
# if event['object'].status.phase == "Pending" and event['object'].spec.node_name is None:
# try:
# self.logger.info(f"Received pod scheduling request for {event['object'].metadata.name}")
# return event['object'].metadata.name
# except client.rest.ApiException as e:
# self.logger.error(json.loads(e.body)['message'])
def AddNADToPod(self, pod, ns, nads):
""" Adds network attachment definitions to bind to pod """
try:
self.v1.patch_namespaced_pod(pod, ns, body= {
"metadata": {
"annotations": {
"k8s.v1.cni.cncf.io/networks": nads
}
}
})
except ApiException as e:
self.logger.error(f'Failed to update pod metadata NAD {pod} in namespace {ns}')
return False
return True
def AddSRIOVDevice(self, pod, ns, device, num):
""" Adds an SR-IOV device using the SR-IOV plugin. Only adds to the first container. Unfortunately Kubernetes
does not allow you to patch resources of a pod, so we must replace the container. """
        self.logger.info(f'Adding {num} SR-IOV device{"s" if num != 1 else ""} {device} to pod {ns}.{pod}')
# This does NOT work. Even replacing a pod to update resources does not work. There's an outstanding KEP
# To fix this, but it's still not available yet:
# https://github.com/kubernetes/community/pull/2908/commits/4ad6fa7c27f4a21c27a6be83c2dc81c43549fa55
try:
p = self.v1.read_namespaced_pod(pod, ns)
except ApiException as e:
self.logger.error(f'Failed to get pod spec {pod} in namespace {ns}')
return False
# Only add to first container
p.spec.containers[0].resources.limits[f'intel.com/{device}'] = f'{num}'
p.spec.containers[0].resources.requests[f'intel.com/{device}'] = f'{num}'
try:
self.v1.replace_namespaced_pod(pod, ns, body=p)
except ApiException as e:
self.logger.error(f'Failed to replace pod spec {pod} in namespace {ns}')
print(e)
return False
self.logger.info(f'Added SR-IOV device into pod {pod}')
return True
def GetCfgMap(self, pod, ns):
"""
Gets the first configmap from an existing pod
"""
ret = self.v1.list_namespaced_pod(watch=False, namespace=ns)
for i in ret.items:
            if i.metadata.name != pod:  # Only look at the requested pod
continue
cm = None
for v in i.spec.volumes:
if v.config_map:
cm = v.config_map.name
break
if cm:
self.logger.info(f'Found base ConfigMap {cm} for pod {pod}')
cmdat = self.v1.list_namespaced_config_map(ns)
for c in cmdat.items:
if c.metadata.name != cm:
continue
self.logger.info(f'Successfully looked up ConfigMap {cm}')
for cname, cval in c.data.items():
self.logger.info(f'Returning ConfigMap for file {cname}')
return (cm, cval)
else:
break
self.logger.error(f'No ConfigMap found for {ns}.{pod}')
return (None, None)
def AnnotatePodConfig(self, ns, podname, configstr):
""" Annotate the pod's configuration """
try:
self.v1.patch_namespaced_pod(podname, ns, body= {
"metadata": {
"annotations": {
"sigproc.viasat.io/nhd_config": configstr
}
}
})
except ApiException as e:
self.logger.error(f'Failed to update pod metadata configuration for {podname} in namespace {ns}')
return False
return True
def PatchConfigMap(self, ns, cmname, cmbody):
""" Patches a ConfigMap object in place with a new value """
try:
resp = self.v1.read_namespaced_config_map(name=cmname, namespace=ns)
keyname = list(resp.data.keys())[0]
tmp_map = {
"kind": "ConfigMap",
"apiVersion": "v1",
"metadata": {
"name": cmname,
},
"data": {
keyname: cmbody
}
}
ret = self.v1.patch_namespaced_config_map(name=cmname, namespace=ns, body=tmp_map)
except ApiException as e:
self.logger.error(f'Failed to replace configmap {cmname} in namespace {ns}')
return False
return True
# def GetKeyFromConfigMap(self, ns, cmname):
# """ Returns the name of the first key in a configmap """
# try:
# resp = self.v1.read_namespaced_config_map(name=cmname, namespace=ns)
# keyname = list(resp.data.keys())[0]
# return keyname
# except ApiException as e:
# self.logger.error(f'Failed to get keyname from configmap {cmname} in namespace {ns}')
# return ''
# def CopyConfigMap(self, ns, oldcm, cmbody):
# """ Replaces a ConfigMap object with a new one """
# cmname = "nhd-config" + self.GetRandomUid()
# try:
# keyname = self.GetKeyFromConfigMap(ns, oldcm)
# tmp_map = {
# "kind": "ConfigMap",
# "apiVersion": "v1",
# "metadata": {
# "name": cmname,
# },
# "data": {
# keyname: cmbody
# }
# }
# ret = self.v1.create_namespaced_config_map(body=tmp_map, namespace=ns)
# except ApiException as e:
# self.logger.error(f'Failed to create configmap {cmname} in namespace {ns}')
# return False
# return cmname
# def ReplaceVolumeMountConfigMap(self, podname, ns, oldcm, newcm):
# """ Replaces the configmap object in the volume mount of an old configmap """
# try:
# ret = self.v1.read_namespaced_pod(podname, ns)
# except ApiException as e:
# self.logger.error(f'API exception when fetching namespaced pod: {ns}.{podname}: {e}')
# return False
# for v in ret.spec.volumes:
# if v.config_map is not None:
# if v.config_map.name == oldcm:
# # We want to replace this configmap object
def BindPodToNode(self, podname, node, ns):
""" Binds a pod to a node to start the deployment process. """
try:
target = client.V1ObjectReference()
target.kind = "Node"
target.apiVersion = "v1"
target.name = node
meta = client.V1ObjectMeta()
meta.name = podname
body = client.V1Binding(target=target, metadata=meta)
body.target = target
body.metadata = meta
return self.v1.create_namespaced_binding(namespace=ns, body=body)
except ApiException as e:
self.logger.error(f'Failed to bind pod {podname} to node {node} in namespace {ns}: {e}')
return False
except ValueError as e:
# This is not a real error. It's a problem in the API waiting to be fixed:
# https://github.com/kubernetes-client/python/issues/547
pass
return True
def GetCfgType(self, pod: str, ns: str) -> str:
"""
Gets the configuration type from the pod's annotations
"""
k = 'sigproc.viasat.io/cfg_type'
try:
annot = self.GetPodAnnotations(ns, pod)
            if annot is None:
self.logger.error(f'Couldn\'t find pod annotations for pod {ns}.{pod}')
return ''
return annot[k]
except KeyError as e:
self.logger.error(f'Key [{e}] not found in pod annotations for {ns}.{pod}')
return ''
def GetTimeNow(self) -> str:
"""
Uses 'Zulu' / GMT format.
Any deviation from this will throw an error at the API server
"""
return datetime.datetime.utcnow().isoformat(timespec='seconds')+'Z'
def GetRandomUid(self) -> str:
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))
def GeneratePodEvent(self, podobj, podname, ns, reason, _type, message):
""" Generates a pod event on the kubernetes API server """
try:
meta = client.V1ObjectMeta()
meta.name = f'{podname}.{self.GetRandomUid()}'
meta.namespace = ns
invobj = client.V1ObjectReference()
invobj.name = podname
invobj.kind = "Pod"
invobj.namespace = ns
invobj.api_version = 'v1'
invobj.uid = podobj.metadata.uid
evtsrc = client.V1EventSource()
evtsrc.component = 'NHD Scheduler'
if _type == K8SEventType.EVENT_TYPE_NORMAL:
etype = "Normal"
lg = self.logger.info
else:
etype = "Warning"
lg = self.logger.warning
timestamp = self.GetTimeNow()
# Log an event in our pod too instead of duplicating externally
lg(f'Event for pod {ns}/{podname} -- Reason={reason}, message={message}')
event = client.V1Event( involved_object=invobj,
source = evtsrc,
metadata=meta,
reason=reason,
message=f'NHD: {message}',
count=1,
type=etype,
first_timestamp=timestamp,
last_timestamp=timestamp)
self.v1.create_namespaced_event(namespace=ns, body=event)
except ApiException as e:
self.logger.error(f'Failed to send event for pod {podname}: {e}')
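# A hedged usage sketch (not part of the original module): K8SMgr is a
# singleton, so callers obtain it via GetInstance() rather than the constructor.
#
# mgr = K8SMgr.GetInstance()
# for node in mgr.GetNodes():
#     if mgr.IsNodeActive(node):
#         print(node, mgr.GetNodeAddr(node))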
|
151428
|
import os
def before_all(context):
context.tmpfiles = []
def after_all(context):
for filename in context.tmpfiles:
os.remove(filename)
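# A minimal sketch (assumed step module, not part of this environment file) of
# how a step would register a temporary file for the after_all cleanup above:
#
# @given('a scratch file')
# def step_impl(context):
#     filename = 'scratch.tmp'
#     open(filename, 'w').close()
#     context.tmpfiles.append(filename)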
|
151443
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from scipy.stats import multinomial
from ..utils.array import one_hot
from .categorical import CategoricalDist
if tf.__version__ >= '2.0':
tf.random.set_seed(11)
else:
tf.set_random_seed(11)
rnd = np.random.RandomState(13)
x_np = rnd.randn(7, 5)
y_np = rnd.randint(3, size=7)
y_onehot_np = one_hot(y_np, 3)
x = keras.Input([5], dtype='float32')
y = keras.Input([1], dtype='int32')
y_onehot = keras.Input([3], dtype='float32')
logits = keras.layers.Dense(3)(x)
proba = keras.layers.Lambda(K.softmax)(logits)
dist = CategoricalDist(logits)
sample = keras.layers.Lambda(lambda args: dist.sample())(logits)
# test against scipy implementation
proba_np = keras.Model(x, proba).predict(x_np)
dists_np = [multinomial(n=1, p=p) for p in proba_np] # cannot broadcast
def test_sample():
# if tf.__version__ >= '2.0':
# expected = one_hot(np.array([1, 1, 1, 2, 1, 0, 2]), n=3)
# else:
# expected = one_hot(np.array([1, 1, 1, 2, 1, 0, 2]), n=3)
actual = keras.Model(x, sample).predict(x_np)
assert actual.shape == (7, 3)
np.testing.assert_array_almost_equal(actual.sum(axis=1), np.ones(7))
# np.testing.assert_array_almost_equal(actual, expected)
def test_log_proba():
expected = np.stack([d.logpmf(a) for d, a in zip(dists_np, y_onehot_np)])
out = keras.layers.Lambda(lambda args: dist.log_proba(y))(logits)
actual = keras.Model([x, y], out).predict([x_np, y_np])
np.testing.assert_array_almost_equal(actual, expected)
def test_log_proba_onehot():
expected = np.stack([d.logpmf(a) for d, a in zip(dists_np, y_onehot_np)])
out = keras.layers.Lambda(lambda args: dist.log_proba(y_onehot))(logits)
actual = keras.Model([x, y_onehot], out).predict([x_np, y_onehot_np])
np.testing.assert_array_almost_equal(actual, expected)
def test_entropy():
expected = np.stack([d.entropy() for d in dists_np])
out = keras.layers.Lambda(lambda args: dist.entropy())(logits)
actual = keras.Model(x, out).predict(x_np)
np.testing.assert_array_almost_equal(actual, expected)
def test_cross_entropy():
# TODO: test this without implementing the same thing in numpy
pass
def test_kl_divergence():
# TODO: test this without implementing the same thing in numpy
pass
def test_proba_ratio():
# TODO: test this without implementing the same thing in numpy
pass
|
151527
|
import numpy as np
import torch
import onqg.dataset.Constants as Constants
def get_non_pad_mask(seq):
assert seq.dim() == 2
return seq.ne(Constants.PAD).type(torch.float).unsqueeze(-1)
def get_attn_key_pad_mask(seq_k, seq_q):
''' For masking out the padding part of key sequence. '''
# Expand to fit the shape of key query attention matrix.
len_q = seq_q.size(1)
padding_mask = seq_k.eq(Constants.PAD)
padding_mask = padding_mask.unsqueeze(1).expand(-1, len_q, -1) # b x lq x lk
return padding_mask
def get_subsequent_mask(seq):
''' For masking out the subsequent info. '''
sz_b, len_s = seq.size()
subsequent_mask = torch.triu(torch.ones((len_s, len_s), device=seq.device, dtype=torch.uint8),
diagonal=1)
subsequent_mask = subsequent_mask.unsqueeze(0).expand(sz_b, -1, -1) # b x ls x ls
return subsequent_mask
def get_slf_attn_mask(attn_mask, lengths, device=None):
''' For masking out according to the given attention matrix '''
max_length = torch.max(lengths, 0)[0].item()
mask = torch.ones((lengths.size(0), max_length, max_length), device=device, dtype=torch.uint8)
for idx, sample in enumerate(attn_mask):
seq_len = int(len(sample) **0.5)
sample = sample.view(seq_len, seq_len)
        pad_sample = sample if max_length == seq_len else torch.cat((sample, torch.ones((max_length - seq_len, seq_len),
                                                                                        device=sample.device, dtype=torch.uint8)), dim=0)
mask[idx].narrow(1, 0, seq_len).copy_(pad_sample)
mask = mask.view(-1, max_length, max_length)
return mask
def get_slf_window_mask(seq, window_size=3, separate=-1):
''' For masking out the words in distance:
only allow a word to attend to those near to it
'near' means: within window_size words
'''
assert window_size >= 0, "Window size cannot be smaller than zero! "
sz_b, len_s = seq.size()
slf_window_mask = torch.ones((len_s, len_s), device=seq.device, dtype=torch.uint8)
if separate >= 0:
tmp_seq = [[w.item() for w in sent] for sent in seq]
indexes = [sent.index(separate) for sent in tmp_seq]
else:
for idx in range(len_s):
for i in range(idx - window_size, idx + window_size + 1):
if i >= 0 and i < len_s:
slf_window_mask[idx][i] = 0
slf_window_mask = slf_window_mask.unsqueeze(0).repeat(sz_b, 1, 1) # b x ls x ls
if separate >= 0:
for b_idx in range(sz_b):
sep = indexes[b_idx]
for idx in range(len_s):
sep_final = tmp_seq[b_idx].index(separate, sep + 1)
if idx == 0:
for i in range(0, sep_final + 1):
slf_window_mask[b_idx][idx][i] = 0
elif idx == sep:
for i in range(0, sep + 1):
slf_window_mask[b_idx][idx][i] = 0
elif idx == sep_final:
slf_window_mask[b_idx][idx][0] = 0
for i in range(sep + 1, sep_final + 1):
slf_window_mask[b_idx][idx][i] = 0
else:
slf_window_mask[b_idx][idx][0] = 0
for i in range(idx - window_size, idx + window_size + 1):
if i >= 0 and i < len_s:
if (idx <= sep and i <= sep) or (idx > sep and i > sep):
slf_window_mask[b_idx][idx][i] = 0
if idx <= sep:
slf_window_mask[b_idx][idx][sep] = 0
else:
slf_window_mask[b_idx][idx][sep_final] = 0
return slf_window_mask
def get_edge_mask(edges):
''' Get mask matrix for edges
edges - [batch_size, node_num * node_num]
return - [batch_size, node_num, node_num]
'''
len_edges = edges.size(1)
node_num = int(len_edges **0.5)
mask = edges.eq(Constants.PAD)
mask = mask.view(-1, node_num, node_num)
return mask
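# A small self-check (not in the original module) illustrating get_subsequent_mask:
# position i may only attend to positions <= i, so ones sit strictly above the diagonal.
if __name__ == '__main__':
    demo_seq = torch.ones(2, 4, dtype=torch.long)
    print(get_subsequent_mask(demo_seq)[0])
    # tensor([[0, 1, 1, 1],
    #         [0, 0, 1, 1],
    #         [0, 0, 0, 1],
    #         [0, 0, 0, 0]], dtype=torch.uint8)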
|
151529
|
from pylonemutestcase import PylonEmuTestCase
from pypylon import pylon
import unittest
class CallTestSuite(PylonEmuTestCase):
# Tests that you can set the GainRaw parameter of the camera
def test_gain_raw(self):
cam = self.create_first()
cam.Open()
# Set GainRaw to min value (192)
cam.GainRaw.Value = cam.GainRaw.Min
self.assertEqual(192, cam.GainRaw.Value)
# Set GainRaw to max value (1023)
cam.GainRaw.Value = cam.GainRaw.Max
self.assertEqual(1023, cam.GainRaw.Value)
# Set GainRaw to 500
cam.GainRaw.Value = 500
self.assertEqual(500, cam.GainRaw.Value)
cam.Close()
# Tests that you can set the Height parameter of the camera
def test_height(self):
cam = self.create_first()
cam.Open()
cam.Height.Value = cam.Height.Min
self.assertEqual(1, cam.Height.Value)
cam.Height.Value = cam.Height.Max
self.assertEqual(4096, cam.Height.Value)
cam.Height.Value = 500
self.assertEqual(500, cam.Height.Value)
cam.Close()
# Tests that you can set the Width parameter of the camera
def test_width(self):
cam = self.create_first()
cam.Open()
cam.Width.Value = cam.Width.Min
self.assertEqual(1, cam.Width.Value)
cam.Width.Value = cam.Width.Max
self.assertEqual(4096, cam.Width.Value)
cam.Width.Value = 500
self.assertEqual(500, cam.Width.Value)
cam.Close()
# Tests that you can set the ExposureTimeRaw parameter of the camera
def test_exposure_time_raw(self):
cam = self.create_first()
cam.Open()
cam.ExposureTimeRaw.Value = cam.ExposureTimeRaw.Min
self.assertEqual(100, cam.ExposureTimeRaw.Value)
cam.ExposureTimeRaw.Value = cam.ExposureTimeRaw.Max
self.assertEqual(3000000, cam.ExposureTimeRaw.Value)
cam.ExposureTimeRaw.Value = 1000
self.assertEqual(1000, cam.ExposureTimeRaw.Value)
cam.Close()
# Tests that an emulated camera has no hardware interface
def test_has_hardware_interface(self):
cam = self.create_first()
cam.Open()
self.assertFalse(cam.Is1394())
self.assertFalse(cam.IsUsb())
self.assertFalse(cam.IsCameraLink())
self.assertFalse(cam.IsGigE())
cam.Close()
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
151585
|
import torch
from sbibm.metrics import mmd
from .utils import sample_blobs_same
def test_mmd():
X, Y = sample_blobs_same(n=1000)
mmd_1 = mmd(X=X, Y=Y, implementation="tp_sutherland")
mmd_2 = mmd(X=X, Y=Y, implementation="tp_djolonga")
assert torch.allclose(mmd_1, mmd_2, rtol=1e-04, atol=1e-04)
|
151586
|
import functools
import warnings
import numpy as np
import numpy.linalg as npla
import sys, os
import time
from PES.compute_covariance import *
from PES.initial_sample import *
from PES.hyper_samples import *
from PES.utilities import *
from PES.sample_minimum import *
from PES.PES import *
from PES.compute_posterior import *
from PES.EP import *
from PES.global_optimization import *
from PES.target_function import *
#The function to run PES to minimize the target function.
#Parameters: @target_function: the objective function we want to minimize
# @x_minimum: the lower bounds for each dimension
# @x_maximum: the upper bounds for each dimension
# @dimension: the dimensions of the objective function
# @number_of_hyperparameter_sets: the number of the samples of the hyperparameters of the kernel we want to draw.
# It is the M defined in the paper.
# @number_of_burnin: number of burnins
# @sampling_method: the method used to sample the posterior distribution of the hyperparameters. User can choose
# 'mcmc' or 'hmc'.
# @number_of_initial_points: the number of samples we want to use as initial observations
# @number_of_experiments: number of experiments we want to run. For each experiment, we use different randomizations
# for starting points.
# @number_of_iterations: number of iterations we want to run for each experiment
# @number_of_features: the number of features that we would like to use for feature mapping. It is the "m" in the paper.
# @optimization_method: optimization method used when calling global_optimization function. User can choose any method
# specified in the scipy.optimize.minimize
# @seed: seed specified for randomization
def run_PES(target_function, x_minimum, x_maximum, dimension, number_of_hyperparameter_sets = 100, number_of_burnin = 50, \
sampling_method = 'mcmc', number_of_initial_points = 3, number_of_experiments = 1, number_of_iterations = 60, \
number_of_features = 1000, optimization_method = 'SLSQP', seed = None):
warnings.filterwarnings('ignore')
check_result_file_exist()
if seed is not None:
np.random.seed(seed)
#For Hartmann6
x_min = x_minimum
x_max = x_maximum
target = target_function
#For Branin-Hoo
#x_min = np.asarray([0.0,0.0])
#x_max = np.asarray([1.0,1.0])
#target = Branin_Hoo
d = dimension
num_of_hyperSets_initial = number_of_hyperparameter_sets
number_burn = number_of_burnin
sample_method = sampling_method
bnds = get_bounds(x_min, x_max)
opt_method = 'L-BFGS-B'
#We obtain three random samples
num_initial_points = number_of_initial_points
final_result = []
for pp in range(number_of_experiments):
write_header_to_files(pp)
warnings.filterwarnings('ignore')
Xsamples = initial_samples(x_min, x_max, num_initial_points)
write_data_to_file("Xsamples.txt", Xsamples)
        #Guesses first stores the initialized guesses
guesses = Xsamples
write_data_to_file("guesses.txt", guesses)
Ysamples = np.zeros((Xsamples.shape[0]))
for i in range(Xsamples.shape[0]):
Ysamples[i] = target(Xsamples[i,:])
Ysamples = np.asarray([Ysamples])
Ysamples = Ysamples.T
print('Best so far in the initial data ' + str((min(Ysamples))[0]))
write_data_to_file("Ysamples.txt", Ysamples)
#We sample from the posterior distribution of the hyper-parameters
with hide_prints():
noise, l, sigma = sample_hypers(Xsamples, Ysamples, d, 0.3, num_of_hyperSets_initial, number_burn, sample_method, seed)
#global_minimum = target(np.array([(5-np.pi)/15,12.275/15]))
#global_minimum = target(np.array([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]))
valid_evaluation = 1
log10_scale_vec = []
for g in range(number_of_iterations):
print('PES, ' + str(pp) + 'th job, ' + str(g) + 'th iteration')
start_1 = time.time()
num_of_hyperSets = num_of_hyperSets_initial
Xsamples_count_before = len(Xsamples)
Ysamples_count_before = len(Ysamples)
guesses_count_before = len(guesses)
initial_point = guesses[-1,:]
num_of_features = number_of_features
num_of_obser = len(Ysamples)
x_minimum_vec = []
K_vec = []
K_star_min_vec = []
K_plus_W_tilde_inverse_vec = []
m_f_minimum_vec = []
v_f_minimum_vec = []
c_and_m_vec = []
opt_method = 'L-BFGS-B'
warnings.filterwarnings("error")
valid_num_hyperSets = 0
for j in range(num_of_hyperSets):
opt_method = 'L-BFGS-B'
try:
result = sample_min_with_randFeatures(num_of_features, d, Xsamples, Ysamples, sigma[j], l[j], noise[j], initial_point, opt_method, False, bnds)
x_minimum = result.x
x_minimum_vec.append(x_minimum)
if opt_method == 'L-BFGS-B':
hess_at_min_inverse = result.hess_inv.todense()
else:
hess_at_min_inverse = result.hess_inv
hess_at_min = compute_inverse(hess_at_min_inverse)
value_of_nObservations = (Ysamples.T)[0]
K, K_star_min, K_plus_W_tilde_inverse, m_f_minimum, v_f_minimum, c_and_m = Expectation_Propagation(Xsamples, value_of_nObservations, num_of_obser, x_minimum, d, l[j,:], sigma[j], noise[j], hess_at_min)
K_vec.append(K)
K_star_min_vec.append(K_star_min)
K_plus_W_tilde_inverse_vec.append(K_plus_W_tilde_inverse)
m_f_minimum_vec.append(m_f_minimum)
v_f_minimum_vec.append(v_f_minimum)
c_and_m_vec.append(c_and_m)
valid_num_hyperSets = valid_num_hyperSets + 1
except:
pass
num_of_hyperSets = valid_num_hyperSets
opt_method = optimization_method
warnings.filterwarnings("error")
PES_fail = False
try:
PES = functools.partial(PES_aquisition_function_multi, Xsamples = Xsamples, x_minimum = x_minimum_vec, l_vec = l, \
sigma = sigma, noise = noise, K = K_vec, K_star_min = K_star_min_vec, \
K_plus_W_tilde_inverse = K_plus_W_tilde_inverse_vec, \
m_f_minimum = m_f_minimum_vec, v_f_minimum = v_f_minimum_vec, c_and_m = c_and_m_vec, \
num_of_hyperSets = num_of_hyperSets)
ret = global_optimization(PES, d, x_min, x_max, gradient = None, gridsize = 500, stored_min_guesses = None, \
using_grid = True, optimize_method = opt_method, maxiter = 2000, bnds = bnds)
optimum = np.array(ret.x)
optimum_value = np.array([target(optimum)])
except:
                print('PES failed')
PES_fail = True
pass
if PES_fail:
warnings.filterwarnings('ignore')
with hide_prints():
noise, l, sigma = sample_hypers(Xsamples, Ysamples, d, 0.3, num_of_hyperSets_initial, number_burn, sample_method, seed)
print('return back due to PES fail')
continue
Xsamples = np.vstack((Xsamples, optimum))
Ysamples = np.vstack((Ysamples, optimum_value))
end_1 = time.time()
print('PES takes ' + str(end_1 - start_1) + ' seconds')
print('PES suggests: ')
print(optimum)
start_2 = time.time()
#We sample from the posterior distribution of the hyper-parameters
warnings.filterwarnings('ignore')
num_of_hyperSets = num_of_hyperSets_initial
try:
with hide_prints():
noise, l, sigma = sample_hypers(Xsamples, Ysamples, d, 0.3, num_of_hyperSets_initial, number_burn, sample_method, seed)
except:
if len(Xsamples) > Xsamples_count_before:
Xsamples = Xsamples[:-1,:]
if len(Ysamples) > Ysamples_count_before:
Ysamples = Ysamples[:-1]
print('Sampling hyperparameters of posterior GP failed')
continue
end_2 = time.time()
print('Retraining the model takes '+ str(end_2 - start_2) + ' seconds')
write_data_to_file("Xsamples.txt", optimum)
write_data_to_file("Ysamples.txt", optimum_value)
start_3 = time.time()
K_plus_I_inverse_vec = []
num_of_obser = len(Xsamples)
for w in range(num_of_hyperSets):
K_plus_I_inverse = covNobeservations(Xsamples, num_of_obser, sigma[w], noise[w], l[w]) + sigma[w]*10**(-10)*np.eye((num_of_obser))
K_plus_I_inverse_vec.append(np.array(K_plus_I_inverse))
warnings.filterwarnings("error")
try:
pos_mean_function = functools.partial(posterior_mean_given_nObservations, X_nObservations = Xsamples, value_of_nObservations = Ysamples, \
K_plus_I_inverse = K_plus_I_inverse_vec, l = l, sigma = sigma, \
num_of_hyperSets = num_of_hyperSets)
pos_mean_grad_function = functools.partial(posterior_gradient_given_nObservations, X_nObservations = Xsamples, value_of_nObservations = Ysamples, \
K_plus_I_inverse = K_plus_I_inverse_vec, l = l, sigma = sigma, \
num_of_hyperSets = num_of_hyperSets, d = d)
ret_pos = global_optimization(pos_mean_function, d, x_min, x_max, gradient = pos_mean_grad_function, gridsize = 500, \
stored_min_guesses = None, using_grid = True, optimize_method = opt_method, \
maxiter = 2000, bnds = bnds)
except:
if len(Xsamples) > Xsamples_count_before:
Xsamples = Xsamples[:-1,:]
if len(Ysamples) > Ysamples_count_before:
Ysamples = Ysamples[:-1]
print('Find the minimum of posterior mean failed')
continue
pos_optimum = np.array(ret_pos.x)
write_data_to_file("guesses.txt", pos_optimum)
current_value = target(pos_optimum)
if current_value < (min(Ysamples))[0]:
print('The recommended point ' + str(pos_optimum))
else:
current_value = (min(Ysamples))[0]
print('The recommended point ' + str(Xsamples[np.argmin(Ysamples)]))
end_3 = time.time()
print('Recommending the point takes '+ str(end_3 - start_3) + ' seconds')
print('Best so far ' + str(current_value))
guesses = np.vstack((guesses, pos_optimum))
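# A hedged example call (the toy objective below is assumed, not from the
# original file); it minimizes a 2-d sphere function over the unit box:
#
# def sphere(x):
#     return float(np.sum(np.asarray(x) ** 2))
#
# run_PES(sphere, np.asarray([-1.0, -1.0]), np.asarray([1.0, 1.0]), dimension=2,
#         number_of_experiments=1, number_of_iterations=10, seed=0)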
|
151595
|
import matplotlib.pyplot as p
p.switch_backend("SVG")
import mpld3
import seaborn as sns; sns.set()
def Axis_FactorPlot(data, x, y=None, hue=None, row=None, col=None, kind="point"):
sns.set(style="ticks")
ax = sns.factorplot(x=x, y=y, hue=hue, data=data, kind=kind, row=row, col=col)
d = mpld3.fig_to_dict(ax.fig)
return d
def Axis_LMPlot(data, x, y=None, hue=None):
sns.set(color_codes=True)
ax = sns.lmplot(x=x, y=y, hue=hue, data=data)
d = mpld3.fig_to_dict(ax.fig)
return d
def Axis_PairPlot(data, vars=None, hue=None):
g = sns.pairplot(data, hue=hue, vars=vars)
d = mpld3.fig_to_dict(g.fig)
return d
def Axis_JointPlot(data, x, y, kind="scatter"):
sns.set(style="white", color_codes=True)
    g = sns.jointplot(x=x, y=y, data=data, kind=kind)
d = mpld3.fig_to_dict(g.fig)
return d
def Cat_StripPlot(data, x, y, hue=None, jitter=False):
sns.set_style("whitegrid")
    g = sns.stripplot(x=x, y=y, data=data, hue=hue, jitter=jitter)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_SwarmPlot(data, x, y=None, hue=None):
sns.set_style("whitegrid")
    g = sns.swarmplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_BoxPlot(data, x, y=None, hue=None):
sns.set_style("whitegrid")
    g = sns.boxplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_ViolinPlot(data, x, y=None, hue=None):
sns.set_style("whitegrid")
    g = sns.violinplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_LVPlot(data, x, y=None, hue=None):
sns.set_style("whitegrid")
    g = sns.lvplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_PointPlot(data, x, y, hue=None):
sns.set_style("whitegrid")
    g = sns.pointplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_BarPlot(data, x, y, hue=None):
sns.set_style("whitegrid")
    g = sns.barplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Cat_CountPlot(data, x, y, hue=None):
sns.set_style("whitegrid")
    g = sns.countplot(x=x, y=y, data=data, hue=hue)
d = mpld3.fig_to_dict(g.figure)
return d
def Reg_RegPlot(data, x, y):
sns.set_style("whitegrid")
g = sns.regplot(x, y, data)
d = mpld3.fig_to_dict(g.figure)
return d
def Reg_KDEPlot(data, x, y):
sns.set_style("whitegrid")
dx = data[x]
dy = data[y]
g = sns.kdeplot(dx, dy)
d = mpld3.fig_to_dict(g.figure)
return d
def Reg_RugPlot(data, x):
sns.set_style("whitegrid")
dx = data[x]
g = sns.rugplot(dx)
d = mpld3.fig_to_dict(g.figure)
return d
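# A minimal usage sketch (toy data assumed, not part of the original module);
# each wrapper returns an mpld3 figure dict that is ready for JSON serialization.
#
# import pandas as pd, json
# df = pd.DataFrame({"day": ["Mon", "Tue", "Mon", "Tue"],
#                    "total": [10, 12, 9, 15],
#                    "sex": ["M", "M", "F", "F"]})
# fig_dict = Cat_BoxPlot(df, x="day", y="total", hue="sex")
# print(json.dumps(fig_dict)[:80])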
|
151634
|
import dataclasses
import cefconsole
from wecs.core import System
from wecs.core import and_filter
from wecs.core import Component
class WECSSubconsole(cefconsole.Subconsole):
name = "WECS"
package = 'wecs'
template_dir = 'templates'
html = "wecs.html"
funcs = {
'refresh_wecs_matrix': 'refresh_wecs_matrix',
'toggle_live_refresh_wecs_matrix': 'toggle_live_refresh_wecs_matrix',
}
refresh = True
live_refresh = False
def refresh_wecs_matrix(self):
self.refresh = True
def toggle_live_refresh_wecs_matrix(self):
self.live_refresh = not self.live_refresh
def update(self):
if not hasattr(base, 'console'):
return
if self.refresh or self.live_refresh:
entities = base.ecs_world.get_entities()
uids = {e._uid.name: e for e in entities}
uid_list = sorted(uids.keys())
component_types = set()
for entity in entities:
for component in entity.components:
component_types.add(type(component))
component_types = sorted(component_types, key=lambda ct: repr(ct))
def crepr(e, ct):
if ct in e:
return e[ct]
else:
return None
matrix = [
(uid, [crepr(uids[uid], ct) for ct in component_types])
for uid in uid_list
]
template = base.console.env.get_template('{}/wecs_matrix.html'.format(self.name))
content = template.render(
component_types=component_types,
matrix=matrix,
)
self.console.exec_js_func('update_wecs_matrix', content)
self.refresh = False
class UpdateWecsSubconsole(System):
entity_filters = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(base, 'console'):
self.has_console = False
return
self.has_console = True
self.subconsole = WECSSubconsole()
base.console.add_subconsole(self.subconsole)
def update(self, entities_by_filter):
if self.has_console:
self.subconsole.update()
class EntityWatcherSubconsole(cefconsole.Subconsole):
name = "Entity Watcher"
package = 'wecs'
template_dir = 'templates'
html = "entity.html"
funcs = {
}
def update(self, entities):
if not hasattr(base, 'console'):
return
entities = [
{'obj': e}
for e in sorted(
list(entities),
key=lambda e: repr(e._uid),
)
]
for entity in entities:
entity['uid'] = entity['obj']._uid
entity['components'] = sorted(
list(entity['obj'].get_components()),
key=lambda c: repr(c),
)
entity['components'] = [
{'obj': c}
for c in entity['components']
]
for component in entity['components']:
component['name'] = type(component['obj'])
component['fields'] = dataclasses.fields(component['obj'])
component['fields'] = [
{
'name': f.name,
'type': f.type,
'value': getattr(component['obj'], f.name)
}
for f in component['fields']
]
template = base.console.env.get_template('{}/watcher.html'.format(self.name))
content = template.render(
entities=entities,
)
self.console.exec_js_func('update_entity_watcher', content)
@Component()
class WatchedEntity:
pass
class WatchEntitiesInSubconsole(System):
entity_filters = {
'watched': and_filter([WatchedEntity]),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(base, 'console'):
self.has_console = False
return
self.has_console = True
self.subconsole = EntityWatcherSubconsole()
base.console.add_subconsole(self.subconsole)
def update(self, entities_by_filter):
if self.has_console:
self.subconsole.update(entities_by_filter['watched'])
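# A hedged sketch (the world/entity API here is assumed, not shown in this
# module): attaching WatchedEntity to an entity makes it match the 'watched'
# filter of WatchEntitiesInSubconsole, so it appears in the watcher subconsole.
#
# entity = base.ecs_world.create_entity(WatchedEntity())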
|
151635
|
from sqlalchemy import Column, Integer, String, Numeric, func, distinct, Boolean
from app import db
class Rais(db.Model):
__tablename__ = 'rais'
region = Column(String(1), primary_key=True)
mesoregion = Column(String(4), primary_key=True)
microregion = Column(String(5), primary_key=True)
state = Column(String(2), primary_key=True)
municipality = Column(String(7), primary_key=True)
occupation_family = Column(String(4), primary_key=True)
occupation_group = Column(String(1), primary_key=True)
industry_class = Column(String(5), primary_key=True)
industry_division = Column(String(2), primary_key=True)
industry_section = Column(String(1), primary_key=True)
establishment = Column(String(14), primary_key=True)
employee = Column(String(11), primary_key=True)
ethnicity = Column(String(2), primary_key=True)
establishment_size = Column(String(2), primary_key=True)
gender = Column(String(1), primary_key=True)
legal_nature = Column(String(2), primary_key=True)
literacy = Column(String(2), primary_key=True)
simple = Column(String(2), primary_key=True)
year = Column(Integer, primary_key=True)
age = Column(Integer)
wage = Column(Numeric(17, 2))
hidden = Column(Boolean)
@classmethod
def dimensions(cls):
return [
'region',
'mesoregion',
'microregion',
'state',
'municipality',
'occupation_family',
'occupation_group',
'industry_class',
'industry_division',
'industry_section',
'establishment',
'employee',
'ethnicity',
'establishment_size',
'gender',
'legal_nature',
'literacy',
'simple',
'year',
]
@classmethod
def aggregate(cls, value):
return {
'average_age': func.avg(cls.age),
'average_wage': func.avg(cls.wage),
'wage': func.sum(cls.wage),
'jobs': func.count(cls.employee),
'average_establishment_size': (func.count(cls.employee)
/ func.count(distinct(cls.establishment)))
}[value]
@classmethod
def values(cls):
return [
'average_age',
'average_wage',
'wage',
'jobs',
'average_establishment_size'
]
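# A hedged query sketch (not part of the original model): combining dimensions()
# and aggregate() in a SQLAlchemy query; assumes a Flask-SQLAlchemy session is
# available as db.session inside an application context.
#
# q = (db.session.query(Rais.year, Rais.state, Rais.aggregate('average_wage'))
#      .filter(Rais.hidden == False)
#      .group_by(Rais.year, Rais.state))
# results = q.all()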
|
151640
|
import pandas_flavor as pf
import pandas as pd
from typing import Union
@pf.register_dataframe_method
def move(
df: pd.DataFrame,
source: Union[int, str],
target: Union[int, str],
position: str = "before",
axis: int = 0,
) -> pd.DataFrame:
"""
Move column or row to a position adjacent to another column or row in
dataframe. Must have unique column names or indices.
This operation does not reset the index of the dataframe. User must
explicitly do so.
Does not apply to multilevel dataframes.
Functional usage syntax:
```python
df = move(df, source=3, target=15, position='after', axis=0)
```
Method chaining syntax:
```python
import pandas as pd
import janitor
df = (
pd.DataFrame(...)
.move(source=3, target=15, position='after', axis=0)
)
```
:param df: The pandas Dataframe object.
:param source: column or row to move
:param target: column or row to move adjacent to
:param position: Specifies whether the Series is moved to before or
after the adjacent Series. Values can be either `before` or `after`;
defaults to `before`.
:param axis: Axis along which the function is applied. 0 to move a
row, 1 to move a column.
:returns: The dataframe with the Series moved.
    :raises ValueError: if `axis` is not `0` or `1`.
    :raises ValueError: if `position` is not `before` or `after`.
:raises ValueError: if `source` row or column is not in dataframe.
:raises ValueError: if `target` row or column is not in dataframe.
"""
df = df.copy()
if axis not in [0, 1]:
raise ValueError(f"Invalid axis '{axis}'. Can only be 0 or 1.")
if position not in ["before", "after"]:
raise ValueError(
f"Invalid position '{position}'. Can only be 'before' or 'after'."
)
if axis == 0:
names = list(df.index)
if source not in names:
raise ValueError(f"Source row '{source}' not in dataframe.")
if target not in names:
raise ValueError(f"Target row '{target}' not in dataframe.")
names.remove(source)
pos = names.index(target)
if position == "after":
pos += 1
names.insert(pos, source)
df = df.loc[names, :]
else:
names = list(df.columns)
if source not in names:
raise ValueError(f"Source column '{source}' not in dataframe.")
if target not in names:
raise ValueError(f"Target column '{target}' not in dataframe.")
names.remove(source)
pos = names.index(target)
if position == "after":
pos += 1
names.insert(pos, source)
df = df.loc[:, names]
return df
|
151665
|
import torch
import torch.nn as nn
import numpy as np
import FrEIA.framework as Ff
from .. import InvertibleArchitecture
__all__ = ['beta_0', 'beta_1', 'beta_2', 'beta_4', 'beta_8', 'beta_16', 'beta_32', 'beta_inf']
model_base_url = 'https://heibox.uni-heidelberg.de/seafhttp/files/6f91503d-7459-4080-b10a-e979f8b3d20f/'
model_urls = {b : f'{model_base_url}{b}.avg.pt' for b in __all__}
class InvertibleImagenetClassifier(InvertibleArchitecture):
def __init__(self, lr, mu_init, mu_conv_init, mu_low_rank_k, input_dims, n_classes, n_loss_dims_1d, n_total_dims_1d, backbone: InvertibleArchitecture, head: InvertibleArchitecture, finetune_mu=False):
super().__init__()
self.model = None
self.lr = lr
self.n_classes = n_classes
self.backbone = backbone
self.head = head
self.construct_inn(Ff.InputNode(input_dims[0], input_dims[1], input_dims[2], name='input'), backbone, head)
self.n_total_dims_1d = n_total_dims_1d
self.n_loss_dims_1d = n_loss_dims_1d
init_scale = mu_init / np.sqrt(2 * (n_loss_dims_1d // n_classes))
self.mu_fc = nn.Parameter(torch.zeros(1, n_classes, n_loss_dims_1d))
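        # Block-diagonal initialization: each class mean is a scaled one-hot
        # pattern repeated every `n_classes` entries of the fc latent.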
for k in range(n_loss_dims_1d // n_classes):
self.mu_fc.data[0, :, n_classes * k: n_classes * (k + 1)] = init_scale * torch.eye(n_classes)
self.mu_low_rank_k = mu_low_rank_k
if self.mu_low_rank_k > 0:
mu_conv_dims = n_total_dims_1d - n_loss_dims_1d
self.mu_t = nn.Parameter(mu_conv_init * torch.randn(self.n_classes, self.mu_low_rank_k).cuda())
self.mu_m = nn.Parameter(mu_conv_init * torch.randn(self.mu_low_rank_k, mu_conv_dims).cuda())
else:
self.mu_conv = nn.Parameter(mu_conv_init * torch.randn(1, n_classes, n_total_dims_1d - n_loss_dims_1d))
self.train_mu = True
self.train_phi = False
self.train_inn = True
self.model_parameters = list(filter(lambda p: p.requires_grad, self.model.parameters()))
self.finetune_mu = finetune_mu
self.optimizer_params = [{
'params': self.model_parameters,
'lr': 0 * self.lr if finetune_mu else 1 * self.lr,
'weight_decay':0.}
]
if self.train_mu:
self.optimizer_params.append({
'params': [self.mu_fc],
'lr': 1. * self.lr,
'weight_decay': 0.
})
self.optimizer_params.append({
'params': [self.mu_m, self.mu_t] if self.mu_low_rank_k > 0 else [self.mu_conv],
'lr': 1. * self.lr,
'weight_decay': 0.
})
if self.train_phi:
self.optimizer_params.append({
'params': [self.phi],
'lr': 1. * self.lr,
'weight_decay': 0.
})
self.optimizer = torch.optim.SGD(self.optimizer_params, self.lr, momentum=0.9, weight_decay=1e-5)
def construct_inn(self, input, backbone: InvertibleArchitecture, head: InvertibleArchitecture):
nodes = []
split_nodes = []
nodes.append(input)
backbone_nodes, backbone_split_nodes, skip_connections = backbone.construct_inn(nodes[-1])
nodes += backbone_nodes
if skip_connections:
print("HAS SKIP CONNECTION")
head_nodes, head_split_nodes = head.construct_inn(nodes[-1], skip_connections)
split_nodes += backbone_split_nodes
else:
head_nodes, head_split_nodes = head.construct_inn(nodes[-1])
nodes.append(Ff.OutputNode(head_nodes[-1], name='out_fc'))
nodes += head_nodes
split_nodes += head_split_nodes
self.model = Ff.ReversibleGraphNet(nodes + split_nodes, verbose=True)
print(self.model)
return nodes
def calc_mu_conv(self):
self.mu_conv = torch.mm(self.mu_t, self.mu_m).unsqueeze(0)
def cluster_distances(self, z, mu):
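        # Pairwise squared Euclidean distances via the expansion
        # ||z_i - mu_j||^2 = ||z_i||^2 - 2<z_i, mu_j> + ||mu_j||^2,
        # avoiding a batchsize x n_classes x dim intermediate tensor.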
z_i_z_i = torch.sum(z**2, dim=1, keepdim=True) # batchsize x 1
mu_j_mu_j = torch.sum(mu**2, dim=2) # 1 x n_classes
z_i_mu_j = torch.mm(z, mu.squeeze().t()) # batchsize x n_classes
return -2 * z_i_mu_j + z_i_z_i + mu_j_mu_j
def forward(self, x, y=None):
if self.finetune_mu:
with torch.no_grad():
z_fc, z_conv = self.model(x)
jac = self.model.log_jacobian(run_forward=False)
else:
z_fc, z_conv = self.model(x)
jac = self.model.log_jacobian(run_forward=False)
if self.mu_low_rank_k > 0:
self.calc_mu_conv()
cluster_distances = self.cluster_distances(z_fc, self.mu_fc)
cluster_distances += self.cluster_distances(z_conv, self.mu_conv)
losses = {'nll_joint_tr': ((- torch.logsumexp(- 0.5 * cluster_distances, dim=1)) - jac) / self.n_total_dims_1d, 'logits_tr': - 0.5 * cluster_distances}
if y is not None:
losses['nll_class_tr'] = ((0.5 * torch.sum(cluster_distances * y, dim=1)) - jac) / self.n_total_dims_1d
losses['cat_ce_tr'] = - torch.sum((torch.log_softmax(- 0.5 * cluster_distances, dim=1)) * y, dim=1)
losses['acc_tr'] = torch.mean((torch.argmax(y, dim=1) == torch.argmax(-cluster_distances, dim=1)).float())
for lname in ['nll_joint_tr', 'nll_class_tr', 'cat_ce_tr', 'acc_tr']:
losses[lname] = torch.mean(losses[lname])
return losses
def mu_pairwise_dist(self):
distances = []
for mu in [self.mu_fc, self.mu_conv]:
mu_i_mu_j = mu.squeeze().mm(mu.squeeze().t())
mu_i_mu_i = torch.sum(mu.squeeze()**2, 1, keepdim=True).expand(self.n_classes, self.n_classes)
dist = mu_i_mu_i + mu_i_mu_i.t() - 2 * mu_i_mu_j
            dist = torch.masked_select(dist, (1 - torch.eye(self.n_classes).cuda()).bool()).clamp(min=0.)
distances.append(dist)
return distances[0] + distances[1]
def validate(self, x, y):
with torch.no_grad():
losses = self.forward(x, y)
nll_joint, nll_class, cat_ce, acc = (losses['nll_joint_tr'], losses['nll_class_tr'], losses['cat_ce_tr'], losses['acc_tr'])
mu_dist = torch.mean(torch.sqrt(self.mu_pairwise_dist()))
return {'nll_joint_val': nll_joint,
'nll_class_val': nll_class,
'cat_ce_val': cat_ce,
'acc_val': acc,
'delta_mu_val': mu_dist}
    def sample(self, y, temperature=1.):
        z = temperature * torch.randn(y.shape[0], self.n_loss_dims_1d).cuda()
        # `self.mu` and `self.inn` are never defined on this class; the
        # class-conditional means live in `self.mu_fc` and the INN is stored
        # as `self.model`, so those are used here.
        mu = torch.sum(y.view(-1, self.n_classes, 1) * self.mu_fc, dim=1)
        z = z + mu
        return self.model(z, rev=True)
def save(self, fname):
if self.mu_low_rank_k > 0:
torch.save({'inn': self.model.state_dict(),
'mu': self.mu_fc,
'mu_t': self.mu_t,
'mu_m': self.mu_m,
'opt': self.optimizer.state_dict()}, fname)
else:
torch.save({'inn': self.model.state_dict(),
'mu': self.mu_fc,
'mu_conv': self.mu_conv,
'opt': self.optimizer.state_dict()}, fname)
def init_from_data(self, data):
self.model.load_state_dict(data['inn'], strict=True)
self.mu_fc.data.copy_(data['mu'].data)
if hasattr(self, "mu_low_rank_k") and self.mu_low_rank_k > 0:
self.mu_t.data.copy_(data['mu_t'].data)
self.mu_m.data.copy_(data['mu_m'].data)
self.calc_mu_conv()
else:
self.mu_conv.data.copy_(data['mu_conv'].data)
        try:
            self.optimizer.load_state_dict(data['opt'])
        except Exception:    # avoid a bare except that would also swallow KeyboardInterrupt
            print('Not loading the optimizer')
def load(self, fname):
data = torch.load(fname)
self.init_from_data(data)
from torch.hub import load_state_dict_from_url
from ..backbones.invertible_resnet import InvertibleResNet
from ..heads.invertible_multiclass_classifier import InvertibleMulticlassClassifier
def _trustworthy_gc(beta, layers, pretrained, progress, pretrained_model_path, **kwargs):
beta_str = "beta_" + str(beta)
# Loading and initializing the models made available under:
# https://heibox.uni-heidelberg.de/d/e7b5ba0d30f24cdca416/
backbone = InvertibleResNet(
64,
clamp=0.7,
act_norm=0.7,
blocks=layers,
strides=[1, 2, 2, 2],
dilations=[1, 1, 1, 1],
permute_soft = False
)
head = InvertibleMulticlassClassifier(
1024,
3072,
224*224*3,
clamp=0.7,
act_norm=0.7,
permute_soft=False
)
model = InvertibleImagenetClassifier(0.0, 0.0, 0.0, 128, [3,224,224], 1000, 3072, 224*224*3, backbone, head, finetune_mu=False, **kwargs)
if pretrained:
if pretrained_model_path is None:
data = load_state_dict_from_url(model_urls[beta_str], progress=progress)
model.init_from_data(data)
else:
model.load(pretrained_model_path)
return model
def trustworthy_gc_beta_0(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(0, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_1(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(1, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_2(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(2, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_4(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(4, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_8(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(8, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_16(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(16, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_32(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(32, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_inf(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc('inf', [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
|
151671
|
import pytest
from emrichen import Template
HASHES = {
'MD5': '8b1a9953c4611296a827abf8c47804d7',
'SHA1': 'f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0',
'SHA256': '185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969',
}
@pytest.mark.parametrize('h', sorted(HASHES.items()), ids=sorted(HASHES))
def test_hash(h):
algo, expected = h
assert Template.parse(f'!{algo} "Hello"').enrich({}) == [expected]
|
151705
|
from distutils.core import setup
from entmax import __version__
setup(name='entmax',
version=__version__,
url="https://github.com/deep-spin/entmax",
author="<NAME>, <NAME>, <NAME>",
author_email="<EMAIL>",
description=("The entmax mapping and its loss, a family of sparse "
"alternatives to softmax."),
license="MIT",
packages=['entmax'],
install_requires=['torch>=1.0'],
python_requires=">=3.5")
|
151721
|
import moai.nn.convolution as mic
import torch
__all__ = [
"StridedConv2d",
]
class StridedConv2d(torch.nn.Module): #TODO: Add optional activation as well?
def __init__(self,
features: int,
kernel_size: int=3,
conv_type: str="conv2d",
stride: int=2,
padding: int=1
):
super(StridedConv2d, self).__init__()
self.conv = mic.make_conv_op(
kernel_size=kernel_size,
dilation=1,
groups=1,
conv_type=conv_type,
in_channels=features,
out_channels=features,
bias=False,
            stride=stride    # was hardcoded to 2, ignoring the constructor's `stride`; `padding` is still unused
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.conv(x)
|
151739
|
from django.http.response import JsonResponse
from django.views.generic.base import View, TemplateView
from django.views.decorators.csrf import csrf_exempt
from PIL import Image, ImageFilter
from tesserocr import PyTessBaseAPI
class OcrFormView(TemplateView):
template_name = 'documents/ocr_form.html'
ocr_form_view = OcrFormView.as_view()
class OcrView(View):
def post(self, request, *args, **kwargs):
with PyTessBaseAPI() as api:
with Image.open(request.FILES['image']) as image:
sharpened_image = image.filter(ImageFilter.SHARPEN)
api.SetImage(sharpened_image)
utf8_text = api.GetUTF8Text()
return JsonResponse({'utf8_text': utf8_text})
ocr_view = csrf_exempt(OcrView.as_view())
|
151742
|
from os import listdir
from os.path import isfile, join
import tensorflow as tf
def get_image(path, height, width, preprocess_fn):
png = path.lower().endswith('png')
img_bytes = tf.read_file(path)
image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)
return preprocess_fn(image, height, width)
def image(batch_size, height, width, path, preprocess_fn, epochs=2, shuffle=True):
filenames = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
if not shuffle:
filenames = sorted(filenames)
png = filenames[0].lower().endswith('png') # If first file is a png, assume they all are
filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle, num_epochs=epochs)
reader = tf.WholeFileReader()
_, img_bytes = reader.read(filename_queue)
image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)
processed_image = preprocess_fn(image, height, width)
return tf.train.batch([processed_image], batch_size, dynamic_pad=True)
|
151761
|
import tests.hakoblog # noqa: F401
from hakoblog.db import DB
from hakoblog.model.user import User
from hakoblog.loader.user import UserLoader
from tests.util import random_string, create_user
def test_find_by_name():
db = DB()
user = create_user()
found_user = UserLoader.find_by_name(db, user.name)
assert isinstance(found_user, User)
assert found_user.name == user.name
not_found_user = UserLoader.find_by_name(db, random_string(10))
assert not_found_user is None
|
151774
|
def solution(l):
parsed = [e.split(".") for e in l]
    # In Python 3, `map` objects cannot be ordered; materialize them so the
    # version components sort numerically.
    toSort = [list(map(int, e)) for e in parsed]
sortedINTs = sorted(toSort)
sortedJoined = [('.'.join(str(ee) for ee in e)) for e in sortedINTs]
return sortedJoined
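# Minimal sketch of the intended behavior (dotted numeric version strings):
if __name__ == '__main__':
    versions = ["1.11", "2.0.0", "1.2", "2", "0.1", "1.2.1", "1.1.1", "2.0"]
    print(solution(versions))
    # -> ['0.1', '1.1.1', '1.2', '1.2.1', '1.11', '2', '2.0', '2.0.0']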
|
151821
|
import logging
from flask_restplus import Resource
from biolink.datamodel.serializers import compact_association_set, association_results
from ontobio.golr.golr_associations import search_associations, GolrFields
from biolink.api.restplus import api
from biolink import USER_AGENT
from biolink.error_handlers import RouteNotImplementedException
MAX_ROWS=10000
log = logging.getLogger(__name__)
parser = api.parser()
parser.add_argument('subject', action='append', help='Entity ids to be examined, e.g. NCBIGene:9342, NCBIGene:7227, NCBIGene:8131, NCBIGene:157570, NCBIGene:51164, NCBIGene:6689, NCBIGene:6387')
parser.add_argument('background', action='append', help='Entity ids in background set, e.g. NCBIGene:84570, NCBIGene:3630; used in over-representation tests')
parser.add_argument('object_category', help='E.g. phenotype, function')
parser.add_argument('object_slim', help='Slim or subset to which the descriptors are to be mapped, NOT IMPLEMENTED')
class EntitySetSummary(Resource):
@api.expect(parser)
    #@api.marshal_with(association)
def get(self):
"""
Summary statistics for objects associated
"""
args = parser.parse_args()
subjects = args.get('subject')
del args['subject']
M=GolrFields()
results = search_associations(
subjects=subjects,
rows=0,
facet_fields=[M.OBJECT_CLOSURE, M.IS_DEFINED_BY],
facet_limit=-1,
user_agent=USER_AGENT,
**args
)
print("RESULTS="+str(results))
obj_count_dict = results['facet_counts'][M.OBJECT_CLOSURE]
del results['facet_counts'][M.OBJECT_CLOSURE]
return {'results':obj_count_dict, 'facets': results['facet_counts']}
class EntitySetAssociations(Resource):
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self):
"""
Returns compact associations for a given input set
"""
args = parser.parse_args()
M=GolrFields()
subjects = args.get('subject')
del args['subject']
results = search_associations(
subjects=subjects,
select_fields=[M.SUBJECT, M.RELATION, M.OBJECT],
use_compact_associations=True,
rows=MAX_ROWS,
facet_fields=[],
user_agent=USER_AGENT,
**args
)
return results
#@api.route('/DEPRECATEDhomologs/')
#class EntitySetHomologsDEPRECATED(Resource):
#
# @api.expect(parser)
# @api.marshal_list_with(association_results)
# #@api.marshal_list_with(compact_association_set)
# def get(self):
# """
# Returns homology associations for a given input set of genes
# """
# args = parser.parse_args()
#
# M=GolrFields()
# rel = 'RO:0002434' # TODO; allow other types
# results = search_associations(subjects=args.get('subject'),
# select_fields=[M.SUBJECT, M.RELATION, M.OBJECT],
# use_compact_associations=True,
# relation=rel,
# rows=MAX_ROWS,
# facet_fields=[],
# **args)
# return results
class EntitySetGraphResource(Resource):
@api.expect(parser)
    #@api.marshal_list_with(association)
def get(self):
"""
TODO Graph object spanning all entities
"""
args = parser.parse_args()
raise RouteNotImplementedException()
|
151844
|
from setuptools import setup, find_packages
__version__ = "1.0.a"
setup(name='dfw',
description='Implementation of the Deep Frank Wolfe (DFW) algorithm',
author='<NAME>',
packages=find_packages(),
license="GNU General Public License",
url='https://github.com/oval-group/dfw',
version=str(__version__),
install_requires=["numpy",
"nltk",
"torchvision>=0.2",
"torch>=1.0",
"tqdm",
"mlogger",
"waitGPU"])
|
151849
|
from pathlib import Path
from zipfile import ZipFile
import requests
class Taxonomy():
TAXONOMIES = {
"2013": "https://www.fsa.go.jp/search/20130821/editaxonomy2013New.zip",
"2014": "https://www.fsa.go.jp/search/20140310/1c.zip",
"2015": "https://www.fsa.go.jp/search/20150310/1c.zip",
"2016": "https://www.fsa.go.jp/search/20160314/1c.zip",
"2017": "https://www.fsa.go.jp/search/20170228/1c.zip",
"2018": "https://www.fsa.go.jp/search/20180228/1c_Taxonomy.zip",
"2019": "https://www.fsa.go.jp/search/20190228/1c_Taxonomy.zip",
"2019_cg_ifrs": "https://www.fsa.go.jp/search/20180316/1c_Taxonomy.zip",
"2020": "https://www.fsa.go.jp/search/20191101/1c_Taxonomy.zip",
"2021": "https://www.fsa.go.jp/search/20201110/1c_Taxonomy.zip"
}
def __init__(self, taxonomy_root):
self.root = taxonomy_root
self.prefix = "http://disclosure.edinet-fsa.go.jp/taxonomy/"
def __reduce_ex__(self, proto):
return type(self), (self.root,)
def download(self, year):
year = str(year)
expand_dir = self.root.joinpath("taxonomy").joinpath(year)
taxonomy_file = self.root.joinpath(f"{year}_taxonomy.zip")
download = False
if not self.root.exists():
self.root.mkdir(parents=True, exist_ok=True)
download = True
if not expand_dir.exists():
expand_dir.mkdir(parents=True, exist_ok=True)
download = True
if download:
# Download
r = requests.get(self.TAXONOMIES[year], stream=True)
with taxonomy_file.open(mode="wb") as f:
for chunk in r.iter_content(1024):
f.write(chunk)
# Extract
with ZipFile(taxonomy_file, "r") as zip:
for f in zip.namelist():
dirs = Path(f).parts
# Avoid Japanese path
taxonomy_at = dirs.index("taxonomy") if "taxonomy" in dirs else -1
if taxonomy_at > 0 and len(dirs) > (taxonomy_at + 1):
dirs = dirs[(dirs.index("taxonomy") + 1):]
_to = expand_dir.joinpath("/".join(dirs))
info = zip.getinfo(f)
if info.is_dir() and not _to.exists():
_to.mkdir(parents=True, exist_ok=True)
else:
_to.parent.mkdir(parents=True, exist_ok=True)
with _to.open("wb") as _to_f:
_to_f.write(zip.read(f))
taxonomy_file.unlink()
return expand_dir
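# Usage sketch (the local path is an assumption): download and unpack the
# 2021 EDINET taxonomy under ./data/taxonomy/2021.
#
#   expand_dir = Taxonomy(Path("./data")).download(2021)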
|
151864
|
import numpy as np
def dtw(series_1, series_2, norm_func = np.linalg.norm):
matrix = np.zeros((len(series_1) + 1, len(series_2) + 1))
matrix[0,:] = np.inf
matrix[:,0] = np.inf
matrix[0,0] = 0
for i, vec1 in enumerate(series_1):
for j, vec2 in enumerate(series_2):
cost = norm_func(vec1 - vec2)
matrix[i + 1, j + 1] = cost + min(matrix[i, j + 1], matrix[i + 1, j], matrix[i, j])
matrix = matrix[1:,1:]
i = matrix.shape[0] - 1
j = matrix.shape[1] - 1
matches = []
mappings_series_1 = [list() for v in range(matrix.shape[0])]
mappings_series_2 = [list() for v in range(matrix.shape[1])]
while i > 0 or j > 0:
matches.append((i, j))
mappings_series_1[i].append(j)
mappings_series_2[j].append(i)
option_diag = matrix[i - 1, j - 1] if i > 0 and j > 0 else np.inf
option_up = matrix[i - 1, j] if i > 0 else np.inf
option_left = matrix[i, j - 1] if j > 0 else np.inf
move = np.argmin([option_diag, option_up, option_left])
if move == 0:
i -= 1
j -= 1
elif move == 1:
i -= 1
else:
j -= 1
matches.append((0, 0))
mappings_series_1[0].append(0)
mappings_series_2[0].append(0)
matches.reverse()
for mp in mappings_series_1:
mp.reverse()
for mp in mappings_series_2:
mp.reverse()
return matches, matrix[-1, -1], mappings_series_1, mappings_series_2, matrix
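# Minimal usage sketch: align two short 1-D series. Scalars are wrapped in
# numpy arrays so `norm_func` receives vector differences.
if __name__ == '__main__':
    s1 = [np.array([v]) for v in (0.0, 1.0, 2.0)]
    s2 = [np.array([v]) for v in (0.0, 0.5, 1.0, 2.0)]
    matches, total_cost, map_1, map_2, cost_matrix = dtw(s1, s2)
    print(total_cost)    # accumulated alignment cost
    print(matches)       # warping path as (i, j) index pairs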
|
151938
|
from ..base import MultiGridEnv, MultiGrid
from ..objects import *
class EmptyMultiGrid(MultiGridEnv):
mission = "get to the green square"
metadata = {}
def _gen_grid(self, width, height):
self.grid = MultiGrid((width, height))
self.grid.wall_rect(0, 0, width, height)
self.put_obj(Goal(color="green", reward=1), width - 2, height - 2)
self.agent_spawn_kwargs = {}
self.place_agents(**self.agent_spawn_kwargs)
|
151949
|
from core.advbase import *
from slot.a import *
def module():
return Yuya
class Yuya(Adv):
a3 = ('primed_crit_chance', 0.05,5)
conf = {}
conf['slots.burn.a'] = Twinfold_Bonds()+Me_and_My_Bestie()
conf['acl'] = """
`dragon, s=1
`s3, not self.s3_buff
`s4
`s1
`fs, x=4
"""
coab = ['Blade', 'Marth', 'Dagger2']
share = ['Gala_Mym']
def prerun(self):
if self.condition('hp60'):
Selfbuff('a1',0.2,-1,'att','passive').on()
else:
Selfbuff('a1',-0.2,-1,'att','passive').on()
def s1_proc(self, e):
Spdbuff(e.name,0.2,10).on()
if __name__ == '__main__':
    import sys    # not otherwise imported in this module; needed for sys.argv
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
151964
|
from typing import List
from ..error.friendly_error import FriendlyError
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
from discord_slash.model import SlashCommandOptionType
from discord_slash.utils.manage_commands import create_option
from googlesearch import search
import modules.search.search_functions as sf
import config
class SearchCog(commands.Cog):
"""Searches Google for links and includes summaries from Wikipedia when relevant"""
def __init__(self, bot):
self.bot = bot
self.last_paragraph = {}
@cog_ext.cog_slash(
name="search",
description="Search the web for anything you want.",
guild_ids=[config.guild_id],
options=[
create_option(
name="query",
description="Your search query",
option_type=SlashCommandOptionType.STRING,
required=True,
),
],
)
async def search(self, ctx: SlashContext, query: str):
await ctx.defer()
links: List[str] = [link for link in search(query) if link.startswith("http")]
if not links:
raise FriendlyError("We searched far and wide, but nothing turned up.", ctx)
wiki_links = [link for link in links if "wikipedia.org" in link[:30]]
wiki_intro = (
sf.get_wiki_intro(wiki_links[0])
if wiki_links and wiki_links[0] != links[0]
else None
)
await ctx.send(content=sf.format_message(query, links[0], wiki_intro))
# setup functions for bot
def setup(bot):
bot.add_cog(SearchCog(bot))
|
151967
|
import os
from collections.abc import Iterable    # moved out of `collections` in Python 3.3; removed in 3.10
def flat(lis):
for item in lis:
if isinstance(item, list):# and not isinstance(item, basestring):
for x in flat(item):
yield x
else:
yield item
def flatten(lis):
return list(flat(lis))
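# e.g. flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]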
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
class UnpicklableObject:
def __init__(self, init_string):
self.init_string = init_string
self.imports = []
def addimport(self,import_string):
self.imports.append(import_string)
def generate(self):
for i in self.imports:
exec(i)
return eval(self.init_string)
def __unicode__(self):
return self.init_string
def __str__(self):
return self.init_string
def __repr__(self):
return self.init_string
|
151978
|
from flask_login import login_user
from flaskbb.forum.models import Topic
def test_guest_user_cannot_see_hidden_posts(guest, topic, user,
request_context):
topic.hide(user)
login_user(guest)
assert Topic.query.filter(Topic.id == topic.id).first() is None
def test_regular_user_cannot_see_hidden_posts(topic, user, request_context):
topic.hide(user)
login_user(user)
assert Topic.query.filter(Topic.id == topic.id).first() is None
def test_moderator_user_can_see_hidden_posts(topic, moderator_user,
request_context):
topic.hide(moderator_user)
login_user(moderator_user)
assert Topic.query.filter(Topic.id == topic.id).first() is not None
def test_super_moderator_user_can_see_hidden_posts(topic, super_moderator_user,
request_context):
topic.hide(super_moderator_user)
login_user(super_moderator_user)
assert Topic.query.filter(Topic.id == topic.id).first() is not None
def test_admin_user_can_see_hidden_posts(topic, admin_user, request_context):
topic.hide(admin_user)
login_user(admin_user)
assert Topic.query.filter(Topic.id == topic.id).first() is not None
|
152013
|
import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + '/', package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
setup(name='docassemble.docusign',
version='0.2.1',
description="Python docassemble package for integrating with DocuSign",
long_description=long_description,
long_description_content_type='text/markdown',
author="<NAME>, <NAME>",
author_email="<EMAIL>",
license='The MIT License (MIT)',
url="https://github.com/radiant-law/docassemble-docusign",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
keywords="docassemble docusign development",
project_urls={
"Radiant Law": "https://radiantlaw.com",
"Source": "https://github.com/radiant-law/docassemble-docusign",
},
packages=find_packages(),
namespace_packages=['docassemble'],
install_requires=['PyJWT', 'requests'],
zip_safe=False,
package_data=find_package_data(where='docassemble/docusign', package='docassemble.docusign'),
)
|
152022
|
from serpent.game_launcher import GameLauncher, GameLauncherException
from serpent.utilities import is_linux, is_macos, is_windows
import shlex
import subprocess
import webbrowser
class SteamGameLauncher(GameLauncher):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def launch(self, **kwargs):
app_id = kwargs.get("app_id")
app_args = kwargs.get("app_args")
if app_id is None:
raise GameLauncherException("An 'app_id' kwarg is required...")
protocol_string = f"steam://run/{app_id}"
if app_args is not None:
args_list = [f"--{k}={v}" for k, v in app_args.items()]
protocol_string += "/en/" + " ".join(args_list)
if is_linux():
subprocess.call(shlex.split(f"xdg-open '{protocol_string}'"))
elif is_macos():
subprocess.call(shlex.split(f"open '{protocol_string}'"))
elif is_windows():
webbrowser.open(f"{protocol_string}")
|
152035
|
import asyncio
import json
import pytest
import privatebinapi
from privatebinapi import common, deletion, download, upload
from tests import MESSAGE, RESPONSE_DATA, SERVERS_AND_FILES
@pytest.mark.parametrize("server, file", SERVERS_AND_FILES)
def test_full(server, file):
send_data = privatebinapi.send(
server, text=MESSAGE, file=file, password='<PASSWORD>', compression=None,
)
get_data = privatebinapi.get(send_data['full_url'], password='<PASSWORD>')
assert get_data['text'] == MESSAGE
if file:
with open(file, 'rb') as file:
assert get_data['attachment']['content'] == file.read()
try:
privatebinapi.delete(send_data['full_url'], send_data['deletetoken'])
except privatebinapi.UnsupportedFeatureError:
pass
# pytest.raises (unlike the try/except-pass pattern used originally) fails
# these tests when the expected error is *not* raised.
def test_bad_compression():
    with pytest.raises(privatebinapi.BadCompressionTypeError):
        privatebinapi.send('', text=MESSAGE, compression='clearly-fake-compression')
def test_bad_expiration():
    with pytest.raises(privatebinapi.BadExpirationTimeError):
        privatebinapi.send('', text=MESSAGE, expiration='clearly-incorrect-expiration')
def test_bad_formatting():
    with pytest.raises(privatebinapi.BadFormatError):
        privatebinapi.send('', text=MESSAGE, formatting='clearly-incorrect-format')
def test_send_nothing():
    with pytest.raises(ValueError):
        privatebinapi.send('')
@pytest.mark.parametrize("server, _", SERVERS_AND_FILES)
@pytest.mark.asyncio
async def test_async_full(server, _):
send_data = await privatebinapi.send_async(server, text=MESSAGE)
get_data = await privatebinapi.get_async(send_data['full_url'])
assert get_data['text'] == MESSAGE
try:
await privatebinapi.delete_async(send_data['full_url'], send_data['deletetoken'])
except privatebinapi.UnsupportedFeatureError:
pass
await asyncio.sleep(0.5)
def test_bad_server():
    with pytest.raises(privatebinapi.BadServerResponseError):
        privatebinapi.send('https://example.com', text=MESSAGE)
class FakeResponse:
url = ''
def __init__(self, error=False):
self.error = error
def json(self):
if self.error:
raise json.JSONDecodeError('', '', 0)
else:
return RESPONSE_DATA
def test_bad_response_verification():
    with pytest.raises(privatebinapi.BadServerResponseError):
        common.verify_response(FakeResponse(error=True))  # noqa
def test_bad_process_result():
    with pytest.raises(privatebinapi.PrivateBinAPIError):
        upload.process_result(FakeResponse(), '')  # noqa
def test_bad_process_url():
    with pytest.raises(ValueError):
        deletion.process_url('https://example.com')
def test_bad_status():
    with pytest.raises(privatebinapi.PrivateBinAPIError):
        common.verify_response(FakeResponse())  # noqa
def test_bad_extract_passphrase():
    with pytest.raises(ValueError):
        download.extract_passphrase('https://www.example.com')
|
152076
|
from django_bleach.models import BleachField
from tinymce.models import HTMLField
from django.conf import settings
from django.core.validators import URLValidator
from django.db.models import URLField
from django.forms.fields import URLField as FormURLField
################
# JobsURLField #
################
JobsURLValidator = URLValidator(schemes=settings.URL_SCHEMES)
class JobsURLFormField(FormURLField):
"""Form URLField with custom SCHEMES from settings.URL_SCHEMES"""
default_validators = [JobsURLValidator]
class JobsURLField(URLField):
"""URLField with custom SCHEMES from settings.URL_SCHEMES"""
default_validators = [JobsURLValidator]
    def formfield(self, **kwargs):
        # Merge caller-supplied kwargs instead of discarding them.
        return super().formfield(**{"form_class": JobsURLFormField, **kwargs})
######################
# SanitizedHTMLField #
######################
class SanitizedHTMLField(HTMLField, BleachField):
description = "Sanitized HTML field"
|
152080
|
import os
import tarfile
from github3 import login
token = os.getenv('GITHUB_TOKEN')
gh = login(token=token)
repo = gh.repository('gamechanger', 'dusty')
version = os.getenv('VERSION')
prerelease = os.getenv('PRERELEASE') == 'true'
release_name = version
release = repo.create_release(version, name=release_name, prerelease=prerelease)
for setup_file in ['com.gamechanger.dusty.plist', 'install.sh']:
with open(os.path.join('setup', setup_file), 'r') as f:
release.upload_asset(content_type='text/plain',
name=setup_file,
asset=f)
for binary in ['dusty']:
    with open(os.path.join('dist', binary), 'rb') as f:    # binary mode for an executable artifact
        release.upload_asset(content_type='application/octet-stream',
                             name=binary,
                             asset=f)
with tarfile.open('dusty.tar.gz', 'w:gz') as tarball:
tarball.add('dist/dusty', arcname='dusty')
tarball.add('setup/com.gamechanger.dusty.plist', arcname='com.gamechanger.dusty.plist')
tarball.add('setup/brew-install.sh', arcname='brew-install.sh')
with open('dusty.tar.gz', 'rb') as f:    # binary mode for the tarball
release.upload_asset(content_type='application/octet-stream',
name='dusty.tar.gz',
asset=f)
|
152101
|
import unittest
import os
import torch
from torch.optim import Optimizer
import apex
from apex.multi_tensor_apply import multi_tensor_applier
from itertools import product
class RefLAMB(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-6)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0.01)
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(RefLAMB, self).__init__(params, defaults)
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm
# Skip buffer
self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=self.param_groups[0]["params"][0].device)
self.multi_tensor_lamb = amp_C.multi_tensor_lamb
else:
raise RuntimeError('apex.optimizers.FusedLAMB requires cuda extensions')
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
# create separate grad lists for fp32 and fp16 params
g_all_32, g_all_16 = [], []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
if p.dtype == torch.float32:
g_all_32.append(p.grad.data)
elif p.dtype == torch.float16:
g_all_16.append(p.grad.data)
else:
                    raise RuntimeError('FusedLAMB only supports fp16 and fp32.')
device = self.param_groups[0]["params"][0].device
g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device)
# compute grad norm for two lists
if len(g_all_32) > 0:
g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[g_all_32], False)[0]
if len(g_all_16) > 0:
g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[g_all_16], False)[0]
# blend two grad norms to get global grad norm
global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm,
self._dummy_overflow_buf,
[[g_norm_32, g_norm_16]],
False)[0]
max_grad_norm = 1.0
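        # Equivalent to clipping the global gradient norm at max_grad_norm:
        # the ratio is 1.0 below the cap and max_grad_norm / norm above it.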
clipped_ratio = max_grad_norm / max(global_grad_norm, max_grad_norm)
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
p.grad.data *= clipped_ratio
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['v'] = torch.zeros_like(p.data)
m_t, v_t = state['m'], state['v']
beta1, beta2 = group['betas']
state['step'] += 1
# m_t = beta1 * m + (1 - beta1) * g_t
m_t.mul_(beta1).add_(grad, alpha=1-beta1)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v_t.mul_(beta2).addcmul_(grad, grad, value=1-beta2)
# Debiasing
m_t_hat = m_t / (1.0 - beta1 ** state['step'])
v_t_hat = v_t / (1.0 - beta2 ** state['step'])
update = m_t_hat / v_t_hat.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
update.add_(p.data, alpha=group['weight_decay'])
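                # LAMB's layer-wise trust ratio: scale the step by
                # ||w|| / ||update|| so each parameter tensor moves in
                # proportion to its own weight norm (1.0 if either norm is 0).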
trust_ratio = 1.0
w_norm = p.data.pow(2).sum().sqrt()
g_norm = update.pow(2).sum().sqrt()
if w_norm > 0 and g_norm > 0:
trust_ratio = w_norm / g_norm
state['w_norm'] = w_norm
state['g_norm'] = g_norm
state['trust_ratio'] = trust_ratio
step_size = group['lr']
p.data.add_(update, alpha=-step_size*trust_ratio)
return loss
class TestLamb(unittest.TestCase):
def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7):
self.max_abs_diff = max_abs_diff
self.max_rel_diff = max_rel_diff
self.iters = iters
torch.cuda.manual_seed(9876)
def tearDown(self):
pass
def gen_param_optim(self, tensors, lamb_option):
ref_param = []
tst_param = []
for tensor in tensors:
ref_param.append(torch.nn.Parameter(tensor.clone()))
tst_param.append(torch.nn.Parameter(tensor.clone()))
ref_optim = self.ref_optim(ref_param, **lamb_option)
tst_optim = self.tst_optim(tst_param, use_nvlamb=True, **lamb_option)
return (ref_param, tst_param, ref_optim, tst_optim)
def gen_grad(self, ref_param, tst_param):
for p_ref, p_tst in zip(ref_param, tst_param):
p_ref.grad = torch.rand_like(p_ref)
p_tst.grad = p_ref.grad
def gen_mixed_grad(self, ref_param, tst_param, scale=1.0):
half_grads = []
for p_ref, _ in zip(ref_param, tst_param):
half_grads.append(torch.rand_like(p_ref).half())
p_ref.grad = half_grads[-1].float() / scale
return half_grads
def get_max_diff(self, ref_param, tst_param):
max_abs_diff = max_rel_diff = 0
for p_ref, p_tst in zip(ref_param, tst_param):
max_abs_diff_p = (p_ref - p_tst).abs().max().item()
max_rel_diff_p = ((p_ref - p_tst) / p_ref).abs().max().item()
if max_abs_diff_p > max_abs_diff: max_abs_diff = max_abs_diff_p
if max_rel_diff_p > max_rel_diff: max_rel_diff = max_rel_diff_p
return max_abs_diff, max_rel_diff
def gen_single_type_test(self, param_type=torch.float, device="cuda"):
nelem = 278011
tensor = torch.rand(nelem, dtype=param_type, device=device)
weight_decay = [0, 0.01]
for wd in weight_decay:
lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':wd}
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], lamb_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
torch.cuda.synchronize()
tst_optim.step()
torch.cuda.synchronize()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
class TestFusedLAMB(TestLamb):
def __init__(self, *args, **kwargs):
super(TestLamb, self).__init__(*args, **kwargs)
self.ref_optim = RefLAMB
self.tst_optim = apex.optimizers.FusedLAMB
def test_float(self):
self.gen_single_type_test(param_type=torch.float)
@unittest.skip("PyTorch optimizer is not numerically correct for fp16")
def test_half(self):
self.gen_single_type_test(param_type=torch.float16)
@unittest.skipIf(torch.cuda.device_count()<2, "more than 1 GPU required")
def test_multi_device(self):
devices = ("cuda:0", "cuda:1")
for current_dev, tensor_dev in product(devices, devices):
with torch.cuda.device(current_dev):
self.gen_single_type_test(param_type=torch.float, device=tensor_dev)
def test_multi_params(self):
sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
weight_decay = [0, 0.01]
for wd in weight_decay:
lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':wd}
tensors = []
for size in sizes:
tensors.append(torch.rand(size, dtype=torch.float, device='cuda'))
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim(tensors, lamb_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_lamb_option(self):
nelem = 1
tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
weight_decay = [0, 0.01]
for wd in weight_decay:
lamb_option = {'lr':0.01, 'betas':(0.6, 0.9), 'eps':3e-06, 'weight_decay':wd}
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], lamb_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
class TestFusedMixedPrecisionLamb(TestLamb):
def __init__(self, *args, **kwargs):
super(TestLamb, self).__init__(*args, **kwargs)
self.ref_optim = RefLAMB
self.tst_optim = apex.optimizers.FusedMixedPrecisionLamb
def test_float(self):
self.gen_single_type_test(param_type=torch.float)
@unittest.skip("PyTorch optimizer is not numerically correct for fp16")
def test_half(self):
self.gen_single_type_test(param_type=torch.float16)
@unittest.skipIf(torch.cuda.device_count()<2, "more than 1 GPU required")
def test_multi_device(self):
devices = ("cuda:0", "cuda:1")
for current_dev, tensor_dev in product(devices, devices):
with torch.cuda.device(current_dev):
self.gen_single_type_test(param_type=torch.float, device=tensor_dev)
def test_multi_params(self):
sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
weight_decay = [0, 0.01]
for wd in weight_decay:
lamb_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08, 'weight_decay':wd}
tensors = []
for size in sizes:
tensors.append(torch.rand(size, dtype=torch.float, device='cuda'))
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim(tensors, lamb_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_lamb_option(self):
nelem = 1
tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
weight_decay = [0, 0.01]
for wd in weight_decay:
lamb_option = {'lr':0.01, 'betas':(0.6, 0.9), 'eps':3e-06, 'weight_decay':wd}
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], lamb_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
if __name__ == '__main__':
script_path = os.path.dirname(os.path.realpath(__file__))
unittest.main()
|
152191
|
import pika
import json
from init_judge import initialize_judge
class authenticate_judge():
username = 'Nouser'
password = '<PASSWORD>'
judge_id = 'NULL'
channel = ''
login_status = ''
key = initialize_judge.key()
my_ip = initialize_judge.my_ip()
def login(channel, host, username, password):
authenticate_judge.channel = channel
authenticate_judge.username = username
authenticate_judge.password = password
username, password, judge_id = initialize_judge.get_credentials()
if judge_id != 'NULL':
authenticate_judge.judge_id = judge_id
print("\n[ VALIDATNG ] : " + authenticate_judge.username + "@" + authenticate_judge.password )
channel.queue_declare(
queue = authenticate_judge.username,
durable=True
)
authenticate_judge.channel.queue_bind(
exchange = 'connection_manager',
queue = authenticate_judge.username
)
message = {
'Client Key': authenticate_judge.key,
'Code': 'LOGIN',
'Username': authenticate_judge.username,
            'Password': authenticate_judge.password,    # reconstructed; the source had an unquoted redaction placeholder here
'ID': authenticate_judge.judge_id,
'Type': 'JUDGE',
'IP' : authenticate_judge.my_ip
}
message = json.dumps(message)
authenticate_judge.channel.basic_publish(
exchange = 'connection_manager',
routing_key = 'client_requests',
body = message
)
print("Request sent for authentication... ")
print("[ LISTENING ]:" + authenticate_judge.username + '@' + authenticate_judge.password )
authenticate_judge.channel.basic_consume(
queue = authenticate_judge.username,
on_message_callback = authenticate_judge.response_handler,
auto_ack = True
)
authenticate_judge.channel.start_consuming()
return authenticate_judge.login_status
def response_handler(ch, method, properties, body):
server_data = body.decode('utf-8')
json_data = json.loads(server_data)
status = json_data['Code']
if( status == 'VALID' ):
print("[STATUS]: " + status )
judge_id = json_data['ID']
authenticate_judge.channel.stop_consuming()
authenticate_judge.login_status = status
initialize_judge.save_details(
authenticate_judge.username,
authenticate_judge.password,
judge_id
)
elif( status == 'INVLD' ):
print("[STATUS] INVALID USER !!!")
authenticate_judge.channel.stop_consuming()
authenticate_judge.channel.queue_delete(
queue = authenticate_judge.username
)
authenticate_judge.login_status = status
elif( status == 'LRJCT'):
authenticate_judge.channel.stop_consuming()
authenticate_judge.channel.queue_delete(
queue = authenticate_judge.username
)
authenticate_judge.login_status = status
def get_judge_details():
return authenticate_judge.judge_id, authenticate_judge.username, authenticate_judge.password
|
152265
|
from django.db import models
from django.conf import settings
from django.utils import timezone
from .allocation import TASAPIDriver
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", 'auth.User')
class TASAllocationReport(models.Model):
"""
Keep track of each Allocation Report that is sent to TACC.API
"""
    user = models.ForeignKey(
        AUTH_USER_MODEL, related_name='tas_reports',
        on_delete=models.CASCADE    # required in Django 2+; CASCADE matches the old implicit default
    )    # User that matches the report
username = models.CharField(max_length=128) # TACC_USERNAME
project_name = models.CharField(
max_length=128
) # TACC_PROJECT_NAME aka OpenStack Tenant Credential
    compute_used = models.DecimalField(
        max_digits=19, decimal_places=3
    )    # up to ~10^16 with a resolution of 3 decimal places
queue_name = models.CharField(max_length=128, default="Atmosphere")
resource_name = models.CharField(max_length=128, default="Jetstream")
scheduler_id = models.CharField(
max_length=128, default="use.jetstream-cloud.org"
)
start_date = models.DateTimeField() # Required
end_date = models.DateTimeField() # Required
# Meta-Metrics
tacc_api = models.CharField(max_length=512)
# FIXME: Save a response confirmation -instead of- success
report_date = models.DateTimeField(blank=True, null=True)
success = models.BooleanField(default=False)
class Meta:
app_label = 'jetstream'
def send(self, use_beta=False):
if not self.id:
raise Exception(
"ERROR -- This report should be *saved* before you send it!"
)
if self.success:
raise Exception(
"ERROR -- This report has already been *saved*! Create a new report!"
)
try:
if use_beta:
from atmosphere.settings.local import BETA_TACC_API_URL, BETA_TACC_API_USER, BETA_TACC_API_PASS
driver = TASAPIDriver(
BETA_TACC_API_URL, BETA_TACC_API_USER, BETA_TACC_API_PASS
)
else:
driver = TASAPIDriver()
success = driver.report_project_allocation(
self.id, self.username, self.project_name,
float(self.compute_used), self.start_date, self.end_date,
self.queue_name, self.scheduler_id
)
self.success = True if success else False
if self.success:
self.report_date = timezone.now()
self.save()
        except Exception:    # avoid a bare except
            return
@property
def cpu_count(self):
"""
NOTE: This is currently not returning the values we expect
Outputs: 0.999, 3.684, 8.999, etc. etc.
Expected Outputs: 1, 3, 9, ...
"""
hours_between = (self.end_date -
self.start_date).total_seconds() / 3600.0
cpu_count = float(self.compute_used) / hours_between
return cpu_count
def __unicode__(self):
"""
"""
duration = self.end_date - self.start_date
return "%s (Username:%s Project:%s) used %s AU over the Duration:%s (%s - %s) Reported:%s" % \
(self.user.username,
self.username, self.project_name,
self.compute_used, duration,
self.end_date, self.start_date,
self.report_date)
|
152273
|
import inflect
def test_an():
p = inflect.engine()
assert p.an("cat") == "a cat"
assert p.an("ant") == "an ant"
assert p.an("a") == "an a"
assert p.an("b") == "a b"
assert p.an("honest cat") == "an honest cat"
assert p.an("dishonest cat") == "a dishonest cat"
assert p.an("Honolulu sunset") == "a Honolulu sunset"
assert p.an("mpeg") == "an mpeg"
assert p.an("onetime holiday") == "a onetime holiday"
assert p.an("Ugandan person") == "a Ugandan person"
assert p.an("Ukranian person") == "a Ukranian person"
assert p.an("Unabomber") == "a Unabomber"
assert p.an("unanimous decision") == "a unanimous decision"
assert p.an("US farmer") == "a US farmer"
assert p.an("wild PIKACHU appeared") == "a wild PIKACHU appeared"
|
152330
|
import logging
import torch
from algorithms.BaseDistanceEmbedder import BaseDistanceEmbedder
class RawDistanceEmbedder(BaseDistanceEmbedder):
"""
Returns the distance as is for embedding
"""
def __init__(self, max_pos=5):
self.max_pos = max_pos
def logger(self):
return logging.getLogger(__name__)
def __call__(self):
position_enc = torch.tensor([[i for i in range(self.max_pos + 1)]]).float()
return position_enc
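# e.g. RawDistanceEmbedder(max_pos=3)() -> tensor([[0., 1., 2., 3.]])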
|
152340
|
from setuptools import setup, find_packages
if __name__ == '__main__':
name = 'ppca'
setup(
name = name,
version = "0.0.4",
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Probabilistic PCA',
packages = find_packages(),
classifiers = [
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: Unix',
'Operating System :: MacOS',
],
setup_requires = [
'setuptools>=3.4.4',
],
install_requires = [
'numpy',
'scipy',
],
)
|
152342
|
from PIL import Image
from io import BytesIO    # Python 3 replacement for the StringIO module used originally
from urllib.request import urlopen    # Python 3 replacement for urllib.urlopen
def resize_and_pad_image(img, output_image_dim):
"""Resize the image to make it IMAGE_DIM x IMAGE_DIM pixels in size.
If an image is not square, it will pad the top/bottom or left/right
with black pixels to ensure the image is square.
Args:
img: the input 3-color image
output_image_dim: resized and padded output length (and width)
Returns:
resized and padded image
"""
old_size = img.size # old_size[0] is in (width, height) format
ratio = float(output_image_dim) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# use thumbnail() or resize() method to resize the input image
# thumbnail is a in-place operation
# im.thumbnail(new_size, Image.ANTIALIAS)
    scaled_img = img.resize(new_size, Image.LANCZOS)    # ANTIALIAS is the pre-Pillow-10 name for LANCZOS
# create a new image and paste the resized on it
padded_img = Image.new("RGB", (output_image_dim, output_image_dim))
padded_img.paste(scaled_img, ((output_image_dim - new_size[0]) // 2,
(output_image_dim - new_size[1]) // 2))
return padded_img
def preprocess_and_encode_images(image_paths, output_image_dim):
"""Read an image, preprocess it, and encode as a jpeg.
The image can be read from either a local path or url.
The image must be RGB format.
Preprocessing involves resizing and padding until the image is exactly
output_image_dim x output_image_dim in size.
After preprocessing, the image is encoded as a jpeg string to reduce the
number of bytes. This jpeg string will be transmitted to the server.
Args:
image_paths: list of image paths and/or urls
output_image_dim: resized and padded output length (and width)
Returns:
the same images as a list of jpeg-encoded strings
"""
jpeg_batch = []
for image_path in image_paths:
image = None
if 'http' in image_path:
            image = Image.open(urlopen(image_path))
else:
image = Image.open(image_path) # Parse the image from your local disk.
# Resize and pad the image
image = resize_and_pad_image(image, output_image_dim)
        jpeg_image = BytesIO()    # JPEG output is bytes, not text
image.save(jpeg_image, format='JPEG')
# Append to features_array
jpeg_batch.append(jpeg_image.getvalue())
return jpeg_batch
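# Usage sketch (file names/URLs are assumptions): encode a mixed batch of
# local and remote images as 224x224 JPEG strings ready to ship to a server.
#
#   batch = preprocess_and_encode_images(
#       ['cat.jpg', 'https://example.com/dog.jpg'], 224)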
|
152349
|
from __future__ import unicode_literals
class WinRMError(Exception):
""""Generic WinRM error"""
code = 500
class WinRMTransportError(Exception):
"""WinRM errors specific to transport-level problems (unexpected HTTP error codes, etc)"""
@property
def protocol(self):
return self.args[0]
@property
def code(self):
return self.args[1]
@property
def message(self):
return 'Bad HTTP response returned from server. Code {0}'.format(self.code)
@property
def response_text(self):
return self.args[2]
def __str__(self):
return self.message
class WinRMOperationTimeoutError(Exception):
"""
Raised when a WinRM-level operation timeout (not a connection-level timeout) has occurred. This is
considered a normal error that should be retried transparently by the client when waiting for output from
a long-running process.
"""
code = 500
class AuthenticationError(WinRMError):
"""Authorization Error"""
code = 401
class BasicAuthDisabledError(AuthenticationError):
message = 'WinRM/HTTP Basic authentication is not enabled on remote host'
class InvalidCredentialsError(AuthenticationError):
pass
|
152404
|
from torch import nn
import torch
class FactorList(nn.Module):
def __init__(self, parameters=None):
super().__init__()
self.keys = []
self.counter = 0
if parameters is not None:
self.extend(parameters)
def _unique_key(self):
"""Creates a new unique key"""
key = f'factor_{self.counter}'
self.counter += 1
return key
def append(self, element):
key = self._unique_key()
setattr(self, key, element)
self.keys.append(key)
def insert(self, index, element):
key = self._unique_key()
setattr(self ,key, element)
self.keys.insert(index, key)
def pop(self, index=-1):
item = self[index]
self.__delitem__(index)
return item
def __getitem__(self, index):
keys = self.keys[index]
if isinstance(keys, list):
return self.__class__([getattr(self, key) for key in keys])
return getattr(self, keys)
def __setitem__(self, index, value):
setattr(self, self.keys[index], value)
def __delitem__(self, index):
delattr(self, self.keys[index])
self.keys.__delitem__(index)
def __len__(self):
return len(self.keys)
def extend(self, parameters):
for param in parameters:
self.append(param)
    def __iadd__(self, parameters):
        self.extend(parameters)    # extend() returns None, so return self explicitly
        return self
def __add__(self, parameters):
instance = self.__class__(self)
instance.extend(parameters)
return instance
def __radd__(self, parameters):
instance = self.__class__(parameters)
instance.extend(self)
return instance
def extra_repr(self) -> str:
child_lines = []
for k, p in self._parameters.items():
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ParameterList(nn.Module):
def __init__(self, parameters=None):
super().__init__()
self.keys = []
self.counter = 0
if parameters is not None:
self.extend(parameters)
def _unique_key(self):
"""Creates a new unique key"""
key = f'param_{self.counter}'
self.counter += 1
return key
def append(self, element):
# p = nn.Parameter(element)
key = self._unique_key()
self.register_parameter(key, element)
self.keys.append(key)
def insert(self, index, element):
# p = nn.Parameter(element)
key = self._unique_key()
self.register_parameter(key, element)
self.keys.insert(index, key)
def pop(self, index=-1):
item = self[index]
self.__delitem__(index)
return item
def __getitem__(self, index):
keys = self.keys[index]
if isinstance(keys, list):
return self.__class__([getattr(self, key) for key in keys])
return getattr(self, keys)
def __setitem__(self, index, value):
self.register_parameter(self.keys[index], value)
def __delitem__(self, index):
delattr(self, self.keys[index])
self.keys.__delitem__(index)
def __len__(self):
return len(self.keys)
def extend(self, parameters):
for param in parameters:
self.append(param)
    def __iadd__(self, parameters):
        self.extend(parameters)    # extend() returns None, so return self explicitly
        return self
def extra_repr(self) -> str:
child_lines = []
for k, p in self._parameters.items():
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
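# Minimal sketch (names are assumptions): both containers register their
# elements as attributes/parameters, so they follow .to(device) and appear
# in state_dict().
#
#   plist = ParameterList([nn.Parameter(torch.zeros(3)) for _ in range(2)])
#   plist.insert(1, nn.Parameter(torch.ones(3)))
#   assert len(plist) == 3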
|
152411
|
from flask import redirect, render_template, flash, g, session, url_for, request, jsonify
from flask_login import current_user, login_required    # flask.ext.* import shims were removed from Flask
from . import main
from .forms import TodoForm
from .. import db
from ..models import User, Todo
from collections import Counter
@main.app_errorhandler(404)
def page_not_found(e):
return redirect(url_for('main.index'))
@main.app_errorhandler(500)
def internal_server_error(e):    # renamed so it no longer shadows page_not_found above
    return redirect(url_for('main.index'))
@main.route('/',methods=["GET","POST"])
def index():
    if current_user.is_authenticated:    # a property, not a method, in current Flask-Login
url = url_for('main.profile')
return redirect(url)
return render_template("index.html")
@main.route('/profile',methods=["GET","POST"])
@login_required
def profile():
form = TodoForm()
user_todo_all = sorted(current_user.todo.all(), key=lambda x: x.created_at)
    # List comprehensions instead of `filter`: in Python 3, filter returns a
    # one-shot iterator, which would break the len() call and reuse below.
    user_todo = [x for x in user_todo_all if not x.done]
    user_todo_done = [x for x in user_todo_all if x.done]
user_hashtags_raw = [x.hashtag for x in user_todo]
user_hashtags_split = [split_tag for hashtag in user_hashtags_raw for split_tag in hashtag.replace("#"," #").split() if split_tag != ""]
user_todo_hashtags = Counter(user_hashtags_split)
tags_size = len(user_todo)
del user_todo_hashtags[""]
if request.method == "POST" and form.validate_on_submit():
parsed_todo = parse_todo(form.todo.data)
newtodo = Todo(parsed_todo[0], parsed_todo[1]).save()
form.todo.data = ""
url = url_for('main.profile')
return redirect(url)
return render_template("profile.html", form = form, td = user_todo, hsh = user_todo_hashtags, sz = tags_size, tdone = user_todo_done)
def parse_todo(todo):
todo = todo.strip().split()
words = " ".join(filter(lambda x: not x.startswith("#"), todo))
tags = "".join(filter(lambda x: x.startswith("#"), todo))
return words, tags
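# e.g. parse_todo("buy milk #errands #home") -> ("buy milk", "#errands#home")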
@main.route('/done/<id>', methods=["POST"])
@login_required
def done(id):
    iid = int(id)
    current_user.todo.filter_by(id=iid).first().toggleDone()
db.session.commit()
return url_for('main.profile')
@main.route('/deleteTodo/<id>', methods=["POST"])
@login_required
def deleteTodo(id):
iid = int(id)
db.session.query(Todo).filter_by(id = iid).delete()
db.session.commit()
return url_for('main.profile')
@main.route('/edit', methods=["POST"])
@login_required
def editTodo():
id = request.json['id']
task_input = request.json['task']
task, hashtags = parse_todo(task_input)
current_user.todo.filter_by(id = id).first().editTodo(task, hashtags).save()
db.session.commit()
resp = {'url':url_for('main.profile'), 'task':task, 'hashtags':hashtags}
return jsonify(resp)
|
152420
|
import sys
from unittest import TestCase, main
from unittest.mock import patch
from pymongo.errors import ConnectionFailure
import ming
from ming import Session
from ming import mim
from ming import create_datastore, create_engine
from ming.datastore import Engine
from ming.exc import MingConfigError
class DummyConnection(object):
    def __init__(self, *args, **kwargs):
pass
class TestEngineConnection(TestCase):
@patch('ming.datastore.MongoClient', spec=True)
def test_normal(self, MockConnection):
from pymongo import MongoClient
result = create_engine('master')
conn = result.connect()
assert isinstance(conn, MongoClient)
@patch('ming.datastore.MongoClient', spec=True)
def test_get_db(self, MockConnection):
from pymongo import MongoClient
result = create_engine('master')
conn = result.connect()
assert isinstance(conn, MongoClient)
self.assertEqual(conn.read_preference, result.read_preference)
class TestConnectionFailure(TestCase):
def test_connect(self):
failures = [ 0 ]
def Connection(*a,**kw):
failures[0] += 1
raise ConnectionFailure()
engine = Engine(Connection, (), {}, 17, True,
_sleep=lambda x:None)
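        # The fourth positional argument (17) is the retry count: one initial
        # attempt plus 17 retries yields the 18 calls asserted below.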
self.assertRaises(ConnectionFailure, engine.connect)
self.assertEqual(failures[0], 18)
class TestEngineMim(TestCase):
def test_mim(self):
with patch('ming.datastore.mim.Connection', spec=True) as Connection:
result = create_engine('mim:///')
conn = result.connect()
assert conn is Connection.get()
class TestReplicaSet(TestCase):
@patch('ming.datastore.MongoClient', spec=True)
def test_replica_set(self, MockConn):
from pymongo import MongoClient
result = create_engine(
'mongodb://localhost:23,localhost:27017,localhost:999/',
replicaSet='foo')
conn = result.connect()
assert isinstance(conn, MongoClient)
class TestDatastore(TestCase):
def setUp(self):
self.patcher_conn = patch('ming.datastore.MongoClient')
self.MockConn = self.patcher_conn.start()
def tearDown(self):
self.patcher_conn.stop()
def test_one_uri(self):
self._check_datastore(
create_datastore('mongodb://localhost/test_db'),
'test_db')
def test_engine_with_name(self):
self._check_datastore(
create_datastore('test_db', bind=create_engine('master')),
'test_db')
def test_database_only(self):
self._check_datastore(
create_datastore('test_db'),
'test_db')
@patch('ming.datastore.MongoClient', spec=True)
def test_replica_set(self, MockConn):
from pymongo import MongoClient
result = create_datastore(
'mongodb://localhost:23,localhost:27017,localhost:999/test_db',
replicaSet='foo')
print(result.bind._conn_args[0])
assert result.bind._conn_args[0].startswith('mongodb://localhost:23,localhost:27017,localhost:999')
@patch('ming.datastore.MongoClient', spec=True)
def test_configure_no_formencode(self, Connection):
with patch.dict(sys.modules, {"formencode": None}):
self.assertRaises(
MingConfigError,
ming.configure,
**{
"ming.main.uri": "mongodb://localhost:27017/test_db",
"ming.main.connect_retry": 1,
"ming.main.tz_aware": False,
}
)
@patch('ming.datastore.MongoClient', spec=True)
def test_configure_no_formencode_variabledecode(self, Connection):
with patch.dict(sys.modules, {"formencode.variabledecode": None}):
self.assertRaises(
MingConfigError,
ming.configure,
**{
"ming.main.uri": "mongodb://localhost:27017/test_db",
"ming.main.connect_retry": 1,
"ming.main.tz_aware": False,
}
)
@patch('ming.datastore.MongoClient', spec=True)
def test_configure(self, Connection):
ming.configure(**{
'ming.main.uri':'mongodb://localhost:27017/test_db',
'ming.main.connect_retry': 1,
'ming.main.tz_aware': False,
})
session = Session.by_name('main')
assert session.bind.conn is not None
assert session.bind.db is not None
assert session.bind.bind._auto_ensure_indexes
args, kwargs = Connection.call_args
assert 'database' not in kwargs
@patch('ming.datastore.MongoClient', spec=True)
def test_configure_with_database(self, Connection):
ming.configure(
**{
"ming.main.uri": "mongodb://localhost:27017/test_db",
"ming.main.database": "another_test_db",
"ming.main.connect_retry": 1,
"ming.main.tz_aware": False,
}
)
session = Session.by_name("main")
assert session.bind.conn is not None
assert session.bind.db is not None
assert session.bind.bind._auto_ensure_indexes
args, kwargs = Connection.call_args
assert "database" in kwargs
@patch('ming.datastore.MongoClient', spec=True)
def test_configure_auto_ensure_indexes(self, Connection):
ming.configure(**{
'ming.main.uri':'mongodb://localhost:27017/test_db',
'ming.main.connect_retry': 1,
'ming.main.tz_aware': False,
'ming.main.auto_ensure_indexes': 'False',
})
session = Session.by_name('main')
assert session.bind.conn is not None
assert session.bind.db is not None
assert not session.bind.bind._auto_ensure_indexes
args, kwargs = Connection.call_args
assert 'database' not in kwargs
@patch('ming.datastore.MongoClient', spec=True)
def test_configure_optional_params(self, Connection):
ming.configure(**{
'ming.main.uri':'mongodb://localhost:27017/test_db',
'ming.main.replicaSet': 'foobar',
'ming.main.w': 2,
'ming.main.ssl': True,
})
session = Session.by_name('main')
assert session.bind.conn is not None
assert session.bind.db is not None
def test_no_kwargs_with_bind(self):
self.assertRaises(
ming.exc.MingConfigError,
create_datastore,
'test_db', bind=create_engine('master'), replicaSet='foo')
def test_mim_ds(self):
ds = create_datastore('mim:///test_db')
conn = ds.bind.connect()
assert conn is mim.Connection.get()
def test_create_datastore_bind_not_allowed(self):
self.assertRaises(
ming.exc.MingConfigError,
create_datastore,
'mim://test_db', bind=create_engine('master'))
def _check_datastore(self, ds, db_name):
assert ds.db is self.MockConn()[db_name]
assert ds.name == db_name
if __name__ == '__main__':
main()
|
152421
|
import inspect
import json
import os
from pytube import YouTube, Playlist
from pytube.exceptions import RegexMatchError, PytubeError
CONFIGURATIONS = {'destination_path': '', 'video_quality': '',
'audio_quality': '', 'when_unavailable': ''}
CONFIGS_FILE = 'configs.json'
# Module-level download target; main() sets it from the config or user input.
destination_path = "./"
def create_config_file():
with open(CONFIGS_FILE, 'w') as config_file:
config_data = {'destination_path': '', 'video_quality': '',
'audio_quality': '', 'when_unavailable': 'Highest'}
json.dump(config_data, config_file)
def load_config_file():
with open(CONFIGS_FILE) as config_file:
config_data = json.load(config_file)
CONFIGURATIONS['destination_path'] = config_data['destination_path']
CONFIGURATIONS['video_quality'] = config_data['video_quality']
CONFIGURATIONS['audio_quality'] = config_data['audio_quality']
CONFIGURATIONS['when_unavailable'] = config_data['when_unavailable']
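# The resulting configs.json is a flat JSON object, e.g.:
#   {"destination_path": "", "video_quality": "", "audio_quality": "",
#    "when_unavailable": "Highest"}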
if not os.path.exists(CONFIGS_FILE):
create_config_file()
load_config_file()
def main():
    global destination_path
    try:
        if not CONFIGURATIONS['destination_path']:
            print("A default path can be set in the settings menu.")
            destination_path = input(
                "\nInsert a destination path for the downloaded media ")
            if not destination_path:
                destination_path = "./"
        else:
            destination_path = CONFIGURATIONS['destination_path']
        start()
    except KeyboardInterrupt:
        exit()
def start():
clear_terminal()
print("\tYouTube Downloader\n\n")
    menu_option = input(
        "Select an option to continue\n\n\t1) Start Downloading\n\t2) Settings\n\t3) Help\n\t4) Exit\n").lower()
if menu_option in ['1', '1)', 'start downloading']:
return downloads_menu()
elif menu_option in ['2', '2)', 'settings']:
return settings_menu()
elif menu_option in ['3', '3)', 'help']:
return help_menu()
elif menu_option in ['4', '4)', 'exit']:
return exit()
else:
return handle_invalid_input()
def downloads_menu():
clear_terminal()
print("\tDownloads Menu\n\n")
download_source_url = ""
try:
download_source_url = input("Input the download source url ")
if not download_source_url:
return handle_invalid_input()
except KeyboardInterrupt:
return start()
if not validate_youtube_url(download_source_url):
return start()
pytube_object = YouTube(download_source_url)
playlist_videos = look_for_playlist(pytube_object)
format_selection = input(
"\n\nSelect a download option\n\t1) Audio only\n\t2) Video and audio\n")
    # a playlist URL yields its videos; otherwise download the single video
    if format_selection in ['1', '1)']:
        if playlist_videos:
            for element in playlist_videos:
                download_audio(element)
        else:
            download_audio(pytube_object)
    elif format_selection in ['2', '2)']:
        if playlist_videos:
            for element in playlist_videos:
                download_video(element)
        else:
            download_video(pytube_object)
    else:
        return handle_invalid_input()
input("\nPress enter to continue...")
return start()
def look_for_playlist(pytube_object):
if validate_playlist(pytube_object.watch_url):
pytube_object = Playlist(pytube_object.watch_url)
return pytube_object.videos
return []
def validate_youtube_url(url):
try:
YouTube(url)
return True
except RegexMatchError as e:
input(
f"Error: An invalid URL has been inserted.\n{e}\n\nPress enter to continue...")
return False
def validate_playlist(url):
try:
Playlist(url)
return True
except KeyError:
return False
def download_audio(pytube_object):
print(f"\nDownloading {pytube_object.title}")
try:
if not CONFIGURATIONS['audio_quality']:
unavailable_audio(pytube_object)
else:
default_quality = CONFIGURATIONS['audio_quality'] + 'kbps'
filtered_pytube_object = pytube_object.streams.filter(
type='audio', abr=default_quality,
mime_type='audio/mp4').order_by('abr').desc()
if not filtered_pytube_object:
when_unavailable = CONFIGURATIONS['when_unavailable']
print(
f"\nDefault quality isn't available. {when_unavailable}" +
" quality will be downloaded.")
return unavailable_audio(pytube_object)
filtered_pytube_object = filtered_pytube_object[0]
            name_with_resolution = filtered_pytube_object.title + \
                " " + filtered_pytube_object.abr + ".mp4"
            target = os.path.join(destination_path, name_with_resolution)
            if os.path.isfile(target):
                print(
                    f"\nWarning: {name_with_resolution} already exists on this path.")
                return
            filtered_pytube_object.download(destination_path)
            # move the freshly downloaded file to its quality-tagged name
            os.rename(os.path.join(destination_path,
                                   filtered_pytube_object.title + ".mp4"),
                      target)
            print(f"\n{pytube_object.title} downloaded successfully.")
except (IOError, OSError, PytubeError) as e:
print(f"{pytube_object.title} couldn't be downloaded.\n{e}\n")
return
def unavailable_audio(pytube_object):
if CONFIGURATIONS['when_unavailable'] == "Highest":
pytube_object = pytube_object.streams.filter(type='audio', mime_type='audio/mp4')
pytube_object = pytube_object.order_by('abr').desc()[0]
else:
pytube_object = pytube_object.streams.filter(type='audio', mime_type='audio/mp4')
pytube_object = pytube_object.order_by('abr')[0]
    name_with_resolution = pytube_object.title + " " + pytube_object.abr + ".mp4"
    target = os.path.join(destination_path, name_with_resolution)
    if os.path.isfile(target):
        print(
            f"\nWarning: {name_with_resolution} already exists on this path.")
        return
    pytube_object.download(destination_path)
    os.rename(os.path.join(destination_path, pytube_object.title + ".mp4"),
              target)
    print(f"\n{pytube_object.title} downloaded successfully.")
def download_video(pytube_object):
print(f"\nDownloading {pytube_object.title}")
try:
if not CONFIGURATIONS['video_quality']:
unavailable_video(pytube_object)
else:
default_quality = CONFIGURATIONS['video_quality'] + 'p'
filtered_pytube_object = pytube_object.streams.filter(
type='video', res=default_quality,
mime_type='video/mp4',
progressive='True').order_by('resolution').desc()
if not filtered_pytube_object:
when_unavailable = CONFIGURATIONS['when_unavailable']
print(
f"\nDefault quality isn't available. {when_unavailable}" +
" quality will be downloaded.")
return unavailable_video(pytube_object)
filtered_pytube_object = filtered_pytube_object[0]
            name_with_resolution = filtered_pytube_object.title + \
                " " + filtered_pytube_object.resolution + ".mp4"
            target = os.path.join(destination_path, name_with_resolution)
            if os.path.isfile(target):
                print(
                    f"\nWarning: {name_with_resolution} already exists on this path.")
                return
            filtered_pytube_object.download(destination_path)
            # move the freshly downloaded file to its resolution-tagged name
            os.rename(os.path.join(destination_path,
                                   filtered_pytube_object.title + ".mp4"),
                      target)
            print(f"\n{pytube_object.title} downloaded successfully.")
except (IOError, OSError, PytubeError) as e:
print(f"{pytube_object.title} couldn't be downloaded.\n{e}\n")
return
def unavailable_video(pytube_object):
if CONFIGURATIONS['when_unavailable'] == "Highest":
pytube_object = pytube_object.streams.filter(type='video',
mime_type='video/mp4',
progressive='True')
pytube_object = pytube_object.order_by('resolution').desc()[0]
else:
pytube_object = pytube_object.streams.filter(type='video',
mime_type='video/mp4',
progressive='True')
pytube_object = pytube_object.order_by('resolution')[0]
    name_with_resolution = pytube_object.title + \
        " " + pytube_object.resolution + ".mp4"
    target = os.path.join(destination_path, name_with_resolution)
    if os.path.isfile(target):
        print(
            f"\nWarning: {name_with_resolution} already exists on this path.")
        return
    pytube_object.download(destination_path)
    os.rename(os.path.join(destination_path, pytube_object.title + ".mp4"),
              target)
    print(f"\n{pytube_object.title} downloaded successfully.")
def settings_menu():
clear_terminal()
    selected_option = input(
        "\tSettings Menu\n\nSelect an option to continue" +
        "\n\n\t1) List actual settings" +
        "\n\t2) Set destination path\n\t3) Set qualities\n\t4) Go back\n").lower().replace(" ", "")
if selected_option in ["1", "listactualsettings"]:
return list_settings()
elif selected_option in ["2", "setdestinationpath"]:
set_destination_path()
elif selected_option in ["3", "setqualities"]:
set_qualities()
elif selected_option in ["4", "goback"]:
return start()
else:
return handle_invalid_input()
def set_destination_path():
clear_terminal()
default_destination_path = input(
"\n\nInsert the default destination path ")
if not default_destination_path:
default_destination_path = "./"
if (os.path.exists(default_destination_path) or
os.access(os.path.dirname(default_destination_path), os.W_OK)):
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['destination_path'] = default_destination_path
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
else:
return handle_invalid_input()
return settings_menu()
def set_qualities():
clear_terminal()
video_qualities = ["1080", "720", "480", "360", "144"]
audio_qualities = ["160", "128", "70", "50"]
print("\n\n\t\tTo go back leave both in blank.")
default_video_quality = input(
"\n\tSelect the default video quality \n1) 1080px\n2) 720px\n3) 480px\n4) 360px\n5) 144px\n")
default_audio_quality = input(
"\n\tSelect the default audio quality \n1) 160kbps\n2) 128kbps\n3) 70kbps\n4) 50kbps\n")
if default_video_quality in ["1", "2", "3", "4", "5"]:
default_video_quality = video_qualities[int(default_video_quality) - 1]
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['video_quality'] = default_video_quality
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
if default_audio_quality in ["1", "2", "3", "4"]:
default_audio_quality = audio_qualities[int(
default_audio_quality) - 1]
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['audio_quality'] = default_audio_quality
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
elif default_video_quality == "" and default_audio_quality == "":
return settings_menu()
else:
return handle_invalid_input()
set_default_when_unavailable()
return settings_menu()
def set_default_when_unavailable():
clear_terminal()
print(f"\t\tIf the default quality selected isn't " +
"available then the highest quality will be downloaded.")
change_default = input(
f"\n\nSet lowest quality as default if" +
" default one is unavailable\n\n\tYes\n\tNo\n").lower()
if change_default in ["yes", "y"]:
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['when_unavailable'] = 'Lowest'
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
elif change_default in ["no", "n"]:
return
else:
return handle_invalid_input()
def list_settings():
clear_terminal()
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
for setting, value in config_data.items():
print(f"{setting.capitalize().replace('_', ' ')} = {value}")
input("\n\nPress enter to continue...")
return settings_menu()
def help_menu():
clear_terminal()
input("Sorry, this menu is being developed\nPress enter to continue...")
return start()
def exit():
clear_terminal()
print("YouTube Downloader has been closed.")
def handle_invalid_input():
    input("\n\nError: Invalid input.\nPress enter to continue...")
    clear_terminal()
    # Re-invoke whichever menu function called this one, looked up by name
    # from the call stack.
    return globals()[inspect.stack()[1][3]]()
def clear_terminal():
return os.system('cls' if os.name == 'nt' else 'clear')
if __name__ == '__main__':
main()
|
152434
|
from sevenbridges.meta.fields import IntegerField, DateTimeField
from sevenbridges.meta.resource import Resource
class Rate(Resource):
"""
Rate resource.
"""
limit = IntegerField(read_only=True)
remaining = IntegerField(read_only=True)
reset = DateTimeField(read_only=True)
def __str__(self):
return f'<Rate: limit={self.limit}, remaining={self.remaining}>'
|
152462
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.animation
import json
import nibabel as nib
from scipy.ndimage import zoom
def save_history(filename, trainer):
"""Save the history from a torchsample trainer to file."""
with open(filename, 'w+') as f:
json.dump(trainer.history.epoch_metrics, f)
def load_history(filename):
"""Load the history from a torchsample trainer from file."""
with open(filename) as f:
return json.load(f)
def plot_learning_curve(history):
"""
Plot loss and accuracy over epochs, as recorded in a History object
from training with keras or torchsample.
"""
# noinspection PyTypeChecker
fig, axes = plt.subplots(2, sharex=True, figsize=(10, 7))
epochs = range(1, len(history['loss']) + 1)
plt.sca(axes[0])
plt.grid()
plt.plot(epochs, history['loss'], 'b-', label='Train')
try:
plt.plot(epochs, history['val_loss'], 'b--', label='Val')
except KeyError:
pass
plt.ylabel('Loss')
plt.ylim(0, 1.5)
plt.legend()
plt.sca(axes[1])
plt.grid()
plt.plot(epochs, history['acc_metric'], 'r-', label='Train')
try:
plt.plot(epochs, history['val_acc_metric'], 'r--', label='Val')
except KeyError:
pass
plt.xlabel('Epoch')
plt.ylabel('Accuracy / %')
plt.legend()
def load_nifti(file_path, mask=None, z_factor=None, remove_nan=True):
"""Load a 3D array from a NIFTI file."""
img = nib.load(file_path)
    # nibabel deprecated and removed get_data(); np.asanyarray(img.dataobj)
    # is the recommended replacement and preserves the on-disk dtype.
    struct_arr = np.asanyarray(img.dataobj)
if remove_nan:
struct_arr = np.nan_to_num(struct_arr)
if mask is not None:
struct_arr *= mask
if z_factor is not None:
struct_arr = np.around(zoom(struct_arr, z_factor), 0)
return struct_arr
def save_nifti(file_path, struct_arr):
"""Save a 3D array to a NIFTI file."""
img = nib.Nifti1Image(struct_arr, np.eye(4))
nib.save(img, file_path)
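# Minimal usage sketch (hypothetical file paths):
#   arr = load_nifti('scan.nii.gz', z_factor=0.5)  # load and downsample
#   save_nifti('scan_small.nii.gz', arr)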
# Transparent colormap (alpha to red), that is used for plotting an overlay.
# See https://stackoverflow.com/questions/37327308/add-alpha-to-an-existing-matplotlib-colormap
alpha_to_red_cmap = np.zeros((256, 4))
alpha_to_red_cmap[:, 0] = 0.8
alpha_to_red_cmap[:, -1] = np.linspace(0, 1, 256)  # alpha values
alpha_to_red_cmap = mpl.colors.ListedColormap(alpha_to_red_cmap)
red_to_alpha_cmap = np.zeros((256, 4))
red_to_alpha_cmap[:, 0] = 0.8
red_to_alpha_cmap[:, -1] = np.linspace(1, 0, 256)  # alpha values
red_to_alpha_cmap = mpl.colors.ListedColormap(red_to_alpha_cmap)
def plot_slices(struct_arr, num_slices=7, cmap='gray', vmin=None, vmax=None, overlay=None,
overlay_cmap=alpha_to_red_cmap, overlay_vmin=None, overlay_vmax=None):
"""
Plot equally spaced slices of a 3D image (and an overlay) along every axis
Args:
struct_arr (3D array or tensor): The 3D array to plot (usually from a nifti file).
num_slices (int): The number of slices to plot for each dimension.
cmap: The colormap for the image (default: `'gray'`).
vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `struct_arr`.
vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `struct_arr`.
overlay (3D array or tensor): The 3D array to plot as an overlay on top of the image. Same size as `struct_arr`.
        overlay_cmap: The colormap for the overlay (default: `alpha_to_red_cmap`).
overlay_vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `overlay`.
overlay_vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `overlay`.
"""
if vmin is None:
vmin = struct_arr.min()
if vmax is None:
vmax = struct_arr.max()
if overlay_vmin is None and overlay is not None:
overlay_vmin = overlay.min()
if overlay_vmax is None and overlay is not None:
overlay_vmax = overlay.max()
print(vmin, vmax, overlay_vmin, overlay_vmax)
fig, axes = plt.subplots(3, num_slices, figsize=(15, 6))
intervals = np.asarray(struct_arr.shape) / num_slices
for axis, axis_label in zip([0, 1, 2], ['x', 'y', 'z']):
for i, ax in enumerate(axes[axis]):
i_slice = int(np.round(intervals[axis] / 2 + i * intervals[axis]))
# print(axis_label, 'plotting slice', i_slice)
plt.sca(ax)
plt.axis('off')
plt.imshow(sp.ndimage.rotate(np.take(struct_arr, i_slice, axis=axis), 90), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=None)
plt.text(0.03, 0.97, '{}={}'.format(axis_label, i_slice), color='white',
horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
if overlay is not None:
plt.imshow(sp.ndimage.rotate(np.take(overlay, i_slice, axis=axis), 90), cmap=overlay_cmap,
vmin=overlay_vmin, vmax=overlay_vmax, interpolation=None)
def animate_slices(struct_arr, overlay=None, axis=0, reverse_direction=False, interval=40, vmin=None, vmax=None,
overlay_vmin=None, overlay_vmax=None):
"""
Create a matplotlib animation that moves through a 3D image along a specified axis.
"""
if vmin is None:
vmin = struct_arr.min()
if vmax is None:
vmax = struct_arr.max()
if overlay_vmin is None and overlay is not None:
overlay_vmin = overlay.min()
if overlay_vmax is None and overlay is not None:
overlay_vmax = overlay.max()
fig, ax = plt.subplots()
axis_label = ['x', 'y', 'z'][axis]
# TODO: If I select slice 50 here at the beginning, the plots look different.
im = ax.imshow(np.take(struct_arr, 0, axis=axis), vmin=vmin, vmax=vmax, cmap='gray', interpolation=None,
animated=True)
if overlay is not None:
im_overlay = ax.imshow(np.take(overlay, 0, axis=axis), vmin=overlay_vmin, vmax=overlay_vmax,
cmap=alpha_to_red_cmap, interpolation=None, animated=True)
text = ax.text(0.03, 0.97, '{}={}'.format(axis_label, 0), color='white',
horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
ax.axis('off')
def update(i):
im.set_array(np.take(struct_arr, i, axis=axis))
if overlay is not None:
im_overlay.set_array(np.take(overlay, i, axis=axis))
text.set_text('{}={}'.format(axis_label, i))
return im, text
num_frames = struct_arr.shape[axis]
if reverse_direction:
        frames = np.arange(num_frames - 1, -1, -1)  # include frame 0
else:
frames = np.arange(0, num_frames)
# noinspection PyTypeChecker
return mpl.animation.FuncAnimation(fig, update, frames=frames, interval=interval, blit=True)
def resize_image(img, size, interpolation=0):
"""Resize img to size. Interpolation between 0 (no interpolation) and 5 (maximum interpolation)."""
zoom_factors = np.asarray(size) / np.asarray(img.shape)
return sp.ndimage.zoom(img, zoom_factors, order=interpolation)
|
152480
|
import os
class Config(object):
SECRET_KEY = os.urandom(32)
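    # Note: os.urandom generates a fresh key on every restart, which
    # invalidates existing sessions; use a fixed key in production.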
BOOTSTRAP_SERVE_LOCAL = True
SQLALCHEMY_DATABASE_URI = "sqlite:///app.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
152481
|
import calendar
import json
from json.decoder import JSONDecodeError
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Case, IntegerField, Q, Value, When
from django.http import HttpResponseServerError
from django.http.response import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect, render
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
from oauth2_provider.models import get_access_token_model
from oauth2_provider.views import IntrospectTokenView
from oauth2_provider.views.mixins import ProtectedResourceMixin
from rest_framework import generics, viewsets
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView, Response
from rest_framework_api_key.permissions import HasAPIKey
from sentry_sdk import capture_message
from accounts.models import Major, School, User
from accounts.serializers import (
EmailSerializer,
MajorSerializer,
PhoneNumberSerializer,
SchoolSerializer,
UserSearchSerializer,
UserSerializer,
)
from accounts.verification import sendEmailVerification, sendSMSVerification
class LoginView(View):
"""
Log in a user.
WARNING: You must ensure this page is protected by Shibboleth and Clean Headers
See https://github.com/nginx-shib/nginx-http-shibboleth
"""
def get(self, request):
pennid = int(request.META.get("HTTP_EMPLOYEENUMBER", "-1"))
pennkey = request.META.get("HTTP_EPPN", "").lower().split("@")[0]
first_name = request.META.get("HTTP_GIVENNAME", "").title()
last_name = request.META.get("HTTP_SN", "").lower().title()
affiliation = request.META.get("HTTP_UNSCOPED_AFFILIATION", "").split(";")
shibboleth_attributes = {
"username": pennkey,
"first_name": first_name,
"last_name": last_name,
"affiliation": affiliation,
}
user = auth.authenticate(
remote_user=pennid, shibboleth_attributes=shibboleth_attributes
)
if user:
auth.login(request, user)
return redirect(request.GET.get("next", "/"))
capture_message("Invalid user returned from shibboleth")
return HttpResponseServerError()
class LogoutView(View):
"""
Log out a user from both Platform and Shibboleth.
"""
def get(self, request):
auth.logout(request)
return redirect(
"/Shibboleth.sso/Logout?return=https://idp.pennkey.upenn.edu/logout"
)
class DevLoginView(View):
"""
Log in a test user.
Does not use Shibboleth
"""
def get(self, request):
user_objects = (
get_user_model().objects.filter(~Q(username="admin")).order_by("pennid")
)
serialized_data = UserSerializer(user_objects, many=True).data
return render(request, "accounts/devlogin.html", {"user_data": serialized_data})
def post(self, request):
choice = int(request.POST.get("userChoice", ""))
try:
user = get_user_model().objects.get(pennid=choice)
except User.DoesNotExist:
user = get_user_model().objects.filter(~Q(username="admin")).first()
affiliations = user.groups.all().values_list("name", flat=True)
shibboleth_attributes = {
"username": user.username,
"first_name": user.first_name,
"last_name": user.last_name,
"affiliation": affiliations,
}
user = auth.authenticate(
remote_user=user.pennid, shibboleth_attributes=shibboleth_attributes
)
auth.login(request, user)
return redirect(request.GET.get("next", "/accounts/me/"))
class DevLogoutView(View):
"""
Log out a test user from Platform
"""
def get(self, request):
auth.logout(request)
return redirect("accounts:login")
@method_decorator(csrf_exempt, name="dispatch")
class UUIDIntrospectTokenView(IntrospectTokenView):
@staticmethod
def get_token_response(token_value=None):
try:
token = get_access_token_model().objects.get(token=token_value)
except ObjectDoesNotExist:
return HttpResponse(
content=json.dumps({"active": False}),
status=401,
content_type="application/json",
)
else:
if token.is_valid():
data = {
"active": True,
"scope": token.scope,
"exp": int(calendar.timegm(token.expires.timetuple())),
}
if token.application:
data["client_id"] = token.application.client_id
if token.user:
data["user"] = UserSerializer(token.user).data
return HttpResponse(
content=json.dumps(data),
status=200,
content_type="application/json",
)
else:
return HttpResponse(
content=json.dumps({"active": False}),
status=200,
content_type="application/json",
)
class UserSearchView(ProtectedResourceMixin, generics.ListAPIView):
"""
Search for users by first name, last name, or pennkey. Authentication Required.
"""
serializer_class = UserSearchSerializer
def get_queryset(self):
query = self.request.query_params.get("q", "")
        if len(query) < 2:  # Do not show anything if query is less than two characters
            return User.objects.none()
qs = User.objects.none()
if " " in query: # First and last name provided
# Returns the following results in sorted order:
# 1. Exact match on first and last name
# 2. Starting match on first name and exact match on last name
# 3. Starting match on first and last name
first, last = query.split()
q1 = Q(first_name__iexact=first) & Q(last_name__iexact=last)
q2 = Q(first_name__istartswith=first) & Q(last_name__iexact=last)
q3 = Q(first_name__istartswith=first) & Q(last_name__istartswith=last)
qs = (
User.objects.filter(q1 | q2 | q3)
.annotate(
search_type_ordering=Case(
When(q1, then=Value(2)),
When(q2, then=Value(1)),
When(q3, then=Value(0)),
default=Value(-1),
output_field=IntegerField(),
)
)
.order_by("-search_type_ordering")
)
else:
# Returns the following results in sorted order:
# 1. Exact first name match
# 2. Exact last name match
# 3. Starting first name match
# 4. Starting last name match
# 5. Exact pennkey match
q1 = Q(first_name__iexact=query)
q2 = Q(last_name__iexact=query)
q3 = Q(first_name__istartswith=query)
q4 = Q(last_name__istartswith=query)
q5 = Q(username__iexact=query)
qs = (
User.objects.filter(q1 | q2 | q3 | q4 | q5)
.annotate(
search_type_ordering=Case(
When(q1, then=Value(5)),
When(q2, then=Value(4)),
When(q3, then=Value(3)),
When(q4, then=Value(2)),
When(q5, then=Value(1)),
default=Value(-1),
output_field=IntegerField(),
)
)
.order_by("-search_type_ordering")
)
return qs
class UserView(generics.RetrieveUpdateAPIView):
"""
get:
Return information about the logged in user.
update:
Update information about the logged in user.
You must specify all of the fields or use a patch request.
patch:
Update information about the logged in user.
Only updates fields that are passed to the server.
"""
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
def get_object(self):
return self.request.user
class PhoneNumberViewSet(viewsets.ModelViewSet):
"""
retrieve:
Return a single phone number with all information fields present.
list:
Return a list of phone numbers associated with current user.
create:
Add new unverified phone number.
update:
Update all fields.
You must specify all of the fields or use a patch request.
partial_update:
Update certain fields.
Only specify the fields that you want to change.
destroy:
Delete a phone number.
"""
serializer_class = PhoneNumberSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return self.request.user.phone_numbers.all()
def destroy(self, request, *args, **kwargs):
is_primary = self.get_object().primary
self.get_object().delete()
next_number = self.get_queryset().filter(verified=True).first()
if is_primary and next_number is not None:
next_number.primary = True
next_number.save()
return Response({"detail": "Phone number successfully deleted"}, status=200)
@action(detail=True, methods=["post"])
def resend_verification(self, request, pk=None):
obj = self.get_object()
elapsed_time = timezone.now() - obj.verification_timestamp
if elapsed_time.total_seconds() > User.VERIFICATION_EXPIRATION_MINUTES * 60:
obj.verification_code = get_random_string(
length=6, allowed_chars="1234567890"
)
obj.verification_timestamp = timezone.now()
sendSMSVerification(obj.value, obj.verification_code)
obj.save()
return Response({"detail": "success"})
return HttpResponseBadRequest()
class EmailViewSet(viewsets.ModelViewSet):
"""
retrieve:
Return a single email with all information fields present.
list:
Return a list of emails associated with current user.
create:
Add new unverified email.
update:
Update all fields.
You must specify all of the fields or use a patch request.
partial_update:
Update certain fields.
Only specify the fields that you want to change.
destroy:
Delete an email.
"""
serializer_class = EmailSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
return self.request.user.emails.all()
def destroy(self, request, *args, **kwargs):
is_primary = self.get_object().primary
if is_primary and self.get_queryset().filter(verified=True).count() < 2:
return Response(
{"detail": "You can't delete the only verified email"}, status=405
)
self.get_object().delete()
next_email = self.get_queryset().filter(verified=True).first()
if is_primary and next_email is not None:
next_email.primary = True
next_email.save()
return Response({"detail": "Email successfully deleted"}, status=200)
@action(detail=True, methods=["post"])
def resend_verification(self, request, pk=None):
obj = self.get_object()
elapsed_time = timezone.now() - obj.verification_timestamp
if elapsed_time.total_seconds() > User.VERIFICATION_EXPIRATION_MINUTES * 60:
obj.verification_code = get_random_string(
length=6, allowed_chars="1234567890"
)
obj.verification_timestamp = timezone.now()
sendEmailVerification(obj.value, obj.verification_code)
obj.save()
return Response({"detail": "success"})
return HttpResponseBadRequest()
class MajorViewSet(viewsets.ReadOnlyModelViewSet):
"""
list:
Retrieve a list of all of the active majors/programs
(supports search functionality on name and degree type)
retrieve:
Retrieve a specific major by id
"""
serializer_class = MajorSerializer
filter_backends = [SearchFilter]
search_fields = ["name", "degree_type"]
queryset = Major.objects.filter(is_active=True)
class SchoolViewSet(viewsets.ReadOnlyModelViewSet):
"""
list:
Retrieve a list of all of the schools
(supports search functionality on name)
retrieve:
Retrieve a specific school by id
"""
serializer_class = SchoolSerializer
filter_backends = [SearchFilter]
search_fields = ["name"]
queryset = School.objects.all()
class ProductAdminView(APIView):
"""
Idempotently set admin permissions on all of our products.
Takes in a POST body in the form {"pennkey": ["permissions"]}
"""
permission_classes = [HasAPIKey]
def post(self, request, format=None):
# Revoke all existing admin permissions
content_type = ContentType.objects.get(app_label="accounts", model="user")
perms = Permission.objects.filter(
content_type=content_type, codename__endswith="_admin"
)
for perm in perms:
perm.user_set.clear()
User.objects.filter(Q(is_superuser=True) | Q(is_staff=True)).update(
is_superuser=False, is_staff=False
)
try:
body = json.loads(request.body)
except JSONDecodeError:
return HttpResponseBadRequest()
for pennkey in body:
user = User.objects.filter(username=pennkey).first()
if user is not None:
for permission_slug in body[pennkey]:
# Handle platform separately
if permission_slug == "platform_admin":
user.is_superuser = True
user.is_staff = True
user.save()
continue
product_name = permission_slug[:-6].replace("_", " ").title()
permission_name = f"{product_name} Admin"
permission, _ = Permission.objects.get_or_create(
content_type=content_type,
codename=permission_slug,
defaults={"name": permission_name},
)
user.user_permissions.add(permission)
return Response({"detail": "success"})
|
152492
|
import itertools
import numpy as np
from ..sequences import Genome
def in_silico_mutagenesis_sequences(sequence,
mutate_n_bases=1,
reference_sequence=Genome,
start_position=0,
end_position=None):
"""
Creates a list containing each mutation that occurs from an
*in silico* mutagenesis across the whole sequence.
Please note that we have not parallelized this function yet, so
runtime increases exponentially when you increase `mutate_n_bases`.
Parameters
----------
sequence : str
A string containing the sequence we would like to mutate.
mutate_n_bases : int, optional
Default is 1. The number of base changes to make with each set of
mutations evaluated, e.g. `mutate_n_bases = 2` considers all
pairs of SNPs.
reference_sequence : class, optional
Default is `selene_sdk.sequences.Genome`. The type of sequence
that has been passed in.
start_position : int, optional
Default is 0. The starting position of the subsequence to be
mutated.
end_position : int or None, optional
Default is None. The ending position of the subsequence to be
mutated. If left as `None`, then `len(sequence)` will be
used.
Returns
-------
list(list(tuple))
A list of all possible mutations. Each element in the list is
itself a list of tuples, e.g. element = [(0, 'T')] when only mutating
1 base at a time. Each tuple is the position to mutate and the base
with which we are replacing the reference base.
For a sequence of length 1000, mutating 1 base at a time means that
we return a list with length of 3000-4000, depending on the number of
unknown bases in the input sequences.
Raises
------
ValueError
If the value of `start_position` or `end_position` is negative.
ValueError
If there are fewer than `mutate_n_bases` between `start_position`
and `end_position`.
ValueError
        If `start_position` is greater than or equal to `end_position`.
ValueError
If `start_position` is not less than `len(sequence)`.
ValueError
If `end_position` is greater than `len(sequence)`.
"""
if end_position is None:
end_position = len(sequence)
if start_position >= end_position:
raise ValueError(("Starting positions must be less than the ending "
"positions. Found a starting position of {0} with "
"an ending position of {1}.").format(start_position,
end_position))
if start_position < 0:
raise ValueError("Negative starting positions are not supported.")
if end_position < 0:
raise ValueError("Negative ending positions are not supported.")
if start_position >= len(sequence):
raise ValueError(("Starting positions must be less than the sequence length."
" Found a starting position of {0} with a sequence length "
"of {1}.").format(start_position, len(sequence)))
if end_position > len(sequence):
raise ValueError(("Ending positions must be less than or equal to the sequence "
"length. Found an ending position of {0} with a sequence "
"length of {1}.").format(end_position, len(sequence)))
if (end_position - start_position) < mutate_n_bases:
raise ValueError(("Fewer bases exist in the substring specified by the starting "
"and ending positions than need to be mutated. There are only "
"{0} currently, but {1} bases must be mutated at a "
"time").format(end_position - start_position, mutate_n_bases))
sequence_alts = []
for index, ref in enumerate(sequence):
alts = []
for base in reference_sequence.BASES_ARR:
if base == ref:
continue
alts.append(base)
sequence_alts.append(alts)
all_mutated_sequences = []
for indices in itertools.combinations(
range(start_position, end_position), mutate_n_bases):
pos_mutations = []
for i in indices:
pos_mutations.append(sequence_alts[i])
for mutations in itertools.product(*pos_mutations):
all_mutated_sequences.append(list(zip(indices, mutations)))
return all_mutated_sequences
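# Illustrative sketch (assuming Genome.BASES_ARR == ['A', 'C', 'G', 'T']):
# in_silico_mutagenesis_sequences("ACG") starts with
#   [[(0, 'C')], [(0, 'G')], [(0, 'T')], [(1, 'A')], [(1, 'G')], [(1, 'T')], ...]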
def mutate_sequence(encoding,
mutation_information,
reference_sequence=Genome):
"""
Transforms a sequence with a set of mutations.
Parameters
----------
encoding : numpy.ndarray
An :math:`L \\times N` array (where :math:`L` is the sequence's
length and :math:`N` is the size of the sequence type's
alphabet) holding the one-hot encoding of the
reference sequence.
mutation_information : list(tuple)
List of tuples of (`int`, `str`). Each tuple is the position to
mutate and the base to which to mutate that position in the
sequence.
reference_sequence : class, optional
Default is `selene_sdk.sequences.Genome`. A reference sequence
from which to retrieve smaller sequences..
Returns
-------
numpy.ndarray
An :math:`L \\times N` array holding the one-hot encoding of
the mutated sequence.
"""
mutated_seq = np.copy(encoding)
for (position, alt) in mutation_information:
replace_base = reference_sequence.BASE_TO_INDEX[alt]
mutated_seq[position, :] = 0
mutated_seq[position, replace_base] = 1
return mutated_seq
def _ism_sample_id(sequence, mutation_information):
    """
    Generate an identifier for a set of mutations applied to a sequence.
    Parameters
    ----------
    sequence : str
        The input sequence to mutate.
    mutation_information : list(tuple)
        List of tuples of (`int`, `str`): the position mutated and the
        alternate base introduced at that position.
    Returns
    -------
    tuple(str, str, str)
        Semicolon-joined positions, reference bases, and alternate bases,
        e.g. `('10;25', 'A;C', 'T;G')`.
    """
positions = []
refs = []
alts = []
for (position, alt) in mutation_information:
positions.append(str(position))
refs.append(sequence[position])
alts.append(alt)
return (';'.join(positions), ';'.join(refs), ';'.join(alts))
|
152527
|
import json
import sys
from easyprocess import EasyProcess
python = sys.executable
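# pass_env runs a child Python that dumps its os.environ as JSON, so the
# returned dict reflects exactly the environment the child process received.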
def pass_env(e):
prog = "import os,json;print(json.dumps(dict(os.environ)))"
s = EasyProcess([python, "-c", prog], env=e).call().stdout
return json.loads(s)
def test_env():
assert len(pass_env(None)) > 0
e = pass_env(None)
assert pass_env(e).get("FOO") is None
e["FOO"] = "2"
assert pass_env(e).get("FOO") == "2"
|
152537
|
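# Fragment of an nxtOSEK/ECRobot C code generator: it fills a C template with
# per-port sensor initialization code, matching sensor names produced by a
# Russian-language editor (English translations are noted inline below).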
template = "#include \"kernel.h\"\n#include \"ecrobot_interface.h\"\n@@BALANCER@@\n@@VARIABLES@@\n\nvoid ecrobot_device_initialize(void)\n{\n@@INITHOOKS@@\n}\n\nvoid ecrobot_device_terminate(void)\n{\n@@TERMINATEHOOKS@@\n}\n\n/* nxtOSEK hook to be invoked from an ISR in category 2 */\nvoid user_1ms_isr_type2(void){ /* do nothing */ }\n\n@@CODE@@"
task_template = "TASK(OSEK_Task_Number_0)\n{\n@@CODE@@\n}"
template = template.replace("@@CODE@@", task_template)
number_of_ports = 4
port_values = [initBlock.port_1, initBlock.port_2, initBlock.port_3, initBlock.port_4]
for i in range(number_of_ports):  # xrange -> range for Python 3 compatibility
init_ecrobot_color_sensor_port_s = "ecrobot_init_nxtcolorsensor(NXT_PORT_S"
if port_values[i] == "Ультразвуковой сенсор":
init_code.append("ecrobot_init_sonar_sensor(NXT_PORT_S" + str(i + 1) + ");\n")
terminate_code.append("ecrobot_term_sonar_sensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_sonar_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (все цвета)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) +", NXT_LIGHTSENSOR_WHITE);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (красный)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_LIGHTSENSOR_RED);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (зеленый)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_LIGHTSENSOR_GREEN);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (синий)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_LIGHTSENSOR_BLUE);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (пассивный)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_COLORSENSOR);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
else:
port_values[i] = "ecrobot_get_touch_sensor(NXT_PORT_S"
initBlock.id = max_used_id
cur_node_is_processed = True
|
152568
|
from tests.utils import W3CTestCase
class TestTtwfReftestFlexAlignContentCenter(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'ttwf-reftest-flex-align-content-center'))
|
152570
|
from scipy import stats
import numpy as np
__all__ = ['chisquare', 'kolsmi']
def kolsmi(dist, fit_result, data):
"""Perform a Kolmogorow-Smirnow-Test for goodness of fit.
This tests the H0 hypothesis, if data is a sample of dist
Args:
dist: A mle.Distribution instance
fit_result: The solution dict, returned by the Distribution.fit method
data: The data used in Distribution.fit
Returns:
teststat: the test statistic, e.g. the max distance between the
cumulated distributions
        p-value: the p-value of the test; small values are evidence against
            the hypothesis that the data are a sample from dist
"""
variables = dist.get_vars()
if len(variables) > 1:
raise ValueError("Kolmogorov-Smirnov-Test is only valid for 1d distributions")
var = variables[0]
teststat, pvalue = stats.kstest(data[var.name], lambda x: dist.cdf(x, **fit_result["x"]))
return teststat, pvalue
def chisquare(dist, fit_result, data, bins=None, range=None):
"""Perform a Chi^2 test for goodness of fit.
    Tests the H0 hypothesis that the differences between the fit result
    and the data are compatible with random fluctuations.
Args:
dist: A mle.Distribution instance
fit_result: The solution dict, returned by the Distribution.fit method
data: The data used in Distribution.fit
        bins: Number of bins for the histogram (default: Rice rule, 2 * N**(1/3))
range: Range for the histogram (default: min(data), max(data))
Returns:
chisquare: the test statistic, chi^2/ndf
        p-value: the p-value of the test; small values indicate that the
            differences between dist and data exceed random fluctuation
"""
variables = dist.get_vars()
if len(variables) > 1:
raise ValueError("This is a 1d only chisquare test")
var = variables[0]
    # Rice rule of thumb for the number of bins if not provided
    if bins is None:
        bins = int(np.ceil(2 * len(data[var.name]) ** (1.0 / 3.0)))
entries, edges = np.histogram(data[var.name], bins=bins, range=range)
# get expected frequencies from the cdf
cdf = dist.cdf(edges, **fit_result["x"])
exp_entries = np.round(len(data[var.name]) * (cdf[1:]-cdf[:-1]))
    # use only bins where at least 5 entries are expected
mask = exp_entries >= 5
chisq, pvalue = stats.chisquare(entries[mask], exp_entries[mask], ddof=len(fit_result["x"]))
chisq = chisq/(np.sum(mask) - len(fit_result["x"]) - 1)
return chisq, pvalue
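# Hypothetical usage, assuming the mle package's Distribution API used above:
#   result = dist.fit(data)
#   stat, p = kolsmi(dist, result, data)
#   chi2_ndf, p = chisquare(dist, result, data, bins=20)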
|
152615
|
from abc import abstractmethod
from math import ceil, log2
from ..util import SaveLoad, load
class Traversal(SaveLoad):
"""Base image traversal class.
Attributes
----------
classes : `dict`
Image traversal class group.
"""
classes = {}
@abstractmethod
def __call__(self, width, height, ends=True):
"""Traverse an image.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
ends : `bool`, optional
Generate block ends (default: `True`).
Raises
------
NotImplementedError
Returns
-------
`generator` of ((`int`, `int`) or `None`)
Points and block ends.
"""
pass
class Lines(Traversal): # pylint: disable=abstract-method
"""Base line traversal class.
Attributes
----------
reverse : `int`
If greater than 0, reverse every nth line.
line_sentences : `bool`
Generate a block end after each line.
"""
def __init__(self, reverse=0, line_sentences=False):
"""Base line traversal constructor.
Parameters
----------
reverse : `int`, optional
If greater than 0, reverse every nth line (default: 0).
line_sentences : `bool`, optional
Generate a block end after each line (default: `False`).
"""
super().__init__()
self.reverse = reverse
self.line_sentences = line_sentences
def __eq__(self, t):
return (self.reverse == t.reverse
and self.line_sentences == t.line_sentences)
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['reverse'] = self.reverse
data['line_sentences'] = self.line_sentences
return data
class HLines(Lines):
"""Horizontal line traversal.
Examples
--------
>>> traverse = HLines()
>>> list(traverse(2, 2))
[(0, 0), (1, 0), (0, 1), (1, 1)]
>>> traverse = HLines(reverse=1)
>>> list(traverse(2, 2))
[(1, 0), (0, 0), (1, 1), (0, 1)]
>>> traverse = HLines(reverse=2)
>>> list(traverse(2, 2))
[(1, 0), (0, 0), (0, 1), (1, 1)]
>>> traverse = HLines(line_sentences=True)
>>> list(traverse(2, 2))
[(0, 0), (1, 0), None, (0, 1), (1, 1), None]
>>> list(traverse(2, 2, ends=False))
[(0, 0), (1, 0), (0, 1), (1, 1)]
"""
def __call__(self, width, height, ends=True):
"""Traverse an image.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
ends : `bool`, optional
Generate block ends (default: `True`).
Returns
-------
`generator` of ((`int`, `int`) or `None`)
Points and block ends.
"""
for y in range(height):
if self.reverse > 0 and y % self.reverse == 0:
xs = range(width - 1, -1, -1)
else:
xs = range(width)
for x in xs:
yield x, y
if self.line_sentences and ends:
yield None
class VLines(Lines):
"""Vertical line traversal.
Examples
--------
>>> traverse = VLines()
>>> list(traverse(2, 2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
>>> traverse = VLines(reverse=1)
>>> list(traverse(2, 2))
[(0, 1), (0, 0), (1, 1), (1, 0)]
>>> traverse = VLines(reverse=2)
>>> list(traverse(2, 2))
    [(0, 1), (0, 0), (1, 0), (1, 1)]
>>> traverse = VLines(line_sentences=True)
>>> list(traverse(2, 2))
[(0, 0), (0, 1), None, (1, 0), (1, 1), None]
>>> list(traverse(2, 2, ends=False))
[(0, 0), (0, 1), (1, 0), (1, 1)]
"""
def __call__(self, width, height, ends=True):
"""Traverse an image.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
ends : `bool`, optional
Generate block ends (default: `True`).
Returns
-------
generator of ((`int`, `int`) or `None`)
Points and block ends.
"""
for x in range(width):
if self.reverse > 0 and x % self.reverse == 0:
ys = range(height - 1, -1, -1)
else:
ys = range(height)
for y in ys:
yield x, y
if self.line_sentences and ends:
yield None
class Spiral(Traversal):
"""Spiral traversal.
Attributes
----------
reverse : `bool`
Reverse traversal order.
Examples
--------
>>> traverse = Spiral()
>>> list(traverse(3, 3))
[(1, 1), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0), (0, 0)]
>>> traverse = Spiral(True)
>>> list(traverse(3, 3))
[(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1), (1, 1)]
"""
def __init__(self, reverse=False):
"""Spiral traversal constructor.
Parameters
----------
reverse : `bool`, optional
Reverse traversal order (default: `False`).
"""
super().__init__()
self.reverse = reverse
def __eq__(self, t):
return self.reverse == t.reverse
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['reverse'] = self.reverse
return data
@staticmethod
def _rspiral(width, height):
"""Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
x0 = 0
y0 = 0
x1 = width - 1
y1 = height - 1
while x0 < x1 and y0 < y1:
for x in range(x0, x1):
yield x, y0
for y in range(y0, y1):
yield x1, y
for x in range(x1, x0, -1):
yield x, y1
for y in range(y1, y0, -1):
yield x0, y
x0 += 1
y0 += 1
x1 -= 1
y1 -= 1
if x0 == x1:
for y in range(y0, y1 + 1):
yield x0, y
elif y0 == y1:
for x in range(x0, x1 + 1):
yield x, y0
@staticmethod
def _spiral(width, height):
"""Spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
if width == 1:
for y in range(height - 1, -1, -1):
yield 0, y
return
if height == 1:
for x in range(width - 1, -1, -1):
yield x, 0
return
if width <= height:
x0 = width // 2
if width % 2:
for y in range(height - 1 - x0, x0 - 1, -1):
yield x0, y
x0 -= 1
y0 = x0
else:
y0 = height // 2
if height % 2:
for x in range(width - 1 - y0, y0 - 1, -1):
yield x, y0
y0 -= 1
x0 = y0
while x0 >= 0:
x1 = width - x0 - 1
y1 = height - y0 - 1
for y in range(y0 + 1, y1):
yield x0, y
for x in range(x0, x1):
yield x, y1
for y in range(y1, y0, -1):
yield x1, y
for x in range(x1, x0 - 1, -1):
yield x, y0
x0 -= 1
y0 -= 1
def __call__(self, width, height, ends=True): # pylint: disable=unused-argument
"""Traverse an image.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
ends : `bool`, optional
Unused (default: `True`).
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
if self.reverse:
yield from self._rspiral(width, height)
else:
yield from self._spiral(width, height)
class Hilbert(Traversal):
"""Hilbert curve traversal.
Attributes
----------
POSITION : `list` of (`int`, `int`)
Block positions.
Examples
--------
>>> traverse = Hilbert()
>>> list(traverse(2, 2))
[(0, 0), (0, 1), (1, 1), (1, 0)]
>>> list(traverse(3, 5))
[(0, 0), (0, 1), (1, 1), (1, 0), (2, 0), (2, 1),
(2, 2), (2, 3), (1, 3), (1, 2), (0, 2), (0, 3),
(0, 4), (1, 4), (2, 4)]
"""
POSITION = [(0, 0), (0, 1), (1, 1), (1, 0)]
@classmethod
def get_point_in_block(cls, x, y, block_idx, block_size):
"""Get point coordinates in next block.
Parameters
----------
x : `int`
X coordinate in current block.
y : `int`
Y coordinate in current block.
        block_idx : `int`
Current block index in next block.
block_size : `int`
Current block size.
Raises
------
IndexError
If block index is out of range.
Returns
-------
(`int`, `int`)
Point coordinates.
"""
if block_idx == 0:
return y, x
if block_idx == 1:
return x, y + block_size
if block_idx == 2:
return x + block_size, y + block_size
if block_idx == 3:
x, y = block_size - 1 - y, block_size - 1 - x
return x + block_size, y
raise IndexError('block index out of range: %d' % block_idx)
@classmethod
def get_point(cls, idx, size):
"""Get curve point coordinates by index.
Parameters
----------
idx : `int`
Point index.
size : `int`
Curve size.
Returns
-------
(`int`, `int`)
Point coordinates.
"""
x, y = cls.POSITION[idx % 4]
idx //= 4
block_size = 2
while block_size < size:
block_idx = idx % 4
x, y = cls.get_point_in_block(x, y, block_idx, block_size)
idx //= 4
block_size *= 2
return x, y
def __call__(self, width, height, ends=True): # pylint: disable=unused-argument
"""Traverse an image.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
ends : `bool`, optional
Unused (default: `True`).
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
size = max(width, height)
size = 2 ** ceil(log2(size))
generated = 0
points = width * height
if generated >= points or width <= 0 or height <= 0:
return
for i in range(size * size):
x, y = self.get_point(i, size)
if x < width and y < height:
yield x, y
generated += 1
if generated >= points:
return
def __eq__(self, tr):
return isinstance(tr, self.__class__)
class Blocks(Traversal):
"""Block traversal.
Attributes
----------
block_size : (`int`, `int`)
Block size.
block_sentences : `bool`
Generate a block end after each block.
traverse_image : `markovchain.image.traversal.Traversal`
Image traversal.
traverse_block : `markovchain.image.traversal.Traversal`
Block traversal.
Examples
--------
>>> traverse = Blocks(block_size=(2, 2),
... traverse_image=HLines(),
... traverse_block=VLines())
>>> list(traverse(4, 4))
[(0, 0), (0, 1), (1, 0), (1, 1),
(2, 0), (2, 1), (3, 0), (3, 1),
(0, 2), (0, 3), (1, 2), (1, 3),
(2, 2), (2, 3), (3, 2), (3, 3)]
"""
def __init__(self,
block_size=(16, 16),
block_sentences=False,
traverse_image=None,
traverse_block=None):
""" Block traversal constructor.
Parameters
----------
block_size : (`int`, `int`), optional
Block size (default: (16, 16)).
block_sentences : `bool`, optional
Generate a block end after each block (default: False).
traverse_image : `markovchain.image.traversal.Traversal` \
or `dict`, optional
Image traversal object or JSON data (default: HLines()).
traverse_block : `markovchain.image.traversal.Traversal` \
or `dict`, optional
Block traversal object or JSON data (default: HLines()).
"""
super().__init__()
self.block_size = tuple(block_size)
self.block_sentences = block_sentences
self.traverse_image = load(traverse_image, Traversal, HLines)
self.traverse_block = load(traverse_block, Traversal, HLines)
def __call__(self, width, height, ends=True):
"""Traverse an image.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
ends : `bool`, optional
Generate block ends (default: `True`).
Returns
-------
`generator` of ((`int`, `int`) or `None`)
Points and block ends.
"""
block_width, block_height = self.block_size
img_width = width // block_width
img_height = height // block_height
for xy in self.traverse_image(img_width, img_height, ends):
if xy is None:
yield None
continue
x, y = xy
x, y = x * block_width, y * block_height
for dx, dy in self.traverse_block(block_width, block_height, False):
yield x + dx, y + dy
if self.block_sentences and ends:
yield None
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['block_size'] = list(self.block_size)
data['block_sentences'] = self.block_sentences
data['traverse_image'] = self.traverse_image.save()
data['traverse_block'] = self.traverse_block.save()
return data
def __eq__(self, t):
return (self.block_size == t.block_size
and self.block_sentences == t.block_sentences
and self.traverse_image == t.traverse_image
and self.traverse_block == t.traverse_block)
|
152637
|
from django.urls import path
from . import views
app_name = 'posts'
urlpatterns = [
path('create/<int:pk>/', views.create_post, name='create_post'),
]
|
152647
|
import arcpy
#gdb = r'X:\Env-dat.081\source\yt_courbe_niveau_imperial.gdb'
gdb = r'D:\s\yt_courbe_niveau_imperial.gdb'
def arcpy_listFC(gdb):
arcpy.env.workspace = gdb
    print('Looking in "%s"' % arcpy.env.workspace)
fcs = arcpy.ListFeatureClasses()
return fcs
fcs = arcpy_listFC(gdb)
print('Feature classes found: %s' % len(fcs))
|
152692
|
import matplotlib
matplotlib.use('Agg')
from ecolopy_dev import Community
from ecolopy_dev.utils import draw_shannon_distrib
##test_abund.txt would be the genome abundance / the diploid number of chromosomes
## j_tot is obtained by taking the total number of individuals
com = Community('test_log_abund.txt', j_tot=2150924886)
print(com)
com.fit_model('ewens')
com.set_current_model('ewens')
ewens_model = com.get_model('ewens')
print(ewens_model)
com.fit_model('lognormal')
com.set_current_model('lognormal')
lognormal_model = com.get_model('lognormal')
print(lognormal_model)
com.fit_model('etienne')
com.set_current_model('etienne')
etienne_model = com.get_model('etienne')
print(etienne_model)
tmp = {}
likelihoods = []
for met in ['fmin', 'slsqp', 'l_bfgs_b', 'tnc']:
    print('Optimizing with %s...' % met)
try:
com.fit_model(name='etienne', method=met, verbose=False)
model = com.get_model('etienne')
tmp[met] ={}
tmp[met]['model'] = model
tmp[met]['theta'] = model.theta
tmp[met]['I'] = model.I
tmp[met]['m'] = model.m
tmp[met]['lnL'] = model.lnL
        # if two strategies converge to the same likelihood, it may not be
        # necessary to try the remaining optimization strategies...
        # of course, if time is not limiting, there is no harm in checking :)
if round(model.lnL,1) in likelihoods:
break
likelihoods.append(round(model.lnL, 1))
except Exception as e:
        print(' optimization failed: ' + e.args[0])
# in case optimization by fmin failed to find correct values for theta and m:
if not (1 <= tmp['fmin']['theta'] < com.S and \
1e-50 <= tmp['fmin']['m'] < 1-1e-50):
del (tmp['fmin'])
# find the model with the highest likelihood (lnL is stored as a negative
# log-likelihood, hence the min):
met = min(tmp, key=lambda x: tmp[x]['lnL'])
# load it as 'etienne' model
com.set_model(tmp[met]['model'])
lrt = com.lrt('ewens', 'etienne')
best = 'ewens' if lrt > 0.05 else 'etienne'
print('Best model by LRT was: ' + best)
com.generate_random_neutral_distribution(model=best)
pval, neut_h = com.test_neutrality(model=best, gens=10000, full=True, method='shannon', n_cpus=4)
draw_shannon_distrib(neut_h, com.shannon, outfile='test_log_shannon_dist.pdf', filetype='pdf')
print('P-value for neutrality test was: ', pval)
out = open('test_log_shannon_neutral_data.tsv', 'w')
out.write('# shannon:' + str(com.shannon) + '\n')
out.write('\n'.join([str(s) for s in neut_h]) + '\n')
out.close()
com.dump_community('test_log_ecolopy.pik')
|
152725
|
from django.template import Library
from evap.evaluation.models import Semester
from evap.settings import DEBUG, LANGUAGES
register = Library()
@register.inclusion_tag("navbar.html")
def include_navbar(user, language):
return {
"user": user,
"current_language": language,
"languages": LANGUAGES,
"published_result_semesters": Semester.get_all_with_published_unarchived_results(),
"result_semesters": Semester.get_all_with_unarchived_results(),
"grade_document_semesters": Semester.objects.filter(grade_documents_are_deleted=False),
"debug": DEBUG,
}
|
152728
|
from functools import partial
# from ..config_new import BTE_FILTERS
BTE_FILTERS = ["nodeDegree", "ngd", "drugPhase", "survivalProbability"]
def filter_response(res, criteria):
"""
Filter API response based on filtering criteria
:param res: API Response
:param criteria: filtering criteria
"""
def filter_by_operation(rec, key, val, operation):
if rec.get(key):
if isinstance(rec.get(key), list):
rec[key] = rec[key][0]
try:
if operation == "=" and type(val)(rec[key]) == val:
return True
if operation == ">" and type(val)(rec[key]) > val:
return True
if operation == "<" and type(val)(rec[key]) < val:
return True
return False
except (ValueError, TypeError):
return False
return False
if not res or not isinstance(res, list) or not len(res) > 0:
return res
if not isinstance(criteria, dict):
return res
for f, v in criteria.items():
if not isinstance(v, dict):
continue
if f not in BTE_FILTERS:
if "=" in v:
res = list(
filter(
partial(filter_by_operation, key=f, val=v["="], operation="="),
res,
)
)
continue
if ">" in v:
res = list(
filter(
partial(filter_by_operation, key=f, val=v[">"], operation=">"),
res,
)
)
elif "<" in v:
res = list(
filter(
partial(filter_by_operation, key=f, val=v["<"], operation="<"),
res,
)
)
return res
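# Usage sketch (hypothetical records and field name, for illustration only):
# recs = [{"pvalue": "0.01"}, {"pvalue": "0.2"}]
# filter_response(recs, {"pvalue": {"<": 0.05}})  # -> [{"pvalue": "0.01"}]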
|
152738
|
import random
print('Enter the two different values')
first = input('Enter first side : ')
second = input('Enter second side : ')
fate = [first, second]
x = random.randint(0, 1)
print(fate[x])
|
152749
|
from django.conf.urls import patterns
from status.views import StatusView, ExtraStatusView
urlpatterns = patterns('status.views',
(r'^/?$', StatusView.as_view()),
(r'^/(?P<machine_name>[^/]+)$', StatusView.as_view()),
(r'^(?P<query>.+)/$', ExtraStatusView.as_view()),
)
|
152759
|
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.acls.permissions import permission_acl_edit, permission_acl_view
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import (
menu_multi_item, menu_object, menu_secondary, menu_tools
)
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.navigation.classes import SourceColumn
from .events import (
event_cache_edited, event_cache_partition_purged, event_cache_purged
)
from .links import (
link_caches_list, link_cache_multiple_purge, link_cache_purge
)
from .permissions import permission_cache_purge, permission_cache_view
class FileCachingConfig(MayanAppConfig):
app_namespace = 'file_caching'
app_url = 'file_caching'
has_tests = True
name = 'mayan.apps.file_caching'
verbose_name = _('File caching')
def ready(self):
super().ready()
Cache = self.get_model(model_name='Cache')
CachePartition = self.get_model(model_name='CachePartition')
EventModelRegistry.register(model=Cache)
EventModelRegistry.register(model=CachePartition)
ModelEventType.register(
event_types=(event_cache_edited, event_cache_purged,),
model=Cache
)
ModelEventType.register(
event_types=(event_cache_partition_purged,), model=CachePartition
)
ModelPermission.register(
model=Cache, permissions=(
permission_acl_edit, permission_acl_view,
permission_cache_purge, permission_cache_view
)
)
SourceColumn(
attribute='label', is_identifier=True, is_object_absolute_url=True, source=Cache
)
SourceColumn(
attribute='get_maximum_size_display', include_label=True,
is_sortable=True, sort_field='maximum_size', source=Cache
)
SourceColumn(
attribute='get_total_size_display', include_label=True,
source=Cache
)
menu_object.bind_links(
links=(link_cache_purge,),
sources=(Cache,)
)
menu_multi_item.bind_links(
links=(link_cache_multiple_purge,),
sources=(Cache,)
)
menu_secondary.bind_links(
links=(link_caches_list,), sources=(
Cache,
)
)
menu_tools.bind_links(links=(link_caches_list,))
|
152761
|
class SOAPError(Exception):
"""
**Custom SOAP exception**
Custom SOAP exception class.
Raised whenever an error response has been received during action invocation.
"""
def __init__(self, description, code):
self.description = description
self.error = code
class IGDError(Exception):
"""
**Custom Internet Gateway Device exception**
Custom IGD exception class.
Raised whenever a problem with the IGD has been detected.
"""
pass
class ArgumentError(Exception):
"""
**Custom Argument exception**
Custom Argument exception class.
Raised whenever an error has been detected during action invocation.
"""
def __init__(self, message, argument):
self.message = message
self.argument = argument
class ServiceNotFoundError(Exception):
"""
**Custom Service exception**
Custom Service exception class.
Raised whenever a particular service was not found for a device.
"""
def __init__(self, message, service_name):
self.message = message
self.service = service_name
class ActionNotFoundError(Exception):
"""
**Custom Action exception**
Custom Action exception class.
Raised whenever a particular action is not available for a service.
"""
def __init__(self, message, action_name):
self.message = message
self.action = action_name
class NotRetrievedError(Exception):
"""
**Custom exception for objects that have not been retrieved**
Custom object not retrieved exception class.
Raised whenever a certain property for a device or service was not retrieved.
"""
pass
class NotAvailableError(Exception):
"""
**Custom exception for when a certain URL could not be retrieved**
Custom element not retrieved exception class.
Raised whenever a value needed to be accessed could not be retrieved from the URL.
"""
pass
|
152773
|
from copy import deepcopy
from .transforms import TRANSFORMS
SCHEME_CACHE = {} # global scheme registry
class Scheme():
def __init__(self, scheme_list, agent_flatten=True):
self.scheme_list = scheme_list
if agent_flatten:
self.agent_flatten() # NEW!
self.t_id_depth = self._get_t_id_depth(self.scheme_list)
return
def _get_t_id_depth(self, scheme_list):
# calculates required minimum sequence depth when t_id is set (based on shift transforms)
min_depth = 0
for _scheme in scheme_list:
for _transf in _scheme.get("transforms", []):
if _transf[0] in ["shift"]:
min_depth = max(_transf[1].get("steps", 0), min_depth)
return min_depth
def _agent_flatten_dict(self, dic):
"""
flattens dict values that are lists into flat entries labelled by __agent<id>
"""
dic = dict(dic) # copy
for k in list(dic.keys()):
if isinstance(dic[k], (tuple, list)):
for i, v in enumerate(dic[k]):
dic[k+"__agent{:d}".format(i)] = v
del dic[k]
return dic
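    # Example of the flattening above: {"obs": [a0, a1]} becomes
    # {"obs__agent0": a0, "obs__agent1": a1} (list position gives the agent id).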
def _check_scheme_history_compatibility(self, scheme_list, history):
"""
check whether all history columns are found in the scheme
"""
scheme_list_keys = [_s["name"] for _s in scheme_list if _s.get("switch", True)]
history_keys = history.get_keys()
        for _k in scheme_list_keys:
            assert _k in history_keys, "key {} not in history".format(_k)
def __contains__(self, item): # "in" operator
_name_dict = { _s["name"]:None for _s in self.scheme_list}
return item in _name_dict
def get_by_name(self, name):
"""
return scheme_list element with given name (list if there is more than one)
"""
        ret = []
        for _scheme_entry in self.scheme_list:
            if _scheme_entry["name"] == name:
                ret.append(_scheme_entry)
        return ret[0] if len(ret) == 1 else ret
def agent_flatten(self):
flat_scheme_list = []
for _scheme in self.scheme_list:
if _scheme.get("select_agent_ids", None) is not None:
# remove agent data entries that are not applicable
for _agent_id in _scheme.get("select_agent_ids"):
                    tmp_scheme = deepcopy(_scheme)
tmp_scheme["name"] = _scheme["name"] + "__agent{}".format(_agent_id)
if tmp_scheme.get("rename", None) is not None:
tmp_scheme["rename"] = _scheme["rename"] + "__agent{}".format(_agent_id)
del tmp_scheme["select_agent_ids"]
                    # remove transforms that are not applicable
                    if tmp_scheme.get("transforms", None) is not None:
                        # build a new list instead of deleting from the list
                        # while enumerating it (the original mutated the list
                        # mid-iteration and could skip entries)
                        kept_transforms = []
                        for transform in tmp_scheme["transforms"]:
                            if transform[1].get("select_agent_ids", None):
                                if _agent_id not in transform[1]["select_agent_ids"]:
                                    continue
                                del transform[1]["select_agent_ids"]
                            kept_transforms.append(transform)
                        tmp_scheme["transforms"] = kept_transforms
flat_scheme_list.append(tmp_scheme)
else:
flat_scheme_list.append(_scheme)
self.scheme_list = flat_scheme_list
return Scheme(flat_scheme_list, agent_flatten=False)
def get_output_sizes(self, transition_scheme):
"""
calculates output sizes given a transition scheme
"""
def _apply_transform(scheme_item, _data):
if scheme_item.get("transforms", None) is not None:
for _transform in scheme_item["transforms"]:
if callable(_transform[0]):
f_transform = _transform[0]
elif isinstance(_transform[0], str) and _transform[0] in TRANSFORMS:
f_transform = TRANSFORMS[_transform[0]]
else:
assert False, "Transform unknown!"
_data = f_transform(_data, **_transform[1], output_size_only=True)
pass
return _data
transition_scheme_dic = {_item["name"]:_item for _item in transition_scheme.scheme_list}
output_size_dic = {}
for scheme_entry in self.scheme_list:
if not scheme_entry.get("switch", True):
continue
if not scheme_entry["name"] in transition_scheme:
assert False, "cannot find '{}' in transition_scheme - have you misspelled it?".format(scheme_entry["name"])
input_size = transition_scheme_dic[scheme_entry["name"]]["shape"][0]
output_size = _apply_transform(scheme_entry, input_size)
output_size_dic[scheme_entry.get("rename", scheme_entry["name"])] = output_size
return output_size_dic
|
152796
|
from django.conf import settings
from django.urls import path
from dictionary.views.detail import Chat, ChatArchive, UserProfile
from dictionary.views.edit import UserPreferences
from dictionary.views.images import ImageList, ImageUpload, ImageDetailProduction, ImageDetailDevelopment
from dictionary.views.list import ActivityList, ConversationArchiveList, ConversationList, PeopleList
# This should resolve to ImageDetailProduction if your media files are served
# outside of Django. (Check the ImageDetailProduction view for details.)
ImageDetailView = ImageDetailDevelopment if settings.DEBUG else ImageDetailProduction
urlpatterns_user = [
# user related urls
path("settings/", UserPreferences.as_view(), name="user_preferences"),
path("people/", PeopleList.as_view(), name="people"),
path("people/<slug:tab>/", PeopleList.as_view(), name="people-tab"),
path("activity/", ActivityList.as_view(), name="activity"),
path("messages/", ConversationList.as_view(), name="messages"),
path("messages/archive/", ConversationArchiveList.as_view(), name="messages-archive"),
path("messages/<slug:slug>/", Chat.as_view(), name="conversation"),
path("messages/archive/<slug:slug>/", ChatArchive.as_view(), name="conversation-archive"),
path("author/<slug:slug>/", UserProfile.as_view(), name="user-profile"),
path("author/<slug:slug>/<slug:tab>/", UserProfile.as_view(), name="user-profile-stats"),
path("myimages/", ImageList.as_view(), name="image-list"),
path("upload/", ImageUpload.as_view(), name="image-upload"),
path("img/<slug:slug>/", ImageDetailView.as_view(), name="image-detail"),
]
|
152835
|
import tensorflow as tf
input = tf.placeholder(tf.string, None)
'''
{
"input": {
"foo": {
"bar": "bar"
}
}
}
'''
root = tf.parse_single_example(input[0], features={
'foo': tf.FixedLenFeature(shape=[], dtype=tf.string),
})
foo = tf.parse_single_example(root['foo'], features={
'bar': tf.FixedLenFeature(shape=[], dtype=tf.string),
})
sess = tf.Session()
tf.saved_model.simple_save(
sess,
'../../testdata/test_models/nested_example/1/',
{
'input': input
},
{
'foo': foo['bar'],
}
)
|
152922
|
from .base import BasePlayer, Option
from .cli import CLIPlayer
# debug.py is not included in published package
try:
from sxm_player.debug.player import DebugPlayer
except ImportError:
DebugPlayer = None # type: ignore
__all__ = ["BasePlayer", "CLIPlayer", "DebugPlayer", "Option"]
|
152945
|
import datetime
import os
import sys
from configparser import ConfigParser
import pkg_resources
import logging
from trustworthiness.definitions import OUTPUT_FOLDER
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class DeFactoConfig(object):
__metaclass__ = Singleton
def set_logger(self, log_file, file_level=logging.DEBUG, console_level=logging.INFO):
formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
fileHandler = logging.FileHandler(log_file)
fileHandler.setFormatter(formatter)
fileHandler.setLevel(file_level)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(console_level)
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(fileHandler)
self.logger.addHandler(consoleHandler)
self.logger.propagate = False
def __init__(self):
fine = False
config = None
self.logger = logging.getLogger('defacto')
        for ini_file in filter(None, (os.curdir, os.path.expanduser("~"), "/etc/defacto", os.environ.get('DEFACTO_CONF'), '/')):  # filter(None, ...) skips an unset DEFACTO_CONF
try:
self.version = "3.0.1"
self.version_label = "DeFacto 3.0.1"
with open(os.path.join(ini_file, "defacto.ini")) as source:
parser = ConfigParser()
parser.read(source.name)
self.root_dir = os.path.dirname(os.path.abspath(__file__)) + '/'
self.root_dir_data = self.root_dir + 'data/'
#self.log_level = parser.get('conf', 'log_level')
# absolute path
self.database = parser.get('database', 'path')
self.datasets_ext = parser.get('dataset_external_path', 'path')
self.datasets = self.root_dir_data + parser.get('dir', 'datasets')
#self.dir_output = self.root_dir_data + parser.get('dir', 'output')
self.dir_output = OUTPUT_FOLDER
self.dir_models = self.root_dir_data + parser.get('dir', 'models')
self.dir_encoders = self.root_dir_data + parser.get('dir', 'encoders')
self.dir_log = self.root_dir_data + 'log/'
# encoders and models
self.enc_domain = self.dir_encoders + parser.get('encoders', 'enc_webdomain')
self.model_credibility = self.dir_models + parser.get('models', 'clf_credibility')
# external datasets
self.dataset_ext_spam = self.datasets_ext + parser.get('dataset_external_path', 'spam_kaggle')
self.dataset_ext_uci_news = self.datasets_ext + parser.get('dataset_external_path', 'uci_news')
self.dataset_ext_microsoft_webcred_webpages_cache = self.datasets_ext + parser.get('dataset_external_path',
'microsoft_webcred_webpages_cached')
self.dataset_ext_microsoft_webcred_webpages_cache_missing = self.datasets_ext + parser.get('dataset_external_path',
'microsoft_webcred_webpages_missing')
self.dataset_ext_bbc_folder = self.datasets_ext + parser.get('dataset_external_path', 'bbc_folder')
# internal datasets
self.dataset_microsoft_webcred = self.datasets + parser.get('dataset', 'microsoft_webcred')
# relative path
#models_rootdir = pkg_resources.resource_filename('resources', 'models') + "/"
# configurations
self.nr_threads_feature_extractor = parser.get('defacto', 'number_threads_feature_extractor')
self.search_engine_api = parser.get('search-engine', 'api')
self.search_engine_key = parser.get('search-engine', 'key')
self.search_engine_tot_resources = parser.get('search-engine', 'tot_resources')
self.open_page_rank_api = parser.get('search-engine', 'open_pagerank_api')
self.translation_id = parser.get('translation', 'microsoft_client_id')
self.translation_secret = parser.get('translation', 'microsoft_client_secret')
self.wot_key = parser.get('wot', 'key')
self.waybackmachine_tot = parser.get('waybackmachine', 'tot_archive_lookup')
self.waybackmachine_weight = parser.get('waybackmachine', 'weight_domain')
fine = True
break
#config.readfp(source)
except IOError:
pass
if fine is False:
            raise ValueError('error trying to read the config file (defacto.ini)! Please set DEFACTO_CONF to its '
                             'directory or place the file in your home dir')
else:
if len(self.logger.handlers) == 0:
# first file logger (can add more..)
now = datetime.datetime.now()
self.set_logger(self.dir_log + 'defacto_' + now.strftime("%Y-%m-%d") + '.log')
#ini_file = pkg_resources.resource_filename('resource', "horus.conf")
#rootdir = os.getcwd()
#
@staticmethod
def get_report():
return 'to be implemented'
|
152967
|
from brainflow.board_shim import *
from brainflow.exit_codes import *
from brainflow.data_filter import *
from brainflow.ml_model import *
from brainflow.utils import *
|
152991
|
import sandstone.lib.decorators
from sandstone.lib.handlers.base import BaseHandler
from terminado import TermSocket
class AuthTermSocket(TermSocket, BaseHandler):
@sandstone.lib.decorators.authenticated
def get(self, *args, **kwargs):
return super(AuthTermSocket, self).get(*args, **kwargs)
|
153004
|
import os
import dash
def _init_app():
""" Intializes the dash app."""
this_dir = os.path.dirname(os.path.abspath(__file__))
css_file = os.path.join(this_dir, "stylesheet.css")
app = dash.Dash(
__name__,
external_stylesheets=[css_file],
suppress_callback_exceptions=True,
)
return app
_app = _init_app()
def get_app():
return _app
|
153007
|
from ctypes import CDLL, POINTER, byref, c_void_p, c_size_t
from numba import cuda
from numba.cuda import (HostOnlyCUDAMemoryManager, GetIpcHandleMixin,
MemoryPointer, MemoryInfo)
# Open the CUDA runtime DLL and create bindings for the cudaMalloc, cudaFree,
# and cudaMemGetInfo functions.
cudart = CDLL('libcudart.so')
cudaMalloc = cudart.cudaMalloc
cudaMalloc.argtypes = [POINTER(c_size_t), c_size_t]
cudaFree = cudart.cudaFree
cudaFree.argtypes = [c_void_p]
cudaMemGetInfo = cudart.cudaMemGetInfo
cudaMemGetInfo.argtypes = [POINTER(c_size_t), POINTER(c_size_t)]
# Python functions for allocation, deallocation, and memory info
def my_alloc(size):
"""
Allocate `size` bytes of device memory and return a device pointer to the
allocated memory.
"""
ptr = c_size_t()
ret = cudaMalloc(byref(ptr), size)
if ret:
raise RuntimeError(f'Unexpected return code {ret} from cudaMalloc')
return ptr
def my_free(ptr):
"""
Free device memory pointed to by `ptr`.
"""
cudaFree(ptr)
def my_memory_info():
free = c_size_t()
total = c_size_t()
cudaMemGetInfo(byref(free), byref(total))
return free, total
# EMM Plugin implementation
class MyEMMPlugin(GetIpcHandleMixin, HostOnlyCUDAMemoryManager):
def memalloc(self, size):
ptr = my_alloc(size)
ctx = self.context
finalizer = make_finalizer(ptr.value)
return MemoryPointer(ctx, ptr, size, finalizer=finalizer)
def initialize(self):
# No setup required to use the EMM Plugin in a given context
pass
def get_memory_info(self):
free, total = my_memory_info()
return MemoryInfo(free=free.value, total=total.value)
@property
def interface_version(self):
return 1
def make_finalizer(ptr):
def finalizer():
my_free(ptr)
return finalizer
# If NUMBA_CUDA_MEMORY_MANAGER is set to this module (e.g.
# `NUMBA_CUDA_MEMORY_MANAGER=simple_emm_plugin`), then Numba will look at the
# _numba_memory_manager global to determine what class to use for memory
# management.
#
# This can be used to run the Numba test suite with the plugin, to verify that
# the plugin is working correctly. For example, if the directory of this module
# is on PYTHONPATH, then running:
#
# NUMBA_CUDA_MEMORY_MANAGER=simple_emm_plugin python -m numba.runtests \
# numba.cuda.tests
#
# will run all Numba CUDA tests with the plugin enabled.
_numba_memory_manager = MyEMMPlugin
if __name__ == '__main__':
# Quick test of setting the memory manager and allocating/deleting an array
cuda.set_memory_manager(MyEMMPlugin)
ctx = cuda.current_context()
print(f"Free before creating device array: {ctx.get_memory_info().free}")
x = cuda.device_array(1000)
print(f"Free after creating device array: {ctx.get_memory_info().free}")
del x
print(f"Free after freeing device array: {ctx.get_memory_info().free}")
|
153027
|
from appdirs import *
from pathlib import Path
import requests
from tqdm import tqdm
from enum import Enum
COVEO_INTERACTION_DATASET_S3_URL = 'https://reclist-datasets-6d3c836d-6djh887d.s3.us-west-2.amazonaws.com/coveo_sigir.zip'
SPOTIFY_PLAYLIST_DATASET_S3_URL = 'https://reclist-datasets-6d3c836d-6djh887d.s3.us-west-2.amazonaws.com/small_spotify_playlist.zip'
MOVIELENS_DATASET_S3_URL = "https://reclist-datasets-6d3c836d-6djh887d.s3.us-west-2.amazonaws.com/movielens_25m.zip"
def download_with_progress(url, destination):
"""
Downloads a file with a progress bar
:param url: url from which to download from
:destination: file path for saving data
"""
try:
response = requests.get(url, stream=True)
response.raise_for_status()
except requests.exceptions.RequestException as e:
raise SystemExit(e)
with tqdm.wrapattr(open(destination, "wb"), "write",
miniters=1, desc=url.split('/')[-1],
total=int(response.headers.get('content-length', 0))) as fout:
for chunk in response.iter_content(chunk_size=4096):
fout.write(chunk)
def get_cache_directory():
"""
Returns the cache directory on the system
"""
appname = "reclist"
appauthor = "reclist"
cache_dir = user_cache_dir(appname, appauthor)
Path(cache_dir).mkdir(parents=True, exist_ok=True)
return cache_dir
class Dataset(Enum):
COVEO = 'coveo'
COVEO_INTERNAL = 'coveo-internal'
MOVIELENS = 'movielens'
SPOTIFY = 'spotify'
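# Usage sketch (editorial addition; the local file name mirrors the URL above):
# dest = Path(get_cache_directory()) / "coveo_sigir.zip"
# download_with_progress(COVEO_INTERACTION_DATASET_S3_URL, str(dest))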
|
153100
|
import numpy as np
import string
from keras.preprocessing.text import Tokenizer
samples = ['The cat sat on the mat.', 'The dog ate my homework.']
print("单词级别")
print("构建标记索引,为每个单词指定唯一索引,从 1 开始")
word_token_index = {}
for sample in samples:
for word in sample.split():
if word not in word_token_index:
word_token_index[word] = len(word_token_index) + 1
print("对样本进行分词,只考虑前 10 个单词")
word_max_length = 10
word_results = np.zeros(shape=(len(samples), word_max_length, max(word_token_index.values()) + 1))
for i, sample in enumerate(samples):
for j, word in list(enumerate(sample.split()))[:word_max_length]:
index = word_token_index.get(word)
word_results[i, j, index] = 1
print("sample %d:" % i, word_results[i, :, :])
print("===========")
print("字符级别")
characters = string.printable
char_token_index = dict(zip(range(1, len(characters) + 1), characters))
char_max_length = 50
char_results = np.zeros((len(samples), char_max_length, max(char_token_index.keys()) + 1))
for i, sample in enumerate(samples):
for j, character in enumerate(sample):
index = char_token_index.get(character)
char_results[i, j, index] = 1
print("sample %d:" % i, char_results[i, :, :])
print("===========")
print("Keras 实现单词级别的 one-hot 编码")
tokenizer = Tokenizer(num_words=1000)
tokenizer.fit_on_texts(samples)
sequences = tokenizer.texts_to_sequences(samples)
one_hot_result = tokenizer.texts_to_matrix(samples, mode='binary')
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
|
153106
|
from .abc import ABCErrorHandler
from .error_handler import ErrorHandler
__all__ = ("ABCErrorHandler", "ErrorHandler")
|
153127
|
import os
import sys
import boto3
import botocore
import json
import time
from debug import debug_print
from debug import error_print
from botoHelper import get_boto_client
def handler(event, context):
debug_print(json.dumps(event, indent=2))
s3_event = event["Records"][0]["s3"]
s3_bucket = s3_event["bucket"]["name"]
s3_object = s3_event["object"]["key"]
client = get_boto_client("s3")
response = client.get_object(Bucket=s3_bucket, Key=s3_object)
content = json.loads(response['Body'].read().decode('utf-8'))
debug_print(json.dumps(content, indent=2))
step_function_arn = os.environ["ACCOUNT_CREATOR_STEPFUNCTION"]
invoke_statemachine(step_function_arn, content)
def invoke_statemachine(arn, input):
client = get_boto_client("stepfunctions")
account_name = input.get("accountName")
response = client.start_execution(
stateMachineArn=arn,
name="{}-creation-{}".format(account_name, time.time()),
input=json.dumps(input)
)
debug_print(response)
    return response
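# Shape of the S3 trigger event consumed by `handler` above (abridged; this is
# the standard AWS S3 notification format, bucket/key names are illustrative):
# {
#   "Records": [{"s3": {"bucket": {"name": "my-bucket"},
#                       "object": {"key": "account-request.json"}}}]
# }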
|
153153
|
import math
def amplitude_to_db(amplitude: float, reference: float = 1e-6) -> float:
"""
Convert amplitude from volts to decibel (dB).
Args:
amplitude: Amplitude in volts
reference: Reference amplitude. Defaults to 1 µV for dB(AE)
Returns:
Amplitude in dB(ref)
"""
return 20 * math.log10(amplitude / reference)
def db_to_amplitude(amplitude_db: float, reference: float = 1e-6) -> float:
"""
Convert amplitude from decibel (dB) to volts.
Args:
amplitude_db: Amplitude in dB
reference: Reference amplitude. Defaults to 1 µV for dB(AE)
Returns:
Amplitude in volts
"""
return reference * 10 ** (amplitude_db / 20)
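# Round-trip sanity check (follows directly from the formulas above):
# amplitude_to_db(1e-3) -> 60.0 dB(AE), and db_to_amplitude(60.0) -> 1e-3 V.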
|
153166
|
import time
import threading
import asyncio
from aiohttp import ClientSession
from aiohttp_proxy import ProxyConnector, ProxyType
class Checker(object):
def __init__(self, proxies: list, proxy_type: str, threads: int, timeout: int, savedir: str):
# Arguments:
self.proxies = set(proxies)
self.threads = threads
self.timeout = timeout
self.savedir = savedir
proxy_type = proxy_type.lower()
if proxy_type not in ("http", "https", "socks4", "socks5"):
raise ValueError("`proxy_type` must be either http, https, socks4, or socks5")
if proxy_type == "http":
self.proxy_type = ProxyType.HTTP
elif proxy_type == "https":
self.proxy_type = ProxyType.HTTPS
elif proxy_type == "socks4":
self.proxy_type = ProxyType.SOCKS4
else:
self.proxy_type = ProxyType.SOCKS5
# Counters:
self.good = 0
self.bad = 0
self.checked = 0
self.total = len(self.proxies)
self.start_time = 0
self.cpm = 0
self.elapsed = 0
# Thread Lock:
self.lock = asyncio.Lock()
# Running:
self.running = False
def parse_proxy(self, proxy):
spl = proxy.split(":")
return ProxyConnector(
host=spl[0],
port=int(spl[1]),
proxy_type=self.proxy_type,
username=None if len(spl) != 4 else spl[2],
password=None if len(spl) != 4 else spl[3]
)
async def check(self, proxy):
try:
async with ClientSession(connector=self.parse_proxy(proxy)) as session:
async with session.get("http://httpbin.org/ip", timeout=self.timeout) as response:
if response.ok:
async with self.lock:
                            with open(self.savedir, "a", encoding="utf-8", errors="ignore") as fout:
                                fout.write(f"{proxy}\n")
self.good += 1
else:
self.bad += 1
except ConnectionResetError:
self.bad += 1
except Exception:
self.bad += 1
self.checked += 1
async def worker(self, queue: asyncio.Queue):
while self.running:
try:
proxy = queue.get_nowait()
except asyncio.QueueEmpty:
return
await self.check(proxy)
queue.task_done()
def stop_thread(self):
while True:
time.sleep(1)
if self.total == self.checked:
self.running = False
return
def cpm_counter(self):
while self.running:
run_time = int(time.time()) - self.start_time
if run_time > 0:
                self.cpm = int(self.checked / run_time * 60)
time.sleep(0.05)
def elapsed_counter(self):
while self.running:
self.elapsed = time.strftime('%H:%M:%S', time.gmtime(int(time.time() - self.start_time)))
time.sleep(0.5)
def start(self):
asyncio.set_event_loop(asyncio.ProactorEventLoop())
self.running = True
self.start_time = int(time.time())
threading.Thread(target=self.stop_thread, daemon=True).start()
threading.Thread(target=self.elapsed_counter, daemon=True).start()
threading.Thread(target=self.cpm_counter, daemon=True).start()
queue = asyncio.Queue()
for proxy in self.proxies:
queue.put_nowait(proxy)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*[self.worker(queue) for _ in range(self.threads)]))
loop.close()
|
153254
|
from django import template
from django.urls import reverse
from tickets.core.middlewares import get_current_request
register = template.Library()
@register.simple_tag
def is_active(url):
request = get_current_request()
# Main idea is to check if the url and the current path is a match
if request.path == reverse(url):
return "active"
return ""
|
153260
|
from typing import Union
import spacy
regex = [r"\bsofa\b"]
method_regex = (
r"sofa.*?((?P<max>max\w*)|(?P<vqheures>24h\w*)|"
r"(?P<admission>admission\w*))(?P<after_value>(.|\n)*)"
)
value_regex = r".*?.[\n\W]*?(\d+)[^h\d]"
score_normalization_str = "score_normalization.sofa"
@spacy.registry.misc(score_normalization_str)
def score_normalization(extracted_score: Union[str, None]):
"""
Sofa score normalization.
If available, returns the integer value of the SOFA score.
"""
score_range = list(range(0, 30))
if (extracted_score is not None) and (int(extracted_score) in score_range):
return int(extracted_score)
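# Example: the registered normalizer turns the captured string into an int,
# e.g. score_normalization("7") -> 7; a missing or out-of-range value falls
# through and yields None.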
|
153262
|
import torch
import torch.nn as nn
from losses import *
from net_pwc import *
from model_base import *
from reblur_package import *
from flow_utils import *
class ModelSelfFlowNet(ModelBase):
def __init__(self, opts):
super(ModelSelfFlowNet, self).__init__()
self.opts = opts
# create network
self.model_names = ['flow']
self.net_flow = PWCDCNet().cuda()
# print network
self.print_networks(self.net_flow)
if not opts.is_training or opts.continue_train:
self.load_checkpoint(opts.model_label)
self.upsampleX4 = nn.Upsample(scale_factor=4, mode='nearest')
self.downsampleX2 = nn.AvgPool2d(2, stride=2)
if opts.is_training:
# initialize optimizers
self.optimizer_G = torch.optim.Adam(self.net_flow.parameters(), lr=opts.lr)
self.optimizer_names = ['G']
self.build_lr_scheduler()
# define loss functions
self.loss_fn_data = L1Loss()
# create flow blurrer
self.mask_fn = [None] * self.opts.pwc_lvls
for l in range(self.opts.pwc_lvls):
scale = 2 ** -(l+2) # we start at quarter resolution
H = int(scale * opts.crop_sz_H)
W = int(scale * opts.crop_sz_W)
self.mask_fn[l] = FlowWarpMask.create_with_implicit_mesh(opts.batch_sz,2,H,W)
def set_input(self, _input):
imgs, = _input
im_prev = imgs[:,:self.opts.n_channels,:,:]
im_curr = imgs[:,self.opts.n_channels:self.opts.n_channels*2,:,:]
self.im_prev = im_prev.cuda()
self.im_curr = im_curr.cuda()
def forward(self):
pred_flows_prev_to_curr = self.net_flow(self.im_prev, self.im_curr)
pred_flows_curr_to_prev = self.net_flow(self.im_curr, self.im_prev)
pred_flow_prev_to_curr_0 = self.upsampleX4(pred_flows_prev_to_curr[0])*20.0
pred_flow_curr_to_prev_0 = self.upsampleX4(pred_flows_curr_to_prev[0])*20.0
pred_flow_prev_to_curr_1 = self.upsampleX4(pred_flows_prev_to_curr[1])*10.0 # 20 / (2 ^ 1)
pred_flow_curr_to_prev_1 = self.upsampleX4(pred_flows_curr_to_prev[1])*10.0 # 20 / (2 ^ 1)
pred_flow_prev_to_curr_2 = self.upsampleX4(pred_flows_prev_to_curr[2])*5.0 # 20 / (2 ^ 2)
pred_flow_curr_to_prev_2 = self.upsampleX4(pred_flows_curr_to_prev[2])*5.0 # 20 / (2 ^ 2)
return pred_flows_prev_to_curr, \
pred_flows_curr_to_prev, \
[pred_flow_prev_to_curr_0, pred_flow_prev_to_curr_1, pred_flow_prev_to_curr_2], \
[pred_flow_curr_to_prev_0, pred_flow_curr_to_prev_1, pred_flow_curr_to_prev_2]
def optimize_parameters(self):
pred_flows_prev_to_curr, \
pred_flows_curr_to_prev, \
self.pred_flow_prev_to_curr_0, \
self.pred_flow_curr_to_prev_0 = self.forward()
im_prev_clone = self.im_prev.clone()
im_curr_clone = self.im_curr.clone()
res = (im_prev_clone.shape[2] * im_prev_clone.shape[3]) / self.opts.pwc_lvls
pyr_weights = [0.005, 0.01, 0.02, 0.08, 0.32]
pyr_weights = [i * res for i in pyr_weights]
self.syn_im_prev = [None] * self.opts.pwc_lvls
self.syn_im_curr = [None] * self.opts.pwc_lvls
self.syn_flow_prev_to_curr = [None] * self.opts.pwc_lvls
self.syn_flow_curr_to_prev = [None] * self.opts.pwc_lvls
self.mask_im_prev = [None] * self.opts.pwc_lvls
self.mask_im_curr = [None] * self.opts.pwc_lvls
self.loss_lr_image = torch.zeros([1], requires_grad=True).cuda()
self.loss_lr_flow = torch.zeros([1], requires_grad=True).cuda()
for l in range(self.opts.pwc_lvls):
if l > 0:
im_prev_clone = self.downsampleX2(im_prev_clone)
im_curr_clone = self.downsampleX2(im_curr_clone)
fs = 20.0 / (2 ** l)
self.mask_im_prev[l] = self.upsampleX4(self.mask_fn[l](pred_flows_prev_to_curr[l].detach()*fs).float())
self.mask_im_curr[l] = self.upsampleX4(self.mask_fn[l](pred_flows_curr_to_prev[l].detach()*fs).float())
self.syn_im_prev[l], _ = warp_image_flow(im_curr_clone, self.pred_flow_prev_to_curr_0[l])
self.syn_im_curr[l], _ = warp_image_flow(im_prev_clone, self.pred_flow_curr_to_prev_0[l])
self.syn_flow_prev_to_curr[l], _ = warp_image_flow(self.pred_flow_curr_to_prev_0[l], self.pred_flow_prev_to_curr_0[l])
self.syn_flow_curr_to_prev[l], _ = warp_image_flow(self.pred_flow_prev_to_curr_0[l], self.pred_flow_curr_to_prev_0[l])
self.syn_flow_prev_to_curr[l] = -1.*self.syn_flow_prev_to_curr[l]
self.syn_flow_curr_to_prev[l] = -1.*self.syn_flow_curr_to_prev[l]
self.loss_lr_image += pyr_weights[l] * (self.loss_fn_data(self.syn_im_prev[l], im_prev_clone, self.mask_im_prev[l], True) \
+ self.loss_fn_data(self.syn_im_curr[l], im_curr_clone, self.mask_im_curr[l], True))
self.loss_lr_flow += pyr_weights[l] * self.opts.pwc_fwdbwd * \
(self.loss_fn_data(self.syn_flow_prev_to_curr[l], self.pred_flow_prev_to_curr_0[l], self.mask_im_prev[l], True) +\
self.loss_fn_data(self.syn_flow_curr_to_prev[l], self.pred_flow_curr_to_prev_0[l], self.mask_im_curr[l], True))
self.loss_G = self.loss_lr_image + self.loss_lr_flow
#=============== Optimize generator =============#
self.optimizer_G.zero_grad()
self.loss_G.backward()
self.optimizer_G.step()
# save networks to file
def save_checkpoint(self, label):
self.save_network(self.net_flow, 'flow', label, self.opts.log_dir)
def load_checkpoint(self, label):
self.load_network(self.net_flow, 'flow', label, self.opts.log_dir)
def get_current_scalars(self):
losses = {}
losses['loss_lr_image'] = self.loss_lr_image.item()
losses['loss_lr_flow'] = self.loss_lr_flow.item()
losses['loss_G'] = self.loss_G.item()
return losses
def get_current_visuals(self):
output_visuals = {}
output_visuals['im_prev'] = self.im_prev
output_visuals['im_curr'] = self.im_curr
output_visuals['pred_flows_prev_to_curr_0'] = torch.from_numpy(flow_to_numpy_rgb(self.pred_flow_prev_to_curr_0[0]).transpose(0,3,1,2)).float()/255.
output_visuals['pred_flows_prev_to_curr_1'] = torch.from_numpy(flow_to_numpy_rgb(self.pred_flow_prev_to_curr_0[1]).transpose(0,3,1,2)).float()/255.
output_visuals['pred_flows_prev_to_curr_2'] = torch.from_numpy(flow_to_numpy_rgb(self.pred_flow_prev_to_curr_0[2]).transpose(0,3,1,2)).float()/255.
output_visuals['mask_im_curr']=self.mask_im_curr[0].clone().repeat(1,3,1,1)
return output_visuals
|
153268
|
from urllib.parse import urljoin
import pytest
import requests
from selenium.webdriver.common.keys import Keys
from tests.selenium_tests.conftest import skip_selenium_tests, first_panel_on_excerpts_export_overview_xpath
from tests.selenium_tests.new_excerpt import new_excerpt
@skip_selenium_tests
@pytest.mark.parametrize("file_name, file_format", [("gdb", 'id_formats_1'), ("shp", 'id_formats_2'),
("gpkg", 'id_formats_3'), ("spatialite", 'id_formats_4'),
("img_tdb", 'id_formats_5')])
def test_new_excerpt(base_url, login, file_name, file_format, selenium, reload_until_condition):
new_excerpt(selenium, base_url)
# insert excerpt name
excerpt_name = selenium.find_element_by_id('id_name')
excerpt_name.send_keys(file_name)
# choose the file format
formats = selenium.find_element_by_id(file_format)
formats.click()
# submit
create = selenium.find_element_by_name('submit')
create.send_keys(Keys.RETURN)
# wait until download link appears
selenium.find_element_by_xpath(first_panel_on_excerpts_export_overview_xpath + "div[1]/h3")
first_a = first_panel_on_excerpts_export_overview_xpath + "div[2]/div[1]/div[1]/div[2]/div/div[1]/p/a"
element = reload_until_condition(selenium.find_element_by_xpath, first_a)
# check if the download link is a valid link
url = urljoin(base_url, element.get_attribute('href'))
r = requests.head(url)
assert r.status_code == requests.codes.ok
|
153392
|
def func():
print("func")
func() # $ resolved=func
class MyBase:
def base_method(self):
print("base_method", self)
class MyClass(MyBase):
def method1(self):
print("method1", self)
@classmethod
def cls_method(cls):
print("cls_method", cls)
@staticmethod
def static():
print("static")
def method2(self):
print("method2", self)
self.method1() # $ resolved=method1
self.base_method()
self.cls_method() # $ resolved=cls_method
self.static() # $ resolved=static
MyClass.cls_method() # $ resolved=cls_method
MyClass.static() # $ resolved=static
x = MyClass()
x.base_method()
x.method1()
x.cls_method()
x.static()
x.method2()
|
153447
|
import logging
import os
from collections import namedtuple
import googleapiclient.discovery
import neo4j
from googleapiclient.discovery import Resource
from oauth2client.client import ApplicationDefaultCredentialsError
from oauth2client.client import GoogleCredentials
from cartography.config import Config
from cartography.intel.gsuite import api
from cartography.util import timeit
# GSuite Delegated admin e-mail https://developers.google.com/admin-sdk/directory/v1/guides/delegation
GSUITE_DELEGATED_ADMIN = os.environ.get('GSUITE_DELEGATED_ADMIN')
GSUITE_CREDS = os.environ.get('GSUITE_GOOGLE_APPLICATION_CREDENTIALS')
OAUTH_SCOPE = [
'https://www.googleapis.com/auth/admin.directory.user.readonly',
'https://www.googleapis.com/auth/admin.directory.group.readonly',
'https://www.googleapis.com/auth/admin.directory.group.member',
]
logger = logging.getLogger(__name__)
Resources = namedtuple('Resources', 'admin')
def _get_admin_resource(credentials: GoogleCredentials) -> Resource:
"""
Instantiates a Google API resource object to call the Google API.
Used to pull users and groups. See https://developers.google.com/admin-sdk/directory/v1/guides/manage-users
:param credentials: The GoogleCredentials object
:return: An admin api resource object
"""
return googleapiclient.discovery.build('admin', 'directory_v1', credentials=credentials, cache_discovery=False)
def _initialize_resources(credentials: GoogleCredentials) -> Resources:
"""
Create namedtuple of all resource objects necessary for Google API data gathering.
:param credentials: The GoogleCredentials object
:return: namedtuple of all resource objects
"""
return Resources(
admin=_get_admin_resource(credentials),
)
@timeit
def start_gsuite_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
"""
Starts the GSuite ingestion process by initializing
:param neo4j_session: The Neo4j session
:param config: A `cartography.config` object
:return: Nothing
"""
common_job_parameters = {
"UPDATE_TAG": config.update_tag,
}
try:
credentials = GoogleCredentials.from_stream(GSUITE_CREDS)
credentials = credentials.create_scoped(OAUTH_SCOPE)
credentials = credentials.create_delegated(GSUITE_DELEGATED_ADMIN)
except ApplicationDefaultCredentialsError as e:
        logger.debug('Error occurred calling GoogleCredentials.from_stream().', exc_info=True)
logger.error(
(
"Unable to initialize GSuite creds. If you don't have GSuite data or don't want to load "
'Gsuite data then you can ignore this message. Otherwise, the error code is: %s '
                'Make sure your GSuite credentials are configured correctly and that your credentials file (if any) is valid. '
'For more details see README'
),
e,
)
return
resources = _initialize_resources(credentials)
api.sync_gsuite_users(neo4j_session, resources.admin, config.update_tag, common_job_parameters)
api.sync_gsuite_groups(neo4j_session, resources.admin, config.update_tag, common_job_parameters)
|
153493
|
import time
import os
import torch
from utils.utils import NanError
def depth2class(depth, depth_start, depth_interval, depth_num, inv=False):
if not inv:
return (depth - depth_start) / (depth_interval + 1e-9)
else:
depth_end = depth_start + (depth_num-1) * depth_interval
inv_interv = (1/(depth_start+1e-9) - 1/(depth_end+1e-9)) / (depth_num-1+1e-9)
return (1/(depth+1e-9) - 1/(depth_end+1e-9)) / (inv_interv + 1e-9)
def class2depth(class_, depth_start, depth_interval, depth_num, inv=False):
if not inv:
return depth_start + class_ * depth_interval
else:
depth_end = depth_start + (depth_num-1) * depth_interval
inv_interv = (1/(depth_start+1e-9) - 1/(depth_end+1e-9)) / (depth_num-1+1e-9)
return 1/( 1/(depth_end+1e-9) + class_ * inv_interv + 1e-9)
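# Sanity sketch (illustrative values, e.g. DTU-style depth sampling):
# with depth_start=425.0, depth_interval=2.5, depth_num=192,
#   class2depth(10, 425.0, 2.5, 192)    -> 450.0
#   depth2class(450.0, 425.0, 2.5, 192) -> ~10.0 (up to the 1e-9 epsilon)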
def get_homographies(left_cam, right_cam, depth_num, depth_start, depth_interval, inv=False):
# n244 n244 1 n111/n1hw n111/n1hw
with torch.no_grad():
n, _, sh, sw = depth_start.size()
n, _, ih, iw = depth_interval.size()
d = depth_num
# cameras (K, R, t)
R_left = left_cam[:, 0, :3, :3] # n33
R_right = right_cam[:, 0, :3, :3] # n33
t_left = left_cam[:, 0, :3, 3:4] # n31
t_right = right_cam[:, 0, :3, 3:4] # n31
K_left = left_cam[:, 1, :3, :3] # n33
K_right = right_cam[:, 1, :3, :3] # n33
# depth nd1111/ndhw11
if not inv:
depth = depth_start + depth_interval * torch.arange(0, depth_num, dtype=left_cam.dtype, device=left_cam.device).view(1,d,1,1)
else:
depth_end = depth_start + (depth_num-1) * depth_interval
inv_interv = (1/(depth_start+1e-9) - 1/(depth_end+1e-9)) / (depth_num-1+1e-9)
depth = 1/( 1/(depth_end+1e-9) + inv_interv * torch.arange(0, depth_num, dtype=left_cam.dtype, device=left_cam.device).view(1,d,1,1) )
depth = depth.unsqueeze(-1).unsqueeze(-1)
# preparation
K_left_inv = K_left.float().inverse().to(left_cam.dtype)
R_left_trans = R_left.transpose(-2, -1)
R_right_trans = R_right.transpose(-2, -1)
fronto_direction = R_left[:, 2:3, :3] # n13
c_left = -R_left_trans @ t_left
c_right = -R_right_trans @ t_right # n31
c_relative = c_right - c_left
# compute
temp_vec = (c_relative @ fronto_direction).view(n,1,1,1,3,3) # n11133
middle_mat0 = torch.eye(3, dtype=left_cam.dtype, device=left_cam.device).view(1,1,1,1,3,3) - temp_vec / (depth + 1e-9) # ndhw33
middle_mat1 = (R_left_trans @ K_left_inv).view(n,1,1,1,3,3) # n11133
middle_mat2 = (middle_mat0 @ middle_mat1) # ndhw33
homographies = K_right.view(n,1,1,1,3,3) @ R_right.view(n,1,1,1,3,3) @ middle_mat2 # ndhw33
if (homographies!=homographies).any():
raise NanError
return homographies
def get_pixel_grids(height, width):
x_coord = (torch.arange(width, dtype=torch.float32).cuda() + 0.5).repeat(height, 1)
y_coord = (torch.arange(height, dtype=torch.float32).cuda() + 0.5).repeat(width, 1).t()
ones = torch.ones_like(x_coord)
indices_grid = torch.stack([x_coord, y_coord, ones], dim=-1).unsqueeze(-1) # hw31
return indices_grid
def interpolate(image, coord): # nchw, nhw2 => nchw
with torch.no_grad():
warped_coord = coord.clone()
warped_coord[..., 0] /= (warped_coord.size()[2])
warped_coord[..., 1] /= (warped_coord.size()[1])
warped_coord = (warped_coord * 2 - 1).clamp(-1.1, 1.1)
warped = torch.nn.functional.grid_sample(image, warped_coord, mode='bilinear', padding_mode='zeros', align_corners=False)
if (warped != warped).any():
raise NanError
return warped
def homography_warping(input, H): # nchw, n33/nhw33 -> nchw
if len(H.size()) == 3: H = H.view(-1, 1, 1, 3, 3)
with torch.no_grad():
pixel_grids = get_pixel_grids(*input.size()[-2:]).unsqueeze(0) # 1hw31
warped_homo_coord = (H @ pixel_grids).squeeze(-1) # nhw3
warped_coord = warped_homo_coord[..., :2] / (warped_homo_coord[..., 2:3] + 1e-9) # nhw2
warped = interpolate(input, warped_coord)
return warped # nchw
|
153515
|
import argparse
import json
from bart_score import BARTScorer
def main(args):
instances = []
with open(args.input_file, "r") as f:
for line in f:
data = json.loads(line)
instances.append(data)
sources = []
targets = []
for instance in instances:
candidate = instance["candidate"]
for reference in instance["references"]:
sources.append(candidate)
targets.append(reference)
if args.device == -1:
device = "cpu"
else:
device = f"cuda:{args.device}"
if args.model == "default":
metric = BARTScorer(device=device, checkpoint="facebook/bart-large")
elif args.model == "cnn":
metric = BARTScorer(device=device, checkpoint="facebook/bart-large-cnn")
elif args.model == "parabank":
metric = BARTScorer(device=device, checkpoint="facebook/bart-large-cnn")
metric.load(path="bart.pth")
else:
raise Exception(f"Unknown model: {args.model}")
scores = metric.score(sources, targets, batch_size=args.batch_size)
with open(args.output_file, "w") as out:
index = 0
for instance in instances:
total = 0
for _ in instance["references"]:
total += scores[index]
index += 1
bartscore = total / len(instance["references"])
out.write(json.dumps({"bartscore": bartscore}) + "\n")
if __name__ == "__main__":
argp = argparse.ArgumentParser()
argp.add_argument("--input-file", required=True)
argp.add_argument("--device", required=True, type=int)
argp.add_argument("--batch-size", required=True, type=int)
argp.add_argument("--model", required=True)
argp.add_argument("--output-file", required=True)
args = argp.parse_args()
main(args)
|
153521
|
import numpy as np
from .utils import memo, validate_tuple
__all__ = ['binary_mask', 'r_squared_mask', 'cosmask', 'sinmask',
'theta_mask']
@memo
def binary_mask(radius, ndim):
"Elliptical mask in a rectangular array"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
return sum(r) <= 1
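# Example: binary_mask(1, 2) is a 3x3 boolean "plus":
# [[False,  True, False],
#  [ True,  True,  True],
#  [False,  True, False]]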
@memo
def N_binary_mask(radius, ndim):
return np.sum(binary_mask(radius, ndim))
@memo
def r_squared_mask(radius, ndim):
"Mask with values r^2 inside radius and 0 outside"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
r2 = np.sum(coords**2, 0).astype(int)
r2[sum(r) > 1] = 0
return r2
@memo
def x_squared_masks(radius, ndim):
"Returns ndim masks with values x^2 inside radius and 0 outside"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
masks = np.asarray(coords**2, dtype=int)
masks[:, sum(r) > 1] = 0
return masks
@memo
def theta_mask(radius):
"""Mask of values giving angular position relative to center. The angle is
defined according to ISO standards in which the angle is measured counter-
clockwise from the x axis, measured in a normal coordinate system with y-
axis pointing up and x axis pointing right.
In other words: for increasing angle, the coordinate moves counterclockwise
around the feature center starting on the right side.
However, in most images, the y-axis will point down so that the coordinate
will appear to move clockwise around the feature center.
"""
# 2D only
radius = validate_tuple(radius, 2)
tan_of_coord = lambda y, x: np.arctan2(y - radius[0], x - radius[1])
return np.fromfunction(tan_of_coord, [r * 2 + 1 for r in radius])
@memo
def sinmask(radius):
"Sin of theta_mask"
return np.sin(2*theta_mask(radius))
@memo
def cosmask(radius):
"Sin of theta_mask"
return np.cos(2*theta_mask(radius))
@memo
def gaussian_kernel(sigma, truncate=4.0):
"1D discretized gaussian"
lw = int(truncate * sigma + 0.5)
x = np.arange(-lw, lw+1)
result = np.exp(x**2/(-2*sigma**2))
return result / np.sum(result)
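# Note: the kernel has 2*int(truncate*sigma + 0.5) + 1 taps and is normalized
# to sum to 1; e.g. gaussian_kernel(1.0) has 9 taps.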
def get_slice(coords, shape, radius):
"""Returns the slice and origin that belong to ``slice_image``"""
# interpret parameters
ndim = len(shape)
radius = validate_tuple(radius, ndim)
coords = np.atleast_2d(np.round(coords).astype(int))
# drop features that have no pixels inside the image
in_bounds = np.array([(coords[:, i] >= -r) & (coords[:, i] < sh + r)
for i, sh, r in zip(range(ndim), shape, radius)])
coords = coords[np.all(in_bounds, axis=0)]
# return if no coordinates are left
if len(coords) == 0:
return tuple([slice(None, 0)] * ndim), None
# calculate the box
lower = coords.min(axis=0) - radius
upper = coords.max(axis=0) + radius + 1
# calculate the slices
origin = [None] * ndim
slices = [None] * ndim
for i, sh, low, up in zip(range(ndim), shape, lower, upper):
lower_bound_trunc = max(0, low)
upper_bound_trunc = min(sh, up)
slices[i] = slice(int(round(lower_bound_trunc)),
int(round(upper_bound_trunc)))
origin[i] = lower_bound_trunc
return tuple(slices), origin
def slice_image(pos, image, radius):
""" Slice a box around a group of features from an image.
The box is the smallest box that contains all coordinates up to `radius`
from any coordinate.
Parameters
----------
image : ndarray
The image that will be sliced
pos : iterable
An iterable (e.g. list or ndarray) that contains the feature positions
radius : number or tuple of numbers
Defines the size of the slice. Every pixel that has a distance lower or
equal to `radius` to a feature position is included.
Returns
-------
tuple of:
- the sliced image
- the coordinate of the slice origin (top-left pixel)
"""
slices, origin = get_slice(pos, image.shape, radius)
return image[slices], origin
def get_mask(pos, shape, radius, include_edge=True, return_masks=False):
""" Create a binary mask that masks pixels farther than radius to all
given feature positions.
Optionally returns the masks that recover the individual feature pixels from
a masked image, as follows: ``image[mask][masks_single[i]]``
Parameters
----------
pos : ndarray (N x 2 or N x 3)
Feature positions
shape : tuple
The shape of the image
radius : number or tuple
Radius of the individual feature masks
include_edge : boolean, optional
Determine whether pixels at exactly one radius from a position are
included. Default True.
return_masks : boolean, optional
Also return masks that recover the single features from a masked image.
Default False.
Returns
-------
ndarray containing a binary mask
if return_masks==True, returns a tuple of [masks, masks_singles]
"""
ndim = len(shape)
radius = validate_tuple(radius, ndim)
pos = np.atleast_2d(pos)
if include_edge:
in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) <= 1
for p in pos]
else:
in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) < 1
for p in pos]
mask_total = np.any(in_mask, axis=0).T
if return_masks:
masks_single = np.empty((len(pos), mask_total.sum()), dtype=bool)
for i, _in_mask in enumerate(in_mask):
masks_single[i] = _in_mask.T[mask_total]
return mask_total, masks_single
else:
return mask_total
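# Example: get_mask([(2, 2)], (5, 5), 1) marks the five pixels within
# distance 1 of (2, 2) -- a plus-shaped True region in a 5x5 array.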
def mask_image(pos, image, radius, origin=None, invert=False,
include_edge=None):
""" Masks an image so that pixels farther than radius to all given feature
positions become 0.
Parameters
----------
pos : ndarray
Feature positions (N x 2 or N x 3)
image : ndarray
radius : number or tuple
Radius of the individual feature masks
origin : tuple, optional
The topleft coordinate (origin) of the image.
invert : boolean, optional
If invert==True, the features instead of the background will become 0.
include_edge : boolean, optional
Determine whether pixels at exactly one radius from a position are
included in the feature mask.
Defaults to True if invert==False, and to False if invert==True.
"""
if origin is not None:
pos = np.atleast_2d(pos) - np.array(origin)[np.newaxis, :]
if include_edge is None:
include_edge = not invert
mask_cluster = get_mask(pos, image.shape, radius, include_edge=include_edge)
if invert:
mask_cluster = ~mask_cluster
return image * mask_cluster.astype(np.uint8)
|
153571
|
from django.conf.urls import url, include
from kitsune.groups import views
group_patterns = [
url(r"^$", views.profile, name="groups.profile"),
url(r"^/edit$", views.edit, name="groups.edit"),
url(r"^/avatar$", views.edit_avatar, name="groups.edit_avatar"),
url(r"^/avatar/delete$", views.delete_avatar, name="groups.delete_avatar"),
url(r"^/add-member$", views.add_member, name="groups.add_member"),
url(r"^/remove-member/(?P<user_id>\d+)$", views.remove_member, name="groups.remove_member"),
url(r"^/add-leader$", views.add_leader, name="groups.add_leader"),
url(r"^/remove-leader/(?P<user_id>\d+)$", views.remove_leader, name="groups.remove_leader"),
]
urlpatterns = [
url(r"^$", views.list, name="groups.list"),
url(r"^/join-contributors$", views.join_contributors, name="groups.join_contributors"),
url(r"^/(?P<group_slug>[^/]+)", include(group_patterns)),
]
|
153641
|
import socket
host_1 = '127.0.0.1'
host_2 = '127.0.0.1'
port_1 = 8000
port_2 = 8001
# Server 1 must serve client 1
ServerSock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ServerSock1.bind(('', port_1)) # connect server 1 to port 1
ServerSock1.listen(1)
print('(*) Server 1 started on ('+str(host_1)+':'+str(port_1)+')')
ServerSock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ServerSock2.connect((host_2, port_2))
while True:
(NewClientSock1, addr1) = ServerSock1.accept()
print('(@) Client connected ' + str(addr1))
ClientMessage = NewClientSock1.recv(1000)
if ClientMessage:
        print('(#) Message received from client --> '+ClientMessage.decode("utf-8"))
NewClientSock1.send(ClientMessage)
        # server 1 forwards the client's message to server 2,
        # acting as a client of server 2
        print('\n(**) Sending msg to server 2...')
        ServerSock2.send(ClientMessage)
        print('(OK) Message sent!')
NewClientSock1.close()
|
153644
|
from ScopeFoundry import Measurement
from ScopeFoundry.scanning.base_raster_scan import BaseRaster2DScan
import time
import numpy as np
class BaseNonRaster2DScan(BaseRaster2DScan):
name = "base_non_raster_2Dscan"
def gen_raster_scan(self, gen_arrays=True):
self.Npixels = self.Nh.val*self.Nv.val
self.scan_shape = (1, self.Nv.val, self.Nh.val)
if gen_arrays:
#print "t0", time.time() - t0
self.create_empty_scan_arrays()
#print "t1", time.time() - t0
# t0 = time.time()
# pixel_i = 0
# for jj in range(self.Nv.val):
# #print "tjj", jj, time.time() - t0
# self.scan_slow_move[pixel_i] = True
# for ii in range(self.Nh.val):
# self.scan_v_positions[pixel_i] = self.v_array[jj]
# self.scan_h_positions[pixel_i] = self.h_array[ii]
# self.scan_index_array[pixel_i,:] = [0, jj, ii]
# pixel_i += 1
# print "for loop raster gen", time.time() - t0
t0 = time.time()
H, V = np.meshgrid(self.h_array, self.v_array)
self.scan_h_positions[:] = H.flat
self.scan_v_positions[:] = V.flat
II,JJ = np.meshgrid(np.arange(self.Nh.val), np.arange(self.Nv.val))
self.scan_index_array[:,1] = JJ.flat
self.scan_index_array[:,2] = II.flat
#self.scan_v_positions
print("array flatten raster gen", time.time() - t0)
    def gen_spiral_scan(self, gen_arrays=True):
        self.Npixels = self.Nh.val*self.Nv.val
        # Kept 3D so the (k, j, i) index convention matches gen_raster_scan.
        self.scan_shape = (1, 1, self.Npixels)
        if gen_arrays:
            self.create_empty_scan_arrays()
            t0 = time.time()
            # Archimedean spiral: radius grows linearly with angle. This is a
            # sketch that assumes h_array and v_array span the scan extents,
            # as they do in gen_raster_scan.
            ix = np.linspace(0, np.pi*max(self.Nh.val, self.Nv.val), self.Npixels)
            r = ix/ix[-1] if ix[-1] > 0 else ix
            h_c = 0.5*(self.h_array[0] + self.h_array[-1])
            v_c = 0.5*(self.v_array[0] + self.v_array[-1])
            h = h_c + 0.5*(self.h_array[-1] - self.h_array[0])*r*np.cos(ix)
            v = v_c + 0.5*(self.v_array[-1] - self.v_array[0])*r*np.sin(ix)
            self.scan_h_positions[:] = h
            self.scan_v_positions[:] = v
            # Flat pixel index along the spiral.
            self.scan_index_array[:, 1] = 0
            self.scan_index_array[:, 2] = np.arange(self.Npixels)
            print("spiral scan gen", time.time() - t0)
153650
from django.conf.urls.defaults import url, patterns
from django.contrib import admin
from django.shortcuts import render_to_response
from django.template import RequestContext
from django_histograms.utils import Histogram
class HistogramAdmin(admin.ModelAdmin):
histogram_field = None
histogram_months = 2
histogram_days = None
def get_urls(self):
urlpatterns = patterns("",
url(r"^report/$", self.admin_site.admin_view(self.report_view),
name="%s_report" % self.model._meta.object_name)
)
return urlpatterns + super(HistogramAdmin, self).get_urls()
def report_view(self, request):
        assert self.histogram_field is not None, "HistogramAdmin subclasses must set histogram_field"
histogram = Histogram(self.model, self.histogram_field,
self.queryset(request), months=self.histogram_months,
days=self.histogram_days)
context = {
'title': "Histogram for %s" % self.model._meta.object_name,
'histogram': histogram,
}
return render_to_response("admin/report.html", context,
context_instance=RequestContext(request, current_app=self.admin_site.name))
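# Minimal usage sketch (hypothetical Entry model with a `created` DateTimeField):
#   class EntryAdmin(HistogramAdmin):
#       histogram_field = "created"
#       histogram_months = 3
#   admin.site.register(Entry, EntryAdmin)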
153697
from functools import singledispatch
from functools import update_wrapper
class singledispatchmethod:
"""Single-dispatch generic method descriptor.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError(f"{func!r} is not callable or a descriptor")
self.dispatcher = singledispatch(func)
self.func = func
def register(self, cls, method=None):
"""generic_method.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_method*.
"""
return self.dispatcher.register(cls, func=method)
def __get__(self, obj, cls=None):
def _method(*args, **kwargs):
method = self.dispatcher.dispatch(args[0].__class__)
return method.__get__(obj, cls)(*args, **kwargs)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method.register = self.register
update_wrapper(_method, self.func)
return _method
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
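# Usage sketch, mirroring the functools documentation's Negator example:
# the method dispatches on the type of the first argument after self.
class Negator:
    @singledispatchmethod
    def neg(self, arg):
        raise NotImplementedError("Cannot negate a %s" % type(arg).__name__)
    @neg.register(int)
    def _(self, arg):
        return -arg
    @neg.register(bool)
    def _(self, arg):
        return not arg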
153760
import yaml
import pandas as pd
import numpy as np
from glob import glob
import sys
# Create the datatable containing the samples, units and paths of all
# fastq files formatted correctly. This is vital for the snakemake
# pipeline; without it, the wildcards can't be created.
with open(sys.argv[1]) as f_:
config = yaml.load(f_, Loader=yaml.FullLoader)
def create_dataframe(fl, fpl, config, slice_):
    # `slice_` trims a trailing extension from each path (e.g. -3 drops '.gz').
    if config['merge']['paired_End'] and not config['general']['already_assembled']:
        df = pd.DataFrame(columns=['sample', 'unit', 'fq1', 'fq2'],
                          index=range(int(len(fl)/2)), dtype=str)
        i, j = (0, 0)
        while i < len(fl)/2:
            # .loc[row, col] assigns in place; the chained df.loc[i][col]
            # form writes to a temporary copy and can silently lose values.
            df.loc[i, 'sample'] = fl[j].split('_')[0]
            df.loc[i, 'unit'] = fl[j].split('_')[1]
            df.loc[i, 'fq1'] = fpl[j][:slice_]
            df.loc[i, 'fq2'] = fpl[j+1][:slice_]
            j += 2
            i += 1
    else:
        df = pd.DataFrame(columns=['sample', 'unit', 'fq1', 'fq2'],
                          index=range(int(len(fl))), dtype=str)
        i = 0
        while i < len(fl):
            df.loc[i, 'sample'] = fl[i].split('_')[0]
            df.loc[i, 'unit'] = fl[i].split('_')[1]
            df.loc[i, 'fq1'] = fpl[i][:slice_]
            df.loc[i, 'fq2'] = np.nan
            i += 1
    return df
if __name__ == '__main__':
if not config['general']['already_assembled']:
file_path_list = ['demultiplexed/' + name.split('/')[-1] for name in
sorted(glob(config['general']['filename'] + '/*.gz'))]
file_list = sorted([file_.split('/')[-1] for file_
in file_path_list])
        slice_ = -3 # Remove the .gz extension from the file paths.
else:
file_path_list = sorted(glob('results/assembly/*/*.fastq'))
file_list = sorted([file_.split('/')[-1] for file_
in file_path_list])
        slice_ = None
    df = create_dataframe(file_list, file_path_list, config, slice_)
df.to_csv('units.tsv', sep='\t')
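# Typical invocation (hypothetical script name):
#   python create_units.py config.yaml
# This writes the sample/unit/fastq table to units.tsv for the Snakemake pipeline.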