id | content
|---|---|
11528675
|
from django.contrib import admin
from .models import Letter, Attachment
class AttachmentInline(admin.StackedInline):
"""
Stacked Inline View for Attachment
"""
model = Attachment
@admin.register(Letter)
class LetterAdmin(admin.ModelAdmin):
"""
Admin View for Letter
"""
list_display = (
"title",
"author",
"created",
"modified",
"is_draft",
"is_incoming",
"is_spam",
)
list_filter = ("created", "modified", "is_spam")
inlines = [AttachmentInline]
search_fields = ("title", "body")
raw_id_fields = ("author_user", "author_institution", "record")
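    # Note: with_author() below is assumed to be a custom queryset method on the
    # Letter manager that joins the author relations for the changelist.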
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.with_author()
|
11528711
|
import pymel.core as pm
def optionVarProperty(key, default):
"""
Create a property that is saved in the user's preferences
by using Maya optionVars.
Args:
key (str): The option var key for the property
default: The default value of the property
"""
def fget(self):
return pm.optionVar.get(key, default)
def fset(self, value):
pm.optionVar[key] = value
def fdel(self):
if key in pm.optionVar:
del pm.optionVar[key]
return property(fget, fset, fdel, 'Get or set the optionVar: {0}'.format(key))
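# Example usage (hypothetical class): values read/written through the property
# are stored in Maya optionVars, so they persist across Maya sessions.
# class ExportSettings(object):
#     use_namespaces = optionVarProperty('exportUseNamespaces', True)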
|
11528749
|
from django.shortcuts import render
from .models import Profile
def dashboard(request):
return render(request, "base.html")
def profile_list(request):
profiles = Profile.objects.exclude(user=request.user)
return render(request, "dwitter/profile_list.html", {"profiles": profiles})
|
11528815
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
cscDaqInfo = DQMEDHarvester("CSCDaqInfo")
|
11528818
|
import demistomock as demisto
from SetByIncidentId import main
def test_set_by_incident_id(mocker):
"""
Given:
- ID (1) of incident to update
- Key (Key) to update
- Value (Value) to update
- Argument append set to false
- Argument errorUnfinished set to false
When:
- Running SetByIncidentId
Then:
- Ensure executeCommand is called with expected args
"""
mocker.patch.object(demisto, 'args', return_value={
'id': '1',
'key': 'Key',
'value': 'Value',
'append': 'false',
'errorUnfinished': 'false',
})
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'executeCommand')
main()
demisto.executeCommand.assert_called_with(
'executeCommandAt',
{
'arguments': {'append': 'false', 'key': 'Key', 'value': 'Value'},
'command': 'Set',
'incidents': '1',
}
)
|
11528906
|
from pypugjs.lexer import Lexer
from pypugjs.utils import odict
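# Mapping from pug source snippets to the token dicts (tok.__dict__) the lexer
# is expected to emit for them.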
expected_results = {
"p Here is some #[strong: em text] and look at #[a(href='http://google.com') this link!]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'Here is some ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'buffer': None, 'type': ':', 'line': 1, 'inline_level': 1, 'val': None},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'em'},
{'buffer': None, 'type': 'text', 'line': 1, 'inline_level': 1, 'val': u' text'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u' and look at ',
},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'tag', 'val': u'a'},
{
'inline_level': 1,
'val': None,
'buffer': None,
'static_attrs': set([u'href']),
'attrs': odict([(u'href', u"'http://google.com'")]),
'line': 1,
'type': 'attrs',
},
{
'buffer': None,
'inline_level': 1,
'line': 1,
'type': 'text',
'val': u' this link!',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"p Other inline #[strong= 'test']": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'Other inline ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{
'inline_level': 1,
'val': u" 'test'",
'buffer': True,
'escape': True,
'line': 1,
'type': 'code',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"p Test #[|text line]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'Test ',
},
{
'buffer': None,
'type': 'string',
'line': 1,
'inline_level': 1,
'val': u'text line',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"p Test buffered #[= map(str, zip('iln', 'nie')) + 'code']": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'Test buffered ',
},
{
'inline_level': 1,
'val': u" map(str, zip('iln', 'nie')) + 'code'",
'buffer': True,
'escape': True,
'line': 1,
'type': 'code',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"p #[- abcf = [[123, [[],[]], []],'abc']] #[= abcf]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{
'inline_level': 1,
'val': u" abcf = [[123, [[],[]], []],'abc']",
'buffer': False,
'escape': False,
'line': 1,
'type': 'code',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u' '},
{
'inline_level': 1,
'val': u' abcf',
'buffer': True,
'escape': True,
'line': 1,
'type': 'code',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"#[#[#[a a#[b #[i a] b]] d]e]": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u''},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 3, 'val': u'a'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 3, 'val': u'a'},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 4, 'val': u'b'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 4, 'val': u''},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 5, 'val': u'i'},
{'buffer': None, 'type': 'text', 'line': 1, 'inline_level': 5, 'val': u' a'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 4, 'val': u' b'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 3, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u' d'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u'e'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"p We can also #[strong combine #[em multiple #[img(src='http://jade-lang.com/style/logo.png')]]]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'We can also ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{
'buffer': None,
'type': 'string',
'line': 1,
'inline_level': 1,
'val': u'combine ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 2, 'val': u'em'},
{
'buffer': None,
'type': 'string',
'line': 1,
'inline_level': 2,
'val': u'multiple ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 3, 'val': u'img'},
{
'inline_level': 3,
'val': None,
'buffer': None,
'static_attrs': set([u'src']),
'attrs': odict([(u'src', u"'http://jade-lang.com/style/logo.png'")]),
'line': 1,
'type': 'attrs',
},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u''},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"#[strong start] line with #[i]\#[j] inline": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{
'buffer': None,
'type': 'text',
'line': 1,
'inline_level': 1,
'val': u' start',
},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u' line with ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'i'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'#[j] inline',
},
],
"p Another #[strong.lil#okf(acs=[1,2]) test [[with brackets]] [in#[='side']]]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'Another ',
},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'buffer': None, 'type': 'class', 'line': 1, 'inline_level': 1, 'val': u'lil'},
{'buffer': None, 'type': 'id', 'line': 1, 'inline_level': 1, 'val': u'okf'},
{
'val': None,
'buffer': None,
'static_attrs': set([]),
'attrs': odict([(u'acs', u'[1,2]')]),
'line': 1,
'type': 'attrs',
'inline_level': 1,
},
{
'buffer': None,
'type': 'string',
'line': 1,
'inline_level': 1,
'val': u'test [[with brackets]] [in',
},
{
'inline_level': 2,
'val': u"'side'",
'buffer': True,
'escape': True,
'line': 1,
'type': 'code',
},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u']'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"""mixin lala(a, b)
span lala(#{a}, #{b})
p Test inline mixin #[+lala(123, 'lala inside inline')] end""": [
{
'args': u'a, b',
'buffer': None,
'line': 1,
'type': 'mixin',
'inline_level': 0,
'val': u'lala',
},
{'buffer': None, 'line': 2, 'type': 'indent', 'inline_level': 0, 'val': 2},
{'buffer': None, 'line': 2, 'type': 'tag', 'inline_level': 0, 'val': u'span'},
{
'buffer': None,
'line': 2,
'type': 'text',
'inline_level': 0,
'val': u' lala(#{a}, #{b})',
},
{'buffer': None, 'line': 3, 'type': 'outdent', 'inline_level': 0, 'val': None},
{'buffer': None, 'line': 3, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 3,
'type': 'string',
'inline_level': 0,
'val': u'Test inline mixin ',
},
{
'inline_level': 1,
'val': u'lala',
'buffer': None,
'args': u"123, 'lala inside inline'",
'line': 1,
'type': 'call',
},
{
'buffer': None,
'line': 3,
'type': 'string',
'inline_level': 0,
'val': u' end',
},
],
"p only class #[.strong: em inline]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{
'buffer': None,
'line': 1,
'type': 'string',
'inline_level': 0,
'val': u'only class ',
},
{
'buffer': None,
'inline_level': 1,
'line': 1,
'type': 'class',
'val': u'strong',
},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': ':', 'val': None},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'tag', 'val': u'em'},
{
'buffer': None,
'inline_level': 1,
'line': 1,
'type': 'text',
'val': u' inline',
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"#[asdf.lol(fff)#[asdf]]": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'tag', 'val': u'asdf'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'class', 'val': u'lol'},
{
'inline_level': 1,
'val': None,
'buffer': None,
'static_attrs': set([u'fff']),
'attrs': odict([(u'fff', True)]),
'line': 1,
'type': 'attrs',
},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'string', 'val': u''},
{'buffer': None, 'inline_level': 2, 'line': 1, 'type': 'tag', 'val': u'asdf'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'string', 'val': u''},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"#[= '[[[[[[[[[[']": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{
'buffer': True,
'line': 1,
'type': 'code',
'val': u" '[[[[[[[[[['",
'escape': True,
'inline_level': 1,
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
"#[= ']]]]]]]]]]']": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{
'buffer': True,
'line': 1,
'type': 'code',
'val': u" ']]]]]]]]]]'",
'escape': True,
'inline_level': 1,
},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
],
}
def lex_to_dicts(pugjs):
    # Run the lexer over the source and collect each token's attribute dict.
    lx = Lexer(pugjs)
    res = []
    while True:
        tok = lx.advance()
        if tok.type == 'eos':
            break
        res.append(tok.__dict__)
    return res
def process(pugjs):
    assert expected_results[pugjs] == lex_to_dicts(pugjs)
def test_lexer():
import six
for k, v in six.iteritems(expected_results):
yield process, k
|
11528989
|
def show_graph_from_adjmtx(A,B,C,title=''):
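    # Builds a DiGraph from adjacency matrices: A holds unmodulated edges (red),
    # B modulated edges (blue), and C marks nodes driven by the input node 'u' (black).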
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
gr = nx.DiGraph()
    nodes = list(range(A.shape[0]))  # one node per row of A, including node 0
gr.add_nodes_from(nodes)
gr.add_node('u')
rows, cols = np.where(A == 1)
edges_A = list(zip(cols.tolist(), rows.tolist()))
gr.add_edges_from(edges_A)
rows, cols = np.where(B == 1)
edges_B = list(zip(cols.tolist(), rows.tolist()))
gr.add_edges_from(edges_B)
    mylabels = {i: '%d' % i for i in gr.nodes() if i != 'u'}
rows=np.where(C==1)[0]
edges_C=[]
for r in rows:
edges_C.append(('u',r))
gr.add_edges_from(edges_C)
mylabels['u']='u'
pos=nx.circular_layout(gr)
print(gr.edges())
    # draw_networkx_nodes() does not accept label arguments; labels are drawn below
    nx.draw_networkx_nodes(gr, pos=pos, node_size=500)
nx.draw_networkx_labels(gr,pos=pos,labels=mylabels)
nx.draw_networkx_edges(gr,pos,edgelist=edges_C,edge_color='k')
print('Black: input')
nx.draw_networkx_edges(gr,pos,edgelist=edges_A,edge_color='r')
print('Red: unmodulated')
if edges_B:
nx.draw_networkx_edges(gr,pos,edgelist=edges_B,edge_color='b')
print('Blue: modulated')
plt.title(title)
plt.show()
return gr
def show_graph_from_pattern(pattern_file,nnodes=5):
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
pf=[i.strip().replace('"','') for i in open(pattern_file).readlines()]
    if 'digraph' not in pf[0]:
raise RuntimeError('input is not a valid dot file')
gr = nx.DiGraph()
nodes=[]
edges=[]
for l in pf[1:]:
l_s=l.split(' ')
print(l_s)
if len(l_s)>1:
# if it's a numeric node, add to the list
            try:
                nodes.append(int(l_s[0]))
                n1 = int(l_s[0])
            except ValueError:
                n1 = l_s[0]
            try:
                nodes.append(int(l_s[2]))
                n2 = int(l_s[2])
            except ValueError:
                n2 = l_s[2]
edges.append((n1,n2))
assert l_s[4].find('arrowhead')>-1
if l_s[4].find('none')>-1:
edges.append((n2,n1))
    nodes = list(range(0, nnodes))  # include any nodes that had no connections
    mylabels = {i: '%d' % i for i in nodes}
mylabels['u']='u'
nodes.append('u')
gr.add_nodes_from(nodes)
gr.add_edges_from(edges)
pos=nx.circular_layout(gr)
    nx.draw_networkx_nodes(gr, pos=pos, node_size=500)
nx.draw_networkx_labels(gr,pos=pos,labels=mylabels)
nx.draw_networkx_edges(gr,pos,edge_color='r')
print('Red: unmodulated')
plt.show()
return gr
|
11528997
|
import re
from typing import List
from unidecode import unidecode
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text: str, delim: str = "-") -> str:
"""
Generates an ASCII-only slug.
"""
result: List[str] = []
for word in _punct_re.split(text.lower()):
result.extend(unidecode(word).split())
return str(delim.join(result))
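# Example (illustrative): slugify("Héllo, Wörld!") -> "hello-world".
# unidecode transliterates the accented words to ASCII after splitting on punctuation.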
|
11529015
|
import csv
import json
import os
import StringIO
import tempfile
import unittest
import parquet
class TestFileFormat(unittest.TestCase):
def test_header_magic_bytes(self):
with tempfile.NamedTemporaryFile() as t:
t.write("PAR1_some_bogus_data")
t.flush()
self.assertTrue(parquet._check_header_magic_bytes(t))
def test_footer_magic_bytes(self):
with tempfile.NamedTemporaryFile() as t:
t.write("PAR1_some_bogus_data_PAR1")
t.flush()
self.assertTrue(parquet._check_footer_magic_bytes(t))
def test_not_parquet_file(self):
with tempfile.NamedTemporaryFile() as t:
t.write("blah")
t.flush()
self.assertFalse(parquet._check_header_magic_bytes(t))
self.assertFalse(parquet._check_footer_magic_bytes(t))
class TestMetadata(unittest.TestCase):
f = "test-data/nation.impala.parquet"
def test_footer_bytes(self):
with open(self.f) as fo:
            self.assertEqual(327, parquet._get_footer_size(fo))
def test_read_footer(self):
footer = parquet.read_footer(self.f)
        self.assertEqual(
set([s.name for s in footer.schema]),
set(["schema", "n_regionkey", "n_name", "n_nationkey",
"n_comment"]))
def test_dump_metadata(self):
data = StringIO.StringIO()
parquet.dump_metadata(self.f, data)
class Options(object):
def __init__(self, col=None, format='csv', no_headers=True, limit=-1):
self.col = col
self.format = format
self.no_headers = no_headers
self.limit = limit
class TestReadApi(unittest.TestCase):
def test_projection(self):
pass
def test_limit(self):
pass
class TestCompatibility(object):
td = "test-data"
files = [(os.path.join(td, p), os.path.join(td, "nation.csv")) for p in
["gzip-nation.impala.parquet", "nation.dict.parquet",
"nation.impala.parquet", "nation.plain.parquet",
"snappy-nation.impala.parquet"]]
def _test_file_csv(self, parquet_file, csv_file):
""" Given the parquet_file and csv_file representation, converts the
parquet_file to a csv using the dump utility and then compares the
result to the csv_file.
"""
expected_data = []
with open(csv_file, 'rb') as f:
expected_data = list(csv.reader(f, delimiter='|'))
actual_raw_data = StringIO.StringIO()
parquet.dump(parquet_file, Options(), out=actual_raw_data)
actual_raw_data.seek(0, 0)
actual_data = list(csv.reader(actual_raw_data, delimiter='\t'))
assert expected_data == actual_data, "{0} != {1}".format(
str(expected_data), str(actual_data))
actual_raw_data = StringIO.StringIO()
parquet.dump(parquet_file, Options(no_headers=False),
out=actual_raw_data)
actual_raw_data.seek(0, 0)
actual_data = list(csv.reader(actual_raw_data, delimiter='\t'))[1:]
assert expected_data == actual_data, "{0} != {1}".format(
str(expected_data), str(actual_data))
def _test_file_json(self, parquet_file, csv_file):
""" Given the parquet_file and csv_file representation, converts the
parquet_file to json using the dump utility and then compares the
result to the csv_file using column agnostic ordering.
"""
expected_data = []
with open(csv_file, 'rb') as f:
expected_data = list(csv.reader(f, delimiter='|'))
actual_raw_data = StringIO.StringIO()
parquet.dump(parquet_file, Options(format='json'),
out=actual_raw_data)
actual_raw_data.seek(0, 0)
actual_data = [json.loads(x.rstrip()) for x in
actual_raw_data.read().split("\n") if len(x) > 0]
assert len(expected_data) == len(actual_data)
footer = parquet.read_footer(parquet_file)
cols = [s.name for s in footer.schema]
        # compare against the parsed json rows, not the raw output buffer
        for expected, actual in zip(expected_data, actual_data):
assert len(expected) == len(actual)
for i, c in enumerate(cols):
if c in actual:
assert expected[i] == actual[c]
def test_all_files(self):
for parquet_file, csv_file in self.files:
yield self._test_file_csv, parquet_file, csv_file
yield self._test_file_json, parquet_file, csv_file
|
11529017
|
import timeit
from logging import getLogger
import numpy as np
import pytest
import torch
from torch import nn
from pfrl.utils import clip_l2_grad_norm_
def _get_grad_vector(model):
return np.concatenate(
[p.grad.cpu().numpy().ravel().copy() for p in model.parameters()]
)
def _test_clip_l2_grad_norm_(gpu):
if gpu >= 0:
device = torch.device("cuda:{}".format(gpu))
else:
device = torch.device("cpu")
model = nn.Sequential(
nn.Linear(2, 10),
nn.ReLU(),
nn.Linear(10, 3),
).to(device)
x = torch.rand(7, 2).to(device)
def backward():
model.zero_grad()
loss = model(x).mean()
loss.backward()
backward()
raw_grads = _get_grad_vector(model)
# Threshold large enough not to affect grads
th = 10000
backward()
nn.utils.clip_grad_norm_(model.parameters(), th)
clipped_grads = _get_grad_vector(model)
backward()
clip_l2_grad_norm_(model.parameters(), th)
our_clipped_grads = _get_grad_vector(model)
np.testing.assert_allclose(raw_grads, clipped_grads)
np.testing.assert_allclose(raw_grads, our_clipped_grads)
# Threshold small enough to affect grads
th = 1e-2
backward()
nn.utils.clip_grad_norm_(model.parameters(), th)
clipped_grads = _get_grad_vector(model)
backward()
clip_l2_grad_norm_(model.parameters(), th)
our_clipped_grads = _get_grad_vector(model)
with pytest.raises(AssertionError):
np.testing.assert_allclose(raw_grads, clipped_grads, rtol=1e-5)
with pytest.raises(AssertionError):
np.testing.assert_allclose(raw_grads, our_clipped_grads, rtol=1e-5)
np.testing.assert_allclose(clipped_grads, our_clipped_grads, rtol=1e-5)
def test_clip_l2_grad_norm_cpu():
_test_clip_l2_grad_norm_(-1)
@pytest.mark.gpu
def test_clip_l2_grad_norm_gpu():
_test_clip_l2_grad_norm_(0)
@pytest.mark.slow
def test_clip_l2_grad_norm_speed():
logger = getLogger(__name__)
# Speed difference is large when model is large
model = nn.Sequential(
nn.Linear(2, 1000),
nn.ReLU(),
nn.Linear(1000, 1000),
nn.ReLU(),
nn.Linear(1000, 3),
)
x = torch.rand(7, 2)
def backward():
model.zero_grad()
loss = model(x).mean()
loss.backward()
# Threshold large enough not to affect grads
th = 10000
backward()
def torch_clip():
nn.utils.clip_grad_norm_(model.parameters(), th)
torch_time = timeit.timeit(torch_clip, number=100)
logger.debug("torch.nn.utils.clip_grad_norm_ took %s", torch_time)
def our_clip():
clip_l2_grad_norm_(model.parameters(), th)
our_time = timeit.timeit(our_clip, number=100)
logger.debug("pfrl.misc.clip_l2_grad_norm_ took %s", our_time)
assert our_time < torch_time
|
11529063
|
import numpy as np
from bokeh.layouts import column, gridplot
from bokeh.models import BoxSelectTool, Div
from bokeh.plotting import figure, show, output_file
x = np.linspace(0, 4*np.pi, 100)
y = np.sin(x)
TOOLS = "wheel_zoom,save,box_select,lasso_select"
div = Div(text="""
<p>Selection behaviour in Bokeh can be configured in various ways. For instance,
the selection event can be set to happen on every mouse move, or only on mouseup.
Additionally the appearance of standard, selected, and non-selected glyphs is
fully customizable.</p>
<p>Make selections on the plots below to see these possibilities.</p>
""")
opts = dict(tools=TOOLS, plot_width=350, plot_height=350)
p1 = figure(title="selection on mouseup", **opts)
p1.circle(x, y, color="navy", size=6, alpha=0.6)
p2 = figure(title="selection on mousemove", **opts)
p2.square(x, y, color="olive", size=6, alpha=0.6)
p2.select_one(BoxSelectTool).select_every_mousemove = True
p3 = figure(title="default highlight", **opts)
p3.circle(x, y, color="firebrick", alpha=0.5, size=6)
p4 = figure(title="custom highlight", **opts)
p4.square(x, y, color="navy", size=6, alpha=0.6,
nonselection_color="orange", nonselection_alpha=0.6)
output_file("scatter_selection.html", title="scatter_selection.py example")
layout = column(div,
gridplot([[p1, p2], [p3, p4]], toolbar_location="right"),
sizing_mode="scale_width")
show(layout)
|
11529116
|
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from revproxy.views import ProxyView
from proxy.process import FavaProcess
from child import fava_child
from contextlib import closing
from multiprocessing import Process
import socket
import sys
import argparse
import shlex
import logging
from time import sleep
# too noisy...
logging.getLogger('revproxy.view').setLevel(logging.ERROR)
logging.getLogger('revproxy.response').setLevel(logging.ERROR)
@method_decorator(login_required(login_url = '/'), 'dispatch')
class ReverseFava(ProxyView):
def __init__(self):
super().__init__()
self.process = FavaProcess.instance()
self.upstream = 'http://127.0.0.1:' + str(self.process.port) + '/fava/'
def get_request_headers(self):
headers = super(ReverseFava, self).get_request_headers()
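        # Attach the FavaProcess key so the upstream fava child can check that
        # the request was routed through this proxy (assumed handshake).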
headers['Key'] = self.process.key
return headers
@login_required(login_url = '/')
def restart(request):
process = FavaProcess.instance()
process.restart()
return HttpResponse("done")
|
11529133
|
import unittest
from mock import Mock, patch
from lti import ToolProxy
import requests
import json
from oauthlib.oauth1 import SignatureOnlyEndpoint
test_profile = {'@context': ['http://purl.imsglobal.org/ctx/lti/v2/ToolConsumerProfile'],
'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9',
'@type': 'ToolConsumerProfile',
'capability_offered': ['basic-lti-launch-request',
'User.id',
'Canvas.api.domain',
'LtiLink.custom.url',
'ToolProxyBinding.custom.url',
'ToolProxy.custom.url',
'Canvas.placements.accountNavigation',
'Canvas.placements.courseNavigation',
'Canvas.placements.assignmentSelection',
'Canvas.placements.linkSelection',
'Canvas.placements.postGrades',
'User.username',
'Person.email.primary',
'vnd.Canvas.Person.email.sis',
'Person.name.given',
'Person.name.family',
'Person.name.full',
'CourseSection.sourcedId',
'Person.sourcedId',
'Membership.role',
'ToolConsumerProfile.url',
'Security.splitSecret',
'Context.id',
'ToolConsumerInstance.guid',
'CourseSection.sourcedId',
'Membership.role',
'Person.email.primary',
'Person.name.given',
'Person.name.family',
'Person.name.full',
'Person.sourcedId',
'User.id',
'User.image',
'Message.documentTarget',
'Message.locale',
'Context.id',
'vnd.Canvas.root_account.uuid'],
'guid': '339b6700-e4cb-47c5-a54f-3ee0064921a9',
'lti_version': 'LTI-2p0',
'product_instance': {'guid': '07adb3e60637ff02d9ea11c7c74f1ca921699bd7.canvas.instructure.com',
'product_info': {'product_family': {'code': 'canvas',
'vendor': {'code': 'https://instructure.com',
'timestamp': '2008-03-27T06:00:00Z',
'vendor_name': {'default_value': 'Instructure',
'key': 'vendor.name'}}},
'product_name': {'default_value': 'Canvas '
'by '
'Instructure',
'key': 'product.name'},
'product_version': 'none'},
'service_owner': {'description': {'default_value': 'Free '
'For '
'Teachers',
'key': 'service_owner.description'},
'service_owner_name': {'default_value': 'Free '
'For '
'Teachers',
'key': 'service_owner.name'}}},
'security_profile': [{'digest_algorithm': 'HMAC-SHA1',
'security_profile_name': 'lti_oauth_hash_message_security'},
{'digest_algorithm': 'HS256',
'security_profile_name': 'oauth2_access_token_ws_security'}],
'service_offered': [{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxy.collection',
'@type': 'RestService',
'action': ['POST'],
'endpoint': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_proxy',
'format': ['application/vnd.ims.lti.v2.toolproxy+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxy.item',
'@type': 'RestService',
'action': ['GET'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_proxy/{tool_proxy_guid}',
'format': ['application/vnd.ims.lti.v2.toolproxy+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#vnd.Canvas.authorization',
'@type': 'RestService',
'action': ['POST'],
'endpoint': 'https://canvas.instructure.com/api/lti/courses/1157004/authorize',
'format': ['application/json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxySettings',
'@type': 'RestService',
'action': ['GET', 'PUT'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_settings/tool_proxy/{tool_proxy_id}',
'format': ['application/vnd.ims.lti.v2.toolsettings+json',
'application/vnd.ims.lti.v2.toolsettings.simple+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxyBindingSettings',
'@type': 'RestService',
'action': ['GET', 'PUT'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_settings/bindings/{binding_id}',
                                         'format': ['application/vnd.ims.lti.v2.toolsettings+json',
'application/vnd.ims.lti.v2.toolsettings.simple+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#LtiLinkSettings',
'@type': 'RestService',
'action': ['GET', 'PUT'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_settings/links/{tool_proxy_id}',
'format': ['application/vnd.ims.lti.v2.toolsettings+json',
'application/vnd.ims.lti.v2.toolsettings.simple+json']}]}
test_params = {'ext_api_domain': 'canvas.instructure.com',
'ext_tool_consumer_instance_guid': '07adb3e60637ff02d9ea11c7c74f1ca921699bd7.canvas.instructure.com',
'launch_presentation_document_target': 'iframe',
'launch_presentation_return_url': 'https://canvas.instructure.com/courses/1157004/lti/registration_return',
'lti_message_type': 'ToolProxyRegistrationRequest',
'lti_version': 'LTI-2p0',
'reg_key': 'eb9031ac-2e12-422e-8238-beb9c41419b3',
'reg_password': '<PASSWORD>',
'tc_profile_url': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile'}
class TestToolProxy(unittest.TestCase):
def test_load_tc_profile(self):
#Mock out the call to the requests library
response = Mock()
response.text = json.dumps(test_profile)
proxy = ToolProxy(params=test_params)
with patch('lti.tool_proxy.requests.get') as mock_get:
mock_get.return_value = response
proxy.load_tc_profile()
self.assertEqual(proxy.tc_profile, test_profile)
def test_tool_consumer_profile_url(self):
proxy = ToolProxy(params=test_params)
self.assertEqual(proxy.tool_consumer_profile_url, test_params['tc_profile_url'])
def test_find_registration_url(self):
proxy = ToolProxy(params=test_params)
proxy.tc_profile = test_profile
registration_url = proxy.find_registration_url()
self.assertEqual(registration_url, 'https://canvas.instructure.com/api/lti/courses/1157004/tool_proxy')
def test_not_find_registration_url(self):
proxy = ToolProxy(params=test_params)
proxy.tc_profile = {'@context': ['http://purl.imsglobal.org/ctx/lti/v2/ToolConsumerProfile'],
'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9',
'@type': 'ToolConsumerProfile',
'capability_offered': ['basic-lti-launch-request',
'vnd.Canvas.root_account.uuid'],
'guid': '339b6700-e4cb-47c5-a54f-3ee0064921a9',
'lti_version': 'LTI-2p0',
'product_instance': {'guid': '07adb3e60637ff02d9ea11c7c74f1ca921699bd7.canvas.instructure.com',
'product_info': {'product_family': {'code': 'canvas',
'vendor': {'code': 'https://instructure.com',
'timestamp': '2008-03-27T06:00:00Z',
'vendor_name': {'default_value': 'Instructure',
'key': 'vendor.name'}}},
'product_name': {'default_value': 'Canvas '
'by '
'Instructure',
'key': 'product.name'},
'product_version': 'none'},
'service_owner': {'description': {'default_value': 'Free '
'For '
'Teachers',
'key': 'service_owner.description'},
'service_owner_name': {'default_value': 'Free '
'For '
'Teachers',
'key': 'service_owner.name'}}},
'security_profile': [{'digest_algorithm': 'HMAC-SHA1',
'security_profile_name': 'lti_oauth_hash_message_security'},
{'digest_algorithm': 'HS256',
'security_profile_name': 'oauth2_access_token_ws_security'}],
'service_offered': [{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxy.item',
'@type': 'RestService',
'action': ['GET'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_proxy/{tool_proxy_guid}',
'format': ['application/vnd.ims.lti.v2.toolproxy+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#vnd.Canvas.authorization',
'@type': 'RestService',
'action': ['POST'],
'endpoint': 'https://canvas.instructure.com/api/lti/courses/1157004/authorize',
'format': ['application/json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxySettings',
'@type': 'RestService',
'action': ['GET', 'PUT'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_settings/tool_proxy/{tool_proxy_id}',
'format': ['application/vnd.ims.lti.v2.toolsettings+json',
'application/vnd.ims.lti.v2.toolsettings.simple+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#ToolProxyBindingSettings',
'@type': 'RestService',
'action': ['GET', 'PUT'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_settings/bindings/{binding_id}',
                                         'format': ['application/vnd.ims.lti.v2.toolsettings+json',
'application/vnd.ims.lti.v2.toolsettings.simple+json']},
{'@id': 'https://canvas.instructure.com/api/lti/courses/1157004/tool_consumer_profile/339b6700-e4cb-47c5-a54f-3ee0064921a9#LtiLinkSettings',
'@type': 'RestService',
'action': ['GET', 'PUT'],
'endpoint': 'https://canvas.instructure.com/api/lti/tool_settings/links/{tool_proxy_id}',
'format': ['application/vnd.ims.lti.v2.toolsettings+json',
'application/vnd.ims.lti.v2.toolsettings.simple+json']}]}
registration_url = proxy.find_registration_url()
self.assertIsNone(registration_url)
def test_register_proxy(self):
proxy = ToolProxy(params=test_params)
proxy.tc_profile = test_profile
signed_request = proxy.register_proxy({'tool_profile': 'A Real Tool Profile Goes here'})
self.assertIsInstance(signed_request, requests.PreparedRequest)
|
11529140
|
from __future__ import absolute_import, division, print_function, unicode_literals
from django.core import mail
from django.db import IntegrityError
from django.test.utils import override_settings
from django_otp.forms import OTPAuthenticationForm
from django_otp.tests import TestCase
from .models import EmailDevice
class AuthFormTest(TestCase):
def setUp(self):
try:
alice = self.create_user('alice', 'password')
except IntegrityError:
self.skipTest("Failed to create user.")
else:
alice.emaildevice_set.create()
if hasattr(alice, 'email'):
alice.email = '<EMAIL>'
alice.save()
else:
self.skipTest("User model has no email.")
@override_settings(OTP_EMAIL_SENDER='<EMAIL>')
def test_email_interaction(self):
data = {
'username': 'alice',
'password': 'password',
'otp_device': 'otp_email.emaildevice/1',
'otp_token': '',
'otp_challenge': '1',
}
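        # First pass: submitting only a challenge should fail validation but
        # make the device email a token (captured in mail.outbox below).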
form = OTPAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
alice = form.get_user()
self.assertEqual(alice.get_username(), 'alice')
self.assertIsNone(alice.otp_device)
self.assertEqual(len(mail.outbox), 1)
data['otp_token'] = mail.outbox[0].body
del data['otp_challenge']
form = OTPAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertIsInstance(form.get_user().otp_device, EmailDevice)
|
11529163
|
from distutils.core import setup
setup(
name = 'boltiot',
packages = ['boltiot'],
version = '1.11.2',
install_requires=['twilio','requests'],
description = 'A Python module for communicating with the Bolt Cloud API.',
author = 'Inventrom Pvt. Ltd.',
author_email = '<EMAIL>',
url = 'https://github.com/Inventrom/bolt-api-python',
    download_url = 'https://github.com/Inventrom/bolt-api-python/archive/1.11.2.tar.gz',
keywords = ['iot-platform','bolt','bolt-python'],
classifiers = []
)
|
11529192
|
import json
import math
import sys
import os
'''
This script generates tuning scripts for the benchmarks.
How to generate a tuning script?
=================================
1. Create a file named autotune.json in the benchmark's directory.
2. Fill the file according to the example autotune.json format:
    a. Specify the tuning header name.
    b. Specify the executable's directory.
    c. Specify the generated executable's name.
    d. Specify the compilation (build) path (where to execute make).
    e. Specify compile options; these are appended to make. For example, if you
       set compile_options to run_lstm, the command `make run_lstm` is executed.
    f. Specify the output log file for all the tried values and their corresponding times.
    g. Specify the parameters to tune and their value ranges (ranges are inclusive on both ends).
3. Prefer absolute paths, but paths relative to the directory you'll execute
   the tuning script in also work.
4. Execute this script (python autotune_generator.py) to generate the tuning script file.
'''
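# Illustrative autotune.json (paths/values hypothetical; the keys are the ones
# this script actually reads):
# {
#   "tuning_header_file_dir": "bench",
#   "tuning_header_file_name": "tuning.h",
#   "tuning_script_file_dir": "bench",
#   "tuning_script_file_name": "tune.sh",
#   "executable_dir": "bench/bin",
#   "executable_name": "run_bench",
#   "compile_path": "bench",
#   "compile_options": "run_bench",
#   "output_file_path": "bench/tune_log.txt",
#   "parameters_to_tune": [
#     {"name": "TILE_SIZE", "values": "8:64:8", "divisor_of": 512},
#     {"name": "UNROLL", "values": "1,2,4"}
#   ]
# }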
def getDivisors(n):
    """Return all divisors of n in ascending order."""
    divisors = set([1, n])
    nSQRT = int(math.sqrt(n))
    if n % 2 == 0:
        candidates = range(2, nSQRT + 1)
    else:
        # an odd number only has odd divisors
        candidates = range(3, nSQRT + 1, 2)
    # the original loop stopped short of sqrt(n) and so missed divisors
    # such as 3 and 4 for n=12; the +1 bound and the set fix that
    for i in candidates:
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
    return sorted(divisors)
# read file
with open('autotune.json', 'r') as autotune_file:
config = autotune_file.read()
# parse file
autotune_config = json.loads(config)
################ Get relative paths
path_script_to_tuning_header=os.path.relpath(autotune_config["tuning_header_file_dir"], autotune_config["tuning_script_file_dir"])
path_script_to_output_file = os.path.relpath(autotune_config["output_file_path"], autotune_config["tuning_script_file_dir"])
path_script_to_compile_dir=os.path.relpath(autotune_config["compile_path"], autotune_config["tuning_script_file_dir"])
path_compile_dir_to_executable_dir=os.path.relpath(autotune_config["executable_dir"], autotune_config["compile_path"])
path_executable_to_tuning_script=os.path.relpath(autotune_config["tuning_header_file_dir"], autotune_config["executable_dir"])
path_executable_to_output_file=os.path.relpath(autotune_config["output_file_path"], autotune_config["executable_dir"])
tune_parameters_script = "printf \"\" > "+path_script_to_output_file+"\n"
param_number=1
tabbing=""
for param in autotune_config["parameters_to_tune"]:
# Create values range
if "," in param["values"]: # case of a list of values to try
values = [int(val) for val in param["values"].split(",")]
elif ":" in param["values"]: # case of a range of values to try
range_values = param["values"].split(":")
if len(range_values)==3: # case of range with step
values = [*(range(int(range_values[0]), int(range_values[1]) + 1, int(range_values[2])))]
elif len(range_values)==2: # case of range with step=1
values = [*(range(int(range_values[0]), int(range_values[1]) + 1))]
elif param["values"].isdigit(): # case of a single integer
values=[int(param["values"])]
else :
print("Error : values field in parameter number %d must be an integer, a start:end:step range, or a list of comma separated integers i1,i2,i3,i4"%param_number)
sys.exit(1)
if(len(values)==0):
print("Error, there are no values represented by parameter %d's values field"%param_number)
sys.exit(2)
# Consider divisor_of parameter
if "divisor_of" in param:
divisors = getDivisors(int(param["divisor_of"]))
values = [val for val in values if val in divisors]
#Error if there are no values left after taking off non divisors
if(len(values) == 0):
print("Error : There are no divisors of %d for parameter number %d's values"% (param["divisor_of"], param_number))
sys.exit(3)
vals_string = " ".join(str(val) for val in values)
tune_parameters_script+= tabbing + "for "+param["name"]+" in "+ vals_string + "; do\n"
tabbing += "\t"
param_number+=1
header_file = path_script_to_tuning_header +"/"+ autotune_config["tuning_header_file_name"]
param_number=1
for param in autotune_config["parameters_to_tune"]:
tune_parameters_script+= tabbing+"printf \"#ifdef "+param["name"]+"\\n\t"
if param_number == 1:
tune_parameters_script+= tabbing+"#undef "+param["name"]+"\\n#endif\\n\" > "+header_file+"\n"
else:
tune_parameters_script+= tabbing+"#undef "+param["name"]+"\\n#endif\\n\" >> "+header_file+"\n"
tune_parameters_script+= tabbing+"printf \"#define "+param["name"]+" $"+param["name"]+"\\n\" >> "+header_file+"\n"
param_number+=1
tune_parameters_script+= tabbing+"printf \"\" >> "+header_file+"\n"
tune_parameters_script+="\n"
#Logging and showing progress
printf_string = "printf \""
for param in autotune_config["parameters_to_tune"]:
printf_string+=param["name"]+"=$"+param["name"]+", "
tune_parameters_script+=tabbing+printf_string+"\";\n"+tabbing+printf_string+"\""+" >> "+ path_script_to_output_file+";\n"
#Building part
tune_parameters_script+= tabbing+"cd "+path_script_to_compile_dir+"\n"
tune_parameters_script+= tabbing+"make "+autotune_config["compile_options"]+" > /dev/null 2>&1;\n"
tune_parameters_script+= tabbing+"cd "+path_compile_dir_to_executable_dir+";\n"
tune_parameters_script+= tabbing+"./"+autotune_config["executable_name"]+" |tee -a "+path_executable_to_output_file+";\n"
tune_parameters_script+= tabbing+"./clean.sh;\n"
tune_parameters_script+= tabbing+"cd "+path_executable_to_tuning_script+";\n"
#at this stage param_number = number of parameters + 1
param_number-=2
for param in autotune_config["parameters_to_tune"]:
tune_parameters_script+= param_number *"\t"+"done\n"
param_number-=1
with open(autotune_config["tuning_script_file_dir"]+"/"+autotune_config["tuning_script_file_name"], "w") as f:
f.write(tune_parameters_script)
print(tune_parameters_script)
|
11529251
|
import tkinter as tk
class TextLineNumbers(tk.Canvas):
def __init__(self, parent, *args, **kwargs):
tk.Canvas.__init__(self, *args, **kwargs)
self._text_font = parent.settings['font_family']
self._parent = parent
self.textwidget = parent.textarea
def attach(self, text_widget):
self.textwidget = text_widget
def redraw(self, *args):
font_color = self._parent.menu_fg
bg_color = self._parent.bg_color
indicator_on = self._parent.current_line_indicator
current_line_symbol = self._parent.current_line_symbol
if not self.visible:
return
self.delete('all')
self.config(width=(self._parent.font_size * 3),
bd=0, bg=bg_color, highlightthickness=0)
i = self.textwidget.index('@0,0')
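        # '@0,0' is the topmost visible line; dlineinfo() returns None once the
        # index scrolls out of view, which ends the loop below.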
while True:
dline= self.textwidget.dlineinfo(i)
if dline is None: break
y = dline[1]
index = self.textwidget.index(tk.INSERT)
pos = index.split('.')[0]
if float(i) >= 10:
linenum = str(i).split('.')[0]
if pos == linenum and indicator_on:
linenum = linenum + current_line_symbol
else:
linenum = '~' + str(i).split('.')[0]
if '~' + pos == linenum and indicator_on:
linenum = linenum + current_line_symbol
self.create_text(2, y, anchor='nw',
text=linenum,
font=(self._text_font, self._parent.font_size),
fill=font_color)
i = self.textwidget.index('%s+1line' % i)
@property
def visible(self):
return self.cget('state') == 'normal'
@visible.setter
def visible(self, visible):
self.config(state='normal' if visible else 'disabled')
if visible:
self.redraw()
else:
self.delete('all')
self.config(width=0)
|
11529281
|
import unittest
class AuthenticatedPredicateTests(unittest.TestCase):
def _getFUT(self):
from repoze.who.restrict import authenticated_predicate
return authenticated_predicate()
def test___call___no_identity_returns_False(self):
predicate = self._getFUT()
environ = {}
self.assertFalse(predicate(environ))
def test___call___w_REMOTE_AUTH_returns_True(self):
predicate = self._getFUT()
environ = {'REMOTE_USER': 'fred'}
self.assertTrue(predicate(environ))
def test___call___w_repoze_who_identity_returns_True(self):
predicate = self._getFUT()
environ = {'repoze.who.identity': {'login': 'fred'}}
self.assertTrue(predicate(environ))
class MakeAuthenticatedRestrictionTests(unittest.TestCase):
def _getFUT(self):
from repoze.who.restrict import make_authenticated_restriction
return make_authenticated_restriction
def test_enabled(self):
fut = self._getFUT()
app = DummyApp()
filter = fut(app, {}, enabled=True)
self.assertTrue(filter.app is app)
self.assertTrue(filter.enabled)
predicate = filter.predicate
self.assertTrue(predicate({'REMOTE_USER': 'fred'}))
self.assertTrue(predicate({'repoze.who.identity': {'login': 'fred'}}))
class PredicateRestrictionTests(unittest.TestCase):
def _getTargetClass(self):
from repoze.who.restrict import PredicateRestriction
return PredicateRestriction
def _makeOne(self, app=None, **kw):
if app is None:
app = DummyApp()
return self._getTargetClass()(app, **kw)
def test___call___disabled_predicate_false_calls_app_not_predicate(self):
_tested = []
def _factory():
def _predicate(env): # pragma: no cover
assert False
return _predicate
def _start_response(status, headers):
assert False # pragma: no cover
environ = {'testing': True}
restrict = self._makeOne(predicate=_factory, enabled=False)
restrict(environ, _start_response)
self.assertEqual(len(_tested), 0)
self.assertEqual(restrict.app.environ, environ)
def test___call___enabled_predicate_false_returns_401(self):
_tested = []
def _factory():
def _predicate(env):
_tested.append(env)
return False
return _predicate
_started = []
def _start_response(status, headers):
_started.append((status, headers))
environ = {'testing': True}
restrict = self._makeOne(predicate=_factory)
restrict(environ, _start_response)
self.assertEqual(len(_tested), 1)
self.assertEqual(len(_started), 1, _started)
self.assertEqual(_started[0][0], '401 Unauthorized')
self.assertEqual(restrict.app.environ, None)
def test___call___enabled_predicate_true_calls_app(self):
_tested = []
def _factory():
def _predicate(env):
_tested.append(env)
return True
return _predicate
def _start_response(status, headers):
assert False # pragma: no cover
environ = {'testing': True, 'REMOTE_USER': 'fred'}
restrict = self._makeOne(predicate=_factory)
restrict(environ, _start_response)
self.assertEqual(len(_tested), 1)
self.assertEqual(restrict.app.environ, environ)
class MakePredicateRestrictionTests(unittest.TestCase):
def _getFUT(self):
from repoze.who.restrict import make_predicate_restriction
return make_predicate_restriction
def test_non_string_predicate_no_args(self):
fut = self._getFUT()
app = DummyApp()
def _predicate(env):
return True # pragma: no cover
def _factory():
return _predicate
filter = fut(app, {}, predicate=_factory)
self.assertTrue(filter.app is app)
self.assertTrue(filter.predicate is _predicate)
self.assertTrue(filter.enabled)
def test_disabled_non_string_predicate_w_args(self):
fut = self._getFUT()
app = DummyApp()
filter = fut(app, {}, predicate=DummyPredicate, enabled=False,
foo='Foo')
self.assertTrue(filter.app is app)
self.assertTrue(isinstance(filter.predicate, DummyPredicate))
self.assertEqual(filter.predicate.foo, 'Foo')
self.assertFalse(filter.enabled)
def test_enabled_string_predicate_w_args(self):
fut = self._getFUT()
app = DummyApp()
filter = fut(app, {},
predicate='repoze.who.tests.test_restrict:DummyPredicate',
enabled=True, foo='Foo')
self.assertTrue(filter.app is app)
self.assertTrue(isinstance(filter.predicate, DummyPredicate))
self.assertEqual(filter.predicate.foo, 'Foo')
self.assertTrue(filter.enabled)
class DummyApp(object):
environ = None
def __call__(self, environ, start_response):
self.environ = environ
return []
class DummyPredicate(object):
def __init__(self, **kw):
self.__dict__.update(kw)
|
11529295
|
from __future__ import division
import numpy as np
from scipy.optimize import fsolve
def dahlquist(_, x, lam):
"""
dahlquist test equation ode.
:param _: place holder for time, not used
:param x: x value
:param lam: lambda
:return: slope dx/dt
"""
dx = lam * x
return dx
def dahlquist_ref(t, x0, lam):
"""
    reference solution for the Dahlquist test equation ODE: x' = lam*x  ->  x(t) = x0 * exp(lam*t)
:param t: time
:param x0: initial value
:param lam: lambda
:return: samples of reference solution for time t
"""
x_ref = np.exp(lam * t) * x0 # analytical solution of the dahlquist test equation
return x_ref
def definition_area(t, x):
"""
    for the initial value x0 = 1 this ODE only has a solution for t in (-sqrt(2), sqrt(2)),
    so the ODE is only defined on a restricted area.
:param t: time
:param x: x value
:return: slope dx/dt
"""
dx = t * x ** 2
return dx
def definition_area_ref(t, x0):
"""
    reference solution for the ODE with restricted definition area.
:param t: time
:param x0: initial value
:return: samples of reference solution for time t
"""
x_ref = 1. / (1. / x0 - 1. / 2. * (t ** 2)) # analytical solution of this ODE
return x_ref
def logistic_equation(_, x, k, g):
"""
ode for the logistic equation
:param _: place holder for time, not used
:param x: x value
:param k: slope of logistic equation
:param g: upper bound of logistic equation
:return: slope dx/dt
"""
dx = k * x * (g - x)
return dx
def logistic_equation_ref(t, x0, k, g):
"""
reference solution for logistic equation ode
:param t: time
:param x0: initial value
:param k: slope of logistic equation
:param g: upper bound of logistic equation
:return: samples of reference solution for time t
"""
if 0 != x0:
x_ref = g * 1 / (1 + np.exp(-k * g * t) * (g / x0 - 1))
else:
x_ref = 0
return x_ref
def oscillator_equation(_, x, omega):
"""
    two-dimensional ODE describing the harmonic oscillator
    :param _: place holder for time, not used
    :param x: x value
    :param omega: frequency of oscillation
    :return: slope dx/dt
"""
A = np.array([[0, 1], [-omega ** 2, 0]])
dx = np.dot(A, x)
return dx
def oscillator_equation_ref(t, x0, omega, v0=0):
"""
    reference solution for the two-dimensional harmonic oscillator ODE
    :param t: time
    :param x0: initial displacement
    :param omega: frequency of oscillation
    :param v0: initial velocity
    :return: samples of reference solution (only displacement) for time t
"""
x = x0 * np.exp(1j * omega * t) + v0 * np.exp(-1j * omega * t)
return np.real(x)
def ref_sol(f_ref, x0, t_min = 0, t_max = 1, n_samples = 1000):
"""
computes samples of the reference solution for a given timespan
:param f_ref: reference solution function handle
:param x0: initial value of ode
:param t_min: starting time
:param t_max: end time
:param n_samples: number of samples to be produced
:return: tuple of time and x value samples of the reference solution
"""
t_ref = np.linspace(t_min, t_max, n_samples)
x_ref = f_ref(t_ref, x0)
return t_ref, x_ref
def expl_euler(f, x0, h, timespan):
"""
explicit euler solver. Computes the solution for a given ode using explicit euler scheme.
:param f: function handle for ode
:param x0: initial value
:param h: constant step size
:param timespan: integration time
:return: numerical solution in time and x
"""
n = int(np.ceil(timespan / h))
t = np.empty(n + 1)
x = np.empty([x0.shape[0], n + 1])
t[0] = 0
x[:, 0] = x0
for k in range(n):
dx = f(t[k], x[:, k])
t[k + 1] = (k + 1) * h
x[:, k + 1] = x[:, k] + dx * h
return t, x
def impl_euler(f, x0, h, timespan):
"""
implicit euler solver. Computes the solution for a given ode using implicit euler scheme.
:param f: function handle for ode
:param x0: initial value
:param h: constant step size
:param timespan: integration time
:return: numerical solution in time and x
"""
n = int(np.ceil(timespan / h))
t = np.empty(n + 1)
x = np.empty([x0.shape[0], n + 1])
t[0] = 0
x[:, 0] = x0
for k in range(n):
t[k + 1] = (k + 1) * h
try:
x[:, k + 1] = fsolve(lambda arg: x[:, k] - arg + h * f(t[k + 1], arg), x[:, k])
except RuntimeError:
print("newton did not converge!")
for k in range(k, n):
t[k + 1] = (k + 1) * h
break
return t, x
def impl_midpoint(f, x0, h, timespan):
"""
implicit midpoint rule solver. Computes the solution for a given ode using the implicit midpoint rule scheme.
:param f: function handle for ode
:param x0: initial value
:param h: constant step size
:param timespan: integration time
:return: numerical solution in time and x
"""
n = int(np.ceil(timespan / h))
t = np.empty(n + 1)
x = np.empty([x0.shape[0], n + 1])
t[0] = 0
x[:, 0] = x0
for k in range(n):
t[k + 1] = (k + 1) * h
try:
dx_left = f(t[k], x[:, k])
x[:, k + 1] = fsolve(lambda arg: x[:, k] - arg + h / 2 * (f(t[k + 1], arg) + dx_left), x[:, k])
except RuntimeError:
print("newton did not converge!")
for k in range(k, n):
t[k + 1] = (k + 1) * h
break
return t, x
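# Minimal usage sketch (assumes only the functions defined above): solve the
# Dahlquist equation x' = -2*x, x(0) = 1, with explicit Euler and compare the
# endpoint against the analytical reference.
if __name__ == "__main__":
    lam = -2.0
    t, x = expl_euler(lambda t_, x_: dahlquist(t_, x_, lam), np.array([1.0]), 0.01, 1.0)
    print(t[-1], x[0, -1], dahlquist_ref(t[-1], 1.0, lam))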
|
11529305
|
import numpy
import matplotlib.pyplot as plt
import geojsoncontour
# Create lat and lon vectors and grid data
grid_size = 1.0
latrange = numpy.arange(-90.0, 90.0, grid_size)
lonrange = numpy.arange(-180.0, 180.0, grid_size)
X, Y = numpy.meshgrid(lonrange, latrange)
Z = numpy.sqrt(X * X + Y * Y)
n_contours = 20
levels = numpy.linspace(start=0, stop=100, num=n_contours)
# Create a contour plot plot from grid (lat, lon) data
figure = plt.figure()
ax = figure.add_subplot(111)
contourf = ax.contourf(lonrange, latrange, Z, levels=levels, cmap=plt.cm.jet)
# Convert matplotlib contourf to geojson
geojson = geojsoncontour.contourf_to_geojson(
contourf=contourf,
min_angle_deg=3.0,
ndigits=3,
stroke_width=2,
fill_opacity=0.5
)
print(geojson)
|
11529314
|
import sys
import os
sys.path.append(os.getcwd())
from simuleval.agents import Agent,TextAgent, SpeechAgent
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from typing import List,Dict, Optional
import numpy as np
import math
import torch
from collections import deque
from torch import Tensor
import torch.nn as nn
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.models import FairseqEncoderDecoderModel
from fairseq.data import encoders, Dictionary
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from argparse import Namespace
import rain
from rain.data.transforms import audio_encoder, text_encoder
from rain.simul.transducer_agent import TransducerAgent
class SpeechTransducerAgent(TransducerAgent):
data_type= "speech"
speech_segment_size = 10
|
11529327
|
import os
import time
import argparse
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision.utils as vutils
import torchvision.transforms as transforms
from utils import *
from network.lightcnn import LightCNN_29v2
from data.dataset_mix import Real_Dataset, Mix_Dataset
parser = argparse.ArgumentParser()
parser.add_argument('--num_classes', default=725, type=int)
parser.add_argument('--gpu_ids', default='0,1', type=str)
parser.add_argument('--workers', default=8, type=int)
parser.add_argument('--epochs', default=15, type=int)
parser.add_argument('--pre_epoch', default=0, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--weight_decay', default=2e-4, type=float)
parser.add_argument('--step_size', default=5, type=int)
parser.add_argument('--print_iter', default=5, type=int)
parser.add_argument('--save_name', default='LightCNN', type=str)
parser.add_argument('--seed', default=1000, type=int)
parser.add_argument('--weights_lightcnn', default='./pre_train/LightCNN_29Layers_V2_checkpoint.pth.tar', type=str)
parser.add_argument('--img_root_A', default='', type=str)
parser.add_argument('--train_list_A', default='', type=str)
parser.add_argument('--img_root_B', default='./gen_images/nir', type=str)
parser.add_argument('--train_list_B', default='./gen_images/img_list.txt', type=str)
def main():
global args
args = parser.parse_args()
print(args)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
cudnn.benchmark = True
cudnn.enabled = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# lightcnn
model = LightCNN_29v2(num_classes=args.num_classes)
# load pre trained model
if args.pre_epoch:
print('load pretrained model of epoch %d' % args.pre_epoch)
load_model(model, "./model/lightCNN_epoch_%d.pth.tar" % args.pre_epoch)
else:
print("=> loading pretrained lightcnn '{}'".format(args.weights_lightcnn))
load_model(model, args.weights_lightcnn)
# train loader of real data
train_loader_real = torch.utils.data.DataLoader(
Real_Dataset(args), batch_size=2*args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
# train loader of mix data (real + fake)
train_loader_mix = torch.utils.data.DataLoader(
Mix_Dataset(args), batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
# criterion
criterion = nn.CrossEntropyLoss().cuda()
'''
Stage I: model pretrained for last fc2 parameters
'''
params_pretrain = []
for name, value in model.named_parameters():
if "fc2_" in name:
params_pretrain += [{"params": value, "lr": 1 * args.lr}]
# optimizer
optimizer_pretrain = torch.optim.SGD(params_pretrain, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(1, 5):
pre_train(train_loader_real, model, criterion, optimizer_pretrain, epoch)
save_checkpoint(model, epoch, "LightCNN_pretrain")
'''
Stage II: model finetune for full network
'''
# optimizer
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
start_epoch = args.pre_epoch + 1
for epoch in range(start_epoch, args.epochs + 1):
adjust_learning_rate(args.lr, args.step_size, optimizer, epoch)
train(train_loader_mix, model, criterion, optimizer, epoch)
save_checkpoint(model, epoch, args.save_name)
# pretrain for the last fc2 parameters
def pre_train(train_loader, model, criterion, optimizer, epoch):
top1 = AverageMeter()
top5 = AverageMeter()
model.train()
for i, data in enumerate(train_loader):
# get data
input = Variable(data["img"].cuda())
label = Variable(data["label"].cuda())
batch_size = input.size(0)
if batch_size < 2*args.batch_size:
continue
# forward
output = model(input)[0]
loss = criterion(output, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, label.data, topk=(1, 5))
top1.update(prec1.item(), batch_size)
top5.update(prec5.item(), batch_size)
# print log
if i % args.print_iter == 0:
info = "====> Epoch: [{:0>3d}][{:3d}/{:3d}] | ".format(epoch, i, len(train_loader))
info += "Loss: ce: {:4.3f} | ".format(loss.item())
info += "Prec@1: {:4.2f} ({:4.2f}) Prec@5: {:4.2f} ({:4.2f})".format(top1.val, top1.avg, top5.val, top5.avg)
print(info)
def train(train_loader, model, criterion, optimizer, epoch):
top1 = AverageMeter()
top5 = AverageMeter()
model.train()
for i, data in enumerate(train_loader):
# real data
input_real = Variable(data["img_A"].cuda())
label = Variable(data["label"].cuda())
# fake data
fake_nir = Variable(data["img_B"].cuda())
fake_vis = Variable(data["img_B_pair"].cuda())
batch_size = input_real.size(0)
if batch_size < args.batch_size:
continue
# forward
output = model(input_real)[0]
loss_ce = criterion(output, label)
fc_nir = model(fake_nir)[1]
fc_vis = model(fake_vis)[1]
        # create an index permutation for negative pairs, ensuring no sample is paired with itself
        arange = torch.arange(batch_size).cuda()
        idx = torch.randperm(batch_size).cuda()
        while (idx == arange).any():
            idx = torch.randperm(batch_size).cuda()
# contrastive loss
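        # the negated ang_loss term encourages matched NIR/VIS features to agree (assuming
        # ang_loss scores feature similarity), while the relu term penalizes mismatched
        # pairs whose feature inner product exceeds a 0.5 margin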
loss_ct = - ang_loss(fc_nir, fc_vis) + \
0.1 * F.relu((fc_nir * fc_vis[idx, :]).sum(dim=1) - 0.5).sum() / float(batch_size)
loss = loss_ce + 0.001 * loss_ct
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, label.data, topk=(1, 5))
top1.update(prec1.item(), batch_size)
top5.update(prec5.item(), batch_size)
# print log
if i % args.print_iter == 0:
info = "====> Epoch: [{:0>3d}][{:3d}/{:3d}] | ".format(epoch, i, len(train_loader))
info += "Loss: ce: {:4.3f} ct: {:4.3f} | ".format(loss_ce.item(), loss_ct.item())
info += "Prec@1: {:4.2f} ({:4.2f}) Prec@5: {:4.2f} ({:4.2f})".format(top1.val, top1.avg, top5.val, top5.avg)
print(info)
if __name__ == "__main__":
main()
|
11529333
|
import mido
import time
MIDI_MANUF_ID = 0x7D
ERASE_FLASH = 52
WRITE_FLASH = 54
SYSEX_CMD_RESET = 60
File_To_Write = r"D:\midi-commander-custom\python\config_image.bin"
midi_inputs = [x for x in mido.get_input_names() if 'STM' in x]
midi_outputs = [x for x in mido.get_output_names() if 'STM' in x]
if len(midi_inputs) == 0:
print('no input found')
exit()
if len(midi_outputs) == 0:
print('no outputs found')
exit()
inport = mido.open_input(midi_inputs[0])
outport = mido.open_output(midi_outputs[0])
# Erase Flash settings pages
print('Erasing Flash Settings')
outmsg = mido.Message('sysex', data=[MIDI_MANUF_ID, ERASE_FLASH, 0x42, 0x24])
outport.send(outmsg)
# Wait for response
time.sleep(0.05)
inmsg = inport.receive()
print('Erase Complete')
with open(File_To_Write, 'rb') as f:
flash_contents = bytes(f.read())
no_chunks = int(len(flash_contents)/16)
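# the image is written 16 bytes per SysEx message; any trailing partial chunk is dropped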
for x in range(0, no_chunks):
print('Writing Flash Chunk: ', x+1, '/', no_chunks)
flash_chunk_low_byte = x & 0x7F
flash_chunk_high_byte = (x >> 7) & 0x7F
    data = [MIDI_MANUF_ID, WRITE_FLASH, flash_chunk_high_byte, flash_chunk_low_byte]
    for i in range(0, 16):
        # split each byte into two 4-bit nibbles so every SysEx data byte stays below 0x80
        data += [flash_contents[x*16 + i] >> 4]
        data += [flash_contents[x*16 + i] & 0xF]
    outmsg = mido.Message('sysex', data=data)
    outport.send(outmsg)
inmsg = inport.receive()
time.sleep(0.01)
print('Finished, resetting device...')
outmsg = mido.Message('sysex', data=[MIDI_MANUF_ID, SYSEX_CMD_RESET])
outport.send(outmsg)
inport.close()
outport.close()
|
11529346
|
from django.contrib import admin
from durin.models import Client
from .models import ClientSettings
class ClientSettingsInlineAdmin(admin.StackedInline):
"""
Django's StackedInline for :class:`ClientSettings` model.
"""
model = ClientSettings
list_select_related = True
extra = 1
class ClientAdmin(admin.ModelAdmin):
"""
Django's ModelAdmin for :class:`Client` model.
"""
inlines = [
ClientSettingsInlineAdmin,
]
list_display = (
"id",
"name",
"token_ttl",
"throttle_rate",
)
# Unregister default admin view
admin.site.unregister(Client)
admin.site.register(Client, ClientAdmin)
|
11529375
|
import os, time, re
import subprocess
import tempfile
def main():
    #Useful constants; modify these to change the output file names
output_dir = '../../dist/'
relative_dir = output_dir + "a-starry-sky.master.js"
file_dir = os.path.abspath(relative_dir)
minified_file_dir = os.path.abspath(output_dir + "a-starry-sky.master.min.js")
    #Directory and ordered list of files to load
js_dir = '../js/'
js_fil_names = ['three_js_extensions/BufferGeometryUtils.js',\
                    'three_js_extensions/StarrySkyGPUComputeRenderer.js',\
'StarrySky.js',\
'materials/atmosphere/atmosphere-functions.js',\
'materials/atmosphere/transmittance.js',\
'materials/atmosphere/single-scattering.js',\
'materials/atmosphere/inscattering-sum.js',\
'materials/atmosphere/kth-inscattering.js',\
'materials/atmosphere/atmosphere-pass.js',\
'materials/postprocessing/high-pass-filter.js',\
'materials/postprocessing/seperable-blur-filter.js',\
'materials/sun/combination-pass.js',\
'materials/sun/base-sun-partial.js',\
'materials/moon/combination-pass.js',\
'materials/moon/base-moon-partial.js',\
'materials/stars/star-data-map.js',\
'materials/autoexposure/metering-survey.js',\
'materials/autoexposure/test-pass.js',\
'html_tags/SkyAssetsDir.js',\
'html_tags/SkyAtmosphericParameters.js',\
'html_tags/SkyLocation.js',\
'html_tags/SkyTime.js',\
'html_tags/SkyLighting.js',\
'lut_libraries/AtmosphericLUTLibrary.js',\
'lut_libraries/StellarLUTLibrary.js',\
'renderers/AtmosphereRenderer.js',\
'renderers/BloomRenderer.js',\
'renderers/SunRenderer.js',\
'renderers/MoonRenderer.js',\
'renderers/MeteringSurveyRenderer.js',\
'components/LightingManager.js',\
'components/AssetManager.js',\
'components/SkyDirector.js',\
'primitives/a-starry-sky.js',\
'components/starry-sky-wrapper.js']
#Grab the strings for each of these files, and pull out any branches of code related to if(typeof exports !== 'undefined') {..}
code_blocks = []
regex = re.compile(r"(if\(typeof\sexports\s.*?\})", re.DOTALL)
for js_fil_name in js_fil_names:
fil_path = os.path.abspath(js_dir + js_fil_name)
code_block = ''
with open(fil_path, 'r') as f:
code_block = f.read()
out_code = re.sub(regex, '', code_block)
code_blocks = code_blocks + [out_code]
    #Concatenate each of these files into the master file
combined_code = '\n'.join(code_blocks)
with open(file_dir, 'w') as w:
w.write(combined_code)
print ("Combined file written...")
#Remove comments from our GLSL code to further compress our code for the minified file
    shaderFil = code_blocks[1]
select_normal_comments = re.compile(r"([\'|\"]\/\/.*[\'|\"],)")
code_without_normal_comments = re.sub(select_normal_comments, '', shaderFil)
select_trailing_normal_comments = re.compile(r"([\'|\"].*)(\/\/.*)([\'|\"],)")
code_without_trailing_normal_comments = re.sub(select_trailing_normal_comments, r"\1\3", code_without_normal_comments)
select_normal_multiline_comments = re.compile(r"[\'|\"]/\*[^*]*\*+(?:[^/*][^*]*\*+)*/[\'|\"]\,")
code_without_normal_multiline_comments = re.sub(select_normal_multiline_comments, '', code_without_trailing_normal_comments)
code_blocks[1] = code_without_normal_multiline_comments
combined_code = '\n'.join(code_blocks)
#Write this into a temporary file for usage in the subprocess
    temp_dir = os.path.abspath(output_dir)
    with tempfile.NamedTemporaryFile(mode='w', dir = temp_dir) as tmp:
        #Write our temporary file
        tmp.write(combined_code)
        tmp.seek(0)
        #Use that temporary file in our sub process
        #(shell=True combined with an argument list would drop the file name, so call terser directly)
        proc = subprocess.Popen(['terser', tmp.name], stdout=subprocess.PIPE)
        (uglified_js, err) = proc.communicate()
        if err is None:
            with open(minified_file_dir, 'wb') as w:
                w.write(uglified_js)
            print ("Uglified file written...")
#Run everything you see above
main()
|
11529382
|
import numpy
class pyramid: # pyramid
# properties
pyr = []
pyrSize = []
pyrType = ''
image = ''
# constructor
    def __init__(self):
        print("please specify type of pyramid to create (Gpyr, Lpyr, etc.)")
        return
# methods
def nbands(self):
return len(self.pyr)
def band(self, bandNum):
return numpy.array(self.pyr[bandNum])
|
11529437
|
from pyjo.fields.field import Field
class EnumField(Field):
def __init__(self, enum, use_name=True, **kwargs):
"""
:type enum: T
:rtype: T
"""
super(EnumField, self).__init__(type=enum, **kwargs)
self.enum_cls = enum
self.use_name = use_name
def to_dict(self, value):
if value is not None:
return value.name if self.use_name else value.value
def from_dict(self, name):
if name is not None:
return self.enum_cls[name] if self.use_name else self.enum_cls(name)
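# Minimal usage sketch (hypothetical Color/Pixel names; assumes a pyjo Model subclass):
#   from enum import Enum
#   class Color(Enum):
#       red = 1
#   class Pixel(Model):
#       color = EnumField(enum=Color)
#   Pixel(color=Color.red).to_dict()  # -> {'color': 'red'} when use_name=True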
|
11529511
|
from pcf.particle.aws.sagemaker.notebook_instance import NotebookInstance
from pcf.core import State
# Edit example json to work in your account
notebook_instance_name = "pcf-test"
notebook_example_json = {
"pcf_name": "pcf_notebook", # Required
"flavor": "sagemaker_notebook_instance", # Required
"aws_resource": {
# Refer to https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_notebook_instance for full list of parameters
"NotebookInstanceName": notebook_instance_name, # Required
"InstanceType": "ml.t2.medium", # Required
"RoleArn": "arn:aws:iam::123456789012:role/some-role", # Required,
"Tags": [
{
'Key': 'ggwp',
'Value': 'lolcat'
}
],
"RootAccess": "Enabled"
}
}
# create Sagemaker Notebook particle
notebook = NotebookInstance(notebook_example_json)
# example start
notebook.set_desired_state(State.running)
notebook.apply()
print(notebook.get_state())
rsp = notebook.client.create_presigned_notebook_instance_url(NotebookInstanceName=notebook_instance_name)
print(rsp.get("AuthorizedUrl"))
# example stop
notebook.set_desired_state(State.stopped)
notebook.apply()
print(notebook.get_state())
# example terminate
notebook.set_desired_state(State.terminated)
notebook.apply()
print(notebook.get_state())
|
11529524
|
import subprocess
import json
import concurrent.futures
filename = 'IAM_users_without_any_groups.csv'
def writeIntoFile(filename, stdout, method='w+'):
    with open(filename, method) as f:
        f.write(stdout)
def getUsersGroups(username):
user = username['UserName']
creationD = username['CreateDate'].split("T")[0]
arn = username['Arn']
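    # list this user's group memberships through the AWS CLI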
command = ['aws', 'iam', 'list-groups-for-user', '--user-name', user]
command = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = command.communicate()
out = json.loads(out)
out = out['Groups']
print("[#] " + user)
print("```")
print("Group Count: " + str(len(out)))
if len(out) == 0:
output = user + "," + arn + "," + creationD + "," + str(len(out))
writeIntoFile(filename, output + "\n", method='a+')
print(output)
print("```\n")
def main():
output = "Usernames,ARN,CreationDate,GroupCount"; writeIntoFile(filename, output + "\n", method='w+')
command = ['aws', 'iam', 'list-users']
command = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = command.communicate()
out = json.loads(out)
users = out['Users']
with concurrent.futures.ProcessPoolExecutor(max_workers=50) as executor:
executor.map(getUsersGroups, users)
if __name__ == '__main__':
main()
|
11529530
|
from conans import ConanFile, CMake
class ConanMgsBase32hex(ConanFile):
name = "mgs_base32hex"
version = "0.1.0"
generators = "cmake"
exports_sources = "include/*", "CMakeLists.txt", "test/*"
settings = "os", "arch", "build_type", "compiler"
def build_requirements(self):
self.build_requires("mgs_cmake/%s" % self.version)
if self.develop:
self.build_requires("catch2/2.13.6")
self.build_requires("mgs_meta/%s" % self.version)
def requirements(self):
self.requires("mgs_base_n/%s" % self.version)
self.requires("mgs_codecs/%s" % self.version)
self.requires("mgs_config/%s" % self.version)
def build(self):
cmake = CMake(self)
cmake.definitions["BUILD_TESTING"] = "OFF"
cmake.configure()
cmake.build()
cmake.install()
def package_id(self):
self.info.header_only()
|
11529533
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
import numpy as np
from progress.bar import Bar
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.data_parallel import (
DataParallel,
)
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.utils.utils import (
AverageMeter,
)
from opendr.perception.object_tracking_2d.logger import Logger
class ModelWithLoss(torch.nn.Module):
def __init__(self, model, loss):
super(ModelWithLoss, self).__init__()
self.model = model
self.loss = loss
def forward(self, batch):
if self.model.ort_session is None:
outputs = self.model(batch["input"])
        else:
            # run inference through the exported ONNX Runtime session
            outputs_flat = self.model.ort_session.run(None, {'data': np.array(batch["input"].cpu())})
outputs = {}
for i in range(len(self.model.heads_names)):
outputs[self.model.heads_names[i]] = outputs_flat[i]
loss, loss_stats = self.loss(outputs, batch)
return outputs[-1], loss, loss_stats
class BaseTrainer(object):
def __init__(
self,
model,
gpus,
num_iters,
exp_id,
device,
hide_data_time,
print_iter,
optimizer,
mse_loss,
reg_loss,
dense_wh,
cat_spec_wh,
reid_dim,
nID,
norm_wh,
num_stack,
wh_weight,
off_weight,
id_weight,
reg_offset,
hm_weight,
):
self.gpus = gpus
self.num_iters = num_iters
self.exp_id = exp_id
self.device = device
self.hide_data_time = hide_data_time
self.print_iter = print_iter
self.optimizer = optimizer
self.loss_stats, self.loss = self._get_losses(
mse_loss,
reg_loss,
dense_wh,
cat_spec_wh,
reid_dim,
nID,
norm_wh,
num_stack,
wh_weight,
off_weight,
id_weight,
reg_offset,
hm_weight,
)
self.model_with_loss = ModelWithLoss(model, self.loss)
self.optimizer.add_param_group({"params": self.loss.parameters()})
def set_device(self, gpus, chunk_sizes, device):
if len(gpus) > 1:
self.model_with_loss = DataParallel(
self.model_with_loss, device_ids=gpus, chunk_sizes=chunk_sizes
).to(device)
else:
self.model_with_loss = self.model_with_loss.to(device)
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
def run_epoch(self, phase, epoch, data_loader, save_iter, save, log=print):
model_with_loss = self.model_with_loss
if phase == "train":
model_with_loss.train()
else:
if len(self.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
results = {}
data_time, batch_time = AverageMeter(), AverageMeter()
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
num_iters = len(data_loader) if self.num_iters < 0 else self.num_iters
bar = Bar("{}/{}".format("mot", self.exp_id), max=num_iters)
end = time.time()
for iter_id, batch in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != "meta":
batch[k] = batch[k].to(device=self.device, non_blocking=True)
output, loss, loss_stats = model_with_loss(batch)
loss = loss.mean()
if phase == "train":
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = "{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} ".format(
epoch,
iter_id,
num_iters,
phase=phase,
total=bar.elapsed_td,
eta=bar.eta_td,
)
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch["input"].size(0)
)
Bar.suffix = Bar.suffix + "|{} {:.4f} ".format(l, avg_loss_stats[l].avg)
if not self.hide_data_time:
Bar.suffix = (
Bar.suffix + "|Data {dt.val:.3f}s({dt.avg:.3f}s) "
"|Net {bt.avg:.3f}s".format(dt=data_time, bt=batch_time)
)
if self.print_iter > 0:
if iter_id % self.print_iter == 0:
log(
Logger.LOG_WHEN_NORMAL,
"{}/{}| {}".format("mot", self.exp_id, Bar.suffix),
)
if save_iter > 0:
if iter_id % save_iter == 0:
save(iter_id + num_iters * epoch)
log(
Logger.LOG_WHEN_NORMAL,
"Model saved",
)
del output, loss, loss_stats, batch
bar.finish()
ret = {k: v.avg for k, v in avg_loss_stats.items()}
ret["time"] = bar.elapsed_td.total_seconds() / 60.0
return ret, results
def debug(self, batch, output, iter_id):
raise NotImplementedError
def save_result(self, output, batch, results):
raise NotImplementedError
def _get_losses(self, opt):
raise NotImplementedError
def val(self, epoch, data_loader):
return self.run_epoch("val", epoch, data_loader, -1, None)
def train(self, epoch, data_loader, save_iter, save, log=print):
return self.run_epoch("train", epoch, data_loader, save_iter, save, log)
|
11529571
|
import unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
from copy import deepcopy
import models.main
from classes import Paladin
from exceptions import NoSuchCharacterError
from models.characters.loader import load_saved_character, load_all_saved_characters_general_info
from tests.models.character.character_mock import character
class LoaderTests(unittest.TestCase):
def setUp(self):
self.character = deepcopy(character)
self.expected_general_info = [
{'name': 'Netherblood', 'class': 'paladin', 'level': 3},
{'name': 'Visionary', 'class': 'paladin', 'level': 1}
]
def test_load_valid_character(self):
loaded_char = load_saved_character(character.name)
self.assertIsNotNone(loaded_char)
self.assertTrue(isinstance(loaded_char, Paladin))
# Separately test out the inventory and equipment, because they do not compare well in the
# overall vars() assert equal, even though the Item object has an __eq__ method
received_eq = loaded_char.equipment
char_eq = self.character.equipment
self.assertCountEqual(received_eq, char_eq)
loaded_char.equipment = None
self.character.equipment = None
received_inv = loaded_char.inventory
char_inv = self.character.inventory
self.assertCountEqual(received_inv, char_inv)
loaded_char.inventory = None
self.character.inventory = None
self.assertEqual(vars(loaded_char), vars(self.character))
def test_load_invalid_character(self):
invalid_name = 'AaAa'
expected_message = f'There is no saved character by the name of {invalid_name}!'
try:
load_saved_character(invalid_name)
self.fail('The test should have raised a NoSuchCharacterError!')
except NoSuchCharacterError as e:
self.assertEqual(str(e), expected_message)
def test_load_all_saved_characters_general_info(self):
loaded_general_info = load_all_saved_characters_general_info()
self.assertEqual(loaded_general_info, self.expected_general_info)
if __name__ == '__main__':
unittest.main()
|
11529586
|
from globus_cli.commands import main
from globus_cli.version import __version__
__all__ = ["main", "__version__"]
|
11529595
|
import magma as m
class _And2(m.Circuit):
name = "And2"
io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit), O=m.Out(m.Bit))
def test_declare_repr():
assert str(_And2) == 'And2(I0: In(Bit), I1: In(Bit), O: Out(Bit))'
assert repr(_And2) == ('And2 = DeclareCircuit("And2", "I0", In(Bit), "I1", '
'In(Bit), "O", Out(Bit))')
class _Top(m.Circuit):
io = m.IO()
and2 = _And2(name="and2")
and2 = _Top.and2
assert str(and2) == "and2<And2(I0: In(Bit), I1: In(Bit), O: Out(Bit))>"
assert str(and2.I0) == "I0"
def test_declare_simple():
assert isinstance(_And2.I0, m.Bit)
def test_declare_interface_polarity():
class _And2Defn(m.Circuit):
name = "And2"
io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit), O=m.Out(m.Bit))
io.O <= io.I0 # just something to make this a definition
assert (_And2.interface.ports["I0"].is_input() ==
_And2Defn.interface.ports["I0"].is_input())
|
11529613
|
import json
from mudpi.exceptions import MudPiError
from mudpi.constants import FONT_YELLOW, FONT_RESET
from mudpi.logger.Logger import Logger, LOG_LEVEL
class Registry:
""" Key-Value database for managing object instances """
def __init__(self, mudpi, name):
self.mudpi = mudpi
self.name = name
self._registry = {}
def all(self):
""" Return all items in the registry """
return self._registry
def items(self):
""" Dict items() helper for iteration """
return self.all().items()
def keys(self):
""" Dict keys() helper for iteration """
return self.all().keys()
def get(self, key):
""" Get an item for the specified key """
return self._registry[key]
def exists(self, key):
""" Return if key exists in the registry """
return key in self._registry
def register(self, key, value):
""" Registers the value into the registry """
if key not in self._registry:
self.mudpi.events.publish(self.name, {'event': 'Registered', 'action': key})
self._registry[key] = value
return value
@property
def length(self):
return len(self.all())
class ComponentRegistry(Registry):
""" Comopnent Database
Stores components per namespace for MudPi
"""
def get(self, component_id):
""" Get an item for the specified key """
try:
component = [ component
for components in self._registry.values()
for _id, component in components.items()
if _id in component_id ][0]
except Exception as error:
component = None
return component
def for_namespace(self, namespace=None):
""" Get all the components for a given namespace """
return self._registry.setdefault(namespace, {})
def for_interface(self, interface=None):
""" Get all the components for a given interface """
try:
components = [ component
for components in self._registry.values()
for _id, component in components.items()
if component.interface in interface ]
except Exception as error:
components = []
return components
def exists(self, component_ids):
""" Return if key exists in the registry """
return any([ exists for components in self._registry.values()
for exists in components
if exists in component_ids ])
def register(self, component_id, component, namespace=None):
""" Registers the component into the registry """
namespace_registry = self._registry.setdefault(namespace, {})
if component_id not in namespace_registry:
self.mudpi.events.publish('core', {'event': 'ComponentRegistered', 'component': component_id, 'namespace': namespace})
namespace_registry[component_id] = component
return component
def ids(self):
""" Return all the registered component ids """
return [ component.id
for components in self._registry.values()
for component in components.values() ]
class ActionRegistry(Registry):
""" Database of actions available to MudPi from
user configs or components.
None = global
"""
def register(self, action_key, func, namespace=None, validator=None):
""" Register the action under the specified namespace. """
namespace_registry = self._registry.setdefault(namespace, {})
if action_key not in namespace_registry:
self.mudpi.events.publish('core', {'event': 'ActionRegistered', 'action': action_key, 'namespace': namespace})
namespace_registry[action_key] = Action(func, validator)
def for_namespace(self, namespace=None):
""" Get all the actions for a given namespace """
return self._registry.setdefault(namespace, {})
def exists(self, action_key):
""" Return if action exists for given action command """
action = self.parse_call(action_key)
registry = self._registry.setdefault(action['namespace'], {})
return action['action'] in registry
def parse_call(self, action_call):
""" Parse a command string and extract the namespace and action """
parsed_action = {}
if action_call.startswith('.'):
# Empty Namespace
parsed_action['namespace'] = None
action_call = action_call.replace('.', '', 1)
parsed_action['action'] = action_call
elif '.' in action_call:
parts = action_call.split('.')
if len(parts) > 2:
parsed_action['namespace'] = f'{parts[0]}.{parts[1]}'
parsed_action['action'] = parts[2]
else:
parts = action_call.split('.', 1)
parsed_action['namespace'] = parts[0]
parsed_action['action'] = parts[1]
else:
parsed_action['namespace'] = None
parsed_action['action'] = action_call
return parsed_action
def call(self, action_call, action_data={}):
""" Call an action from the registry
Format: {namespace}.{action} or
{namespace}.{component}.{action}
"""
command = self.parse_call(action_call)
action = self._registry.get(command['namespace'], {}).get(command['action'])
        if not action:
            # raise MudPiError("Call to action that doesn't exist!")
            Logger.log(
                LOG_LEVEL["error"],
                f'{FONT_YELLOW}Call to action {action_call} that doesn\'t exist!{FONT_RESET}'
            )
            return
validated_data = action.validate(action_data)
        if not validated_data and action_data:
            # raise MudPiError("Action data was not valid!")
            Logger.log(
                LOG_LEVEL["error"],
                f'{FONT_YELLOW}Action data was not valid for {action_call}{FONT_RESET}'
            )
            return
self.mudpi.events.publish('core', {'event': 'ActionCall', 'action': action_call, 'data': action_data, 'namespace': command['namespace']})
action(data=validated_data)
def handle_call(self, event_data={}):
""" Handle an Action call from event bus """
if event_data:
try:
_data = json.loads(event_data.get('data', {}))
except Exception:
_data = event_data
action = _data.get('action')
if action:
return self.call(action, _data.get('data', {}))
class Action:
""" A callback associated with a string """
def __init__(self, func, validator):
self.func = func
        self.validator = validator
def validate(self, data):
if not self.validator:
return data
if callable(self.validator):
return self.validator(data)
return False
def __call__(self, data=None, **kwargs):
if self.func:
if callable(self.func):
if data:
return self.func(data)
else:
return self.func()
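# Minimal usage sketch (hypothetical names) for registering and invoking a namespaced action:
#   actions = ActionRegistry(mudpi, 'action')
#   actions.register('toggle', toggle_switch, namespace='switch')
#   actions.call('switch.toggle')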
|
11529614
|
from .settings import Settings
from .custom_user import User
from .filter import DataFilter
from .parametrised_filter import ParametrisedFilter
from .version import DatasetVersion
from .variable import DataVariable
from .dataset import Dataset
from .dataset_configuration import DatasetConfiguration
from .copied_validations import CopiedValidations
from .validation_run import ValidationRun
from .celery_task import CeleryTask
from .statistics import Statistics
from .networks import ISMNNetworks
from .uptime_ping import UptimePing
from .uptime_agent import UptimeAgent
from .uptime_report import UptimeReport
from .email import Email
|
11529653
|
main = {
'General': {
'Prop': {
'Labels': 'rw',
'AlarmStatus': 'r-'
}
}
}
cfgm = {
'General': {
'Cmd': {
'CreatePort',
'DeletePort'
}
}
}
|
11529655
|
import json
import NLTK_Chink
import time
import NLTK_log
if __name__ == '__main__':
startTime = time.time()
with open('./Training.json') as file:
obj = json.loads(file.read())
questions = obj['questions']
for i, question in enumerate(questions):
        sentence = question['question'][0]['string'].replace('?', '')
        print('Question', i, sentence)
        NLTK_Chink.test(i, sentence)
endTime = time.time()
NLTK_log.write('totally used', format((endTime - startTime), '.2f'))
|
11529665
|
import bitcoin
from bitcoin.deterministic import bip32_harden as h
mnemonic='saddle observe obtain scare burger nerve electric alone minute east walnut motor omit coyote time'
seed=bitcoin.mnemonic_to_seed(mnemonic)
mpriv=bitcoin.bip32_master_key(seed)
accountroot=mpriv
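# derive the BIP44 account node m/44'/0'/0' (hardened at each level)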
accountroot=bitcoin.bip32_ckd(accountroot,h(44))
accountroot=bitcoin.bip32_ckd(accountroot,h(0))
accountroot=bitcoin.bip32_ckd(accountroot,h(0))
for i in range(19):
dkey=bitcoin.bip32_descend(accountroot,0,i)
print(bitcoin.privtoaddr(dkey))
|
11529710
|
from __future__ import print_function
import sys
import Pyro4
test = Pyro4.core.Proxy("PYRONAME:example.exceptions")
print(test.div(2.0, 9.0))
try:
print(2 // 0)
except ZeroDivisionError as x:
print("DIVIDE BY ZERO: %s" % x)
try:
print(test.div(2, 0))
except ZeroDivisionError as x:
print("DIVIDE BY ZERO: %s" % x)
try:
result = test.error()
print("%r, %s" % (result, result))
except ValueError as x:
print("VALUERROR: %s" % x)
try:
result = test.error2()
print("%r, %s" % (result, result))
except ValueError as x:
print("VALUERROR: %s" % x)
try:
result = test.othererr()
print("%r, %s" % (result, result))
except Exception as x:
print("ANOTHER ERROR: %s" % x)
try:
result = test.unserializable()
print("%r, %s" % (result, result))
except Exception as x:
print("UNSERIALIZABLE ERROR: %s" % x)
print("\n*** invoking server method that crashes, catching traceback ***")
try:
print(test.complexerror())
except Exception as x:
print("CAUGHT ERROR >>> %s" % x)
print("Printing Pyro traceback >>>>>>")
print("".join(Pyro4.util.getPyroTraceback()))
print("<<<<<<< end of Pyro traceback")
print("\n*** installing pyro's excepthook")
sys.excepthook = Pyro4.util.excepthook
print("*** invoking server method that crashes, not catching anything ***")
print(test.complexerror()) # due to the excepthook, the exception will show the pyro error
|
11529715
|
import pytest
from jina.peapods.pods.factory import PodFactory
from jina.peapods import Pea
from jina.parsers import set_pod_parser, set_pea_parser
from jina import Flow, Executor, requests, Document, DocumentArray
from jina.helper import random_port
def validate_response(resp, expected_docs=50):
assert len(resp.data.docs) == expected_docs
for doc in resp.data.docs:
assert 'external_real' in doc.tags['name']
@pytest.fixture(scope='function')
def input_docs():
return DocumentArray([Document() for _ in range(50)])
@pytest.fixture
def num_replicas(request):
return request.param
@pytest.fixture
def num_shards(request):
return request.param
@pytest.fixture(scope='function')
def external_pod_args(num_replicas, num_shards):
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real',
'--port-in',
str(random_port()),
'--host-in',
'0.0.0.0',
'--shards',
str(num_shards),
'--replicas',
str(num_replicas),
'--polling',
'all',
]
return set_pod_parser().parse_args(args)
@pytest.fixture
def external_pod(external_pod_args):
return PodFactory.build_pod(external_pod_args)
class MyExternalExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@requests
def foo(self, docs, *args, **kwargs):
for doc in docs:
doc.tags['name'] = self.runtime_args.name
@pytest.mark.parametrize('num_replicas', [1, 2], indirect=True)
@pytest.mark.parametrize('num_shards', [1, 2], indirect=True)
def test_flow_with_external_pod(
external_pod, external_pod_args, input_docs, num_replicas, num_shards
):
with external_pod:
external_args = vars(external_pod_args)
del external_args['name']
del external_args['external']
del external_args['pod_role']
del external_args['dynamic_routing']
flow = Flow().add(
**external_args,
name='external_fake',
external=True,
)
with flow:
resp = flow.index(inputs=input_docs, return_results=True)
validate_response(resp[0], 50 * num_shards)
@pytest.fixture(scope='function')
def external_executor_args():
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real',
'--port-in',
str(random_port()),
'--host-in',
'0.0.0.0',
'--socket-in',
'ROUTER_BIND',
'--dynamic-routing-out',
]
return set_pea_parser().parse_args(args)
@pytest.fixture
def external_executor(external_executor_args):
return Pea(external_executor_args)
def test_flow_with_external_executor(
external_executor, external_executor_args, input_docs
):
with external_executor:
external_args = vars(external_executor_args)
del external_args['name']
flow = Flow().add(
**external_args,
name='external_fake',
external=True,
)
with flow:
resp = flow.index(inputs=input_docs, return_results=True)
validate_response(resp[0])
@pytest.mark.parametrize('num_replicas', [2], indirect=True)
@pytest.mark.parametrize('num_shards', [2], indirect=True)
def test_two_flow_with_shared_external_pod(
external_pod, external_pod_args, input_docs, num_replicas, num_shards
):
with external_pod:
external_args = vars(external_pod_args)
del external_args['name']
del external_args['external']
del external_args['pod_role']
del external_args['dynamic_routing']
flow1 = Flow().add(
**external_args,
name='external_fake',
external=True,
)
flow2 = (
Flow()
.add(name='foo')
.add(
**external_args,
name='external_fake',
external=True,
needs=['gateway', 'foo'],
)
)
with flow1, flow2:
results = flow1.index(inputs=input_docs, return_results=True)
validate_response(results[0], 50 * num_shards)
results = flow2.index(inputs=input_docs, return_results=True)
validate_response(results[0], 50 * num_shards * 2)
def test_two_flow_with_shared_external_executor(
external_executor,
external_executor_args,
input_docs,
):
with external_executor:
external_args = vars(external_executor_args)
del external_args['name']
flow1 = Flow().add(
**external_args,
name='external_fake',
external=True,
)
flow2 = (
Flow()
.add(name='foo')
.add(
**external_args,
name='external_fake',
external=True,
needs=['gateway', 'foo'],
)
)
with flow1, flow2:
results = flow1.index(inputs=input_docs, return_results=True)
validate_response(results[0])
results = flow2.index(inputs=input_docs, return_results=True)
validate_response(results[0], 50 * 2)
@pytest.fixture(scope='function')
def external_pod_shards_1_args(num_replicas, num_shards):
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real_1',
'--port-in',
str(random_port()),
'--host-in',
'0.0.0.0',
'--shards',
str(num_shards),
'--replicas',
str(num_replicas),
'--polling',
'all',
]
return set_pod_parser().parse_args(args)
@pytest.fixture
def external_pod_shards_1(external_pod_shards_1_args):
return PodFactory.build_pod(external_pod_shards_1_args)
@pytest.fixture(scope='function')
def external_pod_shards_2_args(num_replicas, num_shards):
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real_2',
'--port-in',
str(random_port()),
'--host-in',
'0.0.0.0',
'--shards',
str(num_shards),
'--replicas',
str(num_replicas),
'--polling',
'all',
]
return set_pod_parser().parse_args(args)
@pytest.fixture
def external_pod_shards_2(external_pod_shards_2_args):
return PodFactory.build_pod(external_pod_shards_2_args)
@pytest.mark.parametrize('num_replicas', [1, 2], indirect=True)
@pytest.mark.parametrize('num_shards', [1, 2], indirect=True)
def test_flow_with_external_pod_shards(
external_pod_shards_1,
external_pod_shards_2,
external_pod_shards_1_args,
external_pod_shards_2_args,
input_docs,
num_replicas,
num_shards,
):
with external_pod_shards_1, external_pod_shards_2:
external_args_1 = vars(external_pod_shards_1_args)
external_args_2 = vars(external_pod_shards_2_args)
del external_args_1['name']
del external_args_1['external']
del external_args_1['pod_role']
del external_args_1['dynamic_routing']
del external_args_2['name']
del external_args_2['external']
del external_args_2['pod_role']
del external_args_2['dynamic_routing']
flow = (
Flow()
.add(name='executor1')
.add(
**external_args_1,
name='external_fake_1',
external=True,
needs=['executor1'],
)
.add(
**external_args_2,
name='external_fake_2',
external=True,
needs=['executor1'],
)
.join(needs=['external_fake_1', 'external_fake_2'], port_in=random_port())
)
with flow:
resp = flow.index(inputs=input_docs, return_results=True)
validate_response(resp[0], 50 * num_shards * 2)
@pytest.fixture(scope='function')
def external_pod_pre_shards_args(num_replicas, num_shards):
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real',
'--port-in',
str(random_port()),
'--host-in',
'0.0.0.0',
'--shards',
str(num_shards),
'--replicas',
str(num_replicas),
'--polling',
'all',
]
return set_pod_parser().parse_args(args)
@pytest.fixture
def external_pod_pre_shards(external_pod_pre_shards_args):
return PodFactory.build_pod(external_pod_pre_shards_args)
@pytest.mark.parametrize('num_replicas', [1, 2], indirect=True)
@pytest.mark.parametrize('num_shards', [1, 2], indirect=True)
def test_flow_with_external_pod_pre_shards(
external_pod_pre_shards,
external_pod_pre_shards_args,
input_docs,
num_replicas,
num_shards,
):
with external_pod_pre_shards:
external_args = vars(external_pod_pre_shards_args)
del external_args['name']
del external_args['external']
del external_args['pod_role']
del external_args['dynamic_routing']
flow = (
Flow()
.add(
**external_args,
name='external_fake',
external=True,
)
.add(
name='executor1',
needs=['external_fake'],
)
.add(
name='executor2',
needs=['external_fake'],
)
.join(needs=['executor1', 'executor2'])
)
with flow:
resp = flow.index(inputs=input_docs, return_results=True)
validate_response(resp[0], 50 * num_shards * 2)
@pytest.fixture(scope='function')
def external_pod_join_args(num_replicas, num_shards):
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real',
'--port-in',
str(random_port()),
'--host-in',
'0.0.0.0',
'--pod-role',
'JOIN',
'--shards',
str(num_shards),
'--replicas',
str(num_replicas),
'--polling',
'all',
]
return set_pod_parser().parse_args(args)
@pytest.fixture
def external_pod_join(external_pod_join_args):
return PodFactory.build_pod(external_pod_join_args)
@pytest.mark.parametrize('num_replicas', [1, 2], indirect=True)
@pytest.mark.parametrize('num_shards', [1, 2], indirect=True)
def test_flow_with_external_pod_join(
external_pod_join,
external_pod_join_args,
input_docs,
num_replicas,
num_shards,
):
with external_pod_join:
external_args = vars(external_pod_join_args)
del external_args['name']
del external_args['external']
del external_args['pod_role']
del external_args['dynamic_routing']
flow = (
Flow()
.add(
**external_args,
external=True,
)
.add(
name='executor1',
needs=['executor0'],
)
.add(
name='executor2',
needs=['executor0'],
)
.join(
**external_args,
external=True,
needs=['executor1', 'executor2'],
)
)
with flow:
resp = flow.index(inputs=input_docs, return_results=True)
validate_response(resp[0], 50 * num_shards * num_shards * 2)
|
11529727
|
from LAUG.aug import Word_Perturbation
from LAUG.aug import Text_Paraphrasing
from LAUG.aug import Speech_Recognition
from LAUG.aug import Speech_Disfluency
if __name__=="__main__":
text = "I want a train to Cambridge"
span_info = [["Train-Infrom","Dest","Cambridge",5,5]]
WP = Word_Perturbation('multiwoz')
TP = Text_Paraphrasing('multiwoz')
SR = Speech_Recognition('multiwoz')
SD = Speech_Disfluency('multiwoz')
WP_text,WP_span_info = WP.aug(text,span_info)
print('Word Perturbation:')
print(WP_text)
print(WP_span_info)
TP_text,TP_span_info = TP.aug(text,span_info)
print('Text Paraphrasing:')
print(TP_text)
print(TP_span_info)
SR_text,SR_span_info = SR.aug(text,span_info)
print('Speech Recognition:')
print(SR_text)
print(SR_span_info)
SD_text,SD_span_info = SD.aug(text,span_info)
print('Speech Disfluency:')
print(SD_text)
print(SD_span_info)
|
11529754
|
import os
import importlib
import math
import numpy as np
import tensorflow as tf
from source.network.detection import ssd_common
CLASS_WEIGHTS = 1.0
BBOXES_WEIGHTS = 1.0
# Priorboxes
ANCHORS_STRIDE = [8, 16, 32, 64, 128, 256, 512]
ANCHORS_ASPECT_RATIOS = [[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]
# control the size of the default square priorboxes
# REF: https://github.com/weiliu89/caffe/blob/ssd/src/caffe/layers/prior_box_layer.cpp#L164
MIN_SIZE_RATIO = 10
MAX_SIZE_RATIO = 90
INPUT_DIM = 512
ANCHORS_MAP, NUM_ANCHORS = ssd_common.get_anchors(ANCHORS_STRIDE,
ANCHORS_ASPECT_RATIOS,
MIN_SIZE_RATIO,
MAX_SIZE_RATIO,
INPUT_DIM)
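# ANCHORS_MAP holds the default boxes shared by ground-truth encoding (encode_gt) and box decoding (detect)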
def encode_gt(inputs, batch_size):
image_id, image, labels, boxes, scale, translation, file_name = inputs
gt_labels, gt_bboxes, gt_masks = ssd_common.encode_gt(labels, boxes, ANCHORS_MAP, batch_size)
return gt_labels, gt_bboxes, gt_masks
def ssd_feature(outputs, data_format):
outputs_conv6_2 = ssd_common.ssd_block(outputs, "conv6", data_format, [1, 2], [1, 3], [256, 512], ["SAME", "SAME"])
outputs_conv7_2 = ssd_common.ssd_block(outputs_conv6_2, "conv7", data_format, [1, 2], [1, 3], [128, 256], ["SAME", "SAME"])
outputs_conv8_2 = ssd_common.ssd_block(outputs_conv7_2, "conv8", data_format, [1, 2], [1, 3], [128, 256], ["SAME", "SAME"])
outputs_conv9_2 = ssd_common.ssd_block(outputs_conv8_2, "conv9", data_format, [1, 2], [1, 3], [128, 256], ["SAME", "SAME"])
outputs_conv10_2 = ssd_common.ssd_block(outputs_conv9_2, "conv10", data_format, [1, 1], [1, 2], [128, 256], ["SAME", "VALID"])
return outputs_conv6_2, outputs_conv7_2, outputs_conv8_2, outputs_conv9_2, outputs_conv10_2
def net(inputs,
num_classes,
is_training,
feature_net,
feature_net_path,
data_format="channels_last"):
image_id, image, labels, boxes, scale, translation, file_name = inputs
feature_net = getattr(
importlib.import_module("source.network." + feature_net),
"net")
feature_net_path = os.path.join(os.path.expanduser("~"), feature_net_path)
outputs = feature_net(image, data_format, feature_net_path)
with tf.variable_scope(name_or_scope='SSD',
values=[outputs],
reuse=tf.AUTO_REUSE):
outputs_conv4_3 = outputs[0]
outputs_fc7 = outputs[1]
# # Add shared features
outputs_conv6_2, outputs_conv7_2, outputs_conv8_2, outputs_conv9_2, outputs_conv10_2 = ssd_feature(outputs_fc7, data_format)
classes = []
bboxes = []
feature_layers = (outputs_conv4_3, outputs_fc7, outputs_conv6_2, outputs_conv7_2, outputs_conv8_2, outputs_conv9_2, outputs_conv10_2)
name_layers = ("VGG/conv4_3", "VGG/fc7", "SSD/conv6_2", "SSD/conv7_2", "SSD/conv8_2", "SSD/conv9_2", "SSD/conv10_2")
for name, feat, num in zip(name_layers, feature_layers, NUM_ANCHORS):
            # According to the original SSD paper, normalize conv4_3 with a learnable scale
            # In practice doing so indeed reduces the classification loss significantly
            if name == "VGG/conv4_3":
weight_scale = tf.get_variable('l2_norm_scaler',
initializer=[20.] * 512,
trainable=is_training)
feat = tf.multiply(weight_scale,
tf.math.l2_normalize(feat, axis=-1, epsilon=1e-12))
classes.append(ssd_common.class_graph_fn(feat, num_classes, num, name))
bboxes.append(ssd_common.bbox_graph_fn(feat, num, name))
classes = tf.concat(classes, axis=1)
bboxes = tf.concat(bboxes, axis=1)
return classes, bboxes
def loss(gt, outputs):
return ssd_common.loss(gt, outputs, CLASS_WEIGHTS, BBOXES_WEIGHTS)
def detect(feat_classes, feat_bboxes, batch_size, num_classes, confidence_threshold):
score_classes = tf.nn.softmax(feat_classes)
feat_bboxes = ssd_common.decode_bboxes_batch(feat_bboxes, ANCHORS_MAP, batch_size)
detection_topk_scores, detection_topk_labels, detection_topk_bboxes, detection_topk_anchors = ssd_common.detect_batch(
score_classes, feat_bboxes, ANCHORS_MAP, batch_size, num_classes, confidence_threshold)
return detection_topk_scores, detection_topk_labels, detection_topk_bboxes,detection_topk_anchors
|
11529769
|
import json
import os
import shutil
import subprocess
import tempfile
import aptbranch
def gs_rsync(local_path: str, remote_path: str, boto_path: str):
# TODO: do this directly rather than by shelling out
env = dict(os.environ)
env["BOTO_PATH"] = boto_path
subprocess.check_call(["gsutil", "-h", "Cache-Control:private, max-age=0, no-transform", "-m", "rsync", "-d", "-r", "-c", local_path, remote_path], env=env)
BOTO_PATH = "/homeworld/boto-key"
BOTO_TEMPLATE = """
[Credentials]
gs_service_key_file = %s
[Boto]
https_validate_certificates = True
[GSUtil]
content_language = en
default_api_version = 2
default_project_id = %s
parallel_composite_upload_threshold = 100M
""".lstrip()
def upload_gs(staging, root: str, branch_config: aptbranch.Config):
upload_path = branch_config.upload_config['gcs-target']
if not os.path.exists(BOTO_PATH):
raise Exception("you need to put the GCP service account private key file into {}".format(BOTO_PATH))
botoconfig_name = os.path.join(staging, "boto.config")
with open(botoconfig_name, "w") as bout:
with open(BOTO_PATH, "r") as f:
project_id = json.load(f)["project_id"]
bout.write(BOTO_TEMPLATE % (BOTO_PATH, project_id))
bout.flush()
gs_rsync(root, upload_path, botoconfig_name)
def upload_rsync(staging, root: str, branch_config: aptbranch.Config):
target = branch_config.upload_config['rsync-target']
subprocess.check_call(["rsync", "-avzc", "--progress", "--delete-delay", "--", root + "/", target])
UPLOAD_FUNCS = {
"google-cloud-storage": upload_gs,
"rsync": upload_rsync
}
def perform_uploads(uploads: dict, branch_config: aptbranch.Config) -> None:
with tempfile.TemporaryDirectory() as staging:
root = os.path.join(staging, "root")
for remote_path, local_path in uploads.items():
target = os.path.join(root, remote_path.lstrip("/"))
if not os.path.isdir(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy2(local_path, target)
upload_method = branch_config.upload_config["method"]
if upload_method not in UPLOAD_FUNCS:
raise Exception("unrecognized upload method %s" % upload_method)
UPLOAD_FUNCS[upload_method](staging, root, branch_config)
|
11529784
|
import socket
import sys
import textwrap
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="file")
parser.add_option("-a", "--host", dest="host")
parser.add_option("-p", "--port", dest="port")
(options, args) = parser.parse_args()
def SendToNuke(options):
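    # Python snippet executed inside Nuke: it keeps a persistent namespace between
    # sends, runs the target file with execfile, and echoes any traceback back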
PY_CMD_TEMPLATE = textwrap.dedent('''
import traceback
import sys
import __main__
namespace = __main__.__dict__.get('_atom_plugin_SendToNuke')
if not namespace:
namespace = __main__.__dict__.copy()
__main__.__dict__['_atom_plugin_SendToNuke'] = namespace
namespace['__file__'] = r\'{0}\'
try:
execfile(r\'{0}\', namespace, namespace)
except:
sys.stdout.write(traceback.format_exc())
traceback.print_exc()
''')
command_tpl = PY_CMD_TEMPLATE.format(options.file)
host = options.host.replace('\'', '')
port = int(options.port)
ADDR = (host, port)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
client.send(command_tpl)
data = client.recv(4096)
print(data)
client.close()
if __name__=='__main__':
if options.file:
SendToNuke(options)
else:
sys.exit("No command given")
|
11529806
|
from django.conf.urls import url
from apps.user_login.views import user_login
from apps.dubbo_interface.views import dubbo_interface,dubbo_interface_debug
from apps.dubbo_testcase.views import dubbo_testcase,dubbo_testcase_step,dubbo_testcasedebug
from apps.dubbo_task.views import dubbo_task,testReport,batchTask
from apps.dubbo_task_suite.views import dubbo_task_suite
urlpatterns = [
####interface######################
url(r'^dubbo/interfaceList$', dubbo_interface.dubbo_interfaceCheck, name="dubboInterfaceList"),
url(r'^dubbo/interfaceAddPage$', dubbo_interface.interfaceAddPage, name="dubboInterfaceAdd"),
url(r'^dubbo/interfaceAdd$', dubbo_interface.interfaceAdd, name="dubboInterfaceDoAdd"),
url(r'^dubbo/interfaceListDesc$', dubbo_interface.dubbo_interfaceListCheck, name="dubboInterfaceListDesc"),
url(r'^dubbo/getDubboServices$', dubbo_interface.getDubboServices, name="dubboGetServices"),
url(r'^dubbo/getDubboMethods$', dubbo_interface.getDubboMethodsInService, name="dubboGetMethods"),
url(r'^dubbo/operationInterface$', dubbo_interface.operationInterface, name="dubboOperationInterface"),
url(r'^dubbo/operationInterfaceByInterfaceId$', dubbo_interface.operationInterfaceByInterfaceId, name="dubboOperationInterfaceByInterfaceId"),
url(r'^dubbo/getInterfaceDataById$', dubbo_interface.getInterfaceDataById, name="dubboGetInterfaceDataById"),
url(r'^dubbo/interfaceSaveEdit$', dubbo_interface.interfaceSaveEdit, name="dubboInterfaceSaveEdit"),
url(r'^dubbo/interfaceDel$', dubbo_interface.interfaceDel, name="dubboInterfaceDel"),
url(r'^dubbo/interfaceQueryPeopleInterface$', dubbo_interface.queryPeopleInterface,name="dubboQueryPeopleInterface"),
#debug
url(r'^dubbo/interfaceDebugAdd$', dubbo_interface_debug.interfaceDebugAdd, name="dubboInterfaceDebugAdd"),
url(r'^dubbo/sendDebugInterfaceTcpMsg$', dubbo_interface_debug.sendDebugInterfaceTcpMsg, name="dubboSendDebugInterfaceTcpMsg"),
url(r'^dubbo/getDebugResult$', dubbo_interface_debug.getDebugResult,name="dubboGetDebugResult"),
    # dubbo quick debug
url(r'^dubbo/quickDebugPage$', dubbo_interface.interfaceQuickDebugPage, name="dubboQuickDebugPage"),
url(r'^dubbo/getRequestAddr$', dubbo_interface.dubboGetRequestAddr, name="dubboGetRequestAddr"),
url(r'^dubbo/getDubboServicesByAddr$', dubbo_interface.getDubboServicesByAddr, name="dubboGetDubboServicesByAddr"),
url(r'^dubbo/getParamByServiceMethod$', dubbo_interface.getParamByServiceMethod, name="dubboGetParamByServiceMethod"),
url(r'^dubbo/quickDebug$', dubbo_interface.quickDebug, name="dubboQuickDebug"),
url(r'^dubbo/getRbecentQueryDebug$', dubbo_interface.getRbecentQueryDebug, name="dubboGetRbecentQueryDebug"),
url(r'^dubbo/doTelnetCommand$', dubbo_interface.doTelnetCommand, name="dubboDoTelnetCommand"),
##########testcase##############################
url(r'^dubbo/testcaseList$', dubbo_testcase.dubbo_testCaseCheck, name="dubboTestcaseList"),
url(r'^dubbo/TestcaseListCheck$', dubbo_testcase.dubbo_testCaseListCheck, name="dubboTestcaseListCheck"),
url(r'^dubbo/queryPeopleTestCase$', dubbo_testcase.queryPeopleTestCase, name="dubboDueryPeopleTestCase"),
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_TestCaseAddPage"),
url(r'^dubbo/TestCaseAddPage/TestCaseStep$', dubbo_testcase.testCaseStepPage, name="dubbo_TestCaseStepPage"),
url(r'^dubbo/TestCaseAddPage/TestCaseStepDetail$', dubbo_testcase.testCaseStepDetailPage,name="dubbo_TestCaseStepDetailPage"),
url(r'^dubbo/TestCaseAdd$', dubbo_testcase.testCaseAdd, name="dubbo_testCaseAdd"),
url(r'^dubbo/SelectInterfaceAddStep$', dubbo_testcase.selectInterfaceAddStep,name="dubbo_SelectInterfaceAddStep"),
url(r'^dubbo/TestCaseSelectInterfaceList$', dubbo_testcase.testCaseSelectInterfaceCheckList,name="dubbo_TestCaseSelectInterfaceListCheck"),
url(r'^dubbo/operationTestCase$', dubbo_testcase.operationTestCase, name="dubbo_operationTestCase"),
url(r'^dubbo/getTestCaseDataForId$', dubbo_testcase.getTestCaseDataForId, name="dubbo_GetTestCaseDataForId"),
url(r'^dubbo/TestCaseSaveEdit$', dubbo_testcase.testCaseSaveEdit, name="dubbo_testCaseSaveEdit"),
url(r'^dubbo/TestCaseDel$', dubbo_testcase.testCaseDel, name="dubbo_testCaseDel"),
url(r'^dubbo/TestCaseDebugAdd', dubbo_testcasedebug.testCaseDebugAdd, name="dubbo_TestCaseDebugAdd"),
url(r'^dubbo/TestCaseDebug$', dubbo_testcasedebug.debugTestCase, name="dubbo_TestCaseDebug"),
url(r'^dubbo/TestCaseDebugGetResult$', dubbo_testcasedebug.getDebugResult,name="dubbo_TestCaseDebugGetResult"),
url(r'^dubbo/InterfaceDelTip$', dubbo_interface.interfaceGetSyncTestCaseStep, name="dubbo_InterfaceDelTip"),
#task
url(r'^dubbo/TaskCheck$', dubbo_task.dubbo_testCheck, name="dubbo_taskCheck"),
url(r'^dubbo/TaskListCheck$', dubbo_task.dubbo_taskListCheck,name="dubbo_TaskListCheck"),
url(r'^dubbo/TaskGetTaskFotTaskId$', dubbo_task.getTaskForTaskId, name="dubbo_TaskGetTaskFotTaskId"),
url(r'^dubbo/TaskAddPage$', dubbo_task.taskAdd, name="dubbo_TaskAddPage"),
url(r'^dubbo/TaskQueryPeopleTask$', dubbo_task.queryPeopleTask, name="dubbo_queryPeopleTask"),
url(r'^dubbo/TaskSelectInterfacePage$', dubbo_task.TestCaseSelectInterfaceCheckList, name="dubbo_TaskSelectInterfacePage"),
url(r'^dubbo/TaskSelectTestCasePage$', dubbo_task.dubboTaskSelectTestCaseCheckList,name="dubbo_TaskSelectTestCasePage"),
url(r'^dubbo/TaskAddData$', dubbo_task.taskAddData,name="dubbo_TaskAddData"),
url(r'^dubbo/operationTask$', dubbo_task.operationTask, name="dubbo_operationTask"),
url(r'^dubbo/getTaskDataForTaskId$', dubbo_task.getTaskData, name="dubbo_taskGetTaskData"),
url(r'^dubbo/TaskSaveEdit$', dubbo_task.taskDataEdit, name="dubbo_TaskSaveEdit"),
url(r'^dubbo/TaskDel$', dubbo_task.taskDel, name="dubbo_TaskDel"),
url(r'^dubbo/TaskDelTheSameCase$', dubbo_task.taskDelTheSameCase, name="dubbo_TaskDelTheSameCase"),
url(r'^dubbo/getInterfaceListDataForTask$', dubbo_task.getInterfeceListDataForTask, name="dubboGetInterfaceListDataForTask"),
url(r'^dubbo/getTestCaseListDataForTask$', dubbo_task.getTestCaseListDataForTask, name="dubboGetTestCaseListDataForTask"),
url(r'^dubbo/TaskExecuteResult$', dubbo_task.taskResultCheck, name="dubbo_TaskExecuteResult"),
url(r'^dubbo/TaskExecuteQueryPeopleTask$', dubbo_task.queryPeopleTaskExecute, name="dubbo_queryPeopleTaskExecute"),
url(r'^dubbo/getTaskExecuteResult$', dubbo_task.getTaskResultList, name="dubbo_getTaskExecuteResult"),
url(r'^dubbo/getTaskExecuteResultData$', dubbo_task.getTaskRestltDetail, name="dubbo_getTaskExecuteResultData"),
url(r'^dubbo/getInterfaceListData$', dubbo_task.getInterfeceListData, name="dubbo_getInterfaceListData"),
url(r'^dubbo/getTestCaseListData$', dubbo_task.getTestCaseListData, name="dubbo_getTestCaseListData"),
url(r'^dubbo/TaskAgainRunTask$', dubbo_task.againRunTask, name="dubbo_TaskAgainRunTask"),
url(r'^dubbo/TaskStopTaskRun$', dubbo_task.stopTaskRun, name="dubbo_TaskStopTaskRun"),
url(r'^dubbo/TaskRunTask$', dubbo_task.taskRunAdd, name="dubbo_TaskRunTask"),
url(r'^dubbo/GetSelectExecuteStatus$', dubbo_task.getSelectExecuteStatus, name="dubbo_GetSelectExecuteStatus"),
url(r'^dubbo/UpdateTaskExecuteProgressData$', dubbo_task.updateTaskExecuteProgressData,
name="dubbo_UpdateTaskExecuteProgressData"),
    #taskSuite
url(r'^dubbo/TaskSuiteCheck$', dubbo_task_suite.dubboTaskSuiteCheck, name="dubbo_taskSuiteCheck"),
url(r'^dubbo/TaskSuiteListCheck$', dubbo_task_suite.dubbo_taskSuiteListCheck, name="dubbo_TaskSuiteListCheck"),
url(r'^dubbo/TaskSuiteGetTaskSuiteFotTaskId$', dubbo_task.getTaskForTaskId, name="dubbo_TaskSuiteGetTaskSuiteFotTaskId"),
url(r'^dubbo/TaskSuiteAddPage$', dubbo_task_suite.taskSuiteAdd, name="dubbo_TaskSuiteAddPage"),
url(r'^dubbo/TaskSuiteSelectTaskPage$', dubbo_task_suite.dubboTaskSuiteSelectTaskList,name="dubbo_TaskSuiteSelectTaskPage"),
url(r'^dubbo/TaskSuiteAddData$', dubbo_task_suite.taskSuiteAddData, name="dubbo_TaskSuiteAddData"),
url(r'^dubbo/TaskSuiteSaveEdit$', dubbo_task_suite.taskSuitSaveEdit, name="dubbo_TaskSuiteSaveEdit"),
url(r'^dubbo/operationTaskSuite$', dubbo_task_suite.operationTaskSuite, name="dubbo_operationTaskSuite"),
url(r'^dubbo/getTaskSuiteDataForTaskSuiteId$', dubbo_task_suite.getTaskSuiteForTaskSuiteId, name="dubbo_getTaskSuiteForTaskSuiteId"),
url(r'^dubbo/getTaskSuiteData$', dubbo_task_suite.getTaskSuiteData, name="dubbo_taskSuiteGetTaskSuiteData"),
url(r'^dubbo/TaskSuiteDel$', dubbo_task_suite.taskSuiteDel, name="dubbo_TaskSuiteDel"),
url(r'^dubbo/getTaskListDataForTaskSuite$', dubbo_task_suite.getTaskListDataForTaskSuite,
name="dubbo_getTaskListDataForTaskSuite"),
url(r'^dubbo/TaskSuiteExecuteResult$', dubbo_task_suite.taskSuiteResultCheck, name="dubbo_TaskSuiteExecuteResult"),
url(r'^dubbo/getTaskSuiteExecuteResult$', dubbo_task_suite.getTaskSuiteResultList, name="dubbo_getTaskSuiteExecuteResult"),
url(r'^dubbo/UpdateTaskSuiteExecuteProgressData$', dubbo_task_suite.updateTaskSuiteExecuteProgressData,
name="dubbo_UpdateTaskSuiteExecuteProgressData"),
url(r'^dubbo/TaskSuiteRunTask$', dubbo_task_suite.taskSuiteRunAdd, name="dubbo_TaskSuiteRunTask"),
url(r'^dubbo/getTaskSuiteExecuteResultData$', dubbo_task_suite.getTaskSuiteRestltDetail, name="dubbo_getTaskSuiteExecuteResultData"),
url(r'^dubbo/TaskSuiteAgainRun$', dubbo_task_suite.againRunTaskSuite, name="dubbo_TaskSuiteAgainRun"),
url(r'^dubbo/TaskStopTaskSuiteRun$', dubbo_task_suite.stopTaskSuiteRun, name="dubbo_TaskStopTaskSuiteRun"),
url(r'^dubbo/TaskSuiteGetSelectExecuteStatus$', dubbo_task_suite.getSelectExecuteStatus,
name="dubbo_TaskSuiteGetSelectExecuteStatus"),
    ##########################Unfinished##################################
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_TestCaseStepCheck"),
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_History"),
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_SrcFileAnalyze"),
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_SrcFileCoverCheck"),
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_GlobalVarsConf"),
url(r'^dubbo/testCaseAddPage$', dubbo_testcase.testCaseAddPage, name="dubbo_GlobalTextConf"),
url(r'^dubbo/importLogPage$', dubbo_interface.importLogPage, name="dubbo_importLogPage"),
url(r'^dubbo/saveLogDataToDubboInterfaces$', dubbo_interface.saveLogDataToDubboInterfaces, name="dubbo_saveLogDataToDubboInterfaces"),
]
|
11529825
|
import asyncore
import matplotlib.pyplot as plt
import zlib,socket
import numpy as np
import MFSKDemodulator, DePacketizer, MFSKSymbolDecoder, time, logging, sys
from scipy.io import wavfile
import MFSKModulator,Packetizer
import sounddevice as sd
import soundfile as sf
from scipy.io import wavfile
from thread import start_new_thread
import StringIO
#Network-related variables
Connection_status = False
compression = 1
packet_size = 8192
port_host = 8080
#Audio Related variables
symbol_rate = 15.625
base_freq = 1500
bits_per_symbol = 4
preamble_tones = [0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15,0,15]
#internal state (do not modify)
symb_dec = ''
packet_extract = ''
handler = ''
recordable_file = "test.wav"
def zlib_compress(text):
    return zlib.compress(text)
def zlib_decompress(compressed):
    return zlib.decompress(compressed)
def recover_packet(payload):
    print 'Packet received:',payload
if not Connection_status:
handler.handle_sent_data(payload)
def parse_symbol(tone):
tone_bits = symb_dec.tone_to_bits(tone['symbol'])
packet_extract.process_data(tone_bits)
def callback_mic(indata, frames, time, status):
wavhndl_to_data(indata.copy())
def launch_record():
# Make sure the file is opened before recording anything:
with sf.SoundFile(recordable_file, mode='x', samplerate=8000,
channels=1) as file:
with sd.InputStream(samplerate=8000, device=0,
channels=1, callback=callback_mic):
print('#' * 80)
def wavhndl_to_data(indata=None):
    # accept the microphone callback's buffer; decoding below currently reads from the wav file
global symb_dec,packet_extract
symb_dec = MFSKSymbolDecoder.MFSKSymbolDecoder(num_tones=16, gray_coded=True)
# De-Packetizer
packet_extract = DePacketizer.DePacketizer(callback=recover_packet)
#get symbol back
demod = MFSKDemodulator.MFSKDemodulator(callback=parse_symbol)
fs, data = wavfile.read('generated_MFSK16_packets.wav')
# Convert to float
if(data.dtype == np.int16):
data = data.astype(np.float)/2**16
elif(data.dtype == np.int32):
data = data.astype(np.float)/2**32
# Feed the demod the entire file.
demod.consume(data)
def data_to_wavhndl(data):
mod = MFSKModulator.MFSKModulator(symbol_rate = symbol_rate, tone_spacing = symbol_rate, start_silence=5, base_freq=base_freq)
p = Packetizer.Packetizer()
mod.modulate_symbol(preamble_tones)
#adding msg together
fs = p.pack_message(data)
tx_bits = np.unpackbits(np.fromstring(fs, dtype=np.uint8))
print(str(tx_bits))
mod.modulate_bits(bits_per_symbol,tx_bits)
out = mod.get_mem()
return out
class data_recv(asyncore.dispatcher_with_send):
def handle_read(self):
data = self.recv(packet_size)
modulated = data_to_wavhndl(data)
sd.play(modulated[0],modulated[1])
sd.wait() #wait for data to play
print 'stat:',sd.get_status()
if data:
print ":Transmitting ("+str(len(modulated[0]))+") to dest"
print "Array:",modulated
print "data sent:",data
    def handle_close(self):
        global Connection_status
        self.close()
        Connection_status = False
def handle_sent_data(self,data):
self.send(data)
class proxy(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
    def handle_accept(self):
        global handler, Connection_status
        Connection_status = True
pair = self.accept()
if pair is not None:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
handler = data_recv(sock)
#slk = data_to_wavhndl("silence")
#sd.play(slk[0],slk[1])
#sd.wait()
#wavfile.write('generated_MFSK16_packets.wav',slk[1],slk[0])
#wavhndl_to_data()
server = proxy('localhost', port_host)
start_new_thread(launch_record,())
asyncore.loop()
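# To exercise the proxy manually, point any TCP client at localhost:8080
# (netcat shown as an illustrative client); received bytes are modulated to
# audio and played back:
#   $ nc localhost 8080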
|
11529850
|
import copy
import nose.tools
import numpy as np
import os
import pandas as pd
import skimage.io as sk_im_io
from testfixtures import TempDirectory
import unittest
import warnings
import micro_dl.preprocessing.tile_nonuniform_images as tile_images
import micro_dl.utils.aux_utils as aux_utils
class TestImageTilerNonUniform(unittest.TestCase):
def setUp(self):
"""Set up a dir for tiling with flatfield"""
self.tempdir = TempDirectory()
self.temp_path = self.tempdir.path
# Start frames meta file
self.meta_name = 'frames_meta.csv'
frames_meta = aux_utils.make_dataframe()
self.im = 127 * np.ones((15, 11), dtype=np.uint8)
self.im2 = 234 * np.ones((15, 11), dtype=np.uint8)
self.int2str_len = 3
self.channel_idx = [1, 2]
self.pos_idx1 = 7
self.pos_idx2 = 8
# write pos1 with 3 time points and 5 slices
for z in range(5):
for t in range(3):
for c in self.channel_idx:
im_name = aux_utils.get_im_name(
channel_idx=c,
slice_idx=z,
time_idx=t,
pos_idx=self.pos_idx1,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sk_im_io.imsave(
os.path.join(self.temp_path, im_name),
self.im,
)
frames_meta = frames_meta.append(
aux_utils.parse_idx_from_name(im_name),
ignore_index=True,
)
# write pos2 with 2 time points and 3 slices
for z in range(3):
for t in range(2):
for c in self.channel_idx:
im_name = aux_utils.get_im_name(
channel_idx=c,
slice_idx=z,
time_idx=t,
pos_idx=self.pos_idx2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sk_im_io.imsave(
os.path.join(self.temp_path, im_name),
self.im,
)
frames_meta = frames_meta.append(
aux_utils.parse_idx_from_name(im_name),
ignore_index=True,
)
# Write metadata
frames_meta.to_csv(os.path.join(self.temp_path, self.meta_name),
sep=',',)
# Instantiate tiler class
self.output_dir = os.path.join(self.temp_path, 'tile_dir')
self.tile_inst = tile_images.ImageTilerNonUniform(
input_dir=self.temp_path,
output_dir=self.output_dir,
tile_size=[5, 5],
step_size=[4, 4],
depths=3,
channel_ids=[1, 2],
normalize_channels=[False, True]
)
def tearDown(self):
"""Tear down temporary folder and file structure"""
TempDirectory.cleanup_all()
nose.tools.assert_equal(os.path.isdir(self.temp_path), False)
def test_init(self):
"""Test init"""
nose.tools.assert_equal(self.tile_inst.channel_ids, [1, 2])
nose.tools.assert_list_equal(list(self.tile_inst.time_ids),
[0, 1, 2])
nose.tools.assert_equal(self.tile_inst.flat_field_dir, None)
# Depth is 3 so first and last frame will not be used
nose.tools.assert_list_equal(list(self.tile_inst.slice_ids), [1, 2, 3])
np.testing.assert_array_equal(self.tile_inst.pos_ids,
np.asarray([7, 8]))
# for each tp, ch, pos check if slice_ids are the same
for tp_idx in range(3):
for ch_idx in self.channel_idx:
for pos_idx in [7, 8]:
if ch_idx == 1:
if pos_idx == 7:
sl_ids = [0, 1, 2, 3, 4]
np.testing.assert_array_equal(
self.tile_inst.nested_id_dict[tp_idx][ch_idx][
pos_idx],
np.asarray(sl_ids)
)
elif pos_idx == 8 and tp_idx < 2:
sl_ids = [0, 1, 2]
np.testing.assert_array_equal(
self.tile_inst.nested_id_dict[tp_idx][ch_idx][
pos_idx],
np.asarray(sl_ids)
)
def test_tile_first_channel(self):
"""Test tile_first_channel"""
ch0_ids = {}
# get the indices for first channel
for tp_idx, tp_dict in self.tile_inst.nested_id_dict.items():
for ch_idx, ch_dict in tp_dict.items():
if ch_idx == 1:
ch0_dict = {ch_idx: ch_dict}
ch0_ids[tp_idx] = ch0_dict
# get the expected meta df
exp_meta = []
for row in [0, 4, 8, 10]:
for col in [0, 4, 6]:
for z in [1, 2, 3]:
for t in [0, 1, 2]:
fname = aux_utils.get_im_name(
channel_idx=1,
slice_idx=z,
time_idx=t,
pos_idx=7,
ext='.npy',
)
tile_id = '_r{}-{}_c{}-{}_sl0-3'.format(row, row+5,
col, col+5)
fname = fname.split('.')[0] + tile_id + '.npy'
cur_meta = {'channel_idx': 1,
'slice_idx': z,
'time_idx': t,
'file_name': fname,
'pos_idx': 7,
'row_start': row,
'col_start': col}
exp_meta.append(cur_meta)
for t in [0, 1]:
fname = aux_utils.get_im_name(
channel_idx=1,
slice_idx=1,
time_idx=t,
pos_idx=8,
ext='.npy',
)
tile_id = '_r{}-{}_c{}-{}_sl0-3'.format(row, row + 5,
col, col + 5)
fname = fname.split('.')[0] + tile_id + '.npy'
cur_meta = {'channel_idx': 1,
'slice_idx': 1,
'time_idx': t,
'file_name': fname,
'pos_idx': 8,
'row_start': row,
'col_start': col}
exp_meta.append(cur_meta)
exp_meta_df = pd.DataFrame.from_dict(exp_meta)
exp_meta_df = exp_meta_df.sort_values(by=['file_name'])
ch0_meta_df = self.tile_inst.tile_first_channel(ch0_ids, 3)
ch0_meta_df = ch0_meta_df.sort_values(by=['file_name'])
# compare values of the returned and expected dfs
np.testing.assert_array_equal(exp_meta_df.values, ch0_meta_df.values)
def test_tile_remaining_channels(self):
"""Test tile_remaining_channels"""
# tile channel 1
nested_id_dict_copy = copy.deepcopy(self.tile_inst.nested_id_dict)
ch0_ids = {}
for tp_idx, tp_dict in self.tile_inst.nested_id_dict.items():
for ch_idx, ch_dict in tp_dict.items():
if ch_idx == 1:
ch0_dict = {ch_idx: ch_dict}
del nested_id_dict_copy[tp_idx][ch_idx]
ch0_ids[tp_idx] = ch0_dict
ch0_meta_df = self.tile_inst.tile_first_channel(ch0_ids, 3)
# tile channel 2
self.tile_inst.tile_remaining_channels(nested_id_dict_copy,
tiled_ch_id=1,
cur_meta_df=ch0_meta_df)
frames_meta = pd.read_csv(os.path.join(self.tile_inst.tile_dir,
'frames_meta.csv'),
sep=',')
        # Build the expected meta df as a concat of the first-channel df and
        # the current one. Note that concat retains the original indices, so
        # rows are matched by content below rather than by position.
exp_meta = []
for row in [0, 4, 8, 10]:
for col in [0, 4, 6]:
for z in [1, 2, 3]:
for t in [0, 1, 2]:
for c in self.channel_idx:
fname = aux_utils.get_im_name(
channel_idx=c,
slice_idx=z,
time_idx=t,
pos_idx=7,
ext='.npy',
)
tile_id = '_r{}-{}_c{}-{}_sl0-3'.format(row, row+5,
col, col+5)
fname = fname.split('.')[0] + tile_id + '.npy'
cur_meta = {'channel_idx': c,
'slice_idx': z,
'time_idx': t,
'file_name': fname,
'pos_idx': 7,
'row_start': row,
'col_start': col}
exp_meta.append(cur_meta)
for t in [0, 1]:
for c in self.channel_idx:
fname = aux_utils.get_im_name(
channel_idx=c,
slice_idx=1,
time_idx=t,
pos_idx=8,
ext='.npy',
)
tile_id = '_r{}-{}_c{}-{}_sl0-3'.format(row, row + 5,
col, col + 5)
fname = fname.split('.')[0] + tile_id + '.npy'
cur_meta = {'channel_idx': c,
'slice_idx': 1,
'time_idx': t,
'file_name': fname,
'pos_idx': 8,
'row_start': row,
'col_start': col}
exp_meta.append(cur_meta)
exp_meta_df = pd.DataFrame.from_dict(exp_meta, )
frames_meta = frames_meta.sort_values(by=['file_name'])
nose.tools.assert_equal(len(exp_meta_df), len(frames_meta))
for i in range(len(frames_meta)):
act_row = frames_meta.loc[i]
row_idx = ((exp_meta_df['channel_idx'] == act_row['channel_idx']) &
(exp_meta_df['slice_idx'] == act_row['slice_idx']) &
(exp_meta_df['time_idx'] == act_row['time_idx']) &
(exp_meta_df['pos_idx'] == act_row['pos_idx']) &
(exp_meta_df['row_start'] == act_row['row_start']) &
(exp_meta_df['col_start'] == act_row['col_start']))
exp_row = exp_meta_df.loc[row_idx]
nose.tools.assert_equal(len(exp_row), 1)
np.testing.assert_array_equal(act_row['file_name'],
exp_row['file_name'])
def test_tile_mask_stack(self):
"""Test tile_mask_stack"""
# create a mask
mask_dir = os.path.join(self.temp_path, 'mask_dir')
os.makedirs(mask_dir, exist_ok=True)
mask_images = np.zeros((15, 11, 5), dtype='bool')
mask_images[4:12, 4:9, 2:4] = 1
# timepoints for testing
mask_meta = []
for z in range(5):
for t in range(3):
cur_im = mask_images[:, :, z]
im_name = aux_utils.get_im_name(
channel_idx=3,
slice_idx=z,
time_idx=t,
pos_idx=self.pos_idx1,
ext='.npy',
)
np.save(os.path.join(mask_dir, im_name), cur_im)
cur_meta = {'channel_idx': 3,
'slice_idx': z,
'time_idx': t,
'pos_idx': self.pos_idx1,
'file_name': im_name}
mask_meta.append(cur_meta)
mask_meta_df = pd.DataFrame.from_dict(mask_meta)
mask_meta_df.to_csv(os.path.join(mask_dir, 'frames_meta.csv'), sep=',')
self.tile_inst.pos_ids = [7]
self.tile_inst.normalize_channels = [None, None, None, False]
self.tile_inst.tile_mask_stack(mask_dir,
mask_channel=3,
min_fraction=0.5,
mask_depth=3)
nose.tools.assert_equal(self.tile_inst.mask_depth, 3)
frames_meta = pd.read_csv(os.path.join(self.tile_inst.tile_dir,
'frames_meta.csv'),
sep=',')
# only 4 tiles have >= min_fraction. 4 tiles x 3 slices x 3 tps
nose.tools.assert_equal(len(frames_meta), 36)
nose.tools.assert_list_equal(
frames_meta['row_start'].unique().tolist(),
[4, 8])
nose.tools.assert_equal(frames_meta['col_start'].unique().tolist(),
[4])
nose.tools.assert_equal(frames_meta['slice_idx'].unique().tolist(),
[2, 3])
self.assertSetEqual(set(frames_meta.channel_idx.tolist()), {1, 2, 3})
self.assertSetEqual(set(frames_meta.time_idx.tolist()), {0, 1, 2})
self.assertSetEqual(set(frames_meta.pos_idx.tolist()), {self.pos_idx1})
|
11529852
|
from loguru import logger
import asyncio
import aiohttp
from app.config import STATISTICS_TOKEN
from chatbase import Message
class AsyncMessage(Message):
def __init__(self, user_id, message, intent: str = "", not_handled: bool = False):
self.api_key = STATISTICS_TOKEN
self.platform = "tg"
Message.__init__(self,
api_key=self.api_key,
platform=self.platform,
user_id=user_id,
message=message,
intent=intent,
not_handled=not_handled)
async def _make_request(self):
url = "https://chatbase.com/api/message"
async with aiohttp.ClientSession() as session:
async with session.post(url,
data=self.to_json(),
headers=Message.get_content_type()) as response:
logger.info(f"Chatbase response {response}")
return response
async def send(self):
loop = asyncio.get_running_loop()
loop.create_task(self._make_request())
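# Illustrative fire-and-forget usage from inside a coroutine (the user_id and
# intent values are assumptions):
#   msg = AsyncMessage(user_id=42, message="hello", intent="greeting")
#   await msg.send()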
|
11529880
|
import numpy as np
import unittest
from optml.gridsearch_optimizer import GridSearchOptimizer, objective
from optml import Parameter
from copy import deepcopy
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from functools import partial
def clf_score(y_true,y_pred):
return np.sum(y_true==y_pred)/float(len(y_true))
class TestGridsearchOptimizer(unittest.TestCase):
    def test_grid_spacing(self):
        interval = [1,10]
        model = RandomForestClassifier(max_depth=5)
        p1 = Parameter('A', 'integer', lower=interval[0], upper=interval[1])
        p2 = Parameter('B', 'continuous', lower=interval[0], upper=interval[1])
        p3 = Parameter('C', 'categorical', possible_values=['Bla1', 'Bla2'])
        p4 = Parameter('D', 'boolean')
        grid_sizes = {'A': 5, 'B': 6}
        grid_search = GridSearchOptimizer(model, [p1, p2, p3, p4], clf_score, grid_sizes)
        grid = grid_search.grid
        for params in grid:
            self.assertIn(params['A'], range(interval[0], interval[1] + 1))
            self.assertTrue(params['B'] >= interval[0])
            self.assertTrue(params['B'] <= interval[1])
            self.assertIn(params['C'], ['Bla1', 'Bla2'])
            self.assertIn(params['D'], ['True', 'False'])
        lenA = len(np.unique([params['A'] for params in grid]))
        lenB = len(np.unique([params['B'] for params in grid]))
        lenC = len(np.unique([params['C'] for params in grid]))
        lenD = len(np.unique([params['D'] for params in grid]))
        self.assertTrue((lenA == grid_sizes['A']) or (lenA == grid_sizes['A'] + 1))
        self.assertTrue((lenB == grid_sizes['B']) or (lenB == grid_sizes['B'] + 1))
        # Categorical and boolean parameters have a fixed number of values.
        self.assertEqual(lenC, 2)
        self.assertEqual(lenD, 2)
def test_improvement(self):
np.random.seed(4)
data, target = make_classification(n_samples=100,
n_features=45,
n_informative=15,
n_redundant=5,
class_sep=1,
n_clusters_per_class=4,
flip_y=0.4)
model = RandomForestClassifier(max_depth=5)
model.fit(data, target)
start_score = clf_score(target, model.predict(data))
p1 = Parameter('max_depth', 'integer', lower=1, upper=10)
grid_sizes = {'max_depth': 5}
grid_search = GridSearchOptimizer(model, [p1], clf_score, grid_sizes)
best_params, best_model = grid_search.fit(X_train=data, y_train=target)
best_model.fit(data, target)
final_score = clf_score(target, best_model.predict(data))
self.assertTrue(final_score>start_score)
def test_objective_function(self):
np.random.seed(4)
data, target = make_classification(n_samples=100,
n_features=10,
n_informative=10,
n_redundant=0,
class_sep=100,
n_clusters_per_class=1,
flip_y=0.0)
model = RandomForestClassifier(max_depth=5)
model.fit(data, target)
fun = partial(objective, model,
'sklearn',
clf_score,
data, target, data, target)
# model should fit the data perfectly
final_score = fun(model.get_params())[0]
self.assertEqual(final_score,1)
|
11529882
|
from dummy import Hyper, Param, Var, Runtime, Input, Output, Inline
maxScalar = Hyper()
numRegisters = Hyper()
programLen = Hyper()
numTimesteps = Hyper()
inputNum = Hyper()
inputStackSize = Hyper()
numRegInstrs = 13
numBranchInstrs = 2
numInstrTypes = 3
numInstructions = numBranchInstrs + numRegInstrs + 1
mutableStackSize = maxScalar - inputStackSize - 1
# Inputs
inputRegVal = Input(maxScalar)[inputNum]
inputStackCarVal = Input(maxScalar)[inputStackSize]
inputStackCdrVal = Input(maxScalar)[inputStackSize]
# Outputs
outputTermState = Output(2)
outputRegVal = Output(maxScalar)
outputListVal = Output(maxScalar)[maxScalar]
# Runtime state
stackCarValue = Var(maxScalar)[numTimesteps + 1, mutableStackSize]
stackCdrValue = Var(maxScalar)[numTimesteps + 1, mutableStackSize]
stackPtr = Var(mutableStackSize)[numTimesteps + 1]
registers = Var(maxScalar)[numTimesteps + 1, numRegisters]
instrPtr = Var(programLen)[numTimesteps + 1]
returnValue = Var(maxScalar)[numTimesteps + 1]
isHalted = Var(2)[numTimesteps + 1]
# Program
# Register Instructions (13): cons, car, cdr, zero/nil, add, inc, eq, gt, and,
#                             one/true, noop/copy, dec, or
# Branch Instructions (2): jz, jnz; plus a separate return/halt instruction
instructions = Param(numInstructions)[programLen]
arg1s = Param(numRegisters)[programLen]
arg2s = Param(numRegisters)[programLen]
outs = Param(numRegisters)[programLen]
branchAddr = Param(programLen)[programLen]
# Temporary values
tmpArg1Val = Var(maxScalar)[numTimesteps]
tmpArg2Val = Var(maxScalar)[numTimesteps]
tmpOutVal = Var(maxScalar)[numTimesteps]
tmpArg1DerefCarValue = Var(maxScalar)[numTimesteps]
tmpArg1DerefCdrValue = Var(maxScalar)[numTimesteps]
tmpBranchCond = Var(2)[numTimesteps]
tmpDoPushStack = Var(2)[numTimesteps]
tmpDoWriteStack = Var(2)[numTimesteps, maxScalar]
tmpIsRegInstr = Var(2)[numTimesteps]
tmpRegInstr = Var(numRegInstrs)[numTimesteps]
tmpDoWriteReg = Var(2)[numTimesteps, numRegisters]
@Runtime([maxScalar, maxScalar], maxScalar)
def Add(x, y): return (x + y) % maxScalar
@Runtime([maxScalar], maxScalar)
def Inc(x): return (x + 1) % maxScalar
@Runtime([maxScalar], maxScalar)
def Dec(x): return (x - 1) % maxScalar  # Python's % keeps the result in 0..maxScalar-1
@Runtime([maxScalar, maxScalar], maxScalar)
def EqTest(a, b): return 1 if a == b else 0
@Runtime([maxScalar, maxScalar], maxScalar)
def GtTest(a, b): return 1 if a > b else 0
@Runtime([maxScalar, maxScalar], maxScalar)
def And(a, b): return 1 if a == 1 and b == 1 else 0
@Runtime([maxScalar, maxScalar], maxScalar)
def Or(a, b): return 1 if a == 1 or b == 1 else 0
@Runtime([maxScalar], 2)
def ScalarIsZero(x): return 1 if x == 0 else 0
# Modeling helper functions, not actual instructions:
@Runtime([programLen, programLen], 2)
def InstrPtrEquality(a, b): return 1 if a == b else 0
@Runtime([numRegisters, numRegisters], 2)
def RegisterEquality(a, b): return 1 if a == b else 0
@Runtime([programLen], programLen)
def IncInstrPtr(x): return programLen - 1 if x + 1 >= programLen else x + 1
@Runtime([numInstructions], 2)
def RequiresStackPush(instrIndex): return 1 if instrIndex == 0 else 0
@Runtime([numInstructions], 2)
def IsRegInstr(x): return 1 if x < numRegInstrs else 0
@Runtime([numInstructions], numRegInstrs)
def ToRegInstr(x): return x if x < numRegInstrs else 0
@Runtime([numInstructions], 2)
def IsCons(x): return 1 if x == 0 else 0
@Runtime([mutableStackSize], maxScalar)
def StackPtrToScalar(x): return x + inputStackSize + 1
@Runtime([mutableStackSize], mutableStackSize)
def IncStackPtr(x): return (x + 1) % (mutableStackSize)
@Runtime([mutableStackSize, mutableStackSize], 2)
def PtrEquality(a, b): return 1 if a == b else 0
# Copy input registers to main registers.
for i in range(inputNum):
registers[0, i].set_to(inputRegVal[i])
for i in range(inputNum, numRegisters):
registers[0, i].set_to(0)
# Initialize the mutable stack to zeros; the input stack is read in place.
for i in range(0, mutableStackSize):
stackCarValue[0, i].set_to(0)
stackCdrValue[0, i].set_to(0)
stackPtr[0].set_to(0)
# Start with first instruction, program starts not-halted.
instrPtr[0].set_to(0)
isHalted[0].set_to(0)
returnValue[0].set_to(0)
for t in range(numTimesteps): # !! factor: numTimesteps
if isHalted[t] == 0:
with instrPtr[t] as ip: # !! factor: numTimesteps * numInstructions
instruction = instructions[ip]
arg1Val = tmpArg1Val[t]
arg2Val = tmpArg2Val[t]
with arg1s[ip] as r: # !! factor: numTimesteps * numInstructions * numRegisters
arg1Val.set_to(registers[t, r])
with arg2s[ip] as r: # !! factor: numTimesteps * numInstructions * numRegisters
arg2Val.set_to(registers[t, r])
with arg1Val as p: # !! factor: numTimesteps * numInstructions * maxScalar
if p == 0:
tmpArg1DerefCarValue[t].set_to(0)
tmpArg1DerefCdrValue[t].set_to(0)
elif p <= inputStackSize:
tmpArg1DerefCarValue[t].set_to(inputStackCarVal[p - 1])
tmpArg1DerefCdrValue[t].set_to(inputStackCdrVal[p - 1])
else:
tmpArg1DerefCarValue[t].set_to(stackCarValue[t, p - inputStackSize - 1])
tmpArg1DerefCdrValue[t].set_to(stackCdrValue[t, p - inputStackSize - 1])
# Build registers for next timestep, where branching instructions don't do anything:
tmpIsRegInstr[t].set_to(IsRegInstr(instruction))
if tmpIsRegInstr[t] == 0:
for r in range(numRegisters): # !! factor: numTimesteps * numInstructions * numRegisters
registers[t+1, r].set_to(registers[t, r])
elif tmpIsRegInstr[t] == 1:
tmpRegInstr[t].set_to(ToRegInstr(instruction))
if tmpRegInstr[t] == 0: # cons
tmpOutVal[t].set_to(StackPtrToScalar(stackPtr[t]))
elif tmpRegInstr[t] == 1: # car
tmpOutVal[t].set_to(tmpArg1DerefCarValue[t])
elif tmpRegInstr[t] == 2: # cdr
tmpOutVal[t].set_to(tmpArg1DerefCdrValue[t])
elif tmpRegInstr[t] == 3: # zero/nil
tmpOutVal[t].set_to(0)
elif tmpRegInstr[t] == 4: # add
tmpOutVal[t].set_to(Add(arg1Val, arg2Val))
elif tmpRegInstr[t] == 5: # inc
tmpOutVal[t].set_to(Inc(arg1Val))
elif tmpRegInstr[t] == 6: # eq
tmpOutVal[t].set_to(EqTest(arg1Val, arg2Val))
elif tmpRegInstr[t] == 7: # gt
tmpOutVal[t].set_to(GtTest(arg1Val, arg2Val))
elif tmpRegInstr[t] == 8: # and
tmpOutVal[t].set_to(And(arg1Val, arg2Val))
elif tmpRegInstr[t] == 9: # one/true
tmpOutVal[t].set_to(1)
elif tmpRegInstr[t] == 10: # noop/copy
tmpOutVal[t].set_to(arg1Val)
elif tmpRegInstr[t] == 11: # dec
tmpOutVal[t].set_to(Dec(arg1Val))
elif tmpRegInstr[t] == 12: # or
tmpOutVal[t].set_to(Or(arg1Val, arg2Val))
for r in range(numRegisters): # !! factor: numTimesteps * numInstructions * numRegisters
tmpDoWriteReg[t, r].set_to(RegisterEquality(outs[ip], r))
if tmpDoWriteReg[t, r] == 1:
registers[t+1, r].set_to(tmpOutVal[t])
elif tmpDoWriteReg[t, r] == 0:
registers[t+1, r].set_to(registers[t, r])
# Build stack for next timestep, only Cons changes anything
tmpDoPushStack[t].set_to(IsCons(instruction))
if tmpDoPushStack[t] == 1:
for p in range(mutableStackSize): # !! factor: numTimesteps * numInstructions * maxScalar
tmpDoWriteStack[t, p].set_to(PtrEquality(stackPtr[t], p))
if tmpDoWriteStack[t, p] == 1:
stackCarValue[t+1, p].set_to(arg1Val)
stackCdrValue[t+1, p].set_to(arg2Val)
elif tmpDoWriteStack[t, p] == 0:
stackCarValue[t+1, p].set_to(stackCarValue[t, p])
stackCdrValue[t+1, p].set_to(stackCdrValue[t, p])
stackPtr[t+1].set_to(IncStackPtr(stackPtr[t]))
elif tmpDoPushStack[t] == 0:
for p in range(mutableStackSize): # !! factor: numTimesteps * numInstructions * maxScalar
stackCarValue[t+1, p].set_to(stackCarValue[t, p])
stackCdrValue[t+1, p].set_to(stackCdrValue[t, p])
stackPtr[t+1].set_to(stackPtr[t])
# Set instruction pointer for next timestep:
tmpBranchCond[t].set_to(ScalarIsZero(arg1Val))
if instruction == numRegInstrs + 0: # jz
if tmpBranchCond[t] == 1:
instrPtr[t+1].set_to(branchAddr[ip])
elif tmpBranchCond[t] == 0:
instrPtr[t+1].set_to((ip + 1) % programLen)
isHalted[t+1].set_to(0)
returnValue[t+1].set_to(0)
elif instruction == numRegInstrs + 1: # jnz
if tmpBranchCond[t] == 1:
instrPtr[t+1].set_to((ip + 1) % programLen)
elif tmpBranchCond[t] == 0:
instrPtr[t+1].set_to(branchAddr[ip])
isHalted[t+1].set_to(0)
returnValue[t+1].set_to(0)
elif instruction == numRegInstrs + 2: # return
instrPtr[t+1].set_to(ip)
isHalted[t+1].set_to(1)
returnValue[t+1].set_to(arg1Val)
else:
instrPtr[t+1].set_to((ip + 1) % programLen)
isHalted[t+1].set_to(0)
returnValue[t+1].set_to(0)
elif isHalted[t] == 1:
for r in range(numRegisters):
registers[t+1, r].set_to(registers[t, r])
for p in range(mutableStackSize):
stackCarValue[t+1, p].set_to(stackCarValue[t, p])
stackCdrValue[t+1, p].set_to(stackCdrValue[t, p])
stackPtr[t+1].set_to(stackPtr[t])
instrPtr[t+1].set_to(instrPtr[t])
isHalted[t+1].set_to(1)
returnValue[t+1].set_to(returnValue[t])
# Penalize non-halting programs.
outputTermState.set_to(isHalted[numTimesteps])
outputListCopyPos = Var(maxScalar)[maxScalar + 1]
# Copy register value to output:
outputRegVal.set_to(returnValue[numTimesteps])
# Copy list values out:
outputListCopyPos[0].set_to(returnValue[numTimesteps])
for n in range(maxScalar):
with outputListCopyPos[n] as p:
if p == 0:
outputListVal[n].set_to(0)
outputListCopyPos[n + 1].set_to(0)
elif p <= inputStackSize:
outputListVal[n].set_to(inputStackCarVal[p - 1])
outputListCopyPos[n + 1].set_to(inputStackCdrVal[p - 1])
else:
outputListVal[n].set_to(stackCarValue[numTimesteps, p - inputStackSize - 1])
outputListCopyPos[n + 1].set_to(stackCdrValue[numTimesteps, p - inputStackSize - 1])
|
11529910
|
class CommandDTOV4(object):
serviceId = "serviceId"
method = "method"
paras = "paras"
    def __init__(self):
        self.paras = dict()
|
11529932
|
from setuptools import setup
setup(
name = 'pyxing',
version = '0.0.5',
description = 'python wrapper for eBest Xing API',
url = 'https://github.com/sharebook-kr/pyxing',
author = '<NAME>, <NAME>',
author_email = '<EMAIL>, <EMAIL>, <EMAIL>',
install_requires= ['pandas'],
license = 'MIT',
packages = ['pyxing'],
zip_safe = False
)
|
11529939
|
from abstract_plotter import AbstractPlotter, Frame
class Plotter(AbstractPlotter):
    def __init__(self, frame=None):
        AbstractPlotter.__init__(self, frame)
        self.circles = []
        self.triangles = []
    def circle(self, x, y, r, color):
        self.circles.append((x, y, r, color))
    def triangle(self, x, y, r, color):
        self.triangles.append((x, y, r, color))
def default_frame(self):
return Frame(320, 240, 20, 20, 60, 20)
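# Minimal usage sketch (shape coordinates and colors are illustrative):
#   p = Plotter()
#   p.circle(160, 120, 30, 'red')
#   p.triangle(40, 40, 10, 'blue')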
|
11529953
|
from __future__ import (
annotations,
)
from minos.common import (
Config,
Inject,
NotProvidedException,
)
@Inject()
def get_service_name(config: Config) -> str:
"""Get the service name."""
if config is None:
raise NotProvidedException("The config object must be provided.")
return config.get_name()
|
11529957
|
from __future__ import absolute_import
import copy
import types
import operator
from sqlbuilder.smartsql.compiler import compile
from sqlbuilder.smartsql.constants import CONTEXT
from sqlbuilder.smartsql.exceptions import Error
from sqlbuilder.smartsql.expressions import Operable, Expr, ExprList, Constant, Parentheses, OmitParentheses, func, expr_repr
from sqlbuilder.smartsql.factory import factory
from sqlbuilder.smartsql.fields import Field, FieldList
from sqlbuilder.smartsql.operators import Asc, Desc
from sqlbuilder.smartsql.pycompat import string_types
from sqlbuilder.smartsql.tables import TableJoin
from sqlbuilder.smartsql.utils import is_list, opt_checker, same, warn
__all__ = (
'Result', 'Executable', 'Select', 'Query', 'SelectCount', 'Raw',
'Modify', 'Insert', 'Update', 'Delete',
'Set', 'Union', 'Intersect', 'Except',
)
SPACE = " "
class settable(object):
"""
alternatives:
1. mutable keyword argument:
query = query.where(T.author.first_name == 'Ivan', mutable=True)
2. Select.mutable() method:
query = query.mutable(True)
query.where(T.author.first_name == 'Ivan')
"""
def __init__(self, property_name):
self._property_name = property_name
self._method = None
def __call__(self, method):
self._method = method
return self
def __get__(self, instance, owner):
if instance is None:
return self._method
return types.MethodType(self._method, instance)
def __set__(self, instance, value):
if is_list(self._property_name):
for attr, val in zip(self._property_name, value):
setattr(instance, attr, val)
else:
setattr(instance, self._property_name, value)
class Result(object):
"""Default implementation of Query class.
It uses the Bridge pattern to separate implementation from interface.
"""
compile = compile
def __init__(self, compile=None):
if compile is not None:
self.compile = compile
self._query = None
def execute(self):
return self.compile(self._query)
select = count = insert = update = delete = execute
def __call__(self, query):
c = self # Don't clone here to keep link to cache in this instance
c._query = query
return c
def clone(self):
c = copy.copy(super(Result, self))
c._query = None
return c
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __getitem__(self, key):
if isinstance(key, slice):
offset = key.start or 0
limit = key.stop - offset if key.stop else None
else:
offset, limit = key, 1
return self._query.limit(offset, limit)
__copy__ = clone
class Executable(object):
result = Result() # IoC
def __init__(self, result=None):
""" Query class.
It uses the Bridge pattern to separate implementation from interface.
:param result: Object of implementation.
:type result: Result
"""
if result is not None:
self.result = result
else:
self.result = self.result.clone()
def clone(self, *attrs):
c = super(Executable, self).clone(*attrs)
c.result = c.result.clone()
return c
def __getattr__(self, name):
"""Delegates unknown attributes to object of implementation."""
try:
return super(Executable, self).__getattr__(name)
except AttributeError:
if hasattr(self.result, name):
attr = getattr(self.result, name)
if isinstance(attr, types.MethodType):
c = self.clone()
return getattr(c.result(c), name)
else:
return attr
raise AttributeError
@factory.register
class Select(Expr):
def __init__(self, tables=None):
""" Select class.
:param tables: tables
:type tables: Table, TableAlias, TableJoin or None
"""
Operable.__init__(self)
self._distinct = ExprList().join(", ")
self._fields = FieldList().join(", ")
self._tables = tables
self._where = None
self._having = None
self._group_by = ExprList().join(", ")
self._order_by = ExprList().join(", ")
self._limit = None
self._offset = None
self._for_update = False
def tables(self, tables=None):
if tables is None:
return self._tables
c = self.clone('_tables')
c._tables = tables
return c
@opt_checker(["reset", ])
def distinct(self, *args, **opts):
if not args and not opts:
return self._distinct
if args:
if is_list(args[0]):
return self.distinct(*args[0], reset=True)
elif args[0] is True and not opts.get("reset"):
return self.distinct(*args, reset=True)
elif args[0] is False:
return self.distinct(reset=True)
c = self.clone('_distinct')
if opts.get("reset"):
c._distinct.reset()
if args:
c._distinct.extend(args)
return c
@opt_checker(["reset", ])
def fields(self, *args, **opts):
        # Why not name "args" something like "expressions", "exprs" or "fields"?
        # Because that would be a lie: an argument can also be a list of
        # expressions. The name "args" is not entirely clear, but it is
        # truthful and not misleading.
if not args and not opts:
return self._fields
if args and is_list(args[0]):
return self.fields(*args[0], reset=True)
c = self.clone('_fields')
if opts.get("reset"):
c._fields.reset()
if args:
c._fields.extend([Field(f) if isinstance(f, string_types) else f for f in args])
return c
def on(self, cond):
# TODO: Remove?
c = self.clone()
if not isinstance(c._tables, TableJoin):
raise Error("Can't set on without join table")
c._tables = c._tables.on(cond)
return c
@settable('_where')
def where(self, cond=None, op=operator.and_):
if cond is None:
return self._where
c = self.clone()
if c._where is None or op is None:
c._where = cond
else:
c._where = op(c._where, cond)
return c
def or_where(self, cond):
warn('or_where(cond)', 'where(cond, op=operator.or_)')
return self.where(cond, op=operator.or_)
@opt_checker(["reset", ])
def group_by(self, *args, **opts):
if not args and not opts:
return self._group_by
if args and is_list(args[0]):
return self.group_by(*args[0], reset=True)
c = self.clone('_group_by')
if opts.get("reset"):
c._group_by.reset()
if args:
c._group_by.extend(args)
return c
def having(self, cond=None, op=operator.and_):
if cond is None:
return self._having
c = self.clone()
if c._having is None or op is None:
c._having = cond
else:
c._having = op(self._having, cond)
return c
def or_having(self, cond):
warn('or_having(cond)', 'having(cond, op=operator.or_)')
return self.having(cond, op=operator.or_)
@opt_checker(["desc", "reset", ])
def order_by(self, *args, **opts):
if not args and not opts:
return self._order_by
if args and is_list(args[0]):
return self.order_by(*args[0], reset=True)
c = self.clone('_order_by')
if opts.get("reset"):
c._order_by.reset()
if args:
wraps = Desc if opts.get("desc") else Asc
c._order_by.extend([f if isinstance(f, (Asc, Desc)) else wraps(f) for f in args])
return c
def limit(self, *args, **kwargs):
if not args and not kwargs:
return (self._offset, self._limit)
c = self.clone()
if args:
if len(args) < 2:
args = (0,) + args
c._offset, c._limit = args
else:
c._limit = kwargs.get('limit')
c._offset = kwargs.get('offset', 0)
return c
def as_table(self, alias):
return factory.get(self).TableAlias(self, alias)
def clone(self, *attrs):
c = copy.copy(super(Select, self))
# if not attrs:
# attrs = ('_fields', '_tables', '_group_by', '_order_by')
for a in attrs:
setattr(c, a, copy.copy(getattr(c, a, None)))
return c
columns = same('fields')
__copy__ = same('clone')
@compile.when(Select)
def compile_query(compile, expr, state):
state.push("auto_tables", []) # this expr can be a subquery
state.push("context", CONTEXT.FIELD)
state.sql.append("SELECT ")
if expr.distinct():
state.sql.append("DISTINCT ")
if expr.distinct()[0] is not True:
state.sql.append("ON ")
compile(Parentheses(expr._distinct), state)
state.sql.append(SPACE)
compile(expr.fields(), state)
tables_sql_pos = len(state.sql)
tables_params_pos = len(state.params)
state.context = CONTEXT.EXPR
if expr.where():
state.sql.append(" WHERE ")
compile(expr.where(), state)
if expr.group_by():
state.sql.append(" GROUP BY ")
compile(expr.group_by(), state)
if expr.having():
state.sql.append(" HAVING ")
compile(expr.having(), state)
if expr.order_by():
state.sql.append(" ORDER BY ")
compile(expr.order_by(), state)
if expr._limit is not None:
state.sql.append(" LIMIT ")
compile(expr._limit, state)
if expr._offset:
state.sql.append(" OFFSET ")
compile(expr._offset, state)
if expr._for_update:
state.sql.append(" FOR UPDATE")
if expr.tables():
state.push('joined_table_statements', set())
state.push('sql', [])
state.push('params', [])
state.context = CONTEXT.TABLE
state.sql.append(" FROM ")
tables = expr.tables()
for join in state.auto_join_tables:
tables = join.left(tables)
compile(tables, state)
tables_sql = state.sql
tables_params = state.params
state.pop()
state.pop()
state.pop()
state.sql[tables_sql_pos:tables_sql_pos] = tables_sql
state.params[tables_params_pos:tables_params_pos] = tables_params
state.pop()
state.pop()
@factory.register
class Query(Executable, Select):
def __init__(self, tables=None, result=None):
""" Query class.
It uses the Bridge pattern to separate implementation from interface.
:param tables: tables
:type tables: Table, TableAlias, TableJoin or None
:param result: Object of implementation.
:type result: Result
"""
Select.__init__(self, tables)
Executable.__init__(self, result)
@opt_checker(["distinct", "for_update"])
def select(self, *args, **opts):
c = self.clone()
if args:
c = c.fields(*args)
if opts.get("distinct"):
c = c.distinct(True)
if opts.get("for_update"):
c._for_update = True
return c.result(c).select()
# Never clone result. It should have back link to Query instance.
# State of Result should be corresponding to state of Query object.
# We need clone both Result and Query synchronously.
def count(self, **kw):
return self.result(SelectCount(self, **kw)).count()
def insert(self, key_values=None, **kw):
kw.setdefault('table', self._tables)
kw.setdefault('fields', self._fields)
return self.result(factory.get(self).Insert(mapping=key_values, **kw)).insert()
def insert_many(self, fields, values, **kw):
# Deprecated
return self.insert(fields=fields, values=values, **kw)
def update(self, key_values=None, **kw):
kw.setdefault('table', self._tables)
kw.setdefault('fields', self._fields)
kw.setdefault('where', self._where)
kw.setdefault('order_by', self._order_by)
kw.setdefault('limit', self._limit)
return self.result(factory.get(self).Update(mapping=key_values, **kw)).update()
def delete(self, **kw):
kw.setdefault('table', self._tables)
kw.setdefault('where', self._where)
kw.setdefault('order_by', self._order_by)
kw.setdefault('limit', self._limit)
return self.result(factory.get(self).Delete(**kw)).delete()
def as_set(self, all=False):
return factory.get(self).Set(self, all=all, result=self.result)
def set(self, *args, **kwargs):
warn('set([all=False])', 'as_set([all=False])')
return self.as_set(*args, **kwargs)
def raw(self, sql, params=()):
return factory.get(self).Raw(sql, params, result=self.result)
def __getitem__(self, key):
return self.result(self).__getitem__(key)
def __len__(self):
return self.result(self).__len__()
def __iter__(self):
return self.result(self).__iter__()
QuerySet = Query
@factory.register
class SelectCount(Query):
def __init__(self, q, table_alias='count_list', field_alias='count_value'):
Query.__init__(self, q.order_by(reset=True).as_table(table_alias))
self._fields.append(func.Count(Constant('1')).as_(field_alias))
@factory.register
class Raw(Query):
def __init__(self, sql, params, result=None):
Query.__init__(self, result=result)
self._raw = OmitParentheses(Expr(sql, params))
@compile.when(Raw)
def compile_raw(compile, expr, state):
compile(expr._raw, state)
if expr._limit is not None:
state.sql.append(" LIMIT ")
compile(expr._limit, state)
if expr._offset:
state.sql.append(" OFFSET ")
compile(expr._offset, state)
class Modify(object):
def __repr__(self):
return expr_repr(self)
@factory.register
class Insert(Modify):
def __init__(self, table, mapping=None, fields=None, values=None, ignore=False, on_duplicate_key_update=None, duplicate_key=None):
self.table = table
self.fields = FieldList(*(k if isinstance(k, Expr) else table.get_field(k) for k in (mapping or fields)))
self.values = (tuple(mapping.values()),) if mapping else values
self.ignore = ignore
self.on_duplicate_key_update = tuple(
(k if isinstance(k, Expr) else table.get_field(k), v)
for k, v in on_duplicate_key_update.items()
) if on_duplicate_key_update else None
if duplicate_key:
if not is_list(duplicate_key):
duplicate_key = (duplicate_key,)
self.duplicate_key = FieldList(*(i if isinstance(i, Expr) else table.get_field(i) for i in duplicate_key))
else:
self.duplicate_key = None
@compile.when(Insert)
def compile_insert(compile, expr, state):
state.push("context", CONTEXT.TABLE)
state.sql.append("INSERT ")
state.sql.append("INTO ")
compile(expr.table, state)
state.sql.append(SPACE)
state.context = CONTEXT.FIELD_NAME
compile(Parentheses(expr.fields), state)
state.context = CONTEXT.EXPR
if isinstance(expr.values, Query):
state.sql.append(SPACE)
compile(expr.values, state)
else:
state.sql.append(" VALUES ")
compile(ExprList(*expr.values).join(', '), state)
if expr.ignore:
state.sql.append(" ON CONFLICT DO NOTHING")
elif expr.on_duplicate_key_update:
state.sql.append(" ON CONFLICT")
if expr.duplicate_key:
state.sql.append(SPACE)
state.context = CONTEXT.FIELD_NAME
compile(Parentheses(expr.duplicate_key), state)
state.context = CONTEXT.EXPR
state.sql.append(" DO UPDATE SET ")
first = True
for f, v in expr.on_duplicate_key_update:
if first:
first = False
else:
state.sql.append(", ")
state.context = CONTEXT.FIELD_NAME
compile(f, state)
state.context = CONTEXT.EXPR
state.sql.append(" = ")
compile(v, state)
state.pop()
@factory.register
class Update(Modify):
def __init__(self, table, mapping=None, fields=None, values=None, ignore=False, where=None, order_by=None, limit=None):
self.table = table
self.fields = FieldList(*(k if isinstance(k, Expr) else table.get_field(k) for k in (mapping or fields)))
self.values = tuple(mapping.values()) if mapping else values
self.ignore = ignore
self.where = where
self.order_by = order_by
self.limit = limit
@compile.when(Update)
def compile_update(compile, expr, state):
state.push("context", CONTEXT.TABLE)
state.sql.append("UPDATE ")
if expr.ignore:
state.sql.append("IGNORE ")
compile(expr.table, state)
state.sql.append(" SET ")
first = True
for field, value in zip(expr.fields, expr.values):
if first:
first = False
else:
state.sql.append(", ")
state.context = CONTEXT.FIELD_NAME
compile(field, state)
state.context = CONTEXT.EXPR
state.sql.append(" = ")
compile(value, state)
state.context = CONTEXT.EXPR
if expr.where:
state.sql.append(" WHERE ")
compile(expr.where, state)
if expr.order_by:
state.sql.append(" ORDER BY ")
compile(expr.order_by, state)
if expr.limit is not None:
state.sql.append(" LIMIT ")
compile(expr.limit, state)
state.pop()
@factory.register
class Delete(Modify):
def __init__(self, table, where=None, order_by=None, limit=None):
self.table = table
self.where = where
self.order_by = order_by
self.limit = limit
@compile.when(Delete)
def compile_delete(compile, expr, state):
state.sql.append("DELETE FROM ")
state.push("context", CONTEXT.TABLE)
compile(expr.table, state)
state.context = CONTEXT.EXPR
if expr.where:
state.sql.append(" WHERE ")
compile(expr.where, state)
if expr.order_by:
state.sql.append(" ORDER BY ")
compile(expr.order_by, state)
if expr.limit is not None:
state.sql.append(" LIMIT ")
compile(expr.limit, state)
state.pop()
@factory.register
class Set(Query):
def __init__(self, *exprs, **kw):
super(Set, self).__init__(result=kw.get('result'))
if 'op' in kw:
self.sql = kw['op']
self._all = kw.get('all', False) # Use All() instead?
self._exprs = ExprList()
for expr in exprs:
self.add(expr)
def add(self, other):
if (isinstance(other, self.__class__) and
other._all == self._all and
other._limit is None and
other._offset is None):
for expr in other._exprs:
self.add(expr)
if other._for_update:
self._for_update = other._for_update
else:
self._exprs.append(other)
# TODO: reset _order_by, _for_update?
def _op(self, cls, *others):
if not getattr(self, 'sql', None):
c = cls(*self._exprs, all=self._all)
c._limit = self._limit
c._offset = self._offset
c._order_by = self._order_by
c._for_update = self._for_update
elif self.__class__ is not cls:
c = cls(self, all=self._all) # TODO: Should be here "all"?
else:
c = self.clone()
for other in others:
c.add(other)
return c
def union(self, *others):
return self._op(factory.get(self).Union, *others)
def intersection(self, *others):
return self._op(factory.get(self).Intersect, *others)
def difference(self, *others):
return self._op(factory.get(self).Except, *others)
# FIXME: violates the interface contract, changing the semantic of its interface
__or__ = same('union')
__and__ = same('intersection')
__sub__ = same('difference')
def all(self, all=True):
self._all = all
return self
def clone(self, *attrs):
c = Query.clone(self, *attrs)
c._exprs = copy.copy(c._exprs)
return c
@factory.register
class Union(Set):
__slots__ = ()
sql = 'UNION'
@factory.register
class Intersect(Set):
__slots__ = ()
sql = 'INTERSECT'
@factory.register
class Except(Set):
__slots__ = ()
sql = 'EXCEPT'
@compile.when(Set)
def compile_set(compile, expr, state):
state.push("context", CONTEXT.SELECT)
if expr._all:
op = ' {0} ALL '.format(expr.sql)
else:
op = ' {0} '.format(expr.sql)
# TODO: add tests for nested sets.
state.precedence += 0.5 # to correct handle sub-set with limit, offset
compile(expr._exprs.join(op), state)
state.precedence -= 0.5
if expr._order_by:
# state.context = CONTEXT.FIELD_NAME
state.context = CONTEXT.EXPR
state.sql.append(" ORDER BY ")
compile(expr._order_by, state)
if expr._limit is not None:
state.sql.append(" LIMIT ")
compile(expr._limit, state)
if expr._offset:
state.sql.append(" OFFSET ")
compile(expr._offset, state)
if expr._for_update:
state.sql.append(" FOR UPDATE")
state.pop()
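# A minimal illustrative sketch of compiling a query with these classes
# (importing T/Q/compile from the package root is an assumption about the
# public API; this module itself only defines the query machinery):
#   from sqlbuilder.smartsql import T, Q, compile
#   sql, params = compile(Q(T.author).fields(T.author.id).where(T.author.id == 1))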
|
11529969
|
def test():
    assert (
        len(pattern1) == 2
    ), "The token count of pattern1 does not match the actual number of tokens in the string."
    assert (
        len(pattern2) == 2
    ), "The token count of pattern2 does not match the actual number of tokens in the string."
    # Pattern 1 validation
    assert (
        len(pattern1[0]) == 1
    ), "The first token of pattern1 should have a single attribute."
    assert any(
        pattern1[0].get(attr) == "amazon" for attr in ("lower", "LOWER")
    ), "Check the attribute and value of the first token of pattern1."
    assert (
        len(pattern1[1]) == 2
    ), "The second token of pattern1 should have two attributes."
    assert any(
        pattern1[1].get(attr) == True for attr in ("is_title", "IS_TITLE")
    ), "Check the attributes and values of the second token of pattern1."
    assert any(
        pattern1[1].get(attr) == "PROPN" for attr in ("pos", "POS")
    ), "Check the attributes and values of the second token of pattern1."
    # Pattern 2 validation
    assert any(
        pattern2[0].get(attr) == "NOUN" for attr in ("pos", "POS")
    ), "Check the attribute and value of the first token of pattern2."
    assert any(
        pattern2[1].get(attr) == "tout-compris" for attr in ("lower", "LOWER")
    ), "Check the attribute and value of the second token of pattern2."
    assert len(matcher(doc)) == 4, "Incorrect number of matches: expected 4."
    __msg__.good(
        "Well done! As you can see, it is very important to pay close "
        "attention to tokenization when you use the token-based 'Matcher'. "
        "Sometimes it is much easier to simply search for exact strings and "
        "use the 'PhraseMatcher', as we will do in the next exercise."
    )
|
11529974
|
import pygame
#button class
class Button():
def __init__(self, surface, x, y, image, size_x, size_y):
self.image = pygame.transform.scale(image, (size_x, size_y))
self.rect = self.image.get_rect()
self.rect.topleft = (x, y)
self.clicked = False
self.surface = surface
def draw(self):
action = False
#get mouse position
pos = pygame.mouse.get_pos()
#check mouseover and clicked conditions
if self.rect.collidepoint(pos):
if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:
action = True
self.clicked = True
if pygame.mouse.get_pressed()[0] == 0:
self.clicked = False
#draw button
self.surface.blit(self.image, (self.rect.x, self.rect.y))
return action
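# Minimal usage sketch (display setup and image path are illustrative):
#   pygame.init()
#   screen = pygame.display.set_mode((640, 480))
#   image = pygame.image.load('button.png')
#   button = Button(screen, 100, 100, image, 120, 40)
#   if button.draw():
#       print('button clicked')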
|
11530007
|
import os
import sublime
from .console_write import console_write
from .package_io import package_file_exists
class PackageRenamer():
"""
Class to handle renaming packages via the renamed_packages setting
gathered from channels and repositories.
"""
def load_settings(self):
"""
Loads the list of installed packages from the
Package Control.sublime-settings file.
"""
self.settings_file = 'Package Control.sublime-settings'
self.settings = sublime.load_settings(self.settings_file)
self.installed_packages = self.settings.get('installed_packages', [])
if not isinstance(self.installed_packages, list):
self.installed_packages = []
def rename_packages(self, installer):
"""
Renames any installed packages that the user has installed.
:param installer:
An instance of :class:`PackageInstaller`
"""
# Fetch the packages since that will pull in the renamed packages list
installer.manager.list_available_packages()
renamed_packages = installer.manager.settings.get('renamed_packages', {})
if not renamed_packages:
renamed_packages = {}
# These are packages that have been tracked as installed
installed_pkgs = self.installed_packages
        # These are the packages actually present on the filesystem
present_packages = installer.manager.list_packages()
# Rename directories for packages that have changed names
for package_name in renamed_packages:
package_dir = os.path.join(sublime.packages_path(), package_name)
if not package_file_exists(package_name, 'package-metadata.json'):
continue
new_package_name = renamed_packages[package_name]
new_package_dir = os.path.join(sublime.packages_path(),
new_package_name)
changing_case = package_name.lower() == new_package_name.lower()
case_insensitive_fs = sublime.platform() in ['windows', 'osx']
# Since Windows and OSX use case-insensitive filesystems, we have to
# scan through the list of installed packages if the rename of the
# package is just changing the case of it. If we don't find the old
# name for it, we continue the loop since os.path.exists() will return
# true due to the case-insensitive nature of the filesystems.
if case_insensitive_fs and changing_case:
has_old = False
for present_package_name in present_packages:
if present_package_name == package_name:
has_old = True
break
if not has_old:
continue
if not os.path.exists(new_package_dir) or (case_insensitive_fs and changing_case):
# Windows will not allow you to rename to the same name with
# a different case, so we work around that with a temporary name
if os.name == 'nt' and changing_case:
temp_package_name = '__' + new_package_name
temp_package_dir = os.path.join(sublime.packages_path(),
temp_package_name)
os.rename(package_dir, temp_package_dir)
package_dir = temp_package_dir
os.rename(package_dir, new_package_dir)
installed_pkgs.append(new_package_name)
console_write(u'Renamed %s to %s' % (package_name, new_package_name), True)
else:
installer.manager.remove_package(package_name)
message_string = u'Removed %s since package with new name (%s) already exists' % (
package_name, new_package_name)
console_write(message_string, True)
try:
installed_pkgs.remove(package_name)
except (ValueError):
pass
sublime.set_timeout(lambda: self.save_packages(installed_pkgs), 10)
def save_packages(self, installed_packages):
"""
Saves the list of installed packages (after having been appropriately
renamed)
:param installed_packages:
The new list of installed packages
"""
installed_packages = list(set(installed_packages))
installed_packages = sorted(installed_packages,
key=lambda s: s.lower())
if installed_packages != self.installed_packages:
self.settings.set('installed_packages', installed_packages)
sublime.save_settings(self.settings_file)
|
11530009
|
from director import objectmodel as om
from director import vtkAll as vtk
from director import vtkNumpy as vnp
import numpy as np
def computePointToSurfaceDistance(pointsPolyData, meshPolyData):
cl = vtk.vtkCellLocator()
cl.SetDataSet(meshPolyData)
cl.BuildLocator()
points = vnp.getNumpyFromVtk(pointsPolyData, 'Points')
dists = np.zeros(len(points))
closestPoint = np.zeros(3)
closestPointDist = vtk.mutable(0.0)
cellId = vtk.mutable(0)
subId = vtk.mutable(0)
for i in xrange(len(points)):
cl.FindClosestPoint(points[i], closestPoint, cellId, subId, closestPointDist)
dists[i] = closestPointDist
return np.sqrt(dists)
def computePointToPointDistance(pointsPolyData, searchPolyData):
cl = vtk.vtkPointLocator()
cl.SetDataSet(searchPolyData)
cl.BuildLocator()
points = vnp.getNumpyFromVtk(pointsPolyData, 'Points')
searchPoints = vnp.getNumpyFromVtk(searchPolyData, 'Points')
closestPoints = np.zeros((len(points),3))
for i in xrange(len(points)):
ptId = cl.FindClosestPoint(points[i])
closestPoints[i] = searchPoints[ptId]
return np.linalg.norm(closestPoints - points, axis=1)
def computeAndColorByDistance(ptsName, meshName, colorByRange=[0.0, 0.05], arrayName='distance_to_mesh'):
pointCloud = om.findObjectByName(ptsName)
mesh = om.findObjectByName(meshName)
    assert pointCloud is not None
    assert mesh is not None
    if mesh._isPointCloud():
        dists = computePointToPointDistance(pointCloud.polyData, mesh.polyData)
    else:
        dists = computePointToSurfaceDistance(pointCloud.polyData, mesh.polyData)
vnp.addNumpyToVtk(pointCloud.polyData, dists, arrayName)
pointCloud._updateColorByProperty()
pointCloud.setProperty('Color By', arrayName)
pointCloud.colorBy(arrayName, colorByRange)
return pointCloud
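# Illustrative call (object names are assumptions; both objects must already
# exist in the object model):
#   obj = computeAndColorByDistance('point cloud', 'segmented mesh')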
|
11530020
|
import anyfig
from pathlib import Path
import time
import argparse
@anyfig.config_class # Registers the class with anyfig
class MyConfig:
def __init__(self):
# Config-parameters goes as attributes
self.experiment_note = 'Changed stuff'
self.save_directory = Path('output')
self.start_time = time.time()
# self.inner_config = InnerConfig()
@anyfig.config_class
class InnerConfig():
def __init__(self):
self.inner_text = "Yo Dawg"
# parser = argparse.ArgumentParser()
# parser.add_argument("--start_time",
# type=int,
# help="display a square of a given number")
# dict_args = vars(parser.parse_args())
# print('known', dict_args)
# print('unknown', unknown)
# args = dict(args)
# print(args)
import sys
dict_args = sys.argv[1:]
print(dict_args)
dict_args = anyfig.parse_cli_args(dict_args)
print(dict_args)
dict_args.pop('start_time', None)
config = anyfig.init_config(default_config=MyConfig, cli_args=dict_args)
print(config)
|
11530024
|
from pprint import pprint
import click
from cbapi import CbEnterpriseResponseAPI, CbThreatHunterAPI
import urllib.request
import urllib.parse
import json
import configparser
from products import vmware_cb_response as cbr, vmware_cb_enterprise_edr as cbth, microsoft_defender_for_endpoints as defender
class EDRCommon:
def __init__(self, product, profile):
self.product = product
self.profile = profile
# Build the query based on the product that was chosen
def base_query(self, *args):
if self.product == "cbr":
return cbr.build_query(*args)
elif self.product == "cbth":
return args
elif self.product == "defender":
return defender.build_query(*args)
# Search based on the product that was chosen
def process_search(self, conn, base_query, query):
if self.product == "cbr":
return cbr.process_search(conn, query, base_query)
elif self.product == "cbth":
return cbth.process_search(conn, query, base_query)
elif self.product == "defender":
return defender.process_search(conn, query, base_query)
# If defdir or deffiles were given run the appropriate search based on the product
def nested_process_search(self, criteria, conn, base_query):
if self.product == "cbr":
return cbr.nested_process_search(conn, criteria, base_query)
elif self.product == "cbth":
return cbth.nested_process_search(conn, criteria, base_query)
elif self.product == "defender":
return defender.nested_process_search(conn, criteria, base_query)
# write the rows of the CSV
def write_csv(self, output, results, *args):
for r in results:
row = [r[0], r[1], r[2], r[3], args[0], args[1]]
output.writerow(row)
def get_connection(self):
if self.product == 'cbr':
if self.profile:
cb_conn = CbEnterpriseResponseAPI(profile=self.profile)
else:
cb_conn = CbEnterpriseResponseAPI()
return cb_conn
elif self.product == 'cbth':
if self.profile:
cb_conn = CbThreatHunterAPI(profile=self.profile)
else:
cb_conn = CbThreatHunterAPI()
return cb_conn
def get_connection_creds(self, creds):
if self.product == 'defender':
if self.profile:
atp_profile = self.profile
else:
atp_profile = "default"
config = self.config_reader(creds)
token = self.get_aad_token(config[atp_profile]['tenantId'], config[atp_profile]['appId'], config[atp_profile]['appSecret'])
return token
    def get_aad_token(self, tenantID, appID, appSecret):
        url = f"https://login.windows.net/{tenantID}/oauth2/token"
        resourcesAppIdUri = 'https://api.securitycenter.windows.com'
        body = {
            "resource": resourcesAppIdUri,
            "client_id": appID,
            "client_secret": appSecret,
            "grant_type": "client_credentials"
        }
data = urllib.parse.urlencode(body).encode("utf-8")
req = urllib.request.Request(url, data)
response = urllib.request.urlopen(req)
jsonResponse = json.loads(response.read())
aadToken = jsonResponse["access_token"]
return aadToken
def config_reader(self, creds_file):
config = configparser.ConfigParser()
config.sections()
config.read(creds_file)
return config
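# Example layout of the credentials file read by config_reader (values are
# illustrative placeholders):
#   [default]
#   tenantId = 00000000-0000-0000-0000-000000000000
#   appId = 11111111-1111-1111-1111-111111111111
#   appSecret = your-app-secret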
|
11530058
|
from typing import List
import pytest
from ground.base import (Context,
                         Relation,
                         get_context)
from ground.hints import Segment
from hypothesis import given
from bentley_ottmann.core.utils import to_sorted_pair
from bentley_ottmann.planar import segments_intersections
from tests.utils import (is_point,
reverse_point_coordinates,
reverse_segments_coordinates)
from . import strategies
@given(strategies.segments_lists)
def test_basic(segments: List[Segment]) -> None:
result = segments_intersections(segments)
assert isinstance(result, dict)
assert all(isinstance(key, tuple)
and all(isinstance(coordinate, int) for coordinate in key)
for key in result.keys())
assert all(isinstance(value, tuple)
and all(is_point(coordinate) for coordinate in value)
for value in result.values())
assert all(len(key) == 2 for key in result.keys())
assert all(1 <= len(value) <= 2 for value in result.values())
@given(strategies.empty_segments_lists)
def test_base_case(segments: List[Segment]) -> None:
result = segments_intersections(segments)
assert not result
@given(strategies.non_empty_segments_lists)
def test_step(segments: List[Segment]) -> None:
    context: Context = get_context()
    *rest_segments, last_segment = segments
result = segments_intersections(rest_segments)
next_result = segments_intersections(segments)
assert (next_result.keys()
== (result.keys()
| {(index, len(segments) - 1)
for index, segment in enumerate(rest_segments)
if context.segments_relation(segment, last_segment)
is not Relation.DISJOINT}))
assert result.items() <= next_result.items()
assert all(segment_id < next_segment_id == len(segments) - 1
for segment_id, next_segment_id in (next_result.keys()
- result.keys()))
assert all(context.segments_intersection(segments[segment_id],
segments[next_segment_id])
== next_result[(segment_id, next_segment_id)][0]
if len(next_result[(segment_id, next_segment_id)]) == 1
else
context.segments_intersection(segments[segment_id],
segments[next_segment_id])
not in (Relation.DISJOINT, Relation.TOUCH, Relation.CROSS)
and (to_sorted_pair(*next_result[(segment_id, next_segment_id)])
== next_result[(segment_id, next_segment_id)])
and all(context.segment_contains_point(segments[segment_id],
point)
for point in next_result[(segment_id, next_segment_id)])
and all(context.segment_contains_point(
segments[next_segment_id], point)
for point in next_result[(segment_id, next_segment_id)])
for segment_id, next_segment_id in (next_result.keys()
- result.keys()))
assert all(context.segments_relation(segments[segment_id],
segments[next_segment_id])
is not Relation.DISJOINT
for segment_id, next_segment_id in (next_result.keys()
- result.keys()))
@given(strategies.segments_lists)
def test_reversed_coordinates(segments: List[Segment]) -> None:
result = segments_intersections(segments)
reversed_result = segments_intersections(reverse_segments_coordinates(
segments))
assert result == {
ids_pair: tuple(sorted(reverse_point_coordinates(endpoint)
for endpoint in endpoints))
for ids_pair, endpoints in reversed_result.items()}
@given(strategies.degenerate_segments_lists)
def test_degenerate_segments(segments: List[Segment]) -> None:
with pytest.raises(ValueError):
segments_intersections(segments)
|
11530064
|
from pytest import raises
from dollar_ref import (
resolve,
InternalResolutionError, FileResolutionError,
ResolutionError, DecodeError
)
def test_bad_internal_ref():
data = {
'real': 'stuff',
'bad_ref': {
'$ref': '#/does/not/exist'
}
}
with raises(InternalResolutionError):
resolve(data)
def test_bad_file_ref():
data = {
'real': 'stuff',
'bad_ref': {
'$ref': '/not/existing/file#/well/this/does/not/matter'
}
}
with raises(FileResolutionError):
resolve(data)
def test_bad_json(tmpdir):
bad_json = tmpdir.join('bad.json')
bad_json.write('!very #bad ^stuff')
data = {
        '$ref': str(bad_json)
}
with raises(DecodeError):
resolve(data)
def test_bad_yaml(tmpdir):
    bad_yaml = tmpdir.join('bad.yaml')
bad_yaml.write('{[!very #bad yaml')
data = {
        '$ref': str(bad_yaml)
}
with raises(DecodeError):
resolve(data)
def test_bad_ref():
data = {
'real': 'stuff',
'bad_ref': {
'$ref': '%real$wierd&things'
}
}
with raises(FileResolutionError):
resolve(data)
def test_web():
data = {
'real': 'stuff',
'bad_ref': {
'$ref': 'http://example.com#/well/this/does/not/matter'
}
}
with raises(ResolutionError):
resolve(data)
|
11530109
|
from __future__ import (
absolute_import,
unicode_literals,
)
import random
import re
from typing import (
Any,
Dict,
)
import unittest
from pymetrics.recorders.noop import noop_metrics
import six
from pysoa.common.transport.base import get_hex_thread_id
from pysoa.common.transport.redis_gateway.client import RedisClientTransport
from pysoa.test.compatibility import mock
@mock.patch('pysoa.common.transport.redis_gateway.client.RedisTransportClientCore')
class TestClientTransport(unittest.TestCase):
@staticmethod
def _get_transport(service='my_service', **kwargs):
return RedisClientTransport(service, noop_metrics, **kwargs)
# noinspection PyCompatibility
def test_core_args(self, mock_core):
transport = self._get_transport(hello='world', goodbye='earth')
mock_core.assert_called_once_with(
service_name='my_service',
hello='world',
goodbye='earth',
metrics=transport.metrics,
)
assert re.compile(r'^[0-9a-fA-F]{32}$').match(transport.client_id), transport.client_id
mock_core.reset_mock()
transport = self._get_transport(hello='world', goodbye='earth', maximum_message_size_in_bytes=42)
mock_core.assert_called_once_with(
service_name='my_service',
hello='world',
goodbye='earth',
metrics=transport.metrics,
maximum_message_size_in_bytes=42,
)
assert re.compile(r'^[0-9a-fA-F]{32}$').match(transport.client_id), transport.client_id
def test_send_request_message(self, mock_core):
transport = self._get_transport()
request_id = random.randint(1, 1000)
meta = {'app': 'ppa'}
message = {'test': 'payload'}
transport.send_request_message(request_id, meta, message)
mock_core.return_value.send_message.assert_called_once_with(
'service.my_service',
request_id,
{
'app': 'ppa',
'reply_to': 'service.my_service.{client_id}!{thread_id}'.format(
client_id=transport.client_id,
thread_id=get_hex_thread_id(),
),
},
message,
None,
)
def test_send_request_message_another_service(self, mock_core):
transport = self._get_transport('geo')
request_id = random.randint(1, 1000)
message = {'another': 'message'}
transport.send_request_message(request_id, {}, message, 25)
mock_core.return_value.send_message.assert_called_once_with(
'service.geo',
request_id,
{
'reply_to': 'service.geo.{client_id}!{thread_id}'.format(
client_id=transport.client_id,
thread_id=get_hex_thread_id(),
),
},
message,
25,
)
def test_receive_response_message(self, mock_core):
transport = self._get_transport()
transport._requests_outstanding = 1
request_id = random.randint(1, 1000)
meta = {'app': 'ppa'}
message = {'test': 'payload'}
mock_core.return_value.receive_message.return_value = request_id, meta, message
response = transport.receive_response_message()
self.assertEqual(request_id, response[0])
self.assertEqual(meta, response[1])
self.assertEqual(message, response[2])
mock_core.return_value.receive_message.assert_called_once_with(
'service.my_service.{client_id}!{thread_id}'.format(
client_id=transport.client_id,
thread_id=get_hex_thread_id(),
),
None,
)
def test_receive_response_message_another_service(self, mock_core):
transport = self._get_transport('geo')
transport._requests_outstanding = 1
request_id = random.randint(1, 1000)
meta = {} # type: Dict[six.text_type, Any]
message = {'another': 'message'}
mock_core.return_value.receive_message.return_value = request_id, meta, message
response = transport.receive_response_message(15)
self.assertEqual(request_id, response[0])
self.assertEqual(meta, response[1])
self.assertEqual(message, response[2])
mock_core.return_value.receive_message.assert_called_once_with(
'service.geo.{client_id}!{thread_id}'.format(
client_id=transport.client_id,
thread_id=get_hex_thread_id(),
),
15,
)
def test_requests_outstanding(self, mock_core):
transport = self._get_transport('geo')
self.assertEqual(0, transport.requests_outstanding)
transport.send_request_message(random.randint(1, 1000), {}, {})
self.assertEqual(1, transport.requests_outstanding)
transport.send_request_message(random.randint(1, 1000), {}, {})
self.assertEqual(2, transport.requests_outstanding)
request_id = random.randint(1, 1000)
mock_core.return_value.receive_message.return_value = request_id, {}, {}
self.assertEqual((request_id, {}, {}), transport.receive_response_message())
self.assertEqual(1, transport.requests_outstanding)
self.assertEqual((request_id, {}, {}), transport.receive_response_message())
self.assertEqual(0, transport.requests_outstanding)
self.assertEqual((None, None, None), transport.receive_response_message())
|
11530117
|
class Details(object):
def __init__(self, set_callback=None):
super(Details, self).__init__()
self._details = {}
self._set_callback = set_callback
def set(self, key, value):
"""Sets a specific detail (by name) to a specific value
"""
if self._set_callback is not None:
self._set_callback(key, value)
self._details[key] = value
def append(self, key, value):
"""Appends a value to a list key, or creates it if needed
"""
lst = self._details.setdefault(key, [])
if not isinstance(lst, list):
raise TypeError('Cannot append value to a {.__class__.__name__!r} value'.format(lst))
lst.append(value)
if self._set_callback is not None:
self._set_callback(key, lst)
def all(self):
return self._details.copy()
def __nonzero__(self):
return bool(self._details)
__bool__ = __nonzero__
def __contains__(self, key):
return key in self._details
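# --- Hypothetical usage sketch (illustrative only; the callback and keys
# below are made up, not part of the original module):
if __name__ == '__main__':
    def _log(key, value):
        print('detail changed: {0} -> {1!r}'.format(key, value))
    details = Details(set_callback=_log)
    details.set('status', 'ok')          # fires _log('status', 'ok')
    details.append('errors', 'timeout')  # creates the list, then fires _log
    assert 'status' in details and bool(details)
    print(details.all())                 # {'status': 'ok', 'errors': ['timeout']}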
|
11530138
|
from numpy import linspace, sin, exp
import sys
import pylab
import larch
# set python path so that larch plugins can be found
sys.path.append(larch.plugin_path('xafs'))
import xafsft
k = linspace(0, 20, 401)
chi = sin(4.2*k)*exp(-k*k/150)*exp(-(k-10)**2/50)
# need to have a larch interpreter for most functions
mylarch = larch.Interpreter()
# create an empty group
out = larch.Group()
# forward transform, passing in the interpreter and a group
# into which the results are put
xafsft.xftf(k, chi, kmin=2, kmax=15, dk=3, kweight=4,
_larch=mylarch, group=out)
# the larch Group 'out' is a plain object, with all data in members:
pylab.plot(out.r, out.chir_mag)
pylab.show()
|
11530140
|
import re
class Main():
def __init__(self):
self.s = input()
self.c = 'qwrtypsdfghjklzxcvbnm'
def output(self):
        self.ss = re.findall(r'(?<=[' + self.c + '])([aeiou]{2,})(?=[' + self.c + '])', self.s, flags=re.I)
print('\n'.join(self.ss or ['-1']))
if __name__ == '__main__':
obj = Main()
obj.output()
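# Illustrative example (hypothetical input, not part of the original file):
# for s = 'rabcdeefgyYhFjkIoomty' the pattern matches vowel runs of length
# >= 2 that sit strictly between two consonants, printing 'ee' and 'Ioo'
# on separate lines; an input with no such run prints '-1'.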
|
11530161
|
import sys
import datetime
def basic(arguments):
import api
critic = api.critic.startSession(for_testing=True)
repository = api.repository.fetch(critic, name="critic")
branch = api.branch.fetch(
critic, repository=repository, name=arguments.review)
review = api.review.fetch(critic, branch=branch)
alice = api.user.fetch(critic, name="alice")
bob = api.user.fetch(critic, name="bob")
dave = api.user.fetch(critic, name="dave")
erin = api.user.fetch(critic, name="erin")
all_comments = api.comment.fetchAll(critic)
assert isinstance(all_comments, list)
EXPECTED = {
0: { "text": "This is a general issue.",
"location": None,
"type": "issue",
"state": "open" },
1: { "text": "This is a general note.",
"location": None,
"type": "issue",
"state": "open" },
2: { "text": "This is a commit issue.",
"location": ("commit-message", 1, 3),
"type": "issue",
"state": "resolved",
"resolved_by": dave },
3: { "text": "This is a commit note.",
"location": ("commit-message", 5, 5),
"type": "note" },
4: { "text": "This is a file issue.",
"location": ("file-version", 1, 3),
"type": "issue",
"state": "open" },
5: { "text": "This is a file note.",
"location": ("file-version", 9, 9),
"type": "note" }
}
def check_comment(comment):
assert isinstance(comment, api.comment.Comment)
assert isinstance(comment.id, int)
assert api.comment.fetch(critic, comment_id=comment.id) is comment
expected = EXPECTED[comment_id_map[comment.id]]
assert isinstance(comment.type, str)
assert comment.type == expected["type"]
assert isinstance(comment.is_draft, bool)
assert not comment.is_draft
assert comment.review is review
assert comment.author is alice
assert isinstance(comment.timestamp, datetime.datetime)
assert isinstance(comment.text, str)
assert comment.text == expected["text"]
if comment.type == "note":
assert isinstance(comment, api.comment.Note)
return
assert isinstance(comment, api.comment.Issue)
assert isinstance(comment.state, str)
assert comment.state == expected["state"]
if comment.state == "resolved":
assert comment.resolved_by is expected["resolved_by"]
else:
assert comment.resolved_by is None
if comment.state == "addressed":
assert comment.addressed_by is expected["addressed_by"]
else:
assert comment.addressed_by is None
if expected["location"] is None:
assert comment.location is None
else:
location_type, first_line, last_line = expected["location"]
if location_type == "file-version":
# FileVersionLocation is not yet supported.
return
assert comment.location.type == location_type
assert comment.location.first_line == first_line
assert comment.location.last_line == last_line, (comment.location.last_line, last_line)
assert isinstance(comment.location, api.comment.Location)
if location_type == "commit-message":
assert isinstance(
comment.location, api.comment.CommitMessageLocation)
else:
assert isinstance(
comment.location, api.comment.FileVersionLocation)
comments = api.comment.fetchAll(critic, review=review)
assert isinstance(comments, list)
assert len(comments) == 6
comment_id_map = {
comment.id: index
for index, comment in enumerate(comments)
}
for comment in comments:
check_comment(comment)
some_comments = api.comment.fetchMany(critic, [3, 2, 1])
assert len(some_comments) == 3
assert some_comments[0].id == 3
assert some_comments[0] is api.comment.fetch(critic, 3)
assert some_comments[1].id == 2
assert some_comments[1] is api.comment.fetch(critic, 2)
assert some_comments[2].id == 1
assert some_comments[2] is api.comment.fetch(critic, 1)
print "basic: ok"
def main(argv):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--review")
parser.add_argument("tests", nargs=argparse.REMAINDER)
arguments = parser.parse_args(argv)
for test in arguments.tests:
if test == "basic":
basic(arguments)
|
11530195
|
from pygears import gear
from pygears.typing import cast as type_cast
from pygears.typing import trunc as type_trunc
from pygears.conf import reg
from pygears.core.intf import IntfOperPlugin
from pygears.hdl.util import HDLGearHierVisitor, flow_visitor
from pygears.hdl.sv import SVGenPlugin
from pygears.hdl.v import VGenPlugin
@gear(hdl={'compile': True})
async def trunc(din, *, t) -> b'type_trunc(din, t)':
async with din as d:
yield type_trunc(d, t)
@gear(hdl={'compile': True})
async def cast(din, *, t) -> b'type_cast(din, t)':
async with din as d:
yield type_cast(d, t)
def pipe(self, other):
if self.producer is not None:
name = f'cast_{self.producer.basename}'
else:
name = 'cast'
return cast(self, t=other, name=name)
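# Illustrative note (an assumption based on the registration in
# HDLCastPlugin.bind() below): binding ``pipe`` to 'gear/intf_oper/__or__'
# makes ``intf | SomeType`` instantiate the cast gear, so type casts can be
# written inline in dataflow expressions.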
@flow_visitor
class RemoveEqualReprCastVisitor(HDLGearHierVisitor):
def cast(self, node):
pout = node.out_ports[0]
pin = node.in_ports[0]
if pin.dtype.width == pout.dtype.width:
node.bypass()
class HDLCastPlugin(IntfOperPlugin, VGenPlugin, SVGenPlugin):
@classmethod
def bind(cls):
reg['gear/intf_oper/__or__'] = pipe
reg['vgen/flow'].insert(0, RemoveEqualReprCastVisitor)
reg['svgen/flow'].insert(0, RemoveEqualReprCastVisitor)
|
11530196
|
import os
import sqlite3
import pytest
from minecraft_mod_manager.gateways.sqlite_upgrader import SqliteUpgrader
from ..config import config
from .sqlite import Sqlite
db_file = f".{config.app_name}.db"
@pytest.fixture
def db() -> sqlite3.Connection:
db = sqlite3.connect(db_file)
yield db
db.close()
os.remove(db_file)
@pytest.fixture
def cursor(db: sqlite3.Connection) -> sqlite3.Cursor:
cursor = db.cursor()
yield cursor
cursor.close()
def test_create_tables_on_first_run(cursor: sqlite3.Cursor):
sqlite = Sqlite()
sqlite.close()
cursor.execute("SELECT * FROM mod")
mods = cursor.fetchall()
assert mods == []
assert_version(cursor)
def test_v0_to_v1(db: sqlite3.Connection, cursor: sqlite3.Cursor):
cursor.execute(
"CREATE TABLE mod ("
+ "id TEXT UNIQUE, "
+ "repo_type TEXT, "
+ "repo_name TEXT, "
+ "upload_time INTEGER, "
+ "active INTEGER)"
)
cursor.execute(
"INSERT INTO mod (id, repo_type, repo_name, upload_time, active) VALUES "
+ "('carpet', 'curse', 'carpet', 1, 1)"
)
db.commit()
upgrader = SqliteUpgrader(db, cursor)
upgrader._v0_to_v1()
cursor.execute("SELECT * FROM mod")
mods = cursor.fetchall()
assert [] == mods
assert_version(cursor)
@pytest.mark.parametrize(
"test_name,input,expected",
[
(
"Migrates all fields when present",
("id", "curse", "site_id", "site_slug", 123, 1),
("id", "curse:site_id:site_slug", 123, 1),
),
(
"Skip saving slug when no site is specified",
("id", "", "", "slug", 123, 1),
("id", "", 123, 1),
),
(
"Migrate slug when no site_id",
("id", "curse", "", "site_slug", 123, 0),
("id", "curse::site_slug", 123, 0),
),
(
"Skip when site is unknown",
("id", "unknown", "", "", 123, 1),
("id", "", 123, 1),
),
(
"Convert 'None' to empty",
("id", "curse", "None", "None", 123, 1),
("id", "curse::", 123, 1),
),
],
)
def test_v1_to_v2(test_name: str, input, expected, db: sqlite3.Connection, cursor: sqlite3.Cursor):
print(test_name)
cursor.execute(
"CREATE TABLE mod ("
+ "id TEXT UNIQUE, "
+ "site TEXT, "
+ "site_id TEXT, "
+ "site_slug TEXT, "
+ "upload_time INTEGER, "
+ "active INTEGER)"
)
cursor.execute(
"INSERT INTO mod (id, site, site_id, site_slug, upload_time, active) VALUES (?, ?, ?, ?, ?, ?)", input
)
db.commit()
upgrader = SqliteUpgrader(db, cursor)
upgrader._v1_to_v2()
cursor.execute("SELECT * FROM mod")
result = cursor.fetchall()
assert [expected] == result
def create_version_table(version: int, db: sqlite3.Connection, cursor: sqlite3.Cursor) -> None:
cursor.execute("CREATE TABLE version (version INTEGER)")
cursor.execute("INSERT INTO version (version) VALUES(?)", (version,))
def assert_version(cursor: sqlite3.Cursor) -> None:
cursor.execute("SELECT * FROM version")
versions = cursor.fetchall()
assert [(SqliteUpgrader._version,)] == versions
|
11530203
|
import asyncio
import collections.abc
from utilities import DotDict, recursive_dictionary_update
class BaseMeta(type):
def __new__(mcs, name, bases, clsdict):
for key, value in clsdict.items():
if callable(value) and (value.__name__.startswith("on_") or
hasattr(value, "_command")):
clsdict[key] = asyncio.coroutine(value)
c = type.__new__(mcs, name, bases, clsdict)
return c
class BasePlugin(metaclass=BaseMeta):
"""
Defines an interface for all plugins to inherit from. Note that the init
method should generally not be overrode; all setup work should be done in
activate() if possible. If you do override __init__, remember to super()!
Note that only one instance of each plugin will be instantiated for *all*
connected clients. self.connection will be changed by the plugin
manager to the current connection.
You may access the factory if necessary via self.factory.connections
to access other clients, but this "Is Not A Very Good Idea" (tm)
`name` *must* be defined in child classes or else the plugin manager will
complain quite thoroughly.
"""
name = "Base Plugin"
description = "The common class for all plugins to inherit from."
version = ".1"
depends = ()
default_config = None
plugins = DotDict({})
auto_activate = True
def __init__(self):
self.loop = asyncio.get_event_loop()
self.plugin_config = self.config.get_plugin_config(self.name)
        if isinstance(self.default_config, collections.abc.Mapping):
temp = recursive_dictionary_update(self.default_config,
self.plugin_config)
self.plugin_config.update(temp)
else:
self.plugin_config = self.default_config
def activate(self):
pass
def deactivate(self):
pass
def on_protocol_request(self, data, connection):
"""Packet type: 0 """
return True
def on_protocol_response(self, data, connection):
"""Packet type: 1 """
return True
def on_server_disconnect(self, data, connection):
"""Packet type: 2 """
return True
def on_connect_success(self, data, connection):
"""Packet type: 3 """
return True
def on_connect_failure(self, data, connection):
"""Packet type: 4 """
return True
def on_handshake_challenge(self, data, connection):
"""Packet type: 5 """
return True
def on_chat_received(self, data, connection):
"""Packet type: 6 """
return True
def on_universe_time_update(self, data, connection):
"""Packet type: 7 """
return True
def on_celestial_response(self, data, connection):
"""Packet type: 8 """
return True
def on_player_warp_result(self, data, connection):
"""Packet type: 9 """
return True
def on_planet_type_update(self, data, connection):
"""Packet type: 10 """
return True
def on_pause(self, data, connection):
"""Packet type: 11 """
return True
def on_client_connect(self, data, connection):
"""Packet type: 12 """
return True
def on_client_disconnect_request(self, data, connection):
"""Packet type: 13 """
return True
def on_handshake_response(self, data, connection):
"""Packet type: 14 """
return True
def on_player_warp(self, data, connection):
"""Packet type: 15 """
return True
def on_fly_ship(self, data, connection):
"""Packet type: 16 """
return True
def on_chat_sent(self, data, connection):
"""Packet type: 17 """
return True
def on_celestial_request(self, data, connection):
"""Packet type: 18 """
return True
def on_client_context_update(self, data, connection):
"""Packet type: 19 """
return True
def on_world_start(self, data, connection):
"""Packet type: 20 """
return True
def on_world_stop(self, data, connection):
"""Packet type: 21 """
return True
def on_world_layout_update(self, data, connection):
"""Packet type: 22 """
return True
def on_world_parameters_update(self, data, connection):
"""Packet type: 23 """
return True
def on_central_structure_update(self, data, connection):
"""Packet type: 24 """
return True
def on_tile_array_update(self, data, connection):
"""Packet type: 25 """
return True
def on_tile_update(self, data, connection):
"""Packet type: 26 """
return True
def on_tile_liquid_update(self, data, connection):
"""Packet type: 27 """
return True
def on_tile_damage_update(self, data, connection):
"""Packet type: 28 """
return True
def on_tile_modification_failure(self, data, connection):
"""Packet type: 29 """
return True
def on_give_item(self, data, connection):
"""Packet type: 30 """
return True
def on_environment_update(self, data, connection):
"""Packet type: 31 """
return True
def on_update_tile_protection(self, data, connection):
"""Packet type: 32 """
return True
def on_set_dungeon_gravity(self, data, connection):
"""Packet type: 33 """
return True
    def on_set_dungeon_breathable(self, data, connection):
        """Packet type: 34 """
        return True
def on_set_player_start(self, data, connection):
"""Packet type: 35 """
return True
def on_find_unique_entity_response(self, data, connection):
"""Packet type: 36"""
return True
def on_modify_tile_list(self, data, connection):
"""Packet type: 37 """
return True
def on_damage_tile_group(self, data, connection):
"""Packet type: 38 """
return True
def on_collect_liquid(self, data, connection):
"""Packet type: 39 """
return True
def on_request_drop(self, data, connection):
"""Packet type: 40 """
return True
def on_spawn_entity(self, data, connection):
"""Packet type: 41 """
return True
def on_connect_wire(self, data, connection):
"""Packet type: 42 """
return True
def on_disconnect_all_wires(self, data, connection):
"""Packet type: 43 """
return True
def on_world_client_state_update(self, data, connection):
"""Packet type: 44 """
return True
def on_find_unique_entity(self, data, connection):
"""Packet type: 45 """
return True
def on_unk(self, data, connection):
"""Packet type: 46 """
return True
def on_entity_create(self, data, connection):
"""Packet type: 47 """
return True
def on_entity_update(self, data, connection):
"""Packet type: 48 """
return True
def on_entity_destroy(self, data, connection):
"""Packet type: 49 """
return True
def on_entity_interact(self, data, connection):
"""Packet type: 50 """
return True
def on_entity_interact_result(self, data, connection):
"""Packet type: 51 """
return True
def on_hit_request(self, data, connection):
"""Packet type: 52 """
return True
def on_damage_request(self, data, connection):
"""Packet type: 53 """
return True
def on_damage_notification(self, data, connection):
"""Packet type: 54 """
return True
def on_entity_message(self, data, connection):
"""Packet type: 55 """
return True
def on_entity_message_response(self, data, connection):
"""Packet type: 56 """
return True
def on_update_world_properties(self, data, connection):
"""Packet type: 57 """
return True
def on_step_update(self, data, connection):
"""Packet type: 58 """
return True
def on_system_world_start(self, data, connection):
"""Packet type: 59 """
return True
def on_system_world_update(self, data, connection):
"""Packet type: 60 """
return True
def on_system_object_create(self, data, connection):
"""Packet type: 61 """
return True
def on_system_object_destroy(self, data, connection):
"""Packet type: 62 """
return True
def on_system_ship_create(self, data, connection):
"""Packet type: 63 """
return True
def on_system_ship_destroy(self, data, connection):
"""Packet type: 64 """
return True
def on_system_object_spawn(self, data, connection):
"""Packet type: 65 """
return True
def __repr__(self):
return "<Plugin instance: %s (version %s)>" % (self.name, self.version)
class CommandNameError(Exception):
"""
Raised when a command name can't be found from the `commands` list in a
`SimpleCommandPlugin` instance.
"""
class SimpleCommandPlugin(BasePlugin):
name = "simple_command_plugin"
description = "Provides a simple parent class to define chat commands."
version = "0.1"
depends = ["command_dispatcher"]
auto_activate = True
def activate(self):
super().activate()
for name, attr in [(x, getattr(self, x)) for x in self.__dir__()]:
if hasattr(attr, "_command"):
for alias in attr._aliases:
self.plugins['command_dispatcher'].register(attr, alias)
class StoragePlugin(BasePlugin):
name = "storage_plugin"
depends = ['player_manager']
def activate(self):
super().activate()
self.storage = self.plugins.player_manager.get_storage(self.name)
class StorageCommandPlugin(SimpleCommandPlugin):
name = "storage_command_plugin"
depends = ['command_dispatcher', 'player_manager']
def activate(self):
super().activate()
self.storage = self.plugins.player_manager.get_storage(self)
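# --- Hypothetical example plugin (illustrative only, not part of the
# original module): shows the subclassing pattern the BasePlugin docstring
# describes. The name and the chosen packet hook are made up.
class GreeterPlugin(BasePlugin):
    name = "greeter_plugin"
    description = "Greets players when they connect."
    version = "0.1"
    def on_client_connect(self, data, connection):
        # Returning True lets the packet continue through the handler chain,
        # matching the convention of the hooks defined in BasePlugin above.
        return True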
|
11530269
|
import sys
from copy import deepcopy
import numpy as np
from shape_validator import checkFaceValidity, checkVerticesValidity, get_array_depth
from service import GracefulShutdown
class CachedObjClass(object):
def __init__(self, name):
self.name = name
self.count = {}
self.__dict_of_data = {}
def keys(self):
return self.__dict_of_data.keys()
    def value(self, key):
        self.count[key] += 1
        x = self.__dict_of_data[key]
        self.__validate(key, x)
        x = deepcopy(x) if list is type(x) else x  # deep-copy lists so callers cannot mutate the cache
        return x
def __validate(self, key, values):
depth = get_array_depth( values )
if depth == 3: # If the structure includes separated obj objects (defined by a 3-layer depth), just merge the objects.
nparr = np.array(values)
nparrv = nparr.reshape(-1, nparr.shape[-1])
# print(str(self.name)+" Cache: "+str(key)+" depth: "+str(len(nparrv.shape)))
# print("Shape: "+str(nparrv.shape))
values = nparrv.tolist()
# print(type(values))
# print(type(values[0]))
# print(type(values[0][0]))
if self.name == "faces":
checkFaceValidity( values )
elif self.name == "vertices":
checkVerticesValidity( values )
# None_invalid = all([i is not None for i in x[0] ])
# if not None_invalid:
# print("None invalid: "+str(None_invalid))
# print(self.name+" Count["+str(key)+"] = "+str(self.count[key]))
# GracefulShutdown.do_shutdown()
# float_values_invalid = all([i[0] is not None for i in x[0] ])
# if not float_values_invalid:
# print("Float Values invalid: "+str(float_values_invalid))
# print(self.name+" Count["+str(key)+"] = "+str(self.count[key]))
# GracefulShutdown.do_shutdown()
# length_invalid = all([len(i) >= 3 for i in x[0] ])
# if not length_invalid:
# print("Length invalid: "+str(length_invalid))
# print(self.name+" Count["+str(key)+"] = "+str(self.count[key]))
# GracefulShutdown.do_shutdown()
    def set_once(self, key, value):
        if key not in self.__dict_of_data:
            print("Caching "+str(self.name)+" for obj file: "+str(key))
            self.__validate(key, value)
            self.__dict_of_data[key] = deepcopy(value) if list is type(value) else value  # deep-copy lists so later caller mutations cannot corrupt the cache
self.count[key] = 1
else:
pass
class CacheObjManager():
"""
Local Cache container to avoid multiple OBJ file reads for the same filename. Stores faces and vertices.
"""
cached_vertices = CachedObjClass("vertices")
cached_faces = CachedObjClass("faces")
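# --- Hypothetical usage sketch (illustrative only; 'cube.obj' and the
# vertex data below are made up). set_once caches per filename, and
# value() hands lists back as deepcopies so callers cannot mutate the
# cached geometry:
# CacheObjManager.cached_vertices.set_once('cube.obj', [[0.0, 0.0, 0.0],
#                                                       [1.0, 0.0, 0.0],
#                                                       [0.0, 1.0, 0.0]])
# verts = CacheObjManager.cached_vertices.value('cube.obj')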
|
11530276
|
from django_elasticsearch_dsl import fields as es_fields
from django_elasticsearch_dsl.registries import registry
from researchhub_document.related_models.researchhub_post_model import ResearchhubPost
from .base import BaseDocument
from search.analyzers import (
title_analyzer,
content_analyzer
)
@registry.register_document
class PostDocument(BaseDocument):
auto_refresh = True
hubs_flat = es_fields.TextField(attr='hubs_indexing_flat')
hot_score = es_fields.IntegerField(attr='hot_score')
score = es_fields.IntegerField(attr='score')
discussion_count = es_fields.IntegerField(attr='discussion_count')
unified_document_id = es_fields.IntegerField(attr='unified_document_id')
title = es_fields.TextField(analyzer=title_analyzer)
created_date = es_fields.DateField(attr='created_date')
updated_date = es_fields.DateField(attr='updated_date')
preview_img = es_fields.TextField(attr='preview_img')
renderable_text = es_fields.TextField(attr='renderable_text', analyzer=content_analyzer)
created_by_id = es_fields.IntegerField(attr='created_by_id')
authors = es_fields.ObjectField(
attr='authors_indexing',
properties={
'first_name': es_fields.TextField(),
'last_name': es_fields.TextField(),
'full_name': es_fields.TextField(),
}
)
hubs = es_fields.ObjectField(
attr='hubs_indexing',
properties={
'hub_image': es_fields.TextField(),
'id': es_fields.IntegerField(),
'is_locked': es_fields.TextField(),
'is_removed': es_fields.TextField(),
'name': es_fields.KeywordField(),
}
)
class Index:
name = 'post'
class Django:
model = ResearchhubPost
queryset_pagination = 250
fields = [
'id',
'document_type',
]
def should_remove_from_index(self, obj):
if obj.is_removed:
return True
return False
|
11530288
|
import torch
import torch.nn as nn
def conv_bn(in_channels, out_channels, kernel_size, stride=1, groups=1):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,kernel_size=kernel_size, stride=stride,
padding=kernel_size // 2, groups=groups, bias=False),
nn.BatchNorm2d(out_channels)
)
class GlobalAveragePool2D():
def __init__(self, keepdim=True):
self.keepdim = keepdim
def __call__(self, inputs):
        return torch.mean(inputs, dim=[2, 3], keepdim=self.keepdim)
class SSEBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(SSEBlock, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
self.norm = nn.BatchNorm2d(self.in_channels)
self.globalAvgPool = GlobalAveragePool2D()
self.conv = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=(1, 1))
self.sigmoid = nn.Sigmoid()
def forward(self, inputs):
bn = self.norm(inputs)
x = self.globalAvgPool(bn)
x = self.conv(x)
x = self.sigmoid(x)
z = torch.mul(bn, x)
return z
class Downsampling_block(nn.Module):
def __init__(self, in_channels, out_channels):
super(Downsampling_block, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
self.avgpool = nn.AvgPool2d(kernel_size=(2, 2))
self.conv1 = conv_bn(self.in_channels, self.out_channels, kernel_size=1)
self.conv2 = conv_bn(self.in_channels, self.out_channels, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=1)
self.globalAvgPool = GlobalAveragePool2D()
self.act = nn.SiLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, inputs):
x = self.avgpool(inputs)
x = self.conv1(x)
y = self.conv2(inputs)
z = self.globalAvgPool(inputs)
z = self.conv3(z)
z = self.sigmoid(z)
a = x + y
b = torch.mul(a, z)
out = self.act(b)
return out
class Fusion(nn.Module):
def __init__(self, in_channels, out_channels):
super(Fusion, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.mid_channels = 2 * self.in_channels
self.avgpool = nn.AvgPool2d(kernel_size=(2, 2))
self.conv1 = conv_bn(self.mid_channels, self.out_channels, kernel_size=1, stride=1, groups=2)
self.conv2 = conv_bn(self.mid_channels, self.out_channels, kernel_size=3, stride=2, groups=2)
self.conv3 = nn.Conv2d(in_channels=self.mid_channels, out_channels=self.out_channels, kernel_size=1, groups=2)
self.globalAvgPool = GlobalAveragePool2D()
self.act = nn.SiLU(inplace=True)
self.sigmoid = nn.Sigmoid()
self.bn = nn.BatchNorm2d(self.in_channels)
self.group = in_channels
def channel_shuffle(self, x):
batchsize, num_channels, height, width = x.data.size()
assert num_channels % self.group == 0
group_channels = num_channels // self.group
x = x.reshape(batchsize, group_channels, self.group, height, width)
x = x.permute(0, 2, 1, 3, 4)
x = x.reshape(batchsize, num_channels, height, width)
return x
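    # Illustrative note: with the concatenated input [a1..an, b1..bn]
    # produced in forward() below, the reshape/permute above interleaves
    # the two branches channel-wise to [a1, b1, a2, b2, ...].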
def forward(self, input1, input2):
a = torch.cat([self.bn(input1), self.bn(input2)], dim=1)
a = self.channel_shuffle(a)
x = self.avgpool(a)
x = self.conv1(x)
y = self.conv2(a)
z = self.globalAvgPool(a)
z = self.conv3(z)
z = self.sigmoid(z)
a = x + y
b = torch.mul(a, z)
out = self.act(b)
return out
class Stream(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.sse = nn.Sequential(SSEBlock(self.in_channels, self.out_channels))
self.fuse = nn.Sequential(FuseBlock(self.in_channels, self.out_channels))
self.act = nn.SiLU(inplace=True)
def forward(self, inputs):
a = self.sse(inputs)
b = self.fuse(inputs)
c = a + b
d = self.act(c)
return d
class FuseBlock(nn.Module):
def __init__(self, in_channels, out_channels) -> None:
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv1 = conv_bn(self.in_channels, self.out_channels, kernel_size=1)
self.conv2 = conv_bn(self.in_channels, self.out_channels, kernel_size=3, stride=1)
def forward(self, inputs):
a = self.conv1(inputs)
b = self.conv2(inputs)
c = a + b
return c
class ParNetEncoder(nn.Module):
def __init__(self, in_channels, block_channels, depth) -> None:
super().__init__()
self.in_channels = in_channels
self.block_channels = block_channels
self.depth = depth
self.d1 = Downsampling_block(self.in_channels, self.block_channels[0])
self.d2 = Downsampling_block(self.block_channels[0], self.block_channels[1])
self.d3 = Downsampling_block(self.block_channels[1], self.block_channels[2])
self.d4 = Downsampling_block(self.block_channels[2], self.block_channels[3])
self.d5 = Downsampling_block(self.block_channels[3], self.block_channels[4])
self.stream1 = nn.Sequential(
*[Stream(self.block_channels[1], self.block_channels[1]) for _ in range(self.depth[0])]
)
self.stream1_downsample = Downsampling_block(self.block_channels[1], self.block_channels[2])
self.stream2 = nn.Sequential(
*[Stream(self.block_channels[2], self.block_channels[2]) for _ in range(self.depth[1])]
)
self.stream3 = nn.Sequential(
*[Stream(self.block_channels[3], self.block_channels[3]) for _ in range(self.depth[2])]
)
self.stream2_fusion = Fusion(self.block_channels[2], self.block_channels[3])
self.stream3_fusion = Fusion(self.block_channels[3], self.block_channels[3])
def forward(self, inputs):
x = self.d1(inputs)
x = self.d2(x)
y = self.stream1(x)
y = self.stream1_downsample(y)
x = self.d3(x)
z = self.stream2(x)
z = self.stream2_fusion(y, z)
x = self.d4(x)
a = self.stream3(x)
b = self.stream3_fusion(z, a)
x = self.d5(b)
return x
class ParNetDecoder(nn.Module):
def __init__(self, in_channels, n_classes) -> None:
super().__init__()
self.avg = nn.AdaptiveAvgPool2d((1, 1))
self.decoder = nn.Linear(in_channels, n_classes)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.decoder(x)
return self.softmax(x)
class ParNet(nn.Module):
def __init__(self, in_channels, n_classes, block_channels=[64, 128, 256, 512, 2048], depth=[4, 5, 5]) -> None:
super().__init__()
self.encoder = ParNetEncoder(in_channels, block_channels, depth)
self.decoder = ParNetDecoder(block_channels[-1], n_classes)
def forward(self, inputs):
x = self.encoder(inputs)
x = self.decoder(x)
return x
def parnet_s(in_channels, n_classes):
return ParNet(in_channels, n_classes, block_channels=[64, 96, 192, 384, 1280])
def parnet_m(in_channels, n_classes):
model = ParNet(in_channels, n_classes, block_channels=[64, 128, 256, 512, 2048])
return model
def parnet_l(in_channels, n_classes):
return ParNet(in_channels, n_classes, block_channels=[64, 160, 320, 640, 2560])
def parnet_xl(in_channels, n_classes):
return ParNet(in_channels, n_classes, block_channels=[64, 200, 400, 800, 3200])
if __name__ == '__main__':
model = parnet_s(3, 1000)
model.eval()
print(model)
input = torch.randn(1, 3, 256, 256)
y = model(input)
print(y.size())
|
11530298
|
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from .gcnn import conv, GCNN
from ..tf.convert import sparse_to_tensor
class GCNNTest(tf.test.TestCase):
def test_conv(self):
adj = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
adj = sp.coo_matrix(adj, dtype=np.float32)
adj_norm = adj + sp.eye(3, dtype=np.float32)
degree = np.array(adj_norm.sum(1)).flatten()
degree = np.power(degree, -0.5)
degree = sp.diags(degree)
adj_norm = degree.dot(adj_norm).dot(degree)
adj = sparse_to_tensor(adj)
features = [[1, 2], [3, 4], [5, 6]]
features_np = np.array(features, dtype=np.float32)
features_tf = tf.constant(features, dtype=tf.float32)
weights = [[0.3], [0.7]]
weights_np = np.array(weights, dtype=np.float32)
weights_tf = tf.constant(weights, dtype=tf.float32)
expected = adj_norm.dot(features_np).dot(weights_np)
with self.test_session():
self.assertAllEqual(
conv(features_tf, adj, weights_tf).eval(), expected)
def test_init(self):
layer = GCNN(1, 2, adjs=None)
self.assertEqual(layer.name, 'gcnn_1')
self.assertIsNone(layer.adjs)
self.assertEqual(layer.vars['weights'].get_shape(), [1, 2])
self.assertEqual(layer.vars['bias'].get_shape(), [2])
def test_call(self):
adj = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
adj = sp.coo_matrix(adj, dtype=np.float32)
adj = sparse_to_tensor(adj)
layer = GCNN(2, 3, [adj, adj], name='call')
input_1 = [[1, 2], [3, 4], [5, 6]]
input_1 = tf.constant(input_1, dtype=tf.float32)
input_2 = [[7, 8], [9, 10], [11, 12]]
input_2 = tf.constant(input_2, dtype=tf.float32)
inputs = [input_1, input_2]
outputs = layer(inputs)
expected_1 = conv(input_1, adj, layer.vars['weights'])
expected_1 = tf.nn.bias_add(expected_1, layer.vars['bias'])
expected_1 = tf.nn.relu(expected_1)
expected_2 = conv(input_2, adj, layer.vars['weights'])
expected_2 = tf.nn.bias_add(expected_2, layer.vars['bias'])
expected_2 = tf.nn.relu(expected_2)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].eval().shape, (3, 3))
self.assertEqual(outputs[1].eval().shape, (3, 3))
self.assertAllEqual(outputs[0].eval(), expected_1.eval())
def test_call_without_bias(self):
adj = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
adj = sp.coo_matrix(adj, dtype=np.float32)
adj = sparse_to_tensor(adj)
layer = GCNN(2, 3, [adj, adj], bias=False, name='call_without_bias')
input_1 = [[1, 2], [3, 4], [5, 6]]
input_1 = tf.constant(input_1, dtype=tf.float32)
input_2 = [[7, 8], [9, 10], [11, 12]]
input_2 = tf.constant(input_2, dtype=tf.float32)
inputs = [input_1, input_2]
outputs = layer(inputs)
expected_1 = conv(input_1, adj, layer.vars['weights'])
expected_1 = tf.nn.relu(expected_1)
expected_2 = conv(input_2, adj, layer.vars['weights'])
expected_2 = tf.nn.relu(expected_2)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].eval().shape, (3, 3))
self.assertEqual(outputs[1].eval().shape, (3, 3))
self.assertAllEqual(outputs[0].eval(), expected_1.eval())
|
11530299
|
from crianza.compiler import (check, compile)
from crianza.errors import CompileError, MachineError, ParseError
from crianza.instructions import Instruction, lookup
from crianza.optimizer import constant_fold, optimized
from crianza.parser import (parse, parse_stream)
from crianza.repl import repl, print_code
from crianza.stack import Stack
from crianza.interpreter import (
Machine,
code_to_string,
eval,
execute,
isbinary,
isbool,
isconstant,
isnumber,
isstring,
)
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2015 <NAME>"
__email__ = "<EMAIL>"
__license__ = "BSD 3-Clause"
__version__ = "0.1.9"
__all__ = [
"CompileError",
"Instruction",
"Machine",
"MachineError",
"ParseError",
"Stack",
"check",
"code_to_string",
"compile",
"constant_fold",
"eval",
"execute",
"isbinary",
"isbool",
"isconstant",
"isnumber",
"isstring",
"lookup",
"optimized",
"parse",
"parse_stream",
"print_code",
"repl",
]
|
11530330
|
import numpy as np
mdhLC = [("ushLine", "<u2"),
("ushAcquisition", "<u2"),
("ushSlice", "<u2"),
("ushPartition", "<u2"),
("ushEcho", "<u2"),
("ushPhase", "<u2"),
("ushRepetition", "<u2"),
("ushSet", "<u2"),
("ushSeg", "<u2"),
("ushIda", "<u2"),
("ushIdb", "<u2"),
("ushIdc", "<u2"),
("ushIdd", "<u2"),
("ushIde", "<u2")]
mdhCutOff = [("ushPre", "<u2"),
("ushPost", "<u2")]
mdhSlicePosVec = [("flSag", "<f4"),
("flCor", "<f4"),
("flTra", "<f4")]
mdhSliceData = [("sSlicePosVec", mdhSlicePosVec),
("aflQuaternion", "<f4", 4)]
# This is the VB line header
vb17_header = [("ulFlagsAndDMALength", "<u4"),
("lMeasUID", "<i4"),
("ulScanCounter", "<u4"),
("ulTimeStamp", "<u4"),
("ulPMUTimeStamp", "<u4"),
("aulEvalInfoMask", "<u8"),
("ushSamplesInScan", "<u2"),
("ushUsedChannels", "<u2"),
("sLC", mdhLC),
("sCutOff", mdhCutOff),
("ushKSpaceCentreColumn", "<u2"),
("ushCoilSelect", "<u2"),
("fReadOutOffcentre", "<f4"),
("ulTimeSinceLastRF", "<u4"),
("ushKSpaceCentreLineNo", "<u2"),
("ushKSpaceCentrePartitionNo", "<u2"),
("aushIceProgramPara", "<u2", 4),
("aushFreePara", "<u2", 4),
("sSliceData", mdhSliceData),
("ushChannelId", "<u2"),
("ushPTABPosNeg", "<u2")]
# VD/VE: One scan header for all channels
scan_header = [("ulFlagsAndDMALength", "<u4"),
("lMeasUID", "<i4"),
("ulScanCounter", "<u4"),
("ulTimeStamp", "<u4"),
("ulPMUTimeStamp", "<u4"),
("ushSystemType", "<u2"),
("ulPTABPosDelay", "<u2"),
("lPTABPosX", "<i4"),
("lPTABPosY", "<i4"),
("lPTABPosZ", "<i4"),
("ulReserved1", "<i4"),
("aulEvalInfoMask", "<u8"),
("ushSamplesInScan", "<u2"),
("ushUsedChannels", "<u2"),
("sLC", mdhLC),
("sCutOff", mdhCutOff),
("ushKSpaceCentreColumn", "<u2"),
("ushCoilSelect", "<u2"),
("fReadOutOffcentre", "<f4"),
("ulTimeSinceLastRF", "<u4"),
("ushKSpaceCentreLineNo", "<u2"),
("ushKSpaceCentrePartitionNo", "<u2"),
("sSliceData", mdhSliceData),
("aushIceProgramPara", "<u2", 24),
("aushReservedPara", "<u2", 4),
("ushApplicationCounter", "<u2"),
("ushApplicationMask", "<u2"),
("ulCRC", "<u4")]
# VD/VE: One channel header per channel
channel_header = [("ulTypeAndChannelLength", "<u4"),
("lMeasUID", "<i4"),
("ulScanCounter", "<u4"),
("ulReserved1", "<i4"),
("ulSequenceTime", "<u4"),
("ulUnused2", "<u4"),
("ulChannelId", "<u2"),
("ulUnused3", "<u2"),
("ulCRC", "<u4")]
vb17_hdr_type = np.dtype(vb17_header)
scan_hdr_type = np.dtype(scan_header)
channel_hdr_type = np.dtype(channel_header)
mask_id = (
'ACQEND', # last scan
'RTFEEDBACK', # Realtime feedback scan
    'HPFEEDBACK',  # High performance feedback scan
'ONLINE', # processing should be done online
'OFFLINE', # processing should be done offline
    'SYNCDATA',  # readout contains synchronous data
'noname6',
'noname7',
    'LASTSCANINCONCAT',  # Flag for last scan in concatenation
'noname9',
'RAWDATACORRECTION', # Correct with the rawdata corr. factor
'LASTSCANINMEAS', # Flag for last scan in measurement
'SCANSCALEFACTOR', # Flag for scan specific additional scale
    '2NDHADAMARPULSE',  # 2nd RF excitation of HADAMAR
'REFPHASESTABSCAN', # reference phase stabilization scan
'PHASESTABSCAN', # phase stabilization scan
'D3FFT', # execute 3D FFT
'SIGNREV', # sign reversal
'PHASEFFT', # execute phase fft
'SWAPPED', # swapped phase/readout direction
'POSTSHAREDLINE', # shared line
'PHASCOR', # phase correction data
'PATREFSCAN', # additional scan for PAT ref line/partition
'PATREFANDIMASCAN', # PAT ref that is also used as image scan
'REFLECT', # reflect line
'NOISEADJSCAN', # noise adjust scan
'SHARENOW', # lines may be shared between e.g. phases
'LASTMEASUREDLINE', # indicates last meas line of e.g. phases
'FIRSTSCANINSLICE', # first scan in slice; req for timestamps
'LASTSCANINSLICE', # last scan in slice; req for timestamps
'TREFFECTIVEBEGIN', # indicates the TReff begin (triggered)
'TREFFECTIVEEND', # indicates the TReff end (triggered)
'REF_POSITION', # indicates ref pos for move during scan acq.
'SLC_AVERAGED', # indicates averaged slice for sl. partial av
'TAGFLAG1', # adjust scans
'CT_NORMALIZE', # Marks corr maps scan for TimCT-Prescan norm
'SCAN_FIRST', # Marks the first scan of a particular map
'SCAN_LAST', # Marks the last scan of a particular map
'SLICE_ACCEL_REFSCAN', # single-band ref. scan for multi-band
'SLICE_ACCEL_PHASCOR', # additional phase corr. in multi-band
'FIRST_SCAN_IN_BLADE', # Marks the first line of a blade
'LAST_SCAN_IN_BLADE', # Marks the last line of a blade
'LAST_BLADE_IN_TR', # Marks all lin. of last BLADE in each TR
'PACE', # Distinguishes PACE scans from non PACE scans.
'RETRO_LASTPHASE', # Marks the last phase in a heartbeat
'RETRO_ENDOFMEAS', # Marks an ADC at end of measurement
'RETRO_REPEATTHISHEARTBEAT', # Repeat the current heartbeat
'RETRO_REPEATPREVHEARTBEAT', # Repeat the previous heartbeat
'RETRO_ABORTSCANNOW', # Just abort everything
'RETRO_LASTHEARTBEAT', # adc is from last heartbeat (a dummy)
'RETRO_DUMMYSCAN', # adc is just a dummy scan, throw it away
'RETRO_ARRDETDISABLED', # Disable all arrhythmia detection
'B1_CONTROLLOOP', # readout to be used for B1 Control Loop
'SKIP_ONLINE_PHASCOR', # scans not to be online phase corr.
'SKIP_REGRIDDING', # Marks scans not to be regridded
'MDH_VOP', # Marks scans to be used for VOP based RF monitoring
'noname57',
'noname58',
'noname59',
'noname60',
'WIP_1', # Mark scans for WIP application "type 1"
    'WIP_2',  # Mark scans for WIP application "type 2"
    'WIP_3'  # Mark scans for WIP application "type 3"
)
# create dict for faster access by name
mask_dict = {item: key for key, item in enumerate(mask_id)}
# helper function (copied from mdb class)
def unpack_bits(infomask):
    # np.unpackbits only supports uint8 input, so expand the mask manually
return np.bitwise_and(
infomask, 2**np.arange(8*infomask.nbytes)).astype(bool)
def is_flag_set(mdh, flag):
return bool(int(mdh['aulEvalInfoMask']) & 1 << mask_dict[flag])
def get_flag(mdh, flag):
return is_flag_set(mdh, flag)
def set_flag(mdh, flag, val):
if val:
add_flag(mdh, flag)
else:
remove_flag(mdh, flag)
def add_flag(mdh, flag):
mdh['aulEvalInfoMask'] |= np.uint64(1 << mask_dict[flag])
def remove_flag(mdh, flag):
mdh['aulEvalInfoMask'] &= ~np.uint64(1 << mask_dict[flag])
def get_flags(mdh):
mask = unpack_bits(mdh['aulEvalInfoMask'])
return dict(zip(mask_id, mask))
def get_active_flags(mdh):
return [key for key, item in get_flags(mdh).items() if item]
def set_flags(mdh, flags):
if isinstance(flags, list):
for key in flags:
set_flag(mdh, key, True)
elif isinstance(flags, dict):
for key, item in flags:
set_flag(mdh, key, item)
else:
raise ValueError
def clear_all_flags(mdh):
mdh['aulEvalInfoMask'] = 0
def is_image_scan(mdh):
disqualifier = [
'ACQEND', 'RTFEEDBACK', 'HPFEEDBACK', 'SYNCDATA', 'REFPHASESTABSCAN',
'PHASESTABSCAN', 'PHASCOR', 'NOISEADJSCAN', 'noname60']
for name in disqualifier:
if is_flag_set(mdh, name):
return False
# check for patref scan
if is_flag_set(mdh, 'PATREFSCAN')\
and not is_flag_set(mdh, 'PATREFANDIMASCAN'):
return False
return True
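if __name__ == '__main__':
    # Hypothetical smoke test (illustrative, not part of the original
    # module): build an empty VD/VE scan header and exercise the flag
    # helpers defined above.
    mdh = np.zeros(1, dtype=scan_hdr_type)[0]
    add_flag(mdh, 'ONLINE')
    add_flag(mdh, 'PHASCOR')
    assert is_flag_set(mdh, 'ONLINE')
    assert not is_image_scan(mdh)  # PHASCOR disqualifies image scans
    remove_flag(mdh, 'PHASCOR')
    assert is_image_scan(mdh)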
|
11530466
|
from __future__ import print_function
import sys
from metapub import PubMedFetcher
from metapub import FindIt
# examples of different formats:
# 18612690: PubMedArticle with multiple AbstractText sections
# 1234567: PubMedArticle with no abstract whatsoever
# 20301546: PubMedBookArticle from GeneReviews
####
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.WARNING)
ch = logging.StreamHandler()
logging.getLogger("metapub").setLevel(logging.INFO)
logging.getLogger("metapub").addHandler(ch)
####
try:
pmid = sys.argv[1]
except IndexError:
print('Supply a pubmed ID as the argument to this script.')
print('')
print('Example: python demo_pubmed.py 123456')
sys.exit()
article = PubMedFetcher().article_by_pmid(pmid)
print('')
print(article.pmid, article.title)
print('')
print('authors: %s' % ','.join(article.authors))
print('journal: %s' % article.journal)
print('')
excerpt = '(empty)' if article.abstract is None else article.abstract[:100] + '[...]'
print('abstract: %s' % excerpt)
print('')
print('pii:',str(article.pii))
print('doi:',str(article.doi))
print('pmc:',str(article.pmc))
print('volume:',str(article.volume))
print('issue:',str(article.issue))
print('pages:',str(article.pages))
print('year:',str(article.year))
print('')
print('MeSH headings: ')
for DUI in list(article.mesh.keys()):
    print('\t', DUI, article.mesh[DUI]['descriptor_name'], article.mesh[DUI].get('qualifier_name', ''))
if article.publication_types:
print('\nPublication Type Information')
for pt in list(article.publication_types.keys()):
print('\t', pt, article.publication_types[pt])
if article.chemicals:
print('\nChemical List')
for DUI in list(article.chemicals.keys()):
print('\t', DUI, article.chemicals[DUI]['substance_name'])
if article.grants:
print('\nGrant Information')
    for gr in article.grants:
print('\t', gr)
if article.history:
print('\nArticle History')
for hist in article.history:
print('\t', hist, article.history[hist])
print('')
print('FindIt results:')
source = FindIt(pmid=pmid)
print('\tdoi:', source.doi)
print('\turl:', source.url)
print('\tbackup:', source.backup_url)
print('\treason:', source.reason)
print(article.citation_html)
|
11530476
|
from ..api import rule
from ..api._endpoint import ApiEndpoint, maybe_login_required
from ..entities._entity import NotFound
from ..entities.commit import Commit, CommitSerializer
class CommitListAPI(ApiEndpoint):
serializer = CommitSerializer()
@maybe_login_required
def get(self):
"""
---
description: Get a list of commits.
responses:
"200": "CommitList"
"401": "401"
tags:
- Commits
"""
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits)
class CommitEntityAPI(ApiEndpoint):
serializer = CommitSerializer()
def _get(self, commit_id):
try:
commit = Commit.one(id=commit_id)
except NotFound:
self.abort_404_not_found()
return commit
@maybe_login_required
def get(self, commit_id):
"""
---
description: Get a commit.
responses:
"200": "CommitEntity"
"401": "401"
"404": "404"
parameters:
- name: commit_id
in: path
schema:
type: string
tags:
- Commits
"""
commit = self._get(commit_id)
return self.serializer.one.dump(commit)
commit_entity_view = CommitEntityAPI.as_view("commit")
commit_list_view = CommitListAPI.as_view("commits")
rule(
"/commits/<commit_id>/",
view_func=commit_entity_view,
methods=["GET"],
)
rule(
"/commits/",
view_func=commit_list_view,
methods=["GET"],
)
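# Illustrative request sketch for the routes registered above (the paths
# come from the rule() calls; the commit id is hypothetical):
#   GET /commits/        -> JSON list of up to 500 commits, newest first
#   GET /commits/<id>/   -> one serialized commit, or 404 if unknown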
|
11530481
|
import json
import datetime
import time
import boto3
import os
def train_and_generate_recommendations(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
            # Read the input parameter values: the names of the input and
            # output tables.
_input_table_name = row[1]
_output_table_name = row[2]
# start the SageMaker training job
client = boto3.client('sagemaker')
bucket = os.environ['s3_bucket']
prefix = "training-job-" + time.strftime("%Y%m%d%H%M%S")
s3_output_location = 's3://{}/'.format(bucket)
print(s3_output_location)
training_job_name = prefix
TRAINING_IMAGE_ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_training_job(
TrainingJobName=training_job_name,
HyperParameters=dict(input_table_name=_input_table_name, output_table_name=_output_table_name, region=os.environ['region']),
AlgorithmSpecification={
'TrainingImage': TRAINING_IMAGE_ECR_PATH,
'TrainingInputMode': 'File'
},
RoleArn=SAGEMAKER_ROLE_ARN,
OutputDataConfig={
'S3OutputPath': s3_output_location
},
ResourceConfig={
'InstanceType': 'ml.m5.xlarge',
'InstanceCount': 1,
'VolumeSizeInGB': 10
},
StoppingCondition={
'MaxRuntimeInSeconds': 10000
}
)
training_job_arn = response['TrainingJobArn']
print(training_job_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, training_job_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
def deploy_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
            # Read the input parameter values: the model name and its S3 data URL.
model_name = row[1]
model_data_url = row[2]
            # create the SageMaker model, endpoint config, and endpoint
client = boto3.client('sagemaker')
ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_model(
ModelName=model_name,
PrimaryContainer={
'Image': ECR_PATH,
'ModelDataUrl': model_data_url
},
ExecutionRoleArn=SAGEMAKER_ROLE_ARN
)
print(response)
print("now trying to create endpoint config...")
response = client.create_endpoint_config(
EndpointConfigName=model_name,
ProductionVariants=[
{
'VariantName': 'variant-1',
'ModelName': model_name,
'InitialInstanceCount': 1,
'InstanceType': 'ml.t2.medium'
}
]
)
print(response)
print("now trying to create the endpoint...")
response = client.create_endpoint(
EndpointName=model_name,
EndpointConfigName=model_name
)
endpoint_arn = response['EndpointArn']
print(endpoint_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, endpoint_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
# function that performs real-time prediction
def invoke_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
body = ""
for row in rows:
model_name = row[1]
# extract and transform the user_ids and item_ids posted to csv
body = body + row[2] + "," + row[3] + "\n"
# invoke the SageMaker endpoint
client = boto3.client('sagemaker-runtime')
response = client.invoke_endpoint(
EndpointName=model_name,
Body=body.encode('utf-8'),
ContentType='text/csv'
)
predictions = response["Body"].read().decode('utf-8')
i = 0
array_of_rows_to_return = []
        for prediction in predictions.splitlines():
# Put the returned row number and the returned value into an array.
row_to_return = [i, prediction]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
i = i + 1
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
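# --- Hypothetical local smoke test (illustrative only; the table names and
# the row-batch envelope below mirror the format the handlers parse above,
# but are made up). Actually invoking the handlers requires AWS credentials
# and the s3_bucket / training_image_ecr_path / sagemaker_role_arn / region
# environment variables.
# fake_event = {"body": json.dumps({"data": [[0, "INPUT_TABLE", "OUTPUT_TABLE"]]})}
# print(train_and_generate_recommendations(fake_event, None))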
|
11530497
|
import faker
from unittest import mock
from unittest.mock import patch
from foundations_spec import *
from foundations_core_cli.command_line_interface import CommandLineInterface
from foundations_atlas_cli.sub_parsers.atlas.atlas_parser import AtlasParser
class TestAtlasParser(Spec):
class MockSleep(object):
_epsilon = 0.0001
def __init__(self):
self._time_elapsed = 0
self.time_to_wait = 0
self.callback = lambda: None
def __call__(self, wait_time):
self._time_elapsed += wait_time
if self._time_elapsed >= self.time_to_wait - self._epsilon:
self.callback()
run_file = let_patch_mock('importlib.import_module')
os_file_exists = let_patch_mock('os.path.isfile')
os_chdir = let_patch_mock('os.chdir')
os_kill = let_patch_mock('os.kill')
subprocess_popen = let_patch_mock('subprocess.Popen')
print_mock = let_patch_mock('builtins.print')
exit_mock = let_patch_mock('sys.exit')
open_mock = let_patch_mock('builtins.open')
server_process = let_mock()
requests_post_mock = let_patch_mock('requests.post')
config_manager_mock = let_patch_mock('foundations_contrib.global_state.config_manager')
environment_fetcher_mock = let_patch_mock('foundations_core_cli.environment_fetcher.EnvironmentFetcher.get_all_environments')
find_environment_mock = let_patch_mock('foundations_core_cli.environment_fetcher.EnvironmentFetcher.find_environment')
artifact_downloader_class_mock = let_patch_mock('foundations_contrib.archiving.artifact_downloader.ArtifactDownloader')
artifact_downloader_mock = let_mock()
get_pipeline_archiver_for_job_mock = let_patch_mock('foundations_contrib.archiving.get_pipeline_archiver_for_job')
pipeline_archiver_mock = let_mock()
mock_deploy_job = let_patch_mock('foundations_contrib.job_deployer.deploy_job')
@let
def fake_model_server_pid(self):
import random
return random.randint(1,65000)
@let
def mock_job_id(self):
return self.faker.uuid4()
@let
def mock_model_name(self):
return f'model-{self.faker.random.randint(1000, 9999)}'
@let
def mock_user_provided_model_name(self):
return self.faker.word()
@let_now
def os_cwd(self):
mock = self.patch('os.getcwd')
mock.return_value = '/path/to/where/ever/we/are'
return mock
def _get_mock_file(self):
mock_file_object = Mock()
mock_file_object.__enter__ = lambda x: mock_file_object
mock_file_object.__exit__ = Mock()
return mock_file_object
@let_now
def mock_pid_file(self):
return self._get_mock_file()
@let_now
def sleep_mock(self):
return self.patch('time.sleep', self.MockSleep())
@let
def fake_save_dir(self):
return self.faker.uri_path()
@let
def fake_source_dir(self):
return self.faker.uri_path()
@let
def fake_env(self):
return self.faker.word()
@let
def fake_job_status(self):
status = self.faker.word()
while status == 'queued':
status = self.faker.word()
return status
@let
def server_startup_time(self):
from random import random
between_zero_and_one = random()
return between_zero_and_one * 2.7 + 0.2
@let
def mock_job_deployment(self):
return Mock()
@let
def fake_job_logs(self):
return self.faker.sentence()
@let
def pipeline_context(self):
from foundations_internal.pipeline_context import PipelineContext
return PipelineContext()
@let
def fake_script_file_name(self):
return '{}.py'.format(self.faker.word())
@let
def fake_project_name(self):
return self.faker.word()
@let
def fake_directory(self):
return self.faker.file_path()
@let
def ram(self):
return self.faker.random.random() * 8 + 0.0001
@let
def num_gpus(self):
return self.faker.random_int(0, 8)
@let
def level_1_subparsers_mock(self):
return Mock()
@let
def level_2_subparsers_mock(self):
return Mock()
@let
def level_2_parser_mock(self):
return Mock()
@let
def level_3_parser_mock(self):
return Mock()
@let
def command(self):
return self.faker.word()
def fake_config_path(self, environment):
return 'home/foundations/lou/config/{}.config.yaml'.format(environment)
def test_sub_parser_retrieves_command_line_interface_as_parameter(self):
cli = CommandLineInterface([''])
atlas_sub_parser = AtlasParser(cli)
self.assertTrue(type(atlas_sub_parser._cli) is CommandLineInterface)
def test_sub_parser_setup_parser_on_cli_instantiation(self):
mock_add_parser = self.patch('foundations_atlas_cli.sub_parsers.atlas.atlas_parser.AtlasParser.add_sub_parser')
CommandLineInterface([''])
mock_add_parser.assert_called_once()
@patch('argparse.ArgumentParser')
def test_retrieve_artifact_has_correct_options(self, parser_class_mock):
parser_mock = Mock()
parser_class_mock.return_value = parser_mock
parser_mock.add_subparsers.return_value = self.level_1_subparsers_mock
self.level_1_subparsers_mock.add_parser.return_value = self.level_2_parser_mock
self.level_2_parser_mock.add_subparsers.return_value = self.level_2_subparsers_mock
self.level_2_subparsers_mock.add_parser.return_value = self.level_3_parser_mock
CommandLineInterface([])
parser_class_mock.assert_called_with(prog='foundations')
version_call = call('--version', action='store_true', help='Displays the current Foundations version')
debug_call = call('--debug', action='store_true', help='Sets debug mode for the CLI')
parser_mock.add_argument.assert_has_calls(
[
version_call,
debug_call
]
)
retrieve_call = call('get', help='Get file types from execution environments')
self.level_1_subparsers_mock.add_parser.assert_has_calls([retrieve_call])
retrieve_argument_call = call('job', help='Specify job to retrieve artifacts from')
job_id_call = call('job_id', type=str, help='Specify job uuid of already deployed job')
env_call = call('scheduler_config', type=str, help='Environment to get from')
save_directory_call = call('--save_dir', type=str, default=None, help='Specify local directory path for artifacts to save to. Defaults to directory within current working directory')
source_directory_call = call('--source_dir', type=str, default='', help='Specify relative directory path to download artifacts from. Default will download all artifacts from job')
self.level_2_subparsers_mock.add_parser.assert_has_calls([retrieve_argument_call])
self.level_3_parser_mock.add_argument.assert_has_calls(
[
job_id_call,
env_call,
save_directory_call,
source_directory_call
],
any_order=True
)
def test_retrieve_artifacts_fails_if_missing_environment(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'job', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_retrieve_artifacts_prints_error_if_missing_environment(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'job', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_any_call('Could not find submission configuration with name: `{}`'.format(self.fake_env))
def test_submit_forwards_default_arguments_to_command_line_job_submission(self):
self.patch('foundations_core_cli.job_submission.submit_job.submit', MockCommandLineJobDeployer)
expected_arguments = Mock()
expected_arguments.scheduler_config = self.fake_env
expected_arguments.job_directory = self.fake_directory
expected_arguments.entrypoint = None
expected_arguments.project_name = None
expected_arguments.ram = None
expected_arguments.num_gpus = None
expected_arguments.stream_job_logs = True
expected_arguments.command = [self.command]
CommandLineInterface(['submit', self.fake_env, self.fake_directory, self.command]).execute()
arguments = MockCommandLineJobDeployer.arguments
self._assert_submit_arguments_equal(expected_arguments, arguments)
def test_submit_forwards_specified_arguments_to_command_line_job_submission(self):
self.patch('foundations_core_cli.job_submission.submit_job.submit', MockCommandLineJobDeployer)
expected_arguments = Mock()
expected_arguments.scheduler_config = self.fake_env
expected_arguments.job_directory = self.fake_directory
expected_arguments.entrypoint = self.fake_script_file_name
expected_arguments.project_name = self.fake_project_name
expected_arguments.ram = self.ram
expected_arguments.num_gpus = self.num_gpus
expected_arguments.stream_job_logs = False
expected_arguments.command = [self.command]
command_to_run = [
'submit',
f'--entrypoint={self.fake_script_file_name}',
f'--project-name={self.fake_project_name}',
f'--ram={self.ram}',
f'--num-gpus={self.num_gpus}',
f'--stream-job-logs=False',
self.fake_env,
self.fake_directory,
self.command
]
CommandLineInterface(command_to_run).execute()
arguments = MockCommandLineJobDeployer.arguments
self._assert_submit_arguments_equal(expected_arguments, arguments)
@patch('argparse.ArgumentParser')
def test_retrieve_logs_has_correct_options(self, parser_class_mock):
parser_mock = Mock()
parser_class_mock.return_value = parser_mock
parser_mock.add_subparsers.return_value = self.level_1_subparsers_mock
self.level_1_subparsers_mock.add_parser.return_value = self.level_2_parser_mock
self.level_2_parser_mock.add_subparsers.return_value = self.level_2_subparsers_mock
self.level_2_subparsers_mock.add_parser.return_value = self.level_3_parser_mock
CommandLineInterface([])
parser_class_mock.assert_called_with(prog='foundations')
version_call = call('--version', action='store_true', help='Displays the current Foundations version')
debug_call = call('--debug', action='store_true', help='Sets debug mode for the CLI')
parser_mock.add_argument.assert_has_calls(
[
version_call,
debug_call
]
)
retrieve_call = call('get', help='Get file types from execution environments')
self.level_1_subparsers_mock.add_parser.assert_has_calls([retrieve_call])
retrieve_argument_call = call('logs', help='Get logs for jobs')
env_call = call('scheduler_config', type=str, help='Environment to get from')
job_id_call = call('job_id', type=str, help='Specify job uuid of already deployed job')
self.level_2_subparsers_mock.add_parser.assert_has_calls([retrieve_argument_call])
self.level_3_parser_mock.add_argument.assert_has_calls(
[
job_id_call,
env_call
],
any_order=True
)
def test_get_job_logs_for_environment_that_does_not_exist_prints_error_message(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_any_call('Could not find submission configuration with name: `{}`'.format(self.fake_env))
def test_get_job_logs_for_environment_that_does_not_exist_exits_with_code_1(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_get_job_logs_for_environment_that_exists_for_job_that_does_not_exist_prints_error_message(self):
self._set_job_status(None)
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_called_with('Error: Job `{}` does not exist for environment `{}`'.format(self.mock_job_id, self.fake_env))
def test_get_job_logs_for_environment_that_exists_for_job_that_does_not_exist_exits_with_code_1(self):
self._set_job_status(None)
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_get_job_logs_for_queued_job_prints_error_message(self):
self._set_job_status('queued')
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_called_with('Error: Job `{}` is queued and has not produced any logs'.format(self.mock_job_id))
def test_get_job_logs_for_queued_job_exits_with_code_1(self):
self._set_job_status('queued')
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_get_job_logs_for_job_that_exists_and_is_not_queued_prints_logs(self):
self._set_job_status(self.fake_job_status)
self.mock_job_deployment.get_job_logs.return_value = self.fake_job_logs
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_called_with(self.fake_job_logs)
def test_get_job_logs_for_job_that_exists_and_is_not_queued_does_not_call_exit(self):
self._set_job_status(self.fake_job_status)
self.mock_job_deployment.get_job_logs.return_value = self.fake_job_logs
load_mock = Mock()
self.patch('foundations_core_cli.job_submission.config.load', load_mock)
load_mock.return_value = None
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_not_called()
def _assert_deploy_arguments_equal(self, expected_arguments, actual_arguments):
for attribute_name in ['env', 'job_directory', 'entrypoint', 'project_name', 'ram', 'num_gpus']:
self.assertEqual(getattr(expected_arguments, attribute_name), getattr(actual_arguments, attribute_name))
def _assert_submit_arguments_equal(self, expected_arguments, actual_arguments):
for attribute_name in ['scheduler_config', 'job_directory', 'entrypoint', 'project_name', 'ram', 'num_gpus', 'stream_job_logs', 'command']:
self.assertEqual(getattr(expected_arguments, attribute_name), getattr(actual_arguments, attribute_name))
def _set_run_script_environment(self, environment_to_set):
self.config_manager_mock.__getitem__ = ConditionalReturn()
self.config_manager_mock.__getitem__.return_when(environment_to_set, 'run_script_environment')
def _set_job_status(self, status):
self.mock_job_deployment.get_job_status.return_value = status
mock_job_deployment_class = ConditionalReturn()
mock_job_deployment_class.return_when(self.mock_job_deployment, self.mock_job_id, None, None)
mock_get_item = ConditionalReturn()
mock_get_item.return_when({'deployment_type': mock_job_deployment_class}, 'deployment_implementation')
self.config_manager_mock.__getitem__ = mock_get_item
class MockCommandLineJobDeployer(object):
arguments = None
deploy_called = False
def __init__(self, arguments):
MockCommandLineJobDeployer.arguments = arguments
def deploy(self):
MockCommandLineJobDeployer.deploy_called = True
|
11530516
|
from unittest import mock
from rest_framework import status
from lego.apps.users.models import User
from lego.utils.test_utils import BaseAPITestCase
class ListArticlesTestCase(BaseAPITestCase):
fixtures = ["test_users.yaml"]
url = "/api/v1/files/"
def setUp(self):
self.user = User.objects.first()
def test_post_file_no_auth(self):
res = self.client.post(f"{self.url}")
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_with_no_key(self):
self.client.force_authenticate(self.user)
res = self.client.post(f"{self.url}", data={})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch("lego.apps.files.models.File.create_file")
def test_post_create_file_call(self, mock_create_file):
self.client.force_authenticate(self.user)
key = "myfile.png"
try:
self.client.post(f"{self.url}", data={"key": key})
except Exception:
pass
mock_create_file.assert_not_called()
try:
self.client.post(f"{self.url}", data={"key": key, "public": True})
except Exception:
pass
mock_create_file.assert_called_with(key, self.user, True)
try:
self.client.post(f"{self.url}", data={"key": key, "public": False})
except Exception:
pass
mock_create_file.assert_called_with(key, self.user, False)
|
11530523
|
import collections.abc
from .item import ItemCollection
from .signals import navbar_created
class NavigationBar(collections.abc.Iterable):
"""The navigation bar object."""
def __init__(self, name, items=None, alias=None):
self.name = name
self.items = ItemCollection(items or [])
self.initializers = []
self.alias = alias or {}
# sends signal
navbar_created.send(self.__class__, bar=self)
def __iter__(self):
return iter(self.items)
def initializer(self, fn):
"""Adds a initializer function.
If you want to initialize the navigation bar within a Flask app
context, you can use this decorator.
The decorated function should nave one paramater ``nav`` which is the
bound navigation extension instance.
"""
self.initializers.append(fn)
return fn
def alias_item(self, alias):
"""Gets an item by its alias."""
ident = self.alias[alias]
return self.items[ident]
@property
def current_item(self):
"""Get the current active navigation Item if any.
.. versionadded:: 0.2.0
"""
return self._get_current_item(self)
def _get_current_item(self, items):
for item in items:
if item.is_active:
return item
else:
nested = self._get_current_item(item.items)
if nested:
return nested
|
11530539
|
import netbox_agent.dmidecode as dmidecode
from netbox_agent.config import config
from netbox_agent.config import netbox_instance as nb
from netbox_agent.inventory import Inventory
from netbox_agent.location import Datacenter, Rack, Tenant
from netbox_agent.misc import create_netbox_tags, get_device_role, get_device_type, get_device_platform
from netbox_agent.network import ServerNetwork
from netbox_agent.power import PowerSupply
from pprint import pprint
import subprocess
import logging
import socket
import sys
class ServerBase():
def __init__(self, dmi=None):
if dmi:
self.dmi = dmi
else:
self.dmi = dmidecode.parse()
self.baseboard = dmidecode.get_by_type(self.dmi, 'Baseboard')
self.bios = dmidecode.get_by_type(self.dmi, 'BIOS')
self.chassis = dmidecode.get_by_type(self.dmi, 'Chassis')
self.system = dmidecode.get_by_type(self.dmi, 'System')
self.device_platform = get_device_platform(config.device.platform)
self.network = None
self.tags = list(set([
x.strip() for x in config.device.tags.split(',') if x.strip()
])) if config.device.tags else []
self.nb_tags = list(create_netbox_tags(self.tags))
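# config.device.custom_fields is a comma-separated list of key=value
# pairs, e.g. "owner=infra,bios=1.2.3" (illustrative values).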
config_cf = set([
f.strip() for f in config.device.custom_fields.split(",")
if f.strip()
])
self.custom_fields = {}
self.custom_fields.update(dict([
(k.strip(), v.strip()) for k, v in
[f.split("=", 1) for f in config_cf]
]))
def get_tenant(self):
tenant = Tenant()
return tenant.get()
def get_netbox_tenant(self):
tenant = self.get_tenant()
if tenant is None:
return None
nb_tenant = nb.tenancy.tenants.get(
slug=self.get_tenant()
)
return nb_tenant
def get_datacenter(self):
dc = Datacenter()
return dc.get()
def get_netbox_datacenter(self):
dc = self.get_datacenter()
if dc is None:
logging.error("Specificing a datacenter (Site) is mandatory in Netbox")
sys.exit(1)
nb_dc = nb.dcim.sites.get(
slug=dc,
)
if nb_dc is None:
logging.error("Site (slug: {}) has not been found".format(dc))
sys.exit(1)
return nb_dc
def update_netbox_location(self, server):
dc = self.get_datacenter()
nb_rack = self.get_netbox_rack()
nb_dc = self.get_netbox_datacenter()
update = False
if dc and server.site and server.site.slug != nb_dc.slug:
logging.info('Datacenter location has changed from {} to {}, updating'.format(
server.site.slug,
nb_dc.slug,
))
update = True
server.site = nb_dc.id
if (
server.rack
and nb_rack
and server.rack.id != nb_rack.id
):
logging.info('Rack location has changed from {} to {}, updating'.format(
server.rack,
nb_rack,
))
update = True
server.rack = nb_rack
if nb_rack is None:
server.face = None
server.position = None
return update, server
def update_netbox_expansion_location(self, server, expansion):
update = False
if expansion.tenant != server.tenant:
expansion.tenant = server.tenant
update = True
if expansion.site != server.site:
expansion.site = server.site
update = True
if expansion.rack != server.rack:
expansion.rack = server.rack
update = True
return update
def get_rack(self):
rack = Rack()
return rack.get()
def get_netbox_rack(self):
rack = self.get_rack()
datacenter = self.get_netbox_datacenter()
if not rack:
return None
if rack and not datacenter:
logging.error("Can't get rack if no datacenter is configured or found")
sys.exit(1)
return nb.dcim.racks.get(
name=rack,
site_id=datacenter.id,
)
def get_product_name(self):
"""
Return the Chassis Name from dmidecode info
"""
return self.system[0]['Product Name'].strip()
def get_service_tag(self):
"""
Return the Service Tag from dmidecode info
"""
return self.system[0]['Serial Number'].strip()
def get_expansion_service_tag(self):
"""
Return the virtual Service Tag from dmidecode info host
with 'expansion'
"""
return self.system[0]['Serial Number'].strip() + " expansion"
def get_hostname(self):
if config.hostname_cmd is None:
return '{}'.format(socket.gethostname())
return subprocess.getoutput(config.hostname_cmd)
def is_blade(self):
raise NotImplementedError
def get_blade_slot(self):
raise NotImplementedError
def get_chassis(self):
raise NotImplementedError
def get_chassis_name(self):
raise NotImplementedError
def get_chassis_service_tag(self):
raise NotImplementedError
def get_bios_version(self):
raise NotImplementedError
def get_bios_version_attr(self):
raise NotImplementedError
def get_bios_release_date(self):
raise NotImplementedError
def get_power_consumption(self):
raise NotImplementedError
def get_expansion_product(self):
raise NotImplementedError
def _netbox_create_chassis(self, datacenter, tenant, rack):
device_type = get_device_type(self.get_chassis())
device_role = get_device_role(config.device.chassis_role)
serial = self.get_chassis_service_tag()
logging.info('Creating chassis blade (serial: {serial})'.format(
serial=serial))
new_chassis = nb.dcim.devices.create(
name=self.get_chassis_name(),
device_type=device_type.id,
serial=serial,
device_role=device_role.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
custom_fields=self.custom_fields,
)
return new_chassis
def _netbox_create_blade(self, chassis, datacenter, tenant, rack):
device_role = get_device_role(config.device.blade_role)
device_type = get_device_type(self.get_product_name())
serial = self.get_service_tag()
hostname = self.get_hostname()
logging.info(
'Creating blade (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
serial=serial, hostname=hostname, chassis_serial=chassis.serial
))
new_blade = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
parent_device=chassis.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
custom_fields=self.custom_fields,
)
return new_blade
def _netbox_create_blade_expansion(self, chassis, datacenter, tenant, rack):
device_role = get_device_role(config.device.blade_role)
device_type = get_device_type(self.get_expansion_product())
serial = self.get_expansion_service_tag()
hostname = self.get_hostname() + " expansion"
logging.info(
'Creating expansion (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
serial=serial, hostname=hostname, chassis_serial=chassis.serial
))
new_blade = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
parent_device=chassis.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
)
return new_blade
def _netbox_deduplicate_server(self):
serial = self.get_service_tag()
hostname = self.get_hostname()
server = nb.dcim.devices.get(name=hostname)
if server and server.serial != serial:
server.delete()
def _netbox_create_server(self, datacenter, tenant, rack):
device_role = get_device_role(config.device.server_role)
device_type = get_device_type(self.get_product_name())
if not device_type:
raise Exception('Device type "{}" doesn\'t exist'.format(self.get_product_name()))
serial = self.get_service_tag()
hostname = self.get_hostname()
logging.info('Creating server (serial: {serial}) {hostname}'.format(
serial=serial, hostname=hostname))
new_server = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
platform=self.device_platform,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
)
return new_server
def get_netbox_server(self, expansion=False):
if expansion is False:
return nb.dcim.devices.get(serial=self.get_service_tag())
else:
return nb.dcim.devices.get(serial=self.get_expansion_service_tag())
def _netbox_set_or_update_blade_slot(self, server, chassis, datacenter):
# before everything check if right chassis
actual_device_bay = server.parent_device.device_bay \
if server.parent_device else None
actual_chassis = actual_device_bay.device \
if actual_device_bay else None
slot = self.get_blade_slot()
if actual_chassis and \
actual_chassis.serial == chassis.serial and \
actual_device_bay.name == slot:
return
real_device_bays = nb.dcim.device_bays.filter(
device_id=chassis.id,
name=slot,
)
if real_device_bays:
logging.info(
'Setting device ({serial}) new slot on {slot} '
'(Chassis {chassis_serial})..'.format(
serial=server.serial, slot=slot, chassis_serial=chassis.serial
))
# reset actual device bay if set
if actual_device_bay:
# Forces the evaluation of the installed_device attribute to
# workaround a bug probably due to lazy loading optimization
# that prevents the value change detection
actual_device_bay.installed_device
actual_device_bay.installed_device = None
actual_device_bay.save()
# setup new device bay
real_device_bay = next(real_device_bays)
real_device_bay.installed_device = server
real_device_bay.save()
else:
logging.error('Could not find slot {slot} for chassis'.format(
slot=slot
))
def _netbox_set_or_update_blade_expansion_slot(self, expansion, chassis, datacenter):
# before everything check if right chassis
actual_device_bay = expansion.parent_device.device_bay if expansion.parent_device else None
actual_chassis = actual_device_bay.device if actual_device_bay else None
slot = self.get_blade_expansion_slot()
if actual_chassis and \
actual_chassis.serial == chassis.serial and \
actual_device_bay.name == slot:
return
real_device_bays = nb.dcim.device_bays.filter(
device_id=chassis.id,
name=slot,
)
if not real_device_bays:
logging.error('Could not find slot {slot} expansion for chassis'.format(
slot=slot
))
return
logging.info(
'Setting device expansion ({serial}) new slot on {slot} '
'(Chassis {chassis_serial})..'.format(
serial=expansion.serial, slot=slot, chassis_serial=chassis.serial
))
# reset actual device bay if set
if actual_device_bay:
# Forces the evaluation of the installed_device attribute to
# workaround a bug probably due to lazy loading optimization
# that prevents the value change detection
actual_device_bay.installed_device
actual_device_bay.installed_device = None
actual_device_bay.save()
# setup new device bay
real_device_bay = next(real_device_bays)
real_device_bay.installed_device = expansion
real_device_bay.save()
def netbox_create_or_update(self, config):
"""
Netbox method to create or update info about our server/blade
Handle:
* new chassis for a blade
* new slot for a blade
* hostname update
* Network infos
* Inventory management
* PSU management
"""
datacenter = self.get_netbox_datacenter()
rack = self.get_netbox_rack()
tenant = self.get_netbox_tenant()
if config.purge_old_devices:
self._netbox_deduplicate_server()
if self.is_blade():
chassis = nb.dcim.devices.get(
serial=self.get_chassis_service_tag()
)
# Chassis does not exist
if not chassis:
chassis = self._netbox_create_chassis(datacenter, tenant, rack)
server = nb.dcim.devices.get(serial=self.get_service_tag())
if not server:
server = self._netbox_create_blade(chassis, datacenter, tenant, rack)
# Set slot for blade
self._netbox_set_or_update_blade_slot(server, chassis, datacenter)
else:
server = nb.dcim.devices.get(serial=self.get_service_tag())
if not server:
server = self._netbox_create_server(datacenter, tenant, rack)
logging.debug('Updating Server...')
# check network cards
if config.register or config.update_all or config.update_network:
self.network = ServerNetwork(server=self)
self.network.create_or_update_netbox_network_cards()
update_inventory = config.inventory and (config.register or
config.update_all or config.update_inventory)
# update inventory if feature is enabled
self.inventory = Inventory(server=self)
if update_inventory:
self.inventory.create_or_update()
# update psu
if config.register or config.update_all or config.update_psu:
self.power = PowerSupply(server=self)
self.power.create_or_update_power_supply()
self.power.report_power_consumption()
expansion = nb.dcim.devices.get(serial=self.get_expansion_service_tag())
if self.own_expansion_slot() and config.expansion_as_device:
logging.debug('Update Server expansion...')
if not expansion:
expansion = self._netbox_create_blade_expansion(chassis, datacenter, tenant, rack)
# set slot for blade expansion
self._netbox_set_or_update_blade_expansion_slot(expansion, chassis, datacenter)
if update_inventory:
# Updates expansion inventory
inventory = Inventory(server=self, update_expansion=True)
inventory.create_or_update()
elif self.own_expansion_slot() and expansion:
expansion.delete()
expansion = None
update = 0
# for every other specs
# check hostname
if server.name != self.get_hostname():
server.name = self.get_hostname()
update += 1
server_tags = sorted(set([x.name for x in server.tags]))
tags = sorted(set(self.tags))
if server_tags != tags:
new_tags_ids = [x.id for x in self.nb_tags]
if not config.preserve_tags:
server.tags = new_tags_ids
else:
server_tags_ids = [x.id for x in server.tags]
server.tags = sorted(set(new_tags_ids + server_tags_ids))
update += 1
if server.custom_fields != self.custom_fields:
server.custom_fields = self.custom_fields
update += 1
if config.update_all or config.update_location:
ret, server = self.update_netbox_location(server)
update += ret
if server.platform != self.device_platform:
server.platform = self.device_platform
update += 1
if update:
server.save()
if expansion:
update = 0
expansion_name = server.name + ' expansion'
if expansion.name != expansion_name:
expansion.name = expansion_name
update += 1
if self.update_netbox_expansion_location(server, expansion):
update += 1
if update:
expansion.save()
logging.debug('Finished updating Server!')
def print_debug(self):
self.network = ServerNetwork(server=self)
print('Datacenter:', self.get_datacenter())
print('Netbox Datacenter:', self.get_netbox_datacenter())
print('Rack:', self.get_rack())
print('Netbox Rack:', self.get_netbox_rack())
print('Is blade:', self.is_blade())
print('Got expansion:', self.own_expansion_slot())
print('Product Name:', self.get_product_name())
print('Platform:', self.device_platform)
print('Chassis:', self.get_chassis())
print('Chassis service tag:', self.get_chassis_service_tag())
print('Service tag:', self.get_service_tag())
print('NIC:')
pprint(self.network.get_network_cards())
def own_expansion_slot(self):
"""
Indicates if the device hosts an expansion card
"""
return False
def own_gpu_expansion_slot(self):
"""
Indicates if the device hosts a GPU expansion card
"""
return False
def own_drive_expansion_slot(self):
"""
Indicates if the device hosts a drive expansion bay
"""
return False
|
11530591
|
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
@app.get('/')
def main():
return 'root url'
@app.get('/json')
def json():
return {
"result": "json",
"sub_result": {"sub": "json"}
}
class Request(BaseModel):
id: int
body: str
@app.post('/post')
def post(request: Request):
return request
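# A quick sanity-check sketch using FastAPI's TestClient (not part of the
# original app):
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   assert client.get('/json').json() == {"result": "json", "sub_result": {"sub": "json"}}
#   assert client.post('/post', json={"id": 1, "body": "hi"}).json() == {"id": 1, "body": "hi"}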
|
11530596
|
import re
import copy
import inspect
import ast
import textwrap
"""
Utilities for manipulating the Abstract Syntax Tree of Python constructs
"""
class NameVisitor(ast.NodeVisitor):
"""
NodeVisitor that builds a set of all of the named identifiers in an AST
"""
def __init__(self, *args, **kwargs):
super(NameVisitor, self).__init__(*args, **kwargs)
self.names = set()
def visit_Name(self, node):
self.names.add(node.id)
def visit_arg(self, node):
if hasattr(node, 'arg'):
self.names.add(node.arg)
elif hasattr(node, 'id'):
self.names.add(node.id)
def get_new_names(self, num_names):
"""
Returns a list of new names that are not already present in the AST.
New names will have the form _N, for N a non-negative integer. If the
AST has no existing identifiers of this form, then the returned names
will start at 0 ('_0', '_1', '_2'). If the AST already has identifiers
of this form, then the names returned will not include the existing
identifiers.
Parameters
----------
num_names: int
The number of new names to return
Returns
-------
list of str
"""
prop_re = re.compile(r"^_(\d+)$")
matching_names = [n for n in self.names if prop_re.match(n)]
if matching_names:
start_number = max([int(n[1:]) for n in matching_names]) + 1
else:
start_number = 0
return ["_" + str(n) for n in
range(start_number, start_number + num_names)]
class ExpandVarargTransformer(ast.NodeTransformer):
"""
Node transformer that replaces the starred use of a variable in an AST
with a collection of unstarred named variables.
"""
def __init__(self, starred_name, expand_names, *args, **kwargs):
"""
Parameters
----------
starred_name: str
The name of the starred variable to replace
expand_names: list of str
List of the new names that should be used to replace the starred
variable
"""
super(ExpandVarargTransformer, self).__init__(*args, **kwargs)
self.starred_name = starred_name
self.expand_names = expand_names
class ExpandVarargTransformerStarred(ExpandVarargTransformer):
# Python 3
def visit_Starred(self, node):
if node.value.id == self.starred_name:
return [ast.Name(id=name, ctx=node.ctx) for name in
self.expand_names]
else:
return node
class ExpandVarargTransformerCallArg(ExpandVarargTransformer):
# Python 2
def visit_Call(self, node):
if getattr(node, 'starargs', None) and node.starargs.id == self.starred_name:
node.starargs = None
node.args.extend([ast.Name(id=name, ctx=ast.Load())
for name in self.expand_names])
return node
else:
return node
def function_to_ast(fn):
"""
Get the AST representation of a function
"""
# Get source code for function
# Dedent is needed if this is a nested function
fn_source = textwrap.dedent(inspect.getsource(fn))
# Parse function source code into an AST
fn_ast = ast.parse(fn_source)
# Note: ast.parse returns a Module node; callers extract body[0] when
# they need the FunctionDef itself.
return fn_ast
def ast_to_source(tree):
"""Convert an AST to a source code string using the astor package"""
import astor
return astor.to_source(tree)
def compile_function_ast(fn_ast):
"""
Compile function AST into a code object suitable for use in eval/exec
"""
assert isinstance(fn_ast, ast.Module)
fndef_ast = fn_ast.body[0]
assert isinstance(fndef_ast, ast.FunctionDef)
return compile(fn_ast, "<%s>" % fndef_ast.name, mode='exec')
def function_ast_to_function(fn_ast, stacklevel=1):
# Validate
assert isinstance(fn_ast, ast.Module)
fndef_ast = fn_ast.body[0]
assert isinstance(fndef_ast, ast.FunctionDef)
# Compile AST to code object
code = compile_function_ast(fn_ast)
# Evaluate the function in a scope that includes the globals and
# locals of desired frame.
current_frame = inspect.currentframe()
eval_frame = current_frame
for _ in range(stacklevel):
eval_frame = eval_frame.f_back
eval_locals = eval_frame.f_locals
eval_globals = eval_frame.f_globals
del current_frame
scope = copy.copy(eval_globals)
scope.update(eval_locals)
# Evaluate function in scope
eval(code, scope)
# Return the newly evaluated function from the scope
return scope[fndef_ast.name]
def _build_arg(name):
try:
# Python 3
return ast.arg(arg=name)
except AttributeError:
# Python 2
return ast.Name(id=name, ctx=ast.Param())
def expand_function_ast_varargs(fn_ast, expand_number):
"""
Given a function AST that uses a variable-length positional argument
(e.g. *args), return a function AST that replaces the use of this
argument with one or more fixed arguments.
To be supported, the function must have a starred argument in its
signature, and it may only use this argument in starred form as the
input to other functions.
For example, suppose expand_number is 3 and fn_ast is an AST
representing this function...
def my_fn1(a, b, *args):
print(a, b)
other_fn(a, b, *args)
Then this function will return the AST of a function equivalent to...
def my_fn1(a, b, _0, _1, _2):
print(a, b)
other_fn(a, b, _0, _1, _2)
If the input function uses `args` for anything other than passing it to
other functions in starred form, an error will be raised.
Parameters
----------
fn_ast: ast.Module
expand_number: int
Returns
-------
ast.Module
"""
assert isinstance(fn_ast, ast.Module)
# Copy ast so we don't modify the input
fn_ast = copy.deepcopy(fn_ast)
# Extract function definition
fndef_ast = fn_ast.body[0]
assert isinstance(fndef_ast, ast.FunctionDef)
# Get function args
fn_args = fndef_ast.args
# Function variable arity argument
fn_vararg = fn_args.vararg
# Require vararg
if not fn_vararg:
raise ValueError("""\
Input function AST does not have a variable length positional argument
(e.g. *args) in the function signature""")
assert fn_vararg
# Get vararg name
if isinstance(fn_vararg, str):
vararg_name = fn_vararg
else:
vararg_name = fn_vararg.arg
# Compute new unique names to use in place of the variable argument
before_name_visitor = NameVisitor()
before_name_visitor.visit(fn_ast)
expand_names = before_name_visitor.get_new_names(expand_number)
# Replace use of *args in function body
if hasattr(ast, "Starred"):
# Python 3
expand_transformer = ExpandVarargTransformerStarred
else:
# Python 2
expand_transformer = ExpandVarargTransformerCallArg
new_fn_ast = expand_transformer(
vararg_name, expand_names
).visit(fn_ast)
new_fndef_ast = new_fn_ast.body[0]
# Replace vararg with additional args in function signature
new_fndef_ast.args.args.extend(
[_build_arg(name=name) for name in expand_names]
)
new_fndef_ast.args.vararg = None
# Run a new NameVisitor and see if there were any other non-starred uses
# of the variable length argument. If so, raise an exception
after_name_visitor = NameVisitor()
after_name_visitor.visit(new_fn_ast)
if vararg_name in after_name_visitor.names:
raise ValueError("""\
The variable length positional argument {n} is used in an unsupported context
""".format(n=vararg_name))
# Remove decorators if present to avoid recursion
fndef_ast.decorator_list = []
# Add missing source code locations
ast.fix_missing_locations(new_fn_ast)
# Return result
return new_fn_ast
def expand_varargs(expand_number):
"""
Decorator to expand the variable length (starred) argument in a function
signature with a fixed number of arguments.
Parameters
----------
expand_number: int
The number of fixed arguments that should replace the variable length
argument
Returns
-------
function
Decorator Function
"""
if not isinstance(expand_number, int) or expand_number < 0:
raise ValueError("expand_number must be a non-negative integer")
def _expand_varargs(fn):
fn_ast = function_to_ast(fn)
fn_expanded_ast = expand_function_ast_varargs(fn_ast, expand_number)
return function_ast_to_function(fn_expanded_ast, stacklevel=2)
return _expand_varargs
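# A minimal usage sketch: with expand_number=2 the starred argument is
# replaced by two fixed arguments, so pair_max below becomes equivalent
# to ``def pair_max(_0, _1): return max(_0, _1)``.
#
#   @expand_varargs(2)
#   def pair_max(*args):
#       return max(*args)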
|
11530609
|
import ciso8601
import dateutil.parser
from cartographer.field_types import SchemaAttribute
from cartographer.utils.datetime import as_utc, make_naive
class DateAttribute(SchemaAttribute):
@classmethod
def format_value_for_json(cls, value):
return as_utc(value).isoformat()
def from_json(self, serialized_value):
if self.is_nullable and serialized_value is None:
return None
try:
# ciso8601 is significantly faster than dateutil.parser for parsing iso8601 strings, so we try it first
parsed_value = ciso8601.parse_datetime(serialized_value)
assert parsed_value is not None # Caveat: asserts won't run if python is run with -O.
except Exception:
parsed_value = dateutil.parser.parse(serialized_value)
return make_naive(parsed_value)
|
11530629
|
from PySide2.QtCore import QItemSelectionModel, QObject, Signal
from PySide2.QtWidgets import QDialog, QTableWidgetItem, QVBoxLayout
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.utils import block_signals, unique_name
class ListEditor(QObject):
"""A string list editor that doesn't allow duplicates"""
# Indicates the items were re-arranged
items_rearranged = Signal()
# Indicates that some items were deleted
# Provides a list of names that were deleted.
items_deleted = Signal(list)
# Indicates that an item was renamed.
# Provides the old item name and its new name.
item_renamed = Signal(str, str)
# Indicates that some items were copied.
# Provides a list of items that were copied, and a list of new
# names they correspond to, in order.
items_copied = Signal(list, list)
# Indicates that a default item was added to the end.
# Provides the name of the new item.
item_added = Signal(str)
def __init__(self, items, parent=None):
super().__init__(parent)
self.ui = UiLoader().load_file('list_editor.ui', parent)
# Make sure there are no duplicates in the items
items = list(dict.fromkeys(items))
self._items = items
self._prev_selected_items = []
self.update_table()
self.setup_connections()
def setup_connections(self):
self.selection_model.selectionChanged.connect(self.selection_changed)
self.ui.up.clicked.connect(self.up)
self.ui.down.clicked.connect(self.down)
self.ui.delete_.clicked.connect(self.delete)
self.ui.copy.clicked.connect(self.copy)
self.ui.add.clicked.connect(self.add)
self.ui.table.itemChanged.connect(self.item_edited)
@property
def selection_model(self):
return self.ui.table.selectionModel()
@property
def selected_rows(self):
return sorted([x.row() for x in self.selection_model.selectedRows()])
@property
def items(self):
return self._items
@items.setter
def items(self, items):
if self._items == items:
return
# Make sure there are no duplicates in the items
items = list(dict.fromkeys(items))
self._items = items
self.update_table()
@property
def selected_items(self):
return [self.items[x] for x in self.selected_rows]
@property
def num_items(self):
return len(self.items)
def clear_selection(self):
self.selection_model.clear()
def select_row(self, i):
model_index = self.selection_model.model().index(i, 0)
command = QItemSelectionModel.Select | QItemSelectionModel.Rows
self.selection_model.select(model_index, command)
def selection_changed(self):
self.update_enable_states()
def update_enable_states(self):
selected_rows = self.selected_rows
num_selected = len(selected_rows)
top_selected = 0 in selected_rows
bottom_selected = self.num_items - 1 in selected_rows
one_selected = num_selected == 1
any_selected = num_selected > 0
all_selected = num_selected == self.num_items
self.ui.up.setEnabled(one_selected and not top_selected)
self.ui.down.setEnabled(one_selected and not bottom_selected)
self.ui.copy.setEnabled(any_selected)
self.ui.delete_.setEnabled(any_selected and not all_selected)
def update_prev_selected_items(self):
self._prev_selected_items = self.selected_items
def up(self):
i = self.selected_rows[0]
self.swap(i, i - 1)
def down(self):
i = self.selected_rows[0]
self.swap(i, i + 1)
def delete(self):
self.update_prev_selected_items()
selected_rows = self.selected_rows
deleted = []
for i in selected_rows:
deleted.append(self.items.pop(i - len(deleted)))
new_selection = min(selected_rows[-1] - len(deleted) + 1,
self.num_items - 1)
self.update_table()
self.select_row(new_selection)
self.items_deleted.emit(deleted)
def copy(self):
self.update_prev_selected_items()
old_items = [self.items[x] for x in self.selected_rows]
new_items = []
for name in old_items:
new_name = unique_name(self.items + new_items, name)
new_items.append(new_name)
self.items += new_items
self.update_table()
self.items_copied.emit(old_items, new_items)
def add(self):
new_name = unique_name(self.items, 'new')
self.items += [new_name]
self.update_table()
# Select the new item
self.clear_selection()
self.select_row(len(self.items) - 1)
self.item_added.emit(new_name)
def item_edited(self, item):
row = item.row()
old_name = self.items[row]
new_name = item.text()
if new_name in self.items:
# Make sure it is not a duplicate. If it is, then revert it.
new_name = old_name
self.items[row] = new_name
self.update_prev_selected_items()
self.update_table()
if new_name != old_name:
self.item_renamed.emit(old_name, new_name)
def swap(self, i, j):
self.update_prev_selected_items()
items = self.items
items[i], items[j] = items[j], items[i]
self.update_table()
self.items_rearranged.emit()
def update_table(self):
table = self.ui.table
with block_signals(table):
table.clearContents()
table.setRowCount(self.num_items)
for i, item in enumerate(self.items):
table.setItem(i, 0, QTableWidgetItem(item))
for item in self._prev_selected_items:
if item in self.items:
self.select_row(self.items.index(item))
class ListEditorDialog(QDialog):
def __init__(self, items, parent=None):
super().__init__(parent)
layout = QVBoxLayout(self)
self.setLayout(layout)
self.editor = ListEditor(items, self)
layout.addWidget(self.editor.ui)
self.setWindowTitle('List Editor')
UiLoader().install_dialog_enter_key_filters(self)
@property
def items(self):
return self.editor.items
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication
app = QApplication(sys.argv)
items = [
'a',
'b',
'c',
'd',
'e',
]
dialog = ListEditorDialog(items)
def dialog_finished():
print(f'Final items: {dialog.items}')
def items_rearranged():
print(f'Items re-arranged: {dialog.items}')
def items_deleted(items):
print(f'Items deleted: {items=}')
def items_copied(old_names, new_names):
print(f'Items copied: {old_names=} => {new_names=}')
def item_renamed(old_name, new_name):
print(f'Item renamed: {old_name} => {new_name}')
editor = dialog.editor
editor.items_rearranged.connect(items_rearranged)
editor.items_deleted.connect(items_deleted)
editor.items_copied.connect(items_copied)
editor.item_renamed.connect(item_renamed)
dialog.finished.connect(dialog_finished)
dialog.finished.connect(app.quit)
dialog.show()
app.exec_()
|
11530630
|
import asyncio
import tempfile
import os
import contextlib
import pytest
from postfix_mta_sts_resolver import netstring
from postfix_mta_sts_resolver.responder import STSSocketmapResponder
import postfix_mta_sts_resolver.utils as utils
import postfix_mta_sts_resolver.base_cache as base_cache
@contextlib.contextmanager
def set_env(**environ):
old_environ = dict(os.environ)
os.environ.update(environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
@pytest.mark.asyncio
@pytest.mark.timeout(10)
async def test_responder_expiration(event_loop):
async def query(host, port, domain):
reader, writer = await asyncio.open_connection(host, port)
stream_reader = netstring.StreamReader()
string_reader = stream_reader.next_string()
writer.write(netstring.encode(b'test ' + domain.encode('ascii')))
try:
res = b''
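# netstring.StreamReader is pull-based: read() raises WantRead until
# enough bytes have been fed, and an empty part marks the end of the
# current string.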
while True:
try:
part = string_reader.read()
except netstring.WantRead:
data = await reader.read(4096)
assert data
stream_reader.feed(data)
else:
if not part:
break
res += part
return res
finally:
writer.close()
with tempfile.NamedTemporaryFile() as cachedb:
cfg = {}
cfg["port"] = 18461
cfg["cache_grace"] = 0
cfg["shutdown_timeout"] = 1
cfg["cache"] = {
"type": "sqlite",
"options": {
"filename": cachedb.name,
},
}
cfg = utils.populate_cfg_defaults(cfg)
cache = utils.create_cache(cfg['cache']['type'],
cfg['cache']['options'])
await cache.setup()
pol_body = {
"version": "STSv1",
"mode": "enforce",
"mx": [ "mail.loc" ],
"max_age": 1,
}
await cache.set("no-record.loc", base_cache.CacheEntry(0, "0", pol_body))
resp = STSSocketmapResponder(cfg, event_loop, cache)
await resp.start()
try:
result = await query(cfg['host'], cfg['port'], 'no-record.loc')
assert result == b'NOTFOUND '
finally:
await resp.stop()
await cache.teardown()
|
11530673
|
import theano
import numpy as np
import os
from theano import tensor as T
from collections import OrderedDict
# NB: T.config.floatX is equivalent to theano.config.floatX
dtype = T.config.floatX # @UndefinedVariable
class Elman(object):
def __init__(self, ne, de, na, nh, n_out, cs, npos,
update_embeddings=True):
'''
ne :: number of word embeddings in the vocabulary
de :: dimension of the word embeddings
na :: number of acoustic or language model features at each word step
(acoustic context size in frames * number of features)
nh :: dimension of the hidden layer
n_out :: number of classes
cs :: word window context size
npos :: number of pos tags
'''
# parameters of the model
self.emb = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,
(ne + 1, de)).
astype(dtype)) # add one for PADDING
if na == 0:
# NB original one, now Wx becomes much bigger with acoustic data
self.Wx = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,
((de * cs) +
(npos * cs),
nh))
.astype(dtype))
else:
self.Wx = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,
((de * cs) +
(npos * cs) +
na, nh))
.astype(dtype))
self.Wh = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,
(nh, nh))
.astype(dtype))
self.W = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,
(nh, n_out))
.astype(dtype))
self.bh = theano.shared(np.zeros(nh, dtype=dtype))
self.b = theano.shared(np.zeros(n_out, dtype=dtype))
self.h0 = theano.shared(np.zeros(nh, dtype=dtype))
# Use the eye function (diagonal 1s) for the POS, small in memory
self.pos = T.eye(npos, npos, 0)
self.n_acoust = na # the number of acoustic features
# Weights for L1 and L2
self.L1_reg = 0.0
self.L2_reg = 0.00001
# without embeddings updates
self.params = [self.Wx, self.Wh, self.W, self.bh, self.b, self.h0]
self.names = ['Wx', 'Wh', 'W', 'bh', 'b', 'h0']
if update_embeddings:
self.params = [self.emb, self.Wx, self.Wh, self.W, self.bh,
self.b, self.h0]
self.names = ['embeddings', 'Wx', 'Wh', 'W', 'bh', 'b', 'h0']
# as many columns as context window size/lines as words in the sentence
self.idxs = T.imatrix()
self.pos_idxs = T.imatrix()
# simply a matrix: number of features * length sentence
self.extra_features = T.matrix()
# TODO Old version no pos
# x = self.emb[self.idxs].reshape((self.idxs.shape[0], de*cs))
if na == 0:
# POS version, not just the embeddings
# but with the POS window concatenated
x = T.concatenate((self.emb[self.idxs].reshape((self.idxs.shape[0],
de*cs)),
self.pos[self.pos_idxs].reshape(
(self.pos_idxs.shape[0],
npos*cs))), 1)
else:
# TODO new version with extra features
x = T.concatenate((self.emb[self.idxs].reshape((self.idxs.shape[0],
de*cs)),
self.pos[self.pos_idxs].reshape(
(self.pos_idxs.shape[0],
npos*cs)),
self.extra_features), 1)
self.y = T.iscalar('y') # label
# TODO for sentences
# self.y = T.ivector('y') #labels for whole sentence
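# Standard Elman (simple RNN) step, implemented by recurrence() below:
#   h_t = sigmoid(x_t . Wx + h_{t-1} . Wh + bh)
#   s_t = softmax(h_t . W + b)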
def recurrence(x_t, h_tm1):
h_t = T.nnet.sigmoid(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) +
self.bh)
s_t = T.nnet.softmax(T.dot(h_t, self.W) + self.b)
return [h_t, s_t]
[h, s], _ = theano.scan(fn=recurrence,
sequences=x, outputs_info=[self.h0, None],
n_steps=x.shape[0])
p_y_given_x_lastword = s[-1, 0, :]
p_y_given_x_sentence = s[:, 0, :]
p_y_given_x_sentence_hidden = (h, s[:, 0, :])
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
# TODO adding this- zero one loss for the last word
# y_pred_word = T.argmax(p_y_given_x_lastword)
# learning rate not hard coded as could decay
self.lr = T.scalar('lr')
# Cost: standard nll loss
self.nll = -T.mean(T.log(p_y_given_x_lastword)[self.y])
self.sentence_nll = -T.mean(T.log(p_y_given_x_sentence)
[T.arange(x.shape[0]), self.y])
if na == 0:
self.classify = theano.function(inputs=[self.idxs, self.pos_idxs],
outputs=y_pred)
else:
self.classify = theano.function(inputs=[self.idxs, self.pos_idxs,
self.extra_features],
outputs=y_pred)
# regularisation terms
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
# if not using this, set it to 0 to avoid unnecessary computation
self.L1 = 0
# self.L1 = abs(self.Wh.sum()) + abs(self.Wx.sum()) + \
# abs(self.W.sum()) + abs(self.emb.sum())\
# + abs(self.bh.sum()) + abs(self.b.sum()) + abs(self.h0.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (self.Wh ** 2).sum() + (self.Wx ** 2).sum() +\
(self.W ** 2).sum() + (self.emb ** 2).sum() +\
(self.bh ** 2).sum() + (self.b ** 2).sum() +\
(self.h0 ** 2).sum()
self.cost = self.nll \
+ self.L1_reg * self.L1 \
+ self.L2_reg * self.L2_sqr
gradients = T.grad(self.cost, self.params)
self.updates = OrderedDict((p, p-self.lr*g)
for p, g in zip(self.params, gradients))
# costs for multiple labels (one for each in the input)
self.sentence_cost = self.sentence_nll \
+ self.L1_reg * self.L1 \
+ self.L2_reg * self.L2_sqr
sentence_gradients = T.grad(self.sentence_cost, self.params)
self.sentence_updates = OrderedDict((p, p - self.lr*g)
for p, g in
zip(self.params,
sentence_gradients))
if na == 0:
self.soft_max = theano.function(inputs=[self.idxs, self.pos_idxs],
outputs=p_y_given_x_sentence)
self.soft_max_return_hidden_layer = theano.function(
inputs=[self.idxs, self.pos_idxs],
outputs=p_y_given_x_sentence_hidden)
else:
self.soft_max = theano.function(inputs=[self.idxs, self.pos_idxs,
self.extra_features],
outputs=p_y_given_x_sentence)
self.soft_max_return_hidden_layer = theano.function(
inputs=[self.idxs, self.pos_idxs,
self.extra_features],
outputs=p_y_given_x_sentence_hidden)
if na == 0:
self.train = theano.function(inputs=[self.idxs, self.pos_idxs,
self.y,
self.lr],
outputs=self.nll,
updates=self.updates)
else:
self.train = theano.function(inputs=[self.idxs, self.pos_idxs,
self.extra_features,
self.y,
self.lr],
outputs=self.nll,
updates=self.updates)
self.normalize = theano.function(
inputs=[],
updates={self.emb:
self.emb /
T.sqrt((self.emb**2).sum(axis=1))
.dimshuffle(0, 'x')}
)
def classify_by_index(self, word_idx, indices, pos_idx=None,
extra_features=None):
"""Classification method which assumes the dialogue matrix is
in the right format.
:param word_idx: window size * dialogue length matrix
:param labels: vector dialogue length long
:param indices: 2 * dialogue length matrix for start, stop indices
:param pos_idx: pos window size * dialogue length matrix
:param extra_features: number of features * dialogue length matrix
"""
output = []
for start, stop in indices:
if extra_features:
output.extend(self.classify(word_idx[start:stop+1, :],
pos_idx[start:stop+1, :],
np.asarray(
extra_features[start:stop+1, :],
dtype='float32')
)
)
else:
output.extend(self.classify(word_idx[start:stop+1, :],
pos_idx[start:stop+1, :]
)
)
return output
def fit(self, word_idx, labels, lr, indices, pos_idx=None,
extra_features=None):
"""Fit method which assumes the dialogue matrix is in the right
format.
:param word_idx: window size * dialogue length matrix
:param labels: vector dialogue length long
:param indices: 2 * dialogue length matrix for start, stop indices
:param pos_idx: pos window size * dialogue length matrix
:param extra_features: number of features * dialogue length matrix
"""
loss = 0
test = 0
testing = False
for start, stop in indices:
# print start, stop
if testing:
test += 1
if test > 50:
break
if extra_features:
x = self.train(word_idx[start:stop+1, :],
pos_idx[start:stop+1, :],
np.asarray(extra_features[start:stop+1, :],
dtype='float32'),
labels[stop],
lr)
else:
x = self.train(word_idx[start:stop+1, :],
pos_idx[start:stop+1, :],
labels[stop],
lr)
loss += x
self.normalize()
return loss
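# A minimal call sketch (hypothetical data; a 10-word dialogue treated
# as one utterance spanning indices 0-9):
#   rnn = Elman(ne=5000, de=50, na=0, nh=100, n_out=2, cs=3, npos=45)
#   loss = rnn.fit(word_idx, labels, lr=0.01, indices=[(0, 9)], pos_idx=pos_idx)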
def shared_dataset(self, mycorpus, borrow=True, data_type='int32'):
""" Load the dataset into shared variables """
return theano.shared(np.asarray(mycorpus, dtype=data_type),
borrow=borrow)
def load_weights_from_folder(self, folder):
for name, param in zip(self.names, self.params):
param.set_value(np.load(os.path.join(folder, name + ".npy")))
def load(self, folder):
emb = np.load(os.path.join(folder, 'embeddings.npy'))
Wx = np.load(os.path.join(folder, 'Wx.npy'))
Wh = np.load(os.path.join(folder, 'Wh.npy'))
W = np.load(os.path.join(folder, 'W.npy'))
bh = np.load(os.path.join(folder, 'bh.npy'))
b = np.load(os.path.join(folder, 'b.npy'))
h0 = np.load(os.path.join(folder, 'h0.npy'))
return emb, Wx, Wh, W, bh, b, h0
def load_weights(self, emb=None, Wx=None, Wh=None, W=None, bh=None, b=None,
h0=None):
if emb is not None:
self.emb.set_value(emb)
if Wx is not None:
self.Wx.set_value(Wx)
if Wh is not None:
self.Wh.set_value(Wh)
if W is not None:
self.W.set_value(W)
if bh is not None:
self.bh.set_value(bh)
if b is not None:
self.b.set_value(b)
if h0 is not None:
self.h0.set_value(h0)
def save(self, folder):
for param, name in zip(self.params, self.names):
np.save(os.path.join(folder, name + '.npy'), param.get_value())
|
11530678
|
from notedrive.lanzou import CodeDetail, LanZouCloud, download
downer = LanZouCloud()
downer.ignore_limits()
downer.login_by_cookie()
def example1():
print(downer.login_by_cookie() == CodeDetail.SUCCESS)
def example2():
file_path = '/Users/liangtaoniu/workspace/MyDiary/tmp/weights/yolov3.weight'
downer.upload_file(file_path=file_path)
def example3():
download('https://wwe.lanzoui.com/ig56tpia6rg',
dir_pwd='./download/lanzou')
# download('https://wws.lanzous.com/b01hh63kf', dir_pwd='./download/lanzou')
# downer.down_dir_by_url('https://wws.lanzous.com/b01hh2zve', dir_pwd='./download/lanzou')
def example4():
print("upload")
# res = downer.upload_file('/tmp/models/yolo/configs/yolov3.h5', folder_id=2129808)
res = downer.upload_file(
'/root/workspace/notechats/notedata/example/download/meta_Electronics.json.gz', folder_id=2192474)
print(res)
def example5():
print(downer.get_dir_list(folder_id=2184164))
# example1()
# example2()
example3()
# example4()
# example5()
# https://wws.lanzous.com/b01hjn3aj
# print(downer._session.cookies)
|
11530697
|
import os
import pytest
import shutil
import tempfile
from pathlib import Path
from nip import parse, dump, load, construct
from nip.utils import deep_equals
import builders
class TestConfigLoadDump:
save_folder: Path
@classmethod
def setup_class(cls):
cls.save_folder = Path(tempfile.mkdtemp())
# def teardown_method(self):
# if os.path.isfile(self.save_path):
# os.remove(self.save_path)
#
# @classmethod
# def teardown_class(cls):
# if os.path.isdir(cls.save_folder):
# shutil.rmtree(cls.save_folder)
@pytest.mark.parametrize(
'config_path', ['configs/config.nip', 'configs/document.nip']
)
def test_load_dump(self, config_path):
config_path = Path(config_path)
for config in parse(config_path, always_iter=True):
obj = construct(config)
save_path = self.save_folder.joinpath(f"{config_path.stem}.nip")
dump(save_path, config)
reproduced_obj = load(save_path)
print(obj)
print(reproduced_obj)
assert deep_equals(reproduced_obj, obj)
|
11530711
|
import copy
import warnings
from collections.abc import Iterable, Iterator
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
from stingray.exceptions import StingrayError
from stingray.gti import bin_intervals_from_gtis, check_gtis, cross_two_gtis
from stingray.largememory import createChunkedSpectra, saveData
from stingray.utils import genDataPath, rebin_data, rebin_data_log, simon
from .events import EventList
from .lightcurve import Lightcurve
from .utils import show_progress
# location of factorial moved between scipy versions
try:
from scipy.misc import factorial
except ImportError:
from scipy.special import factorial
try:
from pyfftw.interfaces.scipy_fft import fft, fftfreq
except ImportError:
warnings.warn("pyfftw not installed. Using standard scipy fft")
from scipy.fft import fft, fftfreq
__all__ = [
"Crossspectrum", "AveragedCrossspectrum", "coherence", "time_lag",
"cospectra_pvalue", "normalize_crossspectrum"
]
def normalize_crossspectrum(unnorm_power, tseg, nbins, nphots1, nphots2, norm="none", power_type="real"):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
nbins : int
Number of bins in the light curve
nphots1 : int
Number of photons in the light curve no. 1
nphots2 : int
Number of photons in the light curve no. 2
Other parameters
----------------
norm : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`
(absolute rms)
power_type : str
One of `'real'` (real part), `'all'` (all complex powers), `'abs'`
(absolute value)
Returns
-------
    power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
# The "effective" counts/bin is the geometrical mean of the counts/bin
# of the two light curves. Same goes for counts/second in meanrate.
log_nphots1 = np.log(nphots1)
log_nphots2 = np.log(nphots2)
actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))
if power_type == "all":
c_num = unnorm_power
elif power_type == "real":
c_num = unnorm_power.real
elif power_type == "absolute":
c_num = np.absolute(unnorm_power)
else:
raise ValueError("`power_type` not recognized!")
if norm.lower() == 'leahy':
power = c_num * 2. / actual_nphots
elif norm.lower() == 'frac':
meancounts1 = nphots1 / nbins
meancounts2 = nphots2 / nbins
actual_mean = np.sqrt(meancounts1 * meancounts2)
assert actual_mean > 0.0, \
"Mean count rate is <= 0. Something went wrong."
c = c_num / float(nbins ** 2.)
power = c * 2. * tseg / (actual_mean ** 2.0)
elif norm.lower() == 'abs':
meanrate = np.sqrt(nphots1 * nphots2) / tseg
power = c_num * 2. * meanrate / actual_nphots
elif norm.lower() == 'none':
power = unnorm_power
else:
raise ValueError("Value for `norm` not recognized.")
return power
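# Hedged sketch of the 'leahy' branch above (synthetic Poisson counts; for
# pure white noise the normalized powers should average ~2 away from DC):
#
#     counts = np.random.poisson(100, 1024)
#     pds = np.abs(fft(counts)) ** 2
#     leahy = normalize_crossspectrum(pds, tseg=1024.0, nbins=1024,
#                                     nphots1=counts.sum(),
#                                     nphots2=counts.sum(), norm='leahy')
#     # np.mean(leahy[1:]) -> approximately 2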
def normalize_crossspectrum_gauss(
unnorm_power, mean_flux, var, dt, N, norm="none", power_type="real"):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
mean_flux: float
The mean flux of the light curve (if a cross spectrum, the geometrical
mean of the flux in the two channels)
var: float
The variance of the light curve (if a cross spectrum, the geometrical
mean of the variance in the two channels)
dt: float
The sampling time of the light curve
N: int
The number of bins in the light curve
Other parameters
----------------
norm : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`
(absolute rms)
power_type : str
One of `'real'` (real part), `'all'` (all complex powers), `'abs'`
(absolute value)
Returns
-------
    power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
Examples
--------
>>> lc_c = np.random.poisson(10000, 10000)
>>> lc_c_var = 10000
>>> lc = lc_c / 17.3453
>>> lc_var = (100 / 17.3453)**2
>>> pds_c = np.absolute(np.fft.fft(lc_c))**2
>>> pds = np.absolute(np.fft.fft(lc))**2
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), lc_c_var, 0.1, len(lc_c), norm='leahy')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='leahy')
>>> np.allclose(norm, norm_c)
True
>>> np.isclose(np.mean(norm[1:]), 2, atol=0.1)
True
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='frac')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='frac')
>>> np.allclose(norm, norm_c)
True
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='abs')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='abs')
>>> np.allclose(norm / np.mean(lc)**2, norm_c / np.mean(lc_c)**2)
True
>>> np.isclose(np.mean(norm_c[2:]), 2 * np.mean(lc_c * 0.1), rtol=0.1)
True
"""
# The "effective" counts/bin is the geometrical mean of the counts/bin
# of the two light curves. Same goes for counts/second in meanrate.
if power_type == "all":
c_num = unnorm_power
elif power_type == "real":
c_num = unnorm_power.real
elif power_type == "absolute":
c_num = np.absolute(unnorm_power)
else:
raise ValueError("`power_type` not recognized!")
common_factor = 2 * dt / N
rate_mean = mean_flux * dt
if norm.lower() == 'leahy':
norm = 2 / var / N
elif norm.lower() == 'frac':
norm = common_factor / rate_mean**2
elif norm.lower() == 'abs':
norm = common_factor
elif norm.lower() == 'none':
norm = 1
else:
raise ValueError("Value for `norm` not recognized.")
return norm * c_num
def _averaged_cospectra_cdf(xcoord, n):
"""
Function calculating the cumulative distribution function for
averaged cospectra, Equation 19 of Huppenkothen & Bachetti (2018).
Parameters
----------
xcoord : float or iterable
The cospectral power for which to calculate the CDF.
n : int
The number of averaged cospectra
Returns
-------
cdf : float
The value of the CDF at `xcoord` for `n` averaged cospectra
"""
if np.size(xcoord) == 1:
xcoord = [xcoord]
cdf = np.zeros_like(xcoord)
for i, x in enumerate(xcoord):
prefac_bottom1 = factorial(n - 1)
for j in range(n):
prefac_top = factorial(n - 1 + j)
prefac_bottom2 = factorial(
n - 1 - j) * factorial(j)
prefac_bottom3 = 2.0 ** (n + j)
prefac = prefac_top / (prefac_bottom1 * prefac_bottom2 *
prefac_bottom3)
gf = -j + n
first_fac = scipy.special.gamma(gf)
if x >= 0:
second_fac = scipy.special.gammaincc(gf, n * x) * first_fac
fac = 2.0 * first_fac - second_fac
else:
fac = scipy.special.gammaincc(gf, -n * x) * first_fac
cdf[i] += (prefac * fac)
if np.size(xcoord) == 1:
return cdf[i]
else:
continue
return cdf
def cospectra_pvalue(power, nspec):
"""
This function computes the single-trial p-value that the power was
observed under the null hypothesis that there is no signal in
the data.
Important: the underlying assumption that make this calculation valid
is that the powers in the power spectrum follow a Laplace distribution,
and this requires that:
1. the co-spectrum is normalized according to [Leahy 1983]_
2. there is only white noise in the light curve. That is, there is no
aperiodic variability that would change the overall shape of the power
spectrum.
Also note that the p-value is for a *single trial*, i.e. the power
currently being tested. If more than one power or more than one power
spectrum are being tested, the resulting p-value must be corrected for the
number of trials (Bonferroni correction).
Mathematical formulation in [Huppenkothen 2017]_.
Parameters
----------
power : float
The squared Fourier amplitude of a spectrum to be evaluated
nspec : int
The number of spectra or frequency bins averaged in ``power``.
This matters because averaging spectra or frequency bins increases
the signal-to-noise ratio, i.e. makes the statistical distributions
of the noise narrower, such that a smaller power might be very
significant in averaged spectra even though it would not be in a single
power spectrum.
Returns
-------
pval : float
The classical p-value of the observed power being consistent with
the null hypothesis of white noise
References
----------
* .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract
* .. [Huppenkothen 2017] http://adsabs.harvard.edu/abs/2018ApJS..236...13H
"""
if not np.all(np.isfinite(power)):
raise ValueError("power must be a finite floating point number!")
# if power < 0:
# raise ValueError("power must be a positive real number!")
if not np.isfinite(nspec):
raise ValueError("nspec must be a finite integer number")
if not np.isclose(nspec % 1, 0):
raise ValueError("nspec must be an integer number!")
if nspec < 1:
raise ValueError("nspec must be larger or equal to 1")
elif nspec == 1:
lapl = scipy.stats.laplace(0, 1)
pval = lapl.sf(power)
elif nspec > 50:
exp_sigma = np.sqrt(2) / np.sqrt(nspec)
gauss = scipy.stats.norm(0, exp_sigma)
pval = gauss.sf(power)
else:
pval = 1. - _averaged_cospectra_cdf(power, nspec)
return pval
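# Hedged sketch (values are illustrative): single-trial significance of one
# Leahy-normalized cospectral power averaged over 30 segments.
#
#     pval = cospectra_pvalue(1.5, 30)
#     # a small pval means power 1.5 is unlikely under white noise for m=30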
def coherence(lc1, lc2):
"""
Estimate coherence function of two light curves.
For details on the definition of the coherence, see Vaughan and Nowak,
1996 [#]_.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
The first light curve data for the channel of interest.
lc2: :class:`stingray.Lightcurve` object
The light curve data for reference band
Returns
-------
coh : ``np.ndarray``
The array of coherence versus frequency
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
return cs.coherence()
def time_lag(lc1, lc2):
"""
    Estimate the time lag of two light curves.
    Equation from Bendat & Piersol, 2011 [bendat-2011]_.
    Returns
    -------
    lag : np.ndarray
        The time lag at each positive Fourier frequency
References
----------
.. [bendat-2011] https://www.wiley.com/en-us/Random+Data%3A+Analysis+and+Measurement+Procedures%2C+4th+Edition-p-9780470248775
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
lag = cs.time_lag()
return lag
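# Hedged sketch for the two convenience wrappers above (`lc1`/`lc2` are
# assumed to be equally binned Lightcurve objects of the same length):
#
#     coh = coherence(lc1, lc2)   # frequency-dependent linear correlation
#     lag = time_lag(lc1, lc2)    # phase lag / (2 * pi * freq), in seconds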
class Crossspectrum(object):
"""
Make a cross spectrum from a (binned) light curve.
You can also make an empty :class:`Crossspectrum` object to populate with your
own Fourier-transformed data (this can sometimes be useful when making
binned power spectra). Stingray uses the scipy.fft standards for the sign
of the Nyquist frequency.
Parameters
----------
data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The first light curve data for the channel/band of interest.
data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The light curve data for the reference band.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
The normalization of the (real part of the) cross spectrum.
power_type: string, optional, default ``real``
Parameter to choose among complete, real part and magnitude of the cross spectrum.
fullspec: boolean, optional, default ``False``
        If False, keep only the positive frequencies; if True, keep all of them.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
    lc1: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects
        For backwards compatibility only. Like ``data1``, but no
        :class:`stingray.events.EventList` objects allowed
    lc2: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects
        For backwards compatibility only. Like ``data2``, but no
        :class:`stingray.events.EventList` objects allowed
dt: float
The time resolution of the light curve. Only needed when constructing
light curves in the case where ``data1``, ``data2`` are
:class:`EventList` objects
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra (complex numbers)
power_err: numpy.ndarray
The uncertainties of ``power``.
        An approximation for each bin is given by ``power_err = power / sqrt(m)``,
        where ``m`` is the number of powers averaged in each bin (by frequency
        binning, or by averaging more than one spectrum). Note that for a single
        realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross-spectra amplitudes in each bin.
n: int
The number of data points/time bins in one segment of the light
curves.
nphots1: float
The total number of photons in light curve 1
nphots2: float
The total number of photons in light curve 2
"""
def __init__(self, data1=None, data2=None, norm='none', gti=None,
lc1=None, lc2=None, power_type="real", dt=None, fullspec=False):
        if not isinstance(norm, str):
raise TypeError("norm must be a string")
if norm.lower() not in ["frac", "abs", "leahy", "none"]:
raise ValueError("norm must be 'frac', 'abs', 'leahy', or 'none'!")
self.norm = norm.lower()
# check if input data is a Lightcurve object, if not make one or
# make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None``
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if data1 is None or data2 is None:
if data1 is not None or data2 is not None:
raise TypeError("You can't do a cross spectrum with just one "
"light curve!")
else:
self.freq = None
self.power = None
self.power_err = None
self.df = None
self.nphots1 = None
self.nphots2 = None
self.m = 1
self.n = None
return
if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \
dt is None:
raise ValueError("If using event lists, please specify the bin "
"time to generate lightcurves.")
if not isinstance(data1, EventList):
lc1 = data1
else:
lc1 = data1.to_lc(dt)
if not isinstance(data2, EventList):
lc2 = data2
elif isinstance(data2, EventList) and data2 is not data1:
lc2 = data2.to_lc(dt)
elif data2 is data1:
lc2 = lc1
self.gti = gti
self.lc1 = lc1
self.lc2 = lc2
self.power_type = power_type
self.fullspec = fullspec
self._make_crossspectrum(lc1, lc2, fullspec)
# These are needed to calculate coherence
self._make_auxil_pds(lc1, lc2)
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
if lc1 is not lc2 and isinstance(lc1, Lightcurve):
self.pds1 = Crossspectrum(lc1, lc1, norm='none')
self.pds2 = Crossspectrum(lc2, lc2, norm='none')
def _make_crossspectrum(self, lc1, lc2, fullspec=False):
"""
Auxiliary method computing the normalized cross spectrum from two
light curves. This includes checking for the presence of and
applying Good Time Intervals, computing the unnormalized Fourier
cross-amplitude, and then renormalizing using the required
normalization. Also computes an uncertainty estimate on the cross
spectral powers.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
fullspec: boolean, default ``False``
Return full frequency array (True) or just positive frequencies (False)
"""
# make sure the inputs work!
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
if self.lc2.mjdref != self.lc1.mjdref:
raise ValueError("MJDref is different in the two light curves")
# Then check that GTIs make sense
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
if self.gti.shape[0] != 1:
raise TypeError("Non-averaged Cross Spectra need "
"a single Good Time Interval")
lc1 = lc1.split_by_gti()[0]
lc2 = lc2.split_by_gti()[0]
# total number of photons is the sum of the
# counts in the light curve
self.meancounts1 = lc1.meancounts
self.meancounts2 = lc2.meancounts
self.nphots1 = np.float64(np.sum(lc1.counts))
self.nphots2 = np.float64(np.sum(lc2.counts))
self.err_dist = 'poisson'
if lc1.err_dist == 'poisson':
self.var1 = lc1.meancounts
else:
self.var1 = np.mean(lc1.counts_err) ** 2
self.err_dist = 'gauss'
if lc2.err_dist == 'poisson':
self.var2 = lc2.meancounts
else:
self.var2 = np.mean(lc2.counts_err) ** 2
self.err_dist = 'gauss'
if lc1.n != lc2.n:
raise StingrayError("Light curves do not have same number "
"of time bins per segment.")
# If dt differs slightly, its propagated error must not be more than
# 1/100th of the bin
if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise StingrayError("Light curves do not have same time binning "
"dt.")
# In case a small difference exists, ignore it
lc1.dt = lc2.dt
self.dt = lc1.dt
self.n = lc1.n
# the frequency resolution
self.df = 1.0 / lc1.tseg
# the number of averaged periodograms in the final output
# This should *always* be 1 here
self.m = 1
# make the actual Fourier transform and compute cross spectrum
self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2, fullspec)
# If co-spectrum is desired, normalize here. Otherwise, get raw back
# with the imaginary part still intact.
self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
if lc1.err_dist.lower() != lc2.err_dist.lower():
simon("Your lightcurves have different statistics."
"The errors in the Crossspectrum will be incorrect.")
elif lc1.err_dist.lower() != "poisson":
simon("Looks like your lightcurve statistic is not poisson."
"The errors in the Powerspectrum will be incorrect.")
if self.__class__.__name__ in ['Powerspectrum',
'AveragedPowerspectrum']:
self.power_err = self.power / np.sqrt(self.m)
elif self.__class__.__name__ in ['Crossspectrum',
'AveragedCrossspectrum']:
# This is clearly a wild approximation.
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any inconsistencies.")
unnorm_power_err = np.sqrt(2) / np.sqrt(self.m) # Leahy-like
unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))
unnorm_power_err += np.zeros_like(self.power)
self.power_err = \
self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)
else:
self.power_err = np.zeros(len(self.power))
def _fourier_cross(self, lc1, lc2, fullspec=False):
"""
Fourier transform the two light curves, then compute the cross spectrum.
Computed as CS = lc1 x lc2* (where lc2 is the one that gets
complex-conjugated). The user has the option to either get just the
positive frequencies or the full spectrum.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
            One light curve to be Fourier transformed. This is the band of
            interest or channel of interest.
lc2: :class:`stingray.Lightcurve` object
Another light curve to be Fourier transformed.
This is the reference band.
fullspec: boolean. Default is False.
            If True, return the whole array of frequencies; if False, only the positive ones.
Returns
-------
        freqs: numpy.ndarray
            The frequencies at which the cross spectrum is sampled
        cross: numpy.ndarray
            The complex cross spectrum, ``fourier_1 * conj(fourier_2)``
"""
fourier_1 = fft(lc1.counts) # do Fourier transform 1
fourier_2 = fft(lc2.counts) # do Fourier transform 2
        # use the fftfreq imported at module level (scipy or pyfftw backend)
        freqs = fftfreq(lc1.n, lc1.dt)
cross = np.multiply(fourier_1, np.conj(fourier_2))
if fullspec is True:
return freqs, cross
else:
return freqs[freqs > 0], cross[freqs > 0]
def rebin(self, df=None, f=None, method="mean"):
"""
Rebin the cross spectrum to a new frequency resolution ``df``.
Parameters
----------
df: float
The new frequency resolution
Other Parameters
----------------
f: float
the rebin factor. If specified, it substitutes df with ``f*self.df``
Returns
-------
bin_cs = :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that called this method. For example, if this method is called
from :class:`AveragedPowerspectrum`, it will return an object of class
:class:`AveragedPowerspectrum`, too.
"""
if f is None and df is None:
raise ValueError('You need to specify at least one between f and '
'df')
elif f is not None:
df = f * self.df
# rebin cross spectrum to new resolution
binfreq, bincs, binerr, step_size = \
rebin_data(self.freq, self.power, df, self.power_err,
method=method, dx=self.df)
# make an empty cross spectrum object
# note: syntax deliberate to work with subclass Powerspectrum
bin_cs = copy.copy(self)
# store the binned periodogram in the new object
bin_cs.freq = binfreq
bin_cs.power = bincs
bin_cs.df = df
bin_cs.n = self.n
bin_cs.norm = self.norm
bin_cs.nphots1 = self.nphots1
bin_cs.power_err = binerr
if hasattr(self, 'unnorm_power'):
_, binpower_unnorm, _, _ = \
rebin_data(self.freq, self.unnorm_power, df,
method=method, dx=self.df)
bin_cs.unnorm_power = binpower_unnorm
if hasattr(self, 'cs_all'):
cs_all = []
for c in self.cs_all:
cs_all.append(c.rebin(df=df, f=f, method=method))
bin_cs.cs_all = cs_all
if hasattr(self, 'pds1'):
bin_cs.pds1 = self.pds1.rebin(df=df, f=f, method=method)
if hasattr(self, 'pds2'):
bin_cs.pds2 = self.pds2.rebin(df=df, f=f, method=method)
try:
bin_cs.nphots2 = self.nphots2
except AttributeError:
if self.type == 'powerspectrum':
pass
else:
raise AttributeError(
'Spectrum has no attribute named nphots2.')
bin_cs.m = np.rint(step_size * self.m)
return bin_cs
def _normalize_crossspectrum(self, unnorm_power, tseg):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
Returns
-------
        power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
if self.err_dist == 'poisson':
return normalize_crossspectrum(
unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm,
self.power_type)
return normalize_crossspectrum_gauss(
unnorm_power, np.sqrt(self.meancounts1 * self.meancounts2),
np.sqrt(self.var1 * self.var2),
dt=self.dt,
N=self.n,
norm=self.norm,
power_type=self.power_type)
def rebin_log(self, f=0.01):
"""
Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency
modified by a factor f:
.. math::
d\\nu_j = d\\nu_{j-1} (1+f)
Parameters
----------
f: float, optional, default ``0.01``
parameter that steers the frequency resolution
Returns
-------
new_spec : :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that called this method. For example, if this method is called
            from :class:`AveragedPowerspectrum`, it will return an object of class
            :class:`AveragedPowerspectrum`, too.
        """
binfreq, binpower, binpower_err, nsamples = \
rebin_data_log(self.freq, self.power, f,
y_err=self.power_err, dx=self.df)
# the frequency resolution
df = np.diff(binfreq)
# shift the lower bin edges to the middle of the bin and drop the
# last right bin edge
binfreq = binfreq[:-1] + df / 2
new_spec = copy.copy(self)
new_spec.freq = binfreq
new_spec.power = binpower
new_spec.power_err = binpower_err
new_spec.m = nsamples * self.m
if hasattr(self, 'unnorm_power'):
_, binpower_unnorm, _, _ = \
rebin_data_log(self.freq, self.unnorm_power, f, dx=self.df)
new_spec.unnorm_power = binpower_unnorm
if hasattr(self, 'pds1'):
new_spec.pds1 = self.pds1.rebin_log(f)
if hasattr(self, 'pds2'):
new_spec.pds2 = self.pds2.rebin_log(f)
if hasattr(self, 'cs_all'):
cs_all = []
for c in self.cs_all:
cs_all.append(c.rebin_log(f))
new_spec.cs_all = cs_all
return new_spec
def coherence(self):
""" Compute Coherence function of the cross spectrum.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Returns
-------
coh : numpy.ndarray
Coherence function
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
# this computes the averaged power spectrum, but using the
# cross spectrum code to avoid circular imports
return self.unnorm_power.real / (self.pds1.power.real *
self.pds2.power.real)
def _phase_lag(self):
"""Return the fourier phase lag of the cross spectrum."""
return np.angle(self.unnorm_power)
def time_lag(self):
"""
        Calculate the Fourier time lag of the cross spectrum. The time lag is
        calculated using the center of the frequency bins.
"""
if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:
ph_lag = self._phase_lag()
return ph_lag / (2 * np.pi * self.freq)
else:
raise AttributeError("Object has no attribute named 'time_lag' !")
def plot(self, labels=None, axis=None, title=None, marker='-', save=False,
filename=None):
"""
Plot the amplitude of the cross spectrum vs. the frequency using ``matplotlib``.
Parameters
----------
labels : iterable, default ``None``
A list of tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of the ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
            acceptable argument for the ``matplotlib.pyplot.axis()`` method.
title : str, default ``None``
The title of the plot.
marker : str, default '-'
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional, default ``False``
If ``True``, save the figure with specified filename.
filename : str
File name of the image to save. Depends on the boolean ``save``.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
plt.figure('crossspectrum')
plt.plot(self.freq,
np.abs(self.power),
marker,
color='b',
label='Amplitude')
plt.plot(self.freq,
np.abs(self.power.real),
marker,
color='r',
alpha=0.5,
label='Real Part')
plt.plot(self.freq,
np.abs(self.power.imag),
marker,
color='g',
alpha=0.5,
label='Imaginary Part')
if labels is not None:
try:
plt.xlabel(labels[0])
plt.ylabel(labels[1])
except TypeError:
simon("``labels`` must be either a list or tuple with "
"x and y labels.")
raise
except IndexError:
simon("``labels`` must have two labels for x and y "
"axes.")
# Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
plt.legend(loc='best')
if axis is not None:
plt.axis(axis)
if title is not None:
plt.title(title)
if save:
if filename is None:
plt.savefig('spec.png')
else:
plt.savefig(filename)
else:
plt.show(block=False)
def classical_significances(self, threshold=1, trial_correction=False):
"""
Compute the classical significances for the powers in the power
spectrum, assuming an underlying noise distribution that follows a
        chi-square distribution with 2M degrees of freedom, where M is the
number of powers averaged in each bin.
Note that this function will *only* produce correct results when the
following underlying assumptions are fulfilled:
1. The power spectrum is Leahy-normalized
2. There is no source of variability in the data other than the
periodic signal to be determined with this method. This is important!
If there are other sources of (aperiodic) variability in the data, this
method will *not* produce correct results, but instead produce a large
number of spurious false positive detections!
3. There are no significant instrumental effects changing the
statistical distribution of the powers (e.g. pile-up or dead time)
By default, the method produces ``(index,p-values)`` for all powers in
the power spectrum, where index is the numerical index of the power in
question. If a ``threshold`` is set, then only powers with p-values
        *below* that threshold are reported with their respective indices. If
``trial_correction`` is set to ``True``, then the threshold will be corrected
for the number of trials (frequencies) in the power spectrum before
being used.
Parameters
----------
threshold : float, optional, default ``1``
The threshold to be used when reporting p-values of potentially
significant powers. Must be between 0 and 1.
Default is ``1`` (all p-values will be reported).
trial_correction : bool, optional, default ``False``
A Boolean flag that sets whether the ``threshold`` will be corrected
by the number of frequencies before being applied. This decreases
the ``threshold`` (p-values need to be lower to count as significant).
Default is ``False`` (report all powers) though for any application
            where ``threshold`` is set to something meaningful, this should also
be applied!
Returns
-------
pvals : iterable
A list of ``(index, p-value)`` tuples for all powers that have p-values
lower than the threshold specified in ``threshold``.
"""
if not self.norm == "leahy":
raise ValueError("This method only works on "
"Leahy-normalized power spectra!")
if np.size(self.m) == 1:
            # calculate p-values for all powers
pv = np.array([cospectra_pvalue(power, self.m)
for power in self.power])
else:
pv = np.array([cospectra_pvalue(power, m)
for power, m in zip(self.power, self.m)])
# if trial correction is used, then correct the threshold for
# the number of powers in the power spectrum
if trial_correction:
threshold /= self.power.shape[0]
indices = np.where(pv < threshold)[0]
pvals = np.vstack([pv[indices], indices])
return pvals
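# Hedged sketch (lc1/lc2 are assumed Lightcurve objects; the threshold is
# illustrative). classical_significances requires Leahy normalization:
#
#     cs = Crossspectrum(lc1, lc2, norm='leahy', power_type='real')
#     pvals = cs.classical_significances(threshold=0.01, trial_correction=True)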
class AveragedCrossspectrum(Crossspectrum):
"""
Make an averaged cross spectrum from a light curve by segmenting two
light curves, Fourier-transforming each segment and then averaging the
resulting cross spectra.
Parameters
----------
    data1: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
        A light curve from which to compute the cross spectrum. In some cases, this would
        be the light curve of the wavelength/energy/frequency band of interest.
    data2: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
        A second light curve to use in the cross spectrum. In some cases, this would be
        the wavelength/energy/frequency reference band to compare the band of interest with.
segment_size: float
The size of each segment to average. Note that if the total
duration of each :class:`Lightcurve` object in ``lc1`` or ``lc2`` is not an
integer multiple of the ``segment_size``, then any fraction left-over
        at the end of the time series will be discarded rather than zero-padded,
        since padding would introduce artifacts.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
dt : float
The time resolution of the light curve. Only needed when constructing
        light curves in the case where ``data1`` or ``data2`` are :class:`EventList` objects
power_type: string, optional, default ``real``
Parameter to choose among complete, real part and magnitude of
the cross spectrum.
silent : bool, default False
Do not show a progress bar when generating an averaged cross spectrum.
Useful for the batch execution of many spectra
    lc1: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects
        For backwards compatibility only. Like ``data1``, but no
        :class:`stingray.events.EventList` objects allowed
    lc2: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects
        For backwards compatibility only. Like ``data2``, but no
        :class:`stingray.events.EventList` objects allowed
fullspec: boolean, optional, default ``False``
If True, return the full array of frequencies, otherwise return just the
positive frequencies.
large_data : bool, default False
Use only for data larger than 10**7 data points!! Uses zarr and dask for computation.
save_all : bool, default False
Save all intermediate PDSs used for the final average. Use with care.
This is likely to fill up your RAM on medium-sized datasets, and to
slow down the computation when rebinning.
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra
power_err: numpy.ndarray
The uncertainties of ``power``.
        An approximation for each bin is given by ``power_err = power / sqrt(m)``,
        where ``m`` is the number of powers averaged in each bin (by frequency
        binning, or by averaging multiple power spectra). Note that for a single
        realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross spectra
n: int
The number of time bins per segment of light curve
nphots1: float
The total number of photons in the first (interest) light curve
nphots2: float
The total number of photons in the second (reference) light curve
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
They are calculated by taking the common GTI between the
two light curves
"""
def __init__(self, data1=None, data2=None, segment_size=None, norm='none',
gti=None, power_type="real", silent=False, lc1=None, lc2=None,
dt=None, fullspec=False, large_data=False, save_all=False):
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if segment_size is None and data1 is not None:
raise ValueError("segment_size must be specified")
if segment_size is not None and not np.isfinite(segment_size):
raise ValueError("segment_size must be finite!")
if large_data and data1 is not None and data2 is not None:
if isinstance(data1, EventList):
input_data = 'EventList'
elif isinstance(data1, Lightcurve):
input_data = 'Lightcurve'
chunks = int(np.rint(segment_size // data1.dt))
segment_size = chunks * data1.dt
else:
raise ValueError(
f'Invalid input data type: {type(data1).__name__}')
dir_path1 = saveData(data1, persist=False, chunks=chunks)
dir_path2 = saveData(data2, persist=False, chunks=chunks)
data_path1 = genDataPath(dir_path1)
data_path2 = genDataPath(dir_path2)
spec = createChunkedSpectra(input_data,
'AveragedCrossspectrum',
data_path=list(data_path1 +
data_path2),
segment_size=segment_size,
norm=norm,
gti=gti,
power_type=power_type,
silent=silent,
dt=dt)
for key, val in spec.__dict__.items():
setattr(self, key, val)
return
self.type = "crossspectrum"
self.segment_size = segment_size
self.power_type = power_type
self.fullspec = fullspec
self.show_progress = not silent
self.dt = dt
self.save_all = save_all
if isinstance(data1, EventList):
lengths = data1.gti[:, 1] - data1.gti[:, 0]
good = lengths >= segment_size
data1.gti = data1.gti[good]
data1 = list(data1.to_lc_list(dt))
if isinstance(data2, EventList):
lengths = data2.gti[:, 1] - data2.gti[:, 0]
good = lengths >= segment_size
data2.gti = data2.gti[good]
data2 = list(data2.to_lc_list(dt))
Crossspectrum.__init__(self, data1, data2, norm, gti=gti,
power_type=power_type, dt=dt, fullspec=fullspec)
return
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
is_event = isinstance(lc1, EventList)
is_lc = isinstance(lc1, Lightcurve)
is_lc_iter = isinstance(lc1, Iterator)
is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter
# A way to say that this is actually not a power spectrum
if self.type != "powerspectrum" and \
(lc1 is not lc2) and (is_event or is_lc or is_lc_list):
self.pds1 = AveragedCrossspectrum(lc1, lc1,
segment_size=self.segment_size,
norm='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, fullspec=self.fullspec,
save_all=self.save_all)
self.pds2 = AveragedCrossspectrum(lc2, lc2,
segment_size=self.segment_size,
norm='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, fullspec=self.fullspec,
save_all=self.save_all)
def _make_segment_spectrum(self, lc1, lc2, segment_size, silent=False):
"""
Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for
each.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
segment_size : ``numpy.float``
Size of each light curve segment to use for averaging.
Other parameters
----------------
silent : bool, default False
Suppress progress bars
Returns
-------
        cs_all : list of :class:`Crossspectrum` objects
A list of cross spectra calculated independently from each light curve segment
        nphots1_all, nphots2_all : ``numpy.ndarray`` for each of ``lc1`` and ``lc2``
Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.
"""
assert isinstance(lc1, Lightcurve)
assert isinstance(lc2, Lightcurve)
if lc1.tseg != lc2.tseg:
simon("Lightcurves do not have same tseg. This means that the data"
"from the two channels are not completely in sync. This "
"might or might not be an issue. Keep an eye on it.")
# If dt differs slightly, its propagated error must not be more than
# 1/100th of the bin
if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise ValueError("Light curves do not have same time binning dt.")
# In case a small difference exists, ignore it
lc1.dt = lc2.dt
current_gtis = cross_two_gtis(lc1.gti, lc2.gti)
lc1.gti = lc2.gti = current_gtis
lc1.apply_gtis()
lc2.apply_gtis()
if self.gti is None:
self.gti = current_gtis
else:
if not np.allclose(self.gti, current_gtis):
self.gti = np.vstack([self.gti, current_gtis])
check_gtis(current_gtis)
cs_all = []
nphots1_all = []
nphots2_all = []
start_inds, end_inds = \
bin_intervals_from_gtis(current_gtis, segment_size, lc1.time,
dt=lc1.dt)
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any inconsistencies.")
local_show_progress = show_progress
if not self.show_progress or silent:
local_show_progress = lambda a: a
for start_ind, end_ind in \
local_show_progress(zip(start_inds, end_inds)):
time_1 = copy.deepcopy(lc1.time[start_ind:end_ind])
counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind])
counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind])
time_2 = copy.deepcopy(lc2.time[start_ind:end_ind])
counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind])
counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind])
if np.sum(counts_1) == 0 or np.sum(counts_2) == 0:
warnings.warn(
"No counts in interval {}--{}s".format(time_1[0],
time_1[-1]))
continue
gti1 = np.array([[time_1[0] - lc1.dt / 2,
time_1[-1] + lc1.dt / 2]])
gti2 = np.array([[time_2[0] - lc2.dt / 2,
time_2[-1] + lc2.dt / 2]])
lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
err_dist=lc1.err_dist,
gti=gti1,
dt=lc1.dt, skip_checks=True)
lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
err_dist=lc2.err_dist,
gti=gti2,
dt=lc2.dt, skip_checks=True)
with warnings.catch_warnings(record=True) as w:
cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm,
power_type=self.power_type, fullspec=self.fullspec)
cs_all.append(cs_seg)
nphots1_all.append(np.sum(lc1_seg.counts))
nphots2_all.append(np.sum(lc2_seg.counts))
return cs_all, nphots1_all, nphots2_all
def _make_crossspectrum(self, lc1, lc2, fullspec=False):
"""
Auxiliary method computing the normalized cross spectrum from two light curves.
This includes checking for the presence of and applying Good Time Intervals, computing the
unnormalized Fourier cross-amplitude, and then renormalizing using the required normalization.
Also computes an uncertainty estimate on the cross spectral powers. Stingray uses the
scipy.fft standards for the sign of the Nyquist frequency.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
fullspec: boolean, default ``False``,
If True, return all frequencies otherwise return only positive frequencies
"""
local_show_progress = show_progress
if not self.show_progress:
local_show_progress = lambda a: a
# chop light curves into segments
if isinstance(lc1, Lightcurve) and \
isinstance(lc2, Lightcurve):
if self.type == "crossspectrum":
cs_all, nphots1_all, nphots2_all = \
self._make_segment_spectrum(lc1, lc2, self.segment_size)
elif self.type == "powerspectrum":
cs_all, nphots1_all = \
self._make_segment_spectrum(lc1, self.segment_size)
else:
raise ValueError("Type of spectrum not recognized!")
else:
cs_all, nphots1_all, nphots2_all = [], [], []
for lc1_seg, lc2_seg in local_show_progress(zip(lc1, lc2)):
if self.type == "crossspectrum":
cs_sep, nphots1_sep, nphots2_sep = \
self._make_segment_spectrum(lc1_seg, lc2_seg,
self.segment_size,
silent=True)
nphots2_all.append(nphots2_sep)
elif self.type == "powerspectrum":
cs_sep, nphots1_sep = \
self._make_segment_spectrum(lc1_seg, self.segment_size,
silent=True)
else:
raise ValueError("Type of spectrum not recognized!")
cs_all.append(cs_sep)
nphots1_all.append(nphots1_sep)
cs_all = np.hstack(cs_all)
nphots1_all = np.hstack(nphots1_all)
if self.type == "crossspectrum":
nphots2_all = np.hstack(nphots2_all)
m = len(cs_all)
nphots1 = np.mean(nphots1_all)
power_avg = np.zeros_like(cs_all[0].power)
power_err_avg = np.zeros_like(cs_all[0].power_err)
unnorm_power_avg = np.zeros_like(cs_all[0].unnorm_power)
for cs in cs_all:
power_avg += cs.power
unnorm_power_avg += cs.unnorm_power
power_err_avg += (cs.power_err) ** 2
power_avg /= float(m)
power_err_avg = np.sqrt(power_err_avg) / m
unnorm_power_avg /= float(m)
self.freq = cs_all[0].freq
self.power = power_avg
self.unnorm_power = unnorm_power_avg
self.m = m
self.power_err = power_err_avg
self.df = cs_all[0].df
self.n = cs_all[0].n
self.nphots1 = nphots1
if self.save_all:
self.cs_all = cs_all
if self.type == "crossspectrum":
self.nphots1 = nphots1
nphots2 = np.mean(nphots2_all)
self.nphots2 = nphots2
def coherence(self):
"""Averaged Coherence function.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Compute an averaged Coherence function of cross spectrum by computing
coherence function of each segment and averaging them. The return type
is a tuple with first element as the coherence function and the second
element as the corresponding uncertainty associated with it.
        Note: The uncertainty in the coherence function is strictly valid for
        Gaussian statistics only.
Returns
-------
(coh, uncertainty) : tuple of np.ndarray
Tuple comprising the coherence function and uncertainty.
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
if np.any(self.m < 50):
simon("Number of segments used in averaging is "
"significantly low. The result might not follow the "
"expected statistical distributions.")
# Calculate average coherence
unnorm_power_avg = self.unnorm_power
num = np.absolute(unnorm_power_avg) ** 2
# The normalization was 'none'!
unnorm_powers_avg_1 = self.pds1.power.real
unnorm_powers_avg_2 = self.pds2.power.real
coh = num / (unnorm_powers_avg_1 * unnorm_powers_avg_2)
coh[~np.isfinite(coh)] = 0.0
# Calculate uncertainty
uncertainty = \
(2 ** 0.5 * coh * (1 - coh)) / (np.sqrt(coh) * self.m ** 0.5)
uncertainty[coh == 0] = 0.0
return (coh, uncertainty)
def time_lag(self):
"""Calculate time lag and uncertainty.
        Equation from Bendat & Piersol, 2011 [bendat-2011]_.
Returns
-------
lag : np.ndarray
The time lag
lag_err : np.ndarray
The uncertainty in the time lag
"""
lag = super(AveragedCrossspectrum, self).time_lag()
coh, uncert = self.coherence()
dum = (1. - coh) / (2. * coh)
dum[coh == 0] = 0.0
lag_err = np.sqrt(dum / self.m) / (2 * np.pi * self.freq)
return lag, lag_err
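if __name__ == "__main__":
    # Minimal end-to-end sketch on synthetic data (not part of the library):
    # two Poisson light curves, cross-correlated in 4-s segments.
    rng = np.random.default_rng(0)
    t = np.arange(0.0, 32.0, 1.0 / 64)
    lc_a = Lightcurve(t, rng.poisson(100, t.size), dt=1.0 / 64, skip_checks=True)
    lc_b = Lightcurve(t, rng.poisson(100, t.size), dt=1.0 / 64, skip_checks=True)
    acs = AveragedCrossspectrum(lc_a, lc_b, segment_size=4, norm="leahy")
    print(acs.m, acs.power[:3])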
|
11530725
|
import json
import os

import sublime  # Sublime Text plugin API (available in the editor's plugin host)

# NOTE: PACKAGE_PATH is assumed to be defined by the surrounding plugin package.
JC_SETTINGS_FOLDER_NAME = "javascript_completions"
JC_SETTINGS_FOLDER = os.path.join(PACKAGE_PATH, JC_SETTINGS_FOLDER_NAME)
class JavaScriptCompletions():
def init(self):
self.api = {}
self.API_Setup = sublime.load_settings('JavaScript-Completions.sublime-settings').get('completion_active_list')
sublime.set_timeout_async(self.load_api)
def load_api(self):
# Caching completions
if self.API_Setup:
for API_Keyword in self.API_Setup:
                self.api[API_Keyword] = sublime.load_settings(API_Keyword + '.sublime-settings')
                if self.api[API_Keyword].get("scope") is None:
                    path_to_json = os.path.join(PACKAGE_PATH, "sublime-completions", API_Keyword + '.sublime-settings')
if os.path.isfile(path_to_json):
with open(path_to_json) as json_file:
self.api[API_Keyword] = json.load(json_file)
def get(self, key):
return sublime.load_settings('JavaScript-Completions.sublime-settings').get(key)
|
11530826
|
import logging
import time
from typing import List
import boto3
from lib.data.dynamo.replication_dao import ReplicationDao
from lib.data.ssm.ssm import SsmDao
from lib.models.replication_config import ReplicationConfig
from lib.svcs.replication import ReplicationService
from lib.svcs.slack import SlackService
from lib.models.slack import SlackColor, FigReplicationMessage, SimpleSlackMessage
from lib.config.constants import FIGGY_WEBHOOK_URL_PATH
from lib.utils.utils import Utils
repl_dao: ReplicationDao = ReplicationDao(boto3.resource('dynamodb'))
ssm: SsmDao = SsmDao(boto3.client('ssm'))
repl_svc: ReplicationService = ReplicationService(repl_dao, ssm)
webhook_url = ssm.get_parameter_value(FIGGY_WEBHOOK_URL_PATH)
slack: SlackService = SlackService(webhook_url=webhook_url)
log = Utils.get_logger(__name__, logging.INFO)
def notify_slack(config: ReplicationConfig):
message = FigReplicationMessage(replication_cfg=config)
slack.send_message(message)
def handle(event, context):
try:
repl_configs: List[ReplicationConfig] = repl_dao.get_all()
for config in repl_configs:
time.sleep(.15) # This is to throttle PS API Calls to prevent overloading the API.
updated = repl_svc.sync_config(config)
if updated:
notify_slack(config)
except Exception as e:
log.exception("Caught irrecoverable error while executing.")
title = "Figgy experienced an irrecoverable error!"
message=f"The following error occurred in an the *figgy-replication-syncer* lambda. " \
f"If this appears to be a bug with figgy, please tell us by submitting a GitHub issue!" \
f" \n\n```{Utils.printable_exception(e)}```"
message = SimpleSlackMessage(title=title, message=message, color=SlackColor.RED)
slack.send_message(message)
raise e
if __name__ == '__main__':
handle(None, None)
|
11530829
|
import time
from pathlib import Path
from logging import getLogger
import json
import argparse
import os
import sys
import skimage.io
import numpy as np
import click
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import torch
from tqdm import tqdm
if torch.cuda.is_available():
torch.randn(10).cuda()
tqdm.monitor_interval = 0
# todo: make this better
sys.path.append('./') # aa
from aa.cresi.net.pytorch_utils.concrete_eval import FullImageEvaluator
from aa.road_networks.skeletonize import run_skeletonize
from aa.road_networks.cleaning_graph import cleaning_graph
from aa.road_networks.infer_speed import infer_speed
from aa.road_networks.create_submission import make_sub, make_sub_debug
from aa.pytorch.transforms import get_flips_colors_augmentation
from aa.pytorch.data_provider import ReadingImageProvider, RawImageType
import aa.cli.sp5r2.util as u
logger = getLogger('aa')
@click.group()
def cli():
pass
@cli.command()
@click.option('-c', '--config_path', type=str)
@click.option('-f', '--fold', type=int, default=0)
def evaltest(config_path, fold):
# 02
conf = u.load_config(config_path)
u.set_filehandler(conf)
logger.info('ARGV: {}'.format(str(sys.argv)))
eval_test(conf, fold, nfolds=1)
@cli.command()
@click.option('-c', '--config_path', type=str)
def mergefolds(config_path):
# 03a
conf = u.load_config(config_path)
u.set_filehandler(conf)
logger.info('ARGV: {}'.format(str(sys.argv)))
merge_folds(conf, nfolds=conf.num_folds)
@cli.command()
@click.option('-c', '--config_path', type=str)
def makegraph(config_path):
# 04, 05, 06, 07: skelton, simplify, infer_speed, sub
conf = u.load_config(config_path)
u.set_filehandler(conf)
logger.info('ARGV: {}'.format(str(sys.argv)))
# Output: ske, sknw_gpickle, wkt
# Required time with single process: 30min -> (multiproc: 5min)
run_skeletonize(conf)
# Output: graphs
# Required time with single process: 7min -> (multiproc: 2min)
cleaning_graph(conf)
# Output: graphs_speed
# Required time with single process: 2min
infer_speed(conf)
# Output: solution
# Required time with single process: 0min
make_sub(conf)
make_sub_debug(conf)
class RawImageTypePad(RawImageType):
def finalyze(self, data):
padding_size = 22
return self.reflect_border(data, padding_size)
def eval_test(conf, args_fold, nfolds=1):
# Full resolution input
conf.target_rows = conf.eval_rows
conf.target_cols = conf.eval_cols
paths = {
'masks': '',
'images': conf.test_data_refined_dir_ims,
}
fn_mapping = {
'masks': lambda name: os.path.splitext(name)[0] + '.tif',
}
ds = ReadingImageProvider(RawImageTypePad,
paths,
fn_mapping,
image_suffix='',
num_channels=conf.num_channels)
logger.info(f'Total Dataset size: {len(ds)}')
folds = [([], list(range(len(ds)))) for i in range(nfolds)]
save_dir = f'/wdata/working/sp5r2/models/preds/{conf.modelname}/fold{args_fold}_test'
weight_dir = f'/wdata/working/sp5r2/models/weights/{conf.modelname}/fold{args_fold}'
keval = FullImageEvaluator(conf,
ds,
save_dir=save_dir,
test=True,
flips=3,
num_workers=2,
border=conf.padding,
save_im_gdal_format=False)
for fold, (train_idx, test_idx) in enumerate(folds):
logger.info(f'Fold idx: {args_fold}')
logger.info(f'Train size: {len(train_idx)}')
logger.info(f'Test size: {len(test_idx)}')
keval.predict(args_fold, test_idx, weight_dir, verbose=False)
break
def merge_folds(conf, nfolds=4):
fold0_pred_dir = f'/wdata/working/sp5r2/models/preds/{conf.modelname}/fold0_test/'
files = sorted(Path(fold0_pred_dir).glob('./*.tif'))
# TODO: multiprocess
for fn in tqdm(files, total=len(files)):
preds = []
for fold_idx in range(nfolds):
            # str.lstrip strips a character set, not a prefix; slice the known
            # 'fold0_' prefix off by length instead.
            name = f'fold{fold_idx}_' + fn.name[len('fold0_'):]
model_pred_base = str(fn.parent.parent / f'fold{fold_idx}_test' / name)
pred = skimage.io.imread(model_pred_base)
preds.append(pred)
preds = np.mean(preds, axis=0).astype(np.uint8)
        merged_path = str(fn.parent.parent / 'merged_test' / fn.name[len('fold0_'):])
Path(merged_path).parent.mkdir(parents=True, exist_ok=True)
skimage.io.imsave(merged_path, preds, compress=1)
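# Hedged CLI sketch (script and config names are illustrative):
#
#     python eval_sp5r2.py evaltest -c configs/sn5r2.json -f 0
#     python eval_sp5r2.py mergefolds -c configs/sn5r2.json
#     python eval_sp5r2.py makegraph -c configs/sn5r2.json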
if __name__ == '__main__':
u.set_logger()
cli()
|
11530845
|
from .base import *
DEBUG = False
ADMINS = (
('<NAME>', '<EMAIL>'),
)
ALLOWED_HOSTS = ['.educaproject.com']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'educa',
'USER': 'educa',
'PASSWORD': '*****',
}
}
# SSL config
SECURE_SSL_REDIRECT = True
CSRF_COOKIE_SECURE = True
|
11530854
|
from cleancat.base import (
Bool,
Choices,
DateTime,
Dict,
Email,
Embedded,
EmbeddedReference,
Enum,
Field,
Integer,
List,
Regex,
RelaxedURL,
Schema,
SortedSet,
StopValidation,
String,
TrimmedString,
URL,
ValidationError,
)
__all__ = [
'Bool',
'Choices',
'DateTime',
'Dict',
'Email',
'Embedded',
'EmbeddedReference',
'Enum',
'Field',
'Integer',
'List',
'Regex',
'RelaxedURL',
'Schema',
'SortedSet',
'StopValidation',
'String',
'TrimmedString',
'URL',
'ValidationError',
]
__version__ = '1.0.0'
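# Hedged usage sketch of the re-exported API (field and class names are
# illustrative, not part of this module):
#
#     class UserSchema(Schema):
#         email = Email(required=True)
#         name = TrimmedString()
#
#     data = UserSchema({'email': 'ann@example.com', 'name': ' Ann '}).full_clean()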
|
11530889
|
import json
import logging
import os
from lib import (
create_response,
write_key,
read_key_or_default,
get_config,
get_tf_metadata,
)
logger = logging.getLogger()
logger.setLevel(os.environ.get("LOG_LEVEL", "INFO"))
def lambda_handler(event, context):
project_id = event["pathParameters"]["projectId"]
logger.info(f"Got request for project {project_id}")
statefile = f"{project_id}/terraform.tfstate"
self_url = "https://" + event["requestContext"]["domainName"]
config = get_config(project_id)
if config["name"] == "invalid":
return create_response(
f"No project exists, please visit {self_url}/project/new", 404
)
project_name = config["name"]
logger.info(f"Got request for {project_name} with id {project_id}")
# Get existing state or create new
if event["httpMethod"] == "GET":
logger.info("Type is GET, send state")
data = read_key_or_default(statefile)
return create_response(data.decode("utf-8"))
# update
if event["httpMethod"] == "POST":
logger.info("Type is POST, save and send state")
data = event["body"]
metadata = get_tf_metadata(data, True)
if metadata["terraform_version"] == "invalid":
return create_response("Unable to parse", code=500)
else:
write_key(statefile, data)
# todo: write the terraform.tfstate.serial
return create_response(data)
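# Hedged local-invocation sketch (the event mirrors an API Gateway proxy
# payload; projectId and domain are illustrative):
#
#     event = {
#         "pathParameters": {"projectId": "demo"},
#         "requestContext": {"domainName": "abc123.execute-api.eu-west-1.amazonaws.com"},
#         "httpMethod": "GET",
#         "body": None,
#     }
#     print(lambda_handler(event, None))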
|