prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
import csv
from bs4 import BeautifulSoup
from collections import Counter
import re
import os
OUTPUT_NAME = os.getenv('OUTPUT_NAME',
'data_detikcom_labelled_740_7_class.csv')
csv_file = open('data_detikcom_labelled_740.csv')
csv_reader = csv.DictReader(csv_file)
|
# Tranform individual label to candidate pair label
l | abel_map = {
'pos_ahok': 'pos_ahok_djarot',
'pos_djarot': 'pos_ahok_djarot',
'pos_anies': 'pos_anies_sandi',
'pos_sandi': 'pos_anies_sandi',
'pos_agus': 'pos_agus_sylvi',
'pos_sylvi': 'pos_agus_sylvi',
'neg_ahok': 'neg_ahok_djarot',
'neg_djarot': 'neg_ahok_djarot',
'neg_anies': 'neg_anies_sandi',
'neg_sandi': 'neg_anies_sandi',
'neg_agus': 'neg_agus_sylvi',
'neg_sylvi': 'neg_agus_sylvi',
'oot': 'oot'
}
fields = ['title', 'raw_content', 'labels']
train_file = open(OUTPUT_NAME, 'w')
csv_writer = csv.DictWriter(train_file, fields)
csv_writer.writeheader()
for row in csv_reader:
title = row['title']
raw_content = row['raw_content']
labels = []
label_1 = row['sentiment_1']
if label_1 != '':
candidate_pair_label = label_map[label_1]
if not candidate_pair_label in labels:
labels.append(candidate_pair_label)
label_2 = row['sentiment_2']
if label_2 != '':
candidate_pair_label = label_map[label_2]
if not candidate_pair_label in labels:
labels.append(candidate_pair_label)
label_3 = row['sentiment_3']
if label_3 != '':
candidate_pair_label = label_map[label_3]
if not candidate_pair_label in labels:
labels.append(candidate_pair_label)
# Skip content if label not exists
if not labels: continue
label_str = ','.join(labels)
data_row = {'title': title, 'raw_content': raw_content,
'labels': label_str}
csv_writer.writerow(data_row)
print OUTPUT_NAME, 'created'
csv_file.close()
train_file.close()
|
# Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants to hide XSD Datatypes used by Point Values and Properties
These help to describe the data in a feed so the receiving Thing can know what kind of data to expect
See also http://www.w3.org/TR/xmlschema-2/#built-in-datatypes
"""
from __future__ import unicode_literals
BASE64 = 'base64Binary'
'''Represents a sequence of binary octets (bytes) encoded according to RFC 2045,
the standard defining the MIME types (look under "6.8 Base64 Content-Transfer-Encoding").
'''
BOOLEAN = 'boolean'
'''A Boolean true or false value. Representations of true are "true" and "1"; false is denoted as "false" or "0".'''
BYTE = 'byte'
'''A sign | ed 8-bit integer in the range [-128 -> +127]. Derived from the short datatype.'''
UN | SIGNED_BYTE = 'unsignedByte'
'''An unsigned 8-bit integer in the range [0, 255]. Derived from the unsignedShort datatype.'''
DATE = 'date'
'''Represents a specific date. The syntax is the same as that for the date part of dateTime,
with an optional time zone indicator. Example: "1889-09-24".
'''
DATETIME = 'dateTime'
'''
Represents a specific instant of time. It has the form YYYY-MM-DDThh:mm:ss followed by an optional time-zone suffix.
`YYYY` is the year, `MM` is the month number, `DD` is the day number,
`hh` the hour in 24-hour format, `mm` the minute, and `ss` the second (a decimal and fraction are allowed for the
seconds part).
The optional zone suffix is either `"Z"` for Universal Coordinated Time (UTC), or a time offset of the form
`"[+|-]hh:mm"`, giving the difference between UTC and local time in hours and minutes.
Example: "2004-10-31T21:40:35.5-07:00" is a time on Halloween 2004 in Mountain Standard time. The equivalent UTC would
be "2004-11-01T04:40:35.5Z".
'''
DECIMAL = 'decimal'
'''Any base-10 fixed-point number. There must be at least one digit to the left of the decimal point, and a leading "+"
or "-" sign is allowed.
Examples: "42", "-3.14159", "+0.004".
'''
DOUBLE = 'double'
'''A 64-bit floating-point decimal number as specified in the IEEE 754-1985 standard. The external form is the same as
the float datatype.
'''
FLOAT = 'float'
'''A 32-bit floating-point decimal number as specified in the IEEE 754-1985 standard.
Allowable values are the same as in the decimal type, optionally followed by an exponent,
or one of the special values "INF" (positive infinity), "-INF" (negative infinity), or "NaN" (not a number).
The exponent starts with either "e" or "E", optionally followed by a sign, and one or more digits.
Example: "6.0235e-23".
'''
INT = 'int'
'''Represents a 32-bit signed integer in the range [-2,147,483,648, 2,147,483,647]. Derived from the long datatype.'''
INTEGER = 'integer'
'''Represents a signed integer. Values may begin with an optional "+" or "-" sign. Derived from the decimal datatype.'''
LONG = 'long'
'''A signed, extended-precision integer; at least 18 digits are guaranteed. Derived from the integer datatype. '''
STRING = 'string'
'''Any sequence of zero or more characters.'''
TIME = 'time'
'''A moment of time that repeats every day. The syntax is the same as that for dateTime,
omitting everything up to and including the separator "T". Examples: "00:00:00" is midnight,
and "13:04:00" is an hour and four minutes after noon.
'''
URI = 'anyURI'
'''
The data must conform to the syntax of a Uniform Resource Identifier (URI), as defined in RFC 2396
as amended by RFC 2732. Example: "http://www.nmt.edu/tcc/"
is the URI for the New Mexico Tech Computer Center's index page.
'''
IRI = 'IRI'
'''Only for use with property API calls. Used to handle properties which require an IRI (URIRef) value.'''
|
'''
Kaya Baber
Physics 440 - Computational Physics
Assignment 3
Problem 1
Hamiltonian Dynamics of a Nonlinear Pendulum
Consider a simple pendulum of length in
gravitational field g. The frequency in the limit of small angles is Ω_0 ≡ radical(g/l) , but do not assume the limit
of small angles for the following calculations.
(a) Start with the Hamiltonian and develop two first order equations for the angle θ and its conjugate
momentum p_θ .
((d^2)θ/d(t^2)) = - (g/l)sin(θ)
θ_dot = P_θ/(ml)^2
| P_θ_dot = -mlsin(θ)
(b) Use a second-order leapfrog algorithm to compute the motion of the pendulum. If we choose a
computational unit of time [T ] = Ω_0^(−1) , then 2π computational time units equals one period in the limit of
small oscillations. Another way to think about it is that we can choose a set of units such that Ω_0 = 1.
Make a graph of phase space trajectories for a variety of initial condition | s.
(c) Liouville’s Theorem states that the phase-space volume of a infinitesimally close ensemble of states is
conserved. Demonstrate Liouville’s Theorem by considering an ensemble of closely spaced initial conditions.
'''
|
# Copyright (c) 2013 Jordan Halterman <jordan.halterman@gmail.com>
# See LICENSE for details.
import sys, os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from active_redis import ActiveRedis
redis = ActiveRedis()
# Create an unnamed set.
myset = redis.set()
# Add items to the set.
myset.add('foo')
myset.add('bar')
# We can also create a named set by passing a key to the constructor.
myset = redis.set('myset')
myset.add('foo')
del myset
mys | et = redis.set('myset')
pr | int myset # set([u'foo'])
myset.delete()
print myset # set()
|
#!/usr/bin/env python
# encoding: utf-8
"""A test module"""
import datetime
import tempfile
import os
import shutil
import scores.common as common
class TestCommon(object):
""" A Test class"""
def test_date_function(self):
"""Test"""
a_date = datetime.datetime.now()
a_dat | e = a_date.replace(microsecond=0)
tst | amp = common.datetime_to_timestamp(a_date)
assert tstamp > 0
converted_bck = common.timestamp_to_datetime(tstamp)
assert converted_bck == a_date
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 00:22
from __future__ import unicode_literals
from django.db import migrations
class | Migration(migrations.Migration):
dependencies = [
('configuration_management_tools', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='smslibgateways',
options={'managed': False, 'verbose_name': 'Gateways'},
),
migrations.AlterModelOptions(
name='smslibnumberroutes',
| options={'managed': False, 'verbose_name': 'Routes'},
),
]
|
import os, sys, shutil
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
os.chdir( os.path.dirname( sys.argv[0] ) )
# find setuptools
scramble_lib = os.path.join( "..", "..", "..", "lib" )
sys.path.append( scramble_lib )
import get_platform # fixes fat python 2.5
from ez_setup import use_setuptools
use_setuptools( download_delay=8, to_dir=scramble_lib )
from setuptools import *
# get the tag
if os.access( ".galaxy_tag", os.F_OK ):
tagfile = open( ".galaxy_tag", "r" )
tag = tagfile.readline().strip()
else:
tag = None
# in case you're running this by hand from a dirty module source dir
for dir in [ "build", "dist" ]:
if os.access( dir, os.F_OK ):
print "scr | amble.py: removing dir:", dir
shutil.rmtree( dir )
# reset args for distutils
me = sys.argv[0]
sys.argv = [ me ]
sys.argv.append( "egg_info" )
if tag is not None:
#sys.argv.append( "egg_info" )
sys.argv.append( "--tag-build=%s" | %tag )
# svn revision (if any) is handled directly in tag-build
sys.argv.append( "--no-svn-revision" )
sys.argv.append( "bdist_egg" )
# do it
execfile( "setup.py", globals(), locals() )
|
"""
__graph_MT_post__OUT2.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
__________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__OUT2(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.Chan | gesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'conn | ector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([81.0, 37.0, 81.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__OUT2', width = '0', justify= 'left', stipple='' )
self.gf128 = GraphicalForm(drawing, h, 'gf128', fontObject=font)
self.graphForms.append(self.gf128)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__OUT2
|
r'''
<license>
CSPLN_MaryKeelerEdition; Manages images to which notes can be added.
Copyright (C) 2015-2016, Thomas Kercheval
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
___________________________________________________________</license>
Description:
For creating CSPLN webapps for LINUX, from scaffolding.
Inputs:
Version number, of MKE_vxx_xx_xx scaffolding file.
Where each x corresponds to a current version number.
Input as "xx_xx_xx"
Number of web applications
Outputs:
Web applications, number depends on Input.
Puts web2py.py in each web_app (not included in windows version).
Puts scaffolding (current ap | p version) into each web2py frame.
Renames scaffolding application to 'MKE_Static_Name'.
Currently:
To Do:
Done:
'''
import os | , sys, shutil
from the_decider import resolve_relative_path as resolve_path
def check_file_exist(path):
"""Check if the file at the given path exists."""
if os.path.exists(path):
pass
else:
sys.exit('File {} doesn\'t exist'.format(path))
return None
def grab_out_paths(number_apps, app_path):
"""
From the number of applications necessary, create a list
of pathnames where we will create linux applications.
"""
out_dir = resolve_path(__file__, app_path)
project_part = 'P{}'
os = "linux"
out_paths = []
for num in range(1, number_apps + 1):
strin = project_part.format(str(num))
print "{part}, preparing for generation.".format(part=strin)
out_paths.append(out_dir.format(os=os, pat=strin))
return out_paths
def grab_web2py_frame():
"""Grab the path of the web2py framework and check its existence."""
webframe = resolve_path(__file__, '../apps/scaffolding/linux/web2py')
webdotpy = resolve_path(__file__,'../apps/scaffolding/common/web2py.py')
check_file_exist(webdotpy)
check_file_exist(webframe)
return webframe, webdotpy
def grab_scaffold_app(current_version):
"""Grab the path of our scaffolding and check its existence."""
mkever = '../apps/scaffolding/version/MKE_v{}'.format(current_version)
mkever = resolve_path(__file__, mkever)
check_file_exist(mkever)
return mkever
def copy_webframez(number_apps, app_path):
"""
For each path where we intend to create a linux application,
create a copy of the web2py framework and a modified copy
of web2py.py.
"""
webframe, webdotpy = grab_web2py_frame()
out_paths = grab_out_paths(number_apps, app_path)
for path in out_paths:
shutil.copytree(webframe, os.path.join(path, 'web2py'))
next_path = os.path.join(path, 'web2py')
shutil.copy(webdotpy, next_path)
print ' web2py frame copied to: {}'.format(path)
print ' web2py.py copied to: {}'.format(next_path)
return out_paths
def modify_out_paths(int_paths):
"""
Modifies the out_paths from the locations of the web2py framework
to where our applications will be generated.
"""
mod_out = []
addition = 'web2py/applications'
for path in int_paths:
new_path = os.path.join(path, addition)
mod_out.append(new_path)
return mod_out
def grab_filename_from_path(in_path):
"""Input a path, return last chunck"""
import ntpath
head, tail = ntpath.split(in_path)
return tail or ntpath.basename(head)
def copy_app(version, out_paths):
"""
Creates an application for every copy of the web2py framework,
from scaffolding application.
"""
scaff_app = grab_scaffold_app(version)
filename = grab_filename_from_path(scaff_app)
for path in out_paths:
shutil.copytree(scaff_app, os.path.join(path, filename))
old_name = os.path.join(path, filename)
new_name = os.path.join(path, 'MKE_Static_Name')
os.rename(old_name, new_name)
return None
def deploy_scaffolding(version_now, num_apps, app_path):
"""
Deploys the web2py framework and the current version of our
scaffolding, as many times as is necessary.
"""
print "\n Creating Linux applications...\n" + "_"*79
out_paths = copy_webframez(num_apps, app_path)
new_paths = modify_out_paths(out_paths)
copy_app(version_now, new_paths)
print "_"*79
return None
if __name__ == "__main__":
NUM_APPS = 10
VERSION = '00_01_02'
APP_PATH = '../apps/web_apps/{os}/{pat}'
deploy_scaffolding(VERSION, NUM_APPS, APP_PATH)
|
t os.path.getsize(data), '%r is an empty file' % data
result = self.read_html(data, 'Arizona', header=1)[0]
assert result['sq mi'].dtype == np.dtype('float64')
def test_decimal_rows(self):
# GH 12907
data = StringIO('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>''')
expected = DataFrame(data={'Header': 1100.101}, index=[0])
result = self.read_html(data, decimal='#')[0]
assert result['Header'].dtype == np.dtype('float64')
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
html_data = """<table>
<thead>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['0.763', '0.244']})
html_df = read_html(html_data, converters={'a': str})[0]
tm.assert_frame_equal(expected_df, html_df)
def test_na_values(self):
# GH 13461
html_data = """<table>
<thead>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': [0.763, np.nan]})
html_df = read_html(html_data, na_values=[0.244])[0]
tm.assert_frame_equal(expected_df, html_df)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['N/A', 'NA']})
html_df = read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, ht | ml_df)
expected_df = DataFrame({'a': [np.nan, np.nan]})
html_df = read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(data=[("Hillary", 68, "D"),
( | "Bernie", 74, "D"),
("Donald", 69, "R")])
expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1",
"Unnamed: 2_level_1"]]
html = expected_df.to_html(index=False)
html_df = read_html(html, )[0]
tm.assert_frame_equal(expected_df, html_df)
def _lang_enc(filename):
return os.path.splitext(os.path.basename(filename))[0].split('_')
class TestReadHtmlEncoding(object):
files = glob.glob(os.path.join(DATA_PATH, 'html_encoding', '*.html'))
flavor = 'bs4'
@classmethod
def setup_class(cls):
_skip_if_none_of((cls.flavor, 'html5lib'))
def read_html(self, *args, **kwargs):
kwargs['flavor'] = self.flavor
return read_html(*args, **kwargs)
def read_filename(self, f, encoding):
return self.read_html(f, encoding=encoding, index_col=0)
def read_file_like(self, f, encoding):
with open(f, 'rb') as fobj:
return self.read_html(BytesIO(fobj.read()), encoding=encoding,
index_col=0)
def read_string(self, f, encoding):
with open(f, 'rb') as fobj:
return self.read_html(fobj.read(), encoding=encoding, index_col=0)
def test_encode(self):
assert self.files, 'no files read from the data folder'
for f in self.files:
_, encoding = _lang_enc(f)
try:
from_string = self.read_string(f, encoding).pop()
from_file_like = self.read_file_like(f, encoding).pop()
from_filename = self.read_filename(f, encoding).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if '16' in encoding or '32' in encoding:
continue
raise
class TestReadHtmlEncodingLxml(TestReadHtmlEncoding):
flavor = 'lxml'
@classmethod
def setup_class(cls):
super(TestReadHtmlEncodingLxml, cls).setup_class()
_skip_if_no(cls.flavor)
class TestReadHtmlLxml(ReadHtmlMixin):
flavor = 'lxml'
@classmethod
def setup_class(cls):
_skip_if_no('lxml')
def test_data_fail(self):
from lxml.etree import XMLSyntaxError
spam_data = os.path.join(DATA_PATH, 'spam.html')
banklist_data = os.path.join(DATA_PATH, 'banklist.html')
with pytest.raises(XMLSyntaxError):
self.read_html(spam_data)
with pytest.raises(XMLSyntaxError):
self.read_html(banklist_data)
def test_works_on_valid_markup(self):
filename = os.path.join(DATA_PATH, 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@tm.slow
def test_fallback_success(self):
_skip_if_none_of(('bs4', 'html5lib'))
banklist_data = os.path.join(DATA_PATH, 'banklist.html')
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=['date'], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range('1/1/2001', periods=10))
df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
'time': raw_dates.map(lambda x: str(x.time()))})
res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
index_col=1)
newdf = DataFrame({'datetime': raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self):
data = os.path.join(DATA_PATH, 'computer_sales_page.html')
self.read_html(data, header=[0, 1])
def test_invalid_flavor():
url = 'google.com'
with pytest.raises(ValueError):
read_html(url, 'google', flavor='not a* valid**++ flaver')
def get_elements_from_file(url, element='table'):
_skip_if_none_of(('bs4', 'html5lib'))
url = file_path_to_url(url)
from bs4 import BeautifulSoup
with urlopen(url) as f:
soup = BeautifulSoup(f, features='html5lib')
retur |
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needl | e: str
:rtype: int
"""
for i in range(len(haystack)-len(needle) + 1):
if haystack[i: i + len(needle)] == needle:
return i
return | -1
|
import logging
from ..models import Activity
from .date import activity_stream_date_to_datetime, datetime_to_string
log = logging.getLogger(__name__)
def activity_from_dict(data):
log.debug("Converting YouTube dict to Activity Model")
activity_dict = activity_dict_from_dict(data)
return Activity.from_activity_dict(activity_dict)
def activity_dict_from_dict(blob):
log.debug("Converting YouTube dict to activity dict: %s", blob)
stream_object = {}
| stream_object["@context"] = "http://www.w3.org/ns/activitystreams"
stream_object["@type"] = "Activity"
date = blob.get("snippet").get("publishedAt")
date = activity_stream_date_to_datetime(date)
stream_object["published"] = datetime_to_string(date)
stream_object["provider"] = {
"@type": "Service",
"displayName": "YouTube"
}
snippet = blob.get("snippet")
stream_object["actor"] = {
"@type": "Person",
"@id": "https://www.youtub | e.com/user/{}".format(snippet.get("channelTitle")),
"displayName": snippet.get("channelTitle"),
}
stream_object["object"] = {
"@id": "https://www.youtube.com/watch?v={}".format(blob.get("id").get("videoId")),
"@type": "Video",
"displayName": snippet.get("title"),
"url": [{
"href": "https://www.youtube.com/watch?v={}".format(blob.get("id").get("videoId")),
"@type": "Link"
}],
"content": snippet.get("description"),
"youtube:etag": blob.get("etag"),
"youtube:kind": blob.get("kind"),
"youtube:id:kind": blob.get("id").get("kind"),
"youtube:channelId": snippet.get("channelId"),
"youtube:liveBroadcastContent": snippet.get("liveBroadcastContent"),
"image": [
{
"@type": "Link",
"href": snippet.get("thumbnails").get("default").get("url"),
"mediaType": "image/jpeg",
"youtube:resolution": "default"
},
{
"@type": "Link",
"href": snippet.get("thumbnails").get("medium").get("url"),
"mediaType": "image/jpeg",
"youtube:resolution": "medium"
},
{
"@type": "Link",
"href": snippet.get("thumbnails").get("high").get("url"),
"mediaType": "image/jpeg",
"youtube:resolution": "high"
},
]
}
return stream_object
"""
"""
"""
{
"@context": "http://www.w3.org/ns/activitystreams",
"@type": "Activity", ------ Abstract wrapper
"published": "2015-02-10T15:04:55Z",
"provider": {
"@type": "Service",
"displayName": "Twitter|FaceBook|Instagram|YouTube"
},
"actor": {
"@type": "Person",
"@id": "https://www.twitter.com/{{user.screen_name}}
"displayName": "Martin Smith",
"url": "http://example.org/martin",
"image": {
"@type": "Link",
"href": "http://example.org/martin/image.jpg",
"mediaType": "image/jpeg"
}
},
------------------------------------------------------
"object" : {
"@id": "urn:example:blog:abc123/xyz",
"@type": "Note",
"url": "http://example.org/blog/2011/02/entry",
"content": "This is a short note"
},
------------------------------------------------------
"object" : {
"@id": "urn:example:blog:abc123/xyz",
"@type": "Video",
"displayName": "A Simple Video",
"url": "http://example.org/video.mkv",
"duration": "PT2H"
},
------------------------------------------------------
"object" : {
"@id": "urn:example:blog:abc123/xyz",
"@type": "Image",
"displayName": "A Simple Image",
"content": "any messages?"
"url": [
{
"@type": "Link",
"href": "http://example.org/image.jpeg",
"mediaType": "image/jpeg"
},
{
"@type": "Link",
"href": "http://example.org/image.png",
"mediaType": "image/png"
}
]
},
}
"""
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 7 13:10:05 2016
@author: thasegawa
"""
import os
import pandas as pd
economic_list = list(pd.read_excel('data\\fields\\economicIndicators_Real.xlsx', header=None)[0])
#fuel_list = list(pd.read_excel('data\\fields\\fuel_binary.xlsx', header=None)[0]) + [None]
fuel_list = list(pd.read_excel('data\\fields\\fuel_binary.xlsx', header=None)[0])
# Iterate through each regression result and retrieve R^2 and coefficient
group_list = ['pathmid',
'pathnj',
'pathnyc',
'pathtotal',
'pathwtc']
path = 'data\\regress_out\\all_v2'
outcol_list = ['PATH Group',
'R^2',
'Elasticity Coefficient',
'Economic Variable',
'Economic Coefficient',
'Fuel Variable',
'Fuel Coefficient',
'M1 Coefficient',
'M2 Coefficient',
'M3 Coefficient',
'M4 Coefficient',
'M5 Coefficient',
'M6 Coefficient',
'M7 Coefficient',
'M8 Coefficient',
'M9 Coefficient',
'M10 Coefficient',
'M11 Coefficient',
'Recession_FRED Coefficient',
'Sandy Coefficient',
'Snow_Median Coefficient',
'Intercept']
out_dict = {key: [] for key in outcol_list}
fname_list = os.listdir(path)
for index, group in enumerate(group_list):
R2_list = []
coef_list = []
for fuel in fuel_list:
for economic in economic_list:
fname = 'regress_summary_{0}_{1}.txt'.format(group, economic)
with open(os.path.join(path,fname)) as f:
lines = f.readlines()
for line in lines:
if line[:9] == 'R-squared':
R2 = float(line.strip().split(' ')[-1])
linesplit = line.split(' ')
if (len(linesplit) > 2):
if (linesplit[1] == 'Fare-1Trip'):
coef = float(linesplit[2])
if R2 is not None:
R2_list.append(R2)
else:
R2_list.append(-999)
if coef is not None:
coef_list.append(coef)
else:
coef_list.append(-999)
if index == 0:
R2_out = pd.DataFrame({'Economic Indicator': economic_list,
group: R2_list})
coef_out | = pd.DataFrame({'Economic Indicator': economic_list,
group: coef_list})
else:
R2_out[group] = R2_l | ist
coef_out[group] = coef_list
R2_out.to_excel('data\\regress_out\\regresssummary_R2.xlsx', index = False)
coef_out.to_excel('data\\regress_out\\regresssummary_coef.xlsx', index = False) |
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License. |
# snippet-sourcedescription:[MyCodeCommitFunction.py demonstrates how to use an AWS Lambda function to return the URLs used for cloning an AWS CodeCo | mmit repository to a CloudWatch log.]
# snippet-service:[codecommit]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[AWS CodeCommit]
# snippet-keyword:[Code Sample]
# snippet-keyword:[GetRepository]
# snippet-sourcetype:[full-example]
# snippet-sourceauthor:[AWS]
# snippet-sourcedate:[2016-03-07]
# snippet-start:[codecommit.python.MyCodeCommitFunction.complete]
import json
import boto3
codecommit = boto3.client('codecommit')
def lambda_handler(event, context):
#Log the updated references from the event
references = { reference['ref'] for reference in event['Records'][0]['codecommit']['references'] }
print("References: " + str(references))
#Get the repository from the event and show its git clone URL
repository = event['Records'][0]['eventSourceARN'].split(':')[5]
try:
response = codecommit.get_repository(repositoryName=repository)
print("Clone URL: " +response['repositoryMetadata']['cloneUrlHttp'])
return response['repositoryMetadata']['cloneUrlHttp']
except Exception as e:
print(e)
print('Error getting repository {}. Make sure it exists and that your repository is in the same region as this function.'.format(repository))
raise e
# snippet-end:[codecommit.python.MyCodeCommitFunction.complete]
|
idom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
from xml.dom.minicompat import *
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
"CDATA": minidom.TypeInfo(None, "cdata"),
"ENUM": minidom.TypeInfo(None, "enumeration"),
"ENTITY": minidom.TypeInfo(None, "entity"),
"ENTITIES": minidom.TypeInfo(None, "entities"),
"ID": minidom.TypeInfo(None, "id"),
"IDREF": minidom.TypeInfo(None, "idref"),
"IDREFS": minidom.TypeInfo(None, "idrefs"),
"NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_ | CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._inter | n_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
    """Document builder that uses Expat to build a ParsedXML.DOM document
    instance."""
    def __init__(self, options=None):
        # options: xmlbuilder.Options selecting which constructs (comments,
        # CDATA sections, entities, ...) are kept in the tree.
        if options is None:
            options = xmlbuilder.Options()
        self._options = options
        if self._options.filter is not None:
            # Wrap the user filter so DOM visibility rules are enforced.
            self._filter = FilterVisibilityController(self._options.filter)
        else:
            self._filter = None
            # This *really* doesn't do anything in this case, so
            # override it with something fast & minimal.
            self._finish_start_element = id
        self._parser = None
        self.reset()
    def createParser(self):
        """Create a new parser object."""
        return expat.ParserCreate()
    def getParser(self):
        """Return the parser object, creating a new one if needed."""
        if not self._parser:
            self._parser = self.createParser()
            # Cache the parser's intern-table setdefault for _intern helpers.
            self._intern_setdefault = self._parser.intern.setdefault
            self._parser.buffer_text = True
            self._parser.ordered_attributes = True
            self._parser.specified_attributes = True
            self.install(self._parser)
        return self._parser
    def reset(self):
        """Free all data structures used during DOM construction."""
        self.document = theDOMImplementation.createDocument(
            EMPTY_NAMESPACE, None, None)
        self.curNode = self.document
        self._elem_info = self.document._elem_info
        self._cdata = False
    def install(self, parser):
        """Install the callbacks needed to build the DOM into the parser."""
        # This creates circular references!
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.first_element_handler
        parser.EndElementHandler = self.end_element_handler
        parser.ProcessingInstructionHandler = self.pi_handler
        # Entity/notation, comment, and CDATA handlers are only wired up when
        # the corresponding option asks for those nodes to be kept.
        if self._options.entities:
            parser.EntityDeclHandler = self.entity_decl_handler
            parser.NotationDeclHandler = self.notation_decl_handler
        if self._options.comments:
            parser.CommentHandler = self.comment_handler
        if self._options.cdata_sections:
            parser.StartCdataSectionHandler = self.start_cdata_section_handler
            parser.EndCdataSectionHandler = self.end_cdata_section_handler
            parser.CharacterDataHandler = self.character_data_handler_cdata
        else:
            parser.CharacterDataHandler = self.character_data_handler
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
        parser.XmlDeclHandler = self.xml_decl_handler
        parser.ElementDeclHandler = self.element_decl_handler
        parser.AttlistDeclHandler = self.attlist_decl_handler
    def parseFile(self, file):
        """Parse a document from a file object, returning the document
        node."""
        parser = self.getParser()
        first_buffer = True
        try:
            while 1:
                # Feed the parser in 16 KiB chunks to bound memory use.
                buffer = file.read(16*1024)
                if not buffer:
                    break
                parser.Parse(buffer, 0)
                # Once the root element has been seen, the internal DTD
                # subset (if any) is fully contained in this first buffer.
                if first_buffer and self.document.documentElement:
                    self._setup_subset(buffer)
                first_buffer = False
            parser.Parse("", True)
        except ParseEscape:
            # Raised by a filter to abort parsing early; partial doc is kept.
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc
    def parseString(self, string):
        """Parse a document from a string, returning the document node."""
        parser = self.getParser()
        try:
            parser.Parse(string, True)
            self._setup_subset(string)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc
    def _setup_subset(self, buffer):
        """Load the internal subset if there might be one."""
        # Only worth re-scanning the buffer when a doctype node exists.
        if self.document.doctype:
            extractor = InternalSubsetExtractor()
            extractor.parseString(buffer)
            subset = extractor.getSubset()
            self.document.doctype.internalSubset = subset
def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
has_internal_subset):
doctype = self.document.implementation.createDocumentType(
doctypeName, publicId, systemId)
doctype.ownerDocument = self.document
_append_child(self.document, doctype)
self.document.doctype = doctype
if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
self.document.doctype = None
del self.document.childNodes[-1]
doctype = None
self._parser.EntityDeclHandler = None
self._parser.NotationDeclHandler = None
if has_internal_subset:
if doctype is not None:
doctype.entities._seq = []
doctype.notations._seq = []
self._parser.CommentHandler = |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-17 19:24
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Ramp model: an axis-aligned bounding box plus a polygon
    # geometry, attached to a Space via a cascading foreign key.
    dependencies = [
        ('mapdata', '0047_remove_mapupdate_changed_geometries'),
    ]
    operations = [
        migrations.CreateModel(
            name='Ramp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Bounding-box columns are indexed for fast spatial prefiltering.
                ('minx', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='min x coordinate')),
                ('miny', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='min y coordinate')),
                ('maxx', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='max x coordinate')),
                ('maxy', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='max y coordinate')),
                ('geometry', c3nav.mapdata.fields.GeometryField(default=None, geomtype='polygon')),
                ('space', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ramps', to='mapdata.Space', verbose_name='space')),
            ],
            options={
                'verbose_name': 'Ramp',
                'verbose_name_plural': 'Ramps',
                'default_related_name': 'ramps',
            },
        ),
    ]
|
def _checkInput(index):
if index < 0:
raise ValueError("Indice negativo non supportato [{}]".format(index))
elif type(index) != int:
raise TypeError("Inserire un intero [tipo input {}]".format(type(index).__name__))
def fib_from_string(index):
    """Return fib(index) by indexing into a hard-coded digit string.

    Only meaningful for 0 <= index <= 6: larger indexes raise IndexError
    and multi-digit Fibonacci numbers cannot be encoded one char each.
    """
    _checkInput(index)
    digits = "".join("0 1 1 2 3 5 8".split())
    return int(digits[index])
def fib_from_list(index):
    """Return fib(index) from a precomputed table (valid for 0 <= index <= 6)."""
    _checkInput(index)
    lookup = (0, 1, 1, 2, 3, 5, 8)
    return lookup[index]
def fib_from_algo(index):
    """Compute fib(index) iteratively in O(index) time and O(1) space."""
    _checkInput(index)
    # Invariant: (previous, current) == (fib(i-1), fib(i)) after i steps,
    # seeded with fib(-1) = 1, fib(0) = 0.
    previous, current = 1, 0
    for _ in range(index):
        previous, current = current, previous + current
    return current
def recursion(index):
    """Return the index-th Fibonacci number via naive double recursion.

    Base cases: fib(0) == 0 and fib(1) == 1 (values <= 1 are echoed back).
    """
    if index > 1:
        return recursion(index - 2) + recursion(index - 1)
    return index
def fib_from_recursion_func(index):
    # Validated wrapper around the naive recursive implementation.
    _checkInput(index)
    return recursion(index)
# Strategy selected as this module's default Fibonacci implementation.
calculate = fib_from_recursion_func
#!/usr/bin/env python
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import operator
import matplotlib
# Set the matplotlib output mode from config if it exists
if not 'matplotlib.pyplot' in sys.modules:
try:
matplotlib.use(analyze_conf.matplotlib_output_mode)
except NameError:
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils, lariat_utils, plot
import math
import multiprocessing, functools, itertools
import cPickle as pickle
def do_work(file,mintime,wayness,lariat_dict):
  """Summarize one job file.

  Returns (mean stall fraction, mean L1 miss fraction, mean CPI,
  executable name, low-memory-bandwidth flag), or a tuple of five Nones
  when the file cannot be read or the executable is unknown.
  """
  failed = (None, None, None, None, None)
  res = plot.get_data(file, mintime, wayness, lariat_dict)
  if res is None:
    return failed
  (ts, ld, tmid,
   read_rate, write_rate, stall_rate, clock_rate, avx_rate, sse_rate, inst_rate,
   meta_rate, l1_rate, l2_rate, l3_rate, load_rate, read_frac, stall_frac) = res
  # Mean DRAM traffic in bytes/s (each transaction moves a 64-byte line).
  mean_mem_rate = scipy.stats.tmean(read_rate + write_rate)*64.0
  ename = tspl_utils.string_shorten(
      ld.comp_name(ld.exc.split('/')[-1], ld.equiv_patterns), 8)
  if ename == 'unknown':
    return failed
  # Flag jobs whose aggregate bandwidth falls below 75 GB/s over 16 units.
  flag = bool(mean_mem_rate < 75.*1000000000./16.)
  return (scipy.stats.tmean(stall_frac),
          scipy.stats.tmean((load_rate - (l1_rate))/load_rate),
          scipy.stats.tmean(clock_rate/inst_rate), ename,
          flag)
def main():
parser = argparse.ArgumentParser(description='Look for imbalance between'
| 'hosts for a pair of keys')
parser.add_argument('filearg', help='File, directory, or quoted'
' glob pattern', nargs='?',default='jobs')
parser.add_argument('-p', help='Set number of processes',
nargs=1, type=int, default=[1])
n=parser.parse_args()
filelist=tspl_utils.getfilelist(n.filearg)
procs = min(len(filelist),n.p[0])
job=pickle.load(open(filelist[0]))
jid=job.id
epoch=job.end_time
ld=lariat_utils.LariatData(jid,end_epoch=epoch,daysback=3,directory=analyze_conf.lariat_path)
if procs < 1:
print 'Must have at least one file'
exit(1)
pool = multiprocessing.Pool(processes=procs)
partial_work=functools.partial(do_work,mintime=3600.,wayness=16,lariat_dict=ld.ld)
results=pool.map(partial_work,filelist)
fig1,ax1=plt.subplots(1,1,figsize=(20,8),dpi=80)
fig2,ax2=plt.subplots(1,1,figsize=(20,8),dpi=80)
maxx=0.
for state in [ True, False ]:
stalls=[]
misses=[]
cpis=[]
enames=[]
for (s,m,cpi,ename,flag) in results:
if (s != None and m > 0. and m < 1.0 and flag==state):
stalls.extend([s])
misses.extend([m])
cpis.extend([cpi])
enames.extend([ename])
markers = itertools.cycle(('o','x','+','^','s','8','p',
'h','*','D','<','>','v','d','.'))
colors = itertools.cycle(('b','g','r','c','m','k','y'))
fmt={}
for e in enames:
if not e in fmt:
fmt[e]=markers.next()+colors.next()
for (s,c,e) in zip(stalls,cpis,enames):
# ax1.plot(numpy.log10(1.-(1.-s)),numpy.log10(c),
maxx=max(maxx,1./(1.-s))
ax1.plot((1./(1.-s)),(c),
marker=fmt[e][0],
markeredgecolor=fmt[e][1],
linestyle='', markerfacecolor='None',
label=e)
ax1.hold=True
ax2.plot((1./(1.-s)),(c),
marker=fmt[e][0],
markeredgecolor=fmt[e][1],
linestyle='', markerfacecolor='None',
label=e)
ax2.hold=True
#ax.plot(numpy.log10(stalls),numpy.log10(cpis),fmt)
#ax.plot(numpy.log10(1.0/(1.0-numpy.array(stalls))),numpy.log10(cpis),fmt)
ax1.set_xscale('log')
ax1.set_xlim(left=0.95,right=1.05*maxx)
ax1.set_yscale('log')
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.45, box.height])
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.45, box.height])
handles=[]
labels=[]
for h,l in zip(*ax1.get_legend_handles_labels()):
if l in labels:
continue
else:
handles.extend([h])
labels.extend([l])
ax1.legend(handles,labels,bbox_to_anchor=(1.05, 1),
loc=2, borderaxespad=0., numpoints=1,ncol=4)
ax1.set_xlabel('log(Cycles per Execution Cycle)')
ax1.set_ylabel('log(CPI)')
handles=[]
labels=[]
for h,l in zip(*ax2.get_legend_handles_labels()):
if l in labels:
continue
else:
handles.extend([h])
labels.extend([l])
ax2.legend(handles,labels,bbox_to_anchor=(1.05, 1),
loc=2, borderaxespad=0., numpoints=1,ncol=4)
ax2.set_xlabel('Cycles per Execution Cycle')
ax2.set_ylabel('CPI')
fname='miss_v_stall_log'
fig1.savefig(fname)
fname='miss_v_stall'
fig2.savefig(fname)
plt.close()
if __name__ == '__main__':
main()
|
def populate(template, values):
    """Fill in the <<tag>> placeholders of a template.

    template: dict with "text" (a string containing <<name>> tags) and
    "parameters" (the tag names to substitute).
    values: maps each parameter name to its replacement value.

    Example:
        populate({"parameters": ("your name",),
                  "text": "hello there <<your name>>!"},
                 {"your name": "buckaroo banzai"})
        => "hello there buckaroo banzai!"

    On a missing entry the error is reported and the partially substituted
    text is returned (best effort).
    """
    result = template["text"]
    name = "None"  # remembered so the error message can name the missing key
    try:
        for name in template["parameters"]:
            result = result.replace("<<%s>>" % name, str(values[name]))
    except KeyError:
        print "Template value dictionary is missing the entry:", name
    return result
### dts file template — top-level device-tree overlay; its <<fragments>> tag
### receives the concatenated fragment/helper templates below.
dtsContents = {
    "parameters": ("type", "part number", "header names", "hardware names", "fragments"),
    "text": """/*
 * Easy <<type>> mux control of <<header names>> (<<hardware names>>)
 */
/dts-v1/;
/plugin/;
/ {
    compatible = "ti,beaglebone", "ti,beaglebone-black";
    /* identification */
    part-number = "<<part number>>";
    /* version = "00A0"; */
    /* state the resources this cape uses */
    exclusive-use =
        /* the pin header uses */
        <<header names>>,
        /* the hardware IP uses */
        <<hardware names>>;
<<fragments>>
};
"""
}
### fragment template — one pinmux state definition for a single header pin.
fragment = {
    "parameters": ("type", "index", "header name", "clean header name", "state name", "offset and mux list"),
    "text": """
    /* <<state name>> state */
    fragment@<<index>> {
        target = <&am33xx_pinmux>;
        __overlay__ {
            <<type>>_<<clean header name>>_<<state name>>: pinmux_<<type>>_<<header name>>_<<state name>> {
                pinctrl-single,pins = <
                    <<offset and mux list>>
                >;
            };
        };
    };
"""
}
### pinctrlTemplate template — one pinctrl-N reference line inside a helper.
pinctrl = {
    "parameters": ("type", "index", "clean header name", "state name"),
    "text": """pinctrl-<<index>> = <&<<type>>_<<clean header name>>_<<state name>>>;"""
}
# Helper node exposing runtime pinmux state switching for one GPIO pin.
pinmuxHelper = {
    "parameters": ("type", "index", "header name", "state names list", "pinctrl list", "gpio index"),
    "text": """
    fragment@<<index>> {
        target = <&ocp>;
        __overlay__ {
            <<type>>-<<header name>>_gpio<<gpio index>> {
                compatible = "bone-pinmux-helper";
                status = "okay";
                pinctrl-names = <<state names list>>;
                <<pinctrl list>>
            };
        };
    };
"""
}
# Helper node driving one header pin as a gpio-leds LED.
ledHelper = {
    "parameters": ("index", "header name", "gpio bank + 1", "gpio pin", "output pinctrl entry"),
    "text": """
    fragment@<<index>> {
        target = <&ocp>;
        __overlay__ {
            led_<<header name>>_helper {
                compatible = "gpio-leds";
                pinctrl-names = "default";
                <<output pinctrl entry>>
                leds-<<header name>> {
                    label = "leds:<<header name>>";
                    gpios = <&gpio<<gpio bank + 1>> <<gpio pin>> 0>;
                    linux,default-trigger = "none";
                    default-state = "off";
                };
            };
        };
    };
"""
}
# Fragment toggling the PRUSS subsystem on or off.
prussHelper = {
    "parameters": ("status", "index"),
    "text": """
    fragment@<<index>> {
        target = <&pruss>;
        __overlay__ {
            status = "<<status>>";
        };
    };
"""
}
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
class BomLine(Model):
    # One material line of a bill of materials: which product is consumed,
    # how much, and how the raw material is sourced and issued.
    _name = "bom.line"
    _fields = {
        "bom_id": fields.Many2One("bom", "BoM", required=True, on_delete="cascade"),
        "product_id": fields.Many2One("product", "Product", required=True),
        # Quantity of this component needed per unit of the finished product.
        "qty": fields.Decimal("Qty", required=True, scale=6),
        "uom_id": fields.Many2One("uom", "UoM", required=True),
        "location_id": fields.Many2One("stock.location", "RM Warehouse"),
        "container": fields.Selection([["sale", "From Sales Order"]], "RM Container"),
        "lot": fields.Selection([["production", "From Production Order"]], "RM Lot"),
        "issue_method": fields.Selection([["manual", "Manual"], ["backflush", "Backflush"]], "Issue Method"),
        "qty2": fields.Decimal("Qty2", scale=6),
        "notes": fields.Text("Notes"),
    }
BomLine.register()
|
#!/usr/bin/python
import re
# Interactive expression validator (Python 2): scans the input character by
# character, counting letters, digits, operators and brackets, and prints
# "valid expression"/"invalid expression" plus a numeric diagnostic code.
userInput = raw_input("input equation\n")
numCount = 0
operandCount = 0
entryBracketCount = 0
exitBracketCount = 0
charCount = 0
endOfLine = len(userInput) - 1
for i in range(len(userInput)):
    # NOTE(review): '\s*' inside a character class matches the literal
    # characters '\', 's' and '*', not "optional whitespace" — confirm intent.
    if (re.search('[\s*a-z\s*A-Z]+', userInput[i])):
        charCount = charCount + 1
        print operandCount, " 1"
    elif (re.search('[\s*0-9]+', userInput[i])):
        numCount = numCount + 1
        print operandCount, " 2"
    elif (re.search('[\*]', userInput[i])):
        # '*' is detected here but not counted as an operator (see below).
        print 'TRUE'
#        operandCount = operandCount + 1
#        print operandCount, " 3.5"
#    elif (re.search('[\s*\+|\s*\-|\s*\/]+', userInput[i])):
    # NOTE(review): in '[+-/*]' the '+-/' span is a character RANGE covering
    # ',', '.' and '/' as well — probably '[-+/*]' or escaping was intended.
    elif (re.search('[+-/*]+', userInput[i])):
        operandCount = operandCount + 1
        print operandCount, " 3"
#        if(re.search('[\s*\+|\s*\-|\s*\/]+', userInput[endOfLine])):
        # An expression may not end with an operator.
        if(re.search('[+-/*]+', userInput[endOfLine])):
            print "invalid expression"
            print "1"
            exit(0)
        else:
            # Operator must be preceded by a letter or digit.
            if((re.search('[\s*a-zA-Z]+', userInput[i - 1])) or (re.search('[\s*\d]+', userInput[i - 1]))):
                continue
            else:
                print 'invalid expression'
                print '2'
                exit(0)
            # NOTE(review): everything below is unreachable — both branches
            # above either continue or exit.
            if(re.search('[\s*\d]+', userInput[i - 1])):
                continue
            else:
                print 'invalid expression'
                print '3'
                exit(0)
            if(re.search('[\s*a-zA-Z]+', userInput[i + 1])):
                continue
            elif(re.search('[\s*\d]+', userInput[i + 1])):
                continue
            elif (re.search('[\(]+', userInput[i + 1])):
                continue
            elif (re.search('[\)]+', userInput[i + 1])):
                continue
            else:
                print 'invalid expression'
                print '4'
                exit(0)
    elif (re.search('[\(]+', userInput[i])):
        entryBracketCount = entryBracketCount + 1
        print operandCount, " 4"
    elif (re.search('[\)]+', userInput[i])):
        exitBracketCount = exitBracketCount + 1
        print operandCount, " 5"
        if(re.search('[\)]+', userInput[endOfLine])):
            continue
        else:
            # A closing bracket may not be immediately followed by '('.
            if(re.search('[\(]+', userInput[i + 1])):
                print 'invalid expression'
                print '5'
                exit(0)
# Final structural checks: balanced brackets, at least one operator, and at
# least one letter or digit.
print operandCount, " 6"
if (entryBracketCount != exitBracketCount):
    print "invalid expression"
    print '6'
    exit(0)
elif operandCount == 0:
    print operandCount
    print "invalid expression"
    print '7'
    exit(0)
elif ((numCount == 0) and (charCount == 0)):
    print "invalid expression"
    print '8'
    exit(0)
else:
    print "valid expression"
|
"""Support for Zigbee switches."""
import voluptuous as vol
from homeassistant.components.switch import SwitchDevice
from . import PLATFORM_SCHEMA, ZigBeeDigitalOut, ZigBeeDigitalOutConfig
# Config key choosing which physical level ("high"/"low") counts as "on".
CONF_ON_STATE = "on_state"
DEFAULT_ON_STATE = "high"
STATES = ["high", "low"]
# Extend the shared Zigbee platform schema with the optional on_state option.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_ON_STATE): vol.In(STATES)})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Zigbee switch platform."""
    # Wrap the validated config in a digital-out pin config and register a
    # single switch entity for it.
    add_entities([ZigBeeSwitch(hass, ZigBeeDigitalOutConfig(config))])
class ZigBeeSwitch(ZigBeeDigitalOut, SwitchDevice):
    """Representation of a Zigbee Digital Out device."""
    # All behavior comes from ZigBeeDigitalOut; this subclass only mixes in
    # the SwitchDevice interface so Home Assistant treats it as a switch.
    pass
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Python 3.2 code
#
# Copyright (c) 2012 Jeff Smits
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with t | his program. If not, see <http://www.gnu.org/licenses/>.
#
# C3P - C-compatible code preprocessor
# This commandline tool reads a file and expands macros.
#
# This file is a utility file and doesn't contain the whole tool | .
# Also it does not run standalone.
#
# This file imports all the tests
from .acceptance_tests import Acc_test
from .unit_tests import * |
#!/usr/bin/env python2
#
# wsi_bot_codebook3
#
# Version 3 of codebook construction:
#
# -uses OpenCV for faster operation - but different local descriptors than in the 1st version;
# -uses annotation files for defining the regions from where the descriptors are to be
# extracted
# - try to optimize the codebook with respect to some class labels
from __future__ import (absolute_import, division, print_function, unicode_literals)
__version__ = 0.1
__author__ = 'Vlad Popovici'
import os
import argparse as opt
import numpy as np
import numpy.linalg
from scipy.stats import ttest_ind
import skimage.draw
import skimage.io
from skimage.exposure import equalize_adapthist, rescale_intensity
import cv2
import cv2.xfeatures2d
from sklearn.cluster import MiniBatchKMeans
from sklearn.lda import LDA
from stain.he import rgb2he
from util.storage import ModelPersistence
def find_in_list(_value, _list):
    """
    Returns the indexes of all occurrences of value in a list.
    """
    hits = [i for i, v in enumerate(_list) if v == _value]
    return np.array(hits, dtype=int)
def main():
    """Build a bag-of-features codebook from annotated image regions.

    Reads (image, annotation, label) triplets from ``in_file``, extracts
    SURF descriptors inside each annotated polygon (on the H&E H-channel),
    selects the codebook size k that maximizes an LDA score on codeword
    frequencies, and pickles the final quantizer plus per-cluster distance
    statistics into ``out_file``.
    """
    p = opt.ArgumentParser(description="""
    Extracts features from annotated regions and constructs a codebook of a given size.
    """)
    p.add_argument('in_file', action='store', help='a file with image file, annotation file and label (0/1)')
    p.add_argument('out_file', action='store', help='resulting model file name')
    #p.add_argument('codebook_size', action='store', help='codebook size', type=int)
    p.add_argument('-t', '--threshold', action='store', type=int, default=5000,
                   help='Hessian threshold for SURF features.')
    p.add_argument('-s', '--standardize', action='store_true', default=False,
                   help='should the features be standardized before codebook construction?')
    p.add_argument('-v', '--verbose', action='store_true', help='verbose?')

    args = p.parse_args()
    th = args.threshold

    all_image_names, all_descriptors = [], []
    all_roi = []
    y = []
    unique_image_names = []
    with open(args.in_file, mode='r') as fin:
        for l in fin.readlines():
            l = l.strip()
            if len(l) == 0:
                break  # NOTE(review): stops at the first blank line — confirm intended
            # file names: image, its annotation, and the class label
            img_file, annot_file, lbl = [z_ for z_ in l.split()][0:3]
            y.append(int(lbl))

            if args.verbose:
                print("Image:", img_file)

            img = cv2.imread(img_file)
            # Annotation file holds whitespace-separated "x y" pairs.
            coords = np.fromfile(annot_file, dtype=int, sep=' ')
            # BUG FIX: integer division — with ``from __future__ import
            # division`` the original ``coords.size/2`` yielded a float shape.
            coords = np.reshape(coords, (coords.size // 2, 2), order='C')

            # get the bounding box:
            xmin, ymin = coords.min(axis=0)
            xmax, ymax = coords.max(axis=0)

            if args.verbose:
                print("\t...H&E extraction")

            img = img[ymin:ymax+2, xmin:xmax+2, :]  # keep only the region of interest
            img_h, _ = rgb2he(img, normalize=True)  # get the H- component
            img_h = equalize_adapthist(img_h)
            img_h = rescale_intensity(img_h, out_range=(0,255))

            # make sure the dtype is right for image and the mask: OpenCV is sensitive to data type
            img_h = img_h.astype(np.uint8)

            if args.verbose:
                print("\t...building mask")

            mask = np.zeros(img_h.shape, dtype=np.uint8)
            r, c = skimage.draw.polygon(coords[:,1]-ymin, coords[:,0]-xmin)  # adapt to new image...
            mask[r,c] = 1  # everything outside the region is black

            if args.verbose:
                print("\t...feature detection and computation")

            img_h *= mask
            feat = cv2.xfeatures2d.SURF_create(hessianThreshold=th)
            keyp, desc = feat.detectAndCompute(img_h, mask)

            if args.verbose:
                print("\t...", str(len(keyp)), "features extracted")

            all_descriptors.extend(desc)
            all_image_names.extend([img_file] * len(keyp))
            unique_image_names.append(img_file)
    # end for

    X = np.hstack(all_descriptors)
    X = np.reshape(X, (len(all_descriptors), all_descriptors[0].size), order='C')

    # BUG FIX: ``Xm``/``Xs`` are stored in the model unconditionally below,
    # but were only defined under --standardize (NameError otherwise); use
    # identity defaults when no standardization is requested.
    Xm = np.zeros(X.shape[1])
    Xs = np.ones(X.shape[1])
    if args.standardize:
        # make sure each variable (column) is mean-centered and has unit standard deviation
        Xm = np.mean(X, axis=0)
        Xs = np.std(X, axis=0)
        # Guard near-constant columns against division by ~0 (the original
        # compared against 1e-16, which with default tolerances is the same
        # "is essentially zero" test, just obscured).
        Xs[np.isclose(Xs, 0.0)] = 1.0
        X = (X - Xm) / Xs

    y = np.array(y, dtype=int)

    rng = np.random.RandomState(0)
    acc = []  # will keep accuracy of the classifier
    vqs = []  # all quantizers, to find the best

    for k in np.arange(10, 121, 10):
        # Method:
        # -generate a codebook with k codewords
        # -re-code the data
        # -compute frequencies
        # -estimate classification on best 10 features
        if args.verbose:
            print("\nK-means clustering (k =", str(k), ")")
            print("\t...with", str(X.shape[0]), "points")

        #-codebook and re-coding
        vq = MiniBatchKMeans(n_clusters=k, random_state=rng,
                             batch_size=500, compute_labels=True, verbose=False)  # vector quantizer
        vq.fit(X)
        vqs.append(vq)

        #-codeword frequencies, normalized per image
        frq = np.zeros((len(unique_image_names), k))
        for i in range(vq.labels_.size):
            frq[unique_image_names.index(all_image_names[i]), vq.labels_[i]] += 1.0

        for i in range(len(unique_image_names)):
            if frq[i, :].sum() > 0:
                frq[i, :] /= frq[i, :].sum()

        if args.verbose:
            print("...\tfeature selection (t-test)")
        pv = np.ones(k)
        for i in range(k):
            _, pv[i] = ttest_ind(frq[y == 0, i], frq[y == 1, i])
        idx = np.argsort(pv)  # order of the p-values

        if args.verbose:
            print("\t...classification performance estimation")
        clsf = LDA(solver='lsqr', shrinkage='auto').fit(frq[:,idx[:10]], y)  # keep top 10 features
        acc.append(clsf.score(frq[:, idx[:10]], y))

    acc = np.array(acc)
    k = np.arange(10, 121, 10)[acc.argmax()]  # best k
    if args.verbose:
        print("\nOptimal codebook size:", str(k))

    # final codebook:
    vq = vqs[acc.argmax()]

    # compute the average distance and std.dev. of the points in each cluster:
    avg_dist = np.zeros(k)
    sd_dist = np.zeros(k)
    # BUG FIX: the original looped ``for k in range(0, k)``, shadowing and
    # clobbering the codebook size; use a separate loop index.
    for j in range(k):
        d = numpy.linalg.norm(X[vq.labels_ == j, :] - vq.cluster_centers_[j, :], axis=1)
        avg_dist[j] = d.mean()
        sd_dist[j] = d.std()

    with ModelPersistence(args.out_file, 'c', format='pickle') as d:
        d['codebook'] = vq
        d['shift'] = Xm
        d['scale'] = Xs
        d['standardize'] = args.standardize
        d['avg_dist_to_centroid'] = avg_dist
        d['stddev_dist_to_centroid'] = sd_dist

    return True


if __name__ == '__main__':
    main()
|
"""Image renderer modul | e."""
from mfr.core import RenderResult
def render_img_tag(fp, src=None, alt=''):
    """A simple image tag renderer.

    :param fp: File pointer
    :param src: Path to file; falls back to the file pointer's name
    :param alt: Alternate text for the image
    :return: RenderResult object containing the content html
    """
    if not src:
        # Default src to the filename
        src = fp.name
    html = '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt)
    return RenderResult(html)
|
self._disable_message_config()
if not self.text:
raise ValueError('No text configured')
kw = {}
kw.upd | ate(self.config)
kw['message'] = self
try:
notifier = zope.component.getUtility(
zeit.push.interfaces.IPushNotifier, name=self.type)
notifier.send(self.text, self.url, **kw)
self.log_success()
log.info('Push notification for %s sent', self.type)
except Exception, e:
self.log_error(str(e))
log.error(u'Er | ror during push to %s with config %s',
self.type, self.config, exc_info=True)
    def _disable_message_config(self):
        # Disable this message's service entry so the push is not repeated.
        push = zeit.push.interfaces.IPushMessages(self.context)
        push.set(self.config, enabled=False)
    @property
    def text(self):
        # Notification text, read from the attribute named by
        # ``get_text_from`` on the concrete message class.
        push = zeit.push.interfaces.IPushMessages(self.context)
        return getattr(push, self.get_text_from)
    @property
    def type(self):
        # The grok utility name of the concrete class; doubles as the name
        # of the IPushNotifier utility used to send the message.
        return self.__class__.__dict__['grokcore.component.directive.name']
    @property
    def url(self):
        # Public target URL: the content's push URL with the CMS namespace
        # replaced by the configured public prefix.
        config = zope.app.appsetup.product.getProductConfiguration(
            'zeit.push')
        return zeit.push.interfaces.IPushURL(self.context).replace(
            zeit.cms.interfaces.ID_NAMESPACE, config['push-target-url'])
    @zope.cachedescriptors.property.Lazy
    def object_log(self):
        # Object log of the content object; computed once per instance.
        return zeit.objectlog.interfaces.ILog(self.context)
    def log_success(self):
        # Record a successful push in the object log.
        self.object_log.log(_(
            'Push notification for "${name}" sent.'
            ' (Message: "${message}", Details: ${details})',
            mapping={'name': self.type.capitalize(),
                     'message': self.text,
                     'details': self.log_message_details}))
    def log_error(self, reason):
        # Record a failed push attempt in the object log.
        self.object_log.log(_(
            'Error during push to ${name} ${details}: ${reason}',
            mapping={'name': self.type.capitalize(),
                     'details': self.log_message_details,
                     'reason': reason}))
    @property
    def log_message_details(self):
        # Extra detail string for log entries; subclasses may override.
        return '-'
@grok.adapter(zeit.cms.interfaces.ICMSContent)
@grok.implementer(zeit.push.interfaces.IPushURL)
def default_push_url(context):
    # By default, push notifications link to the content's uniqueId.
    return context.uniqueId
class AccountData(grok.Adapter):
    # Adapter exposing per-service push settings (facebook/twitter accounts)
    # as flat attributes, all backed by IPushMessages service entries.
    grok.context(zeit.cms.interfaces.ICMSContent)
    grok.implements(zeit.push.interfaces.IAccountData)
    def __init__(self, context):
        super(AccountData, self).__init__(context)
        self.__parent__ = context  # make security work
    @property
    def push(self):
        # The IPushMessages adapter that stores all service configuration.
        return zeit.push.interfaces.IPushMessages(self.context)
    @property
    def facebook_main_enabled(self):
        # Enabled flag of the main facebook account's service entry.
        source = zeit.push.interfaces.facebookAccountSource(None)
        service = self.push.get(type='facebook', account=source.MAIN_ACCOUNT)
        return service and service.get('enabled')
    @facebook_main_enabled.setter
    def facebook_main_enabled(self, value):
        source = zeit.push.interfaces.facebookAccountSource(None)
        self.push.set(dict(
            type='facebook', account=source.MAIN_ACCOUNT),
            enabled=value)
    # We cannot use the key ``text``, since the first positional parameter of
    # IPushNotifier.send() is also called text, which causes TypeError.
    @property
    def facebook_main_text(self):
        # Optional override text for the main facebook account.
        source = zeit.push.interfaces.facebookAccountSource(None)
        service = self.push.get(type='facebook', account=source.MAIN_ACCOUNT)
        return service and service.get('override_text')
    @facebook_main_text.setter
    def facebook_main_text(self, value):
        source = zeit.push.interfaces.facebookAccountSource(None)
        self.push.set(dict(
            type='facebook', account=source.MAIN_ACCOUNT),
            override_text=value)
    @property
    def facebook_magazin_enabled(self):
        # Same pattern as facebook_main_enabled, for the Magazin account.
        source = zeit.push.interfaces.facebookAccountSource(None)
        service = self.push.get(
            type='facebook', account=source.MAGAZIN_ACCOUNT)
        return service and service.get('enabled')
    @facebook_magazin_enabled.setter
    def facebook_magazin_enabled(self, value):
        source = zeit.push.interfaces.facebookAccountSource(None)
        self.push.set(dict(
            type='facebook', account=source.MAGAZIN_ACCOUNT),
            enabled=value)
    @property
    def facebook_magazin_text(self):
        # Optional override text for the Magazin account.
        source = zeit.push.interfaces.facebookAccountSource(None)
        service = self.push.get(
            type='facebook', account=source.MAGAZIN_ACCOUNT)
        return service and service.get('override_text')
    @facebook_magazin_text.setter
    def facebook_magazin_text(self, value):
        source = zeit.push.interfaces.facebookAccountSource(None)
        self.push.set(dict(
            type='facebook', account=source.MAGAZIN_ACCOUNT),
            override_text=value)
    @property
    def facebook_campus_enabled(self):
        # Same pattern again, for the Campus account.
        source = zeit.push.interfaces.facebookAccountSource(None)
        service = self.push.get(type='facebook', account=source.CAMPUS_ACCOUNT)
        return service and service.get('enabled')
    @facebook_campus_enabled.setter
    def facebook_campus_enabled(self, value):
        source = zeit.push.interfaces.facebookAccountSource(None)
        self.push.set(dict(
            type='facebook', account=source.CAMPUS_ACCOUNT),
            enabled=value)
    @property
    def facebook_campus_text(self):
        # Optional override text for the Campus account.
        source = zeit.push.interfaces.facebookAccountSource(None)
        service = self.push.get(
            type='facebook', account=source.CAMPUS_ACCOUNT)
        return service and service.get('override_text')
    @facebook_campus_text.setter
    def facebook_campus_text(self, value):
        source = zeit.push.interfaces.facebookAccountSource(None)
        self.push.set(dict(
            type='facebook', account=source.CAMPUS_ACCOUNT),
            override_text=value)
    @property
    def twitter_main_enabled(self):
        # Enabled flag of the main twitter account's service entry.
        source = zeit.push.interfaces.twitterAccountSource(None)
        service = self.push.get(type='twitter', account=source.MAIN_ACCOUNT)
        return service and service.get('enabled')
    @twitter_main_enabled.setter
    def twitter_main_enabled(self, value):
        source = zeit.push.interfaces.twitterAccountSource(None)
        self.push.set(dict(
            type='twitter', account=source.MAIN_ACCOUNT),
            enabled=value)
    @property
    def twitter_ressort_text(self):
        # Override text of the ressort (non-main) twitter service.
        return self._nonmain_twitter_service.get('override_text')
    @twitter_ressort_text.setter
    def twitter_ressort_text(self, value):
        self.push.set(
            dict(type='twitter', variant='ressort'), override_text=value)
    @property
    def twitter_ressort(self):
        # Account name used by the ressort twitter service.
        return self._nonmain_twitter_service.get('account')
    @twitter_ressort.setter
    def twitter_ressort(self, value):
        service = self._nonmain_twitter_service
        enabled = None
        # BBB `variant` was introduced in zeit.push-1.21
        # Legacy entries lack `variant`: delete them, remember their enabled
        # state, and re-apply it after writing the new-style entry.
        if service and 'variant' not in service:
            self.push.delete(service)
            enabled = service.get('enabled')
        self.push.set(
            dict(type='twitter', variant='ressort'), account=value)
        if enabled is not None:
            self.twitter_ressort_enabled = enabled
    @property
    def twitter_ressort_enabled(self):
        return self._nonmain_twitter_service.get('enabled')
    @twitter_ressort_enabled.setter
    def twitter_ressort_enabled(self, value):
        service = self._nonmain_twitter_service
        account = None
        # BBB `variant` was introduced in zeit.push-1.21
        # Mirror of the twitter_ressort setter: migrate legacy entries while
        # preserving their account value.
        if service and 'variant' not in service:
            self.push.delete(service)
            account = service.get('account')
        self.push.set(
            dict(type='twitter', variant='ressort'), enabled=value)
        if account is not None:
            self.twitter_ressort = account
@property
def _nonmain_twitter_service(self):
source = zeit.push.interfaces.twitterAccountSource(None)
for service in self.push.message_config:
if service['type'] ! |
import sys
import os
from scale_model import StartupDataModel, VCModel
from flask.ext.restful import Resource, reqparse
from flask import Flask, jsonify, request, make_response
import os
from database import db
from flask.ext.security import current_user
from json import dumps
class Scale_DAO(object):
def __init__(self):
print 'making scale DAO'
self.user_scale = StartupDataModel.query.filter_by(username=current_user.email).order_by(StartupDataModel.date.desc()).first()
print self.user_scale
class Scale_resource(Resource):
    """REST resource for reading and writing the current user's startup data."""
    def get(self, **kwargs):
        """
        Return the user's latest startup data as JSON.
        TODO: get old data to render in form as default
        """
        #check= request.args.get('check')
        if current_user.is_anonymous():
            return jsonify(status=400)
        scale = Scale_DAO()
        if scale.user_scale:
            return make_response(dumps(scale.user_scale.as_dict()))
        else:
            return jsonify(scale_authed=False)
    def post(self):
        """
        Create a new startup-data record from the JSON request body.
        TODO: add update instead of just creating whole new record
        """
        if current_user.is_anonymous():
            return jsonify(msg="You are no longer logged in",status=400)
        try:
            data = request.json
            cb_url = data.get('crunchbase_url')
            al_url = data.get('angellist_url')
            description = data.get('description')
            new_data = StartupDataModel(username=current_user.email, crunchbase_url=cb_url, angellist_url=al_url, description=description)
            db.session.add(new_data)
            db.session.commit()
            return jsonify(status=200,msg="Data added successfully!")
        except Exception:
            # Bug fix: the original used a bare `except:` and discarded the
            # jsonify() result (no `return`), so the view returned None.
            # Roll back the failed transaction and return the error payload.
            db.session.rollback()
            return jsonify(msg="Error adding your data.",status=500)
|
import numpy
# from nmt import train
# from nmtlm import train
from nmt import train
def main(job_id, params):
print params
trainerr, validerr, testerr = train(saveto=params['model'][0],
reload_=params['reload'][0],
dim_word=params['dim_word'][0],
dim=params['dim'][0],
n_words=params['n-words'][0],
n_words_src=params['n-words'][0],
decay_c=params['decay-c'][0],
clip_c=params['clip-c'][0],
lrate=params['learning-rate'][0],
optimizer=params['optimizer'][0],
maxlen=50,
batch_size=16,
valid_batch_size=16,
validFreq=5000,
dispFreq=10,
| saveFreq=5000,
sampleFreq=10,
use_dropout=params['use-dropout'][0])
return validerr
# Standalone invocation: train with a default hyperparameter configuration.
if __name__ == '__main__':
    main(0, {
        'model': ['model.npz'],
        'dim_word': [384],
        'dim': [512],
        'n-words': [30000],
        'optimizer': ['adam'],
        'decay-c': [0.],
        'clip-c': [10.],
        'use-dropout': [False],
        'learning-rate': [0.0001],
        'reload': [False]})
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module release metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
version_added: "1.8"
description:
- Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
options:
component:
description:
- "The name of the component being deployed. Ex: billing"
required: true
aliases: ['name']
version:
description:
- The deployment version.
required: true
token:
description:
- API token.
required: true
state:
description:
- State of the deployment.
required: true
choices: ['started', 'finished', 'failed']
hosts:
description:
- Name of affected host name. Can be a list.
required: false
default: machine's hostname
aliases: ['host']
env:
description:
- The environment name, typically 'production', 'staging', etc.
required: false
owner:
description:
- The person responsible for the deployment.
required: false
description:
description:
- Free text description of the deployment.
required: false
url:
description:
- Base URL of the API server.
required: False
default: https://api.bigpanda.io
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
# informational: requirements for nodes
requirements: [ ]
'''
EXAMPLES = '''
- bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
state: started
- bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
state: finished
# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
- bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
hosts: '{{ ansible_hostname }}'
state: started
delegate_to: localhost
register: deployment
- bigpanda:
component: '{{ deployment.component }}'
version: '{{ deployment.version }}'
token: '{{ deployment.token }}'
state: finished
delegate_to: localhost
'''
# ===========================================
# Module execution.
#
import json
import socket
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
def main():
    """Ansible entry point: notify BigPanda that a deployment started,
    finished, or failed, and return the deployment facts."""
    module = AnsibleModule(
        argument_spec=dict(
            component=dict(required=True, aliases=['name']),
            version=dict(required=True),
            token=dict(required=True, no_log=True),
            state=dict(required=True, choices=['started', 'finished', 'failed']),
            hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
            env=dict(required=False),
            owner=dict(required=False),
            description=dict(required=False),
            message=dict(required=False),
            source_system=dict(required=False, default='ansible'),
            validate_certs=dict(default='yes', type='bool'),
            url=dict(required=False, default='https://api.bigpanda.io'),
        ),
        supports_check_mode=True,
        check_invalid_arguments=False,
    )
    token = module.params['token']
    state = module.params['state']
    url = module.params['url']
    # Build the common request body
    body = dict()
    for k in ('component', 'version', 'hosts'):
        v = module.params[k]
        if v is not None:
            body[k] = v
    # The API expects a list of hosts even when a single one was given.
    if not isinstance(body['hosts'], list):
        body['hosts'] = [body['hosts']]
    # Insert state-specific attributes to body
    if state == 'started':
        for k in ('source_system', 'env', 'owner', 'description'):
            v = module.params[k]
            if v is not None:
                body[k] = v
        request_url = url + '/data/events/deployments/start'
    else:
        message = module.params['message']
        if message is not None:
            # The deployment-end API names this field 'errorMessage'.
            body['errorMessage'] = message
        if state == 'finished':
            body['status'] = 'success'
        else:
            body['status'] = 'failure'
        request_url = url + '/data/events/deployments/end'
    # Build the deployment object we return
    deployment = dict(token=token, url=url)
    deployment.update(body)
    # Expose the message under its user-facing key in the returned facts.
    if 'errorMessage' in deployment:
        message = deployment.pop('errorMessage')
        deployment['message'] = message
    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True, **deployment)
    # Send the data to bigpanda
    data = json.dumps(body)
    headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
    try:
        response, info = fetch_url(module, request_url, data=data, headers=headers)
        if info['status'] == 200:
            module.exit_json(changed=True, **deployment)
        else:
            module.fail_json(msg=json.dumps(info))
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
import socket
import random
from PIL import Image
import json
import sys, getopt
import math
import pika
# Screen VARS
# Target screen geometry; pixel coordinates are shifted by these offsets.
offset_x = 80
offset_y = 24
screen_width = 240
screen_height = 240
# Internal options
queueAddress = ''
fileName = ''  # input image path, set from -i/--file
workers = 36  # number of consumers; must be a perfect square (see main)
Matrix = []
def main(argv):
    """Parse CLI options (-i/--file, -w/--workers), validate the worker
    count, and pump the image into the queue.

    Fix: removed the unused local variable `inputFile`.
    """
    global fileName, workers
    try:
        opts, args = getopt.getopt(argv, "hi:w:", ["file=", "workers="])
    except getopt.GetoptError:
        print('img_to_queue.py -i <inputfile> -w workers')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('img_to_queue.py -i <inputfile> -w workers')
            sys.exit()
        elif opt in ("-i", "--file"):
            fileName = arg
            print("File to process: " + fileName)
        elif opt in ("-w", "--workers"):
            workers = int(arg)
            # The screen is tiled into sqrt(workers) x sqrt(workers) slices,
            # so the worker count must be a perfect square.
            if (math.sqrt(float(workers)) - int(math.sqrt(float(workers))) > 0):
                print('The square root of amount of workers is not a whole numbers. GTFO!')
                sys.exit()
            print("Amount of available workers: " + str(workers))
    pompImage()
def addPixelToWorkFile(x, y, r, g, b, index_x, index_y, Matrix):
    """Append one pixel record (screen coords plus RRGGBB hex color) to the
    tile at Matrix[index_x][index_y]."""
    color = "%0.2X%0.2X%0.2X" % (r, g, b)
    Matrix[index_x][index_y].append({'x': x, 'y': y, 'rgb': color})
def pompImage():
    """Load the input image, downscale it to the 240x240 screen, split it into
    per-worker tiles, and send the tiles to the queue."""
    print("Processiong image to JSON")
    im = Image.open(fileName).convert('RGB')
    im.thumbnail((240, 240), Image.ANTIALIAS)
    _, _, width, height = im.getbbox()
    # start with x and y index 1
    # Each worker handles a slice_size x slice_size tile of the screen.
    slice_size = int(screen_width / int(math.sqrt(workers)))
    amount_of_keys = int(screen_width / slice_size)
    print(amount_of_keys)
    w, h = amount_of_keys, amount_of_keys
    # Local tile grid (shadows the unused module-level Matrix).
    Matrix = [[[] for x in range(w)] for y in range(h)]
    # workFile = [[0 for x in range(amount_of_keys)] for y in range(amount_of_keys)]
    for x in range(width):
        index_x = int((x / slice_size))
        for y in range(height):
            r, g, b = im.getpixel((x, y))
            index_y = int((y / slice_size))
            addPixelToWorkFile(x + offset_x, y + offset_y, r, g, b, index_x, index_y, Matrix)
            # print("Current index x:"+str(index_x)+" y: "+str(index_y)+" WORKER:"+str(index_y*index_x))
    sendToQueue(Matrix)
def sendToQueue(arrayOfWorkers):
    """Publish each tile's pixel list as a JSON message on the local
    'pomper' queue (purging any stale messages first)."""
    credentials = pika.PlainCredentials(username='pomper', password='pomper')
    parameters = pika.ConnectionParameters(host='localhost',
                                           credentials=credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.queue_declare(queue='pomper', durable=False)
    channel.queue_purge(queue='pomper')
    for worker in arrayOfWorkers:
        for pixels in worker:
            channel.basic_publish(exchange='',
                                  routing_key='pomper',
                                  body=json.dumps(pixels))
# Script entry point: forward the command-line args (minus argv[0]).
if __name__ == "__main__":
    main(sys.argv[1:])
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITH | OUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language gov | erning permissions and limitations
# under the License.
from sqlalchemy import Column, Table, MetaData, String
def upgrade(migrate_engine):
    """Rename networks.dns to dns1 and add a second dns2 column."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    networks = Table('networks', metadata, autoload=True)
    networks.c.dns.alter(name='dns1')
    networks.create_column(Column('dns2', String(255)))
def downgrade(migrate_engine):
    """Revert: rename dns1 back to dns and drop the dns2 column."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    networks = Table('networks', metadata, autoload=True)
    networks.c.dns1.alter(name='dns')
    networks.drop_column('dns2')
|
# Generated by Django 2.1. | 5 on 2019-10-01 19:43
from django.db import migrations, models
# Auto-generated migration: redefines Dataset.dts_type (choices/help_text
# metadata of the single-character type discriminator).
class Migration(migrations.Migration):
    dependencies = [
        ('comment', '0005_auto_20191001_1559'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='dts_type',
            field=models.CharField(choices=[('0', 'User Comment'), ('1', 'Validation History'), ('2', 'Reported Issue')], default='0', help_text='Differentiate user comments from automatic validation or defect comments.', max_length=1, verbose_name='Type'),
        ),
    ]
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozm | an@gmx.at>
# Purpose: test mixin Clipping
# Created: 31.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: GPLv3
import unittest
from svgwrite.mixins import Clipping
from svgwrite.base import BaseElement
# Minimal concrete element that mixes in Clipping, so the mixin can be tested.
class SVGMock(BaseElement, Clipping):
    elementname = 'svg'
class TestClipping(unittest.TestCase):
    """Unit tests for the Clipping mixin's clip_rect() helper."""
    def test_clip_rect_numbers(self):
        element = SVGMock(debug=True)
        element.clip_rect(1, 2, 3, 4)
        self.assertEqual('rect(1,2,3,4)', element['clip'])
    def test_clip_rect_auto(self):
        element = SVGMock(debug=True)
        element.clip_rect('auto', 'auto', 'auto', 'auto')
        self.assertEqual('rect(auto,auto,auto,auto)', element['clip'])
# Allow running this test module directly.
if __name__=='__main__':
    unittest.main()
from typing import Iterable, Callable, Optional, Any, List, Iterator
from dupescan.fs._fileentry import FileEntry
from dupescan.fs._root import Root
from dupescan.types import AnyPath
# Predicate deciding whether a FileEntry is included in a walk.
FSPredicate = Callable[[FileEntry], bool]
# Callback receiving EnvironmentErrors encountered during a walk.
ErrorHandler = Callable[[EnvironmentError], Any]
def catch_filter(inner_filter: FSPredicate, error_handler_func: ErrorHandler) -> FSPredicate:
    """Wrap *inner_filter* so that EnvironmentErrors become a False result.

    With no inner filter at all, the returned predicate accepts everything
    and can never raise, so error_handler_func is not involved. Otherwise
    any EnvironmentError raised by the filter is forwarded to
    error_handler_func (when provided) and the entry is excluded.
    """
    if inner_filter is None:
        def accept_all(*args, **kwargs):
            return True
        return accept_all

    def guarded(*args, **kwargs):
        try:
            return inner_filter(*args, **kwargs)
        except EnvironmentError as env_error:
            if error_handler_func is not None:
                error_handler_func(env_error)
            return False
    return guarded
def noerror(_):
    """Default error handler: silently ignore the error."""
    return None
class Walker(object):
    """Filtered filesystem walker yielding FileEntry objects.

    Directories are pruned with dir_object_filter, files are selected with
    file_object_filter, and every EnvironmentError is routed to `onerror`
    instead of propagating.
    """
    def __init__(
        self,
        recursive: bool,
        dir_object_filter: Optional[FSPredicate]=None,
        file_object_filter: Optional[FSPredicate]=None,
        onerror: Optional[ErrorHandler]=None
    ):
        self._recursive = bool(recursive)
        self._onerror = noerror if onerror is None else onerror
        # Wrap both filters so an EnvironmentError raised inside them is
        # reported through onerror and treated as "exclude this entry".
        self._dir_filter = catch_filter(dir_object_filter, self._onerror)
        self._file_filter = catch_filter(file_object_filter, self._onerror)
    def __call__(self, paths: Iterable[AnyPath]) -> Iterator[FileEntry]:
        # Walk each given root; a root that fails to stat is reported and skipped.
        for root_index, root_path in enumerate(paths):
            root_spec = Root(root_path, root_index)
            try:
                root_obj = FileEntry.from_path(root_path, root_spec)
            except EnvironmentError as env_error:
                self._onerror(env_error)
                continue
            if root_obj.is_dir and self._dir_filter(root_obj):
                if self._recursive:
                    yield from self._recurse_dir(root_obj)
                else:
                    # Non-recursive mode yields the directory entry itself.
                    yield root_obj
            elif root_obj.is_file and self._file_filter(root_obj):
                yield root_obj
    def _recurse_dir(self, root_obj: FileEntry):
        # Iterative depth-first traversal using an explicit stack.
        dir_obj_q: List[FileEntry] = [ root_obj ]
        next_dirs: List[FileEntry] = [ ]
        while len(dir_obj_q) > 0:
            dir_obj = dir_obj_q.pop()
            next_dirs.clear()
            try:
                for child_obj in dir_obj.dir_content():
                    try:
                        # Symlinked directories are skipped (cycle protection).
                        if (
                            child_obj.is_dir and
                            not child_obj.is_symlink and
                            self._dir_filter(child_obj)
                        ):
                            next_dirs.append(child_obj)
                        elif (
                            child_obj.is_file and
                            self._file_filter(child_obj)
                        ):
                            yield child_obj
                    except EnvironmentError as query_error:
                        self._onerror(query_error)
            except EnvironmentError as env_error:
                self._onerror(env_error)
            # Reversed so subdirectories are visited in listing order.
            dir_obj_q.extend(reversed(next_dirs))
def flat_iterator(
    paths: Iterable[AnyPath],
    dir_object_filter: Optional[FSPredicate]=None,
    file_object_filter: Optional[FSPredicate]=None,
    onerror: Optional[ErrorHandler]=None
) -> Iterator[FileEntry]:
    """Iterate over *paths* without descending into directories."""
    walker = Walker(False, dir_object_filter, file_object_filter, onerror)
    return walker(paths)
def recurse_iterator(
    paths: Iterable[AnyPath],
    dir_object_filter: Optional[FSPredicate]=None,
    file_object_filter: Optional[FSPredicate]=None,
    onerror: Optional[ErrorHandler]=None
) -> Iterator[FileEntry]:
    """Iterate over *paths*, recursing into directories."""
    walker = Walker(True, dir_object_filter, file_object_filter, onerror)
    return walker(paths)
|
#!/usr | /bin/env python3
from pyserv.databrow | se import main
main()
|
import os, sys
up_path = os.path.abspath('..')
sys.path.append(up_path)
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import rc
from objects import SimObject
from utils import scalar
from covar import draw_ellipsoid, vec2cov, cov2vec,\
project_psd
from kalman_filter import ekf_update
from numpy.random import multivariate_normal as mvn
import time
from math import atan2, atan
import robots
from openravepy import *
from transforms import unscented_transform
from rave_draw import *
#import openravepy as rave
class RaveLocalizerBot(robots.Robot):
    """Composite robot: wraps a base robot plus a 3-DOF object position to
    localize. State layout: [wrapped bot state (bot.NX), object x, y, z]."""
    NX = -1
    NU = -1
    def __init__(self, bot, obj):
        self.bot = bot
        self.NX = bot.NX + 3 #FIXME (hack for now)
        self.NU = bot.NU
        self.dt = bot.dt
        # Augmented state: the wrapped robot's state followed by the
        # object's xyz position.
        x = array(zeros((self.NX)))
        for t in range(bot.NX):
            x[t] = bot.x[t]
        x[bot.NX] = obj[0]
        x[bot.NX+1] = obj[1]
        x[bot.NX+2] = obj[2]
        self.EPS = bot.EPS
        robots.Robot.__init__(self, x, dt=self.dt)
    def dynamics(self, X, u):
        # Only the wrapped robot moves; the object part of the state is static.
        bot_up = self.bot.dynamics(X[0:self.bot.NX], u)
        return vstack((bot_up, X[self.bot.NX:]))
    def collision_penalty_trajectory(self, x, env):
        return 0 #Todo: FIXME
    def camera_obj_state(self,x):
        # Returns the camera pose (7d: xyz + quaternion) stacked on the
        # object position (3d) as a 10x1 matrix.
        camera_transform = self.bot.camera_transform(x[0:self.bot.NX])
        obj_pos = x[self.bot.NX:]
        z = mat(zeros((10,1)))
        z[0:7] = camera_transform
        z[7:10] = obj_pos
        return z
    """
    def fov_state(self, x):
        xy = mat(self.bot.traj_pos(x)).T
        theta = self.bot.orientation(x)
        #print vstack((xy, theta, x[self.bot.NX:]))
        if isinstance(x, tuple) or len(x.shape) == 1:
            x = mat(x).T
        if isinstance(xy, tuple) or xy.shape[0] < xy.shape[1]:
            xy = mat(xy).T
        return vstack((xy, theta, x[self.bot.NX:]))
    """
    def observe(self, scene, x=None):
        # Wrapped robot's observations stacked on the base-class observations.
        zs = self.bot.observe(scene, x[0:self.bot.NX])
        return vstack((zs, robots.Robot.observe(self, scene, x)))
    def draw_trajectory(self, xs, mus=None, Sigmas=None, color=array((1.0, 0.0, 0.0, 0.2))):
        # Delegate drawing to the wrapped robot, restricted to its sub-state.
        bnx = self.bot.NX
        self.bot.draw_trajectory(xs[0:bnx], mus[0:bnx], Sigmas[0:bnx, 0:bnx], color)
    def draw(self, X=None, color=array((1.0, 0.0, 0.0))):
        # Bug fix: the original referenced undefined names `x` and `bnx`,
        # raising NameError at call time. Default to the current state and
        # pass only the wrapped robot's portion of it.
        if X is None:
            X = self.x
        self.bot.draw(X[0:self.bot.NX], color)
class BarretWAM(robots.Robot):
    # wrapper for openrave robots
    # Bug fix throughout: `== None` / `!= None` replaced with `is None` /
    # `is not None`; identity tests are required for None (PEP 8) and the
    # equality form misbehaves on numpy matrix states.
    NX = 7
    NU = 7
    EPS = 1e-3
    def __init__(self, ravebot, env, state_rep='angles', dt=-1):
        self.ravebot = ravebot
        self.env = env # used for drawing purposes
        self.state_rep = state_rep
        self.handles = [ ] # used for drawing purposes
        self.jointnames = ['Shoulder_Yaw', 'Shoulder_Pitch', 'Shoulder_Roll', 'Elbow', 'Wrist_Yaw', 'Wrist_Pitch', 'Wrist_Roll']
        self.jointidxs = [ravebot.GetJoint(name).GetDOFIndex() for name in self.jointnames]
        self.ravebot_manip = self.ravebot.SetActiveManipulator('arm')
        # Restrict the full DOF limit vectors to the controlled joints.
        self.lower_limits, self.upper_limits = self.ravebot.GetDOFLimits()
        tmp_lower_limits = []
        tmp_upper_limits = []
        for idx in self.jointidxs:
            tmp_lower_limits.append(self.lower_limits[idx])
            tmp_upper_limits.append(self.upper_limits[idx])
        self.lower_limits = mat(array(tmp_lower_limits)).T
        self.upper_limits = mat(array(tmp_upper_limits)).T
        self.ravebot.SetActiveDOFs(self.jointidxs)
        x = [0] * len(self.jointidxs)
        robots.Robot.__init__(self, x, dt=dt)
        self.index = BarretWAM.increment_index()
    def traj_pos(self, x=None):
        """End-effector xyz position for joint state x (default: current)."""
        if x is None:
            x = self.x
        if self.state_rep == 'angles':
            return mat(self.forward_kinematics(x)[0:3,3])
        else: #state representation = points
            pass
    def orientation(self, x=None):
        """End-effector rotation matrix for joint state x (default: current)."""
        if x is None:
            x = self.x
        if self.state_rep == 'angles':
            return self.forward_kinematics(x)[0:3,0:3]
        else:
            pass
    def __str__(self):
        return 'ravebot[' + str(self.index) + ']'
    def dynamics(self, x, u):
        """Joint-space dynamics: add the control, clamped to joint limits."""
        if self.state_rep == 'angles':
            thetas = x + u
            thetas = minimum(thetas, self.upper_limits)
            thetas = maximum(thetas, self.lower_limits)
            """
            for i in range(thetas.shape[0]):
                if thetas[i] > self.upper_limits[i]:
                    thetas[i] = self.upper_limits[i]
                elif thetas[i] < self.lower_limits[i]:
                    thetas[i] = self.lower_limits[i]
            """
            return thetas
        else:
            pass
    def camera_transform(self, x):
        """Camera pose (xyz + quaternion, 7x1) for joint configuration x."""
        camera_rel_transform = self.ravebot.GetAttachedSensor('camera').GetRelativeTransform()
        with self.env:
            self.ravebot.SetDOFValues(x, self.jointidxs)
            link_transform = mat(self.ravebot.GetLink('wam4').GetTransform())
        camera_trans = link_transform * camera_rel_transform
        camera_quat = quatFromRotationMatrix(array(camera_trans[0:3,0:3]))
        camera_vec = mat(zeros((7,1)))
        camera_vec[0:3] = camera_trans[0:3,3]
        camera_vec[3:7] = mat(camera_quat).T
        return camera_vec
    def observe(self, scene, x=None):
        if x is None:
            x = self.x
        zs = robots.Robot.observe(self, scene, x)
        # also give joint angle observations
        #if zs.size > 0:
        #    pass
            #zs = vstack((zs, mat('x[2]')))
            #zs = vstack((zs, mat('x[3]')))
        #else:
        #    zs = mat('x[3]')
        return zs
    def forward_kinematics(self, thetas):
        """End-effector transform for the given joint angles."""
        with self.env:
            self.ravebot.SetDOFValues(thetas,self.jointidxs)
            return mat(self.ravebot_manip.GetEndEffectorTransform())
    def inverse_kinematics(self, xyz):
        pass
    def draw_Cspace(self, X=None, color='blue'):
        pass
    def collision_penalty_trajectory(self, x, env):
        return 0 #Todo: FIXME
    def draw_trajectory(self, xs, mus=None, Sigmas=None, color=array((1.0, 0.0, 0.0, 0.2))):
        """Draw the end-effector path, with 2-sigma ellipsoids when a belief
        (mus, Sigmas) is supplied."""
        T = xs.shape[1]
        XYZ = mat(zeros((3,T)))
        for t in range(T):
            XYZ[:,t] = self.traj_pos(xs[:,t])
        if mus is not None and Sigmas is not None:
            for t in range(T):
                mu_y, Sigma_y = unscented_transform(mus[:,t], Sigmas[:,:,t],\
                    lambda x: self.traj_pos(x))
                # padding for positive definiteness
                Sigma_y = Sigma_y + 0.0001 * identity(3)
                self.handles.append(draw_ellipsoid(mu_y, Sigma_y, std_dev=2,\
                    env=self.env, colors=color))
        #self.handles.append(self.env.drawlinestrip(points=array(((xyz[0], xyz[1], xyz[2]),(0.0, 0.0,0.0))),
        #                           linewidth=3.0))
        self.handles.append(self.env.drawlinestrip(points=XYZ.T, linewidth=3.0, colors=color[0:3]))
    def draw(self, X=None, color=array((1.0, 0.0, 0.0))):
        if X is None:
            X = self.x
        xyz = self.traj_pos(X)
        with self.env:
            """
            # works with only a few robots
            newrobot = RaveCreateRobot(self.env,self.ravebot.GetXMLId())
            newrobot.Clone(self.ravebot,0)
            for link in newrobot.GetLinks():
                for geom in link.GetGeometries():
                    geom.SetTransparency(0.6)
            self.env.Add(newrobot,True)
            newrobot.SetActiveDOFs(self.jointidxs)
            newrobot.SetDOFValues(X, self.jointidxs)
            self.handles.append(newrobot)
            """
            self.handles.append(self.env.plot3(points=xyz, pointsize=1.0, colors=color))
|
class Solution:
    def toLowerCase(self, str: str) -> str:
        """Return *str* with ASCII uppercase letters lowered, other
        characters untouched.

        Improvement: collect characters in a list and join once instead of
        repeated string concatenation (which is quadratic).
        """
        # Note: parameter is named `str` (shadows the builtin) to match the
        # published interface.
        # Distance between 'a' and 'A' in ASCII: 32.
        section = ord("a") - ord("A")
        chars = []
        for s in str:
            if "A" <= s <= "Z":
                chars.append(chr(ord(s) + section))
            else:
                chars.append(s)
        return "".join(chars)
# Quick manual check of the solution.
sol = Solution()
print(sol.toLowerCase("Hello"))
|
from .nucleicacidpartitemcontroller import NucleicAcidPartItemController
from .oligoitemcontroller import OligoItemController
from .stranditemcontroller import StrandItemController
from .viewrootcontroller import ViewRootController
from .virtualhelixitemcontrolle | r import | VirtualHelixItemController |
ure__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import io
import os
import apache_beam as beam
from apache_beam.metrics import Metrics
import matplotlib.pyplot as plot
import tensorflow as tf
from tf import gfile
from typing import List
from typing import Text
from google.protobuf import text_format
from deepmath.deephol import deephol_pb2
from deepmath.deephol import io_util
from deepmath.deephol.deephol_loop.missing import recordio
from deepmath.deephol.deephol_loop.missing import runner
from deepmath.deephol.utilities import deephol_stat_pb2
from deepmath.deephol.utilities import stats
# Basenames / filenames of the artifacts written into the report directory.
STATS_BASENAME = 'proof_stats'
AGGREGATE_STAT_BASENAME = 'aggregate_stat'
PROVEN_GOALS_BASENAME = 'proven_goals_fps'
OPEN_GOALS_BASENAME = 'open_goals_fps'
PROVEN_STATS_BASENAME = 'proven_stats'
PRETTY_STATS_BASENAME = 'pretty_stats'
CACTUS_PLOT_FILE_NAME = 'cactus.pdf'
CACTUS_DATA_FILE_NAME = 'cactus.dat'
class StatDoFn(beam.DoFn):
  """Beam DoFn computing per-proof-log statistics, with progress counters."""

  def __init__(self):
    this_class = self.__class__
    self.processed_counter = Metrics.counter(this_class, 'processed')
    self.proven_counter = Metrics.counter(this_class, 'proven')
    self.attempted_counter = Metrics.counter(this_class, 'attempted')
    self.nodes_counter = Metrics.counter(this_class, 'nodes')

  def process(self, proof_log: deephol_pb2.ProofLog
             ) -> List[deephol_stat_pb2.ProofStat]:
    self.processed_counter.inc()
    proof_stat = stats.proof_log_stats(proof_log)
    self.proven_counter.inc(proof_stat.num_theorems_proved)
    self.attempted_counter.inc(proof_stat.num_theorems_attempted)
    self.nodes_counter.inc(proof_stat.num_nodes)
    return [proof_stat]
class AggregateStatsFn(beam.CombineFn):
  """Beam CombineFn merging per-proof stats into one aggregate proto."""

  def create_accumulator(self):
    return deephol_stat_pb2.ProofAggregateStat()

  def add_input(self, target, source):
    stats.merge_stat(target, source)
    return target

  def merge_accumulators(self, aggregate_stats):
    merged = deephol_stat_pb2.ProofAggregateStat()
    for partial in aggregate_stats:
      stats.merge_aggregate_stat(merged, partial)
    return merged

  def extract_output(self, result):
    return result
class UniqueFn(beam.CombineFn):
  """Beam CombineFn collecting unique values, emitted newline-joined."""

  def create_accumulator(self):
    return set()

  def add_input(self, target, source):
    target.add(source)
    return target

  def merge_accumulators(self, sets):
    merged = set()
    for partial in sets:
      merged.update(partial)
    return merged

  def extract_output(self, result):
    return '\n'.join(str(value) for value in result)
def proven_or_open(proof_stat):
  """Route a proof's theorem fingerprint to the 'proven' or 'open' output."""
  fingerprint = '%d' % proof_stat.theorem_fingerprint
  tag = 'proven' if proof_stat.num_theorems_proved > 0 else 'open'
  yield beam.pvalue.TaggedOutput(tag, fingerprint)
def make_proof_logs_collection(root, proof_logs: Text):
  """Create a beam collection of ProofLog protos from RecordIO file(s)."""
  return (root | 'Create' >> recordio.ReadFromRecordIO(
      proof_logs, beam.coders.ProtoCoder(deephol_pb2.ProofLog)))
def reporting_pipeline(proof_logs_collection, stats_out: Text,
                       aggregate_stats: Text, proven_goals: Text,
                       open_goals: Text):
  """A pipeline reporting aggregate statistics and proved theorems.
  Args:
    proof_logs_collection: beam collection of proof logs.
    stats_out: Filename for outputting per proof statistics.
    aggregate_stats: Filename for storing aggregated statistics
    proven_goals: Filename for the fingerprint of proven goals.
    open_goals: Filename for the fingerprint of open goals.
  Returns:
    A beam pipeline for writing statistics.
  """
  proof_stats = (proof_logs_collection | 'Stats' >> beam.ParDo(StatDoFn()))
  # Branch 1: per-proof stats written as a RecordIO of ProofStat protos.
  _ = proof_stats | 'WriteStats' >> recordio.WriteToRecordIO(
      file_path_prefix=stats_out,
      coder=beam.coders.ProtoCoder(deephol_stat_pb2.ProofStat))
  # Branch 2: a single globally combined aggregate, written as a text proto.
  _ = (
      proof_stats
      | 'AggregateStats' >> beam.CombineGlobally(AggregateStatsFn())
      | 'MapProtoToString' >> beam.Map(text_format.MessageToString)
      | 'WriteAggregates' >> beam.io.WriteToText(aggregate_stats, '.pbtxt'))
  # Branch 3: theorem fingerprints split into proven/open text files.
  results = proof_stats | (
      'ProvenOrOpen' >> beam.FlatMap(proven_or_open).with_outputs())
  _ = (
      results.proven
      | 'UniqueProven' >> beam.CombineGlobally(UniqueFn())
      | 'WriteProven' >> beam.io.WriteToText(proven_goals, '.txt'))
  _ = (
      results.open
      | 'UniqueOpen' >> beam.CombineGlobally(UniqueFn())
      | 'WriteOpen' >> beam.io.WriteToText(open_goals, '.txt'))
def file_lines_set(fname):
  """Read *fname* and return its lines, right-stripped, as a set."""
  with gfile.Open(fname) as f:
    return {line.rstrip() for line in f}
class ReportingPipeline(object):
"""Top level class to manage a reporting pipeline."""
  def __init__(self, out_dir: Text):
    """Create *out_dir* and precompute the paths of all output artifacts."""
    self.out_dir = out_dir
    gfile.MakeDirs(out_dir)
    self.proof_stats_filename = os.path.join(out_dir, STATS_BASENAME)
    self.aggregate_stat_filename = os.path.join(out_dir,
                                                AGGREGATE_STAT_BASENAME)
    self.proven_goals_filename = os.path.join(out_dir, PROVEN_GOALS_BASENAME)
    self.open_goals_filename = os.path.join(out_dir, OPEN_GOALS_BASENAME)
    self.proven_stats_filename = os.path.join(out_dir, PROVEN_STATS_BASENAME)
    self.pretty_stats_filename = os.path.join(out_dir, PRETTY_STATS_BASENAME)
    self.cactus_plot_filename = os.path.join(out_dir, CACTUS_PLOT_FILE_NAME)
    self.cactus_data_filename = os.path.join(out_dir, CACTUS_DATA_FILE_NAME)
  def setup_pipeline(self, proof_logs_collection):
    """Attach the reporting stages to the given proof-log collection."""
    reporting_pipeline(proof_logs_collection, self.proof_stats_filename,
                       self.aggregate_stat_filename, self.proven_goals_filename,
                       self.open_goals_filename)
  def write_final_stats(self):
    """Log and write final aggregated statistics to file system."""
    # NOTE(review): assumes the pipeline produced exactly one output shard
    # ('-00000-of-00001' suffixes) — confirm for larger runs.
    fname = self.aggregate_stat_filename + '-00000-of-00001.pbtxt'
    aggregate_stat = io_util.load_text_proto(
        fname, deephol_stat_pb2.ProofAggregateStat, 'aggregate statistics')
    if aggregate_stat is None:
      tf.logging.warning('Could not read aggregate statistics "%s"', fname)
      return
    tf.logging.info('Stats:\n%s',
                    stats.aggregate_stat_to_string(aggregate_stat))
    open_goals = file_lines_set(self.open_goals_filename +
                                '-00000-of-00001.txt')
    proven_goals = file_lines_set(self.proven_goals_filename +
                                  '-00000-of-00001.txt')
    # A goal counts as open only if it was never proven in any attempt.
    never_proven = open_goals - proven_goals
    num_open_goals = len(never_proven)
    num_proven_goals = len(proven_goals)
    tf.logging.info('Open goals: %d', num_open_goals)
    tf.logging.info('Proved goals: %d', num_proven_goals)
    perc_proven = 100.0 * num_proven_goals / float(num_open_goals +
                                                   num_proven_goals)
    tf.logging.info('Percentage proven: %.2f', perc_proven)
    with gfile.Open(self.proven_stats_filename, 'w') as f:
      f.write('%d %d %.2f\n' % (num_open_goals, num_proven_goals, perc_proven))
    with gfile.Open(self.pretty_stats_filename, 'w') as f:
      f.write('%s\n' % stats.detailed_statistics(aggregate_stat))
    # Write cactus plot
    if aggregate_stat.proof_closed_after_millis:
      cactus_data = list(aggregate_stat.proof_closed_after_millis)
      cactus_data.sort()
      with gfile.Open(self.cactus_data_filename, 'w') as f:
        f.write('\n'.join(map(str, cactus_data)))
      fig = plot.figure()
      plot.xlabel('Number of proofs closed')
      plot.ylabel('Wall clock time in s')
      plot.plot([ms * .001 for ms in cactus_data])  # convert to seconds
      # Render to an in-memory buffer, then copy to the (possibly remote)
      # gfile destination.
      buf = io.BytesIO()
      fig.savefig(buf, format='pdf', bbox_inches='tight')
      with gfile.Open(self.cactus_plot_filename, 'wb') as f:
        f.write(buf.getvalue())
def run_pipeline(self, proof_logs: Text):
def pipeline(root):
proof_logs_collection = make_proof_logs_collection(root, proof_logs)
self.setup_pipeline(proof_logs_collection)
runner.Runner().run(pipeline).wait_until_finish()
self.write_final_s |
de | f propagate(la): # la: [list(int)]
print la, la # [str], [str]
propagate([1]) # []
propagate([2]) # []
| |
from brms.settings.base import *
import dj_database_url
|
# Production overrides: debugging must be disabled outside development.
DEBUG = False
# Only serve requests whose Host header matches this domain (and subdomains).
ALLOWED_HOSTS = ['.example.com']
# Use the cached template loader so template is compiled once and read from
# memory instead of reading from disk on each load.
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]
# Configure the default database via dj-database-url (reads DATABASE_URL
# from the environment by default).
DATABASES['default'] = dj_database_url.config()
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-07 19:20
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated migration: redefines posts.Post.url as a plain URLField.
class Migration(migrations.Migration):
    dependencies = [
        ('posts', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='url',
            field=models.URLField(),
        ),
    ]
|
# -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_base_uvalue_by_bui | lding_age_lookup
def calculation(self=None, parameters={}, feature=None):
    """Compute the EnEV annual heat demand per living area (AHDE).

    Adds the air-change heat loss to the building's heat loss per living
    area (HLAE). Returns {'AHDE': {'type': <QVariant type>, 'value': ...}}.
    Improvement: removed the unused `from math import floor, ceil`.
    """
    from PyQt4.QtCore import QVariant
    ahde = NULL
    if not oeq_global.isnull([parameters['HLAE']]):
        # Air change heat loss for standard rooms: 40 kWh/m2a
        # (Geiger, Lueftung im Wohnungsbau); 20% of the total area is
        # used for stairs and floors, hence the 0.8 factor.
        ahde = float(parameters['HLAE']) + 40.0 * 0.8
    return {'AHDE': {'type': QVariant.Double, 'value': ahde}}
# Register this evaluation as an OeQ extension.
extension = OeQExtension(
    extension_id=__name__,
    category='Evaluation',
    subcategory='Building',
    # Typo fixed in the display name: 'Livig' -> 'Living'.
    extension_name='AHD Building per Living Area EnEV',
    layer_name= 'Annual Heat Demand (per Living Area, EnEV)',
    extension_filepath=os.path.join(__file__),
    colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
    field_id='AHDE',
    source_type='none',
    par_in=['HLAE'],
    sourcelayer_name=config.data_layer_name,
    targetlayer_name=config.data_layer_name,
    active=True,
    show_results=['AHDE'],
    description=u"Calculate EnEV Annual Heat Demand per Living Area",
    evaluation_method=calculation)
extension.registerExtension(default=True)
|
"""
Serializers for Video Abstraction Layer
Serialization is usually sent through the VideoSerializer which uses the
EncodedVideoSerializer which uses the profile_name as it's profile field.
"""
from rest_framework import serializers
from django.core.exceptions import ValidationError
from edxval.models import Profile, Video, EncodedVideo, Subtitle, CourseVideo
class EncodedVid | eoSerializer(serializers.ModelSerializer):
"""
Serializer for EncodedVideo object.
Uses the profile_name as it's profile value instead of a Profile object.
"""
profile = serializers.SlugRelatedField(slug_field="profile_name")
class Meta: # pylint: disable=C1001, C0111
model = EncodedVideo
fields = (
"created",
"modified",
"url",
"file_size",
"bitrate",
"profile",
)
def get_identi | ty(self, data):
"""
This hook is required for bulk update.
We need to override the default, to use the slug as the identity.
"""
return data.get('profile', None)
class SubtitleSerializer(serializers.ModelSerializer):
    """
    Serializer for Subtitle objects.
    """
    # content_url is derived and read-only; raw content is write-only.
    content_url = serializers.CharField(source='get_absolute_url', read_only=True)
    content = serializers.CharField(write_only=True)
    def validate_content(self, attrs, source):
        """
        Validate that the subtitle is in the correct format.

        For 'sjson' subtitles the content must parse as JSON; it is then
        re-serialized so a normalized JSON string is stored.
        Raises serializers.ValidationError when the JSON is malformed.
        """
        value = attrs[source]
        if attrs.get('fmt') == 'sjson':
            import json
            try:
                loaded = json.loads(value)
            except ValueError:
                raise serializers.ValidationError("Not in JSON format")
            else:
                # store the canonical JSON form, not the raw input
                attrs[source] = json.dumps(loaded)
        return attrs
    class Meta: # pylint: disable=C1001, C0111
        model = Subtitle
        lookup_field = "id"
        fields = (
            "fmt",
            "language",
            "content_url",
            "content",
        )
class CourseSerializer(serializers.RelatedField):
    """
    Field for CourseVideo.

    Represents a CourseVideo relation as its course_id string.
    """
    def to_native(self, value):
        # serialize: CourseVideo -> course_id string
        return value.course_id
    def from_native(self, data):
        # deserialize: course_id string -> unsaved CourseVideo;
        # falsy input yields None (no relation created)
        if data:
            course_video = CourseVideo(course_id=data)
            # validate everything except the video FK, which is not set yet
            course_video.full_clean(exclude=["video"])
            return course_video
class VideoSerializer(serializers.ModelSerializer):
    """
    Serializer for Video object.

    encoded_videos takes a list of dicts of EncodedVideo data.
    """
    encoded_videos = EncodedVideoSerializer(many=True, allow_add_remove=True)
    subtitles = SubtitleSerializer(many=True, allow_add_remove=True, required=False)
    courses = CourseSerializer(many=True, read_only=False)
    url = serializers.SerializerMethodField('get_url')
    class Meta: # pylint: disable=C1001, C0111
        model = Video
        lookup_field = "edx_video_id"
        exclude = ('id',)
    def get_url(self, obj):
        """
        Return relative url for the object.
        """
        return obj.get_absolute_url()
    def restore_fields(self, data, files):
        """
        Overridden function used to check against duplicate profile names.

        Converts a dictionary of data into a dictionary of deserialized fields. Also
        checks if there are duplicate profile_name(s). If there is, the deserialization
        is rejected (recorded in self._errors).
        """
        reverted_data = {}
        if data is not None and not isinstance(data, dict):
            self._errors['non_field_errors'] = ['Invalid data']
            return None
        try:
            # duplicate profiles would make encoded_videos entries ambiguous
            profiles = [ev["profile"] for ev in data.get("encoded_videos", [])]
            if len(profiles) != len(set(profiles)):
                self._errors['non_field_errors'] = ['Invalid data: duplicate profiles']
        except KeyError:
            raise ValidationError("profile required for deserializing")
        except TypeError:
            raise ValidationError("profile field needs to be a profile_name (str)")
        # standard DRF field-by-field restore; per-field errors are collected
        for field_name, field in self.fields.items():
            field.initialize(parent=self, field_name=field_name)
            try:
                field.field_from_native(data, files, field_name, reverted_data)
            except ValidationError as err:
                self._errors[field_name] = list(err.messages)
        return reverted_data
|
# Placeholder URLconf: this app currently exposes no URLs of its own.
# NOTE(review): `patterns` and `url` are imported but unused -- presumably
# kept for future routes; confirm before removing.
from django.conf.urls.defaults import patterns, url
urlpatterns = ()
|
c = s.Deserialize(fs)
fs.Close()
return bc
def writeLatestBlockNotificationFile(cluster, blockIndex):
    # Record the most recently taken block (cluster tag + index) so other
    # tools can discover where data taking left off.
    notificationPath = Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt"
    stream = FileStream(notificationPath, FileMode.Create)
    writer = StreamWriter(stream)
    writer.WriteLine(cluster + "\t" + str(blockIndex))
    writer.Close()
    stream.Close()
def checkYAGAndFix():
    # If the YAG interlock has tripped, restart the pattern output to recover.
    if hc.YAGInterlockFailed:
        bh.StopPattern()
        bh.StartPattern()
def printWaveformCode(bc, name):
    # Show the waveform code and inversion flag of the named modulation.
    modulation = bc.GetModulationByName(name)
    waveform = modulation.Waveform
    print(name + ": " + str(waveform.Code) + " -- " + str(waveform.Inverted))
def prompt(text):
    # Show a prompt on stdout and return the user's reply without
    # surrounding whitespace / trailing newline.
    sys.stdout.write(text)
    reply = sys.stdin.readline()
    return reply.strip()
def measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, probePolAngle, pumpPolAngle):
    """
    Read the current hardware monitor values and build a BlockConfig for
    the next block, tagged with the given machine-state parameters.

    NOTE(review): talks to live hardware (hc) and the pattern generator
    (bh); statement ordering matters and is preserved exactly.
    """
    fileSystem = Environs.FileSystem
    print("Measuring parameters ...")
    # pattern must be stopped while the RF monitors are sampled
    bh.StopPattern()
    hc.UpdateRFPowerMonitor()
    hc.UpdateRFFrequencyMonitor()
    bh.StartPattern()
    hc.UpdateBCurrentMonitor()
    hc.UpdateVMonitor()
    hc.UpdateI2AOMFreqMonitor()
    print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale))
    print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale))
    print("Bias: " + str(hc.BiasCurrent))
    print("B step: " + str(abs(hc.FlipStepCurrent)))
    print("DB step: " + str(abs(hc.CalStepCurrent)))
    # load a default BlockConfig and customise it appropriately
    settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
    bc = loadBlockConfig(settingsPath + "default.xml")
    bc.Settings["cluster"] = cluster
    bc.Settings["eState"] = eState
    bc.Settings["bState"] = bState
    bc.Settings["rfState"] = rfState
    bc.Settings["phaseScramblerV"] = scramblerV
    bc.Settings["probePolarizerAngle"] = probePolAngle
    bc.Settings["pumpPolarizerAngle"] = pumpPolAngle
    bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale
    bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale
    # /1000: monitor currents appear to be in mA while the block wants A
    # -- TODO confirm units
    bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000
    bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000
    bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000
    # these next 3, seemingly redundant, lines are to preserve backward compatibility
    bc.GetModulationByName("B").PhysicalCentre = (hc.BiasCurrent)/1000
    bc.GetModulationByName("B").PhysicalStep = abs(hc.FlipStepCurrent)/1000
    bc.GetModulationByName("DB").PhysicalStep = abs(hc.CalStepCurrent)/1000
    # RF modulations: Centre/Step are in control units, Physical* in
    # power/frequency units as read from the monitors
    bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre
    bc.GetModulationByName("RF1A").Step = hc.RF1AttStep
    bc.GetModulationByName("RF1A").PhysicalCentre = hc.RF1PowerCentre
    bc.GetModulationByName("RF1A").PhysicalStep = hc.RF1PowerStep
    bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre
    bc.GetModulationByName("RF2A").Step = hc.RF2AttStep
    bc.GetModulationByName("RF2A").PhysicalCentre = hc.RF2PowerCentre
    bc.GetModulationByName("RF2A").PhysicalStep = hc.RF2PowerStep
    bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre
    bc.GetModulationByName("RF1F").Step = hc.RF1FMStep
    bc.GetModulationByName("RF1F").PhysicalCentre = hc.RF1FrequencyCentre
    bc.GetModulationByName("RF1F").PhysicalStep = hc.RF1FrequencyStep
    bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre
    bc.GetModulationByName("RF2F").Step = hc.RF2FMStep
    bc.GetModulationByName("RF2F").PhysicalCentre = hc.RF2FrequencyCentre
    bc.GetModulationByName("RF2F").PhysicalStep = hc.RF2FrequencyStep
    bc.GetModulationByName("LF1").Centre = hc.FLPZTVoltage
    bc.GetModulationByName("LF1").Step = hc.FLPZTStep
    bc.GetModulationByName("LF1").PhysicalCentre = hc.I2LockAOMFrequencyCentre
    bc.GetModulationByName("LF1").PhysicalStep = hc.I2LockAOMFrequencyStep
    # generate the waveform codes
    print("Generating waveform codes ...")
    eWave = bc.GetModulationByName("E").Waveform
    eWave.Name = "E"
    lf1Wave = bc.GetModulationByName("LF1").Waveform
    lf1Wave.Name = "LF1"
    ws = WaveformSetGenerator.GenerateWaveforms( (eWave, lf1Wave), ("B","DB","PI","RF1A","RF2A","RF1F","RF2F") )
    bc.GetModulationByName("B").Waveform = ws["B"]
    bc.GetModulationByName("DB").Waveform = ws["DB"]
    bc.GetModulationByName("PI").Waveform = ws["PI"]
    bc.GetModulationByName("RF1A").Waveform = ws["RF1A"]
    bc.GetModulationByName("RF2A").Waveform = ws["RF2A"]
    bc.GetModulationByName("RF1F").Waveform = ws["RF1F"]
    bc.GetModulationByName("RF2F").Waveform = ws["RF2F"]
    # change the inversions of the static codes E and LF1
    bc.GetModulationByName("E").Waveform.Inverted = WaveformSetGenerator.RandomBool()
    bc.GetModulationByName("LF1").Waveform.Inverted = WaveformSetGenerator.RandomBool()
    # print the waveform codes
    # printWaveformCode(bc, "E")
    # printWaveformCode(bc, "B")
    # printWaveformCode(bc, "DB")
    # printWaveformCode(bc, "PI")
    # printWaveformCode(bc, "RF1A")
    # printWaveformCode(bc, "RF2A")
    # printWaveformCode(bc, "RF1F")
    # printWaveformCode(bc, "RF2F")
    # printWaveformCode(bc, "LF1")
    # store e-switch info in block config
    print("Storing E switch parameters ...")
    bc.Settings["eRampDownTime"] = hc.ERampDownTime
    bc.Settings["eRampDownDelay"] = hc.ERampDownDelay
    bc.Settings["eBleedTime"] = hc.EBleedTime
    bc.Settings["eSwitchTime"] = hc.ESwitchTime
    bc.Settings["eRampUpTime"] = hc.ERampUpTime
    bc.Settings["eRampUpDelay"] = hc.ERampUpDelay
    # this is for legacy analysis compatibility
    bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay
    bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay
    # store the E switch asymmetry in the block
    bc.Settings["E0PlusBoost"] = hc.E0PlusBoost
    return bc
# lock gains -- feedback constants used by updateLocks
# microamps of current per volt of control input
kSteppingBiasCurrentPerVolt = 1000.0
# max change in the b-bias voltage per block (clamp on each update)
kBMaxChange = 0.05
# volts of rf*a input required per cal's worth of offset
kRFAVoltsPerCal = 3.2
kRFAMaxChange = 0.1
# volts of rf*f input required per cal's worth of offset
kRFFVoltsPerCal = 8
kRFFMaxChange = 0.1
def updateLocks(bState):
    """
    Read the channel values of the last analysed block and adjust the
    software locks (stepping B bias shown here).

    bState: current B-field state; flips the sign of the bias feedback.
    """
    pmtChannelValues = bh.DBlock.ChannelValues[0]
    # note the weird python syntax for a one element list
    sigIndex = pmtChannelValues.GetChannelIndex(("SIG",))
    sigValue = pmtChannelValues.GetValue(sigIndex)
    bIndex = pmtChannelValues.GetChannelIndex(("B",))
    bValue = pmtChannelValues.GetValue(bIndex)
    #bError = pmtChannelValues.GetError(bIndex)
    dbIndex = pmtChannelValues.GetChannelIndex(("DB",))
    dbValue = pmtChannelValues.GetValue(dbIndex)
    #dbError = pmtChannelValues.GetError(dbIndex)
    rf1aIndex = pmtChannelValues.GetChannelIndex(("RF1A","DB"))
    rf1aValue = pmtChannelValues.GetValue(rf1aIndex)
    #rf1aError = pmtChannelValues.GetError(rf1aIndex)
    rf2aIndex = pmtChannelValues.GetChannelIndex(("RF2A","DB"))
    rf2aValue = pmtChannelValues.GetValue(rf2aIndex)
    #rf2aError = pmtChannelValues.GetError(rf2aIndex)
    rf1fIndex = pmtChannelValues.GetChannelIndex(("RF1F","DB"))
    rf1fValue = pmtChannelValues.GetValue(rf1fIndex)
    #rf1fError = pmtChannelValues.GetError(rf1fIndex)
    rf2fIndex = pmtChannelValues.GetChannelIndex(("RF2F","DB"))
    rf2fValue = pmtChannelValues.GetValue(rf2fIndex)
    #rf2fError = pmtChannelValues.GetError(rf2fIndex)
    lf1Index = pmtChannelValues.GetChannelIndex(("LF1",))
    lf1Value = pmtChannelValues.GetValue(lf1Index)
    #lf1Error = pmtChannelValues.GetError(lf1Index)
    lf1dbIndex = pmtChannelValues.GetChannelIndex(("LF1","DB"))
    lf1dbValue = pmtChannelValues.GetValue(lf1dbIndex)
    print "SIG: " + str(sigValue)
    print "B: " + str(bValue) + " DB: " + str(dbValue)
    print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
    print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
    print "LF1: " + str(lf1Value) + " LF1.DB: " + str(lf1dbValue)
    # B bias lock
    # the sign of the feedback depends on the b-state
    if bState:
        feedbackSign = 1
    else:
        feedbackSign = -1
    # gain 1/8; B/DB calibrates the channel value into current units
    deltaBias = - (1.0/8.0) * feedbackSign * (hc.CalStepCurrent * (bValue / dbValue)) / kSteppingBiasCurrentPerVolt
    # clamp the per-block correction
    deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
    print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
    newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, 0, 5)
|
# -*- coding: utf-8 -*-
# Packaging template for a sphinxcontrib extension; ${name}, ${author} and
# ${author_email} are placeholders substituted when the template is rendered.
from setuptools import setup, find_packages
long_desc = '''
This package contains the ${name} Sphinx extension.
.. add description here ..
'''
# the only hard dependency is Sphinx itself
requires = ['Sphinx>=0.6']
setup(
    name='sphinxcontrib-${name}',
    version='0.1',
    url='http://bitbucket.org/birkenfeld/sphinx-contrib',
    download_url='http://pypi.python.org/pypi/sphinxcontrib-${name}',
    license='BSD',
    author='${author}',
    author_email='${author_email}',
    description='Sphinx "${name}" extension',
    long_description=long_desc,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Sphinx :: Extension',
        #'Framework :: Sphinx :: Theme',
        'Topic :: Documentation',
        'Topic :: Utilities',
    ],
    platforms='any',
    packages=find_packages(),
    include_package_data=True,
    install_requires=requires,
    # all sphinxcontrib-* distributions share the sphinxcontrib namespace
    namespace_packages=['sphinxcontrib'],
)
|
* beta[0][1]
assert all(abs((a[0] * b[0] + a[1] * b[1]) / x - 1) < 0.00001 for a, b in zip(alpha, beta))
@staticmethod
def getGamma(gammas, k, layout, intent):
index = 2 * (1 if layout[k + 1] else 0) + (1 if intent else 0)
return gammas[index]
    @staticmethod
    def getForwardBackwardEstimates(positionRelevances, gammas, layout, clicks, intent,
                                    debug=False):
        """
        Run the forward-backward algorithm over the hidden examination
        chain E and return (alpha, beta) message lists, each of length
        N + 1 with one [P(E=0), P(E=1)]-style entry per position.

        positionRelevances: dict with 'a' (attractiveness) and 's'
        (satisfaction) lists indexed by rank; gammas/layout/intent select
        the continuation probability via DbnModel.getGamma; clicks is the
        observed 0/1 click list.
        """
        N = len(clicks)
        if debug:
            assert N + 1 == len(layout)
        alpha = [[0.0, 0.0] for i in xrange(N + 1)]
        beta = [[0.0, 0.0] for i in xrange(N + 1)]
        # the chain starts examined: E_0 = 1 with probability 1
        alpha[0] = [0.0, 1.0]
        beta[N] = [1.0, 1.0]
        # P(E_{k+1} = e, C_k | E_k = e', G, I)
        updateMatrix = [[[0.0 for e1 in [0, 1]] for e in [0, 1]] for i in xrange(N)]
        for k, C_k in enumerate(clicks):
            a_u = positionRelevances['a'][k]
            s_u = positionRelevances['s'][k]
            gamma = DbnModel.getGamma(gammas, k, layout, intent)
            if C_k == 0:
                # no click: either not examined (stays 0) or examined and skipped
                updateMatrix[k][0][0] = 1
                updateMatrix[k][0][1] = (1 - gamma) * (1 - a_u)
                updateMatrix[k][1][0] = 0
                updateMatrix[k][1][1] = gamma * (1 - a_u)
            else:
                # click: requires examination; user may be satisfied or continue
                updateMatrix[k][0][0] = 0
                updateMatrix[k][0][1] = (s_u + (1 - gamma) * (1 - s_u)) * a_u
                updateMatrix[k][1][0] = 0
                updateMatrix[k][1][1] = gamma * (1 - s_u) * a_u
        # forward (alpha) and backward (beta) sweeps share the same matrices
        for k in xrange(N):
            for e in [0, 1]:
                alpha[k + 1][e] = sum(alpha[k][e1] * updateMatrix[k][e][e1] for e1 in [0, 1])
                beta[N - 1 - k][e] = sum(beta[N - k][e1] * updateMatrix[N - 1 - k][e1][e] for e1 in [0, 1])
        return alpha, beta
    def _getSessionEstimate(self, positionRelevances, layout, clicks, intent):
        # Returns a dict that represents the following:
        # {'a': P(A_k | I, C, G),
        #  's': P(S_k | I, C, G),
        #  'C': P(C | I, G),
        #  'clicks': P(C_k | C_1, ..., C_{k-1}, I, G)}
        # E.g., sessionEstimate['a'][k] = P(A_k = 1 | I = i, C, G).
        N = len(clicks)
        if self.config.get('DEBUG', DEBUG):
            assert N + 1 == len(layout)
        sessionEstimate = {'a': [0.0] * N, 's': [0.0] * N, 'e': [[0.0, 0.0] for k in xrange(N)], 'C': 0.0, 'clicks': [0.0] * N}
        alpha, beta = self.getForwardBackwardEstimates(positionRelevances,
                                                       self.gammas, layout, clicks, intent,
                                                       debug=self.config.get('DEBUG', DEBUG)
        )
        try:
            # varphi[k] = posterior P(E_k = 0), P(E_k = 1) from the messages
            varphi = [((a[0] * b[0]) / (a[0] * b[0] + a[1] * b[1]), (a[1] * b[1]) / (a[0] * b[0] + a[1] * b[1])) for a, b in zip(alpha, beta)]
        except ZeroDivisionError:
            print >>sys.stderr, alpha, beta, [(a[0] * b[0] + a[1] * b[1]) for a, b in zip(alpha, beta)], positionRelevances
            sys.exit(1)
        if self.config.get('DEBUG', DEBUG):
            assert all(ph[0] < 0.01 for ph, c in zip(varphi[:N], clicks) if c != 0), (alpha, beta, varphi, clicks)
        # calculate P(C | I, G) for k = 0
        sessionEstimate['C'] = alpha[0][0] * beta[0][0] + alpha[0][1] * beta[0][1]      # == 0 + 1 * beta[0][1]
        for k, C_k in enumerate(clicks):
            a_u = positionRelevances['a'][k]
            s_u = positionRelevances['s'][k]
            gamma = self.getGamma(self.gammas, k, layout, intent)
            # E_k_multiplier --- P(S_k = 0 | C_k) P(C_k | E_k = 1)
            if C_k == 0:
                # no click: attractiveness posterior requires E_k = 0
                sessionEstimate['a'][k] = a_u * varphi[k][0]
                sessionEstimate['s'][k] = 0.0
            else:
                # click observed: the document was certainly attractive
                sessionEstimate['a'][k] = 1.0
                sessionEstimate['s'][k] = varphi[k + 1][0] * s_u / (s_u + (1 - gamma) * (1 - s_u))
            # P(C_1, ..., C_k | I)
            sessionEstimate['clicks'][k] = sum(alpha[k + 1])
        return sessionEstimate
def _get_click_probs(self, s, possibleIntents):
"""
Returns clickProbs list:
clickProbs[i][k] = P(C_1, ..., C_k | I=i)
"""
# TODO: ensure that s.clicks[l] not used to calculate clickProbs[i][k] for l >= k
positionRelevances = {}
for intent in possibleIntents:
positionRelevances[intent] = {}
for r in ['a', 's']:
positionRelevances[intent][r] = [self.urlRelevances[intent][s.query][url][r] for url in s.results]
if self.config.get('QUERY_INDEPENDENT_PAGER', QUERY_INDEPENDENT_PAGER):
for k, u in enumerate(s.results):
if u == 'PAGER':
# use dummy 0 query for all fake pager URLs
positionRelevances[intent][r][k] = self.urlRelevances[intent][0][url][r]
layout = [False] * len(s.layout) if self.ignoreLayout else s.layout
return dict((i, self._getSessionEstimate(positionRelevances[i], layout, s.clicks, i)['clicks']) for i in possibleIntents)
def get_model_relevances(self, session, intent=False):
"""
Returns estimated relevance of each document in a given session
based on a trained click model.
You can make use of the fact that model trains different relevances
for different intents by specifying `intent` argument. If it is set
to False, simple web relevance is returned, if it is to True, then
vertical relevance is returned, i.e., how relevant each document
is to a vertical intent.
"""
relevances = []
for rank, result in enumerate(session.results):
a = self.urlRelevances[intent][session.query][result]['a']
s = self.urlRelevances[intent][session.query][result]['s']
relevances.append(a * s)
return relevances
def predict_click_probs(self, session, intent=False):
"""
Predicts click probabilities for a given session. Does not use clicks.
"""
click_probs = []
for rank, result in enumerate(session.results):
a = self.urlRelevances[intent][session.query][result]['a']
click_probs.append(a)
return click_probs
def predict_stop_probs(self, session, intent=False):
"""
Predicts stop probabilities for each document in a session.
"""
stop_probs = []
for rank, result in enumerate(session.results):
s = self.urlRelevances[intent][session.query][result]['s']
stop_probs.append(s)
return stop_probs
def get_abandonment_prob(self, rank, intent=False, layout=None):
"""
Predicts probability of stopping without click after examining document at rank `rank`.
"""
return 1.0 - self.getGamma(self.gammas, rank, layout, intent)
class SimplifiedDbnModel(DbnModel):
def __init__(self, ignoreIntents=True, ignoreLayout=True, config=None):
assert ignoreIntents
assert ignoreLayout
DbnModel.__init__(self, (1.0, 1.0, 1.0, 1.0), ignoreIntents, ignoreLayout, config)
def train(self, sessions):
max_query_id = self.config.get('MAX_QUERY_ID')
if max_query_id is None:
print >>sys.stderr, 'WARNING: no MAX_QUERY_ID specified for', self
max_query_id = 100000
urlRelFractions = [defaultdict(lambda: {'a': [1.0, 1.0], 's': [1.0, 1.0]}) for q in xrange(max_query_id)]
for s in sessions:
query = s.query
lastClickedPos = len(s.clicks) - 1
for k, c in enumerate(s.clicks):
if c != 0:
lastClickedPos = k
for k, (u, c) in enumerate(zip(s.results, s.clicks[:(lastClickedPos + 1)])):
tmpQuery = query
if self.config.get('QUERY_INDEPENDENT_PAGER', QUERY_INDEPENDENT_PAGER) \
and u == 'PAGER':
assert self.config.get('TRANSFORM_LOG', TRANSFORM_LOG)
# the same dummy query for all pagers
query = 0
if c != 0:
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import pickle
from pkg_resources import resource_filename
from nupic.regions.record_sensor import RecordSensor
from nupic.data.file_record_stream import FileRecordStream
"""
Generate column statistics for a StandardSource.
Each entry in statsInfo corresponds to one column, and contains a list
of statistics that should be computed for that column. Known statistics
are:
for floating point or integer values:
number -- min, max, mean
for string or integer values:
category -- list of all unique values and count
The model for a stats object is that you call the constructor with
the first value, and then add values with add().
(The alternative would be no args for the constructor, and
all values would be added with add()).
There are two reasons for this:
- no initialization check required every time we add a value
- getStats() can always return a valid result
"""
class NumberStatsCollector(object):
    """Accumulate min/max/sum/count statistics for a numeric column."""

    # only plain ints and floats are accepted as inputs
    validTypes = [int, float]

    def __init__(self):
        # stats start at zero and are replaced wholesale by the first
        # value added (see _addFirst)
        self.min = 0
        self.max = 0
        self.sum = 0
        self.n = 0
        self.initialized = False

    def _addFirst(self, value):
        # Seed every statistic from the first value so later updates only
        # need comparisons and accumulation.
        if type(value) not in self.validTypes:
            raise RuntimeError("NumberStatsCollector -- value '%s' is not a valid type" % value)
        value = float(value)
        self.min = self.max = self.sum = value
        self.n = 1
        self.initialized = True

    def add(self, value):
        """Fold one value into the running statistics."""
        if not self.initialized:
            self._addFirst(value)
            return
        value = float(value)
        self.min = min(self.min, value)
        self.max = max(self.max, value)
        self.sum += value
        self.n += 1

    def getStats(self):
        """Return the collected statistics as a dict (requires n > 0)."""
        return dict(min=self.min,
                    max=self.max,
                    sum=self.sum,
                    n=self.n,
                    average=self.sum / self.n)
class CategoryStatsCollector(object):
    """Count occurrences of each distinct value in a categorical column."""

    def __init__(self):
        self.categories = {}

    def add(self, value):
        # increment the per-value counter, starting at zero for new values
        count = self.categories.get(value, 0)
        self.categories[value] = count + 1

    def getStats(self):
        """Return the value -> count mapping (live reference, not a copy)."""
        return dict(categories=self.categories)
def getStatsFilename(filename, statsInfo, filters=()):
    """
    Build the cache-file path for the statistics of `filename`.

    The name encodes which statistics were requested and which filters were
    applied, so different requests get distinct cache files.

    :param filename: absolute path of the source .csv file
    :param statsInfo: dict mapping field name -> requested statistic
    :param filters: sequence of filter objects exposing getShortName()
    :raises RuntimeError: if filename is not absolute or not a .csv file
    """
    if not os.path.isabs(filename):
        raise RuntimeError("Filename %s is not an absolute path" % filename)
    if not filename.endswith(".csv"):
        raise RuntimeError("generateStats only supports csv files: %s" % filename)
    d = os.path.dirname(filename)
    # Swap only the extension; the previous str.replace("csv", "stats")
    # also corrupted a "csv" occurring elsewhere in the file name.
    basename = os.path.splitext(os.path.basename(filename))[0] + ".stats"
    sstring = "stats"
    for key in statsInfo:
        sstring += "_" + key
    if len(filters) > 0:
        sstring += "_filters"
        for flt in filters:
            sstring += "_" + flt.getShortName()
    return os.path.join(d, sstring + "_" + basename)
def generateStats(filename, statsInfo, maxSamples = None, filters=[], cache=True):
"""Generate requested statistics for a dataset and cache to a file.
If filename is None, then don't cache to a file"""
# Sanity checking
if not isinstance(statsInfo, dict):
raise RuntimeError("statsInfo must be a dict -- "
"found '%s' instead" % type(statsInfo))
filename = resource_filename("nupic.datafiles", filename)
if cache:
statsFilename = getStatsFilename(filename, statsInfo, filters)
# Use cached stats if found AND if it has the right data
if os.path.exists(statsFilename):
try:
r = pickle.load(open(statsFilename, "rb"))
except:
# Ok to ignore errors -- we will just re-generate the file
print "Warning: unable to load stats for %s -- " \
"will regenerate" % filename
r = dict()
requestedKeys = set([s for s in statsInfo])
availableKeys = set(r.keys())
unavailableKeys = requestedKeys.difference(availableKeys)
if len(unavailableKeys ) == 0:
return r
else:
print "generateStats: re-generating stats file %s because " \
"keys %s are not available" % \
(filename, str(unavailableKeys))
os.remove(filename)
print "Generating statistics for file '%s' with filters '%s'" % (filename, filters)
sensor = RecordSensor()
sensor.dataSource = FileRecordStream(filename)
sensor.preEncodingFilters = filters
# Convert collector description to collector object
stats = []
for field in statsInfo:
# field = key from statsInfo
if statsInfo[field] == "number":
# This wants a field name e.g. consumption and the field type as the value
statsInfo[field] = NumberStatsCollector()
elif statsInfo[field] == "category":
statsInfo[field] = CategoryStatsCollector()
else:
raise RuntimeError("Unknown stats type '%s' for field '%s'" % (statsInfo[field], field))
# Now collect the stats
if maxSamples is None:
maxSamples = 500000
for i in xrange(maxSamples):
try:
record = sensor.getNextRecord()
except StopIteration:
break
for (name, collector) in statsInfo.items():
collector.add(record[name])
del sensor
# Assemble the results and return
r = dict()
for (field, collector) in statsInfo.items():
stats = collector.getStats()
if field not in r:
r[field] = stats
else:
r[field].update(stats)
if cache:
f = open(statsFilename, "wb")
pickle.dump(r, f)
f.close()
# caller may need to know name of cached file
r["_filename"] = statsFilename
return r
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-02 21:54
from __future__ import unicode_literals
from django.db import migrations, models
def copy_to_question_page(apps, schema_editor):
    """Copy position, section and site links into the new_* fields."""
    db = schema_editor.connection.alias
    QuestionPage = apps.get_model('wizard_builder.QuestionPage')
    for page in QuestionPage.objects.using(db):
        page.new_position = page.position
        page.new_section = page.section
        for site in page.sites.all():
            page.new_sites.add(site)
        page.save()
class Migration(migrations.Migration):
    """
    Add parallel new_* columns to QuestionPage and copy the existing
    position/section/sites data into them (one step of a staged rename).
    """
    dependencies = [
        ('sites', '0002_alter_domain_unique'),
        ('wizard_builder', '0008_remove_textpage'),
    ]
    operations = [
        migrations.AddField(
            model_name='questionpage',
            name='new_position',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='position'),
        ),
        migrations.AddField(
            model_name='questionpage',
            name='new_section',
            field=models.IntegerField(choices=[(1, 'When'), (2, 'Where'), (3, 'What'), (4, 'Who')], default=1),
        ),
        migrations.AddField(
            model_name='questionpage',
            name='new_sites',
            field=models.ManyToManyField(to='sites.Site'),
        ),
        # data migration; no-op on rollback so the migration stays reversible
        migrations.RunPython(
            copy_to_question_page,
            reverse_code=migrations.RunPython.noop,
        ),
    ]
|
f.screen.write_process_input = \
lambda data: p_in.write(data.encode())
self.stream = pyte.ByteStream()
self.stream.attach(self.screen)
    def feed(self, data):
        # Forward raw terminal output bytes into the pyte stream parser,
        # which updates the attached screen state.
        self.stream.feed(data)
    def updateAttributes(self, initialize = False):
        """
        Rebuild the per-cell attribute cache from the pyte screen buffer.

        initialize=False refreshes only the dirty lines; initialize=True
        rebuilds every line (used after a resize).
        """
        buffer = self.screen.buffer
        lines = None
        if not initialize:
            lines = self.screen.dirty
        else:
            lines = range(self.screen.lines)
        # NOTE(review): attribute[1:] drops the cell's character, keeping
        # its style fields; the appended [False, 'default', 'default']
        # presumably pads to the expected attribute tuple length -- confirm
        # against pyte's Char layout.
        self.attributes = [[list(attribute[1:]) + [False, 'default', 'default'] for attribute in line.values()] for line in buffer.values()]
        for y in lines:
            try:
                t = self.attributes[y]
            except:
                # dirty line index beyond the rebuilt list: grow it
                self.attributes.append([])
            self.attributes[y] = [list(attribute[1:]) + [False, 'default', 'default'] for attribute in (buffer[y].values())]
            # pad short lines with default-styled cells up to screen width
            if len(self.attributes[y]) < self.screen.columns:
                diff = self.screen.columns - len(self.attributes[y])
                self.attributes[y] += [['default', 'default', False, False, False, False, False, False, 'default', 'default']] * diff
    def resize(self, lines, columns):
        # Resize the emulated screen, re-clamp the cursor into the new
        # bounds, then rebuild the whole attribute cache.
        self.screen.resize(lines, columns)
        self.setCursor()
        self.updateAttributes(True)
def setCursor(self, x = -1, y = -1):
xPos = x
yPos = y
if xPos == -1:
xPos = self.screen.cursor.x
if yPos == -1:
yPos = self.screen.cursor.y
self.screen.cursor.x = min(self.screen.cursor.x, self.screen.columns - 1)
self.screen.cursor.y = min(self.screen.cursor.y, self.screen.lines - 1)
def GetScreenContent(self):
cursor = self.screen.cursor
self.text = '\n'.join(self.screen.display)
self.updateAttributes(self.attributes == None)
self.screen.dirty.clear()
return {"cursor": (cursor.x, cursor.y),
'lines': self.screen.lines,
'columns': self.screen.columns,
"text": self.text,
'attributes': self.attributes.copy(),
'screen': 'pty',
'screenUpdateTime': time.time(),
}.copy()
class driver(screenDriver):
    def __init__(self):
        """Set up state for the pty-based screen driver."""
        screenDriver.__init__(self)
        # self-pipe: the SIGWINCH handler writes here to wake the main loop
        self.signalPipe = os.pipe()
        self.p_out = None     # pty master file object (set when the shell starts)
        self.terminal = None  # Terminal (pyte) emulation instance
        self.p_pid = -1       # child shell pid
        signal.signal(signal.SIGWINCH, self.handleSigwinch)
    def initialize(self, environment):
        """Store the runtime environment and register the emulation thread."""
        self.env = environment
        # shell command to spawn; empty means "pick a default" later
        self.command = self.env['runtime']['settingsManager'].getSetting('general','shell')
        self.shortcutType = self.env['runtime']['inputManager'].getShortcutType()
        self.env['runtime']['processManager'].addCustomEventThread(self.terminalEmulation)
    def getCurrScreen(self):
        # There is only ever one pty "screen", so old and new are both 'pty'.
        self.env['screen']['oldTTY'] = 'pty'
        self.env['screen']['newTTY'] = 'pty'
def injectTextToScreen(self, msgBytes, screen = None):
if not screen:
screen = self.p_out.fileno()
if isinstance(msgBytes, str):
msgBytes = bytes(msgBytes, 'UTF-8')
os.write(screen, msgBytes)
    def getSessionInformation(self):
        """Populate session info; a pty session never changes user."""
        self.env['screen']['autoIgnoreScreens'] = []
        self.env['general']['prevUser'] = getpass.getuser()
        self.env['general']['currUser'] = getpass.getuser()
    def readAll(self, fd, timeout = 0.3, interruptFd = None, len = 65536):
        """
        Drain `fd` and return the collected bytes.

        Stops when fd has no more data, when `interruptFd` becomes
        readable, or when `timeout` elapses; raises EOFError on a
        zero-length read (peer closed).

        NOTE(review): the parameter named `len` shadows the builtin but is
        part of the public keyword interface (callers pass len=4096), so
        it cannot be renamed safely.
        """
        msgBytes = b''
        fdList = []
        fdList += [fd]
        if interruptFd:
            fdList += [interruptFd]
        starttime = time.time()
        while True:
            r = screen_utils.hasMoreWhat(fdList, 0.0001)
            # nothing more to read
            if not fd in r:
                break
            data = os.read(fd, len)
            if data == b'':
                raise EOFError
            msgBytes += data
            # exit on interrupt available
            if interruptFd in r:
                break
            # respect timeout but wait a little bit of time to see if something more is here
            if (time.time() - starttime) >= timeout:
                break
        return msgBytes
    def openTerminal(self, columns, lines, command):
        """
        Fork a pty running `command`; returns (Terminal emulation object,
        child pid, file object wrapping the pty master).
        """
        p_pid, master_fd = pty.fork()
        if p_pid == 0: # Child.
            argv = shlex.split(command)
            env = os.environ.copy()
            #values are VT100,xterm-256color,linux
            try:
                # the bare except also covers TERM being unset (KeyError)
                if env["TERM"] == '':
                    env["TERM"] = 'linux'
            except:
                env["TERM"] = 'linux'
            os.execvpe(argv[0], argv, env)
        # File-like object for I/O with the child process aka command.
        # unbuffered (0) so keystrokes reach the child immediately
        p_out = os.fdopen(master_fd, "w+b", 0)
        return Terminal(columns, lines, p_out), p_pid, p_out
    def resizeTerminal(self,fd):
        """
        Copy the controlling tty's window size onto `fd` and return the
        resulting (lines, columns).
        """
        s = struct.pack('HHHH', 0, 0, 0, 0)
        # read our own terminal's size (fd 0) ...
        s = fcntl.ioctl(0, termios.TIOCGWINSZ, s)
        # ... and push it to the child pty
        fcntl.ioctl(fd, termios.TIOCSWINSZ, s)
        # NOTE(review): packed as unsigned 'HHHH' but unpacked as signed
        # 'hhhh' -- harmless for realistic sizes; confirm intent.
        lines, columns, _, _ = struct.unpack('hhhh', s)
        return lines, columns
    def getTerminalSize(self, fd):
        """Return (lines, columns) of the terminal behind `fd` via TIOCGWINSZ."""
        s = struct.pack('HHHH', 0, 0, 0, 0)
        lines, columns, _, _ = struct.unpack('HHHH', fcntl.ioctl(fd, termios.TIOCGWINSZ, s))
        return lines, columns
def handleSigwinch(self, *args):
os.write(self.signalPipe[1], b'w')
def terminalEmulation(self,active , eventQueue):
try:
old_attr = termios.tcgetattr(sys.stdin)
tty.setraw(0)
lines, columns = self.getTerminalSize(0)
if self.command == '':
self.command = screen_utils.getShell()
self.terminal, self.p_pid, self.p_out = self.openTerminal(columns, lines, self.command)
lines, columns = self.resizeTerminal(self.p_out)
self.terminal.resize(lines, columns)
fdList = [sys.stdin, self.p_out, self.signalPipe[0]]
while active.value:
r, _, _ = select(fdList, [], [], 1)
# none
if r == []:
continue
# signals
if self.signalPipe[0] in r:
os.read(self.signalPipe[0], 1)
lines, columns = self.resizeTerminal(self.p_out)
self.terminal.resize(lines, columns)
# input
if sys.stdin in r:
try:
msgBytes = self.readAll(sys.stdin.fileno(), len=4096)
except (EOFError, OSError):
eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":None})
break
if self.shortcutType == 'KEY':
try:
self.injectTextToScreen(msgBytes)
except:
eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":None})
break
else:
eventQueue.put({"Type":fenrirEventType.ByteInput,
"Data":msgBytes })
# output
if self.p_out in r:
try:
msgBytes = self.readAll(self.p_out.fileno(), interruptFd=sys.stdin.fileno())
except (EOFError, OSError):
eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":None})
break
# feed and send event bevore write, the pyte already has the right state
# so fenrir already can progress bevore os.write what should give some better reaction time
self.terminal.feed(msgBytes)
eventQueue.put({"Type":fenrirEventType.ScreenUpdate,
"Data":screen_utils.createScreenEventData(self.terminal.GetScreenContent())
})
self.injectTextToScreen(msgBytes, screen=sys.stdout.fileno())
except Exception as e: # Process died?
print(e)
eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":None})
finally:
os.kill(self.p_pid, signal.SIGTERM)
sel |
# -*- coding: utf-8 -*-
# Generated by Django 1. | 10.1 on 2016-12-17 20:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Set human-readable verbose names on the wochen_menge model."""
    dependencies = [
        ('jordbruksmark', '0002_auto_20161217_2140'),
    ]
    operations = [
        # metadata-only change: no database schema is touched
        migrations.AlterModelOptions(
            name='wochen_menge',
            options={'verbose_name': 'Wochen Menge', 'verbose_name_plural': 'Wochen Mengen'},
        ),
    ]
|
S = [
NETWORK_LINK_TYPE_PHY,
NETWORK_LINK_TYPE_BOND,
NETWORK_LINK_TYPE_VLAN,
NETWORK_SERVICE_NAMESERVER
]
def _parse_subnets(self, subnets, link_name):
networks = []
if not subnets or not isinstance(subnets, list):
LOG.warning("Subnets '%s' is empty or not a list.",
subnets)
return networks
for subnet in subnets:
if not isinstance(subnet, dict):
LOG.warning("Subnet '%s' is not a dictionary",
subnet)
continue
if subnet.get("type") in ["dhcp", "dhcp6"]:
continue
routes = []
for route_data in subnet.get("routes", []):
route_netmask = route_data.get("netmask")
route_network = route_data.get("network")
route_network_cidr = network_utils.ip_netmask_to_cidr(
route_network, route_netmask)
route_gateway = route_data.get("gateway")
route = network_model.Route(
network_cidr=route_network_cidr,
gateway=route_gateway
)
routes.append(route)
address_cidr = subnet.get("address")
netmask = subnet.get("netmask")
if netmask:
address_cidr = network_utils.ip_netmask_to_cidr(
address_cidr, netmask)
gateway = subnet.get("gateway")
if gateway:
# Map the gateway as a default route, depending on the
# IP family / version (4 or 6)
gateway_net_cidr = "0.0.0.0/0"
if netaddr.valid_ipv6(gateway):
gateway_net_cidr = "::/0"
routes.append(
network_model.Route(
network_cidr=gateway_net_cidr,
gateway=gateway
)
)
networks.append(network_model.Network(
link=link_name,
address_cidr=address_cidr,
dns_nameservers=subnet.get("dns_nameservers"),
routes=routes
))
return networks
def _parse_physical_config_item(self, item):
if not item.get('name'):
LOG.warning("Physical NIC does not have a name.")
return
link = network_model.Link(
id=item.get('name'),
name=item.get('name'),
type=network_model.LINK_TYPE_PHYSICAL,
enabled=True,
mac_address=item.get('mac_address'),
mtu=item.get('mtu'),
bond=None,
vlan_link=None,
vlan_id=None
)
return network_model.NetworkDetailsV2(
links=[link],
networks=self._parse_subnets(item.get("subnets"), link.name),
services=[]
)
    def _parse_bond_config_item(self, item):
        """Build NetworkDetailsV2 for one bond entry.

        Validates bond mode, LACP rate and transmit hash policy against
        the sets supported by ``network_model``; returns None (with a
        warning) when name or params are missing, raises
        CloudbaseInitException on unsupported values.
        """
        if not item.get('name'):
            LOG.warning("Bond does not have a name.")
            return
        bond_params = item.get('params')
        if not bond_params:
            LOG.warning("Bond does not have parameters")
            return
        bond_mode = bond_params.get('bond-mode')
        if bond_mode not in network_model.AVAILABLE_BOND_TYPES:
            raise exception.CloudbaseInitException(
                "Unsupported bond mode: %s" % bond_mode)
        bond_lacp_rate = None
        if bond_mode == network_model.BOND_TYPE_8023AD:
            # LACP rate is only meaningful for 802.3ad bonds.
            bond_lacp_rate = bond_params.get('bond-lacp-rate')
            if (bond_lacp_rate and bond_lacp_rate not in
                    network_model.AVAILABLE_BOND_LACP_RATES):
                raise exception.CloudbaseInitException(
                    "Unsupported bond lacp rate: %s" % bond_lacp_rate)
        bond_xmit_hash_policy = bond_params.get('xmit_hash_policy')
        if (bond_xmit_hash_policy and bond_xmit_hash_policy not in
                network_model.AVAILABLE_BOND_LB_ALGORITHMS):
            raise exception.CloudbaseInitException(
                "Unsupported bond hash policy: %s" %
                bond_xmit_hash_policy)
        bond_interfaces = item.get('bond_interfaces')
        bond = network_model.Bond(
            members=bond_interfaces,
            type=bond_mode,
            lb_algorithm=bond_xmit_hash_policy,
            lacp_rate=bond_lacp_rate,
        )
        link = network_model.Link(
            id=item.get('name'),
            name=item.get('name'),
            type=network_model.LINK_TYPE_BOND,
            enabled=True,
            mac_address=item.get('mac_address'),
            mtu=item.get('mtu'),
            bond=bond,
            vlan_link=None,
            vlan_id=None
        )
        return network_model.NetworkDetailsV2(
            links=[link],
            networks=self._parse_subnets(item.get("subnets"), link.name),
            services=[]
        )
def _parse_vlan_config_item(self, item):
if not item.get('name'):
LOG.warning("VLAN NIC does not have a name.")
return
link = network_model.Link(
id=item.get('name'),
name=item.get('name'),
type=network_model.LINK_TYPE_VLAN,
enabled=True,
mac_address=item.get('mac_address'),
mtu=item.get('mtu'),
bond=None,
vlan_link=item.get('vlan_link'),
vlan_id=item.get('vlan_id')
)
return network_model.NetworkDetailsV2(
links=[link],
networks=self._parse_subnets(item.get("subnets"), link.name),
services=[]
)
def _parse_nameserver_config_item(self, item):
return network_model.NetworkDetailsV2(
links=[],
networks=[],
services=[network_model.NameServerService(
addresses=item.get('address', []),
search=item.get('search')
)]
)
def _get_network_config_parser(self, parser_type):
parsers = {
self.NETWORK_LINK_TYPE_PHY: self._parse_physical_config_item,
self.NETWORK_LINK_TYPE_BOND: self._parse_bond_config_item,
self.NETWORK_LINK_TYPE_VLAN: self._parse_vlan_config_item,
self.NETWORK_SERVICE_NAMESERVER: self._parse_nameserver_config_item
}
parser = parsers.get(parser_type)
if not parser:
raise exception.CloudbaseInitException(
"Network config parser '%s' does not exist",
parser_type)
return parser
    def parse(self, network_config):
        """Parse a NoCloud network-config list into one NetworkDetailsV2.

        Aggregates links/networks/services from the per-type parsers.
        Returns None (not an empty NetworkDetailsV2) when the input is
        empty or not a list; unsupported or malformed items are skipped
        with a warning.
        """
        links = []
        networks = []
        services = []
        if not network_config:
            LOG.warning("Network configuration is empty")
            return
        if not isinstance(network_config, list):
            LOG.warning("Network config '%s' is not a list.",
                        network_config)
            return
        for network_config_item in network_config:
            if not isinstance(network_config_item, dict):
                LOG.warning("Network config item '%s' is not a dictionary",
                            network_config_item)
                continue
            net_conf_type = network_config_item.get("type")
            if net_conf_type not in self.SUPPORTED_NETWORK_CONFIG_TYPES:
                LOG.warning("Network config type '%s' is not supported",
                            net_conf_type)
                continue
            # Dispatch to the per-type parser; each returns a partial
            # NetworkDetailsV2 (or None) that is merged below.
            net_details = (
                self._get_network_config_parser(net_conf_type)
                (network_config_item))
            if net_details:
                links += net_details.links
                networks += net_details.networks
                services += net_details.services
        return network_model.NetworkDetailsV2(
            links=links,
            networks=networks,
            services=services
        )
class NoCloudConfigDriveService(baseconfigdrive.BaseConfigDriveService):
def __init__(self):
super(NoCloudConfigDriveServ |
#!/usr/bin/env python
import sys
def inv(s):
    """Return the numeric string s with its sign flipped.

    '-x' -> 'x', '+x' -> '-x', plain 'x' -> '-x'.
    """
    sign = s[0]
    if sign == '-':
        return s[1:]
    if sign == '+':
        return '-' + s[1:]
    # Plain (unsigned) number.
    return '-' + s
# Filter: reads 'p'/'s' records from stdin and writes them back with the
# value fields sign-inverted and swapped. Python 2 only (bare ``print``).
if len(sys.argv) != 1:
    # The script takes no arguments; anything else is a usage error.
    print 'Usage:', sys.argv[0]
    sys.exit(1)
for line in sys.stdin:
    linesplit = line.strip().split()
    if len(linesplit) == 3:
        # Point record: "p <a> <b>" -> "p -<b> <a>".
        assert(linesplit[0] == 'p')
        print('p ' + inv(linesplit[2]) + ' ' + linesplit[1])
    elif len(linesplit) == 5:
        # Segment record: both coordinate pairs are inverted and swapped.
        assert(linesplit[0] == 's')
        print('s ' + \
              inv(linesplit[2]) + ' ' + linesplit[1] + ' ' + \
              inv(linesplit[4]) + ' ' + linesplit[3] )
    elif len(linesplit) == 0:
        # Preserve blank separator lines.
        print
|
# -*- coding: utf-8 -*-
"""
anparser - an Open Source Android Artifact Parser
Copyright (C) 2015 Preston Miller
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'prmiller91'
__license__ = 'GPLv3'
__date__ = '20150129'
__version__ = '0.00'
from collections import OrderedDict
import logging
import yar | a
import pandas as pd
path = None
match = None
yara_list = []
def yara_parser(file_list, rules_path):
    """
    Parses files for Malware signatures with Yara
    :param file_list: List of all files
    :param rules_path: Path to custom Yara rules
    :return: DataFrame of matches (one row per matched string)
    """
    try:
        rules = yara.compile(rules_path)
    except (yara.libyara_wrapper.YaraSyntaxError, IOError) as exception:
        msg = 'Yara Rule Compilation Error: {0:s}'.format(rules_path + ' > ' + str(exception))
        print(msg)
        logging.error(msg)
        raise IOError
    for file_path in file_list:
        # Reset per file. Previously ``match`` was left over from the
        # prior iteration: a match error raised UnboundLocalError on the
        # first file, and on later files the *previous* file's match was
        # re-processed under the wrong path.
        match = None
        try:
            match = rules.match(file_path)
        except yara.libyara_wrapper.YaraMatchError as exception:
            msg = 'Yara Match Error: {0:s}'.format(file_path + ' > ' + str(exception))
            logging.error(msg)
            continue
        if match:
            yara_processor(match, file_path)
    return pd.DataFrame(yara_list)
def yara_processor(match, path):
    """
    Processes Yara Match for Output
    :param match: A single yara match
    :param path: File path for match
    :return: None -- rows are appended to the module-level ``yara_list``
    """
    # NOTE(review): output happens by mutating the global ``yara_list``
    # (consumed by yara_parser), not by returning a value.
    yara_data = OrderedDict()
    for key in match.keys():
        # Presumably the yara result layout:
        # {namespace: [{'rule', 'matches', 'strings', 'meta', 'tags'}]} --
        # only the first entry per namespace is used. TODO confirm against
        # the yara-python version in use.
        rule = match[key][0]['rule']
        matches = match[key][0]['matches']
        strings = match[key][0]['strings']
        meta = match[key][0]['meta']
        tags = match[key][0]['tags']
        for string in strings:
            # One output row per matched string.
            yara_data['File Path'] = path
            yara_data['Rule'] = rule
            yara_data['Matches'] = str(matches)
            if meta != {}:
                try:
                    yara_data['Author'] = meta['author']
                except KeyError:
                    yara_data['Author'] = ''
                try:
                    yara_data['Description'] = meta['description']
                except KeyError:
                    yara_data['Description'] = ''
            else:
                yara_data['Author'] = ''
                yara_data['Description'] = ''
            yara_data['Flag'] = string['flags']
            yara_data['Identifier'] = string['identifier']
            yara_data['Data'] = string['data']
            yara_data['Offset'] = string['offset']
            if tags == []:
                yara_data['Tags'] = ''
            else:
                yara_data['Tags'] = tags
            yara_list.append(yara_data)
            # Fresh dict so the row just appended is not mutated by the
            # next string.
            yara_data = OrderedDict()
from dart.model.base import BaseModel | , dictable
@dictable
class ApiKey(BaseModel):
    """API key/secret pair belonging to a user."""
    def __init__(self, id, user_id, api_key, api_secret):
        """
        :type user_id: str
        :type api_key: str
        :type api_secret: str
        """
        # ``id`` is stored as-is; its type is not documented upstream
        # (presumably str like the other fields -- TODO confirm).
        self.id = id
        self.user_id = user_id
        self.api_key = api_key
        self.api_secret = api_secret
|
import time
import recordlib
if __name__ == "__main__":
    # Wire two GPIO push-buttons to start/stop recording, then idle
    # forever; recordlib owns GPIO setup and the recording itself.
    recordlib.initialize()
    print("waiting for input")
    recordlib.logging.info("waiting for input")
    try:
        # define interrupt, get rising signal, debounce pin
        recordlib.GPIO.add_event_detect(
            recordlib.TASTER_1,
            recordlib.GPIO.RISING,
            callback=recordlib.start_recording,
            bouncetime=1000
        )
        recordlib.GPIO.add_event_detect(
            recordlib.TASTER_2,
            recordlib.GPIO.RISING,
            callback=recordlib.stop_recording,
            bouncetime=1000
        )
        # keep script running
        while True:
            time.sleep(0.5)
    finally:
        # Always release the GPIO pins, even on Ctrl-C.
        recordlib.GPIO.cleanup()
        print("\nQuit\n")
|
from __future__ import print_function
from imports import *
import common
class Base( common.Base ):
    # Shared fixture base for this module's tests; all behavior comes
    # from common.Base.
    pass
class TestUnitMiSeqToNewbler( Base ):
    """Unit tests for fix_fastq.miseq_to_newbler_id."""
    def _C( self, *args, **kwargs ):
        # Import lazily so collection does not require the package.
        from bactpipeline.fix_fastq import miseq_to_newbler_id
        return miseq_to_newbler_id( *args, **kwargs )
    def test_r1_correct( self ):
        result = self._C( 'abcd 1' )
        eq_( 'abcd#0/1 (abcd 1)', result )
    def test_r2_correct( self ):
        result = self._C( 'abcd 2' )
        eq_( 'abcd#0/2 (abcd 2)', result )
class TestUnitModFqRead( Base ):
    """Unit tests for fix_fastq.mod_fq_read."""
    def _C( self, *args, **kwargs ):
        from bactpipeline.fix_fastq import mod_fq_read
        return mod_fq_read( *args, **kwargs )
    def test_mods_correctly( self ):
        from bactpipeline.fix_fastq import miseq_to_newbler_id as mtni
        read_id = 'abcd 1'
        sequence = 'ATGC'
        quality = 'IIII'
        result = self._C( read_id, sequence, quality )
        expected = '{0}\n{1}\n+\n{2}\n'.format( mtni(read_id), sequence, quality )
        eq_( expected, result )
class TestUnitParseFq( Base ):
    """Unit tests for fix_fastq.parse_fq."""
    def _C( self, *args, **kwargs ):
        from bactpipeline.fix_fastq import parse_fq
        return parse_fq( *args, **kwargs )
    def fake_fq( self ):
        # Write 100 records alternating read number 2/1 with fixed
        # sequence and quality.
        with open( 'fake.fq', 'w' ) as fh:
            for i in range( 1, 101 ):
                fh.write( '@abcd:{0} {1}\n'.format( i, (i%2)+1) )
                fh.write( 'ACGT\n+\nIIII\n' )
        return 'fake.fq'
    def test_parses( self ):
        fq_path = self.fake_fq()
        for rec_id, seq, qual in self._C( fq_path ):
            id_fields = rec_id.split()
            eq_( '@abcd', id_fields[0].split(':')[0] )
            eq_( 'ACGT', seq )
            eq_( 'IIII', qual )
class TestFunctional( Base ):
    """End-to-end: run the fix_fastq console script on fixture files."""
    def sample_files( self ):
        # All fastq fixtures shipped with the test suite.
        fixdir = join( dirname(__file__), 'fixtures', 'fix_fastq' )
        return glob( join( fixdir, '*.fastq' ) )
    def _C( self, *args, **kwargs ):
        # Invoke the installed script; returns the process exit status.
        script = 'fix_fastq'
        cmd = [script]
        if kwargs.get('outdir',False):
            cmd += ['-o', kwargs.get('outdir')]
        cmd += list(*args)
        print(cmd)
        return subprocess.call( cmd )
    def test_runs_correctly( self ):
        fastqs = self.sample_files()
        r = self._C( fastqs )
        eq_( 0, r )
        ok_( exists( 'outdir' ), 'did not create outdir by default' )
        fqs = os.listdir( 'outdir' )
        # Every input fastq must have a matching file in the output dir.
        eq_( set([]), set([basename(fq) for fq in fastqs]) - set(fqs) )
|
"""
@brief test log(time=200s)
"""
import os
import unittest
import math
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
from ensae_teaching_cs.special.image.image_synthese_base import Vecteur, Couleur, Source, Repere
from ensae_teaching_cs.special.image.image_synthese_sphere import Sphere
from ensae_teaching_cs.special.image.image_synthese_phong import ScenePhong
from ensae_teaching_cs.special.image.image_synthese_facette import Rectangle
from ensae_teaching_cs.special.image.image_synthese_facette_image import RectangleImage, SphereReflet
class TestImageSyntheseImage(unittest.TestCase):
    """Ray-traces a small Phong scene; skipped on travis (no pygame)."""
    def test_scene_image(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp = get_temp_folder(__file__, "temp_scene_bette")
        image = os.path.join(temp, "..", "data", "bette_davis.png")
        # Scene: wide field of view (pi/1.5), 400x200 output image.
        s = ScenePhong(Repere(), math.pi / 1.5, 400, 200)
        s.ajoute_source(Source(Vecteur(0, 8, 8), Couleur(0.4, 0.4, 0.4)))
        s.ajoute_source(Source(Vecteur(10, 0, 0), Couleur(0.4, 0.4, 0.4)))
        s.ajoute_source(Source(Vecteur(8, 8, 4.5), Couleur(0.4, 0.4, 0.4)))
        s.ajoute_objet(Sphere(Vecteur(3, -4, 7), 1, Couleur(1, 0, 0)))
        # Very large reflective sphere acting as the ground.
        s.ajoute_objet(SphereReflet(Vecteur(0, -400, 12),
                                    396, Couleur(0.5, 0.5, 0.5), 0.5))
        s.ajoute_source(Source(Vecteur(7, 2, 8), Couleur(0.2, 0.2, 0.2)))
        s.ajoute_source(Source(Vecteur(12.5, 3, 5), Couleur(0.2, 0.2, 0.2)))
        s.ajoute_source(Source(Vecteur(-12.5, 1, 6), Couleur(0.2, 0.2, 0.2)))
        s.ajoute_objet(Rectangle(Vecteur(-12.4, 0.99, 5.9), Vecteur(-12.6, 0.99, 5.9),
                                 Vecteur(-12.6, 0.99, 6.1), None, Couleur(0, 0, 0)))
        if is_travis_or_appveyor() == "travis":
            # pygame cannot be installed there; bail out before importing.
            warnings.warn("pygame is not available")
            return
        import pygame
        # Textured rectangle using the sample image.
        s.ajoute_objet(RectangleImage(Vecteur(8, -3.5, 9), Vecteur(2, -3.5, 8),
                                      Vecteur(2, 3.8, 8), None, image, invertx=True, pygame=pygame))
        from ensae_teaching_cs.helpers.pygame_helper import wait_event
        screen = pygame.display.set_mode(s.dim)
        screen.fill((255, 255, 255))
        s.construit_image(screen, pygame=pygame, fLOG=fLOG)
        pygame.image.save(screen, os.path.join(temp, "scene_bette.png"))
        if __name__ == "__main__":
            # Only block on a window event when run interactively.
            wait_event(pygame)
if __name__ == "__main | __":
unittest.main()
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
''' |
Created on 18 Feb 2013
@author: George
'''
'''
models a frame entity. This can flow through the system and carry parts
'''
from simpy import Resource
from Globals import G
from Entity import Entity
#The entity object
class Frame(Entity):
    # Frame entity that flows through the system carrying parts; carried
    # parts occupy slots of the internal Resource.
    type="Frame"
    capacity=4 #the number of parts that the frame can take
    def __init__(self, id=None, name=None,**kw):
        Entity.__init__(self,id=id,name = name)
        # NOTE(review): ``Resource(self.capacity)`` uses the legacy SimPy
        # calling convention; modern simpy expects (env, capacity) --
        # confirm against the SimPy version in use.
        self.Res=Resource(self.capacity)
        #dimension data
        self.width=2.0
        self.height=2.0
        # NOTE(review): 'lenght' is a typo but part of the public
        # attribute surface; renaming would break callers.
        self.lenght=2.0
    def getFrameQueue(self):
        # Parts currently loaded onto the frame.
        return self.Res.users
|
#
# This is a parser that generates the document tree for you.
#
# To use this parser, create an instance of XElementParser:
# parser = saxexts.make_parser()
# xp = XElementParser(parser)
#
# If you have defined classes in the current environment, you might want to
# pass this environment *to* the parser, so your classes will be created as
# tree nodes instead of the default (base) XElement class instances:
#
#
# def MyElementClass1(XElement): ...
# def MyElementClass2(XElement): ...
# ...
#
# parser = saxexts.make_parser()
# xp = XElementParser(parser, vars())
#
# Once your parser is constructed, you can parse one or more documents as
# follows:
# doc_list = ['f1','f2','f3']
# -or-
# doc_list = ['url1','url2','url3']
#
# for doc in doc_list:
# doc_tree = xp.process(doc)
# print doc_tree.toXML()
import strin | g
import sys
import types
from xml.sax import saxexts
from xml.sax import saxlib
from xelement import XElement, XTreeHandler
class XElementParser:
def __init__(self, outer_env={}, parser=None):
if parser == None:
self.parser = saxexts.XMLValParserFa | ctory.make_parser()
else:
self.parser = parser
self.parser_error_handler = ErrorPrinter()
self.parser.setErrorHandler(self.parser_error_handler)
self.xth = XTreeHandler(IgnoreWhiteSpace='yes',
RemoveWhiteSpace='yes',
CreateElementMap='yes',
RequireUserClasses='yes')
for x in outer_env.keys():
if type(outer_env[x]) == types.ClassType or isinstance(x, object):
self.xth.registerElementClass(outer_env[x], x)
self.parser.setDocumentHandler(self.xth)
def process(self, document_uri):
Ok=None
try:
self.parser_error_handler.reset()
self.parser.parse(document_uri)
if self.parser_error_handler.has_errors():
raise "validation failed"
return self.xth.getDocument().getChild()
except IOError,e:
print "\nI/O Error: " + document_uri + ": " + str(e)
except saxlib.SAXException,e:
print "\nParse Error: " + document_uri + ": " + str(e)
class ErrorPrinter:
    "A simple class that just prints error messages to standard out."
    def __init__(self):
        self.error_count = 0
    def reset(self):
        # Called before each parse so counts do not leak across documents.
        self.error_count = 0
    def has_errors(self):
        # Truthy count doubles as the boolean "had errors" flag.
        return self.error_count
    def warning(self, exception):
        # NOTE(review): terminating the whole process on a *warning* is
        # unusually strict -- confirm this is intended.
        print "Warning: %s %s" % (str(exception), exception.getMessage())
        sys.exit(1)
    def error(self, exception):
        self.error_count = self.error_count + 1
        print "Error: %s %s" % (str(exception), exception.getMessage())
    def fatalError(self, exception):
        self.error_count = self.error_count + 1
        print "Fatal Error: %s %s" % (str(exception), exception.getMessage())
|
import os
import re
# Celery configuration driven by Heroku-style environment variables:
# CLOUDAMQP_URL supplies the broker, MONGOLAB_URI (if set) is split into
# host/port vs. database for consumers that need them separately.
BROKER_URL = os.getenv("CLOUDAMQP_URL", 'amqp://')
# BROKER_POOL_LIMIT = None
MONGOLAB_URI = None
MONGOLAB_DB = None
URI_WITH_AUTH = None
mongolab = os.getenv("MONGOLAB_URI")
if mongolab is not None:
    # Expected form: mongodb://user:pass@host:port/db
    uri_pat = r"mongodb://([^:]+):([^@]+)@([^:]+):(\d+)/(.+)"
    # NOTE(review): re.match returns None when the URI lacks credentials,
    # which would raise AttributeError at import time -- confirm the URIs
    # always include user:pass.
    user, passwd, host, port, db = re.match(uri_pat, mongolab).groups()
    uri = "mongodb://{}:{}".format(host, port)
    MONGOLAB_URI = uri
    MONGOLAB_DB = db
    # CELERY_RESULT_BACKEND = uri
    # CELERY_MONGODB_BACKEND_SETTINGS = {
    #     'database': db,
    #     'user': user,
    #     'password': passwd
    # }
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
|
r_class(
[_('An error occurred in our system. '
'Please try again later.')]
)
form.errors['__all__'] = msg
else:
return HttpResponseRedirect(
reverse('firefox.android.sms-thankyou'))
return l10n_utils.render(request, 'firefox/android/sms-send.html',
{'sms_form': form})
def windows_billboards(req):
    """Serve the unsupported-Windows page matching the reported version.

    Windows XP (NT 5.1) gets its own page; any other or missing version
    falls through to the Windows 2000 page.
    """
    major = req.GET.get('majorVersion')
    minor = req.GET.get('minorVersion')
    if major and minor:
        is_winxp = (float(major), float(minor)) == (5.0, 1.0)
        if is_winxp:
            return l10n_utils.render(req, 'firefox/unsupported/winxp.html')
    return l10n_utils.render(req, 'firefox/unsupported/win2k.html')
def fx_home_redirect(request):
    # Legacy Firefox home URL: send visitors to the download page.
    return HttpResponseRedirect(reverse('firefox.new'))
def dnt(request):
    # The Do-Not-Track page varies with the DNT request header, so make
    # HTTP caches key on it.
    response = l10n_utils.render(request, 'firefox/dnt.html')
    response['Vary'] = 'DNT'
    return response
def all_downloads(request, channel):
    """Render the "download Firefox in any language" page for a channel."""
    if channel is None:
        channel = 'release'
    if channel == 'organizations':
        # The organizations URL is an alias for ESR builds.
        channel = 'esr'
    version = get_latest_version('firefox', channel)
    # Optional free-text filter over language/platform builds.
    query = request.GET.get('q')
    channel_names = {
        'release': _('Firefox'),
        'beta': _('Firefox Beta'),
        'aurora': _('Firefox Aurora'),
        'esr': _('Firefox Extended Support Release'),
    }
    return l10n_utils.render(request, 'firefox/all.html', {
        'full_builds': firefox_details.get_filtered_full_builds(version, query),
        'test_builds': firefox_details.get_filtered_test_builds(version, query),
        'query': query,
        'channel': channel,
        'channel_name': channel_names[channel],
    })
@csrf_protect
def firefox_partners(request):
    """Render and process the Firefox partners page and its lead form."""
    # If the current locale isn't in our list, return the en-US value
    press_locale = request.locale if (
        request.locale in LOCALE_FXOS_HEADLINES) else 'en-US'
    template_vars = {
        'locale_headline_url': LOCALE_FXOS_HEADLINES[press_locale]['url'],
        'locale_headline_title': LOCALE_FXOS_HEADLINES[press_locale]['title'],
        'js_common': JS_COMMON,
        'js_mobile': JS_MOBILE,
        'js_desktop': JS_DESKTOP,
    }
    # Values forwarded into the partnership (CRM lead) form handler.
    form_kwargs = {
        'interest_set': 'fx',
        'lead_source': 'www.mozilla.org/firefox/partners/'}
    return process_partnership_form(
        request, 'firefox/partners/index.html', 'firefox.partners.index', template_vars, form_kwargs)
def releases_index(request):
    """Render the release-notes index, minor releases grouped by major."""
    releases = {}
    major_releases = firefox_details.firefox_history_major_releases
    minor_releases = firefox_details.firefox_history_stability_releases
    for major_release in major_releases:
        major_version = float(re.findall(r'^\d+\.\d+', major_release)[0])
        # The version numbering scheme of Firefox changes sometimes. The
        # second number has not been used since Firefox 4, then was
        # reintroduced with Firefox ESR 24 (Bug 870540). On this index
        # page, 24.1.x should fall under 24.0 -- this pattern is the
        # tricky part.
        major_pattern = r'^' + re.escape(
            ('%s' if major_version < 4 else '%g') % round(major_version, 1))
        matching_minors = [minor for minor in minor_releases
                           if re.findall(major_pattern, minor)]
        releases[major_version] = {
            'major': major_release,
            'minor': sorted(matching_minors,
                            key=lambda x: int(re.findall(r'\d+$', x)[0])),
        }
    return l10n_utils.render(request, 'firefox/releases/index.html',
                             {'releases': sorted(releases.items(), reverse=True)})
def latest_notes(request, product='firefox', channel='release'):
    """Redirect to the notes page for the newest version on a channel."""
    version = get_latest_version(product, channel)
    if channel == 'beta':
        # e.g. 30.0b4 -> 30.0beta
        version = re.sub(r'b\d+$', 'beta', version)
    if channel == 'organizations':
        version = re.sub(r'esr$', '', version)
    notes_dir = 'auroranotes' if channel == 'aurora' else 'releasenotes'
    segments = [product, version, notes_dir]
    locale = getattr(request, 'locale', None)
    if locale:
        segments.insert(0, locale)
    return HttpResponseRedirect('/' + '/'.join(segments) + '/')
def latest_sysreq(request, channel='release'):
    """Redirect to the system requirements of the newest Firefox version."""
    version = get_latest_version('firefox', channel)
    if channel == 'beta':
        version = re.sub(r'b\d+$', 'beta', version)
    if channel == 'organizations':
        # ESR: keep the major number only, e.g. 24.1.0esr -> 24.0
        version = re.sub(r'^(\d+).+', r'\1.0', version)
    segments = ['firefox', version, 'system-requirements']
    locale = getattr(request, 'locale', None)
    if locale:
        segments.insert(0, locale)
    return HttpResponseRedirect('/' + '/'.join(segments) + '/')
def show_whatsnew_tour(oldversion):
    """Return True when the prior version predates the tour cutoff (29)."""
    leading_digits = re.match(r'\d{1,2}', oldversion)
    if not leading_digits:
        # Unparseable version string: no tour.
        return False
    return int(leading_digits.group(0)) < 29
class LatestFxView(TemplateView):
    """
    Base class to be extended by views that require visitor to be
    using latest version of Firefox. Classes extending this class must
    implement either `get_template_names` function or provide
    `template_name` class attribute.
    """
    @vary_on_headers('User-Agent')
    def dispatch(self, *args, **kwargs):
        # Responses depend on the browser, so caches must vary on UA.
        return super(LatestFxView, self).dispatch(*args, **kwargs)
    def post(self, request, *args, **kwargs):
        # required for newsletter form post that is handled in
        # newsletter/helpers.py
        return self.get(request, *args, **kwargs)
    def redirect_to(self):
        """
        Redirect visitors based on their user-agent.
        - Up-to-date Firefox users pass through.
        - Other Firefox users go to the new page.
        - Non Firefox users go to the new page.
        Returns the redirect URL, or None to render normally.
        """
        query = self.request.META.get('QUERY_STRING')
        query = '?' + query if query else ''
        user_agent = self.request.META.get('HTTP_USER_AGENT', '')
        if 'Firefox' not in user_agent:
            return reverse('firefox.new') + query
        # TODO : Where to redirect bug 757206
        user_version = '0'
        match = UA_REGEXP.search(user_agent)
        if match:
            user_version = match.group(1)
        if not is_current_or_newer(user_version):
            return reverse('firefox.new') + query
        # Current enough: no redirect.
        return None
    def render_to_response(self, context, **response_kwargs):
        redirect_url = self.redirect_to()
        if redirect_url is not None:
            return HttpResponsePermanentRedirect(redirect_url)
        else:
            return l10n_utils.render(self.request,
                                     self.get_template_names(),
                                     context,
                                     **response_kwargs)
class FirstrunView(LatestFxView):
    """Firstrun page: forces HTTPS in production and picks a tour template."""
    def get(self, request, *args, **kwargs):
        if not settings.DEV and not request.is_secure():
            # Force HTTPS before rendering anything.
            uri = 'https://{host}{path}'.format(
                host=request.get_host(),
                path=request.get_full_path(),
            )
            return HttpResponsePermanentRedirect(uri)
        return super(FirstrunView, self).get(request, *args, **kwargs)
    def get_template_names(self):
        locale = l10n_utils.get_locale(self.request)
        fc_ctx = funnelcake_param(self.request)
        f = fc_ctx.get('funnelcake_id', 0)
        # Funnelcake experiment 30 (en-US only) gets the no-tour variant.
        if f == '30' and locale == 'en-US':
            template = 'firefox/australis/firstrun-no-tour.html'
        else:
            template = 'firefox/australis/firstrun-tour.html'
        # return a list to conform with original intention
        return [template]
class WhatsnewView(LatestFxView):
# Locales targeted for FxOS
fxos_locales = []
locales_with_video = {
'en-US': 'american',
'en-GB': 'british',
'de': 'german_final',
'it': 'italian_final',
'ja': 'japanese_final',
'es-AR': 'spanish_final',
'es-CL': 'spanish_final',
'es-ES': 'spanish_final',
'es-MX': 'spanish_final',
}
def get(self, request, *args, **kwargs):
if not settings.DEV and not request.is |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2019 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Job Manager."""
import json
import shlex
from flask import current_app
from reana_commons.utils import calculate_file_access_time
from reana_db.database import Session
from reana_db.models import Job as JobTable
from reana_db.models import JobCache, JobStatus, Workflow
class JobManager:
    """Job management interface.

    Backend-specific subclasses implement ``execute``/``get_status``/
    ``get_logs``/``stop``; this base class owns the DB bookkeeping.
    """

    def __init__(
        self,
        docker_img="",
        cmd=None,
        prettified_cmd="",
        env_vars=None,
        workflow_uuid=None,
        workflow_workspace=None,
        job_name=None,
    ):
        """Instantiate basic job.

        :param docker_img: Docker image.
        :type docker_img: str
        :param cmd: Command to execute (argv list, or a shell string
            which is split with :func:`shlex.split`).
        :type cmd: list
        :param prettified_cmd: Prettified version of command to execute.
        :type prettified_cmd: str
        :param env_vars: Environment variables.
        :type env_vars: dict
        :param workflow_uuid: Unique workflow id.
        :type workflow_uuid: str
        :param workflow_workspace: Absolute path to workspace.
        :type workflow_workspace: str
        :param job_name: Name of the job.
        :type job_name: str
        """
        self.docker_img = docker_img or ""
        if isinstance(cmd, str):
            self.cmd = shlex.split(cmd)
        else:
            self.cmd = cmd or []
        self.prettified_cmd = prettified_cmd
        self.workflow_uuid = workflow_uuid
        self.workflow_workspace = workflow_workspace
        self.job_name = job_name
        # The previous ``cmd=[]``/``env_vars={}`` defaults were mutable
        # default arguments shared by every instance; in particular the
        # default env dict was mutated by _extend_env_vars, leaking
        # REANA_* values between unrelated jobs. ``None`` sentinels fix
        # that while keeping the signature backward compatible.
        self.env_vars = self._extend_env_vars(
            {} if env_vars is None else env_vars)

    def execution_hook(fn):
        """Add before execution hooks and DB operations.

        Wraps an ``execute`` implementation: runs ``before_execution``,
        calls the backend, then records the job in the DB and job cache.
        """
        def wrapper(inst, *args, **kwargs):
            inst.before_execution()
            backend_job_id = fn(inst, *args, **kwargs)
            inst.create_job_in_db(backend_job_id)
            inst.cache_job()
            return backend_job_id
        return wrapper

    def before_execution(self):
        """Before job submission hook."""
        pass

    def after_execution(self):
        """After job submission hook."""
        pass

    @execution_hook
    def execute(self):
        """Execute a job.

        :returns: Job ID.
        :rtype: str
        """
        raise NotImplementedError

    def get_status(self):
        """Get job status.

        :returns: job status.
        :rtype: str
        """
        raise NotImplementedError

    def get_logs(self):
        """Get job log.

        :returns: stderr, stdout of a job.
        :rtype: dict
        """
        raise NotImplementedError

    def stop(self):
        """Stop a job."""
        raise NotImplementedError

    def create_job_in_db(self, backend_job_id):
        """Create job in db."""
        # NOTE(review): relies on ``compute_backend``, ``cvmfs_mounts``
        # and ``shared_file_system`` attributes that subclasses must set.
        job_db_entry = JobTable(
            backend_job_id=backend_job_id,
            workflow_uuid=self.workflow_uuid,
            status=JobStatus.created.name,
            compute_backend=self.compute_backend,
            cvmfs_mounts=self.cvmfs_mounts or "",
            shared_file_system=self.shared_file_system or False,
            docker_img=self.docker_img,
            cmd=json.dumps(self.cmd),
            env_vars=json.dumps(self.env_vars),
            deleted=False,
            job_name=self.job_name,
            prettified_cmd=self.prettified_cmd,
        )
        Session.add(job_db_entry)
        Session.commit()
        self.job_id = str(job_db_entry.id_)

    def cache_job(self):
        """Cache a job."""
        workflow = (
            Session.query(Workflow).filter_by(id_=self.workflow_uuid).one_or_none()
        )
        access_times = calculate_file_access_time(workflow.workspace_path)
        prepared_job_cache = JobCache()
        prepared_job_cache.job_id = self.job_id
        prepared_job_cache.access_times = access_times
        Session.add(prepared_job_cache)
        Session.commit()

    def update_job_status(self):
        """Update job status in DB."""
        pass

    def _extend_env_vars(self, env_vars):
        """Extend environment variables with REANA specific ones."""
        prefix = "REANA"
        env_vars[prefix + "_WORKSPACE"] = self.workflow_workspace
        env_vars[prefix + "_WORKFLOW_UUID"] = str(self.workflow_uuid)
        return env_vars
|
#!/usr/bin/env python
#encode=utf-8
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-6-24
#Copyright 2013 nuoqingyun xuqifeng
from bson.code import Cod | e
traffic_map = Code("function () {"
"emit(th | is.domain, this.bytes);"
"}")
traffic_reduce = Code("function (key, values) {"
" var sum = 0;"
" var count = 0;"
" values.forEach(function(byte){"
" sum += byte;"
" count ++;"
"});"
" return {'sum':sum, 'count':count};"
"}")
traffic_reduce1 = Code("function (keyDomain, valuesBytes) {"
" return Array.sum(valuesBytes);"
"}")
traffic_map_test = Code("function () {"
"emit(this.domain, {bytes:this.bytes, visit:1, hits:this.code});"
"}")
traffic_reduce_test = Code("function (key, values) {"
" var sum = 0;"
" var count = 0;"
" var visits = 0;"
" values.forEach(function(vals){"
" sum += vals.bytes;"
" count += vals.hits;"
" visits += vals.visit;"
"});"
" return {bytes:sum, visit:visits, hits:count};"
"}")
|
ndaModules import Vec3
from direct.interval.IntervalGlobal import Sequence, Parallel, Wait, Func
from direct.interval.IntervalGlobal import LerpScaleInterval
from direct.interval.IntervalGlobal import WaitInterval, ActorInterval, FunctionInterval
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from toontown.minigame.OrthoWalk import OrthoWalk
from toontown.minigame.MinigameRulesPanel import MinigameRulesPanel
from toontown.parties import PartyGlobals
from direct.fsm import ClassicFSM, State
class PartyCatchActivityToonSD(StateData.StateData):
    """Per-toon animation/state FSM for the party catch activity."""
    notify = DirectNotifyGlobal.directNotify.newCategory('PartyCatchActivityToonSD')
    # Animation names used by the catch states below.
    FallBackAnim = 'slip-backward'
    FallFwdAnim = 'slip-forward'
    CatchNeutralAnim = 'catch-neutral'
    CatchRunAnim = 'catch-run'
    EatNeutralAnim = 'catch-eatneutral'
    EatNRunAnim = 'catch-eatnrun'
    # Pre-posed in load() so first playback does not hitch.
    animList = [FallBackAnim,
     FallFwdAnim,
     CatchNeutralAnim,
     CatchRunAnim,
     EatNeutralAnim,
     EatNRunAnim]
    def __init__(self, avId, activity):
        # avId: avatar doId for this toon; activity: owning catch activity.
        PartyCatchActivityToonSD.notify.debug('init : avId = %s, activity = %s ' % (avId, activity))
        self.avId = avId
        self.activity = activity
        self.isLocal = avId == base.localAvatar.doId
        self.toon = self.activity.getAvatar(self.avId)
        self.unexpectedExit = False
        # State graph: init -> (notPlaying|normal|rules); gameplay states
        # normal/eatFruit/fallBack/fallForward cycle among themselves and
        # everything funnels into cleanup.
        self.fsm = ClassicFSM.ClassicFSM('CatchActivityAnimFSM-%s' % self.avId, [State.State('init', self.enterInit, self.exitInit, ['notPlaying', 'normal', 'rules']),
         State.State('notPlaying', self.enterNotPlaying, self.exitNotPlaying, ['normal', 'rules', 'cleanup']),
         State.State('rules', self.enterRules, self.exitRules, ['normal', 'cleanup']),
         State.State('normal', self.enterNormal, self.exitNormal, ['eatFruit',
          'fallBack',
          'fallForward',
          'notPlaying']),
         State.State('eatFruit', self.enterEatFruit, self.exitEatFruit, ['normal',
          'fallBack',
          'fallForward',
          'eatFruit',
          'notPlaying']),
         State.State('fallBack', self.enterFallBack, self.exitFallBack, ['normal', 'notPlaying']),
         State.State('fallForward', self.enterFallForward, self.exitFallForward, ['normal', 'notPlaying']),
         State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'init', 'cleanup')
        self.enteredAlready = False
    def load(self):
        # Pre-pose every catch anim at frame 0 so the first real playback
        # does not hitch on load.
        self.setAnimState('off', 1.0)
        for anim in self.animList:
            self.toon.pose(anim, 0)
    def unload(self):
        # Drop the FSM (it holds bound-method refs back to self).
        del self.fsm
    def enter(self):
        # enter() may be called more than once; only start the FSM on the
        # first call.
        if not self.enteredAlready:
            self.enteredAlready = True
            self.fsm.enterInitialState()
            self._exiting = False
    def exit(self, unexpectedExit = False):
        """Shut the FSM down, unless the exit was unexpected (e.g. a crash).

        The _exiting flag guards against recursion: requestFinalState may
        trigger callbacks that call exit() again.
        """
        if self._exiting:
            return
        self._exiting = True
        self.unexpectedExit = unexpectedExit
        if not self.unexpectedExit:
            self.fsm.requestFinalState()
        # NOTE(review): deleting the flag means a second exit() without an
        # intervening enter() would raise AttributeError -- confirm intended.
        del self._exiting
    def enterInit(self):
        """One-time setup: blink, ortho-walk for the local toon, dim shadow."""
        self.notify.debug('enterInit')
        self.toon.startBlink()
        self.toon.stopLookAround()
        if self.isLocal:
            self.activity.initOrthoWalk()
        # Remember the original shadow color so it can be restored later,
        # then fade the shadow for the duration of the activity.
        self.dropShadow = self.toon.dropShadow
        self.origDropShadowColor = self.dropShadow.getColor()
        c = self.origDropShadowColor
        alpha = 0.35
        self.dropShadow.setColor(c[0], c[1], c[2], alpha)
    def exitInit(self):
        # Nothing to undo; enterInit state is restored by enterNotPlaying.
        pass
    def enterNotPlaying(self):
        """Return the toon to idle: neutral anim, input off, shadow restored."""
        self.toon.stopBlink()
        self.toon.startLookAround()
        self.setAnimState('neutral', 1.0)
        if self.isLocal:
            self.activity.orthoWalk.stop()
        # Restore the full original shadow color (including alpha).
        self.dropShadow.setColor(self.origDropShadowColor)
    def exitNotPlaying(self):
        # Re-dim the shadow when play resumes (mirrors enterInit).
        self.dropShadow = self.toon.dropShadow
        self.origDropShadowColor = self.dropShadow.getColor()
        c = self.origDropShadowColor
        alpha = 0.35
        self.dropShadow.setColor(c[0], c[1], c[2], alpha)
    def enterRules(self):
        """Show the rules panel to the local player; remote toons skip ahead."""
        if self.isLocal:
            self.notify.debug('enterNormal')
            self.setAnimState('Catching', 1.0)
            self.activity.orthoWalk.stop()
            # handleRulesDone fires when the panel is dismissed or times out.
            self.accept(self.activity.rulesDoneEvent, self.handleRulesDone)
            self.rulesPanel = MinigameRulesPanel('PartyRulesPanel', self.activity.getTitle(), self.activity.getInstructions(), self.activity.rulesDoneEvent, PartyGlobals.DefaultRulesTimeout)
            # Hide HUD cells that would overlap the panel.
            base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], False)
            self.rulesPanel.load()
            self.rulesPanel.enter()
        else:
            self.fsm.request('normal')
    def handleRulesDone(self):
        """Rules panel dismissed: start playing."""
        self.fsm.request('normal')
    def exitRules(self):
        """Tear down the rules panel and restore the HUD cells."""
        self.setAnimState('off', 1.0)
        self.ignore(self.activity.rulesDoneEvent)
        # The panel only exists for the local player (see enterRules).
        if hasattr(self, 'rulesPanel'):
            self.rulesPanel.exit()
            self.rulesPanel.unload()
            del self.rulesPanel
            base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], True)
    def enterNormal(self):
        """Regular catching play: run anim, input on, look up at the sky."""
        self.notify.debug('enterNormal')
        self.setAnimState('Catching', 1.0)
        if self.isLocal:
            self.activity.orthoWalk.start()
        # Tilt the head up-forward so the toon appears to watch for fruit.
        self.toon.lerpLookAt(Vec3.forward() + Vec3.up(), time=0.2, blink=0)
    def exitNormal(self):
        """Stop catching play and level the toon's gaze."""
        self.setAnimState('off', 1.0)
        if self.isLocal:
            self.activity.orthoWalk.stop()
        self.toon.lerpLookAt(Vec3.forward(), time=0.2, blink=0)
    def eatFruit(self, fruitModel, handNode):
        """Trigger the eat-fruit state, restarting it if already eating."""
        # The FSM disallows eatFruit -> eatFruit directly, so bounce through
        # 'normal' first when a catch happens mid-munch.
        if self.fsm.getCurrentState().getName() == 'eatFruit':
            self.fsm.request('normal')
        self.fsm.request('eatFruit', [fruitModel, handNode])
    def enterEatFruit(self, fruitModel, handNode):
        """Play the eating anim while the caught fruit shrinks in the hand."""
        self.notify.debug('enterEatFruit')
        self.setAnimState('CatchEating', 1.0)
        if self.isLocal:
            self.activity.orthoWalk.start()
        self.fruitModel = fruitModel
        # Preserve the fruit's world-space size across the reparent to the hand.
        renderScale = fruitModel.getScale(render)
        fruitModel.reparentTo(handNode)
        fruitModel.setScale(render, renderScale)
        duration = self.toon.getDuration('catch-eatneutral')
        # Shrink the fruit to half size over the first half of the anim, hide
        # it, then fall back to 'normal' when the anim finishes.
        self.eatIval = Sequence(Parallel(WaitInterval(duration), Sequence(LerpScaleInterval(fruitModel, duration / 2.0, fruitModel.getScale() * 0.5, blendType='easeInOut'), Func(fruitModel.hide))), Func(self.fsm.request, 'normal'), name=self.toon.uniqueName('eatingIval'))
        self.eatIval.start()
    def exitEatFruit(self):
        """Stop the eating interval and dispose of the fruit model."""
        self.eatIval.pause()
        del self.eatIval
        self.fruitModel.reparentTo(hidden)
        self.fruitModel.removeNode()
        del self.fruitModel
        self.setAnimState('off', 1.0)
        if self.isLocal:
            self.activity.orthoWalk.stop()
    def enterFallBack(self):
        """Play a retimed backward slip, then return to 'normal'."""
        self.notify.debug('enterFallBack')
        if self.isLocal:
            base.playSfx(self.activity.sndOof)
        duration = 1.0
        animName = self.FallBackAnim
        # Skip the first 12 anticipation frames and retime the remainder of
        # the animation to fit exactly into `duration` seconds.
        startFrame = 12
        totalFrames = self.toon.getNumFrames(animName)
        frames = totalFrames - 1 - startFrame
        frameRate = self.toon.getFrameRate(animName)
        newRate = frames / duration
        playRate = newRate / frameRate
        def resume(self = self):
            # Bound as a default arg so the callback keeps a reference.
            self.fsm.request('normal')
        self.fallBackIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume))
        self.fallBackIval.start()
    def exitFallBack(self):
        """Halt the fall-back interval if it is still running."""
        self.fallBackIval.pause()
        del self.fallBackIval
def enterFallForward(self):
self.notify.debug('enterFallForward')
if self.isLocal:
base.playSfx(self.activity.sndOof)
duration = 2.0
animName = self.FallFwdAnim
startFrame = 12
totalFrames = self.toon.getNumFrames(animName)
frames = totalFrames - 1 - startFrame
pauseFrame = 19
frameRate = self.toon.getFrameRate(animName)
newRate = frames / (duration * 0.5)
playRate = newRate / frameRate
|
rue
self.save()
def is_unseen(self):
"""
returns value of self.unseen but also changes it to false.
Use this in a template to mark an unseen notice differently the first
time it is shown.
"""
unseen = self.unseen
if unseen:
self.unseen = False
self.save()
return unseen
    class Meta:
        # Newest notices first.
        ordering = ["-added"]
        verbose_name = _("notice")
        verbose_name_plural = _("notices")
    def get_absolute_url(self):
        # Old-style permalink tuple: resolved against the named URL pattern.
        return ("notification_notice", [str(self.pk)])
    get_absolute_url = models.permalink(get_absolute_url)
class NoticeQueueBatch(models.Model):
    """
    A queued notice.
    Denormalized data for a notice.
    """
    # Base64-encoded pickle of a list of (user_pk, label, extra_context,
    # on_site) tuples; produced by queue() below.
    pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
    """
    Creates a new NoticeType, or updates an existing one in place.

    This is intended to be used by other apps as a post_syncdb management
    step.  `default` presumably encodes the default notification medium
    sensitivity -- confirm against the NoticeType model.
    """
    try:
        notice_type = NoticeType.objects.get(label=label)
        # Only hit the database when something actually changed.
        updated = False
        if display != notice_type.display:
            notice_type.display = display
            updated = True
        if description != notice_type.description:
            notice_type.description = description
            updated = True
        if default != notice_type.default:
            notice_type.default = default
            updated = True
        if updated:
            notice_type.save()
            if verbosity > 1:
                print "Updated %s NoticeType" % label
    except NoticeType.DoesNotExist:
        NoticeType(label=label, display=display, description=description, default=default).save()
        if verbosity > 1:
            print "Created %s NoticeType" % label
def get_notification_language(user):
    """
    Returns site-specific notification language for this user. Raises
    LanguageStoreNotAvailable if this site does not use translated
    notifications.
    """
    if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
        try:
            app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
            model = models.get_model(app_label, model_name)
            language_model = model._default_manager.get(user__id__exact=user.id)
            if hasattr(language_model, 'language'):
                return language_model.language
        # NOTE(review): if get_model() returns None, evaluating
        # model.DoesNotExist here raises AttributeError instead -- confirm.
        except (ImportError, ImproperlyConfigured, model.DoesNotExist):
            raise LanguageStoreNotAvailable
    raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
    """
    Returns a dictionary with the format identifier as the key. The values
    are fully rendered templates with the given context.

    Templates are looked up first under 'notification/<label>/<format>' and
    then under the generic 'notification/<format>'.
    """
    format_templates = {}
    for format in formats:
        # conditionally turn off autoescaping for .txt extensions in format
        if format.endswith(".txt"):
            context.autoescape = False
        else:
            context.autoescape = True
        format_templates[format] = render_to_string((
            'notification/%s/%s' % (label, format),
            'notification/%s' % format), context_instance=context)
    return format_templates
def send_now(users, label, extra_context=None, on_site=True):
    """
    Creates a new notice and emails it to each eligible user immediately.

    This is intended to be how other apps create new notices, e.g.::

        notification.send(user, 'friends_invite_sent', {
            'spam': 'eggs',
            'foo': 'bar',
        })

    You can pass in on_site=False to prevent the notice emitted from being
    displayed on the site.
    """
    if extra_context is None:
        extra_context = {}
    notice_type = NoticeType.objects.get(label=label)
    current_site = Site.objects.get_current()
    notices_url = u"http://%s%s" % (
        unicode(current_site),
        reverse("notification_notices"),
    )
    # Remember the active language so it can be restored after per-user
    # language activation inside the loop.
    current_language = get_language()
    formats = (
        'short.txt',
        'full.txt',
        'notice.html',
        'full.html',
        'email_full.html',
    ) # TODO make formats configurable
    for user in users:
        recipients = []
        # get user language for user from language store defined in
        # NOTIFICATION_LANGUAGE_MODULE setting
        try:
            language = get_notification_language(user)
        except LanguageStoreNotAvailable:
            language = None
        if language is not None:
            # activate the user's language
            activate(language)
        # update context with user specific translations
        context = Context({
            "user": user,
            "notice": ugettext(notice_type.display),
            "notices_url": notices_url,
            "current_site": current_site,
            'MEDIA_URL': settings.MEDIA_URL,
        })
        context.update(extra_context)
        # get prerendered format messages
        messages = get_formatted_messages(formats, label, context)
        # Strip newlines from subject
        subject = ''.join(render_to_string('notification/email_subject.txt', {
            'message': messages['short.txt'],
        }, context).splitlines())
        body = render_to_string('notification/email_body.txt', {
            'message': messages['full.txt'],
        }, context)
        html = render_to_string('notification/email_body.html',{
            'message': messages['email_full.html'],
        }, context)
        #notice = Notice.objects.create(user=user, message=messages['notice.html'], notice_type=notice_type, on_site=on_site)
        # "1" is the email medium id; see should_send/NOTICE_MEDIA elsewhere.
        if should_send(user, notice_type, "1") and user.email \
            and user.is_active: # Email
            recipients.append(user.email)
            msg = EmailMultiAlternatives(subject, body,
                                         settings.DEFAULT_FROM_EMAIL,
                                         recipients)
            msg.attach_alternative(html, "text/html")
            msg.send()
    # reset environment to original language
    activate(current_language)
def send(*args, **kwargs):
    """Dispatch a notification to either queue() or send_now().

    Honors the global NOTIFICATION_QUEUE_ALL flag (QUEUE_ALL) as the
    default; a per-call ``queue`` or ``now`` keyword argument always
    overrides the global behavior.
    """
    use_queue = kwargs.pop("queue", False)
    use_now = kwargs.pop("now", False)
    assert not (use_queue and use_now), "'queue' and 'now' cannot both be True."
    if use_queue:
        dispatch = queue
    elif use_now:
        dispatch = send_now
    else:
        # Fall back to the site-wide default.
        dispatch = queue if QUEUE_ALL else send_now
    return dispatch(*args, **kwargs)
def queue(users, label, extra_context=None, on_site=True):
    """
    Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
    the webserver.
    """
    if extra_context is None:
        extra_context = {}
    # Accept either a QuerySet or an iterable of user objects; store pks only.
    if isinstance(users, QuerySet):
        users = [row["pk"] for row in users.values("pk")]
    else:
        users = [user.pk for user in users]
    notices = []
    for user in users:
        notices.append((user, label, extra_context, on_site))
    # Python 2 str.encode("base64") keeps the pickled payload text-safe.
    NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
class ObservedItemManager(models.Manager):

    def all_for(self, observed, signal):
        """
        Returns all ObservedItems for an observed object,
        to be sent when a signal is emited.
        """
        content_type = ContentType.objects.get_for_model(observed)
        observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
        return observed_items

    def get_for(self, observed, observer, signal):
        """Return the single ObservedItem linking observer to observed for signal."""
        content_type = ContentType.objects.get_for_model(observed)
        observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
        return observed_item
class ObservedItem(models.Model):
user = models.ForeignKey(User, verbose_name=_('use |
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for eager_pg.trajectory_batch_stats.
Note that the explicit .numpy() casting also implicitly checks that the methods
all return tensors and not numpy arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from eager_pg import trajectory_batch_stats
import tensorflow as tf
# Short alias used throughout the tests.
tbs = trajectory_batch_stats

# Rows are time steps, columns are trajectories in the batch; a 1 marks a
# step that is still inside the trajectory (column sums give lengths 3,5,2,1).
TEST_MASK = [[1, 1, 1, 1],
             [1, 1, 1, 0],
             [1, 1, 0, 0],
             [0, 1, 0, 0],
             [0, 1, 0, 0]]  # pyformat: disable

# Generally masks will be floats so we can easily multiply tensors.
NP_TEST_MASK = np.array(TEST_MASK, dtype=np.float32)
class TrajectoryBatchStatsTest(tf.test.TestCase, parameterized.TestCase):
    """Tests to ensure that statistics on batches of trajectory are correct."""

    @property
    def expected_lengths(self):
        # Per-trajectory step counts: the column sums of TEST_MASK.
        return tf.constant([3, 5, 2, 1], dtype=tf.float32)

    def test_get_trajectory_lengths(self):
        """Checks if the length of each trajectory in the batch is correct."""
        # pylint: disable=invalid-name
        TF_TEST_MASK = tf.constant(NP_TEST_MASK)
        TF_TEST_MASK_TF_F64 = tf.cast(TF_TEST_MASK, tf.float64)
        NP_TEST_MASK_NP_F64 = NP_TEST_MASK.astype(np.float64)
        ALL_MASKS = [
            TF_TEST_MASK, NP_TEST_MASK, TF_TEST_MASK_TF_F64, NP_TEST_MASK_NP_F64
        ]
        # pylint: enable=invalid-name
        # Lengths must be correct regardless of tensor vs. numpy and dtype.
        for mask in ALL_MASKS:
            computed_lengths = tbs.get_trajectory_lengths(mask)
            self.assertTrue(np.allclose(computed_lengths, self.expected_lengths))

    def run_without_lengths(self, stats_function, args):
        """Helper function to run stats."""
        return stats_function(*args)

    def run_with_lengths(self, stats_function, args):
        """Helper function to run stats with precomputed lengths."""
        return stats_function(*args, trajectory_lengths=self.expected_lengths)

    @parameterized.named_parameters(
        dict(
            testcase_name='rewards',
            raw_batch=np.array([[1, 2, 3, 4]] * 5).astype(np.float32),
            statistic_function=tbs.reward_summaries,
            # Masked column sums are 3, 10, 6, 4 for lengths 3, 5, 2, 1.
            expected_results_with_traj={
                'mean_step_reward': (3. / 3 + 10. / 5 + 6. / 2 + 4. / 1) / 4.0,
                'mean_trajectory_reward': (3. + 10. + 6. + 4.) / 4.0,
                'stderr_trajectory_reward': np.sqrt(np.sum(
                    (np.array([3., 10., 6., 4.]) -
                     (3. + 10. + 6. + 4.) / 4.0)**2 / 3) / 4)
            },
            expected_results_no_traj={
                'mean_trajectory_reward': (5 + 10 + 15 + 20) / 4.0,
                'mean_step_reward': (1 + 2 + 3 + 4) / 4.0
            }),
        dict(
            testcase_name='entropies',
            raw_batch=np.array([[1, 2, 3, 4]] * 5).astype(np.float32),
            statistic_function=tbs.entropy_summaries,
            expected_results_with_traj={
                'mean_step_entropy': (3. / 3 + 10. / 5 + 6. / 2 + 4. / 1) / 4.0,
                'mean_trajectory_entropy': (3. + 10. + 6. + 4.) / 4.0
            }),
    )
    def test_calculations(self,
                          raw_batch,
                          statistic_function,
                          expected_results_with_traj,
                          expected_results_no_traj=None):  # pylint: disable=g-doc-args
        """Test calculations of a statistic on raw_batch using statistic_function.
        """
        stats = []
        stats.append(
            self.run_with_lengths(statistic_function, (raw_batch, NP_TEST_MASK)))
        stats.append(
            self.run_without_lengths(statistic_function, (raw_batch, NP_TEST_MASK)))
        for stat in stats:
            for expected_key in expected_results_with_traj.keys():
                self.assertAllClose(stat[expected_key].numpy(),
                                    expected_results_with_traj[expected_key])
        # Without a mask all steps count; only the rewards case supplies this.
        if expected_results_no_traj is not None:
            stat = self.run_without_lengths(statistic_function, (raw_batch,))
            for expected_key in expected_results_no_traj.keys():
                self.assertAllClose(stat[expected_key].numpy(),
                                    expected_results_no_traj[expected_key])

    def test_reward_calculations_errors(self):
        """Ensures that the reward calculations return the correct errors."""
        rewards_as_list = [[1, 2, 3, 4]] * 5
        self.assertRaises(TypeError, tbs.reward_summaries, rewards_as_list, None)
        rewards_as_numpy = np.array(rewards_as_list)
        rewards_as_numpy_wrong_shape = np.expand_dims(rewards_as_numpy, 1)
        self.assertRaises(ValueError, tbs.reward_summaries,
                          rewards_as_numpy_wrong_shape, None)

    # TODO(zaf): Find a way to @parameterized this?
    def test_returns_calculations(self):
        """Checks mean and standard error of first-step returns."""
        test_returns = np.array([[0.125, 1.875, 0.25, 1.5], [0.25, 1.75, 0.5, 1.0],
                                 [0.5, 1.5, 1.0, 0.0]])
        stats = tbs.return_summaries(test_returns)
        expected_mean_return = (0.125 + 1.875 + 0.25 + 1.5) / 4.0
        self.assertEqual(stats['mean_trajectory_return'].numpy(),
                         expected_mean_return)
        # Sample variance (ddof=1) over the first row, then standard error.
        pop_variance = np.sum((test_returns[0] - expected_mean_return)**2 / 3)
        standard_error = np.sqrt(pop_variance) / np.sqrt(4)
        self.assertTrue(
            np.allclose(stats['stderr_trajectory_return'].numpy(), standard_error))
if __name__ == '__main__':
    # Eager execution must be enabled before any test builds tensors.
    tf.enable_eager_execution()
    tf.test.main()
|
self.assertEqual(vl, form.attributes[str(nm)])
self.assertEqual(len(form.attributes), attributeMap.count() - cnt)
mydoc = form.node.firstChildElement(str("doc"))
text = DomTools.getText(mydoc)
olddoc = unicode(text).strip() if text else ""
self.assertEqual(olddoc, form.doc)
text = DomTools.getText(form.node)
oldcont = unicode(text).strip() if text else ""
self.assertEqual(oldcont, form.content)
form.name = nname
form.content = ntype
form.attributes.clear()
for at in attrs.keys():
form.attributes[at] = attrs[at]
form.doc = mdoc
form.root = doc
allAttr = True
cm = ComponentModel(doc, allAttr)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
form.view = TestView(cm)
form.view.testIndex = di
form.updateNode()
cnt = 0
for i in range(attributeMap.count()):
nm = attributeMap.item(i).nodeName()
vl = attributeMap.item(i).nodeValue()
if nm == "name":
self.assertEqual(vl, nname)
cnt += 1
elif nm == "type":
self.assertEqual(vl, ntype)
cnt += 1
else:
self.assertEqual(vl, attrs[str(nm)])
self.assertEqual(len(attrs), attributeMap.count() - cnt)
mydoc = form.node.firstChildElement(str("doc"))
text = DomTools.getText(mydoc)
olddoc = unicode(text).strip() if text else ""
self.assertEqual(olddoc, mdoc)
# constructor test
# \brief It tests default settings
def test_updateNode_withindex(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
dks = []
cks = []
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
nn = self.__rnd.randint(0, 9)
qdn.setAttribute("name", "myname%s" % nn)
qdn.setAttribute("type", "mytype%s" % nn)
qdn.setAttribute("unit", "myunits%s" % nn)
qdn.setAttribute("shortname", "mynshort%s" % nn)
cks.append(doc.createTextNode("$components.some%s\n" % nn))
qdn.appendChild(cks[-1])
doc.appendChild(qdn)
dname = "doc"
mdoc = doc.createElement(dname)
qdn.appendChild(mdoc)
ndcs = self.__rnd.randint(0, 10)
for n in range(ndcs):
dks.append(doc.createTextNode("\nText\n %s\n" % n))
mdoc.appendChild(dks[-1])
form = DefinitionDlg()
form.show()
form.node = qdn
self.assertEqual(form.name, '')
self.assertEqual(form.content, '')
self.assertEqual(form.doc, '')
self.assertEqual(form.attributes, {})
self.assertEqual(form.subItems,
["group", "field", "attribute", "link",
"component", "doc", "symbols"])
self.assertEqual(form.ui.__class__.__name__, "Ui_DefinitionDlg")
form.setFromNode()
form.createGUI()
allAttr = True
cm = ComponentModel(doc, allAttr)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
form.view = TestView(cm)
form.view.testIndex = di
nname = "newname"
ntype = "newtype"
attrs = {"unit": "newunit", "longname": "newlogname"}
mdoc = "New text \nNew text"
attributeMap = form.node.attributes()
cnt = 0
for i in range(attributeMap.count()):
nm = attributeMap.item(i).nodeName()
vl = attributeMap.item(i).nodeValue()
if nm == "name":
self.assertEqual(vl, form.name)
cnt += 1
else:
self.assertEqual(vl, form.attributes[str(nm)])
self.assertEqual(len(form.attributes), attributeMap.count() - cnt)
mydoc = form.node.firstChildElement(str("doc"))
text = DomTools.getText(mydoc)
olddoc = unicode(text).strip() if text else ""
self.assertEqual(olddoc, form.doc)
text = DomTools.getText(form.node)
oldcont = unicode(text).strip() if text else ""
self.assertEqual(oldcont, form.content)
form.name = nname
form.content = ntype
form.attributes.clear()
for at in attrs.keys():
form.attributes[at] = attrs[at]
form.doc = mdoc
form.root = doc
allAttr = True
cm = ComponentModel(doc, allAttr)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
form.view = TestView(cm)
form.view.testIndex = di
form.updateNode(di)
cnt = 0
for i in range(attributeMap.count()):
nm = attributeMap.item(i).nodeName()
vl = attributeMap.item(i).nodeValue()
if nm == "name":
self.assertEqual(vl, nname)
cnt += 1
elif nm == "type":
self.assertEqual(vl, ntype)
cnt += 1
else:
self.assertEqual(vl, attrs[str(nm)])
self.assertEqual(len(attrs), attributeMap.count() - cnt)
mydoc = form.node.firstChildElement(str("doc"))
text = DomTools.getText(mydoc)
olddoc = unicode(text).strip() if text else ""
self.assertEqual(olddoc, mdoc)
text = DomTools.getText(form.node)
oldcont = unicode(text).strip() if text else ""
self.assertEqual(oldcont, form.content)
# constructor test
# \brief It tests default settings
def test_apply(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
dks = []
cks = []
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
nn = self.__rnd.randint(0, 9)
qdn.setAttribute("name", "myname%s" % nn)
qdn.setAttribute("type", "mytype%s" % nn)
qdn.setAttribute("unit", "myunits%s" % nn)
qdn.setAttribute("shortname", "mynshort%s" % nn)
cks.append(doc.createTextNode("$components.some%s\n" % nn))
qdn.appendChild(cks[-1])
doc.appendChild(qdn)
dname = "doc"
mdo | c = doc.createElement(dname)
qdn.appendChild(mdoc)
ndcs = self.__rnd.randint(0, 10)
for n in range(ndcs):
dks.append(doc.createTextNode("\nText\n %s\n" % n))
mdoc.appendChild(dks[-1])
for | m = DefinitionDlg()
form.show()
form.node = qdn
self.assertEqual(form.name, '')
self.assertEqual(form.content, '')
self.assertEqual(form.doc, '')
self.assertEqual(form.attributes, {})
self.assertEqual(form.subItems,
["group", "field", "attribute", "link", "component",
"doc", "symbols"])
self.assertEqual(form.ui.__class__.__name__, "Ui_DefinitionDlg")
form.setFromNode()
form.createGUI()
allAttr = True
cm = ComponentModel(doc, allAttr)
ri = cm.rootIndex
di = cm.index(0, 0, ri)
form.view = TestView(cm)
form.view.testIndex = di
attributeMap = form.node.attributes()
cnt = 0
for i in range(attributeMap.count()):
nm = attributeMap.item(i).nodeName()
vl = attributeMap.item(i).nodeValue()
if nm == "name":
self.assertEqual(vl, form.name)
cnt += 1
else:
self.assertEqual(vl, form.attributes[str(nm)])
self.assertEqual(len(form.attributes), attributeMap.count() - cnt)
mydoc = form.node.firstChildElement(str("doc"))
text = DomTools.getText(mydoc)
olddoc = unicode(text).strip() if text else ""
self.assertEqual(olddoc, form.doc)
text = DomTools.getText(form.node)
oldcont = unicode(text).strip() if text else ""
self.assertEqual(oldcont, form.content)
nname = "newname"
ntype = "newtype"
attrs = {"unit": "newunit", "longname": "newlogname",
"mynew": " |
#!/usr/bin/env python3
"""Run numatuned once with a 60-second interval; pass -n for a dry run."""
import sys
import numatuned

# Dry-run mode is requested with '-n' as the first command-line argument.
dryrun = len(sys.argv) > 1 and sys.argv[1] == '-n'

numatuned.fire(60, dryrun)
|
# Sentiment classification of IMDB movie reviews with an LSTM (tflearn demo).
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

# IMDB Dataset loading: 10k-word vocabulary, 10% of train held out for
# validation.
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding: truncate/zero-pad every review to 100 word ids.
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Converting labels to binary vectors (negative/positive one-hot).
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building: embedding -> LSTM -> softmax over the two classes.
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
          batch_size=32)
model.save('sentiment.tflearn')
# Copyright (c) 2020, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint:disable=unused-argument,unused-import
try:
    from drf_yasg.openapi import Response as OpenAPIResponse
    from drf_yasg.utils import no_body, swagger_auto_schema
except ImportError:
    # drf_yasg is optional: provide inert stand-ins so views decorated with
    # swagger_auto_schema keep working without OpenAPI documentation.
    from functools import wraps
    from .compat import available_attrs

    class no_body(object): #pylint:disable=invalid-name
        # Sentinel mirroring drf_yasg.utils.no_body.
        pass

    def swagger_auto_schema(function=None, **kwargs):
        """
        Dummy decorator when drf_yasg is not present.

        Usable both as ``@swagger_auto_schema`` and
        ``@swagger_auto_schema(...)``; all keyword arguments are ignored.
        """
        def decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                return view_func(request, *args, **kwargs)
            return _wrapped_view
        if function:
            return decorator(function)
        return decorator

    class OpenAPIResponse(object):
        """
        Dummy response object to document API.
        """
        def __init__(self, *args, **kwargs):
            pass
|
from fabric.api import env, local, run, sudo
env.user = 'root'
env.hosts = ['204.232.205.6']
env.code_dir = '/home/docs/sites/readthedocs.org/checkouts/readthedocs.org'
env.virtualenv = '/home/docs/sites/readthedocs.org'
env.rundir = '/home/docs/sites/readthedocs.org/run'
env.chef_executable = '/var/lib/gems | /1.8/bin/chef-solo'
def install_chef():
    """Bootstrap chef-solo on the remote host via rubygems."""
    sudo('apt-get update', pty=True)
    sudo('apt-get install -y git-core rubygems ruby ruby-dev', pty=True)
    sudo('gem install chef --no-ri --no-rdoc', pty=True)
def sync_config():
    """Push the local chef configuration tree to the remote /etc/chef."""
    local('rsync -av . %s@%s:/etc/chef' % (env.user, env.hosts[0]))
def update():
    """Sync the chef config and converge the remote host with chef-solo."""
    sync_config()
    sudo('cd /etc/chef && %s' % env.chef_executable, pty=True)
def reload():
    "Reload the server."
    # NOTE: shadows the builtin reload(); name kept for the fabric task CLI.
    env.user = "docs"
    run("kill -HUP `cat %s/gunicorn.pid`" % env.rundir, pty=True)
def restart():
    "Restart (or just start) the server"
    sudo('restart readthedocs-gunicorn', pty=True)
|
class Solution(object):
    def maxProfit(self, prices):
        """
        Return the maximum profit from one buy followed by one sell.

        Single pass: track the lowest price seen so far and the best spread
        between the current price and that minimum.

        Fix: the original seeded the minimum with the magic constant
        ``1 << 31``, which returns a spurious profit when every price
        exceeds 2**31; ``float('inf')`` is a safe sentinel for any input.

        :type prices: List[int]
        :rtype: int
        """
        lowest = float('inf')
        profit = 0
        for price in prices:
            if price < lowest:
                lowest = price
            elif price - lowest > profit:
                profit = price - lowest
        return profit
import sqlalchemy as sa
from oslo_db.sqlalchemy import types as db_types
from nca47.db.sqlalchemy.models import base as model_base
from nca47.objects import attributes as attr
# Re-export the declarative mixins from the base module for local use.
HasTenant = model_base.HasTenant
HasId = model_base.HasId
HasStatus = model_base.HasStatus
HasOperationMode = model_base.HasOperationMode
class DnsServer(model_base.BASE, HasId, HasOperationMode):
    """Represents a dns server."""
    # NOTE(review): no explicit __tablename__ here, unlike the other models;
    # presumably derived by the declarative base -- confirm.
    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
class Zone(model_base.BASE, HasId, HasOperationMode):
    """Represents a dns zone."""

    __tablename__ = 'dns_zone_info'

    zone_name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    tenant_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    zone_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    vres_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    # Master/slave name servers, stored as JSON-encoded lists.
    masters = sa.Column(db_types.JsonEncodedList)
    slaves = sa.Column(db_types.JsonEncodedList)
    renewal = sa.Column(sa.String(attr.NAME_MAX_LEN))
    default_ttl = sa.Column(sa.String(attr.NAME_MAX_LEN))
    owners = sa.Column(db_types.JsonEncodedList)
    ad_controller = sa.Column(sa.String(attr.NAME_MAX_LEN))
    comment = sa.Column(sa.String(attr.NAME_MAX_LEN))
class ZoneRecord(model_base.BASE, HasId, HasOperationMode):
    """Represents a resource record set (RRS) within a dns zone."""

    __tablename__ = 'dns_rrs_info'

    zone_id = sa.Column(sa.String(attr.UUID_LEN))
    rrs_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    rrs_name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    type = sa.Column(sa.String(attr.NAME_MAX_LEN))
    # 'klass' avoids shadowing the Python keyword for the DNS class field.
    klass = sa.Column(sa.String(attr.NAME_MAX_LEN))
    ttl = sa.Column(sa.String(attr.NAME_MAX_LEN))
    rdata = sa.Column(sa.String(attr.NAME_MAX_LEN))
|
from utile import pretty_xml, xml_to_dict, element_to_dict
from testsuite.support import etree, TestCase
import unittest
XML_DATA = "<html><body><h1> | test1</h1><h2>test2</h2></body></html>"
XML_PRETTY = """\
<html>
<body>
<h1>test1</h1>
<h2>test2</h2>
</body>
</html>
"""
XML_DICT = {'body': {'h2': 'test2', 'h1': 'test1'}}
@unittest.skipUnless(etree, 'lxml not installed')
class XMLTestCase(TestCase):
    """Round-trip checks for utile's XML helpers (requires lxml)."""

    def test_pretty_xml(self):
        self.assertEqual(pretty_xml(XML_DATA), XML_PRETTY)

    def test_element_to_dict(self):
        self.assertEqual(element_to_dict(etree.XML(XML_DATA)), XML_DICT)

    def test_xml_to_dict(self):
        self.assertEqual(xml_to_dict(XML_DATA), XML_DICT)
|
#!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: tbrandt@astro.princeton.edu
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#
import re
from subprocess import *
import multiprocessing
import numpy as np
def config(nframes, framesize):
    """Inspect the host and choose pipeline execution parameters.

    nframes -- number of frames in the dataset.
    framesize -- size of one frame; frames are assumed 4 bytes per element.

    Returns (mem, ncpus, storeall): total physical memory in bytes, the
    thread count, and whether the dataset fits comfortably in RAM (under
    20% of physical memory).
    """
    ###################################################################
    # Fetch the total amount of physical system memory in bytes.
    # This is the second entry on the second line of the standard
    # output of the 'free' command.
    ###################################################################
    print "\nGetting system parameters, setting pipeline execution parameters..."
    osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()
    if osver.startswith("Linux"):
        print "You are running Linux."
    elif osver.startswith("Darwin"):
        print "You are running Mac OS-X."
    else:
        print "Your operating system is not recognized."
    if osver.startswith("Linux"):
        mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
        mem = int(mem.split('\n')[1].split()[1])
    elif osver.startswith("Darwin"):
        # OS X has no 'free'; sum the relevant page counts from vm_stat
        # and multiply by the reported page size.
        mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
        blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
        totmem = 0.
        for line in mem:
            if np.any(["Pages free:" in line, "Pages active:" in line,
                       "Pages inactive:" in line, "Pages speculative:" in line,
                       "Pages wired down:" in line]):
                totmem += float(line.split(':')[1]) * float(blocksize)
        mem = int(totmem)
    ncpus = multiprocessing.cpu_count()
    hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]
    print "\n  You are running on " + hostname + "."
    print "  You have " + str(mem / 2**20) + " megabytes of memory and " + \
          str(ncpus) + " threads available."
    # 4 bytes per element (32-bit data); integer percent of physical RAM.
    datasize = framesize * nframes * 4
    print "  The dataset consists of " + str(nframes) + " frames, " + \
          str(datasize * 100 / mem) + "% of your physical RAM."
    storeall = False
    if datasize * 100 / mem < 20:
        storeall = True
        print "  --> You have enough RAM to store all data."
        print "      The pipeline will not need to write all intermediate files."
    else:
        print "  --> You do not have enough RAM to store all data."
        print "      The pipeline will need to write all intermediate files"
        print "      and do the reduction in pieces."
    return mem, ncpus, storeall
|
# (c) 2018 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.httpapi import HttpApiBase
from ansible.utils.display import Display
# Module-level display for verbose logging of httpapi events.
display = Display()
class HttpApi(HttpApiBase):
    """NX-API (NX-OS over HTTP) transport for Ansible's httpapi connection.

    Commands are batched into single NX-API JSON requests (one batch per
    output format) and the responses are unpacked into per-command results.
    """
    def _run_queue(self, queue, output):
        """POST the queued commands to /ins as one NX-API request.

        :param queue: list of command strings sharing the same output format
        :param output: NX-API output format for this batch (e.g. 'text', 'json')
        :returns: list of per-command results
        :raises ConnectionError: if the device reply is not valid JSON
        """
        if self._become:
            display.vvvv('firing event: on_become')
            # 'enable' must run first so the rest of the batch executes
            # with elevated privileges.
            queue.insert(0, 'enable')
        request = request_builder(queue, output)
        headers = {'Content-Type': 'application/json'}
        response, response_data = self.connection.send('/ins', request, headers=headers, method='POST')
        try:
            response_data = json.loads(to_text(response_data.getvalue()))
        except ValueError:
            raise ConnectionError('Response was not valid JSON, got {0}'.format(
                to_text(response_data.getvalue())
            ))
        results = handle_response(response_data)
        if self._become:
            # Drop the result of the injected 'enable' command.
            results = results[1:]
        return results
    def send_request(self, data, **message_kwargs):
        """Run one or more commands on the device.

        Consecutive commands with the same output format are grouped into a
        single NX-API request; a format change flushes the current queue.
        Returns a single result for a single command, else a list.
        """
        output = None
        queue = list()
        responses = list()
        for item in to_list(data):
            cmd_output = message_kwargs.get('output', 'text')
            if isinstance(item, dict):
                command = item['command']
                if 'output' in item:
                    cmd_output = item['output']
            else:
                command = item
            # Emulate '| json' from CLI
            if command.endswith('| json'):
                command = command.rsplit('|', 1)[0]
                cmd_output = 'json'
            if output and output != cmd_output:
                # Output format changed: flush the queued commands first.
                responses.extend(self._run_queue(queue, output))
                queue = list()
            output = cmd_output
            queue.append(command)
        if queue:
            responses.extend(self._run_queue(queue, output))
        if len(responses) == 1:
            return responses[0]
        return responses
    def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
        """Push a configuration to the device via NX-API.

        :param candidate: configuration commands to apply
        :param replace: file name for 'config replace' (Nexus 9K only)
        :returns: list of non-empty responses (never empty; [''] at minimum)
        :raises ConnectionError: when replace is requested on a non-9K platform
        """
        resp = list()
        operations = self.connection.get_device_operations()
        self.connection.check_edit_config_capability(operations, candidate, commit, replace, comment)
        if replace:
            device_info = self.connection.get_device_info()
            if '9K' not in device_info.get('network_os_platform', ''):
                raise ConnectionError(msg=u'replace is supported only on Nexus 9K devices')
            candidate = 'config replace {0}'.format(replace)
        responses = self.send_request(candidate, output='config')
        for response in to_list(responses):
            if response != '{}':
                resp.append(response)
        if not resp:
            resp = ['']
        return resp
    def run_commands(self, commands, check_rc=True):
        """Run a list of commands on the remote device and return the results.

        When check_rc is False a ConnectionError is converted to its text
        representation instead of being raised.  Responses that look like
        JSON objects are parsed into dicts.
        """
        try:
            out = self.send_request(commands)
        except ConnectionError as exc:
            if check_rc is True:
                raise
            out = to_text(exc)
        out = to_list(out)
        if not out[0]:
            return out
        for index, response in enumerate(out):
            if response[0] == '{':
                out[index] = json.loads(response)
        return out
def handle_response(response):
    """Extract the per-command bodies from a decoded NX-API response.

    :param response: parsed NX-API JSON reply
    :returns: list of stripped body strings (dict bodies are re-serialised)
    :raises ConnectionError: when any command reports a non-200 code
    """
    collected = []
    outputs = response['ins_api'].get('outputs')
    if not outputs:
        return collected
    for entry in to_list(outputs['output']):
        if entry['code'] != '200':
            raise ConnectionError('%s: %s' % (entry['input'], entry['msg']))
        if 'body' in entry:
            body = entry['body']
            if isinstance(body, dict):
                body = json.dumps(body)
            collected.append(body.strip())
    return collected
def request_builder(commands, output, version='1.0', chunk='0', sid=None):
    """Encode a list of commands as an NXAPI JSON request message.

    A recognised '| <format>' suffix on the first command overrides the
    requested output format for the whole batch.
    :raises ConnectionError: when *output* is not a known format
    """
    output_to_command_type = {
        'text': 'cli_show_ascii',
        'json': 'cli_show',
        'bash': 'bash',
        'config': 'cli_conf'
    }
    trailing = commands[0].split('|')[-1].strip()
    if trailing in output_to_command_type:
        command_type = output_to_command_type[trailing]
        # Strip the pipe suffix from every command in the batch.
        commands = [command.split('|')[0].strip() for command in commands]
    elif output in output_to_command_type:
        command_type = output_to_command_type[output]
    else:
        msg = 'invalid format, received %s, expected one of %s' % \
            (output, ','.join(output_to_command_type.keys()))
        raise ConnectionError(msg)
    if isinstance(commands, (list, set, tuple)):
        commands = ' ;'.join(commands)
    payload = {
        'version': version,
        'type': command_type,
        'chunk': chunk,
        'sid': sid,
        'input': commands,
        'output_format': 'json'
    }
    return json.dumps({'ins_api': payload})
|
']['uuid'] = str(project.app_uuid)
manifest['pebble']['enableMultiJS'] = project.app_modern_multi_js
manifest['pebble']['displayName'] = project.app_long_name
if project.app_is_hidden:
manifest['pebble']['watchapp']['hiddenApp'] = project.app_is_hidden
if project.app_platforms:
manifest['pebble']['targetPlatforms'] = project.app_platform_list
return manifest
def generate_manifest_dict(project, resources):
    """Build the manifest dictionary appropriate to the project's type.

    Standard projects dispatch on SDK version (v2 appinfo vs v3 package),
    Simply.js and Pebble.js projects get their dedicated builders.
    :raises Exception: for an unrecognised project type
    """
    if project.is_standard_project_type:
        if project.sdk_version == '2':
            return generate_v2_manifest_dict(project, resources)
        return generate_v3_manifest_dict(project, resources)
    if project.project_type == 'simplyjs':
        return generate_simplyjs_manifest_dict(project)
    if project.project_type == 'pebblejs':
        return generate_pebblejs_manifest_dict(project, resources)
    raise Exception(_("Unknown project type %s") % project.project_type)
def dict_to_pretty_json(d):
    """Serialise *d* as 4-space-indented JSON with sorted keys and a
    trailing newline."""
    text = json.dumps(d, indent=4, separators=(',', ': '), sort_keys=True)
    return text + "\n"
def generate_resource_dict(project, resources):
    """Build the 'resources' manifest section for the project's type.

    :raises Exception: for an unrecognised project type
    """
    if project.is_standard_project_type:
        return generate_native_resource_dict(project, resources)
    if project.project_type == 'simplyjs':
        return generate_simplyjs_resource_dict()
    if project.project_type == 'pebblejs':
        return generate_pebblejs_resource_dict(resources)
    raise Exception(_("Unknown project type %s") % project.project_type)
def generate_native_resource_dict(project, resources):
    """Build the native 'resources' section: one media entry per resource
    identifier, with optional attributes copied only when set."""
    media = []
    for resource in resources:
        for identifier in resource.get_identifiers():
            entry = {
                'type': resource.kind,
                'file': resource.root_path,
                'name': identifier.resource_id,
            }
            if identifier.character_regex:
                entry['characterRegex'] = identifier.character_regex
            if identifier.tracking:
                entry['trackingAdjust'] = identifier.tracking
            if identifier.memory_format:
                entry['memoryFormat'] = identifier.memory_format
            if identifier.storage_format:
                entry['storageFormat'] = identifier.storage_format
            if identifier.space_optimisation:
                entry['spaceOptimization'] = identifier.space_optimisation
            if resource.is_menu_icon:
                entry['menuIcon'] = True
            # compatibility may legitimately be falsy, so test against None.
            if identifier.compatibility is not None:
                entry['compatibility'] = identifier.compatibility
            # targetPlatforms is stored as a JSON string; only SDK 3 uses it.
            if project.sdk_version == '3' and identifier.target_platforms:
                entry['targetPlatforms'] = json.loads(identifier.target_platforms)
            media.append(entry)
    return {'media': media}
def generate_simplyjs_resource_dict():
    """Return the fixed resource map shipped with every Simply.js project."""
    menu_icon = {
        "menuIcon": True,
        "type": "png",
        "name": "IMAGE_MENU_ICON",
        "file": "images/menu_icon.png"
    }
    logo_splash = {
        "type": "png",
        "name": "IMAGE_LOGO_SPLASH",
        "file": "images/logo_splash.png"
    }
    mono_font = {
        "type": "font",
        "name": "MONO_FONT_14",
        "file": "fonts/UbuntuMono-Regular.ttf"
    }
    return {"media": [menu_icon, logo_splash, mono_font]}
def generate_pebblejs_resource_dict(resources):
    """Build the 'resources' section for a Pebble.js project.

    Starts from the fixed Pebble.js media set, then appends one entry per
    bitmap/png project resource. If the project supplies its own menu icon,
    the default first entry loses its menuIcon flag.
    """
    media = [
        {
            "menuIcon": True,  # This must be the first entry; we adjust it later.
            "type": "bitmap",
            "name": "IMAGE_MENU_ICON",
            "file": "images/menu_icon.png"
        }, {
            "type": "bitmap",
            "name": "IMAGE_LOGO_SPLASH",
            "file": "images/logo_splash.png"
        }, {
            "type": "bitmap",
            "name": "IMAGE_TILE_SPLASH",
            "file": "images/tile_splash.png"
        }, {
            "type": "font",
            "name": "MONO_FONT_14",
            "file": "fonts/UbuntuMono-Regular.ttf"
        }
    ]
    for resource in resources:
        # Only image resources are usable from Pebble.js.
        if resource.kind not in ('bitmap', 'png'):
            continue
        d = {
            'type': resource.kind,
            'file': resource.root_path,
            'name': re.sub(r'[^A-Z0-9_]', '_', resource.root_path.upper()),
        }
        if resource.is_menu_icon:
            d['menuIcon'] = True
            # pop() instead of `del`: a second menu-icon resource would
            # otherwise raise KeyError once the flag is already removed.
            media[0].pop('menuIcon', None)
        media.append(d)
    return {
        'media': media
    }
def generate_simplyjs_manifest(project):
    """Return the Simply.js manifest serialised as pretty-printed JSON."""
    manifest = generate_simplyjs_manifest_dict(project)
    return dict_to_pretty_json(manifest)
def generate_simplyjs_manifest_dict(project):
    """Assemble the appinfo-style manifest for a Simply.js project."""
    return {
        "uuid": project.app_uuid,
        "shortName": project.app_short_name,
        "longName": project.app_long_name,
        "companyName": project.app_company_name,
        "versionLabel": project.app_version_label,
        "versionCode": 1,
        "capabilities": project.app_capabilities.split(','),
        "watchapp": {"watchface": project.app_is_watchface},
        "appKeys": {},
        "resources": generate_simplyjs_resource_dict(),
        "projectType": "simplyjs",
    }
def generate_pebblejs_manifest(project, resources):
    """Return the Pebble.js manifest serialised as pretty-printed JSON."""
    manifest = generate_pebblejs_manifest_dict(project, resources)
    return dict_to_pretty_json(manifest)
def generate_pebblejs_manifest_dict(project, resources):
    """Assemble the appinfo-style manifest for a Pebble.js project."""
    manifest = dict(
        uuid=project.app_uuid,
        shortName=project.app_short_name,
        longName=project.app_long_name,
        companyName=project.app_company_name,
        versionLabel=project.app_version_label,
        capabilities=project.app_capabilities.split(','),
        versionCode=1,
        watchapp=dict(
            watchface=project.app_is_watchface,
            hiddenApp=project.app_is_hidden,
        ),
        appKeys={},
        resources=generate_pebblejs_resource_dict(resources),
        projectType="pebblejs",
        sdkVersion="3",
    )
    # Only emit targetPlatforms when the project restricts platforms.
    if project.app_platforms:
        manifest["targetPlatforms"] = project.app_platform_list
    return manifest
def load_manifest_dict(manifest, manifest_kind, default_project_type='native'):
""" Load data from a manifest dictionary
:param manifest: a dictionary of settings
:param manifest_kind: 'package.json' or 'appinfo.json'
:return: a tuple of (models.Project options dictionary, the media map, the dependencies dictionary)
"""
project = {}
dependencies = {}
if manifest_kind == APPINFO_MANIFEST:
project['app_short_name'] = manifest['shortName']
project['app_long_name'] = manifest['longName']
project['app_company_name'] = manifest['companyName']
project['app_version_label'] = manifest['versionLabel']
project['app_keys'] = dict_to_pretty_json(manifest.get('appKeys', {}))
project['sdk_version'] = manifest.get('sdkVersion', '2')
project['app_modern_multi_js'] = manifest.get('enableMultiJS', False)
elif manifest_kind == PACKAGE_MANIFEST:
project['app_short_name'] = manifest['name']
project['app_company_name'] = manifest['author']
project['semver'] = manifest['version']
project['app_long_name'] = manifest['pebble'].get('displayName', None)
project['app_keys'] = dict_to_pretty_json(manifest['pebble'].get('messageKeys', []))
project['keywords'] = manifest.get('keywords', [])
dependencies = manifest.get('dependencies', {})
manifest = manifest['pebble']
project['app_modern_multi_js'] = manifest.get('enableMultiJS', True)
project['sdk_version'] = manifest.get('sdkVersion', '3')
else:
raise InvalidProjectArchiveException(_('Invalid manifest kind: %s') % manifest_kind[-12:])
project['app_uuid'] = manifest.get('uuid', uuid.uuid4())
project['app_is_watchface'] = manifest.get('watchapp', {}).get('watchface', False)
project['app_is_hidden'] = manifest.get('watchapp', {}).get('hiddenApp', False)
project['app_is_shown_on_communication'] = manifest.get('watchapp', {}).get('onlyShownOnCommunication', False)
project['app_ |
lated.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoriteexercise': {
'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
},
'askbot.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.exercise': {
'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'problem_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_exercises'", 'through': "'FavoriteExercise'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_exercises'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_exercises'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'T | rue', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked | ': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'exercises'", 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.exerciserevision': {
'Meta': {'object_name': 'ExerciseRevision', 'db_table': "u'exercise_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exerciserevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Exercise']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models |
# -*- coding=utf8 -*-
#******************************************************************************
# MediaTypes.py
#------------------------------------------------------------------------------
#
# Copyright (c) 2015 LivingOn <LivingOn@xmail.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#******************************************************************************
import os
from resources.lib.SxxExxKennung import SxxExxKennung
from resources.lib.YoutubePlaylist import YoutubePlaylist
class MediaType(object):
    """Base class for library entries that are materialised as .strm files.

    Keeps class-level registries of created stream folders and knows how to
    activate (uncomment) streams previously recorded as inactive.
    """
    # URL template resolved by the YouTube plugin when Kodi plays the stream.
    _PLUGIN = "plugin://plugin.video.youtube/play/?video_id=%s"
    # Class-level registries shared by all media types.
    _all_strm_files = []
    _all_strm_folder = []
    def create_strm_files(self):
        """Create the .strm file(s) for this entry; subclasses must override.

        Fixed: the original raised `NotImplemented`, which is not an
        exception class and therefore raised TypeError instead.
        """
        raise NotImplementedError
    @classmethod
    def activate_all_streams(cls, series_library, movies_library):
        """Uncomment every stream recorded as inactive in both libraries,
        then remove the 'inactive' marker files."""
        for strmfile in _get_inactive_strms_in(series_library):
            _activate_stream_in(strmfile)
        for strmfile in _get_inactive_strms_in(movies_library):
            _activate_stream_in(strmfile)
        _remove_inactive_file_from(series_library)
        _remove_inactive_file_from(movies_library)
    @classmethod
    def exists_inactive_streams(cls, series_library, movies_library):
        """Return True if either library still has an 'inactive' marker."""
        inactive_series = os.path.exists("%sinactive" % series_library)
        inactive_movies = os.path.exists("%sinactive" % movies_library)
        return inactive_series or inactive_movies
    @classmethod
    def all_strm_folder(cls):
        """Return registered stream folders, de-duplicated, order preserved."""
        all_folder = []
        for folder in cls._all_strm_folder:
            if folder not in all_folder:
                all_folder.append(folder)
        return all_folder
    @classmethod
    def clear_all_strm_folder(cls):
        """Reset the folder registry."""
        cls._all_strm_folder = []
class NoMediaFile(MediaType):
    """Null object: a media entry that produces no .strm files at all."""
    def create_strm_files(self):
        """Intentionally a no-op."""
class SingleMediaFile(MediaType):
    """A media entry that maps to exactly one .strm file."""
    def __init__(self, librarypath, title, videoid, season=None):
        # season, when given, becomes the file name (series episodes).
        self._librarypath = librarypath
        self._title = title
        self._videoid = videoid
        self._season = season
    def create_strm_files(self):
        """Write the single .strm file and record it as inactive."""
        folder = _create_strm_folder(self._librarypath, self._title)
        self._all_strm_folder.append(folder)
        filename = self._season or self._title
        strmfile = "%s/%s.strm" % (folder, filename)
        if _write_strm_file(strmfile, MediaType._PLUGIN % self._videoid):
            _append_to_inactive_file(strmfile, self._librarypath)
class PlaylistFile(MediaType):
    """A media entry backed by a whole YouTube playlist."""
    def __init__(self, libraypath, title, playlistid):
        self._librarypath = libraypath
        self._title = title
        self._playlistid = playlistid
    def create_strm_files(self):
        """Write one .strm per playlist entry that parses as a SxxExx episode."""
        folder = _create_strm_folder(self._librarypath, self._title)
        self._all_strm_folder.append(folder)
        for (title, videoid) in YoutubePlaylist.parse(self._playlistid):
            episode = SxxExxKennung.parse(title)
            if not episode:
                # Entries without a recognisable episode tag are skipped.
                continue
            strmfile = "%s/%s.strm" % (folder, episode)
            if _write_strm_file(strmfile, MediaType._PLUGIN % videoid):
                _append_to_inactive_file(strmfile, self._librarypath)
def _create_strm_folder(librarypath, title):
folder = "%s%s" % (librarypath, title)
try:
os.mkdir(folder)
except OSError:
pass
return folder
def _write_strm_file(strmfile, content):
    """Append *content* to *strmfile*; new entries are written commented out.

    Returns True when the line was written, False on I/O failure.
    """
    result = False
    entryline = "%s\n" % content
    if _is_not_in_strm_file(strmfile, entryline):
        # New streams start commented out ("inactive") until activated.
        entryline = "#%s" % entryline
    try:
        # `with` guarantees the handle is closed (the original leaked it).
        with open(strmfile, "a+") as handle:
            handle.write(entryline)
        result = True
    except IOError:
        pass
    return result
def _append_to_inactive_file(strmfile, librarypath):
inactive_file = "%sinactive" % librarypath
entryline = "%s\n" % strmfile
try:
open(inactive_file, "a+").write(entryline)
except IOError:
pass
def _get_inactive_strms_in(library):
content = []
inactive_file = "%sinactive" % library
try:
content = open(inactive_file, "rU").readlines()
except IOError:
| pass
result = []
[result.append(i.strip("\n")) for i in content if not i in result]
return result
def _is_not_in_strm_file(strmfile, entryline):
content = []
try:
content = open(strmfile, "rU").readlines()
except IOError:
pass
return not entryline in content
def _activate_stream_in(strmfile):
try:
content = open(strmfile, "rU").readlines()
new_content = []
for line in content:
if line.startswith("#plugin:"):
new_content.append(line[1:])
else:
new_content.append(line)
open(strmfile, "w").writelines(new_content)
except IOError:
pass
def _remove_inactive_file_from(library):
try:
os.remove("%sinactive" % library)
except OSError:
pass
|
"""
This module is meant for vendorizing Python libraries. Most libraries will need
to have some ``sys.path`` alterations done unless the | y are doing relative
imports.
Do **not** add anything to this module that does not represent a vendorized
library | .
Vendored libraries should go into the ``vendor`` directory and imported from
there. This is so we allow libraries that are installed normally to be imported
if the vendored module is not available.
The import dance here is done so that all other imports throughout ceph-deploy
are kept the same regardless of where the module comes from.
The expected way to import remoto would look like this::
from ceph_deploy.lib import remoto
"""
try:
# vendored
from .vendor import remoto
except ImportError:
# normally installed
import remoto # noqa
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPbdzmq(RPackage):
    """Programming with Big Data -- Interface to 'ZeroMQ'
    'ZeroMQ' is a well-known library for high-performance asynchronous
    messaging in scalable, distributed applications. This package provides
    high level R wrapper functions to easily utilize 'ZeroMQ'. We mainly focus
    on interactive client/server programming frameworks. For convenience, a
    minimal 'ZeroMQ' library (4.1.0 rc1) is shipped with 'pbdZMQ', which can
    be used if no system installation of 'ZeroMQ' is available. A few wrapper
    functions compatible with 'rzmq' are also provided."""
    homepage = "http://r-pbd.org/"
    url      = "https://cloud.r-project.org/src/contrib/pbdZMQ_0.2-4.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/pbdZMQ"
    # CRAN release tarball checksums -- do not edit by hand.
    version('0.3-4', sha256='07794bd6858e093f8b6b879ddd5ab0195449b47a41b70cab2f60603f0a53b129')
    version('0.3-3', sha256='ae26c13400e2acfb6463ff9b67156847a22ec79f3b53baf65119efaba1636eca')
    version('0.3-2', sha256='ece2a2881c662f77126e4801ba4e01c991331842b0d636ce5a2b591b9de3fc37')
    version('0.2-4', sha256='bfacac88b0d4156c70cf63fc4cb9969a950693996901a4fa3dcd59949ec065f6')
    # Minimum R version tightens with newer pbdZMQ releases.
    depends_on('r@3.0.0:', type=('build', 'run'))
    depends_on('r@3.2.0:', when='@0.2-6:', type=('build', 'run'))
    depends_on('r@3.5.0:', when='@0.3-4:', type=('build', 'run'))
    # r-r6 was only a dependency of the early releases.
    depends_on('r-r6', when='@:0.2-6', type=('build', 'run'))
    depends_on('libzmq@4.0.4:')
|
ort, proxy, proxy_port, proxy_user, proxy_pass,
self.region.endpoint, debug, https_connection_factory, path)
def get_all_topics(self, next_token=None):
"""
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {'ContentType' : 'JSON'}
if next_token:
params['NextToken'] = next_token
response = self.make_request('ListTopics', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_topic_attributes(self, topic):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
"""
params = {'ContentType' : 'JSON',
'TopicArn' : topic}
response = self.make_request('GetTopicAttributes', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
    def add_permission(self, topic, label, account_ids, actions):
        """
        Adds a statement to a topic's access control policy, granting
        access for the specified AWS accounts to the specified actions.
        :type topic: string
        :param topic: The ARN of the topic.
        :type label: string
        :param label: A unique identifier for the new policy statement.
        :type account_ids: list of strings
        :param account_ids: The AWS account ids of the users who will be
                            given access to the specified actions.
        :type actions: list of strings
        :param actions: The actions you want to allow for each of the
                        specified principal(s).
        :rtype: dict
        :return: The parsed JSON response from the service.
        """
        params = {'ContentType' : 'JSON',
                  'TopicArn' : topic,
                  'Label' : label}
        # Expand the list arguments into the indexed query-string
        # parameters the SNS API expects.
        self.build_list_params(params, account_ids, 'AWSAccountId')
        self.build_list_params(params, actions, 'ActionName')
        response = self.make_request('AddPermission', params, '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
def remove_permission(self, topic, label):
"""
Removes a statement from a topic's access control policy.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the policy statement
to be removed.
"""
params = {'ContentType' : 'JSON',
'TopicArn' : topic,
'Label' : label}
response = self.make_request('RemovePermission', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def create_topic(self, topic):
"""
Create a new Topic.
:type topic: string
:param topic: The name of the new topic.
"""
params = {'ContentType' : 'JSON',
'Name' : topic}
response = self.make_request('CreateTopic', params, | '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def delete_topic(self, topic):
"""
Delete an existing topic
:type topic: string
| :param topic: The ARN of the topic
"""
params = {'ContentType' : 'JSON',
'TopicArn' : topic}
response = self.make_request('DeleteTopic', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def publish(self, topic, message, subject=None):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type message: string
:param message: The message you want to send to the topic.
Messages must be UTF-8 encoded strings and
be at most 4KB in size.
:type subject: string
:param subject: Optional parameter to be used as the "Subject"
line of the email notifications.
"""
params = {'ContentType' : 'JSON',
'TopicArn' : topic,
'Message' : message}
if subject:
params['Subject'] = subject
response = self.make_request('Publish', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
    def subscribe(self, topic, protocol, endpoint):
        """
        Subscribe to a Topic.
        :type topic: string
        :param topic: The ARN of the topic to subscribe to.
        :type protocol: string
        :param protocol: The protocol used to communicate with
                         the subscriber. Current choices are:
                         email|email-json|http|https|sqs
        :type endpoint: string
        :param endpoint: The location of the endpoint for
                         the subscriber.
                         * For email, this would be a valid email address
                         * For email-json, this would be a valid email address
                         * For http, this would be a URL beginning with http
                         * For https, this would be a URL beginning with https
                         * For sqs, this would be the ARN of an SQS Queue
        :rtype: dict
        :return: The parsed JSON response from the SNS Subscribe call.
                 (The previous docstring's sdb Domain return value was a
                 copy/paste error.)
        """
        params = {'ContentType' : 'JSON',
                  'TopicArn' : topic,
                  'Protocol' : protocol,
                  'Endpoint' : endpoint}
        response = self.make_request('Subscribe', params, '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
|
'''
Created on Jul 28, 2013
@author: Rob
'''
import os, yaml
# Built-in default configuration. ReadFromDisk() writes this to config.yml
# on first run and then replaces it with whatever the file contains.
config = {
    # Nicknames the bot answers to.
    'names': [
        'NT',
        'VGTestServer'
    ],
    # IRC servers to connect to, keyed by hostname.
    'servers':{
        'irc.server.tld': {
            'port':6667,
            'password':None,
            # Channels to join, with per-channel feature toggles.
            'channels':{
                '#vgstation': {
                    'nudges':True,
                    'status':True
                }
            }
        }
    },
    # Per-plugin settings.
    'plugins':
    {
        'redmine': {
            'url': '',
            'apikey':''
        },
        # In-game -> IRC nudge relay endpoint.
        'nudge': {
            'hostname': '',
            'port': 45678,
            'key': 'passwordgoeshere'
        }
    }
}
def ReadFromDisk():
    """Load config.yml into the module-level `config`.

    On first run the built-in defaults are written to config.yml so the
    user has a template to edit; the file contents then replace `config`.
    """
    global config
    config_file = 'config.yml'
    if not os.path.isfile(config_file):
        with open(config_file, 'w') as cw:
            yaml.dump(config, cw, default_flow_style=False)
    with open(config_file, 'r') as cr:
        # safe_load: plain yaml.load() can construct arbitrary Python
        # objects from tagged input and requires an explicit Loader on
        # modern PyYAML.
        config = yaml.safe_load(cr)
def get(key, default=None):
    """Look up a dotted *key* path in the global config.

    get('servers.irc.server.tld.port') walks nested dicts one component at
    a time. Returns *default* when any component is missing, or when the
    path descends into a non-indexable value.
    """
    global config
    try:
        value = config
        for part in key.split('.'):
            value = value[part]
        return value
    except (KeyError, TypeError):
        # TypeError covers paths like 'c.z' where config['c'] is a scalar;
        # the original only caught KeyError and crashed on that case.
        return default
# -*- coding: utf-8 -*-
#from __future__ import print_function, division, absolute_import, unicode_literals
#from gmusicapi.clients.webclient import Webclient
#from gmusicapi.clients | .musicmanager import Musicmanager
from gmusicapi.clients.mobile | client import Mobileclient
#(Webclient, Musicmanager, Mobileclient) # noqa
|
# Read a count N, then N integers (one per line); print them space-separated
# in descending order.
count = int(input())
numbers = [int(input()) for _ in range(count)]
print(' '.join(str(n) for n in sorted(numbers, reverse=True)))
|
d_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.removeinstancedatabase': {
'Meta': {'object_name': 'RemoveInstanceDatabase'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'remove_instances_database_manager'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'remove_instances_database_manager'", 'to': u"orm['physical.Instance']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'remove_instances_database_manager'", 'to': u"orm['notification.TaskHistory']"}),
'task_schedule': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'maintenance_removeinstancedatabase_related'", 'null': 'True', 'to': u"orm['maintenance.TaskSchedule']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.restartdatabase': {
'Meta': {'object_name': 'RestartDatabase'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'restart_database_manager'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restart_database_manager'", 'to': u"orm['notification.TaskHistory']"}),
'task_schedule': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'maintenance_restartdatabase_related'", 'null': 'True', 'to': u"orm['maintenance.TaskSchedule']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.taskschedule': {
'Meta': {'object_name': 'TaskSchedule'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'task_schedules'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.updatessl': {
'Meta': {'object_name': 'UpdateSsl'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'update_ssl_manager'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'update_ssl_manager'", 'to': u"orm['notification.TaskHistory']"}),
'task_schedule': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'maintenance_updatessl_related'", 'null': 'True', 'to': u"orm['maintenance.TaskSchedule']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'notification.taskhistory': {
'Meta': {'object_name': 'TaskHistory'},
'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'details': ('d | jango.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [] | , {'primary_key': 'True'}),
'object_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'relevance': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '1'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_status': ('django.db.models.fields.CharField', [], {'default': "u'WAITING'", 'max_length': '100', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeFiel |
from mrq.task import Task
from mrq.context import connections
class MongoTimeout(Task):
    """Task running a deliberately long server-side JS loop on the jobs DB.

    Used to exercise/provoke a MongoDB operation timeout; returns whatever
    the server-side function yields if it ever completes.
    """

    def run(self, params):
        # The JS source is kept byte-identical to the original query.
        script = """
        function() {
            var a;
            for (i=0;i<10000000;i++) {
                for (y=0;y<10000000;y++) {
                    a = Math.max(y);
                }
            }
            return a;
        }
        """
        return connections.mongodb_jobs.eval(script)
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 Barroux Abbey (www.barroux.org)
# Copyright 2017-2019 Akretion France (www.akretion.com)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields
class BaseConfigSettings(models.TransientModel):
    """Settings wizard extension exposing the mass-validation fields.

    All fields below are ``related`` proxies to fields stored on
    ``res.company`` (via ``company_id``), so editing them here writes
    through to the current company.
    """
    _inherit = 'base.config.settings'
    # Account used for mass validation (stored on the company).
    mass_validation_account_id = fields.Many2one(
        related='company_id.mass_validation_account_id')
    # Analytic account counterpart (stored on the company).
    mass_validation_analytic_account_id = fields.Many2one(
        related='company_id.mass_validation_analytic_account_id')
    # Journal used for the validation entries (stored on the company).
    mass_validation_journal_id = fields.Many2one(
        related='company_id.mass_validation_journal_id')
    # Whether generated moves are posted — presumably immediately; confirm.
    mass_post_move = fields.Boolean(related='company_id.mass_post_move')
|
# coding=utf-8
import socket
import thread
import time
import Queue
import re
import random
class IrcClient:
    """Minimal threaded IRC client (Python 2: ``thread``/``Queue`` modules).

    Three worker threads are started on construction: one reads the socket
    byte-by-byte into RecvQueue, one flushes SendQueue to the socket, and
    one parses received lines and dispatches them to registered handlers.
    """

    def __init__(self, host, port, nick, realname, printAll=True, isMibbitBot=False):
        self.nick = nick
        self.realname = realname
        self.host = host
        self.port = port
        self.sock = socket.socket()
        self.RecvQueue = Queue.Queue()
        self.SendQueue = Queue.Queue()
        self.printAll = printAll
        self.EventHandlers = []
        self.ignoredNicks = []
        self.channels = []
        self.sock.connect((host, port))
        thread.start_new_thread(self.RecieveWorker, ())
        thread.start_new_thread(self.SendWorker, ())
        thread.start_new_thread(self.EventWorker, ())
        self.RegisterEventHandler(self.PingEventHandler)
        self.WaitForSilence()
        self.Send("USER " + self.nick + " 0 * :" + self.realname)
        self.Send("NICK " + self.nick)
        self.WaitForSilence()

    def RecieveWorker(self):
        """Read the socket one byte at a time, queueing complete lines.

        Lines are split on '\\n'; any trailing '\\r' is kept, as before.
        """
        recvbuffer = ""
        while True:
            c = self.sock.recv(1)
            if c == '\n':
                if self.printAll:
                    print("RECV: " + recvbuffer)
                self.RecvQueue.put(recvbuffer)
                recvbuffer = ""
            else:
                recvbuffer += c

    def SendWorker(self):
        """Drain SendQueue onto the socket."""
        while True:
            toSend = self.SendQueue.get()
            if self.printAll:
                print("SEND: " + toSend)
            # Bug fix: plain send() may transmit only part of the buffer;
            # sendall() retries until everything is written.
            self.sock.sendall(toSend)

    def EventWorker(self):
        """Parse queued lines and dispatch them to all registered handlers."""
        # Compile the line parser once instead of on every message.
        # (r'' instead of the Python-2-only ur'' literal; the pattern is
        # pure ASCII so matching behaviour is unchanged.)
        line_re = re.compile(r':([\w!.@-]*) {0,1}([A-Za-z0-9]*) {0,1}([\w# ]*) {0,1}:{0,1}(.*)')
        while True:
            recvItem = self.RecvQueue.get()
            prefix = command = params = trailing = ""
            match = line_re.search(recvItem)
            if match is not None:
                prefix = match.group(1)
                command = match.group(2)
                params = match.group(3)
                trailing = match.group(4)
            for func in self.EventHandlers:
                try:
                    func(self, recvItem, prefix, command, params, trailing)
                except Exception:
                    # Never let one faulty handler kill the dispatch loop,
                    # but do not swallow KeyboardInterrupt/SystemExit.
                    print("WARNING: Error in handler function!")

    def WaitForSilence(self, maxIterations=10, delay=0.2):
        """Poll until RecvQueue drains or maxIterations polls elapse.

        Bug fix: the original compared the *bound method*
        ``self.RecvQueue.empty`` (not its call) to True, which is always
        unequal — so the loop always ran the full maxIterations*delay.
        """
        time.sleep(delay)
        while not self.RecvQueue.empty():
            time.sleep(delay)
            maxIterations -= 1
            if maxIterations <= 0:
                break

    def RegisterEventHandler(self, func):
        self.EventHandlers.append(func)

    def RemoveEventHandler(self, func):
        try:
            self.EventHandlers.remove(func)
        except ValueError:
            # list.remove raises ValueError for unknown items.
            print("WARNING: tried to remove unknown handler!")

    def Send(self, cmd):
        """Queue a raw IRC command for transmission."""
        self.SendQueue.put(cmd + '\n')

    def PingEventHandler(self, client, event, prefix, command, params, trailing):
        # Keep the connection alive: answer PING with a matching PONG.
        if event[:4] == "PING":
            self.Send("PONG" + event[4:])

    def SendMessage(self, destination, message):
        self.Send("PRIVMSG " + destination + " :" + message)

    def BroadcastMessage(self, message):
        """Send *message* to every channel this client joined."""
        for channel in self.channels:
            self.SendMessage(channel, message)

    def SetNick(self, nickname):
        self.Send("NICK " + nickname)

    def JoinChannel(self, channelname, channelpassword=""):
        self.Send("JOIN " + channelname + " " + channelpassword)
        self.channels.append(channelname)

    def LeaveChannel(self, channelname):
        self.Send("PART " + channelname)
        try:
            self.channels.remove(channelname)
        except ValueError:
            print("WARNING: Tried to leave channel " + channelname + ", but you arent in that channel!")

    def AddIgnore(self, name):
        self.ignoredNicks.append(name)

    def RemoveIgnore(self, name):
        try:
            self.ignoredNicks.remove(name)
        except ValueError:
            print("WARNING: You didnt ignore " + name + " in the first place!")

    def IsIgnored(self, name):
        return name in self.ignoredNicks

    def Identify(self, password):
        self.SendMessage("nickserv", "identify " + password)
|
#!/usr/bin/env python
import setuptools |
if __name__ == "__main__":
    # Non-Python files shipped with the package: native SDK artifacts and
    # the python sample's data files.
    extra_files = {
        '': ['sdk/*.so', 'sdk/*.h', 'sample/python/*.txt']
    }
    setuptools.setup(
        name="aecg100",
        version="1.1.0.18",
        author="WHALETEQ Co., LTD",
        description="WHALETEQ Co., LTD AECG100 Linux SDK",
        url="https://www.whaleteq.com/en/Support/Download/7/Linux%20SDK",
        include_package_data=True,
        package_data=extra_files,
    )
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import api_select
prefixed_jobs = """
serial flow: [
job: 'top_quick1'
serial flow: [
job: 'top_x_quick2-1'
]
serial flow: [
job: 'top_x_quick2-2'
]
serial flow: [
job: 'top_x_quick2-3'
]
job: 'top_quick3'
parallel flow: (
serial flow: [
job: 'top_y_z_quick4a'
]
serial flow: [
job: 'quick4b'
]
job: 'top_y_quick5'
)
]
"""
def test_prefix(api_type, c | apsys):
with api_select.api(__file__, api_type) as api:
def job(name):
api.job(name, max_fails=0, expect_invocations=0, expect_order=None, params=None)
api.flow_job()
job('quick1')
index = 0
for index in 1, 2, 3:
job('x_quick2-' + str(index))
job('quick3')
job('y_z_quick4')
job('y_quick5')
with serial(a | pi, timeout=70, report_interval=3, job_name_prefix='top_', just_dump=True) as ctrl1:
ctrl1.invoke('quick1')
for index in 1, 2, 3:
with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2:
ctrl2.invoke('quick2-' + str(index))
ctrl1.invoke('quick3')
with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2:
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3a:
ctrl3a.invoke('quick4a')
# Reset prefix
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix=None) as ctrl3b:
ctrl3b.invoke('quick4b')
ctrl2.invoke('quick5')
sout, _ = capsys.readouterr()
assert prefixed_jobs.strip() in sout
|
#!/usr/bin/python
|
import apt_pkg
import logging
import os
import mock
import sys
import tempfile
import unittest
sys.path.insert(0, "..")
from unattended_upgrade import _setup_logging
class MockOptions:
    # Stand-in for the parsed command-line options object passed to
    # _setup_logging; presumably only dry_run/debug are read — confirm.
    dry_run = False
    debug = False
class TestLogdir(unittest.TestCase):
    """Tests for the Unattended-Upgrade::LogDir configuration handling."""

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        apt_pkg.init()
        self.mock_options = MockOptions()

    def tearDown(self):
        # Fix: setUp created a fresh directory per test but nothing ever
        # removed it, leaking temp directories on every run.
        import shutil
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def test_logdir(self):
        # _setup_logging must create the configured log directory.
        logdir = os.path.join(self.tempdir, "mylog")
        apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir)
        logging.root.handlers = []
        _setup_logging(self.mock_options)
        self.assertTrue(os.path.exists(logdir))

    def test_logdir_depreated(self):
        # test that the deprecated APT::UnattendedUpgrades dir is not used
        # if the new Unattended-Upgrade::LogDir is given
        logdir = os.path.join(self.tempdir, "mylog-use")
        logdir2 = os.path.join(self.tempdir, "mylog-dontuse")
        apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir)
        apt_pkg.config.set("APT::UnattendedUpgrades::LogDir", logdir2)
        logging.root.handlers = []
        _setup_logging(self.mock_options)
        self.assertTrue(os.path.exists(logdir))
        self.assertFalse(os.path.exists(logdir2))
if __name__ == "__main__":
    # Verbose logging when the test module is executed directly.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
def countingsort(sortableli | st):
maxval = max(sortablelist)
m = maxval + 1
count = [0] * m # init with zeros
for a in sortablelist:
count[a] += 1 # count occurences
i = 0
for a in range(m): # emit
for c in range(count[a]): # - emit 'count[a]' copies | of 'a'
sortablelist[i] = a
i += 1
def main():
    """Demo driver: counting-sort 100 random ints in [0, 1000] and print."""
    import random
    sample = [random.randint(0, 1000) for _ in range(100)]
    countingsort(sample)
    print(sample)

main()
|
# $Id$
#
from rdkit import Chem
from rdkit.Chem import rdReducedGraphs as rdRG
from rdkit import RDConfig
import numpy
import unittest
class TestCase(unittest.TestCase):
    """Tests for RDKit extended reduced graphs (ErG) and their fingerprints.

    Modernized: the deprecated failUnless*/failUnlessEqual aliases were
    removed in Python 3.12; use the assert* equivalents.
    """

    def setUp(self):
        pass

    def test1(self):
        # Reduced graphs of the aromatic vs. aliphatic ring variant differ
        # only in ring-atom aromaticity in the output SMILES.
        m = Chem.MolFromSmiles('OCCc1ccccc1')
        mrg = rdRG.GenerateMolExtendedReducedGraph(m)
        mrg.UpdatePropertyCache(False)
        self.assertEqual('[*]cCCO', Chem.MolToSmiles(mrg))
        m = Chem.MolFromSmiles('OCCC1CCCCC1')
        mrg = rdRG.GenerateMolExtendedReducedGraph(m)
        mrg.UpdatePropertyCache(False)
        self.assertEqual('[*]CCCO', Chem.MolToSmiles(mrg))

    def test2(self):
        # Fingerprint from a precomputed reduced graph must match the one
        # computed directly from the molecule.
        m = Chem.MolFromSmiles('OCCc1ccccc1')
        mrg = rdRG.GenerateMolExtendedReducedGraph(m)
        mrg.UpdatePropertyCache(False)
        self.assertEqual('[*]cCCO', Chem.MolToSmiles(mrg))
        fp1 = rdRG.GenerateErGFingerprintForReducedGraph(mrg)
        fp2 = rdRG.GetErGFingerprint(m)
        md = max(abs(fp1 - fp2))
        self.assertLess(md, 1e-4)

    def test3(self):
        # Benzene vs. cyclohexadiene analogs give identical fingerprints.
        m = Chem.MolFromSmiles('OCCc1ccccc1')
        fp1 = rdRG.GetErGFingerprint(m)
        m = Chem.MolFromSmiles('OCCC1CC=CC=C1')
        fp2 = rdRG.GetErGFingerprint(m)
        md = max(abs(fp1 - fp2))
        self.assertAlmostEqual(0.0, md, 4)

    def test4(self):
        # fuzzIncrement shifts neighbouring bins by 2 * increment at most.
        m = Chem.MolFromSmiles('OCCc1ccccc1')
        fp1 = rdRG.GetErGFingerprint(m)
        fp2 = rdRG.GetErGFingerprint(m, fuzzIncrement=0.1)
        md = max(abs(fp1 - fp2))
        self.assertAlmostEqual(0.2, md, 4)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# -*- encoding: utf- | 8 -*-
{
'name': 'Export Inventory Costs',
'version': '3.0.0.0',
'category': "Warehouse Management",
'description': """
Export Inventory Costs
""",
'author': 'Didotech SRL',
'website': 'http://www.didotech.com',
'license': 'AGPL-3',
"depends": [
'base',
'stock',
],
"data": [
'wizard/wizard_inventory_costs_view.xml',
'v | iews/stock_view.xml'
],
"demo": [],
"active": False,
"installable": True,
"application": True,
}
|
r += [ f ]
return r
def getInitParameters(self, slave_tid):
"""
hand over parameters to slave once.
@param slave_tid: slave task id
@type slave_tid: int
@return: dictionary with init parameters
@rtype: {param:value}
"""
host = self.hostnameFromTID( slave_tid )
nice = self.niceness.get( host, self.niceness.get('default',0) )
return {'ferror':self.ferror,
'debug':self.debug, 'nice':nice, 'host':host}
def cleanup( self ):
"""
Tidy up
"""
if self.clean:
self.cleanCache()
def cleanCache( self ):
"""
Remove left-over cache files
"""
fs = [ self.ref_frec, self.ref_flig, self.ref_com, self.ref_brec,
self.ref_blig ]
fs.extend( self.members_frec + self.members_flig )
fs.extend( self.members_brec + self.members_blig )
fs.extend( self.members_com )
for f in fs:
self.log.add('removing %s: %i' % (f, T.tryRemove(f)) )
def saveProtocols( self ):
"""
Save protocol to file.
"""
f_prot = T.stripSuffix( T.absfile(self.fout) ) + '_protocols.dat'
self.log.write( 'Saving parameters to %s...' % f_prot )
T.dump( self.protocols, f_prot )
def done(self):
"""
Write result to file.
"""
tree = self.getResult()
self.log.add("Saving result to %s..." % self.fout)
T.dump( tree, self.fout )
self.log.add( "Done" )
##
## Assemble the protocols for many AmberEntropist runs
##
def __cpupdate( self, d1, d2 ):
"""
Merge 2 dictionaries *d1* and *d2* and return a copy
"""
r = copy.copy( d1 )
r.update( d2 )
return r
    def protocols_standard( self, trec, tlig, tcom,
                            ex_frec=None, ex_flig=None, ex_com=None,
                            doshift=1,
                            **options ):
        """
        Create 13 parameter sets for AmberEntropist that cover the calculation
        of rec, lig, com and fcom entropies with and without splitting of the
        complex, with and without shifting and shuffling of frames.
        @param options: additional options (like cast, s, e, atoms, thin, step)
                        that are the same in all parameter sets
        @type options: key=value
        @return: each value of the returned dict contains a set of
                 arguments for one AmberEntropist run
        @rtype: dict of dict
        """
        fcp = self.__cpupdate
        r = {}
        S = self ## make rest more readable
        # Shared defaults for every protocol; caller options override them.
        d = { 'ref':None, 'cast':1, 'chains':None,
              'split':0, 'shift':0, 'shuffle':0, 'ex_n':0, 'ex3':None,
              'thin':None, 'step':1, 'ss':0, 'se':None, 'atoms':None }
        d.update( options )
        # Key naming: f=free, b=bound, rec/lig/com = receptor/ligand/complex
        # — inferred from the ref_* attribute names; confirm against class.
        r['frec'] = fcp( d, {'traj':trec, 'ref':S.ref_brec, 'ex':ex_frec } )
        r['flig'] = fcp( d, {'traj':tlig, 'ref':S.ref_blig, 'ex':ex_flig } )
        r['brec'] = fcp( d, {'traj':tcom, 'ref':S.ref_frec, 'ex':ex_com,
                             'chains':S.cr } )
        r['blig'] = fcp( d, {'traj':tcom, 'ref':S.ref_flig, 'ex':ex_com,
                             'chains':S.cl } )
        r['fcom'] = fcp( d, {'traj':'%s+%s'%(trec, tlig),
                             'ex':(ex_frec, ex_flig),
                             'ref':S.ref_com, 'split':1 } )
##         if doshift:
##             r['fcom_shift'] = fcp( r['fcom'], {'shift':1 } )
        r['fcom_shuff'] = fcp( r['fcom'], {'shuffle':1 } )
        r['com'] = fcp( d, {'traj':tcom, 'ex':ex_com,
                            'ref':'%s+%s' % (S.ref_frec, S.ref_flig) } )
        r['com_split'] = fcp( r['com'], { 'split':1, 'border':S.cl[0] } )
##        r['com_shuff'] = fcp( r['com'], { 'shuffle':1, 'border':S.cl[0] } )
        r['com_split_shuff'] = fcp( r['com'],
                                    {'split':1,'shuffle':1,'border':S.cl[0] } )
        # Shifted variants are optional (skipped for single members, see
        # protocols_single_all which passes doshift=0).
        if doshift:
##             r['com_shift'] = fcp( r['com'], { 'shift':1,'border':S.cl[0] } )
            r['com_split_shift'] = fcp( r['com'],
                                        {'split':1,'shift':1, 'border':S.cl[0] } )
        return r
def protocols_single_all( self, **options ):
"""
Set of protocols for all-member trajectories AND single-member traj.
with the different shuffle, shift, split settings.
Usually 11 x 13 protocols for AmberEntropist (10 members and 1 for all)
@param options: additional options (like cast, s, e, atoms, thin, step)
that are the same in all parameter sets
@type options: key=value
@return: each value of the returned dict contains a set of arguments
for one AmberEntropist run, each key is a tuple of the
member index and the protocol name, i.e. (0, 'fcom_shuffle')
The set of protocols for all-member trajectories has member
index None.
@rtype: dict of dict
"""
r = {}
## put all-member protocolls under member index 'None'
prots = self.protocols_standard( self.rec, self.lig, self.com,
self.ex_frec, self.ex_flig, self.ex_com,
**options )
for k,p in prots.items():
r[ (None, k) ] = p
if not self.all:
## put single-member protocols under their respective member index
for i in range( len( self.members_frec ) ):
prots = self.protocols_standard(self.members_frec[i],
self.members_flig[i],
self.members_com[i], doshift=0,
**options )
for k, p in prots.items():
r[ (i, k) ] = p
return r
def protocols_var_range( self, **options ):
"""
Complete set of protocols also considering different values of the
variable option.
"""
self.log.add( 'variable option %s with %i values' \
% (self.var, len(self.vrange)))
r = {}
for v in self.vrange:
d = copy.copy( options )
d[ self.var ] = v
prots = self.protocols_single_all( **d )
for k, p in prots.items():
r[ (v,) + k ] = p
return r
##
## Re-organize results
##
    def dictionate( self, d ):
        """
        Take dict with tuple keys (value, int_member, str_protocol) and build
        a tree-like dict of dicts in which the values of d can be accessed
        like::
            d[value][int_member][str_protocol]
        @param d: the raw results accumulated from the slave nodes
        @type d: dict
        @return: tree-like dict ordered by variable value, member, protocol
        @rtype: dict of dict of dict of dict
        """
        r = {}
        # NOTE: keys[0] indexing relies on Python 2, where dict.keys()
        # returns a list; Python 3 would need list(d.keys()).
        keys = d.keys()
        ## only convert single value tuple keys into non-tuple keys
        if len( keys[0] ) == 1:
            for k in keys:
                r[ k[0] ] = d[ k ]
            return r
        # Group keys by their first element, then recurse on the remaining
        # tuple dimensions until only single-element keys are left.
        x_values = MU.nonredundant( [ k[0] for k in keys ] )
        for x in x_values:
            sub_keys = [ k for k in keys if k[0] == x ]
            y_values = MU.nonredundant( [ k[1:] for k in sub_keys] )
            r[ x ] = {}
            for y in y_values:
                r[x][y] = d[ (x,) + y ]
            r[ x ] = self.dictionate( r[x] )
        return r
def getResult( self, **arg ):
"""
Collapse the results for different values of the variable parameter
into lists and put the results into a tree ala::
r[ member_index ][ protocol_name ][ result_field ] -> [ values ]
@return: tree-like dict ordered by variable value, member, protocol
@rtype: dict of dict of dict of lists
"""
tree = self.dictionate( self.result )
vvalues = tree.keys()
|
#! /usr/bin/env python
#coding=utf-8
## @Configuration of Preprocessing for SEIMS
#
# TODO, give more detailed description here.
import os,platform
## Directories (host-specific input/output locations)
# NOTE(review): there is no fallback branch for other platforms (e.g. macOS);
# DATA_BASE_DIR would be undefined below in that case — confirm intended.
if platform.system() == "Windows":
    DATA_BASE_DIR = r'E:\github-zlj\model_data\model_dianbu_30m_longterm\data_prepare'
    PREPROC_SCRIPT_DIR = r'E:\github-zlj\SEIMS\preprocess'
    CPP_PROGRAM_DIR = r'E:\github-zlj\SEIMS_Preprocess\Debug'
    METIS_DIR = r'E:\github-zlj\SEIMS_Preprocess\metis\programs\Debug'
    MPIEXEC_DIR = None
elif platform.system() == "Linux":
    DATA_BASE_DIR = r'/data/liujz/data'
    PREPROC_SCRIPT_DIR = r'/data/hydro_preprocessing'
    CPP_PROGRAM_DIR = r'/data/hydro_preprocessing/cpp_programs'
    METIS_DIR = r'/soft/programming/metis-5.1.0/build/programs'
    MPIEXEC_DIR = None
# Derived directory layout under DATA_BASE_DIR.
CLIMATE_DATA_DIR = DATA_BASE_DIR + os.sep + 'climate'
SPATIAL_DATA_DIR = DATA_BASE_DIR + os.sep + 'spatial'
WORKING_DIR = DATA_BASE_DIR + os.sep + 'output'
## MongoDB related
#HOSTNAME = '192.168.6.55'
HOSTNAME = '127.0.0.1'
PORT = 27017
ClimateDBName = 'climate_dianbu'
SpatialDBName = 'model_dianbu_30m_longterm'
forCluster = False
stormMode = False
# Cluster runs use a 'cluster_'-prefixed spatial database name.
if forCluster and 'cluster_' not in SpatialDBName.lower():
    SpatialDBName = 'cluster_' + SpatialDBName
## Climate Input
PrecSitesVorShp = CLIMATE_DATA_DIR + os.sep + 'shp' + os.sep + 'Preci_dianbu_Vor.shp'
if stormMode:
    PrecStormSitesVorShp = CLIMATE_DATA_DIR + os.sep + 'shp' + os.sep + 'Preci_dianbu_Vor_storm.shp'
MeteorSitesVorShp = CLIMATE_DATA_DIR + os.sep + 'shp' + os.sep + 'Metero_hefei_Vor.shp'
PrecExcelPrefix = CLIMATE_DATA_DIR + os.sep + 'precipitation_by_day_'
PrecDataYear = [2014]
MeteoVarFile = CLIMATE_DATA_DIR + os.sep + 'Variables.txt'
MeteoDailyFile = CLIMATE_DATA_DIR + os.sep+ 'meteorology_dianbu_daily.txt'
MetroSiteFile = CLIMATE_DATA_DIR + os.sep + 'sites_hefei.txt'
DischargeExcelPrefix = CLIMATE_DATA_DIR + os.sep + 'discharge_by_day_'
DischargeYear = [2014]
## Parameters for SEIMS
sqliteFile = DATA_BASE_DIR + os.sep + "Parameter.db3"
## Spatial Input
dem = SPATIAL_DATA_DIR + os.sep + 'dem_30m.tif'
outlet_file = SPATIAL_DATA_DIR + os.sep + 'outlet_30m.shp'
threshold = 0 # threshold for stream extraction from D8-flow accumulation weighted Peuker-Douglas stream sources
              # if threshold is 0, then Drop Analysis is used to select the optimal value.
np = 4 # number of parallel processors
landuseFile = SPATIAL_DATA_DIR + os.sep + 'landuse_30m.tif'
# Two-layer soil property rasters (per-layer sand/clay/organic files).
sandList = []
clayList = []
orgList = []
for i in [1,2]:
    sandFile = SPATIAL_DATA_DIR + os.sep + "sand" + str(i) + ".tif"
    clayFile = SPATIAL_DATA_DIR + os.sep + "clay" + str(i) + ".tif"
    orgFile = SPATIAL_DATA_DIR + os.sep + "org" + str(i) + ".tif"
    sandList.append(sandFile)
    clayList.append(clayFile)
    orgList.append(orgFile)
defaultSand = 40
defaultClay = 30
defaultOrg = 2.5
## Predefined variables
CROP_FILE = PREPROC_SCRIPT_DIR + os.sep + 'crop.txt'
# NOTE(review): "DLAI" appears twice in CROP_ATTR_LIST — confirm whether the
# duplicate entry is intended.
CROP_ATTR_LIST = ["IDC", "EXT_COEF", "BMX_TREES", "BLAI", "HVSTI",\
                  "MAT_YRS", "T_BASE", "FRGRW1", "FRGRW2", "LAIMX1",\
                  "LAIMX2", "DLAI", "BN1", "BN2", "BN3", "BP1", "BP2",\
                  "BP3", "BIO_E", "BIOEHI", "CO2HI", "WAVP", "BIO_LEAF",\
                  "RDMX","CNYLD", "CPYLD", "WSYF", "DLAI", "T_OPT"]

# LANDUSE_ATTR_LIST and SOIL_ATTR_LIST are selected from the sqliteFile database
# NOTE(review): LANDUSE_ATTR_LIST has 7 entries but LANDUSE_ATTR_DB has 6
# ("USLE_P" has no DB counterpart) — verify the intended mapping.
LANDUSE_ATTR_LIST = ["Manning", "Interc_max", "Interc_min", "RootDepth", \
                     "USLE_C", "SOIL_T10","USLE_P"]
LANDUSE_ATTR_DB = ["manning","i_max","i_min", "root_depth", "usle_c", "SOIL_T10"]
## Caution: the sequence from "Sand" to "Poreindex" is fixed because of soil_param.py.
SOIL_ATTR_LIST = ["Sand", "Clay", "WiltingPoint", "FieldCap", "Porosity","Density",\
                  "Conductivity", "Poreindex", "USLE_K", "Residual", ]
SOIL_ATTR_DB = ["sand", "clay","wp", "fc", "porosity","B_DENSITY","ks", "P_INDEX",\
                "usle_k", "rm"]
### There are 15 attributes in SoilLookup table now.
### They are [SOILCODE], [SNAM], [KS](Conductivity), [POROSITY], [FC](field capacity), [P_INDEX](Poreindex), [RM],
### [WP](wiltingpoint), [B_DENSITY], [SAND], [CLAY], [SILT], [USLE_K], [TEXTURE], [HG]
## Hydrological parameters
coeTable = {"T2":[0.05, 0.48],"T10":[0.12, 0.52], "T100":[0.18,0.55]} ## used in radius.py
## Conventional Spatial Raster Data File Names
filledDem = "demFilledTau.tif"
flowDir = "flowDirTauD8.tif"
slope = "slopeTau.tif"
acc = "accTauD8.tif"
streamRaster = "streamRasterTau.tif"
flowDirDinf = "flowDirDinfTau.tif"
dirCodeDinf = "dirCodeDinfTau.tif"
slopeDinf = "slopeDinfTau.tif"
weightDinf = "weightDinfTau.tif"
modifiedOutlet = "outletM.shp"
streamSkeleton = "streamSkeleton.tif"
streamOrder = "streamOrderTau.tif"
chNetwork = "chNetwork.txt"
chCoord = "chCoord.txt"
streamNet = "streamNet.shp"
subbasin = "subbasinTau.tif"
mask_to_ext = "mask.tif"
## masked file names
subbasinM = "subbasinTauM.tif"
flowDirM = "flowDirTauM.tif"
streamRasterM = "streamRasterTauM.tif"
## output to mongoDB file names
reachesOut = "reach.shp"
subbasinOut = "subbasin.tif"
flowDirOut = "flow_dir.tif"
streamLinkOut = "stream_link.tif"
## masked and output to mongoDB file names
slopeM = "slope.tif"
filldemM = "dem.tif"
accM = "acc.tif"
streamOrderM = "stream_order.tif"
flowDirDinfM = "flow_dir_angle_dinf.tif"
dirCodeDinfM = "flow_dir_dinf.tif"
slopeDinfM = "slope_dinf.tif"
weightDinfM = "weight_dinf.tif"
subbasinVec = "subbasin.shp"
basinVec = "basin.shp"
chwidthName = "chwidth.tif"
landuseMFile = "landuse.tif"
soilTexture = "soil_texture.tif"
hydroGroup = "hydro_group.tif"
usleK = "usle_k.tif"
initSoilMoist = "moist_in.tif"
depressionFile = "depression.tif"
CN2File = "CN2.tif"
radiusFile = "radius.tif"
ManningFile = "Manning.tif"
velocityFile = "velocity.tif"
## flow time to the main river from each grid cell
t0_sFile = "t0_s.tif"
## standard deviation of t0_s
delta_sFile = "delta_s.tif"
## potential runoff coefficient
runoff_coefFile = "runoff_co.tif"
|
is None:
return self.sendError(Failure(), raw)
emit_async(catalog.SMTP_SEND_MESSAGE_START,
self._from_address, recipient.dest.addrstr)
d.addCallback(self.sendSuccess)
d.addErrback(self.sendError, raw)
return d
    def _maybe_encrypt_and_sign(self, raw, recipient, fetch_remote=True):
        """
        Attempt to encrypt and sign the outgoing message.
        The behaviour of this method depends on:
            1. the original message's content-type, and
            2. the availability of the recipient's public key.
        If the original message's content-type is "multipart/encrypted", then
        the original message is not altered. For any other content-type, the
        method attempts to fetch the recipient's public key. If the
        recipient's public key is available, the message is encrypted and
        signed; otherwise it is only signed.
        Note that, if the C{encrypted_only} configuration is set to True and
        the recipient's public key is not available, then the recipient
        address would have been rejected in SMTPDelivery.validateTo().
        The following table summarizes the overall behaviour of the gateway:
        +---------------------+-------------+---------------+----------------+
        | content-type        | rcpt pubkey | enforce encr. | action         |
        +---------------------+-------------+---------------+----------------+
        | multipart/encrypted | any         | any           | pass           |
        | other               | available   | any           | encrypt + sign |
        | other               | unavailable | yes           | reject         |
        | other               | unavailable | no            | sign           |
        +---------------------+-------------+---------------+----------------+
        :param raw: The raw message
        :type raw: str
        :param recipient: The recipient for the message
        :type: recipient: smtp.User
        :return: A Deferred that will be fired with a MIMEMultipart message
                 and the original recipient Message
        :rtype: Deferred
        """
        # pass if the original message's content-type is "multipart/encrypted"
        origmsg = Parser().parsestr(raw)
        if origmsg.get_content_type() == 'multipart/encrypted':
            return defer.succeed((origmsg, recipient))
        from_address = validate_address(self._from_address)
        # NOTE(review): username/domain are not used below — looks like dead
        # code; confirm before removing.
        username, domain = from_address.split('@')
        to_address = validate_address(recipient.dest.addrstr)
        def maybe_encrypt_and_sign(message):
            # Try full encrypt+sign; key-lookup failures fall through to the
            # sign-only errback below.
            d = self._encrypt_and_sign(
                message, to_address, from_address,
                fetch_remote=fetch_remote)
            d.addCallbacks(signal_encrypt_sign,
                           if_key_not_found_send_unencrypted,
                           errbackArgs=(message,))
            return d
        def signal_encrypt_sign(newmsg):
            emit_async(catalog.SMTP_END_ENCRYPT_AND_SIGN,
                       self._from_address,
                       "%s,%s" % (self._from_address, to_address))
            return newmsg, recipient
        def if_key_not_found_send_unencrypted(failure, message):
            # Only missing/mismatched-key errors are handled here; any other
            # failure re-raises via failure.trap.
            failure.trap(KeyNotFound, KeyAddressMismatch)
            self.log.info('Will send unencrypted message to %s.' % to_address)
            emit_async(catalog.SMTP_START_SIGN, self._from_address, to_address)
            d = self._sign(message, from_address)
            d.addCallback(signal_sign)
            return d
        def signal_sign(newmsg):
            emit_async(catalog.SMTP_END_SIGN, self._from_address)
            return newmsg, recipient
        self.log.info("Will encrypt the message with %s and sign with %s."
                      % (to_address, from_address))
        emit_async(catalog.SMTP_START_ENCRYPT_AND_SIGN,
                   self._from_address,
                   "%s,%s" % (self._from_address, to_address))
        # Attach the sender's public key first, then encrypt/sign the result.
        d = self._attach_key(origmsg, from_address)
        d.addCallback(maybe_encrypt_and_sign)
        return d
def _attach_key(self, origmsg, from_address):
    """
    Attach the sender's public OpenPGP key to the outgoing message.

    Looks the key up locally through the keymanager; on any lookup
    failure the original message is passed through unmodified.

    :param origmsg: The message to attach the key to.
    :type origmsg: email.message.Message
    :param from_address: The sender address whose key is attached.
    :type from_address: str

    :return: A Deferred that fires with the (possibly wrapped) message.
    :rtype: Deferred
    """
    attachment_name = "%s-email-key.asc" % (from_address,)

    def build_message_with_key(sender_key):
        # A non-multipart original must first be wrapped in a multipart
        # container (headers copied, decoded body re-attached) so the
        # key can ride along as a sibling part.
        if origmsg.is_multipart():
            container = origmsg
        else:
            container = MIMEMultipart()
            for header, value in origmsg.items():
                container.add_header(header, value)
            body_part = MIMEText(origmsg.get_payload(decode=True),
                                 origmsg.get_content_subtype())
            container.attach(body_part)
        # Identity encoder: the ASCII-armored key data is shipped as-is.
        key_part = MIMEApplication(sender_key.key_data,
                                   _subtype='pgp-keys',
                                   _encoder=lambda part: part)
        key_part.add_header('content-disposition', 'attachment',
                            filename=attachment_name)
        container.attach(key_part)
        return container

    self.log.info("Will send %s public key as an attachment."
                  % (from_address))
    d = self._keymanager.get_key(from_address, fetch_remote=False)
    d.addCallback(build_message_with_key)
    # Best effort: if the key cannot be fetched, send without it.
    d.addErrback(lambda _failure: origmsg)
    return d
def _encrypt_and_sign(self, origmsg, encrypt_address, sign_address,
                      fetch_remote=True):
    """
    Create an RFC 3156 compliant PGP encrypted and signed message using
    C{encrypt_address} to encrypt and C{sign_address} to sign.

    :param origmsg: The original message
    :type origmsg: email.message.Message
    :param encrypt_address: The address used to encrypt the message.
    :type encrypt_address: str
    :param sign_address: The address used to sign the message.
    :type sign_address: str
    :param fetch_remote: Forwarded to the keymanager's encrypt call —
                         presumably allows fetching the recipient key
                         from a remote source when not found locally;
                         TODO confirm against keymanager docs.
    :type fetch_remote: bool

    :return: A Deferred with the MultipartEncrypted message
    :rtype: Deferred
    """
    # create new multipart/encrypted message with 'pgp-encrypted' protocol

    def encrypt(res):
        # Encrypt the (header-fixed) original as a flat string; keep the
        # new outer envelope paired with the ciphertext for the next step.
        newmsg, origmsg = res
        d = self._keymanager.encrypt(
            origmsg.as_string(unixfrom=False),
            encrypt_address, sign=sign_address,
            fetch_remote=fetch_remote)
        d.addCallback(lambda encstr: (newmsg, encstr))
        return d

    def create_encrypted_message(res):
        # Assemble the RFC 3156 structure: part 1 is the control
        # (application/pgp-encrypted) part, part 2 the ciphertext.
        newmsg, encstr = res
        encmsg = MIMEApplication(
            encstr, _subtype='octet-stream', _encoder=encode_7or8bit)
        encmsg.add_header('content-disposition', 'attachment',
                          filename='msg.asc')
        # create meta message
        metamsg = PGPEncrypted()
        metamsg.add_header('Content-Disposition', 'attachment')
        # attach pgp message parts to new message; order matters per RFC 3156
        newmsg.attach(metamsg)
        newmsg.attach(encmsg)
        return newmsg

    # Move/normalize headers onto the new multipart/encrypted envelope
    # before encrypting the inner payload.
    d = self._fix_headers(
        origmsg,
        MultipartEncrypted('application/pgp-encrypted'),
        sign_address)
    d.addCallback(encrypt)
    d.addCallback(create_encrypted_message)
    return d
def _sign(self, origmsg, sign_address):
"""
Create an RFC 3156 compliant PGP signed MIME message using
C{sign_address}.
:param origmsg: The original message
:type origmsg: email.message.Message
:param sign_address: The address used to sign the message.
:type sign_address: str
:return: A Deferred with the MultipartSigned message.
:rtype: Deferred
"""
# apply base64 content-transfer-encoding
encode_base64_rec(origmsg)
# get message text with headers and replace \n for \r\n
fp = StringIO()
g = RFC3156CompliantGenerator(
fp, mangle_from_=False, maxheaderlen=76)
g.flatten(origmsg)
msgtext = re.sub('\r?\n', '\r\n', fp.getvalue())
    # make sure signed message ends with \r\n as per OpenPGP standard.
if origmsg.is_multipart():
if not msgtext.endswith("\r\n"):
msgtext += "\r\n"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.