prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
class WordDictionary(object):
    """Trie-backed word dictionary supporting '.' single-character wildcards."""

    def __init__(self):
        """
        initialize your data structure here.
        """
        # Each node is a dict mapping a character to a child node;
        # the sentinel key '#' marks the end of a complete word.
        self.root = {}

    def addWord(self, word):
        """
        Adds a word into the data structure.
        :type word: str
        :rtype: void
        """
        node = self.root
        for c in word:
            if c not in node:
                node[c] = {}
            node = node[c]
        node['#'] = '#'

    def search(self, word):
        """
        Returns if the word is in the data structure. A word could
        contain the dot character '.' to represent any one letter.
        :type word: str
        :rtype: bool
        """
        def find(word, node):
            if not word:
                return '#' in node
            c, word = word[0], word[1:]
            if c != '.':
                return c in node and find(word, node[c])
            # Wildcard: recurse into every child subtree, skipping the
            # end-of-word marker value.
            return any(find(word, d) for d in node.values() if d != '#')
        return find(word, self.root)
|
from fastapi.testclient import TestClient
from docs_src.metadata.tutorial001 import app
client = TestClient(app)
# Expected OpenAPI document for the metadata tutorial app; compared
# verbatim against the server-generated /openapi.json below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {
        "title": "ChimichangApp",
        "description": "\nChimichangApp API helps you do awesome stuff. 🚀\n\n## Items\n\nYou can **read items**.\n\n## Users\n\nYou will be able to:\n\n* **Create users** (_not implemented_).\n* **Read users** (_not implemented_).\n",
        "termsOfService": "http://example.com/terms/",
        "contact": {
            "name": "Deadpoolio the Amazing",
            "url": "http://x-force.example.com/contact/",
            "email": "dp@x-force.example.com",
        },
        "license": {
            "name": "Apache 2.0",
            "url": "https://www.apache.org/licenses/LICENSE-2.0.html",
        },
        "version": "0.0.1",
    },
    "paths": {
        "/items/": {
            "get": {
                "summary": "Read Items",
                "operationId": "read_items_items__get",
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
            }
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document must match the expected schema exactly."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_items():
    """GET /items/ returns the tutorial's single hard-coded item."""
    resp = client.get("/items/")
    assert resp.status_code == 200, resp.text
    assert resp.json() == [{"name": "Katana"}]
|
import sys
from numpy import *
from scipy import signal
import scipy.io.wavfile
from matplotlib import pyplot
import sklearn.decomposition
def main():
    """Blind source separation demo: split a stereo WAV containing two mixed
    sources with FastICA, save the separated tracks, and plot both pairs."""
    # First load the audio data; the sample data in this example is obtained
    # from http://www.ism.ac.jp/~shiro/research/blindsep.html
    rate, source = scipy.io.wavfile.read('/Users/nareshshah/blind_source_data/X_rsm2.wav')
    # The 2 sources are stored in left and right channels of the audio
    source_1, source_2 = source[:, 0], source[:, 1]
    data = c_[source_1, source_2]
    # Normalize the audio from int16 range to [-1, 1]
    data = data / 2.0 ** 15
    # Perform Fast ICA on the data to obtain the separated sources
    fast_ica = sklearn.decomposition.FastICA(n_components=2)
    separated = fast_ica.fit_transform(data)
    # Check, data = separated X mixing_matrix + mean
    assert allclose(data, separated.dot(fast_ica.mixing_.T) + fast_ica.mean_)
    # Map the separated result into the [-1, 1] range.
    # NOTE: this vectorized rescaling replaces the original Python-2-only
    # map()/reshape() pipeline, which breaks under Python 3: map() returns
    # an iterator (not a list), and the reshape row count used float
    # division. Operating on the (n_samples, 2) array directly also avoids
    # the redundant flatten/reshape round-trip.
    max_result, min_result = separated.max(), separated.min()
    separated = 2.0 * (separated - min_result) / (max_result - min_result) - 1.0
    # Store the separated audio, listen to them later
    scipy.io.wavfile.write('/Users/nareshshah/blind_source_data/separated_1.wav', rate, separated[:, 0])
    scipy.io.wavfile.write('/Users/nareshshah/blind_source_data/separated_2.wav', rate, separated[:, 1])
    # Plot the original and separated audio data
    fig = pyplot.figure(figsize=(10, 8))
    fig.canvas.set_window_title('Blind Source Separation')
    ax = fig.add_subplot(221)
    ax.set_title('Source #1')
    ax.set_ylim([-1, 1])
    ax.get_xaxis().set_visible(False)
    pyplot.plot(data[:, 0], color='r')
    ax = fig.add_subplot(223)
    ax.set_ylim([-1, 1])
    ax.set_title('Source #2')
    ax.get_xaxis().set_visible(False)
    pyplot.plot(data[:, 1], color='r')
    ax = fig.add_subplot(222)
    ax.set_ylim([-1, 1])
    ax.set_title('Separated #1')
    ax.get_xaxis().set_visible(False)
    pyplot.plot(separated[:, 0], color='g')
    ax = fig.add_subplot(224)
    ax.set_ylim([-1, 1])
    ax.set_title('Separated #2')
    ax.get_xaxis().set_visible(False)
    pyplot.plot(separated[:, 1], color='g')
    pyplot.show()
|
#!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from toughradius.radiusd.settings import *
import logging
import datetime
def process(req=None, user=None, radiusd=None, **kwargs):
    """Handle a RADIUS Accounting-Stop request.

    Closes the matching online session (if one is recorded) and writes an
    accounting ticket via the store. Non-stop packets are ignored.
    """
    if not req.get_acct_status_type() == STATUS_TYPE_STOP:
        return
    runstat = radiusd.runstat
    store = radiusd.store
    runstat.acct_stop += 1
    ticket = req.get_ticket()
    if not ticket.nas_addr:
        # Fall back to the packet's source address when the NAS did not
        # report its own address.
        ticket.nas_addr = req.source[0]
    _datetime = datetime.datetime.now()
    online = store.get_online(ticket.nas_addr, ticket.acct_session_id)
    if not online:
        # No online record exists: reconstruct the start time by subtracting
        # the reported session duration from "now".
        session_time = ticket.acct_session_time
        stop_time = _datetime.strftime("%Y-%m-%d %H:%M:%S")
        start_time = (_datetime - datetime.timedelta(seconds=int(session_time))).strftime("%Y-%m-%d %H:%M:%S")
        ticket.acct_start_time = start_time
        ticket.acct_stop_time = stop_time
        # Both source fields are marked with the stop status, since the
        # start record was never seen.
        ticket.start_source = STATUS_TYPE_STOP
        ticket.stop_source = STATUS_TYPE_STOP
        store.add_ticket(ticket)
    else:
        store.del_online(ticket.nas_addr, ticket.acct_session_id)
        ticket.acct_start_time = online['acct_start_time']
        ticket.acct_stop_time = _datetime.strftime("%Y-%m-%d %H:%M:%S")
        ticket.start_source = online['start_source']
        ticket.stop_source = STATUS_TYPE_STOP
        store.add_ticket(ticket)
    # NOTE(review): `level=` kwarg on syslog.info looks project-specific
    # (stdlib logging does not accept it) — confirm against radiusd.syslog.
    radiusd.syslog.info('[username:%s] Accounting stop request, remove online' % req.get_user_name(), level=logging.INFO)
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/duplicate-sources.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that specifying a source file more than once works correctly
and dos not cause a rebuild.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
def cat(target, source, env):
t = open(str(target[0]), 'wb')
for s in source:
t.write(open(str(s), 'rb').read())
t.close()
env = Environment(BUILDERS = {'Cat' : Builder(action = cat)})
env.Cat('out.txt', ['f1.in', 'f2.in', 'f1.in'])
""")
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.run(arguments='--debug=explain .')
test.must_match('out.txt', "f1.in\nf2.in\nf1.in\n")
test.up_to_date(options='--debug=explain', arguments='.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
ne_candidate['we_vote_id'] if 'we_vote_id' in one_candidate else ''
google_civic_election_id = \
one_candidate['google_civic_election_id'] if 'google_civic_election_id' in one_candidate else ''
contest_office_we_vote_id = \
one_candidate['contest_office_we_vote_id'] if 'contest_office_we_vote_id' in one_candidate else ''
politician_we_vote_id = one_candidate['politician_we_vote_id'] \
if 'politician_we_vote_id' in one_candidate else ''
candidate_twitter_handle = one_candidate['candidate_twitter_handle'] \
if 'candidate_twitter_handle' in one_candidate else ''
vote_smart_id = one_candidate['vote_smart_id'] if 'vote_smart_id' in one_candidate else ''
maplight_id = one_candidate['maplight_id'] if 'maplight_id' in one_candidate else ''
# Check to see if there is an entry that matches in all critical ways, minus the we_vote_id
we_vote_id_from_master = we_vote_id
results = candidate_list_manager.retrieve_possible_duplicate_candidates(
candidate_name, google_civic_candidate_name, google_civic_election_id, contest_office_we_vote_id,
politician_we_vote_id, candidate_twitter_handle, vote_smart_id, maplight_id,
we_vote_id_from_master)
if results['candidate_list_found']:
# There seems to be a duplicate already in this database using a different we_vote_id
duplicates_removed += 1
else:
filtered_structured_json.append(one_candidate)
candidates_results = {
'success': True,
'status': "FILTER_CANDIDATES_FOR_DUPLICATES_PROCESS_COMPLETE",
'duplicates_removed': duplicates_removed,
'structured_json': filtered_structured_json,
}
return candidates_results
def candidates_import_from_structured_json(structured_json):
candidate_campaign_manager = CandidateCampaignManager()
candidates_saved = 0
candidates_updated = 0
candidates_not_processed = 0
for one_candidate in structured_json:
candidate_name = one_candidate['candidate_name'] if 'candidate_name' in one_candidate else ''
we_vote_id = one_candidate['we_vote_id'] if 'we_vote_id' in one_candidate else ''
google_civic_election_id = \
one_candidate['google_civic_election_id'] if 'google_civic_election_id' in one_candidate else ''
ocd_division_id = one_candidate['ocd_division_id'] if 'ocd_division_id' in one_candidate else ''
contest_office_we_vote_id = \
one_candidate['contest_office_we_vote_id'] if 'contest_office_we_vote_id' in one_candidate else ''
# This routine imports from another We Vote server, so a contest_office_id doesn't come from import
# Look up contest_office in this local database.
# If we don't find a contest_office by we_vote_id, then we know the contest_office hasn't been imported
# from another server yet, so we fail out.
contest_office_manager = ContestOfficeManager()
contest_office_id = contest_office_manager.fetch_contest_office_id_from_we_vote_id(
contest_office_we_vote_id)
if positive_value_exists(candidate_name) and positive_value_exists(google_civic_election_id) \
and positive_value_exists(we_vote_id) and positive_value_exists(contest_office_id):
proceed_to_update_or_create = True
else:
proceed_to_update_or_create = False
if proceed_to_update_or_create:
updated_candidate_campaign_values = {
# Values we search against
'google_civic_election_id': google_civic_election_id,
'ocd_division_id': ocd_division_id,
'contest_office_we_vote_id': contest_office_we_vote_id,
'candidate_name': candidate_name,
# The rest of the values
'we_vote_id': we_vote_id,
'maplight_id': one_candidate['maplight_id'] if 'maplight_id' in one_candidate else None,
'vote_smart_id': one_candidate['vote_smart_id'] if 'vote_smart_id' in one_candidate else None,
'contest_office_id': contest_office_id, # Retrieved from above
'politician_we_vote_id':
one_candidate['politician_we_vote_id'] if 'politician_we_vote_id' in one_candidate else '',
'state_code': one_candidate['state_code'] if 'state_code' in one_candidate else '',
'party': one_candidate['party'] if 'party' in one_candidate else '',
'order_on_ballot': one_candidate['order_on_ballot'] if 'order_on_ballot' in one_candidate else 0,
'candidate_url': one_candidate['candidate_url'] if 'candidate_url' in one_candidate else '',
'photo_url': one_candidate['photo_url'] if 'photo_url' in one_candidate else '',
'photo_url_from_maplight':
one_candidate['photo_url_from_maplight'] if 'photo_url_from_maplight' in one_candidate else '',
'photo_url_from_vote_smart':
one_candidate['photo_url_from_vote_smart'] if 'photo_url_from_vote_smart' in one_candidate else '',
'facebook_url': one_candidate['facebook_url'] if 'facebook_url' in one_candidate else '',
'twitter_url': one_candidate['twitter_url'] if 'twitter_url' in one_candidate else '',
'google_plus_url': one_candidate['google_plus_url'] if 'google_plus_url' in one_candidate else '',
'youtube_url': one_candidate['youtube_url'] if 'youtube_url' in one_candidate else '',
'google_civic_candidate_name':
one_candidate['google_civic_candidate_name']
if 'google_civic_candidate_name' in one_candidate else '',
'candidate_email': one_candidate['candidate_email'] if 'candidate_email' in one_candidate else '',
'candidate_phone': one_candidate['candidate_phone'] if 'candidate_phone' in one_candidate else '',
'twitter_user_id': one_candidate['twitter_user_id'] if 'twitter_user_id' in one_candidate else '',
'candidate_twitter_handle': one_candidate[' | candidate_twitter_handle']
if 'candidate_twitter_handle' in one_candidate else '',
'twitter_name': one_candidate['twitter_name'] if 'twitter_nam | e' in one_candidate else '',
'twitter_location': one_candidate['twitter_location'] if 'twitter_location' in one_candidate else '',
'twitter_followers_count': one_candidate['twitter_followers_count']
if 'twitter_followers_count' in one_candidate else '',
'twitter_profile_image_url_https': one_candidate['twitter_profile_image_url_https']
if 'twitter_profile_image_url_https' in one_candidate else '',
'twitter_description': one_candidate['twitter_description']
if 'twitter_description' in one_candidate else '',
'wikipedia_page_id': one_candidate['wikipedia_page_id']
if 'wikipedia_page_id' in one_candidate else '',
'wikipedia_page_title': one_candidate['wikipedia_page_title']
if 'wikipedia_page_title' in one_candidate else '',
'wikipedia_photo_url': one_candidate['wikipedia_photo_url']
if 'wikipedia_photo_url' in one_candidate else '',
'ballotpedia_page_title': one_candidate['ballotpedia_page_title']
if 'ballotpedia_page_title' in one_candidate else '',
'ballotpedia_photo_url': one_candidate['ballotpedia_photo_url']
if 'ballotpedia_photo_url' in one_candidate else '',
'ballot_guide_official_statement': one_candidate['ballot_guide_official_statement']
if 'ballot_guide_official_statement' in one_candidate else '',
}
results = candidate_campaign_manager.update_or_create_candidate_campaign(
we_vote_id, google_civic_election_id, |
view()
self.selection_change_handler = self.thumb_view.connect('selection-changed', self.on_selection_changed)
# Initialization of panorama viewer:
# Since it takes significant amount of memory, we load it only
# once we encounter a panorama image (see on_selection_changed).
#self.load_panorama_viewer()
def do_deactivate(self):
    """The plugin has been deactivated, clean everything up.

    Restores the UI, disconnects the thumbnail-selection handler, and
    drops all references to the panorama widget.
    """
    # Remove all modifications and added widgets from the UI scene graph.
    # (In this implementation same as when hiding the panorama.)
    self.hide_panorama()
    # Unregister event handlers.
    self.thumb_view.disconnect(self.selection_change_handler)
    self.selection_change_handler = None
    # Release resources.
    self.panorama_view = None
    self.panorama_viewer_active = False
    self.panorama_viewer_loaded = False
def on_selection_changed(self, thumb_view):
    """An image has been selected: switch between normal and panorama view.

    Args:
        thumb_view: the thumbnail view that emitted 'selection-changed'.
    """
    # Use the reference of thumb_view passed as parameter, not
    # self.thumb_view (did cause errors).
    current_image = thumb_view.get_first_selected_image()  # may be None
    if current_image:
        # Get file path from the display URI.
        uri = current_image.get_uri_for_display()
        filepath = urllib.parse.urlparse(uri).path
        # If it is a panorama, switch to panorama viewer.
        if self.use_panorama_viewer(filepath):
            # Read panorama metadata; any failure falls back to normal view.
            try:
                metadata = self.get_pano_xmp(filepath)
                # I tried passing just the image file path, but cross-site-scripting
                # restrictions do not allow local file:// access.
                # Solutions: simple server or data uri.
                image = self.image_to_base64(filepath)
                # Lazy loading: Create panorama_viewer only when a panorama is encountered.
                # TODO: maybe unload it again after a certain amount of non-panorama images.
                if not self.panorama_viewer_loaded:
                    # 1. Load the panorama viewer, then load the image via callback.
                    self.load_panorama_viewer(lambda: self.panorama_view.load_image(image, metadata, self.show_panorama))
                else:
                    # 2. Load the image into the panorama viewer.
                    # 3. When finished, make it visible.
                    self.panorama_view.load_image(image, metadata, self.show_panorama)
            except Exception as error:
                print(error)
                # Fallback to display as normal image.
                self.hide_panorama()
        else:
            # It is a normal image.
            self.hide_panorama()
            # Release resources in the panorama viewer by loading an
            # empty/none image (1x1 transparent PNG data URI).
            if self.panorama_viewer_loaded:
                empty_image = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAC0lEQVQI12NgAAIAAAUAAeImBZsAAAAASUVORK5CYII='
                self.panorama_view.load_image(empty_image, {})
# Helper methods
def use_panorama_viewer(self, filepath):
    """Return True when the image's XMP GPano tags request the panorama viewer."""
    meta = GExiv2.Metadata(filepath)
    projection = meta.get_tag_string('Xmp.GPano.ProjectionType')
    opted_out = meta.get_tag_string('Xmp.GPano.UsePanoramaViewer') == 'False'
    return projection == 'equirectangular' and not opted_out
def get_pano_xmp(self, filepath):
    """Read XMP panorama metadata of an image file.

    Args:
        filepath: an image file to read
    Returns:
        a dict containing XMP keys mapped to float values
    Raises:
        ValueError: if a required GPano tag is missing.
    """
    metadata = GExiv2.Metadata(filepath)
    # For tags see: http://www.exiv2.org/tags.html
    # and http://exiv2.org/tags-xmp-GPano.html
    tags_required = {
        'Xmp.GPano.FullPanoWidthPixels': 'full_width',
        'Xmp.GPano.FullPanoHeightPixels': 'full_height',
        'Xmp.GPano.CroppedAreaImageWidthPixels': 'cropped_width',
        'Xmp.GPano.CroppedAreaImageHeightPixels': 'cropped_height',
        'Xmp.GPano.CroppedAreaLeftPixels': 'cropped_x',
        'Xmp.GPano.CroppedAreaTopPixels': 'cropped_y'
    }
    tags_optional = {
        'Xmp.GPano.PoseHeadingDegrees': 'pose_heading',
        'Xmp.GPano.InitialHorizontalFOVDegrees': 'initial_h_fov',
        'Xmp.GPano.InitialViewHeadingDegrees': 'initial_heading',
        'Xmp.GPano.InitialViewPitchDegrees': 'initial_pitch',
        'Xmp.GPano.InitialViewRollDegrees': 'initial_roll'
    }
    result = {}
    # Missing required tags abort; missing optional tags are simply skipped.
    for required, tags in ((True, tags_required), (False, tags_optional)):
        for tag, key in tags.items():
            if metadata.has_tag(tag):
                result[key] = float(metadata.get_tag_string(tag))
            elif required:
                # ValueError (was: bare Exception) is still caught by the
                # caller's `except Exception` fallback path.
                raise ValueError("Required tag %s is missing, cannot use panorama viewer." % tag)
    return result
def load_panorama_viewer(self, on_loaded_cb=None):
    """Initialize the panorama viewer widget.

    Args:
        on_loaded_cb: an optional callback function/lambda that is called
            after loading of the panorama widget completes.
    Note:
        Instantiation of the WebView is synchronous, but loading of html is
        asynchronous. For subsequently interacting with the document, pass
        a callback.
    """
    if not self.panorama_viewer_loaded:
        self.image_view = self.window.get_view()        # EogScrollView
        self.container = self.image_view.get_parent()   # its parent, GtkOverlay
        # Create the panorama widget.
        self.panorama_view = PanoramaViewer(on_loaded_cb)
        self.panorama_view.show()
        self.panorama_viewer_loaded = True
def image_to_base64(self, filepath):
    """Read an image file and return its content as a base64 data URI string.

    Args:
        filepath: an image file to read
    Returns:
        a 'data:<mime>;base64,...' string for the file's contents
    """
    with open(filepath, 'rb') as image_file:
        payload = base64.b64encode(image_file.read()).decode('ascii')
    mimetype = self.get_image_mimetype(filepath)
    return 'data:' + mimetype + ';base64,' + payload
def get_image_mimetype(self, filepath):
    """Return the MIME type used for *filepath*, keyed on its extension.

    Extensions not in the table fall back to 'image/<extension>'.
    """
    ext = os.path.splitext(filepath)[1].lower()
    # Extensions whose MIME name differs from the bare extension.
    eog_mimetypes = {
        '.bmp': 'image/x-bmp',
        '.jpg': 'image/jpg',
        '.jpeg': 'image/jpg',
        '.png': 'image/png',
        '.tif': 'image/tiff',
        '.tiff': 'image/tiff'
    }
    return eog_mimetypes.get(ext, 'image/' + ext[1:])
def show_panorama(self):
    """Show the panorama widget and hide the image viewer."""
    if self.panorama_viewer_active:
        return
    # I tried adding both widgets to the container and just toggling their
    # visibility or adding them into a Gtk.Stack, but in both cases the
    # WebView did not receive mouse events. Replacing the widgets works.
    self.container.remove(self.image_view)
    self.container.add(self.panorama_view)
    self.panorama_viewer_active = True
def hide_panorama(self):
    """Show the image viewer and hide the panorama widget."""
    if not self.panorama_viewer_active:
        return
    # Swap the widgets back (mirror image of show_panorama).
    self.container.remove(self.panorama_view)
    self.container.add(self.image_view)
    self.panorama_viewer_active = False
class PanoramaViewer(WebKit2.WebView):
#uri_panorama_viewer = 'file://' + os.path.join(self.plugin_info.get_data_dir(), 'eog_panorama.htm')
uri_panorama_viewer = 'file://' + os.path.join(os.path.dirname(os.path.realpath(__file__)), 'eog_panorama.htm')
custom_scheme = 'eogp' # This should not clash with the plugin path, otherwise it confuses WebKit.
def __init__(self, on_loaded_cb = None):
|
import sublime
import sublime_plugin
from ..core import oa_syntax, decorate_pkg_name
from ..core import ReportGenerationThread
from ...lib.packages import PackageList
###----------------------------------------------------------------------------
class PackageReportThread(ReportGenerationThread):
    """
    Generate a tabular report of all installed packages and their state.
    """
    def _process(self):
        """Build the report text and hand it to the output view."""
        pkg_list = PackageList()
        pkg_counts = pkg_list.package_counts()

        title = "{} Total Packages".format(len(pkg_list))
        t_sep = "=" * len(title)

        # Right-align every count to the width of the largest count.
        fmt = '{{:>{}}}'.format(len(str(max(pkg_counts))))
        stats = ("{0} [S]hipped with Sublime\n"
                 "{0} [I]nstalled (user) sublime-package files\n"
                 "{0} [U]npacked in Packages\\ directory\n"
                 "{0} Currently in ignored_packages\n"
                 "{0} Installed Dependencies\n").format(fmt).format(*pkg_counts)

        r_sep = "+------------------------------------------+-----+-----+-----+"

        packages = {}
        result = [title, t_sep, "", self._generation_time(), stats, r_sep]
        for pkg_name, pkg_info in pkg_list:
            packages[pkg_name] = pkg_info.status(detailed=False)
            # One row per package, flagging Shipped/Installed/Unpacked state.
            result.append(
                "| {:<40} | [{:1}] | [{:1}] | [{:1}] |".format(
                    decorate_pkg_name(pkg_info, name_only=True),
                    "S" if pkg_info.shipped_path is not None else " ",
                    "I" if pkg_info.installed_path is not None else " ",
                    "U" if pkg_info.unpacked_path is not None else " "))
        result.extend([r_sep, ""])

        self._set_content("OverrideAudit: Package Report", result, ":packages",
                          oa_syntax("OA-PkgReport"), {
                              "override_audit_report_packages": packages,
                              "context_menu": "OverrideAuditReport.sublime-menu"
                          })
###----------------------------------------------------------------------------
class OverrideAuditPackageReportCommand(sublime_plugin.WindowCommand):
    """
    Generate a tabular report of all installed packages and their state.
    """
    def run(self, force_reuse=False):
        # Kick off the report generation in a background thread; when
        # force_reuse is set, the existing report view is reused.
        PackageReportThread(self.window, "Generating Package Report",
                            self.window.active_view(),
                            force_reuse=force_reuse).start()
###----------------------------------------------------------------------------
# |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2014 Peerchemist
#
# This file is part of NuBerryPi project.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
__author__ = "Peerchemist"
__license__ = "GPL"
__version__ = "0.23"
# Standard library
import os
import sys
import json
import urllib
import platform
import argparse
from datetime import timedelta
from datetime import datetime as dt

# Third-party
import sh
from colored import fore, back, style
## Class that pulls and parses data
class pbinfo:
def system(self):
def uptime():
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime_str = str(timedelta(seconds = uptime_seconds))
return(uptime_str)
def distr():
with open('/etc/os-release', 'r') as lsb:
for line in lsb:
if line.startswith('VERSION_ID'):
return(line.split('=')[1].replace('"','').strip())
def temp():
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as temp:
return(float(temp.readline().strip())/1000)
mm = {
'nuberrypi': distr(),
'kernel release': platform.release(),
'uptime': uptime(),
'average load': os.getloadavg(),
'system_temperature': temp()
}
return(mm)
def hardware(self):
mm = {}
with open('/proc/cpuinfo') as cpuinfo:
for line in cpuinfo:
if line.startswith('Hardware'):
hardware = line.split(':')[1].strip()
if hardware == "BCM2708":
mm['hardware'] = "Raspberry Pi"
if line.startswith('Serial'):
ser = line.split(':')[1].strip()
mm['serial'] = ser
with open('/proc/cmdline', 'r') as cmdline:
for i in cmdline.readline().split():
if i.startswith('smsc95xx.macaddr'):
mm['maccaddr'] = str(i.split('=')[1])
if i.startswith('bcm2708.boardrev'):
mm['board_rev'] = str(i.split('=')[1])
return(mm)
def nud(self, argv):
get = sh.nud("getinfo", _ok_code=[0,3,5,87]).stdout
pos_diff = sh.nud("getdifficulty", _ok_code=[0,3,5,87]).stdout
try:
getinfo = json.loads(get)
pos = json.loads(pos_diff)['proof-of-stake']
getinfo["difficulty proof-of-stake"] = pos
except:
return("nud inactive")
## When posting in public, hide IP and balance.
if argv == "private":
del getinfo['balance']
del getinfo['ip']
return(getinfo)
else:
return(getinfo)
## Class that will do all the pretty printing
class box:
def default(self): ## printed when no arguments
box = {}
box['nuberrypi version'] = "v" + pbinfo.system()['nuberrypi']
box['uptime'] = pbinfo.system()['uptime']
box['nud'] = pbinfo.nud(self)
box['serial'] = pbinfo.hardware()['serial']
box['raspi_board_rev'] = pbinfo.hardware()['board_rev']
print(fore.GREEN + style.UNDERLINED + "NuBerryPi:" + style.RESET)
print(json.dumps(box, sort_keys=True, indent=4))
if box['nud'] == "nud inactive":
print(fore.RED + style.BOLD + "WARNING: nud is not running!" + style.RESET)
def public(self): ## When privacy is needed
box = {}
box['NuBerryPi:'] = "v" + pbinfo.system()['nuberrypi']
box['serial'] = pbinfo.hardware()['serial']
box['uptime'] = pbinfo.system()['uptime']
box['nud'] = pbinfo.nud('private')
print(fore.GREEN + style.UNDERLINED + "NuBerryPi:" + style.RESET)
print(json.dumps(box, sort_keys=True, indent=4))
def system(self):
box = pbinfo.system()
print(fore.GREEN + style.UNDERLINED + "NuBerryPi system info:" + style.RESET)
print(json.dumps(box, sort_keys=True, indent=4))
if box['system_temperature'] > 76:
print(fore.RED + style.BOLD + "WARNING: system temperature too high!" + style.RESET)
def all(self): ## Switch to show all
box = {}
box['system'] = pbinfo.system()
box['system'].update(pbinfo.hardware())
box['nud'] = pbinfo.nud(self)
print(json.dumps(box, sort_keys=True, indent=4))
def health(self):
report = health.check()
print "Checking if we are on the right chain..."
print "Using" + " " + style.UNDERLINED + "www.peerchain.co" + style.RESET + " as reference."
print
for k,v in report.items():
if v == True:
print(k + ":" + fore.GREEN + style.BOLD + "True" + style.RESET)
else:
print(k + ":" + fore.RED + style.BOLD + "False" + style.RESET)
print
## Checking health of blockchain
class health:
def pull(self):
url = "https://peerchain.co/api/v1/blockLatest/"
response = urllib.urlopen(url)
return(json.loads(response.read()))
def local(self):
local = {}
local["heightInt"] = int(sh.nud("getblockcount", _ok_code=[0,3,5,87]).stdout)
local["hash"] = sh.nud("getblockhash", local["heightInt"],
_ok_code=[0,3,5,87]).stdout.strip()
block_info = json.loads(sh.nud("getblock", local["hash"],
_ok_code=[0,3,5,87]).stdout)
local["prevHash"] = block_info["previousblockhash"]
local["mrkRoot"] = block_info["merkleroot"]
#timestring = block_info["time"].replace("UTC", "").strip()
#local["timeStampUnix"] = dt.strptime(timestring
# , "%Y-%m-%d %H:%M:%S").strftime("%s")
return local
def check(self):
local = self.local()
remote = self.pull()
report = {}
if remote["heightInt"] == local["heightInt"]:
report["block_count_matches"] = True
else:
report["block_count_matches"] = False
if remote["hash"] == local["hash"]:
report["last_block_hash_matches"] = True
else:
report["last_block_hash_matches"] = False
if remote["prevHash"] == local["prevHash"]:
report["previous_block_hash_matches"] = True
else:
report["previous_block_hash_matches"] = False
if remote["mrkRoot"] == local["mrkRoot"]:
report["merkle_root_matches"] = True
else:
report["merkle_root_matches"] = False
return report
# Module-level singletons (the class names are deliberately rebound to
# instances, matching the original script's style).
pbinfo = pbinfo()
box = box()
health = health()

######################### args
parser = argparse.ArgumentParser(description='Show information on NuBerryPi')
parser.add_argument('-a', '--all', help='show everything', action='store_true')
parser.add_argument('-s', '--system', help='show system information', action='store_true')
parser.add_argument('-p', '--nu', help='equal to "ppcoid getinfo"', action='store_true')
parser.add_argument('--public', help='hide private data [ip, balance, serial]', action='store_true')
parser.add_argument('-o', '--output', help='dump data to stdout, use to pipe to some other program',
                    action='store_true')
parser.add_argument('--health', help='compare local blockchain data with peerchain.co as reference',
                    action='store_true')
args = parser.parse_args()

## Default, if no arguments
if not any(vars(args).values()):
    box.default()
if args.all:
    box.all()
if args.system:
    box.system()
if args.nu:
    print(json.dumps(pbinfo.nud("self"), indent=4, sort_keys=True))
if args.public:
    box.public()
if args.output:
    # box.all() prints the collected data to stdout itself. The original
    # sys.stdout.write(box.all()) raised TypeError because box.all()
    # returns None.
    box.all()
if args.health:
    box.health()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from reportlab.pdfbase import ttfonts
from odoo import api, fields, models
from odoo.report.render.rml2pdf import customfonts
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones)
should have the same filenames, only need the code below).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
# Alternatives for the [broken] builtin PDF fonts. Default order chosen to match
# the pre-v8 mapping from odoo.report.render.rml2pdf.customfonts.CustomTTFonts.
# Format: [ (BuiltinFontFamily, mode, [AlternativeFontName, ...]), ...]
BUILTIN_ALTERNATIVES = [
('Helvetica', "normal", ["DejaVuSans", "LiberationSans"]),
('Helvetica', "bold", ["DejaVuSans-Bold", "LiberationSans-Bold"]),
('Helvetica', 'italic', ["DejaVuSans-Oblique", "LiberationSans-Italic"]),
('Helvetica', 'bolditalic', ["DejaVuSans-BoldOblique", "LiberationSans-BoldItalic"]),
('Times', 'normal', ["LiberationSerif", "DejaVuSerif"]),
('Times', 'bold', ["LiberationSerif-Bold", "DejaVuSerif-Bold"]),
('Times', 'italic', ["LiberationSerif-Italic", "DejaVuSerif-Italic"]),
('Times', 'bolditalic', ["LiberationSerif-BoldItalic", "DejaVuSerif-BoldItalic"]),
('Courier', 'normal', ["FreeMono", "DejaVuSansMono"]),
('Courier', 'bold', ["FreeMonoBold", "DejaVuSansMono-Bold"]),
('Courier', 'italic', ["FreeMonoOblique", "DejaVuSansMono-Oblique"]),
('Courier', 'bolditalic', ["FreeMonoBoldOblique", "DejaVuSansMono-BoldOblique"]),
]
class ResFont(models.Model):
    """Registry of TTF fonts found on the system.

    Records are mirrored into customfonts.CustomTTFonts (see _sync) for use
    by the RML-to-PDF rendering engine.
    """
    _name = "res.font"
    _description = 'Fonts available'
    _order = 'family,name,id'
    _rec_name = 'family'

    # family: font family name; name: concrete font name; path: file path
    # on disk; mode: style variant (as reported by the font file).
    family = fields.Char(string="Font family", required=True)
    name = fields.Char(string="Font Name", required=True)
    path = fields.Char(required=True)
    mode = fields.Char(required=True)

    _sql_constraints = [
        ('name_font_uniq', 'unique(family, name)', 'You can not register two fonts with the same name'),
    ]
@api.model
def font_scan(self, lazy=False):
    """Action of loading fonts

    In lazy mode the filesystem is scanned only when the database holds no
    fonts yet; if fonts exist but CustomTTFonts is empty, only a re-sync is
    performed. In non-lazy mode the filesystem scan is always forced.
    """
    if not lazy:
        self._scan_disk()
        return True
    # Lazy path: avoid the disk scan unless the database is empty.
    registered = self.search([('path', '!=', '/dev/null')])
    if not registered:
        # no scan yet or no font found on the system, scan the filesystem
        self._scan_disk()
    elif not customfonts.CustomTTFonts:
        # CustomTTFonts list is empty
        self._sync()
    return True
def _scan_disk(self):
"""Scan the file system and register the result in database"""
found_fonts = []
for font_path in customfonts.list_all_sysfonts():
try:
font = ttfonts.TTFontFile(font_path)
_logger.debug("Found font %s at %s", font.name, font_path)
| found_fonts.append((font.familyName, font.name, font_path, font.styleName))
except Exception, ex:
_logger.warning("Could not register Font %s: %s", font_pa | th, ex)
for family, name, path, mode in found_fonts:
if not self.search([('family', '=', family), ('name', '=', name)]):
self.create({'family': family, 'name': name, 'path': path, 'mode': mode})
# remove fonts not present on the disk anymore
existing_font_names = [name for (family, name, path, mode) in found_fonts]
# Remove inexistent fonts
self.search([('name', 'not in', existing_font_names), ('path', '!=', '/dev/null')]).unlink()
self.pool.signal_caches_change()
return self._sync()
def _sync(self):
"""Set the customfonts.CustomTTFonts list to the content of the database"""
customfonts.CustomTTFonts = []
local_family_modes = set()
local_font_paths = {}
for font in self.search([('path', '!=', '/dev/null')]):
local_family_modes.add((font.family, font.mode))
local_font_paths[font.name] = font.path
customfonts.CustomTTFonts.append((font.family, font.name, font.path, font.mode))
# Attempt to remap the builtin fonts (Helvetica, Times, Courier) to better alternatives
# if available, because they only support a very small subset of unicode
# (missing 'č' for example)
for builtin_font_family, mode, alts in BUILTIN_ALTERNATIVES:
if (builtin_font_family, mode) not in local_family_modes:
# No local font exists with that name, try alternatives
for altern_font in alts:
if local_font_paths.get(altern_font):
altern_def = (builtin_font_family, altern_font,
local_font_paths[altern_font], mode)
customfonts.CustomTTFonts.append(altern_def)
_logger.debug("Builtin remapping %r", altern_def)
break
else:
_logger.warning("No local alternative found for builtin font `%s` (%s mode)."
"Consider installing the DejaVu fonts if you have problems "
"with unicode characters in RML reports",
builtin_font_family, mode)
return True
@classmethod
def clear_caches(cls):
"""Force worker to resync at next report loading by setting an empty font list"""
customfonts.CustomTTFonts = []
return super(ResFont, cls).clear_caches()
|
import sys
import os
from distutils.core import setup
# Fail fast on Python 3: use the print() call form so this file still
# *parses* there (a bare `print` statement is a SyntaxError on Python 3,
# so the friendly message below could never be shown), and index
# version_info because the `.major` attribute only exists from Python 2.7.
if sys.version_info[0] >= 3:
    print('Sorry, currently only supports Python 2. Patches welcome!')
    sys.exit(1)

setup(
    name='browser-cookie',
    version='0.6',
    packages=['browser_cookie'],
    package_dir={'browser_cookie': '.'},  # look for package contents in current directory
    author='Richard Penman',
    author_email='richard@webscraping.com',
    description='Loads cookies from your browser into a cookiejar object so can download with urllib and other libraries the same content you see in the web browser.',
    url='https://bitbucket.org/richardpenman/browser_cookie',
    install_requires=['pycrypto', 'keyring'],
    license='lgpl'
)
|
import logging
from ask import alexa
import car_accidents
import expected_population
# Root logger for the Lambda; INFO level so slot maps and handler names
# logged below are visible in the function's logs.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler | (request_obj, context=None):
return alexa.route_request(request_obj)
@alexa.default
def default_handler(request):
    """Catch-all for requests no other handler matches."""
    logger.info('default_handler')
    message = "Sorry, I don't understand."
    return alexa.respond(message, end_session=True)
@alexa.request("LaunchRequest")
def launch_request_handler(request):
logger.info('launch_request_handler')
return alexa.respond('Ask me about any public data about Sweden.', end_session=True)
@alexa.request("SessionEndedRequest")
def session_ended_request_handler(request):
logger.info('session_ended_request_handler')
return alexa.respond('Goodbye.', end_session=True)
@alexa.intent('AMAZON.CancelIntent')
def cancel_intent_handler(request):
    """Acknowledge a cancel request and close the session."""
    logger.info('cancel_intent_handler')
    acknowledgement = 'Okay.'
    return alexa.respond(acknowledgement, end_session=True)
@alexa.intent('AMAZON.HelpIntent')
def help_intent_handler(request):
    """Tell the user what the skill can do."""
    logger.info('help_intent_handler')
    help_text = 'You can ask me about car accidents.'
    return alexa.respond(help_text, end_session=True)
@alexa.intent('AMAZON.StopIntent')
def stop_intent_handler(request):
    """Acknowledge a stop request and close the session."""
    logger.info('stop_intent_handler')
    acknowledgement = 'Okay.'
    return alexa.respond(acknowledgement, end_session=True)
@alexa.intent('CarAccidents')
def car_accidents_intent_handler(request):
    """Answer how many car accidents happened in a given city and year.

    Re-prompts (leaving the session open) when the 'city' or 'year' slot
    is missing. Previously a missing year crashed the handler, because
    ``int(None)`` raises TypeError.
    """
    logger.info('car_accidents_intent_handler')
    logger.info(request.get_slot_map())
    city = request.get_slot_value('city')
    year = request.get_slot_value('year')
    if not city:
        return alexa.respond('Sorry, which city?')
    if not year:
        # Bug fix: guard the year slot like the city slot instead of
        # letting int(None) blow up below.
        return alexa.respond('Sorry, which year?')
    num_car_acc = car_accidents.get_num_accidents(year=int(year), city=city)
    logger.info('%s accidents in %s in %s', num_car_acc, city, year)
    return alexa.respond(
        '''
        <speak>
        There were
        <say-as interpret-as="cardinal">%s</say-as>
        car accidents in %s in
        <say-as interpret-as="date" format="y">%s</say-as>,
        </speak>
        ''' % (num_car_acc, city, year),
        end_session=True, is_ssml=True)
@alexa.intent('PopulationSweden')
def population_intent_handler(request):
    """Respond with the projected population of Sweden for a given year."""
    logger.info('population_sweden_intent_handler')
    logger.info(request.get_slot_map())
    year = request.get_slot_value('year')
    # NOTE(review): no guard for a missing 'year' slot -- confirm that
    # get_expected_population tolerates None.
    population = expected_population.get_expected_population(year)
    ssml = '''
    <speak>
    in
    <say-as interpret-as="date" format="y">%s</say-as>,
    The expected population of Sweden is going to be
    <say-as interpret-as="cardinal">%s</say-as>
    </speak>
    ''' % (year, population)
    return alexa.respond(ssml, end_session=True, is_ssml=True)
@alexa.intent('WaterUsage')
def water_usage_stockholm(request):
    """Respond with Stockholm's water consumption for a given year."""
    year = request.get_slot_value('year')
    logger.info('water_usage_stockholm')
    logger.info(request.get_slot_map())
    usage = car_accidents.get_water_usage_stockholm(year)
    ssml = '''
    <speak>
    the water consumption in Stockholm in <say-as interpret-as="date" format="y">%s</say-as>,
    is <say-as interpret-as="cardinal">%s</say-as>
    </speak>
    ''' % (year, usage)
    return alexa.respond(ssml, end_session=True, is_ssml=True)
@alexa.intent('Apartments')
def housing_numbers(request):
    """Respond with the number of apartments built in Stockholm in a year.

    Bug fix: the response body contains SSML markup but the original call
    passed neither ``is_ssml=True`` nor ``end_session=True``, unlike every
    sibling intent handler in this module; without the flag the markup
    tags would be treated as plain text.
    """
    year = request.get_slot_value('year')
    logger.info('apartments')
    logger.info(request.get_slot_map())
    return alexa.respond(
        '''
        <speak>
        the number of apartments built during that year in Stockholm, is <say-as interpret-as="cardinal">%s</say-as>
        </speak>
        ''' % (car_accidents.get_num_apartments_stockholm(year)),
        end_session=True, is_ssml=True)
|
051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
    """Check consistency on dataset boston house prices."""
    regressor = AdaBoostRegressor(random_state=0)
    regressor.fit(boston.data, boston.target)
    train_score = regressor.score(boston.data, boston.target)
    assert train_score > 0.85
def test_staged_predict():
    """Check staged predictions."""
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)

    # AdaBoost classification
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        clf.fit(iris.data, iris.target, sample_weight=iris_weights)

        predictions = clf.predict(iris.data)
        staged_predictions = list(clf.staged_predict(iris.data))
        proba = clf.predict_proba(iris.data)
        staged_probas = list(clf.staged_predict_proba(iris.data))
        score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
        staged_scores = list(
            clf.staged_score(iris.data, iris.target,
                             sample_weight=iris_weights))

        # One staged result per boosting iteration; the last stage must
        # agree with the final estimator.
        assert_equal(len(staged_predictions), 10)
        assert_array_almost_equal(predictions, staged_predictions[-1])
        assert_equal(len(staged_probas), 10)
        assert_array_almost_equal(proba, staged_probas[-1])
        assert_equal(len(staged_scores), 10)
        assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(boston.data, boston.target, sample_weight=boston_weights)

    predictions = clf.predict(boston.data)
    staged_predictions = list(clf.staged_predict(boston.data))
    score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
    staged_scores = list(
        clf.staged_score(boston.data, boston.target,
                         sample_weight=boston_weights))

    assert_equal(len(staged_predictions), 10)
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert_equal(len(staged_scores), 10)
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Check that base trees can be grid-searched."""
    # AdaBoost classification
    grid = GridSearchCV(
        AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
        {'n_estimators': (1, 2),
         'base_estimator__max_depth': (1, 2),
         'algorithm': ('SAMME', 'SAMME.R')})
    grid.fit(iris.data, iris.target)

    # AdaBoost regression
    grid = GridSearchCV(
        AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                          random_state=0),
        {'n_estimators': (1, 2),
         'base_estimator__max_depth': (1, 2)})
    grid.fit(boston.data, boston.target)
def test_pickle():
    """Check pickability."""
    import pickle

    def check_roundtrip(model, data, target):
        # A serialized/deserialized estimator must keep its type and score.
        baseline = model.score(data, target)
        clone = pickle.loads(pickle.dumps(model))
        assert_equal(type(clone), model.__class__)
        assert_equal(clone.score(data, target), baseline)

    # Adaboost classifier
    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg)
        model.fit(iris.data, iris.target)
        check_roundtrip(model, iris.data, iris.target)

    # Adaboost regressor
    model = AdaBoostRegressor(random_state=0)
    model.fit(boston.data, boston.target)
    check_roundtrip(model, boston.data, boston.target)
def test_importances():
    """Check variable importances."""
    data, labels = datasets.make_classification(n_samples=2000,
                                                n_features=10,
                                                n_informative=3,
                                                n_redundant=0,
                                                n_repeated=0,
                                                shuffle=False,
                                                random_state=1)
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(data, labels)
        importances = clf.feature_importances_

        assert_equal(importances.shape[0], 10)
        # With shuffle=False the 3 informative features come first, so each
        # must be at least as important as every remaining feature.
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
def test_error():
    """Test that it gives proper exception on deficient input."""
    # Invalid constructor parameters surface at fit time.
    for bad_clf in (AdaBoostClassifier(learning_rate=-1),
                    AdaBoostClassifier(algorithm="foo")):
        assert_raises(ValueError, bad_clf.fit, X, y_class)

    # Mis-shaped sample weights are rejected too.
    assert_raises(ValueError,
                  AdaBoostClassifier().fit,
                  X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
    """Test different base estimators."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    AdaBoostClassifier(RandomForestClassifier()).fit(X, y_regr)
    AdaBoostClassifier(SVC(), algorithm="SAMME").fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor
    from sklearn.svm import SVR

    AdaBoostRegressor(RandomForestRegressor(), random_state=0).fit(X, y_regr)
    AdaBoostRegressor(SVR(), random_state=0).fit(X, y_regr)

    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    failing_clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    assert_raises_regexp(ValueError, "worse than random",
                         failing_clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
    """Base estimators without sample_weight support must be rejected."""
    from sklearn.linear_model import LinearRegression
    from sklearn.cluster import KMeans

    for base_cls in (LinearRegression, KMeans):
        assert_raises(ValueError,
                      AdaBoostClassifier(base_cls(), algorithm="SAMME").fit,
                      X, y_regr)
        assert_raises(ValueError,
                      AdaBoostRegressor(base_cls()).fit,
                      X, y_regr)
def test_sparse_classification():
"""Check classification with sparse input."""
class CustomSVC(SVC):
"""SVC variant that records | the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_ |
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
import os
from os.path import realpath, dirname, isfile, abspath
import json
import time
import uuid
from werkzeug.datastructures import FileStorage
from hackathon.constants import FILE_TYPE, HEALTH_STATUS, HEALTH
from storage import Storage
__all__ = ["LocalStorage"]
class LocalStorage(Storage):
    """Hackathon file storage that saves all files on the local disk.

    Template files are saved under
    "<src_dir>/open-hackathon-server/src/hackathon/resources" and uploaded
    images under ".../static/pic/upload".
    """

    def __init__(self):
        # Cache the base directory; all paths are generated relative to it.
        self.base_dir = self.__get_storage_base_dir()

    def save(self, context):
        """Save a file to storage.

        :type context: Context
        :param context: the execution context of file saving
        :rtype: Context
        :return: the updated context including the full path of the saved file
        """
        context = self.__generate_paths(context)
        self.__save_file(context.content, context.physical_path)
        self.log.debug("file saved at:" + context.physical_path)
        return context

    def load(self, context):
        """Load file from storage.

        :type context: Context
        :param context: the execution context of file loading
        :rtype: dict
        :return: the parsed template content, or None for any other file type
        """
        path = context.physical_path
        file_type = context.file_type
        if file_type == FILE_TYPE.TEMPLATE:
            with open(path) as template_file:
                return json.load(template_file)
        else:
            # Only templates are ever read back through the storage API.
            return None

    def delete(self, context):
        """Delete file from storage.

        :type context: Context
        :param context: the execution context of file deleting
        :rtype: bool
        :return: True if successfully deleted else False
        """
        path = context.physical_path
        if isfile(path):
            os.remove(path)
            return True
        else:
            self.log.warn("try to remove dir or non-existed file")
            return False

    def report_health(self):
        """The status of local storage should be always True."""
        return {
            HEALTH.STATUS: HEALTH_STATUS.OK
        }

    def __ensure_dir(self, file_path):
        """Make sure the directory of target file exists.

        :rtype: str
        :return: the directory part of file_path
        """
        path = dirname(file_path)
        if path and not (os.path.exists(path)):
            os.makedirs(path)
        return path

    def __save_file(self, content, path):
        """Dump file to disk.

        An existing file with the same name will be overwritten.

        :type content: file | dict | FileStorage
        :param content: content to save: an open file object, a dict
            (serialized as JSON) or a werkzeug FileStorage
        :type path: str | unicode
        :param path: the target file path
        """
        self.__ensure_dir(path)
        if isinstance(content, FileStorage):
            # Let FileStorage persist itself. The previous implementation
            # opened `path` for writing first and called content.save(path)
            # while that (truncating) handle was still open on the same file.
            content.save(path)
        else:
            with open(path, 'w') as f:
                if isinstance(content, dict):
                    json.dump(content, f)
                elif isinstance(content, file):
                    f.write(content.read())

    def __get_storage_base_dir(self):
        """Get the base directory of storage."""
        return "%s/.." % dirname(realpath(__file__))

    def __generate_paths(self, context):
        """Generate file new name, physical path and uri.

        :type context: Context
        :param context: execution context
        :return: updated context
        """
        hackathon_name = context.hackathon_name if "hackathon_name" in context else None
        # replace file_name with new random name
        context.file_name = self.__generate_file_name(context.file_name, hackathon_name)
        # Bug fix: hackathon_name was computed but never forwarded, so the
        # per-hackathon sub-directory in __generate_physical_path was dead code.
        context.physical_path = self.__generate_physical_path(
            context.file_name, context.file_type, hackathon_name)
        context.url = self.__generate_url(context.physical_path, context.file_type)
        return context

    def __generate_url(self, physical_path, file_type):
        """Return the http URI of file.

        It's for local storage only and the uploaded images must be in dir /static.

        :type physical_path: str | unicode
        :param physical_path: the absolute physical path of the file
        :type file_type: str | unicode
        :param file_type: type of file which decides the directories where file is saved.
        :rtype: str
        :return: public accessible URI, or "" for non-image files
        """
        # only uploaded images need an URI.
        # example: http://localhost:15000/static/pic/upload/win10-201456-1234.jpg
        if file_type == FILE_TYPE.HACK_IMAGE:
            i = physical_path.index("static")
            path = physical_path[i:]
            return self.util.get_config("endpoint") + "/" + path
        return ""

    def __generate_physical_path(self, file_name, file_type, hackathon_name=None):
        """Return the physical path of file including directory and file name.

        :type file_name: str | unicode
        :param file_name: the original file name
        :type file_type: str | unicode
        :param file_type: type of file which decides the directories where file is saved.
        :type hackathon_name: str | unicode
        :param hackathon_name: optional hackathon used as an extra sub-directory
        :rtype: str
        :return: physical path of the file to be saved
        """
        if file_type == FILE_TYPE.HACK_IMAGE:
            path = "%s/static/pic/upload%s/%s/%s" % (
                self.__get_storage_base_dir(),
                "/" + hackathon_name if hackathon_name else "",
                time.strftime("%Y%m%d"),
                file_name)
            return abspath(path)
        return abspath("%s/resources/lib/%s" % (
            self.__get_storage_base_dir(),
            file_name))

    def __generate_file_name(self, origin_name, hackathon_name=None):
        """Generate a random file name.

        :type origin_name: str | unicode
        :param origin_name: the origin name of file
        :type hackathon_name: str | unicode
        :param hackathon_name: name of hackathon related to this file
        :rtype: str
        :return: a random file name which includes hackathon_name and time as parts
        """
        if not hackathon_name:
            hackathon_name = ""
        extension = os.path.splitext(origin_name)[1]
        new_name = "%s-%s-%s%s" % (
            hackathon_name,
            time.strftime("%Y%m%d"),
            str(uuid.uuid1())[0:8],
            extension
        )
        return new_name.strip('-')
|
from layer import *
class TanhLayer(Layer):
    """Layer of tanh units for the deepnet (cudamat-backed) framework."""
    def __init__(self, *args, **kwargs):
        super(TanhLayer, self).__init__(*args, **kwargs)

    @classmethod
    def IsLayerType(cls, proto):
        # Factory dispatch hook: this class handles layer protos that
        # request the TANH activation.
        return proto.hyperparams.activation == deepnet_pb2.Hyperparams.TANH

    def ApplyActivation(self):
        # In-place tanh over the layer state.
        cm.tanh(self.state)

    def Sample(self):
        # Stochastic sample of the tanh activations into self.sample
        # (cudamat's bernoulli-tanh sampler).
        self.state.sample_bernoulli_tanh(target=self.sample)

    def ComputeDeriv(self):
        """Compute derivative w.r.t input given derivative w.r.t output."""
        # Scales self.deriv by the tanh derivative computed from the
        # already-activated self.state (presumably 1 - state^2 -- confirm
        # against cudamat's apply_tanh_deriv).
        self.deriv.apply_tanh_deriv(self.state)
        if self.hyperparams.dropout:
            # Zero out derivatives of dropped units with the forward mask.
            self.deriv.mult(self.mask)

    def GetLoss(self, get_deriv=False, **kwargs):
        """Computes loss.

        Computes the loss function. Assumes target is in self.data and predictions
        are in self.state.

        Args:
          get_deriv: If True, computes the derivative of the loss function w.r.t the
            inputs to this layer and puts the result in self.deriv.

        Returns:
          deepnet_pb2.Metrics carrying the batch count and squared error.

        Raises:
          Exception: if the configured loss function is not SQUARED_LOSS.
        """
        perf = deepnet_pb2.Metrics()
        perf.MergeFrom(self.proto.performance_stats)
        perf.count = self.batchsize
        if self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
            # deriv = prediction - target; squared euclidean norm is the loss.
            self.state.subtract(self.data, target=self.deriv)
            error = self.deriv.euclid_norm()**2
            perf.error = error
            if get_deriv:
                self.ComputeDeriv()
        else:
            raise Exception('Unknown loss function for tanh units.')
        return perf

    def GetSparsityDivisor(self):
        # Computes 1 - means^2 elementwise via in-place cudamat ops:
        #   means_temp  = 1 - means
        #   means_temp2 = (1 + means) * (1 - means) = 1 - means^2
        # The statement order matters: each op reuses the previous result.
        self.means_temp2.assign(1)
        self.means_temp2.subtract(self.means, target=self.means_temp)
        self.means_temp2.add(self.means)
        self.means_temp2.mult(self.means_temp)
        return self.means_temp2
|
from mock import patch, call
import mock
from lxml import etree
from kiwi.sol | ver.repository.rpm_md import SolverRepositoryRpmMd
from kiwi.solver.repository.base import SolverRepositoryBase
class TestSolverRepositoryRpmMd:
    """Tests for the rpm-md flavored solver repository."""
    def setup(self):
        self.xml_data = etree.parse('../data/repomd.xml')
        self.uri = mock.Mock()
        self.solver = SolverRepositoryRpmMd(self.uri)

    @patch.object(SolverRepositoryBase, 'download_from_repository')
    @patch.object(SolverRepositoryBase, '_create_solvables')
    @patch.object(SolverRepositoryBase, '_create_temporary_metadata_dir')
    @patch.object(SolverRepositoryBase, '_get_repomd_xml')
    def test__setup_repository_metadata(
        self, mock_xml, mock_mkdtemp, mock_create_solvables,
        mock_download_from_repository
    ):
        mock_mkdtemp.return_value = 'metadata_dir.XX'
        mock_xml.return_value = self.xml_data

        self.solver._setup_repository_metadata()

        # primary and other metadata referenced by repomd.xml get downloaded
        expected_downloads = [
            call(
                'repodata/55f95a93-primary.xml.gz',
                'metadata_dir.XX/55f95a93-primary.xml.gz'
            ),
            call(
                'repodata/0815-other.xml.gz',
                'metadata_dir.XX/0815-other.xml.gz'
            )
        ]
        assert mock_download_from_repository.call_args_list == expected_downloads

        # both rpm-md and comps converters run over the metadata dir
        expected_solvable_calls = [
            call('metadata_dir.XX', 'rpmmd2solv'),
            call('metadata_dir.XX', 'comps2solv')
        ]
        assert mock_create_solvables.call_args_list == expected_solvable_calls

    @patch.object(SolverRepositoryBase, '_get_repomd_xml')
    def test_timestamp(self, mock_xml):
        mock_xml.return_value = self.xml_data
        assert self.solver.timestamp() == '1478352191'
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Numdiff(AutotoolsPackage):
    """Numdiff is a little program that can be used to compare putatively
    similar files line by line and field by field, ignoring small numeric
    differences or/and different numeric formats."""

    homepage = 'https://www.nongnu.org/numdiff'
    url = 'http://nongnu.askapache.com/numdiff/numdiff-5.8.1.tar.gz'

    maintainers = ['davydden']

    version('5.9.0', '794461a7285d8b9b1f2c4a8149889ea6')
    version('5.8.1', 'a295eb391f6cb1578209fc6b4f9d994e')

    variant('nls', default=False,
            description="Enable Natural Language Support")
    variant('gmp', default=False,
            description="Use GNU Multiple Precision Arithmetic Library")

    depends_on('gettext', when='+nls')
    depends_on('gmp', when='+gmp')

    def configure_args(self):
        """Assemble ./configure flags from the selected variants."""
        spec = self.spec
        args = ['--enable-nls' if '+nls' in spec else '--disable-nls']
        if '+gmp' in spec:
            # Compile with -O0 as per the upstream known issue combining
            # optimization with GMP;
            # https://launchpad.net/ubuntu/+source/numdiff/+changelog
            # http://www.nongnu.org/numdiff/#issues
            # Keep this variant off by default as one may still encounter
            # "GNU MP: Cannot allocate memory (size=2305843009206983184)"
            args.extend(['--enable-gmp', 'CFLAGS=-O0'])
        else:
            args.append('--disable-gmp')
        return args
|
'''
Test the sslheaders plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test sslheaders plugin.
'''
Test.SkipUnless(
Condition.HasCurlFeature('http2'),
)
Test.Disk.File('sslheaders.log').Content = 'sslheaders.gold'
server = Test.MakeOriginServer("server", options={'--load': Test.TestDirectory + '/observer.py'})
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts = Test.MakeATSProcess("ts", select_ports=False)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
# ts.addSSLfile("ssl/signer.pem")
ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'http',
'proxy.config.http.cache.http': 0, # Make sure each request is forwarded to the origin server.
'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name.
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': (
'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'
.format(ts.Variables.port, ts.Variables.ssl_port)),
# 'proxy.config.ssl.client.verify.server': 0,
# 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
# 'proxy.config.url_remap.pristine_host_hdr' : 1,
# 'proxy.config.ssl.client.certification_level': 2,
# 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
# 'proxy.config.ssl.TLSv1_3': 0
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.rem | ap_config.AddLine(
'map http://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.ssl_server_name_yaml.AddLines([
'- fqdn: "*bar.com"',
' verify_client: STRICT',
])
ts.Disk.plugin_c | onfig.AddLine(
'sslheaders.so SSL-Client-ID=client.subject'
)
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.Processes.Default.Command = (
'curl -H "SSL-Client-ID: My Fake Client ID" --verbose --ipv4 --insecure --header "Host: bar.com"' +
' https://localhost:{}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
|
    def __init__(self, recid):
        # recid: identifier of the record whose attached files this
        # resource serves (used by the nested getfile handler below).
        self.recid = recid
def _lookup(self, component, path):
# after /<CFG_SITE_RECORD>/<recid>/files/ every part is used as the file
# name
filename = component
def getfile(req, form):
args = wash_urlargd(form, bibdocfile_templates.files_default_urlargd)
ln = args['ln']
_ = gettext_set_language(ln)
uid = getUid(req)
user_info = collect_user_info(req)
verbose = args['verbose']
if verbose >= 1 and not isUserSuperAdmin(user_info):
# Only SuperUser can see all the details!
verbose = 0
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE > 1:
return page_not_authorized(req, "/%s/%s" % (CFG_SITE_RECORD, self.recid),
navmenuid='submit')
if record_exists(self.recid) < 1:
msg = "<p>%s</p>" % _("Requested record does not seem to exist.")
return warning_page(msg, req, ln)
if record_empty(self.recid):
msg = "<p>%s</p>" % _("Requested record does not seem to have been integrated.")
return warning_page(msg, req, ln)
(auth_code, auth_message) = check_user_can_view_record(user_info, self.recid)
if auth_code and user_info['email'] == 'guest':
if webjournal_utils.is_recid_in_released_issue(self.recid):
# We can serve the file
pass
else:
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : ln, 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
if webjournal_utils.is_recid_in_released_issue(self.recid):
# We can serve the file
pass
else:
return page_not_authorized(req, "../", \
text = auth_message)
readonly = CFG_ACCESS_CONTROL_LEVEL_SITE == 1
# From now on: either the user provided a specific file
# name (and a possible version), or we return a list of
# all the available files. In no case are the docids
# visible.
try:
bibarchive = BibRecDocs(self.recid)
except InvenioBibDocFileError:
register_exception(req=req, alert_admin=True)
msg = "<p>%s</p><p>%s</p>" % (
_("The system has encountered an error in retrieving the list of files for this document."),
_("The error has been logged and will be taken in consideration as soon as possible."))
return warning_page(msg, req, ln)
if bibarchive.deleted_p():
req.status = apache.HTTP_GONE
return warning_page(_("Requested record does not seem to exist."), req, ln)
docname = ''
docformat = ''
version = ''
warn = ''
if filename:
# We know the complete file name, guess which docid it
# refers to
## TODO: Change the extension system according to ext.py from setlink
## and have a uniform extension mechanism...
docname = file_strip_ext(filename)
docformat = filename[len(docname):]
if docformat and docformat[0] != '.':
docformat = '.' + docformat
if args['subformat']:
docformat += ';%s' % args['subformat']
else:
docname = args['docname']
if not docformat:
| docformat = args['format']
if args['subformat']:
docformat += | ';%s' % args['subformat']
if not version:
version = args['version']
## Download as attachment
is_download = False
if args['download']:
is_download = True
# version could be either empty, or all or an integer
try:
int(version)
except ValueError:
if version != 'all':
version = ''
display_hidden = isUserSuperAdmin(user_info)
if version != 'all':
# search this filename in the complete list of files
for doc in bibarchive.list_bibdocs():
if docname == bibarchive.get_docname(doc.id):
try:
try:
docfile = doc.get_file(docformat, version)
except InvenioBibDocFileError, msg:
req.status = apache.HTTP_NOT_FOUND
if not CFG_INSPIRE_SITE and req.headers_in.get('referer'):
## There must be a broken link somewhere.
## Maybe it's good to alert the admin
register_exception(req=req, alert_admin=True)
warn += write_warning(_("The format %s does not exist for the given version: %s") % (cgi.escape(docformat), cgi.escape(str(msg))))
break
(auth_code, auth_message) = docfile.is_restricted(user_info)
if auth_code != 0 and not is_user_owner_of_record(user_info, self.recid):
if CFG_BIBDOCFILE_ICON_SUBFORMAT_RE.match(get_subformat_from_format(docformat)):
return stream_restricted_icon(req)
if user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action('viewrestrdoc', {'status' : docfile.get_status()})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : ln, 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
redirect_to_url(req, target)
else:
req.status = apache.HTTP_UNAUTHORIZED
warn += write_warning(_("This file is restricted: ") + str(auth_message))
break
if not docfile.hidden_p():
if not readonly:
ip = str(req.remote_ip)
doc.register_download(ip, docfile.get_version(), docformat, uid)
try:
return docfile.stream(req, download=is_download)
except InvenioBibDocFileError, msg:
register_exception(req=req, alert_admin=True)
req.status = apache.HTTP_INTERNAL_SERVER_ERROR
warn += write_warning(_("An error has happened in trying to stream the request file."))
else:
req.status = apache.HTTP_UNAUTHORIZED
warn += write_warning(_("The requested file is hidden and can not be accessed."))
except InvenioBibDocFileError, msg:
register_exception(req=req, alert_admin=True)
if docname and docformat and not warn:
req.status = apache.HTTP_NOT_FOUND
warn += write_warning(_("Re |
ount: ' + str( from_satoshi( amount )) + ', max: ' + str( max_currency_value ))
btc_fee=response_dict['fee'][0]
if float(btc_fee)<0 or float( from_satoshi(btc_fee))>max_currency_value:
return (None, 'Invalid fee: ' + str( from_satoshi( amount )) + ', max: ' + str( max_currency_value ))
currency=response_dict['currency'][0]
if currency=='OMNI':
currency_id=1
else:
if currency=='T-OMNI':
currency_id=2
else:
if currency=='BTC':
currency_id=0
else:
if currency[:2] == 'SP':
currency_id=int(currency[2:])
else:
| return (None, 'Invalid currency')
marker_addr=None
try:
marker=response_dict['marker'][0]
if marker.lower( | )=='true':
marker_addr=exodus_address
except KeyError:
# if no marker, marker_addr stays None
pass
if pubkey == None:
tx_to_sign_dict={'transaction':'','sourceScript':''}
l=len(from_addr)
if l == 66 or l == 130: # probably pubkey
if is_pubkey_valid(from_addr):
pubkey=from_addr
response_status='OK'
else:
response_status='invalid pubkey'
else:
if not is_valid_bitcoin_address(from_addr):
response_status='invalid address'
else:
from_pubkey=bc_getpubkey(from_addr)
if not is_pubkey_valid(from_pubkey):
response_status='missing pubkey'
else:
pubkey=from_pubkey
response_status='OK'
try:
if pubkey != None:
tx_to_sign_dict=prepare_send_tx_for_signing( pubkey, to_addr, marker_addr, currency_id, amount, btc_fee)
else:
# hack to show error on page
tx_to_sign_dict['sourceScript']=response_status
response='{"status":"'+response_status+'", "transaction":"'+tx_to_sign_dict['transaction']+'", "sourceScript":"'+tx_to_sign_dict['sourceScript']+'"}'
print "Sending unsigned tx to user for signing", response
return (response, None)
except Exception as e:
print "error creating unsigned tx", e
return (None, str(e))
# simple send and bitcoin send (with or without marker)
def prepare_send_tx_for_signing(from_address, to_address, marker_address, currency_id, amount, btc_fee=500000):
    """Assemble an unsigned transaction for later client-side signing.

    from_address   -- sender address, or a hex pubkey (detected by leading '0')
    to_address     -- recipient address
    marker_address -- optional extra dust output (None to omit)
    currency_id    -- 0 for plain BTC; otherwise a Mastercoin currency id,
                      encoded via a BIP11 1-of-2 multisig data output
    amount, btc_fee -- satoshi values, passed in as strings (see int() below)

    Returns/raises: raises Exception on utxo-service errors or insufficient
    funds; the visible portion ends after building/logging the tx via mktx().
    """
    print '*** send tx for signing, amount: ' + amount
    print '    btc_fee: ' + btc_fee
    # consider a more general func that covers also sell offer and sell accept
    # check if address or pubkey was given as from address
    if from_address.startswith('0'): # a pubkey was given
        from_address_pub=from_address
        from_address=get_addr_from_key(from_address)
    else: # address was given
        # NOTE(review): addrPub is assigned here but never used afterwards.
        from_address_pub=addrPub=bc_getpubkey(from_address)
    from_address_pub=from_address_pub.strip()
    # set change address to from address
    change_address_pub=from_address_pub
    changeAddress=from_address
    satoshi_amount=int( amount )
    fee=int( btc_fee )
    # differ bitcoin send and other currencies
    if currency_id == 0: # bitcoin
        # normal bitcoin send
        required_value=satoshi_amount
        # if marker is needed, allocate dust for the marker
        if marker_address != None:
            required_value+=1*dust_limit
    else:
        tx_type=0 # only simple send is supported
        # 4 dust outputs: exodus, recipient, and double-dust data output
        required_value=4*dust_limit
    #------------------------------------------- New utxo calls
    fee_total_satoshi=required_value+fee
    dirty_txes = bc_getutxo( from_address, fee_total_satoshi )
    # bc_getutxo reports errors in-band via the 'error' prefix string
    if (dirty_txes['error'][:3]=='Con'):
        raise Exception({ "status": "NOT OK", "error": "Couldn't get list of unspent tx's. Response Code: " + dirty_txes['code'] })
    if (dirty_txes['error'][:3]=='Low'):
        raise Exception({ "status": "NOT OK", "error": "Not enough funds, try again. Needed: " + str(fee_total_satoshi) + " but Have: " + dirty_txes['avail'] })
    inputs_total_value = dirty_txes['avail']
    inputs = dirty_txes['utxos']
    #------------------------------------------- Old utxo calls
    # get utxo required for the tx
    #utxo_all=get_utxo(from_address, required_value+fee)
    #utxo_split=utxo_all.split()
    #inputs_number=len(utxo_split)/12
    #inputs=[]
    #inputs_total_value=0
    #if inputs_number < 1:
    #    info('Error not enough BTC to generate tx - no inputs')
    #    raise Exception('This address must have enough BTC for protocol transaction fees and miner fees')
    #for i in range(inputs_number):
    #    inputs.append(utxo_split[i*12+3])
    #    try:
    #        inputs_total_value += int(utxo_split[i*12+7])
    #    except ValueError:
    #        info('Error parsing utxo, '+ str(utxo_split) )
    #        raise Exception('Error: parsing inputs was invalid, do you have enough BTC?')
    #inputs_outputs='/dev/stdout'
    #for i in inputs:
    #    inputs_outputs+=' -i '+i
    #---------------------------------------------- End Old utxo calls
    # Build the mktx argument string: each utxo as '-i txid:vout'.
    inputs_outputs='/dev/stdout'
    for i in inputs:
        inputs_outputs+=' -i '+str(i[0])+':'+str(i[1])
    # calculate change
    change_value=inputs_total_value-required_value-fee
    if change_value < 0:
        info('Error not enough BTC to generate tx - negative change')
        raise Exception('This address must have enough BTC for miner fees and protocol transaction fees')
    if currency_id == 0: # bitcoin
        # create a normal bitcoin transaction (not mastercoin)
        # dust to marker if required
        # amount to to_address
        # change to change
        if marker_address != None:
            inputs_outputs+=' -o '+marker_address+':'+str(dust_limit)
        inputs_outputs+=' -o '+to_address+':'+str(satoshi_amount)
    else:
        # create multisig tx
        # simple send - multisig
        # dust to exodus
        # dust to to_address
        # double dust to rawscript "1 [ change_address_pub ] [ dataHex_obfuscated ] 2 checkmultisig"
        # change to change
        dataSequenceNum=1
        # Mastercoin payload: version, seq, tx type, currency, amount, padding.
        dataHex = '{:02x}'.format(0) + '{:02x}'.format(dataSequenceNum) + \
            '{:08x}'.format(tx_type) + '{:08x}'.format(currency_id) + \
            '{:016x}'.format(satoshi_amount) + '{:06x}'.format(0)
        dataBytes = dataHex.decode('hex_codec')
        dataAddress = hash_160_to_bc_address(dataBytes[1:21])
        # create the BIP11 magic
        change_address_compressed_pub=get_compressed_pubkey_format( change_address_pub )
        obfus_str=get_sha256(from_address)[:62]
        # NOTE(review): the [2:] slice on the zero padding plus the later
        # zfill(62) looks fragile — verify the padded length really matches
        # get_compressed_pubkey_format's output before touching this.
        padded_dataHex=dataHex[2:]+''.zfill(len(change_address_compressed_pub)-len(dataHex))[2:]
        # XOR-obfuscate the payload with the sender hash, then wrap it so it
        # parses as a compressed pubkey (prefix '02' + random trailing byte).
        dataHex_obfuscated=get_string_xor(padded_dataHex,obfus_str).zfill(62)
        random_byte=hex(random.randrange(0,255)).strip('0x').zfill(2)
        hacked_dataHex_obfuscated='02'+dataHex_obfuscated+random_byte
        info('plain dataHex: --'+padded_dataHex+'--')
        info('obfus dataHex: '+hacked_dataHex_obfuscated)
        # nudge the fake pubkey until it is a valid curve point
        valid_dataHex_obfuscated=get_nearby_valid_pubkey(hacked_dataHex_obfuscated)
        info('valid dataHex: '+valid_dataHex_obfuscated)
        script_str='1 [ '+change_address_pub+' ] [ '+valid_dataHex_obfuscated+' ] 2 checkmultisig'
        info('change address is '+changeAddress)
        info('too_address is '+to_address)
        info('total inputs value is '+str(inputs_total_value))
        info('fee is '+str(fee))
        info('dust limit is '+str(dust_limit))
        info('BIP11 script is '+script_str)
        dataScript=rawscript(script_str)
        inputs_outputs+=' -o '+exodus_address+':'+str(dust_limit) + \
            ' -o '+to_address+':'+str(dust_limit) + \
            ' -o '+dataScript+':'+str(2*dust_limit)
    if change_value >= dust_limit:
        inputs_outputs+=' -o '+changeAddress+':'+str(change_value)
    else:
        # under dust limit leave all remaining as fees
        pass
    tx=mktx(inputs_outputs)
    info('inputs_outputs are '+inputs_outputs)
    info('parsed tx is '+str(get_json_tx(tx)))
|
from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
from PyQt5.Qt import QSystemTrayIcon, QIcon
class TrayIcon(QSystemTrayIcon):
    """System tray icon exposing Qt signals/slots for a QML/JS bridge.

    Re-publishes QSystemTrayIcon's activation and message-click events as
    `onactivate` / `onmessageclick` signals, and wraps the common setters
    as string-typed slots.
    """
    # Index -> human-readable name for QSystemTrayIcon.ActivationReason values.
    ActivationReason = ['Unknown', 'Context', 'DoubleClick', 'Trigger', 'MiddleClick']
    onactivate = pyqtSignal(int, str)
    onmessageclick = pyqtSignal()
    def __init__(self, parent, toolTip = '', icon = ''):
        super(TrayIcon, self).__init__(parent)
        self.setObjectName('trayIcon')
        self.setIcon(icon)
        self.setToolTip(toolTip)
        # Forward Qt's native signals through our own re-published ones.
        self.activated.connect(self.activateHandler)
        self.messageClicked.connect(self.onmessageclick)
    # Slots
    # Set the tooltip text.
    @pyqtSlot(str)
    def setToolTip(self, toolTip):
        super(TrayIcon, self).setToolTip(toolTip)
    # Set the icon from a path; falls back to the parent window's icon.
    @pyqtSlot(str)
    def setIcon(self, icon):
        if icon:
            icon = QIcon(icon)
        else:
            icon = self.parent().windowIcon()
        super(TrayIcon, self).setIcon(QIcon(icon))
    # Set the right-click context menu.
    @pyqtSlot(QObject)
    def setContextMenu(self, menu):
        super(TrayIcon, self).setContextMenu(menu)
    # Whether the tray icon is currently visible.
    @pyqtSlot(result = bool)
    def isVisible(self):
        return super(TrayIcon, self).isVisible()
    # Whether balloon messages are supported on this platform.
    @pyqtSlot(result = bool)
    def supportsMessages(self):
        return super(TrayIcon, self).supportsMessages()
    # Whether a system tray is available on this platform.
    @pyqtSlot(result = bool)
    def isSystemTrayAvailable(self):
        return super(TrayIcon, self).isSystemTrayAvailable()
    # Show a tray balloon message
    # showMessage
    # Set visibility
    # setVisible
    # Show
    # show
    # Hide
    # hide
    # Signals
    def activateHandler(self, reason):
        # Emit both the numeric reason and its readable name.
        self.onactivate.emit(reason, TrayIcon.ActivationReason[reason])
|
b+n,a+1,\frac{1-x}{2}\right).
Note that this definition generalizes to nonintegral values
of `n`. When `n` is an integer, the hypergeometric series
terminates after a finite number of terms, giving
a polynomial in `x`.
**Evaluation of Jacobi polynomials**
A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> jacobi(4, 0.5, 0.25, 1)
2.4609375
>>> binomial(4+0.5, 4)
2.4609375
A Jacobi polynomial of degree `n` is equal to its
Taylor polynomial of degree `n`. The explicit
coefficients of Jacobi polynomials can therefore
be recovered easily using :func:`~mpmath.taylor`::
>>> for n in range(5):
... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
...
[1.0]
[-0.5, 2.5]
[-0.75, -1.5, 5.25]
[0.5, -3.5, -3.5, 10.5]
[0.625, 2.5, -11.25, -7.5, 20.625]
For nonintegral `n`, the Jacobi "polynomial" is no longer
a polynomial::
>>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
[0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
**Orthogonality**
The Jacobi polynomials are orthogonal on the interval
`[-1, 1]` with respect to the weight function
`w(x) = (1-x)^a (1+x)^b`. That is,
`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
zero if `m \ne n` and to a nonzero number if `m = n`.
The orthogonality is easy to verify using numerical
quadrature::
>>> P = jacobi
>>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
>>> a = 2
>>> b = 3
>>> m, n = 3, 4
>>> chop(quad(f, [-1, 1]), 1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.9047619047619
**Differential equation**
The Jacobi polynomials are solutions of the differential
equation
.. math ::
(1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
We can verify that :func:`~mpmath.jacobi` approximately satisfies
this equation::
>>> from mpmath import *
>>> mp.dps = 15
>>> a = 2.5
>>> b = 4
>>> n = 3
>>> y = lambda x: jacobi(n,a,b,x)
>>> x = pi
>>> A0 = n*(n+a+b+1)*y(x)
>>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
>>> A2 = (1-x**2)*diff(y,x,2)
>>> nprint(A2 + A1 + A0, 1)
4.0e-12
The difference of order `10^{-12}` is as close to zero as
it could be at 15-digit working precision, since the terms
are large::
>>> A0, A1, A2
(26560.2328981879, -21503.7641037294, -5056.46879445852)
"""
legendre = r"""
``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
The Legendre polynomials are given by the formula
.. math ::
P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
Alternatively, they can be computed recursively using
.. math ::
P_0(x) = 1
P_1(x) = x
(n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
A third definition is in terms of the hypergeometric function
`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
.. math ::
P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
**Plots**
.. literalinclude :: /plots/legendre.py
.. image :: /plots/legendre.png
**Basic evaluation**
The Legendre polynomials assume fixed values at the points
`x = -1` and `x = 1`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([legendre(n, 1) for n in range(6)])
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> nprint([legendre(n, -1) for n in range(6)])
[1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
The coefficients of Legendre polynomials can be recovered
using degree-`n` Taylor expansion::
>>> for n in range(5) | :
... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-0.5, 0.0, 1.5]
[0.0, -1.5, 0.0, 2.5]
[0.375, 0.0, -3.75, 0.0, 4.375]
The roots of Legendre polynomials are located symmetrically
on the interval `[-1, 1]`::
>>> for n in range(5):
... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
...
[]
[0.0]
[-0.577 | 35, 0.57735]
[-0.774597, 0.0, 0.774597]
[-0.861136, -0.339981, 0.339981, 0.861136]
An example of an evaluation for arbitrary `n`::
>>> legendre(0.75, 2+4j)
(1.94952805264875 + 2.1071073099422j)
**Orthogonality**
The Legendre polynomials are orthogonal on `[-1, 1]` with respect
to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
>>> m, n = 3, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.222222222222222
**Differential equation**
The Legendre polynomials satisfy the differential equation
.. math ::
((1-x^2) y')' + n(n+1) y = 0.
We can verify this numerically::
>>> n = 3.6
>>> x = 0.73
>>> P = legendre
>>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
>>> B = n*(n+1)*P(n,x)
>>> nprint(A+B,1)
9.0e-16
"""
legenp = r"""
Calculates the (associated) Legendre function of the first kind of
degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the first kind, `P_n(z)`. The parameters may be
complex numbers.
In terms of the Gauss hypergeometric function, the (associated) Legendre
function is defined as
.. math ::
P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
With *type=3* instead of *type=2*, the alternative
definition
.. math ::
\hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
is used. These functions correspond respectively to ``LegendreP[n,m,2,z]``
and ``LegendreP[n,m,3,z]`` in Mathematica.
The general solution of the (associated) Legendre differential equation
.. math ::
(1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0
is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants
`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the
second kind as implemented by :func:`~mpmath.legenq`.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenp(2, 0, 10); legendre(2, 10)
149.5
149.5
>>> legenp(-2, 0.5, 2.5)
(1.972260393822275434196053 - 1.972260393822275434196053j)
>>> legenp(2+3j, 1-j, -0.5+4j)
(-3.335677248386698208736542 - 5.663270217461022307645625j)
>>> chop(legenp(3, 2, -1.5, type=2))
28.125
>>> chop(legenp(3, 2, -1.5, type=3))
-28.125
Verifying the associated Legendre differential equation::
>>> n, m = 2, -0.5
>>> C1, C2 = 1, -3
>>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z)
>>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \
... (n*(n+1)-m**2/(1-z**2))*f(z)
>>> for z in [0, 2, -1.5, 0.5+2j]:
... chop(deq(mpmathify(z)))
...
0.0
0.0
0.0
0.0
"""
legenq = r"""
Calculates the (associated) Legendre function of the second kind of
degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the second kind, `Q_n(z)`. The parameters may be
complex numbers.
The Legendre functions of the second kind give a second set of
solutions to the (associated) Legendre differential equation.
(See :func:`~mpmath.legenp`.)
Unlike the Legendre functions of the first kind, they are not
polynomials of `z` for integer `n`, `m` but rational or logarithmic
functions with poles at `z = \pm 1`.
There are various ways to define Legendre functions of
the second kind, giving rise to different complex structure.
A version can be selected using the *type* keyword argument.
The *type=2* and *type=3* functions are given respectively by
.. math ::
Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)}
\left( \cos(\pi m) P_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right)
\hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m}
\left( \hat{P}_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right)
where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions
of the first kind. The formulas above should be understood as limits
wh |
# -*- coding: utf-8 -*-
from gensim.models import word2vec
from gensim import models
import jieba
import codecs
import io
from collections import Counter
import operator
import numpy
# Keyword extraction: segment a Chinese article with jieba, then rank words
# by their average word2vec similarity to "friends" (words with cosine
# similarity >= 0.6), and finally deduplicate near-synonyms (>= 0.7).
f = codecs.open("target_article.txt",'r','utf8')
content = f.readlines()
article = []
jieba.set_dictionary('jieba_dict/dict.txt.big')
model = models.Word2Vec.load_word2vec_format('med250.model.bin',binary=True)
# import stopword
stopwordset = set()
with io.open('jieba_dict/stopwords.txt','r',encoding='utf-8') as sw:
    for line in sw:
        stopwordset.add(line.strip('\n'))
# Cut The Words , Output: short words in article
for line in content:
    seg_list = jieba.cut(line)
    for gg in seg_list:
        if gg not in stopwordset:
            article.append(gg)
# Count frequency (keep only tokens the word2vec model knows)
raw_data = Counter(article)
raw_data = { key:raw_data[key] for key in raw_data if key in model.vocab}
# NOTE(review): low_level (1% of the total token count) is computed but never
# used anywhere below — dead code or an unfinished frequency cutoff.
low_level = 0
for key in raw_data:
    low_level += raw_data[key]
low_level = int(round(low_level*0.01))
# Initial Accumalation
words = []
# NOTE(review): acc_data is only referenced by the commented-out line below.
acc_data = dict()
map_words = []
related_word = dict()
for keys in raw_data:
    words.append(keys)
#    acc_data[keys] = 0
# Pick up the Friends: for each word, collect words with similarity >= 0.6
# (every word is its own friend, so each list is non-empty).
for word_1 in words:
    cand_words = []
    for word_2 in words:
        if model.similarity(word_1, word_2) >= 0.6:
            cand_words.append(word_2)
    map_words.append(cand_words)
# Score each word by the summed pairwise similarity among its friends,
# normalized by the squared friend count.
for i in range(len(map_words)):
    friend_list = map_words[i]
    value = 0.0
    for friend_1 in friend_list:
        for friend_2 in friend_list:
            if friend_1 == friend_2:
                continue
            value += model.similarity(friend_1, friend_2)
    leng = len(friend_list)
    related_word[words[i]] = value/float(leng*leng)
# Sort words by score, highest first, and show the top 20.
s_imp_words = sorted(related_word.items(), key=operator.itemgetter(1), reverse=True)
for i in s_imp_words[:20]:
    print i[0]
print "-----------------------"
#print s_imp_words
# for value in output:
#     if value[1] == 0.0:
#         continue
#     print value[0], value[1]
# print "-----------------------"
# Greedy dedup: mark any lower-ranked word too similar (>= 0.7) to a kept one.
keywords = []
fg = numpy.zeros(len(s_imp_words))
for i in range(len(s_imp_words)):
    if fg[i] == 1:
        continue
    for j in range(i+1,len(s_imp_words)):
        if fg[j] != 1:
            if model.similarity(s_imp_words[i][0], s_imp_words[j][0]) >= 0.7:
                fg[j] = 1
    keywords.append(s_imp_words[i])
    #print s_imp_words[i][0]
for i in keywords[:10]:
    print i[0]
# with io.open("target_keywords.txt",'w',encoding='utf-8') as output:
#     for text in keywords:
#         output.write(text + '\n')
|
self.height, self.width = self.win.getmaxyx()
self.name = name
self.help = [_("+/- to Increase and decrease volume"),
_("./, to Increase and decrease right volume"),
_("</> to Increase and decrease left volume"),
_("m to Mute"),
_("K to kill the steram")]
self.selected_item = 0
self.max_item = 0
self.playback = getattr(co, stream_type)()
self.streams = []
self.type_of_info = None
self.info_window_data = None
def resize_window(self, win):
self.win = win
self.height, self.width = self.win.getmaxyx()
def _update_info_window(self, pid):
if self.type_of_info == 'p':
self.info_window_data = self.playback.properties(pid)
elif self.type_of_info == 'i':
self.info_window_data = self.playback.info(pid)
elif self.type_of_info == 'H':
self.info_window_data = self.help
def update(self, char):
if self.selected_item > self.max_item:
self.selected_item = self.max_item
if char in (ord('H'), ):
self.type_of_info = 'H'
self.info_window_data = self.help
elif char in (ord('c'), ):
self.type_of_info = None
self.info_window_data = None
elif self.streams:
pid = self.streams[self.selected_item][1]
self._update_info_window(pid)
if char in (ord('+'), ):
self.playback.increase_volume(pid)
elif char in (ord('-'), ):
self.playback.decrease_volume(pid)
elif char in (ord('m'),):
self.playback.mute(pid)
elif char in (ord('>'), ):
self.playback.increase_left_volume(pid)
elif char in (ord('.'), ):
self.playback.increase_right_volume(pid)
elif char in (ord('<'), ):
self.playback.decrease_left_volume(pid)
elif char in (ord(','), ):
self.playback.decrease_right_volume(pid)
elif char in (ord('p'), ):
self.type_of_info = 'p'
self.info_window_data = self.playback.properties(pid)
elif char in (ord('i'), ):
self.type_of_info = 'i'
self.info_window_data = self.playback.info(pid)
elif char in (ord('K'), ):
self.playback.kill(pid)
elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
self.selected_item -= 1
elif (char in (KEY_DOWN, ord('j')) and
self.selected_item < self.max_item):
self.selected_item += 1
    def draw(self):
        """Redraw the stream list; highlight the selection, then overlay the
        info window (help/properties/info) if one is active."""
        self.streams = self.playback.playing()
        line_number = 0
        self.win.erase()
        self.win.box()
        for line_number, stream in enumerate(self.streams):
            # Streams come as 5-tuples (stereo: left+right volume) or
            # 4-tuples (mono: single volume).
            if len(stream) == 5:
                (app_name,
                 app_pid,
                 volume_left,
                 volume_right,
                 mute) = stream
                line = '[%s] L:%i%% R:%i%% (%s)' % (app_name, volume_left,
                                                    volume_right, app_pid)
            else:
                (app_name,
                 app_pid,
                 volume_left,
                 mute) = stream
                line = '[%s] M:%i%% (%s)' % (app_name, volume_left, app_pid)
            if mute:
                line = '%s [M]' % (line)
            # Selected row is drawn with the highlight color pair.
            if self.selected_item == line_number:
                self.win.addstr(line_number + 1, 1, line, curses.color_pair(1))
            else:
                self.win.addstr(line_number + 1, 1, line)
        # Last enumerated index (stays 0 when the list is empty).
        self.max_item = line_number
        if self.info_window_data:
            # draw_info_window is a module-level helper defined elsewhere.
            draw_info_window(self.win, self.info_window_data)
        self.win.refresh()
class TabPlayback(GenericStream):
    """Tab showing the playback streams."""
    def __init__(self, win):
        GenericStream.__init__(self, win, 'Playback', _('Playback'))
class TabRecord(GenericStream):
    """Tab showing the recording streams."""
    def __init__(self, win):
        GenericStream.__init__(self, win, 'Record', _('Record'))
class GenericDevice(object):
    def __init__(self, win, device_type, name):
        """Set up a device tab drawing into curses window *win*.

        device_type names the backend class looked up on the `co` module
        (e.g. a sink/source wrapper); *name* is the tab's display name.
        """
        self.win = win
        self.height, self.width = self.win.getmaxyx()
        self.name = name
        # Key-binding help shown by the 'H' key.
        self.help = [_("+/- to Increase and decrease volume"),
                     _("./, to Increase and decrease right volume"),
                     _("</> to Increase and decrease left volume"),
                     _("m to Mute")]
        self.selected_item = 0
        self.max_item = 0
        # Instantiate the backend controller for this device type.
        self.device = getattr(co, device_type)()
        self.devices = []
        # Info panel state: 'p' properties, 'i' info, 'H' help, None hidden.
        self.type_of_info = None
        self.info_window_data = None
def resize_window(self, win):
self.win = win
self.height, self.width = self.win.getmaxyx()
def _update_info_window(self, info):
if self.type_of_info == 'p':
self.info_window_data = self.device.properties(info)
elif self.type_of_info == 'i':
self.info_window_data = self.device.info(info)
elif self.type_of_info == 'H':
self.info_window_data = self.help
    def update(self, char):
        """Handle one keypress for the device list.

        'H' shows help, 'c' clears the info panel; the remaining keys act
        on the currently selected device and are ignored when no devices
        are present.
        """
        # Keep the cursor inside the (possibly shrunken) device list.
        if self.selected_item > self.max_item:
            self.selected_item = self.max_item
        if char in (ord('H'), ):
            self.type_of_info = 'H'
            self.info_window_data = self.help
        elif char in (ord('c'), ):
            self.type_of_info = None
            self.info_window_data = None
        elif self.devices:
            name = self.devices[self.selected_item][0]
            self._update_info_window(name)
            if char in (ord('+'), ):
                self.device.increase_volume(name)
            elif char in (ord('-'), ):
                self.device.decrease_volume(name)
            elif char in (ord('m'),):
                self.device.mute(name)
            elif char in (ord('>'), ):
                self.device.increase_left_volume(name)
            elif char in (ord('.'), ):
                self.device.increase_right_volume(name)
            elif char in (ord('<'), ):
                self.device.decrease_left_volume(name)
            elif char in (ord(','), ):
                self.device.decrease_right_volume(name)
            elif char in (ord('p'), ):
                self.type_of_info = 'p'
                self.info_window_data = self.device.properties(name)
            elif char in (ord('i'), ):
                self.type_of_info = 'i'
                self.info_window_data = self.device.info(name)
            elif char in (ord('n'), ):
                self.device.change_port_next(name)
            # BUG(review): this branch is unreachable — ord('p') is already
            # consumed by the properties branch above, so
            # change_port_previous can never fire. It needs a distinct key.
            elif char in (ord('p'), ):
                self.device.change_port_previous(name)
            elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
                self.selected_item -= 1
            elif (char in (KEY_DOWN, ord('j')) and
                  self.selected_item < self.max_item):
                self.selected_item += 1
def draw(self):
self.devices = self.device.get_devices()
line_number = 0
self.win.erase()
self.win.box()
for line_number, device in enumerate(self.devices):
if len(device) == 5:
(device_name,
volume_left,
volume_right,
mute,
port) = device
line = '[%s] L:%i%% R:%i%%' % (
device_name.split('.')[-1].capitalize(),
volume_left, volume_right)
else:
(device_name,
volume,
mute,
port) = device
line = '[%s] M:%i%%' % (
device_name.split('.')[-1].capitalize(),
volume)
if port:
str_port = ''
for i in port:
if i[0] == True:
str_port = '%s (%s)' % (
|
ue; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
# Make the bundled ansible tree and the local Sphinx extensions importable.
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath(os.path.join('..', '_extensions')))
# We want sphinx to document the ansible modules contained in this repository,
# not those that may happen to be installed in the version
# of Python used to run sphinx. When sphinx loads in order to document,
# the repository version needs to be the one that is loaded:
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
VERSION = 'devel'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TEST: 'sphinxcontrib.fulltoc'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013-2018 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
# OBSOLETE - removing this - dharmabumstead 2018-02-06
# exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Default language for code blocks without an explicit language.
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
# |br| is useful for formatting fields inside of tables
# |_| is a nonbreaking space; similarly useful inside of tables
rst_epilog = """
.. |br| raw:: html
<br>
.. |_| unicode:: 0xA0
:trim:
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'sphinx_rtd_theme'
html_short_title = 'Ansible Documentation'
html_theme_options = {
'canonical_url': "https://docs.ansible.com/ansible/latest/",
'collapse_navigation': "True",
'vcs_pageview_mode': 'edit'
}
html_context = {
'display_github': 'True',
'github_user': 'ansible',
'github_repo': 'ansible',
'github_version': 'devel/docs/docsite/rst/',
'github_module_version': 'devel/lib/ansible/modules/',
'current_version': version,
'latest_version': '2.8',
# list specifically out of order to make latest work
'available_versions': ('latest', '2.7', '2.6', 'devel'),
'css_files': ('_static/ansible.css', # overrides to the standard theme
),
}
# The style sheet to use for HTML and HTML Help pages | . A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. | If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Configuration for sphinx-notfound-pages
# with no 'notfound_template' and no 'notfound_context' set,
# the extension builds 404.rst into a location-agnostic 404 page
#
# default is `en` - using this for the sub-site:
notfound_default_language = "ansible"
# default is `latest`:
# setting explicitly - docsite serves up /ansible/latest/404.html
# so keep this set to `latest` even on the `devel` branch
# then no maintenance is needed when we branch a new stable_x.x
notfound_default_version = "latest"
# makes default setting explicit:
notfound_no_urls_prefix = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_u |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
# type: ignore
"""Interface to MTG-LI L2 product NetCDF files
The reader is based on preliminary test data provided by EUMETSAT.
The data description is described in the
"LI L2 Product User Guide [LIL2PUG] Draft version" documentation.
"""
import logging
from datetime import datetime
import h5netcdf
import numpy as np
from pyresample import geometry
# FIXME: This is not xarray/dask compatible
# TODO: Once migrated to xarray/dask, remove ignored path in setup.cfg
from satpy.dataset import Dataset
from satpy.readers.file_handlers import BaseFileHandler
# Module-level logger, named after the module per satpy reader convention.
logger = logging.getLogger(__name__)
class LIFileHandler(BaseFileHandler):
    """MTG LI L2 file reader.

    Opens the NetCDF container with h5netcdf, reads the reference grid
    geometry from the 'grid_position' variable, and serves products
    re-gridded onto an (nlines, ncols) array.
    """
    def __init__(self, filename, filename_info, filetype_info):
        super(LIFileHandler, self).__init__(filename, filename_info, filetype_info)
        # The file handle stays open for the lifetime of the handler.
        self.nc = h5netcdf.File(self.filename, 'r')
        # Get grid dimensions from file
        refdim = self.nc['grid_position'][:]
        # Get number of lines and columns
        self.nlines = int(refdim[2])
        self.ncols = int(refdim[3])
        # NOTE(review): consulted in get_dataset() but never written to, so
        # this cache is currently dead code — populate it or remove it.
        self.cache = {}
        logger.debug('Dimension : {}'.format(refdim))
        logger.debug('Row/Cols: {} / {}'.format(self.nlines, self.ncols))
        logger.debug('Reading: {}'.format(self.filename))
        logger.debug('Start: {}'.format(self.start_time))
        logger.debug('End: {}'.format(self.end_time))
    @property
    def start_time(self):
        # Sensing start, parsed from the 'sensing_start' global attribute
        # (YYYYMMDDhhmmss).
        return datetime.strptime(self.nc.attrs['sensing_start'], '%Y%m%d%H%M%S')
    @property
    def end_time(self):
        # NOTE(review): reads 'end_time' while start_time reads
        # 'sensing_start' — presumably that asymmetry matches the test data
        # attributes; confirm against a real file.
        return datetime.strptime(self.nc.attrs['end_time'], '%Y%m%d%H%M%S')
    def get_dataset(self, key, info=None, out=None):
        """Load the product selected by key['name'] onto the reference grid.

        Values are scattered onto an (nlines, ncols) NaN-filled grid at the
        row/column indices stored in the file, flipped and rotated to the
        reference orientation, masked where no data fell, and written into
        *out* in place (both .data and .mask), which is also returned.

        NOTE(review): *out* defaults to None but is dereferenced
        unconditionally, so callers must always supply it — confirm intent.
        *info* is accepted for interface compatibility and unused here.
        """
        if key in self.cache:
            return self.cache[key]
        # Type dictionary
        typedict = {"af": "flash_accumulation",
                    "afa": "accumulated_flash_area",
                    "afr": "flash_radiance",
                    "lgr": "radiance",
                    "lef": "radiance",
                    "lfl": "radiance"}
        # Get lightning data out of NetCDF container
        logger.debug("Key: {}".format(key['name']))
        # Create reference grid
        grid = np.full((self.nlines, self.ncols), np.NaN)
        # Get product values
        values = self.nc[typedict[key['name']]]
        rows = self.nc['row']
        cols = self.nc['column']
        logger.debug('[ Number of values ] : {}'.format((len(values))))
        logger.debug('[Min/Max] : <{}> / <{}>'.format(np.min(values),
                                                      np.max(values)))
        # Convert xy coordinates to flatten indices
        ids = np.ravel_multi_index([rows, cols], grid.shape)
        # Replace NaN values with data
        np.put(grid, ids, values)
        # Correct for bottom left origin in LI row/column indices.
        rotgrid = np.flipud(grid)
        # Rotate the grid by 90 degree clockwise
        rotgrid = np.rot90(rotgrid, 3)
        logger.warning("LI data has been rotated to fit to reference grid. \
                       Works only for test dataset")
        # Mask invalid values
        ds = np.ma.masked_where(np.isnan(rotgrid), rotgrid)
        # Create dataset object
        out.data[:] = np.ma.getdata(ds)
        out.mask[:] = np.ma.getmask(ds)
        out.info.update(key.to_dict())
        return out
    def get_area_def(self, key, info=None):
        """Create AreaDefinition for specified product.
        Projection information are hard coded for 0 degree geos projection
        Test dataset doesn't provide the values in the file container.
        Only fill values are inserted.
        """
        # TODO Get projection information from input file
        a = 6378169.
        h = 35785831.
        b = 6356583.8
        lon_0 = 0.
        # area_extent = (-5432229.9317116784, -5429229.5285458621,
        #                5429229.5285458621, 5432229.9317116784)
        area_extent = (-5570248.4773392612, -5567248.074173444,
                       5567248.074173444, 5570248.4773392612)
        proj_dict = {'a': float(a),
                     'b': float(b),
                     'lon_0': float(lon_0),
                     'h': float(h),
                     'proj': 'geos',
                     'units': 'm'}
        area = geometry.AreaDefinition(
            'LI_area_name',
            "LI area",
            'geosli',
            proj_dict,
            self.ncols,
            self.nlines,
            area_extent)
        # Keep the last computed area for later reference.
        self.area = area
        logger.debug("Dataset area definition: \n {}".format(area))
        return area
|
#!/usr/bin/env python3
import unittest
import tempfile
from lofar.common.dbcredentials import *
def setUpModule():
    """Module-level unittest fixture: nothing to prepare."""
def tearDownModule():
    """Module-level unittest fixture: nothing to clean up."""
class TestCredentials(unittest.TestCase):
    """Defaults of a freshly constructed Credentials object."""
    def test_default_values(self):
        c = Credentials()
        self.assertEqual(c.type, "postgres")
        self.assertEqual(c.host, "localhost")
        self.assertEqual(c.port, 0)
        # The user default presumably depends on the environment (login
        # name), which is why it is deliberately not pinned here — confirm.
        #self.assertEqual(c.user, "")
        self.assertEqual(c.password, "")
        self.assertEqual(c.database, "")
    def test_pg_connect_options(self):
        # NOTE(review): the default port is asserted as 0 above but -1 here —
        # presumably pg_connect_options() maps "unset" (0) to -1; confirm.
        c = Credentials()
        self.assertEqual(
            c.pg_connect_options(),
            { "host": "localhost",
              "port": -1,
              "user": c.user,
              "passwd": "",
              "dbname": "",
            })
class TestDBCredentials(unittest.TestCase):
    """Store behaviour of DBCredentials: set/get, listing, and file parsing."""
    def test_set_get(self):
        # A credential stored under a symbolic name must round-trip intact.
        dbc = DBCredentials(filepatterns=[])
        c_in = Credentials()
        c_in.host = "example.com"
        c_in.port = 1234
        c_in.user = "root"
        c_in.password = "secret"
        c_in.database = "mydb"
        dbc.set("DATABASE", c_in)
        c_out = dbc.get("DATABASE")
        self.assertEqual(str(c_out), str(c_in))
    def test_get_non_existing(self):
        # Unknown section names must raise, not return a default.
        dbc = DBCredentials(filepatterns=[])
        with self.assertRaises(DBCredentials.NoSectionError):
            dbc.get("UNKNOWN")
    def test_list(self):
        dbc = DBCredentials(filepatterns=[])
        c = Credentials()
        c.host = "foo"
        dbc.set("FOO", c)
        c = Credentials()
        c.host = "bar"
        dbc.set("BAR", c)
        self.assertEqual(sorted(dbc.list()), ["BAR", "FOO"])
    def test_config(self):
        # Credentials written as an ini-style [database:NAME] section must be
        # parsed back from disk.
        f = tempfile.NamedTemporaryFile()
        f.write(b"""
[database:DATABASE]
type = postgres
host = example.com
port = 1234
user = root
password = secret
database = mydb
""")
        f.flush() # don't close since that will delete the TemporaryFile
        # test if DATABASE is there
        dbc = DBCredentials(filepatterns=[f.name])
        self.assertEqual(dbc.list(), ["DATABASE"])
        # test if credentials match with what we've written
        c_in = Credentials()
        c_in.host = "example.com"
        c_in.port = 1234
        c_in.user = "root"
        c_in.password = "secret"
        c_in.database = "mydb"
        c_out = dbc.get("DATABASE")
        self.assertEqual(str(c_out), str(c_in))
    def test_freeform_config_option(self):
        # Options that are not standard credential fields must survive in the
        # free-form .config mapping.
        f = tempfile.NamedTemporaryFile()
        f.write(b"""
[database:DATABASE]
foo = bar
test = word word
""")
        f.flush() # don't close since that will delete the TemporaryFile
        # extract our config
        dbc = DBCredentials(filepatterns=[f.name])
        c_out = dbc.get("DATABASE")
        # test if the free-form config options got through
        self.assertEqual(c_out.config["foo"], "bar")
        self.assertEqual(c_out.config["test"], "word word")
def main(argv):
    """Run the unittest test runner (argv is accepted but unused)."""
    unittest.main()
if __name__ == "__main__":
    # Run the whole test suite when invoked as a script.
    import sys
    main(sys.argv[1:])
|
ex | ec(open("tmp<caret>.txt").re | ad()) |
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language govern | ing permissions and limitations
# under the License.
# The maximum value a signed INT type may have
DB_MAX_INT = 0x7FFFFFFF
# The cinder services binaries and topics' names
API_BINARY = "cinder-api"
SCHEDULER_BINARY = "cinder-scheduler"
VOLUME_BINARY = "cinder-volume"
BACKUP_BINARY = "cinder-backup"
# RPC topic names mirror the binary names one-to-one.
SCHEDULER_TOPIC = SCHEDULER_BINARY
VOLUME_TOPIC = VOLUME_BINARY
BACKUP_TOPIC = BACKUP_BINARY
# Presumably the set of binaries whose log handling is managed — confirm use.
LOG_BINARIES = (SCHEDULER_BINARY, VOLUME_BINARY, BACKUP_BINARY, API_BINARY)
# The encryption key ID used by the legacy fixed-key ConfKeyMgr
FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000'
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
# Load extensions
(
sh % "cat"
<< r"""
[extensions]
arcconfig=$TESTDIR/../edenscm/hgext/extlib/phabricator/arcconfig.py
arcdiff=
"""
>> "$HGRCPATH"
)
# Diff with no revision
sh % "hg init repo"
sh % "cd repo"
sh % "touch foo"
sh % "hg add foo"
sh % "hg ci -qm 'No rev'"
sh % "hg diff --since-last-submit" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
# Fake a diff
sh % "echo bleet" > "foo"
sh % "hg ci -qm 'Differential Revision: https://phabricator.fb.com/D1'"
sh % "hg diff --since-last-submit" == r"""
abort: no .arcconfig found
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: no .arcconfig found
[255]"""
# Prep configuration
sh % "echo '{}'" > ".arcrc"
sh % 'echo \'{"config" : {"default" : "https://a.com/api"}, "hosts" : {"https://a.com/api/" : { "user" : "testuser", "oauth" : "garbage_cert"}}}\'' > ".arcconfig"
# Now progressively test the response handling for variations of missing data
sh % "cat" << r"""
[{}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"differential_diffs": {"count": 3},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
# This is the case when the diff is up to date with the current commit;
# there is no diff since what was landed.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"2e6531b7dada2a3e5638e136de05f51e94a427f4\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "2e6531b7dada2a3e5638e136de05f51e94a427f4 Differential Revision: https://phabricator.fb.com/D1"
# This is the case when the diff points at our parent commit, we expect to
# see the bleet text show up. There's a fake ha | sh that I've injected into
# the commit list returned from our mocked phabricator; it is present to
# assert that we order the commits co | nsistently based on the time field.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"88dd5a13bf28b99853a24bddfc93d4c44e07c6bd\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit-2o" == r"""
Phabricator rev: 88dd5a13bf28b99853a24bddfc93d4c44e07c6bd
Local rev: 2e6531b7dada2a3e5638e136de05f51e94a427f4 (.)
Changed: foo
| ...
| +bleet"""
# Make a new commit on top, and then use -r to look at the previous commit
sh % "echo other" > "foo"
sh % "hg commit -m 'Other commmit'"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates -r 2e6531b" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(2e6531b)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
from __future__ import absolute_import, division
import time
import struct
from zope.interface import implementer
from twisted.internet import protocol, interfaces
from twisted.python.compat import _PY3
class Echo(protocol.Protocol):
    """Echo service (RFC 862): write every received byte straight back."""

    def dataReceived(self, data):
        """Mirror *data* back to the peer."""
        self.transport.write(data)
class Discard(protocol.Protocol):
    """Discard service (RFC 863): silently drop all received data."""

    def dataReceived(self, data):
        """Ignore *data* entirely."""
@implementer(interfaces.IProducer)
class Chargen(protocol.Protocol):
    """Character-generator service (RFC 864): stream repeating noise."""

    noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'

    def connectionMade(self):
        """Register as a push producer so the transport drives output."""
        self.transport.registerProducer(self, 0)

    def resumeProducing(self):
        """Write one chunk of noise whenever the transport wants more."""
        self.transport.write(self.noise)

    def pauseProducing(self):
        """Nothing to pause; writes happen only in resumeProducing()."""

    def stopProducing(self):
        """No resources to release."""
class QOTD(protocol.Protocol):
    """Quote-of-the-day service (RFC 865)."""

    def connectionMade(self):
        """Send the quote, then close the connection."""
        self.transport.write(self.getQuote())
        self.transport.loseConnection()

    def getQuote(self):
        """Return a quote. May be overridden in subclasses."""
        return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
    """Active-users service (RFC 866)."""

    def connectionMade(self):
        """Send the user listing, then close the connection."""
        self.transport.write(self.getUsers())
        self.transport.loseConnection()

    def getUsers(self):
        """Return the active-user listing. Override in subclasses."""
        return "root\r\n"
class Daytime(protocol.Protocol):
    """Daytime service (RFC 867): send the UTC time in ASCII, then hang up."""

    def connectionMade(self):
        now = time.asctime(time.gmtime(time.time()))
        self.transport.write(now + '\r\n')
        self.transport.loseConnection()
class Time(protocol.Protocol):
    """Time service (RFC 868): machine-readable time, then disconnect."""

    def connectionMade(self):
        # Packs the epoch seconds into a signed 32-bit big-endian slot,
        # exactly as the original did (overflows past 2038).
        stamp = struct.pack("!i", int(time.time()))
        self.transport.write(stamp)
        self.transport.loseConnection()
__all__ = ["Echo", "Discard", "Chargen", "QOTD", "Who", "Daytime", "Time"]
# On Python 3 only Echo is exported; the remaining protocols are pruned from
# both __all__ and the module namespace (they write str to the transport,
# which presumably is why they were not ported — confirm).
if _PY3:
    __all3__ = ["Echo"]
    for name in __all__[:]:
        if name not in __all3__:
            __all__.remove(name)
            del globals()[name]
    del name, __all3__
|
# Time: O(n) ~ O(n^2)
# Space: O(1)
from random import randint
class Solution:
    # @param {integer[]} nums
    # @param {integer} k
    # @return {integer}
    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums (1-indexed, 1 <= k <= len).

        In-place quickselect with a random pivot: O(n) average time,
        O(n^2) worst case, O(1) extra space. Reorders nums as a side effect.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            pivot_idx = randint(left, right)
            new_pivot_idx = self.PartitionAroundPivot(left, right, pivot_idx, nums)
            if new_pivot_idx == k - 1:
                return nums[new_pivot_idx]
            elif new_pivot_idx > k - 1:
                right = new_pivot_idx - 1
            else:  # new_pivot_idx < k - 1.
                left = new_pivot_idx + 1

    def PartitionAroundPivot(self, left, right, pivot_idx, nums):
        """Partition nums[left..right] so elements greater than the pivot
        come first; return the pivot's final index."""
        pivot_value = nums[pivot_idx]
        new_pivot_idx = left
        # Park the pivot at the right edge while partitioning.
        nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
        # range() instead of the original xrange(): xrange was removed in
        # Python 3, and range() behaves identically here on Python 2 as well.
        for i in range(left, right):
            if nums[i] > pivot_value:
                nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                new_pivot_idx += 1
        nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
        return new_pivot_idx
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, traceback, Ice, threading, time, os
import IceStorm
# Ctrl+c handling
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Qt interface
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSvg import *
# Check that RoboComp has been correctly detected
ROBOCOMP = ''
try:
    ROBOCOMP = os.environ['ROBOCOMP']
except:
    pass
if len(ROBOCOMP)<1:
    print 'ROBOCOMP environment variable not set! Exiting.'
    sys.exit()
# Load the Slice interface definitions and generate their Python bindings.
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRPublish.ice")
import RoboCompASRPublish
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRCommand.ice")
import RoboCompASRCommand
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRComprehension.ice")
import RoboCompASRComprehension
class MainClass(object):
    """Turns recognised speech text into ASRCommand messages.

    The first whitespace-separated token becomes the command action and any
    remaining tokens its complements; the command is then published on the
    ASRCommand topic.
    """
    def __init__(self, commandTopic):
        # (Spanish) "This class could be the main class of the program."
        print 'Esta clase podria ser la clase principal del programa'
        # Publisher proxy for the ASRCommand topic.
        self.commandTopic = commandTopic
    def newText(self, text, current=None):
        """Handle one recognised utterance ('current' is the unused Ice context)."""
        # (Spanish) "We received ..."
        print 'Nos ha llegado', text
        command = RoboCompASRCommand.Command()
        partes = text.split()
        if len(partes) > 0:
            command.action = partes[0]
            if len(partes) > 1:
                command.complements = partes[1:]
                print 'Action', command.action, '(', command.complements,')'
            else:
                print 'Action', command.action
            self.commandTopic.newCommand(command)
        else:
            # (Spanish) "Empty command?"
            print 'Comando vacio?'
    def mode(self, text):
        """Entry point for the ASRComprehension interface; only logs the text."""
        # (Spanish) "Arrived through the ASRComprehension interface."
        print 'Nos llega por la interfaz ASRComprehension', text
class ASRPublishTopicI (RoboCompASRPublish.ASRPublish):
    """Ice servant forwarding ASRPublish.newText events to the handler."""
    def __init__(self, _handler):
        self.handler = _handler
    def newText(self, text, current=None):
        # 'current' is the Ice invocation context (unused).
        self.handler.newText(text)
class ASRComprehensionI (RoboCompASRComprehension.ASRComprehension):
    """Ice servant forwarding ASRComprehension.mode calls to the handler."""
    def __init__(self, _handler):
        self.handler = _handler
    def mode(self, text, current=None):
        # 'current' is the Ice invocation context (unused).
        self.handler.mode(text)
class Server (Ice.Application):
    """Ice application wiring the ASR topics together.

    Publishes to the ASRCommand topic, subscribes to ASRPublishTopic, and
    exposes an ASRComprehension servant; blocks until shutdown.
    """
    def run (self, argv):
        # NOTE(review): 'status' is assigned on errors but never returned, so
        # Ice always sees a default exit status — consider 'return status'.
        status = 0
        try:
            # Proxy to publish ASRCommand
            proxy = self.communicator().getProperties().getProperty("IceStormProxy")
            obj = self.communicator().stringToProxy(proxy)
            topicManager = IceStorm.TopicManagerPrx.checkedCast(obj)
            try:
                topic = False
                topic = topicManager.retrieve("ASRCommand")
            except:
                pass
            # Retrieve-or-create loop: tolerates races with other clients
            # creating the same topic concurrently.
            while not topic:
                try:
                    topic = topicManager.retrieve("ASRCommand")
                except IceStorm.NoSuchTopic:
                    try:
                        topic = topicManager.create("ASRCommand")
                    except:
                        print 'Another client created the ASRCommand topic... ok'
            pub = topic.getPublisher().ice_oneway()
            commandTopic = RoboCompASRCommand.ASRCommandPrx.uncheckedCast(pub)
            mainObject = MainClass(commandTopic)
            # Subscribe to ASRPublishTopic
            proxy = self.communicator().getProperties().getProperty( "IceStormProxy")
            topicManager = IceStorm.TopicManagerPrx.checkedCast(self.communicator().stringToProxy(proxy))
            adapterT = self.communicator().createObjectAdapter("ASRPublishTopic")
            asrTopic = ASRPublishTopicI(mainObject)
            proxyT = adapterT.addWithUUID(asrTopic).ice_oneway()
            ASRPublishTopic_subscription = False
            # Keep retrying until the publisher side has created the topic.
            while not ASRPublishTopic_subscription:
                try:
                    topic = topicManager.retrieve("ASRPublishTopic")
                    qos = {}
                    topic.subscribeAndGetPublisher(qos, proxyT)
                    adapterT.activate()
                    ASRPublishTopic_subscription = True
                except IceStorm.NoSuchTopic:
                    print "Error! No topic found! Sleeping for a while..."
                    time.sleep(1)
            print 'ASRPublishTopic subscription ok'
            # Implement ASRComprehension
            asrcomprehensionI = ASRComprehensionI(mainObject)
            adapterASRComprehension = self.communicator().createObjectAdapter('ASRComprehension')
            adapterASRComprehension.add(asrcomprehensionI, self.communicator().stringToIdentity('asrcomprehension'))
            adapterASRComprehension.activate()
            # Block until shutdown is requested.
            self.communicator().waitForShutdown()
        except:
            traceback.print_exc()
            status = 1
        # Tear the communicator down regardless of how run() ended.
        if self.communicator():
            try:
                self.communicator().destroy()
            except:
                traceback.print_exc()
                status = 1
# Instantiate and run the Ice application (blocks until shutdown).
Server( ).main(sys.argv)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.client import Client
class CinderHealth(object):
    """Thin health-check wrapper around the Cinder volume API.

    Every call returns an (http_status, message, payload) tuple instead of
    raising, so callers can treat failures as data.
    """
    def __init__(self, creds):
        # creds: keyword arguments accepted by cinderclient.client.Client.
        self.cinderclient = Client(**creds)

    def cinder_list(self):
        """Return (200, "success", volumes) or (404, error text, [])."""
        try:
            cinder_list = self.cinderclient.volumes.list()
        except Exception as e:
            # str(e) instead of e.message: Exception.message was removed in
            # Python 3 (deprecated since 2.6); str(e) yields the same text
            # for single-argument exceptions.
            return (404, str(e), [])
        return (200, "success", cinder_list)

    def cinder_volume_create(self, volume_name, volume_size):
        """Create a volume; return (200, "success", volume) or (404, error text, [])."""
        try:
            cinder_ret = self.cinderclient.volumes.create(volume_size,
                                                          name=volume_name)
        except Exception as e:
            return (404, str(e), [])
        return (200, "success", cinder_ret)

    def cinder_volume_delete(self, volume_id):
        """Delete a volume; return (200, "success", result) or (404, error text, [])."""
        try:
            cinder_ret = self.cinderclient.volumes.delete(volume_id)
        except Exception as e:
            return (404, str(e), [])
        return (200, "success", cinder_ret)
|
'''
Created on 30-07-2014
@author: mateusz
'''
from threading import Thread
import gumtreeofferparser as Parser
from injectdependency import Injec | t, InjectDependency
@InjectDependency('urlfetcher')
class OfferFetcher(Thread):
    """Worker thread: pull offer URLs from inQueue, push parsed offers to outQueue."""

    urlfetcher = Inject  # filled in by @InjectDependency

    def __init__(self, inQueue, outQueue):
        Thread.__init__(self, name="OfferFetcher")
        self.inQueue = inQueue
        self.outQueue = outQueue

    def run(self):
        # Endless consume loop — acceptable because the thread runs as a daemon.
        while True:
            url = self.inQueue.get()
            self.outQueue.put(self.getOffer(url))
            self.inQueue.task_done()

    def getOffer(self, url):
        """Fetch *url*, parse it into an offer dict, and tag it with its URL."""
        document = self.urlfetcher.fetchDocument(url)
        offer = Parser.extractOffer(document)
        offer["url"] = url
        return offer
from syslog import syslog
# Name under which this alert-handler module is registered.
module_name = "Syslog"
# Module configuration; "prefix" is prepended to every forwarded message.
# Presumably overridden by the host application before alerts arrive — confirm.
config = {
    "prefix": "Default Prefix"
}
def handle_alert(message):
    """Forward *message* to the system log, prefixed per the module config."""
    prefix = config["prefix"]
    syslog("{} - {}".format(prefix, message))
| |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 20:42:20 2016
@author: haell
"""
def dicho(f, a, b, epsilon):
    """Locate a root of f on [a, b] by bisection.

    Requires f(a) and f(b) to bracket a root and epsilon > 0. Halves the
    bracket until it is no wider than 2*epsilon, printing the bracket state
    each step, and returns (midpoint of final bracket, iteration count).
    """
    assert f(a) * f(b) <= 0 and epsilon > 0
    lo, hi = a, b
    flo, fhi = f(lo), f(hi)
    steps = 0
    while hi - lo > 2 * epsilon:
        steps += 1
        mid = (lo + hi) / 2.
        fmid = f(mid)
        if flo * fmid > 0:
            lo, flo = mid, fmid
        else:
            hi, fhi = mid, fmid
        print(hi, lo, fhi, flo)
    return (lo + hi) / 2., steps
# Demo run: bisect the larger root (~8e7) of 1e-8*x^2 - 0.8*x + 1e-8 on
# [7e7, 9e7] to a tolerance of 1e-8.
print(dicho(lambda x : x*x*10**(-8) - 4*x / 5 + 10**(-8), 7*10**7, 9*10**7, 10**-8))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permi | ssions and limitations
# under the License.
import pyarrow as pa
import pyarrow.types as types
def test_is_boolean():
    """bool_() is boolean; an integer type is not."""
    bool_type = pa.bool_()
    assert types.is_boolean(bool_type)
    assert not types.is_boolean(pa.int8())
def test_is_integer():
    """All integer widths are integers, split by signedness; floats are not."""
    signed = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
    unsigned = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
    for dtype in signed:
        assert types.is_integer(dtype)
        assert types.is_signed_integer(dtype)
        assert not types.is_unsigned_integer(dtype)
    for dtype in unsigned:
        assert types.is_integer(dtype)
        assert types.is_unsigned_integer(dtype)
        assert not types.is_signed_integer(dtype)
    float_type = pa.float32()
    assert not types.is_integer(float_type)
    assert not types.is_signed_integer(float_type)
def test_is_floating():
    """Every float width is floating; int32 is not."""
    for dtype in (pa.float16(), pa.float32(), pa.float64()):
        assert types.is_floating(dtype)
    assert not types.is_floating(pa.int32())
def test_is_null():
    """null() is null; a list type is not."""
    null_type = pa.null()
    assert types.is_null(null_type)
    assert not types.is_null(pa.list_(pa.int32()))
def test_is_decimal():
    """A decimal(19, 4) type is decimal; int32 is not."""
    decimal_type = pa.decimal(19, 4)
    assert types.is_decimal(decimal_type)
    assert not types.is_decimal(pa.int32())
def test_is_list():
    """A list-of-int32 type is a list; plain int32 is not."""
    list_type = pa.list_(pa.int32())
    assert types.is_list(list_type)
    assert not types.is_list(pa.int32())
def test_is_dictionary():
    """A dictionary-encoded type is recognised; int32 is not."""
    dict_type = pa.dictionary(pa.int32(),
                              pa.array(['a', 'b', 'c']))
    assert types.is_dictionary(dict_type)
    assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
    """Structs are both struct and nested; lists are nested only."""
    struct_type = pa.struct([pa.field('a', pa.int32()),
                             pa.field('b', pa.int8()),
                             pa.field('c', pa.string())])
    assert types.is_struct(struct_type)
    assert not types.is_struct(pa.list_(pa.int32()))
    assert types.is_nested(struct_type)
    assert types.is_nested(pa.list_(pa.int32()))
    assert not types.is_nested(pa.int32())
# TODO(wesm): Union types not yet implemented in pyarrow
# def test_is_union():
# assert types.is_union(pa.union([pa.field('a', pa.int32()),
# pa.field('b', pa.int8()),
# pa.field('c', pa.string())]))
# assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
    """binary vs. string vs. fixed-size-binary predicates are exclusive."""
    utf8 = pa.string()
    var_binary = pa.binary()
    assert types.is_binary(var_binary)
    assert not types.is_binary(utf8)
    assert types.is_string(utf8)
    assert types.is_unicode(utf8)
    assert not types.is_string(var_binary)
    # Only binary with an explicit byte width is fixed-size.
    assert types.is_fixed_size_binary(pa.binary(5))
    assert not types.is_fixed_size_binary(var_binary)
def test_is_temporal_date_time_timestamp():
    """Dates, times and timestamps are all temporal; each family is exclusive."""
    dates = [pa.date32(), pa.date64()]
    times = [pa.time32('s'), pa.time64('ns')]
    stamps = [pa.timestamp('ms')]
    for t in dates + times + stamps:
        assert types.is_temporal(t)
    for t in dates:
        assert types.is_date(t)
        assert not (types.is_time(t) or types.is_timestamp(t))
    for t in times:
        assert types.is_time(t)
        assert not (types.is_date(t) or types.is_timestamp(t))
    for t in stamps:
        assert types.is_timestamp(t)
        assert not (types.is_date(t) or types.is_time(t))
    assert not types.is_temporal(pa.int32())
def test_timestamp_type():
    # See ARROW-1683
    # timestamp() must construct a TimestampType instance.
    assert isinstance(pa.timestamp('ns'), pa.TimestampType)
|
import unittest
from word_treasure import *
class WordTreasureTestCase(unittest.TestCase):
    """Smoke tests for the word treasure display helpers.

    The aim is only to check that the calls do not crash unexpectedly:
    each helper is expected to return True for a known word and None for
    a nonexistent one. The validity of the displayed content itself is
    not checked.
    """

    # Shared fixtures: a real word and one that should not resolve.
    KNOWN = "hello"
    UNKNOWN = "somenonexistantword"

    def test_definition_call(self):
        self.assertEqual(display_definitions(self.KNOWN), True)
        self.assertEqual(display_definitions(self.UNKNOWN), None)

    def test_random_words(self):
        self.assertEqual(display_random_words(10), True)

    def test_display_examples(self):
        self.assertEqual(display_examples(self.KNOWN, 10), True)
        self.assertEqual(display_examples(self.UNKNOWN, 10), None)

    def test_display_top_examples(self):
        self.assertEqual(display_top_examples(self.KNOWN), True)
        self.assertEqual(display_top_examples(self.UNKNOWN), None)

    def test_display_related_words(self):
        self.assertEqual(display_related_words(self.KNOWN), True)
        self.assertEqual(display_related_words(self.UNKNOWN), None)

    def test_display_compact(self):
        self.assertEqual(display_compact(self.KNOWN), True)
        self.assertEqual(display_compact(self.UNKNOWN), None)

    def test_help_display(self):
        self.assertEqual(display_help(), True)
if __name__=='__main__':
    # Run the smoke-test suite when executed directly.
    unittest.main()
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for the Brocade plugin and ML2 Brocade driver
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the Brocade plugin and ML2 Brocade driver tables.

    Builds the network/port tables for both the monolithic plugin
    (brocadenetworks/brocadeports) and the ML2 mechanism driver
    (ml2_brocadenetworks/ml2_brocadeports).
    """
    op.create_table(
        'brocadenetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vlan', sa.String(length=10), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'brocadeports',
        # NOTE(review): empty-string server_default on the primary key
        # column looks deliberate — confirm against the plugin's models.
        sa.Column('port_id', sa.String(length=36), nullable=False,
                  server_default=''),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('physical_interface', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.String(length=36), nullable=True),
        sa.Column('tenant_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['brocadenetworks.id'], ),
        sa.PrimaryKeyConstraint('port_id'))
    op.create_table(
        'ml2_brocadenetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vlan', sa.String(length=10), nullable=True),
        sa.Column('segment_id', sa.String(length=36), nullable=True),
        sa.Column('network_type', sa.String(length=10), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'ml2_brocadeports',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('physical_interface', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.String(length=36), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['network_id'], ['ml2_brocadenetworks.id']))
|
# -*- encoding: utf8 -*-
# A daemon to keep SSH forwarding connected
from __future__ import print_function, absolute_import
import os
import sys
import time
import socket
import logging
class Daemon(object):
    """Keep a local SSH port forward alive.

    Periodically checks that localhost:3366 accepts TCP connections and
    re-launches the ssh tunnel (3366 -> remote 3306) when it does not.
    """

    def __init__(self):
        # Seconds to sleep between connectivity checks.
        self.heartbeat = 50

    def run(self):
        """Fork into the background, then loop forever keeping the tunnel up."""
        logging.basicConfig(filename='daemon.log')
        logging.error('daemon started')
        self.daemonize()
        while True:
            if not self.check_connection():
                self.reconnect()
                # FIX: logging.warn() is deprecated; use warning().
                logging.warning('reconnecting')
            time.sleep(self.heartbeat)

    def check_connection(self):
        """Return True if the forwarded port accepts a TCP connection."""
        c = socket.socket()
        try:
            c.connect(('localhost', 3366))
            return True
        except socket.error:
            return False
        finally:
            # FIX: always release the socket; the original leaked the fd
            # whenever connect() raised.
            c.close()

    def daemonize(self):
        """Fork once and exit the parent so the child keeps running.

        NOTE(review): single fork without setsid()/chdir()/umask(), so
        the child is not a fully detached daemon — confirm this is
        sufficient for the deployment.
        """
        pid = os.fork()
        if pid:
            os.waitpid(pid, os.WNOHANG)
            sys.exit(0)
        return

    def reconnect(self):
        """Launch the ssh port forward in a child and wait for it to exit."""
        pid = os.fork()
        if pid == 0:  # child
            err = os.execlp('/usr/bin/ssh', 'ssh', '-i',
                            '/home/xu/.ssh/id_rsa', '-L',
                            '3366:127.0.0.1:3306', '-p', '42022', 'xu@abc.com')
            # execlp only returns on failure.
            if err:
                logging.error("error to execlp")
                sys.exit(1)
        elif pid > 0:
            os.waitpid(pid, 0)
        else:
            logging.error('error to fork')
            sys.exit(2)
if __name__ == '__main__':
    # Runs forever (run() loops on the heartbeat interval).
    Daemon().run()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Search through the su | bfolders of the current folder. For each subfolder found,
chdir() to it, then run all executable scripts ending in .SH in that folder.
Does not exhaustively search for subfolders of subfolders, or subfolders of
subfolders | of subfolders, etc.; it only does exactly what was described in that
first sentence, without recursion.
Note that this calls scripts in an insecure way:
subprocess.call(script_name, shell=True)
so it should only be called on scripts that are trusted completely.
This script is copyright 2017-20 by Patrick Mooney. It is licensed under the GNU
GPL, either version 3 or (at your option) any later version. See the file
LICENSE.md for details.
"""
import glob, os, subprocess
from pprint import pprint
# For every immediate subfolder: run each executable *.sh / *.SH script in
# it, then strip its execute bit so it is not run again on the next pass.
the_dirs = [ d for d in glob.glob("*") if os.path.isdir(d) ]
for which_dir in the_dirs:
    olddir = os.getcwd()
    try:
        os.chdir(which_dir)
        print("changed directory to %s" % os.getcwd())
        # set() deduplicates scripts matched by both patterns.
        exec_scripts = [ which_script for which_script in list(set(glob.glob('*SH') + glob.glob('*sh'))) if os.access(which_script, os.X_OK) ]
        pprint("exec_scripts are: %s" % exec_scripts)
        for which_script in exec_scripts:
            print("About to call script: %s" % which_script)
            subprocess.call('./' + which_script, shell=True)
            # BUG FIX: the original passed a single string without
            # shell=True, so subprocess looked for a program literally
            # named "chmod a-x <script>" and always failed. Pass an
            # argument list so the execute bit is actually removed.
            subprocess.call(['chmod', 'a-x', which_script])
    except BaseException as e:
        # Deliberately broad: keep iterating the remaining folders.
        print('Something went wrong; the system said %s' % e)
    finally:
        # Always return to the parent folder, even after an error.
        os.chdir(olddir)
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 4/26/14
###Function: Incidence per 100,000 vs. week number for flu weeks (wks 40-20). Incidence is per 100,000 for the US population in the second calendar year of the flu season.
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv
###Command Line: python F2_incid_time.py
##############################################
### notes ###
# Incidence per 100,000 is normali | zed by total population by second calendar year of | the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
# NOTE: Python 2 script (xrange, print statements); paths are hard-coded.
# Load ILI incidence and population tables exported from SQL.
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24
fssml = 16
### program ###
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# plot values
# One incidence curve per season, restricted to the flu weeks (40-20).
for s in ps:
    plt.plot(xrange(fw), d_incid53ls[s][:fw], marker = 'o', color = colvec[s-2], label = sl[s-2], linewidth = 2)
plt.xlim([0, fw-1])
plt.xticks(range(fw)[::5], wklab[:fw:5])
plt.ylim([0, 60])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('Incidence per 100,000', fontsize=fs)
plt.legend(loc='upper left')
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/F2/incid_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# 7/28/14: does 'week' variable in SDI refer to week before or after referenced date? Thanksgiving week does not correpond with correct week number for dip in incidence plot
# Debug dump: incidence values and week keys for season 2.
print [d_incid[wk] for wk in sorted(d_wk) if d_wk[wk]==2]
print [wk for wk in sorted(d_wk) if d_wk[wk]==2]
|
from functools import wraps
from threading import RLock
import traceback
def Synchronized(lock=None):
    """Decorator factory that serializes calls to the wrapped function.

    :param lock: lock to hold around each call. If None, a new RLock is
        created and shared by every function decorated with this
        particular decorator instance.
    :return: decorator that wraps a function with lock acquisition
    """
    if not lock:
        lock = RLock()

    def decorator(fn):
        @wraps(fn)
        def wrapped(*args, **kwargs):
            # 'with' releases the lock even when fn raises — identical
            # semantics to the previous acquire/try/finally/release,
            # but idiomatic and harder to get wrong.
            with lock:
                return fn(*args, **kwargs)
        return wrapped
    return decorator
|
import json
from django import temp | late
register = template.Library( | )
@register.filter
def jsonify(value):
    """Template filter: serialize *value* to a JSON string.

    NOTE(review): the result is not marked safe and json.dumps does not
    escape '</script>'-style sequences — confirm templates escape the
    output appropriately before embedding it in HTML.
    """
    serialized = json.dumps(value)
    return serialized
|
from PIL import Image
from math import ceil, floor
def load_img(src):
    """Open the image file at *src* with PIL."""
    return Image.open(src)
def create_master(width, height):
    """Create a blank (transparent) RGBA canvas of the given size."""
    return Image.new("RGBA", (width, height))
def closest_power_two(num):
    """Return the smallest power of two (minimum 2) that is >= num."""
    power = 2
    while power < num:
        power <<= 1
    return power
def create_matrix(cols, rows, images):
    """Paste *images* into a cols x rows sprite sheet.

    Each cell is the smallest power-of-two box that fits an image, and
    every image is centered within its cell. Assumes all images share
    the size of the first one.
    """
    x, y = images[0].size  # We assume that all images are same size
    width = closest_power_two(x)
    height = closest_power_two(y)
    print("Width: {0} Height: {1}".format(width, height))
    offset_x = int((width - x) / 2)
    offset_y = int((height - y) / 2)
    master = create_master(width * cols, height * rows)
    for index, img in enumerate(images):
        row = floor(index / cols)
        col = index % cols
        # BUG FIX: offset_y was subtracted, shifting sprites up and out
        # of their cells; add it so images are centered vertically, the
        # same way offset_x centers them horizontally.
        master.paste(img, (width * col + offset_x, height * row + offset_y))
    return master
def hero_sprites(name, action, frames):
    """Load all sprite frames for an action in the four facings.

    Images are read from img/png/1x/<name>/ and returned grouped per
    facing, in the order Back, Front, Left, Right.
    """
    def generator(name, action, position, frames):
        # Multi-frame animations are named "Action Pos (i).png";
        # single-frame ones are just "Action Pos.png".
        if frames > 1:
            return [load_img("img/png/1x/{0}/{1}{2} ({3}).png".format(name, action, position, frame)) for frame in range(1, frames + 1)]
        else:
            return [load_img("img/png/1x/{0}/{1}{2}.png".format(name, action, position))]
    # Flatten the per-facing lists directly; clearer than the previous
    # reduce(lambda a, b: a + b, ...) and produces the identical list.
    imgs = []
    for pos in ["Back", "Front", "Left", "Right"]:
        imgs.extend(generator(name, action, pos, frames))
    return imgs
if __name__ == "__main__":
    # Build a 4x4 sheet from hero1's 3-frame "Dead" animation.
    matrix = create_matrix(4, 4, hero_sprites("hero1", "Dead", 3))
    matrix.save("img/hero1_dead.png", "PNG")
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF l | icenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for t | he
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`airflow.providers.amazon.aws.transfers.redshift_to_s3`.
"""
import warnings
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
# Warn at import time; stacklevel=2 attributes the warning to the module
# importing this deprecated shim rather than to this file.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.redshift_to_s3`.",
    DeprecationWarning,
    stacklevel=2,
)
class RedshiftToS3Transfer(RedshiftToS3Operator):
    """
    This class is deprecated.
    Please use: :class:`airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator`.
    """

    def __init__(self, **kwargs):
        # stacklevel=3 points the warning at the code instantiating this
        # deprecated subclass, skipping this shim and the warn call.
        message = """This class is deprecated.
            Please use
            `airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator`."""
        warnings.warn(message, DeprecationWarning, stacklevel=3)
        super().__init__(**kwargs)
|
from django.db import models
from .bleachfield import BleachField
class BleachCharField(BleachField, models.CharField):
    """CharField that sanitizes its value with bleach before saving."""

    def pre_save(self, model_instance, add):
        # Sanitize the current value and write it back onto the instance
        # before delegating to the parent pre_save.
        raw_value = getattr(model_instance, self.attname)
        sanitized = self.clean_text(raw_value)
        setattr(model_instance, self.attname, sanitized)
        return super(BleachCharField, self).pre_save(model_instance, add)
|
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
class Controller(object):
    """Handles create/delete requests for image tags."""

    def __init__(self, db=None):
        # Fall back to the default database API when none is injected.
        self.db_api = db or glance.db.get_api()
        self.db_api.configure_db()

    @utils.mutating
    def update(self, req, image_id, tag_value):
        ctx = req.context
        existing_tags = self.db_api.image_tag_get_all(ctx, image_id)
        # Adding a tag that already exists is a no-op.
        if tag_value not in existing_tags:
            self.db_api.image_tag_create(ctx, image_id, tag_value)

    @utils.mutating
    def delete(self, req, image_id, tag_value):
        try:
            self.db_api.image_tag_delete(req.context, image_id, tag_value)
        except exception.NotFound:
            raise webob.exc.HTTPNotFound()
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serializes tag operations as empty 204 (No Content) responses."""

    def _no_content(self, response):
        # Tag mutations return no body, only the status code.
        response.status_int = 204

    def update(self, response, result):
        self._no_content(response)

    def delete(self, response, result):
        self._no_content(response)
def create_resource():
    """Build the WSGI resource for the image tags controller."""
    return wsgi.Resource(Controller(), serializer=ResponseSerializer())
|
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unrescue"))
def shelve(self):
"""
Shelve a running server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("shelve"))
def unshelve(self):
"""
Restore a shelved server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unshelve"))
def delete_shelve(self):
"""
Delete a shelved server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("shelveOffload"))
def create_image(self, name=None, metadata=None):
"""
Create server image
@keyword name: Image name
@type name: str
@keyword metadata: Metadata
@type metadata: dict
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"createImage",
name=name,
metadata=metadata))
def backup(self, name=None, backup_type=None, rotation=None):
"""
Create server backup
@keyword name: name of the backup data
@type name: str
@keyword backup_type: 'daily' or 'weekly'
@type backup_type: str
@keyword rotation: number of backups to maintain
@type rotation: int
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"createBackup",
name=name,
backup_type=backup_type,
rotation=rotation))
def live_migration(self, host=None, disk_over_commit=False):
"""
Move a server to another host without rebooting
@keyword host: Destination host
@type host: str
@keyword disk_over_commit: do disk over commit or not
@type disk_over_commit: bool
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-migrateLive",
host=host,
block_migration=False,
disk_over_commit=disk_over_commit))
def block_migration(self, host=None, disk_over_commit=False):
"""
Move a server to another host without rebooting, with disk copy
@keyword host: Destination host
@type host: str
@keyword disk_over_commit: do disk over commit or not
@type disk_over_commit: bool
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-migrateLive",
host=host,
block_migration=True,
disk_over_commit=disk_over_commit))
    def evacuate(self, host=None, password=None, shared=True):
        """
        Evacuate a server to another host (e.g. when its host has failed)
        @keyword host: Destination host
        @type host: str
        @keyword password: new administrator password
        @type password: str
        @keyword shared: whether the vm is on the shared storage
        @type shared: bool
        @rtype: None
        """
        self._http.post(self._url_resource_path, self._id, 'action',
                        data=utils.get_json_body(
                            "evacuate",
                            host=host,
                            adminPass=password,
                            onSharedStorage=shared))
    def reset_status(self, status=None):
        """
        Reset the state of the server
        @keyword status: new status of the server ('active', 'pause', ...)
        @type status: str
        @rtype: None
        """
        self._http.post(self._url_resource_path, self._id, 'action',
                        data=utils.get_json_body(
                            "os-resetState", state=status))
def get_vnc_console(self, type='novnc'):
"""
Get VNC console
@keyword type: 'novnc' or 'xvpvnc' (required)
@type type: str
@return: Console information
@rtype: dict
"""
ret = self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-getVNCConsole",
type=type))
return ret.get('console')
def get_console_log(self, lines=50):
"""
Get console output
@keyword lines: number of lines
@type lines: int
@return: Console logs
@rtype: dict
"""
ret = self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"os-getConsoleOutput",
length=lines))
return ret.get('output')
    def get_diagnostics(self):
        """
        Get diagnostics
        @return: Diagnostics
        @rtype: dict
        """
        # Plain GET on the server's 'diagnostics' sub-resource.
        return self._http.get(self._url_resource_path, self._id, 'diagnostics')
    def resize(self, flavor=None, disk_config='AUTO'):
        """
        Resize a server to a new flavor
        @keyword flavor: Flavor (required)
        @type flavor: osclient2.nova.v2.flavor.Resource
        @keyword disk_config: disk configuration ('AUTO')
        @type disk_config: str
        @rtype: None
        """
        self._http.post(self._url_resource_path, self._id, 'action',
                        data={"resize": {
                            "flavorRef": flavor.id,
                            "OS-DCF:diskConfig": disk_config}})
def confirm_resize(self):
"""
Confirm resizing of a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("confirmResize"))
def revert_resize(self):
"""
Revert resizing of a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("revertResize"))
    def rebuild(self, image=None, disk_config='AUTO', password=None,
                ipv4=None, ipv6=None, personality=None):
        """
        Rebuild a server
        @keyword image: Image
        @type image: osclient2.image.Resource
        @keyword disk_config: disk configuration ('AUTO')
        @type disk_config: str
        @keyword password: admin password
        @type password: str
        @keyword ipv4: IPv4 address
        @type ipv4: str
        @keyword ipv6: IPv6 address
        @type ipv6: str
        @keyword personality: personality data
        @type personality: [str]
        @rtype: None
        """
        json_body = utils.get_json_body(
            "rebuild",
            imageRef=image.id,
            adminPass=password,
            accessIPv4=ipv4,
            accessIPv6=ipv6,
            personality=personality)
        # disk_config is optional; only include it when provided.
        if disk_config is not None:
            json_body['rebuild']['OS-DCF:diskConfig'] = disk_config
        self._http.post(self._url_resource_path, self._id, 'action',
                        data=json_body)
def get_actions(self):
"""
Get instance actions
@rtype: dict
"""
ret = self._http.get(self._url_resource_path, self._id,
'os-instance-actions')
return ret.get("instanceActions")
def get_password(self):
"""
Get instance password
@rtype: dict
"""
ret = self._http.get(self._url_resource_path, self._id,
|
from django.db import models
from | jsonfield import JSONField
class Sensor(models.Model):
    """A sensor record with a name, activation flag, type and JSON metadata."""
    # Human-readable sensor name.
    name = models.CharField(max_length=25)
    # Whether the sensor is active; new sensors start deactivated.
    activated = models.BooleanField(default=False)
    # Short type/category string — semantics defined by callers.
    type = models.CharField(max_length=10)
    # Arbitrary per-sensor metadata stored as JSON.
    meta = JSONField()
|
import functools
from . import (
constants,
utils,
)
class Card():
    """A card with a kind, optional strength, optional point value and a
    human-readable name (defaults to the kind)."""

    def __init__(self, kind=None, strength=None, value=None, verbose=None, **kwargs):
        if kind is None:
            raise(TypeError("Missing required 'kind' argument."))
        self.kind = kind
        self.strength = strength
        self.value = value
        # Fall back to the kind when no verbose name is supplied.
        self.verbose = kind if verbose is None else verbose
        super().__init__(**kwargs)

    def __valid_comparision(self, arg):
        # Anything exposing both 'kind' and 'strength' is comparable.
        return hasattr(arg, "kind") and hasattr(arg, "strength")
    _valid_comparision = __valid_comparision

    def __lt__(self, value):
        if not self.__valid_comparision(value):
            return NotImplemented
        mine, theirs = self.strength, value.strength
        # A card with a strength always outranks one without; two
        # strengths compare numerically, two None fall back to kind.
        if mine is not None:
            if theirs is None:
                return False
            return mine < theirs
        if theirs is not None:
            return True
        return self.kind < value.kind

    def __str__(self):
        return self.kind
class SimpleCard(Card):
    """A Card with a colour; its kind defaults to str(strength)."""

    def __init__(self, colour=None, kind=None, strength=None, **kwargs):
        if colour is None:
            raise(TypeError("Missing required 'colour' argument."))
        self.colour = colour
        # Derive the kind from the strength when neither is supplied.
        if kind is None:
            if strength is not None:
                kind = str(strength)
        super().__init__(kind=kind, strength=strength, **kwargs)

    def __valid_comparision(self, arg):
        # Comparable only to Card-like objects carrying a non-None
        # colour and strength.
        if super()._valid_comparision(arg):
            if hasattr(arg, "colour") and (arg.colour is not None):
                if arg.strength is not None:
                    return True
        return False
    _valid_comparision = __valid_comparision

    def __lt__(self, value):
        if not self.__valid_comparision(value):
            return super().__lt__(value)
        if self.strength < value.strength:
            return True
        if self.strength == value.strength:
            # Equal strengths fall back to colour ordering.
            return self.colour < value.colour
        return False

    def __eq__(self, value):
        if not self._valid_comparision(value):
            return False
        # BUG FIX: previously this method returned True on a match and
        # fell through to an implicit None otherwise; return a real bool
        # in all cases (truthiness is unchanged).
        return (self.strength == value.strength) and (self.colour == value.colour)

    def __str__(self):
        return self.kind + self.colour[0]
class MahJongg(Card):
    """Special card with kind '1' and strength 1."""
    def __init__(self):
        super().__init__(kind='1', strength=1)
class Dragon(Card):
    """Special card 'R' worth 25 points."""
    def __init__(self):
        super().__init__(kind='R', value=25, verbose="Dragon")
class Pheonix(Card):
    """Special card 'P' worth -25 points.

    NOTE(review): name keeps the original 'Pheonix' spelling (sic,
    'Phoenix') to avoid breaking existing callers.
    """
    def __init__(self):
        super().__init__(kind='P', value=-25, verbose="Pheonix")
class Dog(Card):
    """Special card 'D' with no strength and no point value."""
    def __init__(self):
        super().__init__(kind="D", verbose="Dog")
|
d((field_proto.name, field_proto, field_name))
for node_name, field_proto, field_name in fields:
node.add_children([ProtobufNode._from_spec(field_proto, node_name, path,
field_name)])
return node
  @staticmethod
  def _from_brain_spec(spec, name, parent_path, proto_field_name):
    """Parse a BrainSpec protobuf into a ProtobufNode.

    Args:
      spec: Protobuf to wrap in a ProtobufNode instance.
      name: Name of the node.
      parent_path: String path to this protobuf.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance.
    """
    name, _, path = ProtobufNode._infer_path_components_from_spec(
        spec, name, parent_path)
    # Add top level "brain spec" node.
    node = ProtobufNode(name, spec, proto_field_name)
    # Add observation and action specs.
    node.add_children([
        ProtobufNode._from_observation_spec(spec.observation_spec,
                                            'observation_spec', path,
                                            'observation_spec'),
        ProtobufNode._from_action_spec(spec.action_spec, 'action_spec', path,
                                       'action_spec')
    ])
    return node
  @staticmethod
  def _from_observation_spec(spec, name, parent_path, proto_field_name):
    """Parse an ObservationSpec protobuf into a ProtobufNode.

    Args:
      spec: Protobuf to wrap in a ProtobufNode instance.
      name: Name of the node.
      parent_path: String path to this protobuf.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance.
    """
    name, _, path = ProtobufNode._infer_path_components_from_spec(spec, name,
                                                                  parent_path)
    # Add top level "observations" node.
    node = ProtobufNode(name, spec, proto_field_name)
    # Add observations/{entity_name} for each of the built-in entities.
    for field_name, field_proto in _get_optional_fields_from_proto(
        spec, OBSERVATION_OPTIONAL_ENTITIES):
      node.add_children([ProtobufNode._from_spec(field_proto, field_name, path,
                                                 field_name)])
    # Add observations/global_entities/{i} for each of the global entities.
    if spec.global_entities:
      global_entities_node = ProtobufNode('global_entities',
                                          spec.global_entities,
                                          'global_entities')
      node.add_children([global_entities_node])
      for i, global_entity_spec in enumerate(spec.global_entities):
        # NOTE(review): entity children are parsed with the observation
        # node's 'path', not the global_entities node's — confirm this
        # is the intended path scheme.
        global_entities_node.add_children([
            ProtobufNode._from_spec(global_entity_spec, str(i), path,
                                    f'global_entities[{i}]')])
    return node
@staticmethod
def _from_action_type(spec, name, parent_path, proto_field_name):
"""Parse an ActionType protobuf into a ProtobufNode.
Args:
spec: Protobuf to wrap in a ProtobufNode instance.
name: Name of the node.
parent_path: String path to this protobuf.
proto_field_name: Name of this proto field in the parent proto.
Returns:
ProtobufNode instance.
"""
# Create a node for the action using the name supplied by the caller.
spec_field = getattr(spec, spec.WhichOneof('action_types'))
return ProtobufNode._from_spec(spec_field, name, parent_path,
proto_field_name)
  @staticmethod
  def _from_action_spec(spec, name, parent_path, proto_field_name):
    """Parse an ActionSpec protobuf into a ProtobufNode.

    Args:
      spec: Protobuf to wrap in a ProtobufNode instance.
      name: Name of the node.
      parent_path: String path to this protobuf.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance.
    """
    name, _, path = ProtobufNode._infer_path_components_from_spec(spec, name,
                                                                  parent_path)
    # Add top level "actions" node.
    node = ProtobufNode(name, spec, proto_field_name)
    # Add actions/{actions_type.name} node for each named action.
    for i, action_type in enumerate(spec.actions):
      node.add_children([
          ProtobufNode._from_spec(action_type, action_type.name, path,
                                  f'actions[{i}]')])
    return node
  @staticmethod
  def _from_spec(spec, name, parent_path, proto_field_name):
    """Parse a spec protobuf into a tree of ProtobufNode instances.

    Args:
      spec: Protobuf to parse and validate.
      name: Name of the top level node to create. If this isn't specified it's
        derived from spec.name or the name of the protobuf type.
      parent_path: String path to the current node.
      proto_field_name: Name of this proto field in the parent proto.

    Returns:
      ProtobufNode instance that references the spec protobuf and its' children.

    Raises:
      InvalidSpecError: If the spec is missing required fields, observations
        or actions are missing names, use reserved names or have duplicate
        names.
    """
    _, _, path = ProtobufNode._infer_path_components_from_spec(spec, name,
                                                               parent_path)
    ProtobufValidator.check_spec(spec, path)
    # Lazily build the proto-class -> parser dispatch table on first use
    # (the parser methods must already exist on the class by then).
    if not ProtobufNode._SPEC_PROTOCLASS_TO_PARSER:
      ProtobufNode._SPEC_PROTOCLASS_TO_PARSER = {
          action_pb2.ActionSpec: ProtobufNode._from_action_spec,
          action_pb2.ActionType: ProtobufNode._from_action_type,
          action_pb2.JoystickType: ProtobufNode._from_leaf_spec,
          brain_pb2.BrainSpec: ProtobufNode._from_brain_spec,
          observation_pb2.EntityFieldType: ProtobufNode._from_entity_field_type,
          observation_pb2.EntityType: ProtobufNode._from_entity_type,
          observation_pb2.FeelerType: ProtobufNode._from_leaf_spec,
          observation_pb2.ObservationSpec: ProtobufNode._from_observation_spec,
          primitives_pb2.CategoryType: ProtobufNode._from_leaf_spec,
          primitives_pb2.NumberType: ProtobufNode._from_leaf_spec,
          primitives_pb2.PositionType: ProtobufNode._from_leaf_spec,
          primitives_pb2.RotationType: ProtobufNode._from_leaf_spec
      }
    parser = ProtobufNode._SPEC_PROTOCLASS_TO_PARSER.get(type(spec))
    if not parser:
      raise InvalidSpecError(
          f'Unknown spec type: {type(spec).__qualname__} ({spec} at '
          f'"{parent_path}")')
    return parser(spec, name, parent_path, proto_field_name)
  @staticmethod
  def from_spec(spec, name=None, parent_path=None):
    """Parse a spec protobuf into a tree of ProtobufNode instances.

    Args:
      spec: Protobuf to parse and validate.
      name: Name of the top level node to create. If this isn't specified it's
        derived from spec.name or the name of the protobuf type.
      parent_path: String path to the current node.

    Returns:
      ProtobufNode instance that references the spec protobuf and its' children.

    Raises:
      InvalidSpecError: If the spec is missing required fields, observations
        or actions are missing names, use reserved names or have duplicate
        names.
    """
    # The root node has no field name in a parent proto, hence ''.
    return ProtobufNode._from_spec(spec, name, parent_path, '')
  def _leaf_data_to_proto_nest(self, data, mapper, check_spec_class,
                               unused_options):
    """Wrap a data proto in a nest.

    Args:
      data: Data proto to wrap in a dictionary.
      mapper: Optional (proto, path) callable to execute for each leaf proto.
      check_spec_class: Whether this method should check the spec proto class.
      unused_options: Unused.

    Returns:
      Dictionary containing the supplied data keyed by the node name.
    """
    # Validate the data against this node's spec before wrapping it.
    ProtobufValidator.check_data(data, self.proto, self.path,
                                 check_spec_class)
    return {self.name: mapper(data, self.path)}
def _feeler_to_proto_nest(self, da | ta, mapper, check_spec_class, options):
"""Wrap a feeler data proto in a nest.
Args:
|
# coding=utf-8
"""
The NetworkCollector class collects metrics on network interface usage
using /proc/net/dev.
#### Dependencies
* /proc/net/dev
"""
import diamond.collector
from diamond.collector import str_to_bool
import diamond.convertor
import os
import re
try:
import psutil
except ImportError:
psutil = None
class NetworkCollector(diamond.collector.Collector):
    """Collect per-interface network I/O metrics.

    Prefers parsing /proc/net/dev; falls back to psutil counters when the
    proc file is unreadable (e.g. non-Linux platforms).
    """

    PROC = '/proc/net/dev'

    def get_default_config_help(self):
        """Return help strings for this collector's configuration options."""
        config_help = super(NetworkCollector, self).get_default_config_help()
        config_help.update({
            'interfaces': 'List of interface types to collect',
            'greedy': 'Greedy match interfaces',
        })
        return config_help

    def get_default_config(self):
        """Returns the default collector settings."""
        config = super(NetworkCollector, self).get_default_config()
        config.update({
            'path': 'network',
            'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens',
                           'enx'],
            'byte_unit': ['bit', 'byte'],
            'greedy': 'true',
        })
        return config

    def collect(self):
        """Collect network interface stats and publish counter derivatives."""
        results = {}

        if os.access(self.PROC, os.R_OK):
            # Fix: raw strings for the regex — '\S', '\d', '\w' in plain
            # string literals are invalid escape sequences on modern Python.
            greed = ''
            if str_to_bool(self.config['greedy']):
                greed = r'\S*'

            exp = ((r'^(?:\s*)((?:%s)%s):(?:\s*)' +
                    r'(?P<rx_bytes>\d+)(?:\s*)' +
                    r'(?P<rx_packets>\w+)(?:\s*)' +
                    r'(?P<rx_errors>\d+)(?:\s*)' +
                    r'(?P<rx_drop>\d+)(?:\s*)' +
                    r'(?P<rx_fifo>\d+)(?:\s*)' +
                    r'(?P<rx_frame>\d+)(?:\s*)' +
                    r'(?P<rx_compressed>\d+)(?:\s*)' +
                    r'(?P<rx_multicast>\d+)(?:\s*)' +
                    r'(?P<tx_bytes>\d+)(?:\s*)' +
                    r'(?P<tx_packets>\w+)(?:\s*)' +
                    r'(?P<tx_errors>\d+)(?:\s*)' +
                    r'(?P<tx_drop>\d+)(?:\s*)' +
                    r'(?P<tx_fifo>\d+)(?:\s*)' +
                    r'(?P<tx_colls>\d+)(?:\s*)' +
                    r'(?P<tx_carrier>\d+)(?:\s*)' +
                    r'(?P<tx_compressed>\d+)(?:.*)$') %
                   (('|'.join(self.config['interfaces'])), greed))
            reg = re.compile(exp)

            # Fix: 'with' guarantees the handle is closed even if an error
            # is raised mid-loop (the original leaked it on error), and
            # avoids shadowing the builtin name 'file'.
            with open(self.PROC) as proc_file:
                for line in proc_file:
                    match = reg.match(line)
                    if match:
                        device = match.group(1)
                        results[device] = match.groupdict()
        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No network metrics retrieved')
                return None

            network_stats = psutil.network_io_counters(True)
            for device in network_stats.keys():
                network_stat = network_stats[device]
                results[device] = {}
                results[device]['rx_bytes'] = network_stat.bytes_recv
                results[device]['tx_bytes'] = network_stat.bytes_sent
                results[device]['rx_packets'] = network_stat.packets_recv
                results[device]['tx_packets'] = network_stat.packets_sent

        for device in results:
            stats = results[device]
            for s, v in stats.items():
                metric_name = '.'.join([device, s])
                # Counters are monotonic; publish the per-interval delta.
                # NOTE(review): long() means this module targets Python 2.
                metric_value = self.derivative(metric_name,
                                               long(v),
                                               diamond.collector.MAX_COUNTER)
                if s == 'rx_bytes' or s == 'tx_bytes':
                    # Byte counters are published once per configured unit.
                    convertor = diamond.convertor.binary(value=metric_value,
                                                         unit='byte')
                    for u in self.config['byte_unit']:
                        self.publish(metric_name.replace('bytes', u),
                                     convertor.get(unit=u), 2)
                else:
                    self.publish(metric_name, metric_value)

        return None
|
ry import get_additional_costs
from erpnext.manufacturing.doctype.manufacturing_settings.manufacturing_settings import get_mins_between_operations
from erpnext.stock.stock_balance import get_planned_qty, update_bin_qty
from frappe.utils.csvutils import getlink
from erpnext.stock.utils import get_bin, validate_warehouse_company, get_latest_stock_qty
from erpnext.utilities.transaction_base import validate_uom_is_integer
# Domain-specific validation errors raised by the Work Order logic below.
class OverProductionError(frappe.ValidationError): pass
class StockOverProductionError(frappe.ValidationError): pass
class OperationTooLongError(frappe.ValidationError): pass
class ItemHasVariantError(frappe.ValidationError): pass

from six import string_types

# Custom grid template for the operations child table.
form_grid_templates = {
    "operations": "templates/form_grid/work_order_grid.html"
}
class WorkOrder(Document):
def onload(self):
ms = frappe.get_doc("Manufacturing Settings")
self.set_onload("material_consumption", ms.material_consumption)
self.set_onload("backflush_raw_materials_based_on", ms.backflush_raw_materials_based_on)
    def validate(self):
        """Run all Work Order validations before save.

        Validates item/BOM/sales order, fills default warehouses, computes
        operating cost, checks quantities and operation times, then derives
        the status and (re)builds the required_items child table.
        """
        self.validate_production_item()
        if self.bom_no:
            validate_bom_no(self.production_item, self.bom_no)

        self.validate_sales_order()
        self.set_default_warehouse()
        self.validate_warehouse_belongs_to_company()
        self.calculate_operating_cost()
        self.validate_qty()
        self.validate_operation_time()
        # Status is derived after the other fields are settled.
        self.status = self.get_status()

        validate_uom_is_integer(self, "stock_uom", ["qty", "produced_qty"])

        # If rows already exist, only refresh quantities; otherwise build rows.
        self.set_required_items(reset_only_qty = len(self.get("required_items")))
    def validate_sales_order(self):
        """Verify the linked Sales Order and copy its delivery date/project.

        Throws if the Sales Order is Closed/On Hold, or if it does not
        contain the production item (directly, via Product Bundle, or as a
        packed item).
        """
        if self.sales_order:
            self.check_sales_order_on_hold_or_close()

            # First look for the item on the SO directly or via Product Bundle.
            so = frappe.db.sql("""
                select so.name, so_item.delivery_date, so.project
                from `tabSales Order` so
                inner join `tabSales Order Item` so_item on so_item.parent = so.name
                left join `tabProduct Bundle Item` pk_item on so_item.item_code = pk_item.parent
                where so.name=%s and so.docstatus = 1 and (
                    so_item.item_code=%s or
                    pk_item.item_code=%s )
            """, (self.sales_order, self.production_item, self.production_item), as_dict=1)

            if not so:
                # Fall back: item delivered as part of a Packed Item row.
                so = frappe.db.sql("""
                    select
                        so.name, so_item.delivery_date, so.project
                    from
                        `tabSales Order` so, `tabSales Order Item` so_item, `tabPacked Item` packed_item
                    where so.name=%s
                        and so.name=so_item.parent
                        and so.name=packed_item.parent
                        and so_item.item_code = packed_item.parent_item
                        and so.docstatus = 1 and packed_item.item_code=%s
                """, (self.sales_order, self.production_item), as_dict=1)

            if len(so):
                # Copy delivery date / project from the first matching row.
                if not self.expected_delivery_date:
                    self.expected_delivery_date = so[0].delivery_date

                if so[0].project:
                    self.project = so[0].project

                if not self.material_request:
                    self.validate_work_order_against_so()
            else:
                frappe.throw(_("Sales Order {0} is not valid").format(self.sales_order))
def check_sales_order_on_hold_or_close(self):
status = frappe.db.get_value("Sales Order", self.sales_order, "status")
if status in ("Closed", "On Hold"):
frappe.throw(_("Sales Order {0} is {1}").format(self.sales_order, status))
def set_default_warehouse(self):
if not self.wip_warehouse:
self.wip_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_wip_warehouse")
if not self.fg_warehouse:
self.fg_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_fg_warehouse")
def validate_warehouse_belongs_to_company(self):
warehouses = [self.fg_warehouse, self.wip_warehouse]
for d in self.get("required_items"):
if d.source_warehouse not in warehouses:
warehouses.append(d.source_warehouse)
for wh in warehouses:
validate_warehouse_company(wh, self.company)
def calculate_operating_cost(self):
self.planned_operating_cost, self.actual_operating_cost = 0.0, 0.0
for d in self.get("operations"):
d.pla | nned_operating_cost = flt(d.hour_rate) * (flt(d.time_in_mins) / 60.0)
d.actual_operating_cost = flt(d.hour_rate) * (flt(d.actual_operation_time) / 60.0)
self.planned_operating_cost | += flt(d.planned_operating_cost)
self.actual_operating_cost += flt(d.actual_operating_cost)
variable_cost = self.actual_operating_cost if self.actual_operating_cost \
else self.planned_operating_cost
self.total_operating_cost = flt(self.additional_operating_cost) + flt(variable_cost)
    def validate_work_order_against_so(self):
        """Throw OverProductionError if total ordered qty would exceed the
        Sales Order qty plus the configured over-production allowance."""
        # Qty already ordered for this item against the same Sales Order
        # (all other non-cancelled Work Orders).
        ordered_qty_against_so = frappe.db.sql("""select sum(qty) from `tabWork Order`
            where production_item = %s and sales_order = %s and docstatus < 2 and name != %s""",
            (self.production_item, self.sales_order, self.name))[0][0]

        total_qty = flt(ordered_qty_against_so) + flt(self.qty)

        # get qty from Sales Order Item table
        so_item_qty = frappe.db.sql("""select sum(stock_qty) from `tabSales Order Item`
            where parent = %s and item_code = %s""",
            (self.sales_order, self.production_item))[0][0]
        # get qty from Packing Item table
        dnpi_qty = frappe.db.sql("""select sum(qty) from `tabPacked Item`
            where parent = %s and parenttype = 'Sales Order' and item_code = %s""",
            (self.sales_order, self.production_item))[0][0]
        # total qty in SO
        so_qty = flt(so_item_qty) + flt(dnpi_qty)

        # Allowance is a percentage on top of the Sales Order quantity.
        allowance_percentage = flt(frappe.db.get_single_value("Manufacturing Settings",
            "overproduction_percentage_for_sales_order"))

        if total_qty > so_qty + (allowance_percentage/100 * so_qty):
            frappe.throw(_("Cannot produce more Item {0} than Sales Order quantity {1}")
                .format(self.production_item, so_qty), OverProductionError)
    def update_status(self, status=None):
        '''Update status of work order if unknown.

        A "Stopped" status is kept as-is; otherwise the status is derived
        from stock entries and persisted when it changed.
        '''
        if status != "Stopped":
            status = self.get_status(status)

        if status != self.status:
            self.db_set("status", status)

        self.update_required_items()

        return status
    def get_status(self, status=None):
        '''Return the status based on stock entries against this work order.

        Draft/Cancelled follow docstatus; for submitted orders the status is
        Not Started / In Process / Completed depending on the summed
        fg_completed_qty of submitted Stock Entries (unless Stopped).
        '''
        if not status:
            status = self.status

        if self.docstatus==0:
            status = 'Draft'
        elif self.docstatus==1:
            if status != 'Stopped':
                # Sum of completed qty per purpose across submitted entries.
                stock_entries = frappe._dict(frappe.db.sql("""select purpose, sum(fg_completed_qty)
                    from `tabStock Entry` where work_order=%s and docstatus=1
                    group by purpose""", self.name))

                status = "Not Started"
                if stock_entries:
                    status = "In Process"
                    produced_qty = stock_entries.get("Manufacture")
                    if flt(produced_qty) >= flt(self.qty):
                        status = "Completed"
        else:
            status = 'Cancelled'

        return status
    def update_work_order_qty(self):
        """Update **Manufactured Qty** and **Material Transferred for Qty** in Work Order
        based on Stock Entry.

        Throws StockOverProductionError when a summed quantity exceeds the
        planned qty plus the configured over-production allowance.
        """
        allowance_percentage = flt(frappe.db.get_single_value("Manufacturing Settings",
            "overproduction_percentage_for_work_order"))

        for purpose, fieldname in (("Manufacture", "produced_qty"),
            ("Material Transfer for Manufacture", "material_transferred_for_manufacturing")):
            # Transfers are tracked via Job Cards instead when so configured.
            if (purpose == 'Material Transfer for Manufacture' and
                self.operations and self.transfer_material_against == 'Job Card'):
                continue

            qty = flt(frappe.db.sql("""select sum(fg_completed_qty)
                from `tabStock Entry` where work_order=%s and docstatus=1
                and purpose=%s""", (self.name, purpose))[0][0])

            completed_qty = self.qty + (allowance_percentage/100 * self.qty)
            if qty > completed_qty:
                frappe.throw(_("{0} ({1}) cannot be greater than planned quantity ({2}) in Work Order {3}").format(\
                    self.meta.get_label(fieldname), qty, completed_qty, self.name), StockOverProductionError)

            self.db_set(fieldname, qty)

        if self.production_plan:
            self.update_production_plan_status()
def update_production_plan_status(self):
production_plan = frappe.get_doc('Production Plan', self.production_plan)
production_plan.run_method("update_produced_qty", self.produced_qty, self.production_plan_item)
def on_submit(self):
if not self.wip_warehouse:
frappe.throw(_("Work-in-Progress Warehouse is required before Submit"))
if not self.fg_warehouse:
frappe.throw(_("For Warehouse is required before Submit"))
self.update_work_order_qty_in_so()
self.update_reserved_qty_for_production()
self.update_c |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Core.IdentifiedObject import IdentifiedObject
class GeographicalRegion(IdentifiedObject):
    """A geographical region of a power system network model.
    """

    def __init__(self, Regions=None, *args, **kw_args):
        """Initialises a new 'GeographicalRegion' instance.

        @param Regions: The association is used in the naming hierarchy.
        """
        self._Regions = []
        self.Regions = [] if Regions is None else Regions

        super(GeographicalRegion, self).__init__(*args, **kw_args)

    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["Regions"]
    _many_refs = ["Regions"]

    def getRegions(self):
        """The association is used in the naming hierarchy.
        """
        return self._Regions

    def setRegions(self, value):
        # Detach every currently linked region, then attach the new ones.
        for region in self._Regions:
            region.Region = None
        for region in value:
            region._Region = self
        self._Regions = value

    Regions = property(getRegions, setRegions)

    def addRegions(self, *Regions):
        # Attaching via the child's property keeps both sides consistent.
        for child in Regions:
            child.Region = self

    def removeRegions(self, *Regions):
        for child in Regions:
            child.Region = None
|
import argparse
from pdnssync.database import Database
from pdnssync.parse import Parser
from pdnssync.error import get_warn, get_err
parser = Parser()
def validate():
    """Run cross-domain validation over every parsed domain."""
    domains = parser.get_domains()
    for name in sorted(domains):
        domains[name].validate(domains)
def sync(db):
    """Create missing domains, delete stale ones, then sync each domain."""
    db_names = set(db.get_domains().keys())
    wanted = parser.get_domains()
    wanted_names = set(wanted.keys())

    db.create_domains(list(wanted_names - db_names))
    db.delete_domains(list(db_names - wanted_names))

    for name in sorted(wanted_names):
        wanted[name].sync_domain(db)
def export(db):
    """Print every domain from the database in the parser's input format.

    Emits 'D' (domain + first two SOA fields), 'N' (nameservers), 'M' (MX),
    then per-record lines: bare 'address name' for A/AAAA, 'C' for CNAME,
    'S' for SRV and 'X' for TXT.
    """
    all_db_domain = db.get_domains()
    for d in all_db_domain:
        print('# %s' % d)
        records = db.get_records(d)
        # SOA data is space-separated; only the first two fields are exported.
        soa = records[(d, 'SOA')][0].data.split(' ')
        print('D %s %s %s' % (d, soa[0], soa[1]))
        if (d, 'NS') in records:
            ns = records[(d, 'NS')]
            ns_names = []
            for i in ns:
                ns_names.append(i.data)
            print('N %s' % ' '.join(ns_names))
        if (d, 'MX') in records:
            mx = records[(d, 'MX')]
            mx_names = []
            for i in mx:
                mx_names.append("%s %s" % (i.prio, i.data))
            print('M %s' % ' '.join(mx_names))
        # Records are keyed by (name, type); dump each supported type.
        for i in records:
            if i[1] == 'A':
                for j in records[i]:
                    print('%s %s' % (j.data, i[0]))
            if i[1] == 'AAAA':
                for j in records[i]:
                    print('%s %s' % (j.data, i[0]))
            if i[1] == 'CNAME':
                for j in records[i]:
                    print('C %s %s' % (i[0], j.data))
            if i[1] == 'SRV':
                for j in records[i]:
                    print('S %s %s %s' % (i[0], j.prio, j.data))
            if i[1] == 'TXT':
                for j in records[i]:
                    print('X %s %s' % (i[0], j.data))
        print()
def do_sync():
    """CLI entry point: parse zone files, validate, and sync to the database.

    Syncs only when there are no errors (and, with --werror, no warnings).
    """
    aparser = argparse.ArgumentParser()
    aparser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
    aparser.add_argument("-w", "--werror", action="store_true", help="also break on warnings")
    aparser.add_argument('files', metavar='file', nargs='+', help='the files to parse')
    args = aparser.parse_args()

    for fname in args.files:
        parser.parse(fname)
    parser.assign()
    validate()

    err = get_err()
    warn = get_warn()
    print('%d error(s) and %d warning(s)' % (err, warn))
    if err == 0 and (not args.werror or warn == 0):
        db = Database()
        sync(db)
    else:
        print('Errors found, not syncing')
def do_export():
    """Dump the database contents in the parser's input format."""
    export(Database())
|
from __future__ impo | rt absolute_import
from .base import *
from bundle_config import config
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config['postgres']['database'],
'USER': config['postgres']['username'],
'PASSWORD': config['postgres']['password'],
'HOST': c | onfig['postgres']['host'],
}
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '{host}:{port}'.format(
host=config['redis']['host'],
port=config['redis']['port']),
'OPTIONS': {
'PASSWORD': config['redis']['password'],
},
'VERSION': config['core']['version'],
},
}
DEBUG = False
|
import numpy as np
import canal as canal
from .util import NumpyTestCase
class FromJSONTestCase(NumpyTestCase):
    """Tests for Measurement.from_json deserialisation of InfluxDB JSON."""

    class Measurement(canal.Measurement):
        # Schema under test: one field per supported type, plus a field
        # whose database column name differs from the attribute name.
        int_field = canal.IntegerField()
        alternate_db_name = canal.IntegerField(db_name="something_else")
        float_field = canal.FloatField()
        bool_field = canal.BooleanField()
        string_field = canal.StringField()
        tag_1 = canal.Tag()
        tag_2 = canal.Tag()

    def test_from_json_iso_time(self):
        """A well-formed payload with ISO timestamps maps into typed arrays."""
        # Two distinct rows, repeated five times each.
        test_data = 5*[
            [
                "2015-01-29T21:55:43.702900257Z",
                1,
                2,
                1.2,
                True,
                "some content",
                "1",
                "2"
            ],
            [
                "2015-01-29T21:55:43.702900345Z",
                2,
                3,
                2.3,
                False,
                "some other content",
                "1",
                "2"
            ]
        ]
        json_data = dict(
            results=[dict(
                series=[dict(
                    name="Measurement",
                    columns=[
                        "time",
                        "int_field",
                        "something_else",
                        "float_field",
                        "bool_field",
                        "string_field",
                        "tag_1",
                        "tag_2"
                    ],
                    values=test_data
                )]
            )]
        )
        test_series = self.Measurement.from_json(json_data)
        self.assertndArrayEqual(
            test_series.time,
            np.array(
                5*[
                    "2015-01-29T21:55:43.702900257Z",
                    "2015-01-29T21:55:43.702900345Z"
                ],
                dtype='datetime64'
            )
        )
        self.assertndArrayEqual(
            test_series.int_field,
            np.array(5*[1, 2])
        )
        # The attribute is populated from the "something_else" column.
        self.assertndArrayEqual(
            test_series.alternate_db_name,
            np.array(5*[2, 3])
        )
        self.assertndArrayEqual(
            test_series.float_field,
            np.array(5*[1.2, 2.3])
        )
        self.assertndArrayEqual(
            test_series.bool_field,
            np.array(5*[True, False])
        )
        self.assertndArrayEqual(
            test_series.string_field,
            np.array(5*["some content", "some other content"])
        )
        self.assertndArrayEqual(
            test_series.tag_1,
            np.array(10*["1"])
        )
        self.assertndArrayEqual(
            test_series.tag_2,
            np.array(10*["2"])
        )

    def test_from_json_bad_input(self):
        """Malformed top-level JSON raises ValueError."""
        with self.assertRaises(ValueError):
            list(self.Measurement.from_json({"bad": "input"}))

    def test_empty_json(self):
        """An empty dict raises ValueError."""
        content = dict()
        with self.assertRaises(ValueError):
            self.Measurement.from_json(content)

    def test_from_json_wrong_measurement(self):
        """A series named after a different measurement raises ValueError."""
        test_json = dict(
            results=[dict(
                series=[dict(
                    name="SomeOtherMeasurement",
                    columns=[
                        "time",
                        "int_field",
                        "float_field",
                        "bool_field",
                        "string_field",
                        "tag_1",
                        "tag_2"
                    ],
                    values=[]
                )]
            )]
        )
        with self.assertRaises(ValueError):
            self.Measurement.from_json(test_json)
|
import os
import sys
def main(args):
    """Diff two project trees.

    Reports files unique to each side, then prints a line diff for every
    file present in both. *args* is [path_to_project_1, path_to_project_2].
    """
    if len(args) != 2:
        print("Usage: python project-diff.py [path-to-project-1] [path-to-project-2]")
        return

    dir1 = args[0]
    dir2 = args[1]
    project1 = collect_text_files(dir1)
    project2 = collect_text_files(dir2)

    files_only_in_1 = []
    files_only_in_2 = []
    files_in_both = []
    perform_venn_analysis(set(project1.keys()), set(project2.keys()), files_only_in_1, files_only_in_2, files_in_both)

    if len(files_only_in_1) > 0:
        print("The following files are only in Project 1:")
        for file in files_only_in_1:
            print(" " + file)
        print("")
    if len(files_only_in_2) > 0:
        print("The following files are only in Project 2:")
        for file in files_only_in_2:
            print(" " + file)
        print("")
    print(str(len(files_in_both)) + " files in both projects.")
    print("")

    # Diff the common files in a stable, sorted order.
    files_in_both.sort()
    files_with_diffs = []
    for file in files_in_both:
        text_1 = project1[file]
        text_2 = project2[file]
        diff = perform_diff(text_1, text_2)
        if len(diff) > 0:
            files_with_diffs.append(file)
            print("There's a difference in " + file)
            print("\n".join(diff))
            print("")

    if len(files_with_diffs) == 0:
        print("No files with text differences.")
    else:
        print("Diffs were in the following files:")
        print("\n".join(files_with_diffs))
        print("")
def perform_venn_analysis(set_a, set_b, only_in_a_out, only_in_b_out, in_both_out):
    """Partition two sets into A-only, B-only and intersection output lists."""
    for item in set_a:
        target = in_both_out if item in set_b else only_in_a_out
        target.append(item)
    for item in set_b:
        if item not in set_a:
            only_in_b_out.append(item)
def collect_text_files(root):
    """Map '/'-separated relative paths under *root* to their contents."""
    output = {}
    normalized = root.replace('\\', '/')
    if normalized.endswith('/'):
        normalized = normalized[:-1]
    collect_text_files_impl(normalized, '', output)
    return output
def get_file_extension(file):
    """Return the lower-cased extension of *file*, or '' when it has none."""
    if '.' not in file:
        return ''
    return file.rsplit('.', 1)[-1].lower()
# Extensions treated as binary; their contents are summarized, not diffed.
FILE_EXTENSION_IGNORE_LIST = set([
    'png', 'jpg',
    'xcuserstate',
])
def is_text_file(path):
    """True unless the path's extension is a known binary type."""
    return get_file_extension(path) not in FILE_EXTENSION_IGNORE_LIST
def collect_text_files_impl(root, current_dir, output):
    """Recursively collect files under root/current_dir into *output*.

    Keys are '/'-separated paths relative to *root*; values are the file
    text, or a placeholder summary for known-binary extensions.
    """
    full_dir = root
    if current_dir != '':
        full_dir += '/' + current_dir
    # Paths are kept '/'-separated internally; converted per OS for I/O.
    for file in os.listdir(full_dir.replace('/', os.sep)):
        full_file = full_dir + '/' + file
        if os.path.isdir(full_file.replace('/', os.sep)):
            next_cd = file if current_dir == '' else (current_dir + '/' + file)
            collect_text_files_impl(root, next_cd, output)
        else:
            rel_file = file if current_dir == '' else (current_dir + '/' + file)
            if is_text_file(rel_file):
                c = open(full_file.replace('/', os.sep), 'rt')
                text = c.read()
                c.close()
                output[rel_file] = text
            else:
                # Binary files get a fixed placeholder (summaries still TODO).
                output[rel_file] = '\n'.join([
                    "Binary file:",
                    "size X", # TODO: get file size
                    "first 20 bytes: ...", # TODO: this
                    "last 20 bytes: ...", # TODO: do this as well
                ])
def perform_diff(text_1, text_2):
    """Return a line diff between two text blobs.

    Output lines are '+ [n] line' (present only in text_2), '- [n] line'
    (present only in text_1), with '...' marking runs of unchanged lines.
    Returns [] when the texts are identical.
    """
    if text_1 == text_2:
        return []

    lines_1 = text_1.split('\n')
    lines_2 = text_2.split('\n')

    # Remove identical lines at the beginning of the file.
    trimmed_front = 0
    while (len(lines_1) > trimmed_front and len(lines_2) > trimmed_front and
           lines_1[trimmed_front] == lines_2[trimmed_front]):
        trimmed_front += 1
    lines_1 = lines_1[trimmed_front:]
    lines_2 = lines_2[trimmed_front:]

    # Remove identical lines at the end of the file.
    trimmed_back = 0
    while (len(lines_1) > trimmed_back and len(lines_2) > trimmed_back and
           lines_1[-1 - trimmed_back] == lines_2[-1 - trimmed_back]):
        trimmed_back += 1
    # BUG FIX: slicing with [:-0] would empty the lists when nothing was
    # trimmed from the back; only slice when something was actually trimmed.
    if trimmed_back:
        lines_1 = lines_1[:-trimmed_back]
        lines_2 = lines_2[:-trimmed_back]

    length_1 = len(lines_1)
    length_2 = len(lines_2)

    # Perform levenshtein difference.
    # Each grid cell is a tuple: (diff-size, previous-path: up|left|diag).
    # A step right takes a line from lines_2; a step down from lines_1.
    grid = [[None] * (length_1 + 1) for _ in range(length_2 + 1)]

    # Prepopulate the left column and top row: pure removals / additions.
    for x in range(length_2 + 1):
        grid[x][0] = (x, 'left')
    for y in range(length_1 + 1):
        grid[0][y] = (y, 'up')
    grid[0][0] = (0, 'diag')

    # Populate the grid: minimum diff size to reach each point.
    for y in range(1, length_1 + 1):
        for x in range(1, length_2 + 1):
            if lines_1[y - 1] == lines_2[x - 1]:
                grid[x][y] = (grid[x - 1][y - 1][0], 'diag')
            elif grid[x - 1][y][0] <= grid[x][y - 1][0]:
                grid[x][y] = (grid[x - 1][y][0] + 1, 'left')
            else:
                grid[x][y] = (grid[x][y - 1][0] + 1, 'up')

    # Walk backwards from the bottom-right corner to the origin.
    x = length_2
    y = length_1
    diff_chain = []
    ellipsis_used = False
    # BUG FIX: must continue until BOTH axes reach 0 ('or', not 'and'),
    # otherwise leading additions/removals were silently dropped.
    while x != 0 or y != 0:
        node = grid[x][y]
        if node[1] == 'diag':
            if not ellipsis_used:
                diff_chain.append('...')
                ellipsis_used = True
            x -= 1
            y -= 1
        elif node[1] == 'left':
            diff_chain.append('+ [' + str(trimmed_front + x) + '] ' + lines_2[x - 1])
            x -= 1
            ellipsis_used = False
        else:
            diff_chain.append('- [' + str(trimmed_front + y) + '] ' + lines_1[y - 1])
            y -= 1
            ellipsis_used = False
    diff_chain.reverse()
    return diff_chain
main(sys.argv[1:])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import os
import json
from weblab.util import data_filename
from voodoo.gen.caller_checker import caller_check
from voodoo.log import logged
from voodoo.override import Override |
import experiments.ud_xilinx.server as UdXilinxExperiment
import weblab.data.server_type as ServerType
import weblab.experiment.util as ExperimentUtil
# Package directory of this module, used to locate the bundled program files.
module_directory = os.path.join(*__name__.split('.')[:-1])

class UdDemoXilinxExperiment(UdXilinxExperiment.UdXilinxExperiment):
    """Demo Xilinx experiment that always programs a fixed, bundled file.

    Users cannot upload their own programs; the canned PLD/FPGA file is
    sent to the device when the experiment starts instead.
    """

    # Canned program file per device type.
    FILES = {
        'PLD'  : 'cpld.jed',
        'FPGA' : 'fpga.bit',
    }

    def __init__(self, coord_address, locator, cfg_manager, *args, **kwargs):
        """Load and serialize the canned file for the configured device."""
        super(UdDemoXilinxExperiment,self).__init__(coord_address, locator, cfg_manager, *args, **kwargs)
        # _xilinx_device is presumably set by the parent constructor from
        # configuration — confirm against UdXilinxExperiment.
        file_path = data_filename(os.path.join(module_directory, self.FILES[self._xilinx_device]))
        self.file_content = ExperimentUtil.serialize(open(file_path, "rb").read())

    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    @logged("info")
    def do_start_experiment(self, *args, **kwargs):
        """
        Handles experiment startup, returning certain initial configuration parameters.
        (Thus makes use of the API version 2).
        """
        # Program the device with the bundled file before returning config.
        super(UdDemoXilinxExperiment, self).do_send_file_to_device(self.file_content, "program")
        return json.dumps({ "initial_configuration" : """{ "webcam" : "%s", "expected_programming_time" : %s }""" % (self.webcam_url, self._programmer_time), "batch" : False })

    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    @logged("info")
    def do_dispose(self):
        """Delegate cleanup to the parent experiment."""
        super(UdDemoXilinxExperiment, self).do_dispose()
        return "ok"

    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    @logged("info",except_for='file_content')
    def do_send_file_to_device(self, file_content, file_info):
        """Uploading user files is disabled in the demo."""
        return "sending file not possible in demo"

    # NOTE(review): decorator order differs from the other methods here
    # (@logged outermost) — confirm whether that is intentional.
    @logged("info")
    @Override(UdXilinxExperiment.UdXilinxExperiment)
    @caller_check(ServerType.Laboratory)
    def do_send_command_to_device(self, command):
        """Pass commands straight through to the real experiment."""
        return super(UdDemoXilinxExperiment, self).do_send_command_to_device(command)
|
__author__ = 'roman'
from django.utils.functional import SimpleLazyObject
from . import get_card as _get_card
def get_card(request):
    """Return the card bound to *request*, resolving it at most once."""
    try:
        return request._cached_card
    except AttributeError:
        request._cached_card = _get_card(request)
        return request._cached_card
class CardAuthMiddleware(object):
    """Middleware that attaches a lazily-resolved ``card`` to each request."""

    def process_request(self, request):
        # Session middleware must run first; fail loudly in development if
        # the ordering is wrong.
        assert hasattr(request, 'session'), (
            "The Card authentication middleware requires session middleware "
            "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware' before "
            "'card.middleware.CardAuthMiddleware'."
        )
        # Lazy: the card is only looked up when request.card is accessed.
        request.card = SimpleLazyObject(lambda: get_card(request))
|
# Bring in the turtle graphics module.
import turtle

# Create a turtle to draw with.
pen = turtle.Turtle()

# Draw a square: four equal sides with 90-degree left turns.
for _ in range(4):
    pen.forward(200)
    pen.left(90)
|
#!/usr/bin/env python3
# Build translated PDF manuals (admin + user) for each configured language.
import os
import shutil
import subprocess
import gettext

version = '4.4.0'

# One entry per output language: paper size and LaTeX babel language name.
builds = [
    { 'language': 'de', 'paper': 'a4paper', 'babel': 'ngerman' },
    { 'language': 'en', 'paper': 'letterpaper', 'babel': 'USenglish' },
    { 'language': 'es', 'paper': 'a4paper', 'babel': 'spanish' },
    { 'language': 'fr', 'paper': 'a4paper', 'babel': 'french' },
    { 'language': 'hu', 'paper': 'a4paper', 'babel': 'magyar' },
    { 'language': 'it', 'paper': 'a4paper', 'babel': 'italian' },
    { 'language': 'sl', 'paper': 'a4paper', 'babel': 'slovene' },
    { 'language': 'uk', 'paper': 'a4paper', 'babel': 'ukrainian' },
]

for i in builds:
    for manual in [ 'admin', 'user' ]:
        language = i['language']
        print( 'Building for language "%s"' % ( language ) )
        # Compile the .po translation catalog to .mo for gettext/Sphinx.
        subprocess.Popen( ['msgfmt', 'locale/%s/LC_MESSAGES/%s.po' % ( language, manual ), '-o',
                           'locale/%s/LC_MESSAGES/%s.mo' % ( language, manual ) ] ).wait()
        env = os.environ.copy()
        # The manual title is the first line of its index.rst, translated.
        with open('%s/index.rst' % (manual)) as f:
            title = f.readline().rstrip()
            title = gettext.translation(manual, 'locale', [language], None, True).gettext(title)
        env['TITLE'] = title;
        env['LANGUAGE'] = language
        env['PAPER'] = i['paper']
        env['INDEX'] = '%s/index' % ( manual )
        env['BABEL'] = i['babel']
        env['VERSION'] = version
        # Parallel Sphinx build: one job per CPU plus one.
        env['SPHINXOPTS'] = '-j%s' % ( os.cpu_count()+1 )
        # Start from a clean build directory each time.
        shutil.rmtree('_build', True)
        subprocess.Popen( ['make', 'latexpdf' ], env=env ).wait()
        shutil.copyfile('_build/latex/veyon.pdf', 'veyon-%s-manual-%s_%s.pdf' % ( manual, language, version ))
|
import zstackwoodpecker.test_state as ts_header
import os
# Shorthand for the woodpecker test action enum.
TestAction = ts_header.TestAction

def path():
    """Return the scripted test path (ordered action list) for this case.

    initial_formation and checking_point are consumed by the woodpecker
    test driver; the expected end state is described in the module-level
    string below.
    """
    return dict(initial_formation="template5", checking_point=8, path_list=[
        [TestAction.create_vm, 'vm1', 'flag=ceph'],
        [TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
        [TestAction.clone_vm, 'vm1', 'vm2'],
        [TestAction.create_volume_backup, 'volume2', 'volume2-backup1'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.use_volume_backup, 'volume2-backup1'],
        [TestAction.start_vm, 'vm1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
        [TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot9'],
        [TestAction.clone_vm, 'vm1', 'vm3', 'full'],
        [TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
        [TestAction.stop_vm, 'vm2'],
        [TestAction.change_vm_image, 'vm2'],
        [TestAction.delete_vm_snapshot, 'vm2-snapshot9'],
    ])
'''
The final status:
Running:['vm1', 'vm3']
Stopped:['vm2']
Enabled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume2-backup1']
attached:['volume1', 'volume2', 'volume3', 'clone@volume1', 'clone@volume2', 'clone@volume3']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'vm2-snapshot9']
Expunged:[]
Ha:[]
Group:
'''
|
# -*- codin | g: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distrib | uted in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask import Blueprint

# Blueprint mounted at the site root; templates and static files ship
# alongside this module.
blueprint = Blueprint('first', __name__, url_prefix='/',
                      template_folder='templates', static_folder='static')
|
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class STARInstaller(ClusterSetup):
    """StarCluster plugin that installs STAR 2.4.0g1 on every node."""

    def run(self, nodes, master, user, user_shell, volumes):
        """Download, build and register STAR as an environment module on each node."""
        for node in nodes:
            log.info("Installing STAR 2.4.0g1 on %s" % (node.alias))
            # Fetch and unpack the pinned release under /opt/software/star.
            node.ssh.execute('wget -c -P /opt/software/star https://github.com/alexdobin/STAR/archive/STAR_2.4.0g1.tar.gz')
            node.ssh.execute('tar -xzf /opt/software/star/STAR_2.4.0g1.tar.gz -C /opt/software/star')
            node.ssh.execute('make STAR -C /opt/software/star/STAR-STAR_2.4.0g1/source')
            # Expose the build through an environment-modules modulefile.
            node.ssh.execute('mkdir -p /usr/local/Modules/applications/star/;touch /usr/local/Modules/applications/star/2.4.0g1')
            node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/star/2.4.0g1')
            node.ssh.execute('echo "set root /opt/software/star/STAR-STAR_2.4.0g1" >> /usr/local/Modules/applications/star/2.4.0g1')
            node.ssh.execute('echo -e "prepend-path\tPATH\t\$root/bin/Linux_x86_64" >> /usr/local/Modules/applications/star/2.4.0g1')
#! /bin/python2
import numpy
import cv2
import os
import struct
# Single-channel grayscale paint values for OpenCV drawing calls.
BLACK = (0,)
WHITE = (255,)
DIR_OUT = "./img/"
# Canvas is larger than the final feature so rotation/shift keep the glyph
# inside the frame; SIZE_FEATURE matches the MNIST-style 28x28 output.
SIZE_CANVAS = 50
SIZE_FEATURE = 28
SIZE_BLOCK = 32
# '0'..'9' plus the empty string — presumably a "blank cell" class; confirm.
DIGITS = tuple([chr(ord("0") + i) for i in range(10)] + [""])
FONTS = (cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN,
         cv2.FONT_HERSHEY_DUPLEX, cv2.FONT_HERSHEY_COMPLEX,
         cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_HERSHEY_COMPLEX_SMALL,
         cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
def clear_path():
    """Ensure the output image directory exists."""
    if os.path.isdir(DIR_OUT):
        return
    os.mkdir(DIR_OUT)
def get_tf(angle, center, offset):
    """Build a 3x3 homogeneous transform: rotate by *angle* degrees about
    *center*, then translate by *offset*."""
    theta = numpy.radians(angle)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)
    # Move center to the origin, rotate, then move back plus the offset.
    to_origin = numpy.matrix([[1.0, 0.0, -center[0]],
                              [0.0, 1.0, -center[1]],
                              [0.0, 0.0, 1.0]])
    rotation = numpy.matrix([[cos_t, -sin_t, 0.0],
                             [sin_t, cos_t, 0.0],
                             [0.0, 0.0, 1.0]])
    move_back = numpy.matrix([[1.0, 0.0, (center[0] + offset[0])],
                              [0.0, 1.0, (center[1] + offset[1])],
                              [0.0, 0.0, 1.0]])
    return move_back * rotation * to_origin
os.system("rm -rf " + DIR_OUT + "*")
def create_dataset(fn_f, fn_l, num_sample):
    """Generate num_sample synthetic printed-digit images and write them as
    an MNIST-style idx feature file (fn_f) and label file (fn_l)."""
    fl = open(fn_l, "wb")
    ff = open(fn_f, "wb")
    # headers
    fl.write(struct.pack(">i", 2049))  # idx magic number for label files
    fl.write(struct.pack(">i", num_sample))
    ff.write(struct.pack(">i", 2051))  # idx magic number for image files
    ff.write(struct.pack(">i", num_sample))
    ff.write(struct.pack(">i", SIZE_FEATURE))
    ff.write(struct.pack(">i", SIZE_FEATURE))
    # White working canvas; each sample is drawn on a fresh copy.
    canvas = numpy.ones((SIZE_CANVAS, SIZE_CANVAS), dtype = numpy.uint8) * 255
    # cv2.imwrite(dir_img + "canvas.png", canvas)
    for id_img in range(num_sample):
        copy = numpy.copy(canvas)
        # Random class (digit or empty cell), font and stroke width.
        id_digit = numpy.random.randint(0, len(DIGITS))
        id_font = numpy.random.randint(0, len(FONTS))
        thickness = numpy.random.randint(1, 3)
        base_line = cv2.getTextSize(DIGITS[id_digit], FONTS[id_font], 1.0, thickness)[1] + 1
        # Random glyph size relative to the ruled block.
        scale_font = float(numpy.random.randint(40, 60)) / 100.0
        scale = float(SIZE_BLOCK) * 0.5 * scale_font / float(base_line)
        shift = float(SIZE_CANVAS) / 2.0 - float(SIZE_BLOCK) * 0.5 * scale_font
        cv2.putText(copy, DIGITS[id_digit], (0, 2 * base_line + 1),
                    FONTS[id_font], 1.0, BLACK, thickness)
        # Scale the glyph and shift it towards the canvas centre.
        copy = cv2.warpAffine(copy, numpy.matrix([[scale, 0.0, shift], [0.0, scale, shift]]),
                              copy.shape, borderValue = WHITE)
        # draw lines (the ruled cell around the digit)
        thickness_line = numpy.random.randint(1, 3)
        cv2.line(copy, (0, (SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line),
                 (SIZE_CANVAS - 1, (SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line),
                 BLACK, thickness_line)
        cv2.line(copy, (0, (SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line),
                 (SIZE_CANVAS - 1, (SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line),
                 BLACK, thickness_line)
        cv2.line(copy, ((SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line, 0),
                 ((SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line, SIZE_CANVAS - 1),
                 BLACK, thickness_line)
        cv2.line(copy, ((SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line, 0),
                 ((SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line, SIZE_CANVAS - 1),
                 BLACK, thickness_line)
        # rotation: random angle in [-10, 10] degrees about the canvas
        # centre plus a small random translation.
        copy = cv2.warpAffine(copy, get_tf(float(numpy.random.randint(-10,11)), (float(SIZE_CANVAS) / 2.0, float(SIZE_CANVAS) / 2.0),
                              (numpy.random.randint(-3, 4), numpy.random.randint(-3, 4)))[0:2, :],
                              copy.shape, borderValue = WHITE)
        # Crop the central SIZE_FEATURE x SIZE_FEATURE patch.
        copy = copy[(SIZE_CANVAS - SIZE_FEATURE) / 2:(SIZE_CANVAS + SIZE_FEATURE) / 2,
                    (SIZE_CANVAS - SIZE_FEATURE) / 2:(SIZE_CANVAS + SIZE_FEATURE) / 2]
        # cv2.imwrite(DIR_OUT + "{}.png".format(id_img), copy)
        # Binarize to pure black/white before writing raw pixel bytes.
        copy[copy < 192] = 0
        copy[copy >= 192] = 255
        copy = copy.astype(numpy.uint8)
        ff.write(copy.data)
        fl.write(numpy.uint8(id_digit))
        if id_img % 1000 == 0:
            print id_img, num_sample
    fl.close()
    ff.close()
# Generate a 100k-sample training split and a 10k-sample validation split,
# each as a feature file plus a label file in MNIST idx format.
create_dataset(DIR_OUT + "printed_feature_train", DIR_OUT + "printed_label_train", 100000)
print "training data complete"
create_dataset(DIR_OUT + "printed_feature_valid", DIR_OUT + "printed_label_valid", 10000)
print "test data complete"
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import | routers, serializers, viewsets
from .views import Ho | mePageView
# URL routes: site home page, the Django admin, then everything else is
# delegated to the slackdata app's URLconf (catch-all prefix).
urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^admin/', admin.site.urls),
    url(r'^', include('slackdata.urls')),
]
|
from django.apps import | AppConfig
class BugReportsConfig(AppConfig):
    """Django application configuration for the bug_reports app."""

    # Dotted Python path to the application (referenced in INSTALLED_APPS).
    name = "bug_reports"
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. | #
# #
# This program is distributed in the hope that it will be useful, | #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fields
class oehealth_patient_medication(osv.Model):
    """Medication course taken by a patient (legacy OpenERP ``osv`` model)."""
    _name = 'oehealth.patient.medication'
    _columns = {
        # Patient this medication record belongs to.
        'patient_id': fields.many2one('oehealth.patient', string='Patient',),
        #'doctor': fields.many2one('oehealth.physician', string='Physician',
        #                          help='Physician who prescribed the medicament'),
        'adverse_reaction': fields.text(string='Adverse Reactions',
            help='Side effects or adverse reactions that the patient experienced'),
        'notes': fields.text(string='Extra Info'),
        # Whether the patient is currently taking this medication.
        'is_active': fields.boolean(string='Active',
            help='Check if the patient is currently taking the medication'),
        'course_completed': fields.boolean(string='Course Completed'),
        # Link to the medication template describing the medicament itself.
        'template': fields.many2one('oehealth.medication.template',
            string='Medication Template', ),
        'discontinued_reason': fields.char(size=256,
            string='Reason for discontinuation',
            help='Short description for discontinuing the treatment'),
        'discontinued': fields.boolean(string='Discontinued'),
    }

# Legacy OpenERP idiom: instantiating the class registers the model.
oehealth_patient_medication()
|
param['pb'] = param['pe']
label = 'BSC pe={pe} m={m} n={n}'.format(**param)
else:
label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
pmf = errpmf(**param)
ober = ber_out(param['pe'], param['pb'], pmf)
if 'label' not in plotargs:
plotargs['label'] = label
ax.plot(t, ober[t], **plotargs)
plt.close('all')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
t = np.arange(11)
for param in params:
plot(ax, t, param.copy(), lw=1.5)
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_ylim(1e-25, 1e-5)
ax.set_ylabel('Output BER, $BER_o$')
ax.set_yscale('log')
ax.grid(True)
ax.set_xticks(t)
ax.set_xlabel('Number of Symbols corrected, $t$')
ax.set_title('Number of Symbols Corrected vs. Output BER')
ax.legend(fontsize=12)
if fpath:
fig.savefig(fpath)
if show:
plt.show()
def plot_r_vs_ober(params, show=True, fpath=None):
    """Plot output BER against the fraction of corrected symbols (left axis)
    and the resulting coding rate (right axis) for each parameter set.

    The figure is optionally saved to ``fpath`` and/or shown on screen.
    """
    def plot(axes, t, param, **plotargs):
        # A missing burst probability selects the memoryless BSC case.
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        ober = ber_out(param['pe'], param['pb'], pmf)
        if 'label' not in plotargs:
            plotargs['label'] = label
        n = param['n']
        frac_t = 100 * t / n  # corrected symbols as a percentage of n
        k = n - 2 * t         # information symbols for a 2t-redundancy code
        r = k / n             # coding rate
        axes[0].plot(frac_t, ober[t], **plotargs)
        axes[1].plot(r, ober[t], **plotargs)
    plt.close('all')
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=plt.figaspect(1/2))
    t = np.arange(16)
    for param in params:
        plot(axes, t, param.copy(), lw=1.5)
    # Shared cosmetics; the dashed line marks the 1e-15 target BER.
    for ax in axes:
        ax.axhline(1e-15, color='black', linestyle='dashed')
        ax.set_ylim(1e-25, 1e-5)
        ax.set_ylabel('Output BER, $BER_o$')
        ax.set_yscale('log')
        ax.grid(True)
    axes[0].set_xlim(0, 10)
    axes[0].set_xlabel('Fraction of Symbols corrected, $t/n$ [%]')
    axes[0].set_title('Fraction of Symbols corrected vs. Output BER')
    axes[0].legend(loc='upper right', fontsize=12)
    axes[1].set_xlim(0.8, 1.0)
    axes[1].set_xlabel('Coding Rate, $R = k/n = (n - 2t)/n$')
    axes[1].set_title('Coding Rate vs. Output BER')
    axes[1].legend(loc='upper left', fontsize=12)
    plt.tight_layout()
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_pe_vs_ober(params, show=True, fpath=None):
    """Plot input BER vs. output BER for each parameter set in ``params``.

    The figure is optionally saved to ``fpath`` and/or shown on screen.
    """
    def draw(ax, pe, param, **plotargs):
        # Default label distinguishes the memoryless (BSC) case, where no
        # burst probability is given, from the GBMM burst-error model.
        if param['pb'] is None:
            default_label = 'BSC m={m} n={n} t={t}'.format(**param)
        else:
            default_label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
        plotargs.setdefault('label', default_label)
        ax.plot(pe, pe_vs_ober(pe, **param), **plotargs)

    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    # Sweep the input symbol error probability over half-decade steps.
    pe = 10.0 ** np.arange(-15, -0.5, 0.5)
    for param in params:
        draw(ax, pe, param.copy(), lw=1.5)
    # Dashed line marks the 1e-15 target BER.
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim(pe[0], pe[-1])
    ax.set_ylim(1e-25, 1e-1)
    ax.set_xlabel('Input BER, $BER_i$')
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_title('Input vs. Output BER')
    ax.legend(loc='upper left', fontsize=12)
    ax.grid(True)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_ebn0_vs_ober(params, show=True, fpath=None):
    """Plot Eb/N0 vs. output BER for each parameter set, with uncoded
    BSC/GBMM reference curves for comparison."""
    def plot(ax, ebn0, param, **plotargs):
        # A missing burst probability selects the memoryless BSC case.
        if param['pb'] is None:
            label = 'BSC m={m} n={n} t={t}'.format(**param)
        else:
            label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
        n = param['n']
        t = param['t']
        R = (n - 2 * t)/n  # coding rate
        # Convert Eb/N0 to Es/N0 via the rate, then to an error probability.
        esn0 = ebn0 + dB(R)
        pe = esn02pe(esn0)
        ober = pe_vs_ober(pe, **param)
        if 'label' not in plotargs:
            plotargs['label'] = label
        ax.plot(ebn0, ober, **plotargs)
    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    ebn0 = np.arange(5, 20.5, 0.5)
    # Uncoded (FEC input) for reference
    pe = esn02pe(ebn0)
    iber = ber_in(pe=pe, pb=0.5)
    ax.plot(ebn0, pe, lw=1.5, color='black', label='Uncoded BSC')
    ax.plot(ebn0, iber, lw=1.5, color='black', linestyle='dashed',
            label='Uncoded GBMM(pb=0.5)')
    for param in params:
        plot(ax, ebn0, param.copy(), lw=1.5)
    # Dashed line marks the 1e-15 target BER.
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_yscale('log')
    ax.set_xlim(ebn0[0], ebn0[-1])
    ax.set_xticks(ebn0[::2])
    ax.set_ylim(1e-25, 1e-1)
    ax.set_xlabel('$E_b/N_0 [dB]$')
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_title('Eb/N0 vs. Output BER')
    ax.legend(fontsize=10)
    ax.grid(True)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
if __name__ == '__main__':
argp = argparse.ArgumentParser(description='Create code performance plots.')
argp.add_argument('dir', metavar='DIR', help='plots directory')
argp.add_argument('--no-show', dest='show', action='store_false',
help='Don\'t show, just save to file.')
argns = argp.parse_args()
dirpath = os.path.abspath(argns.dir)
os.makedirs(dirpath, exist_ok=True)
# pe vs ober
params = [
# GBMM
dict(pb=0.5, m=8, n=124, t=4),
dict(pb=0.5, m=8, n=124, t=6),
dict(pb=0.5, m=8, n=124, t=8),
dict(pb=0.5, m=8, n=248, t=4),
dict(pb=0.5, m=8, n=248, t=6),
dict(pb=0.5, m=8, n=248, t=8),
dict(pb=0.5, m=10, n=528, t=7),
# BSC
dict(pb=None, m=8, n=124, t=4),
dict(pb=None, m=8, n=248, t=4)]
plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, 'pe-vs-ober.png'))
plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, 'ebn0-vs-ober.png'))
params = [
# GBMM
dict(pb=0.5, m=8, n=240//8, t=1),
dict(pb=0.5, m=8, n=240//8, t=2),
dict(pb=0.5, m=8, n=240//8, t=3),
# BSC
dict(pb=None, m=8, n=240//8, t=1),
dict(pb=None, m=8, n=240//8, t=2),
dict(pb=None, m=8, n=240//8, t=3)]
plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-pe-vs-ober.png'))
plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-ebn0-vs-ober.png'))
params = [
# GBMM
dict(pb=0.5, m=8, n=120//8, t=1),
dict(pb=0.5, m=8, n=120//8, t=2),
dict(pb=0.5, m=8, n=120//8, t=3),
# BSC
dict(pb=None, m=8, n=120//8, t=1),
dict(pb=None, m=8, n=120//8, t=2),
dict(pb=None, m=8, n=120//8, t=3)]
plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-pe-vs-ober.png'))
plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-ebn0-vs-ober.png'))
#sys.exit()
# Short codes
params = [
# GBMM
dict(pe=1e-12, pb=0.5, m=5, n=240//5),
dict(pe=1e-12, pb=0.5, m=8, n=240//8),
# BSC
dict(pe=1e-12, pb=None, m=5, n=240//5),
dict(pe=1e-12, pb=None, m=8, n=240//8)]
plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, '240bits-x-vs-pmf.png'))
plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, '240bits-x-vs-pndc.png'))
plot_t_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-t-vs-ober.png'))
plot_r_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-r-vs-ober.png'))
# Very short codes
params = [
# GBMM
dict(pe=1e-12, pb=0.5, m=5, n=120//5),
dict(pe=1e-12, pb=0.5, m=8, n=120//8),
# BSC
dict(pe=1e-12, pb=None, m=5, n=120//5),
dict(pe=1e-12, pb=None, m=8, n=120//8)]
plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, '120bits-x-vs-pmf.png'))
plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, '120bits-x-vs-pndc.png'))
plot_t_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-t-vs-ober.png'))
plot |
from __future__ import print_function
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
class ImageMenu(Gtk.EventBox):
    """An image widget that toggles a small undecorated popup window
    (holding ``child``) beside itself when clicked, like a popup menu."""

    def __init__ (self, image, child):
        GObject.GObject.__init__(self)
        self.add(image)
        # The popup is a borderless, fixed-size, dialog-hinted toplevel.
        self.subwindow = Gtk.Window()
        self.subwindow.set_decorated(False)
        self.subwindow.set_resizable(False)
        self.subwindow.set_type_hint(Gdk.WindowTypeHint.DIALOG)
        self.subwindow.add(child)
        # Outline the popup after drawing; close it on outside clicks,
        # focus loss or a window-manager delete request.
        self.subwindow.connect_after("draw", self.__sub_onExpose)
        self.subwindow.connect("button_press_event", self.__sub_onPress)
        self.subwindow.connect("motion_notify_event", self.__sub_onMotion)
        self.subwindow.connect("leave_notify_event", self.__sub_onMotion)
        self.subwindow.connect("delete-event", self.__sub_onDelete)
        self.subwindow.connect("focus-out-event", self.__sub_onFocusOut)
        child.show_all()
        self.setOpen(False)
        self.connect("button_press_event", self.__onPress)

    def setOpen (self, isopen):
        """Show or hide the popup; when showing, position it just to the
        right of this widget in root-window coordinates."""
        self.isopen = isopen
        if isopen:
            # Walk up to the toplevel window for absolute positioning.
            topwindow = self.get_parent()
            while not isinstance(topwindow, Gtk.Window):
                topwindow = topwindow.get_parent()
            x, y = topwindow.get_window().get_position()
            x += self.get_allocation().x + self.get_allocation().width
            y += self.get_allocation().y
            self.subwindow.move(x, y)
        self.subwindow.props.visible = isopen
        self.set_state(self.isopen and Gtk.StateType.SELECTED or Gtk.StateType.NORMAL)

    def __onPress (self, self_, event):
        # A left click on the image toggles the popup.
        if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
            self.setOpen(not self.isopen)

    def __sub_setGrabbed (self, grabbed):
        # Grab pointer and keyboard while the cursor is outside the popup,
        # so the next outside click can be caught and used to close it.
        if grabbed and not Gdk.pointer_is_grabbed():
            Gdk.pointer_grab(self.subwindow.get_window(), True,
                             Gdk.EventMask.LEAVE_NOTIFY_MASK|
                             Gdk.EventMask.POINTER_MOTION_MASK|
                             Gdk.EventMask.BUTTON_PRESS_MASK,
                             None, None, Gdk.CURRENT_TIME)
            Gdk.keyboard_grab(self.subwindow.get_window(), True, Gdk.CURRENT_TIME)
        elif Gdk.pointer_is_grabbed():
            Gdk.pointer_ungrab(Gdk.CURRENT_TIME)
            Gdk.keyboard_ungrab(Gdk.CURRENT_TIME)

    def __sub_onMotion (self, subwindow, event):
        # Re-evaluate the grab whenever the pointer moves or leaves.
        a = subwindow.get_allocation()
        self.__sub_setGrabbed(not (0 <= event.x < a.width and 0 <= event.y < a.height))

    def __sub_onPress (self, subwindow, event):
        # A click outside the popup releases the grab and closes it.
        a = subwindow.get_allocation()
        if not (0 <= event.x < a.width and 0 <= event.y < a.height):
            Gdk.pointer_ungrab(event.time)
            self.setOpen(False)

    def __sub_onExpose (self, subwindow, ctx):
        # Stroke a 2px border around the popup in the theme's dark color.
        a = subwindow.get_allocation()
        context = subwindow.get_window().cairo_create()
        context.set_line_width(2)
        context.rectangle (a.x, a.y, a.width, a.height)
        sc = self.get_style_context()
        found, color = sc.lookup_color("p_dark_color")
        context.set_source_rgba(*color)
        context.stroke()
        self.__sub_setGrabbed(self.isopen)

    def __sub_onDelete (self, subwindow, event):
        self.setOpen(False)
        # Returning True keeps the popup window alive; it is only hidden.
        return True

    def __sub_onFocusOut (self, subwindow, event):
        self.setOpen(False)
def switchWithImage (image, dialog):
    """Replace ``image`` in its container with an ImageMenu that pops up
    ``dialog`` when the image is clicked."""
    container = image.get_parent()
    container.remove(image)
    menu = ImageMenu(image, dialog)
    container.add(menu)
    menu.show()
if __name__ == "__main__":
    # Demo: a window whose middle icon pops up a small settings table.
    win = Gtk.Window()
    vbox = Gtk.VBox()
    vbox.add(Gtk.Label(label="Her er der en kat"))
    image = Gtk.Image.new_from_icon_name("gtk-properties", Gtk.IconSize.BUTTON)
    vbox.add(image)
    vbox.add(Gtk.Label(label="Her er der ikke en kat"))
    win.add(vbox)
    # Popup contents: two labelled spin buttons in a 2x2 table.
    table = Gtk.Table(2, 2)
    table.attach(Gtk.Label(label="Minutes:"), 0, 1, 0, 1)
    spin1 = Gtk.SpinButton(Gtk.Adjustment(0,0,100,1))
    table.attach(spin1, 1, 2, 0, 1)
    table.attach(Gtk.Label(label="Gain:"), 0, 1, 1, 2)
    spin2 = Gtk.SpinButton(Gtk.Adjustment(0,0,100,1))
    table.attach(spin2, 1, 2, 1, 2)
    table.set_border_width(6)
    # Swap the plain image for an ImageMenu wrapping the table.
    switchWithImage(image, table)
    def onValueChanged (spin):
        # Echo spin-button changes to stdout for the demo.
        print(spin.get_value())
    spin1.connect("value-changed", onValueChanged)
    spin2.connect("value-changed", onValueChanged)
    win.show_all()
    win.connect("delete-event", Gtk.main_quit)
    Gtk.main()
|
# -*- coding: utf-8 -*-
"""
test_sphinx
~~~~~~~~~~~
General Sphinx test and check o | utput.
"""
import sys
import pytest
import sphinx
from ipypublish.sphinx.tests import get_test_source_dir
from ipypublish.tests.utils import HTML2JSONParser
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_basic"))
def test_basic(app, status, warning, get_sphinx_app_output, data_regression):
    """Build the basic bibgloss project and regression-check its HTML."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    assert warning.getvalue().strip() == ""
    html = get_sphinx_app_output(app, buildername="html")
    parser = HTML2JSONParser()
    parser.feed(html)
    # Sphinx 1 and 2 render slightly different HTML, so each major version
    # keeps its own regression fixture.
    basename = "test_basic_v2" if sphinx.version_info >= (2,) else "test_basic_v1"
    data_regression.check(parser.parsed, basename=basename)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_sortkeys"))
def test_sortkeys(app, status, warning, get_sphinx_app_output, data_regression):
    """Build the sort-keys bibgloss project and regression-check its HTML."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    assert warning.getvalue().strip() == ""
    parser = HTML2JSONParser()
    parser.feed(get_sphinx_app_output(app, buildername="html"))
    # Separate regression fixtures per Sphinx major version.
    suffix = "v2" if sphinx.version_info >= (2,) else "v1"
    data_regression.check(parser.parsed, basename="test_sortkeys_" + suffix)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_unsorted"))
def test_unsorted(app, status, warning, get_sphinx_app_output, data_regression):
    """Build the unsorted bibgloss project and regression-check its HTML."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    warnings = warning.getvalue().strip()
    assert warnings == ""
    output = get_sphinx_app_output(app, buildername="html")
    parser = HTML2JSONParser()
    parser.feed(output)
    # Separate regression fixtures per Sphinx major version.
    if sphinx.version_info >= (2,):
        data_regression.check(parser.parsed, basename="test_unsorted_v2")
    else:
        data_regression.check(parser.parsed, basename="test_unsorted_v1")
@pytest.mark.sphinx(
    buildername="html", srcdir=get_test_source_dir("bibgloss_missingref")
)
def test_missingref(app, status, warning, get_sphinx_app_output):
    """A reference to a key absent from the glossary must emit a warning."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    warnings = warning.getvalue().strip()
    # Sphinx >= 2 and Sphinx < 2 word the warning differently; accept either.
    expected = (
        "could not relabel bibglossary reference [missingkey]",  # sphinx >= 2
        "WARNING: citation not found: missingkey",  # sphinx < 2
    )
    if not any(text in warnings for text in expected):
        raise AssertionError(
            "should raise warning for missing citation `missingkey`: {}".format(
                warnings
            )
        )
@pytest.mark.sphinx(
    buildername="html", srcdir=get_test_source_dir("bibgloss_duplicatekey")
)
def test_duplicatekey(app, status, warning, get_sphinx_app_output):
    """Duplicate glossary keys must abort the build with a KeyError."""
    with pytest.raises(KeyError):
        app.build()
@pytest.mark.skipif(
    sys.version_info < (3, 0),
    reason="SyntaxError on import of texsoup/data.py line 135",
)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_tex"))
def test_load_tex(app, status, warning, get_sphinx_app_output):
    """A TeX-sourced glossary builds cleanly with no warnings."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    warnings = warning.getvalue().strip()
    assert warnings == ""
|
#! /usr/bin/env python
# -*- coding: UTF8 -*-
# Este arquivo é parte do programa Carinhas
# Copyright 2013-2014 Carlo Oliveira <carlo@nce.ufrj.br>,
# `Labase <http://labase.selfip.org/>`__; `GPL <http://is.gd/3Udt>`__.
#
# Carinhas é um software livre; você pode redistribuí-lo e/ou
# modificá-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença.
#
# Este programa é distribuído na esperança de que possa ser útil,
# mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO
# a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, escreva para a Fundação do Software
# Livre(FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
############################################################
SuperPython - Teste de Funcionalidade Web
############################################################
Verifica a funcionalidade do servidor web.
"""
__author__ = 'carlo'

# stdlib imports, deduplicated (sys and os were each imported twice).
import os
import sys
import unittest

import bottle

# Make the application sources (../src) importable from the test directory.
project_server = os.path.dirname(os.path.abspath(__file__))
project_server = os.path.join(project_server, '../src/')
# print(project_server)
sys.path.insert(0, project_server)
# make sure the default templates directory is known to Bottle
templates_dir = os.path.join(project_server, 'server/views/')
# print(templates_dir)
if templates_dir not in bottle.TEMPLATE_PATH:
    bottle.TEMPLATE_PATH.insert(0, templates_dir)
# ``mock`` is a third-party package on Python 2 but ships inside unittest
# on Python 3 (where ANY is also available).
if sys.version_info[0] == 2:
    from mock import MagicMock, patch
else:
    from unittest.mock import MagicMock, patch, ANY
from webtest import TestApp
from server.control import application as appbottle
import server.modelo_redis as cs
import server.control as ct
class FunctionalWebTest(unittest.TestCase):
    """End-to-end tests of the Bottle web application through WebTest."""

    def setUp(self):
        # Point the redis-backed model at a throwaway test database file.
        cs.DBF = '/tmp/redis_test.db'
        pass

    def test_default_page(self):
        """The static index page is served and carries the expected title."""
        app = TestApp(appbottle)
        response = app.get('/static/index.html')
        self.assertEqual('200 OK', response.status)
        self.assertTrue('<title>Jogo Eica - Cadastro</title>' in response.text, response.text[:1000])

    def test_default_redirect(self):
        """The root URL answers with a 302 redirect."""
        app = TestApp(appbottle)
        response = app.get('/')
        self.assertEqual('302 Found', response.status)

    def test_register(self):
        """Registering a user stores a record retrievable by its id."""
        # app = TestApp(appbottle)
        # response = app.get('/static/register?doc_id="10000001"&module=projeto2222')
        rec_id, response = self._get_id('3333')
        self.assertEqual('200 OK', response.status)
        self.assertTrue(rec_id in response, str(response))
        # rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071'
        expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015',"
        received_record = cs.DRECORD.get(rec_id)
        assert expected_record in str(received_record),\
            "{}: {}".format(rec_id, received_record)

    def _get_id(self, ref_id='e0cb4e39e071', url='/static/register?doc_id="10000001"&module=projeto2222'):
        """Register a fixed test user and return ``(record_id, response)``.

        The record id is scraped from the ``ver = main("<id>")`` call
        embedded in the returned page.
        """
        app = TestApp(appbottle)
        user, idade, ano, sexo = 'projeto2222-lastcodename', '00015', '0009', 'outro'
        user_data = dict(doc_id=ref_id, user=user, idade=idade, ano=ano, sexo=sexo)
        response = app.get(url, params=user_data)
        return str(response).split('ver = main("')[1].split('")')[0], response

    def test_store(self):
        """Posting a game move to /record/store echoes the stored record."""
        app = TestApp(appbottle)
        # response = app.get('/static/register?doc_id="10000001"&module=projeto2222')
        # rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071'
        rec_id, _ = self._get_id()
        response = app.post('/record/store', self._pontua(rec_id))
        self.assertEqual('200 OK', response.status)
        self.assertTrue('", "tempo": "20' in response, str(response))
        # self.assertTrue('{"module": "projeto2222", "jogada": [{"carta": "2222",' in str(response), str(response))
        expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015',"
        received_record = str(response)
        assert expected_record.replace("'", '"') in received_record,\
            "{}: {}".format(rec_id, received_record)

    def _pontua(self, ref_id):
        """Build a scoring payload for record ``ref_id`` (all fields 2222)."""
        ct.LAST = ref_id
        jogada = {"doc_id": ref_id,
                  "carta": 2222,
                  "casa": 2222,
                  "move": 2222,
                  "ponto": 2222,
                  "tempo": 2222,
                  "valor": 2222}
        return jogada

    def test_pontos(self):
        """The /pontos page renders the registered user's score table."""
        rec_id, response = self._get_id()
        app = TestApp(appbottle)
        app.post('/record/store', self._pontua(rec_id))
        ct.LAST = rec_id
        response = app.get('/pontos')
        self.assertEqual('200 OK', response.status)
        self.assertTrue('projeto2222-lastcodename' in response, str(response))
        self.assertTrue('<h3>Idade: 10 Genero: outro Ano Escolar: 9</h3>' in response, str(response))
        self.assertTrue('<td><span>2222<span></td>' in response, str(response))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
from tim | e import time
fr | om pychess.Utils.lutils.lmovegen import genAllMoves
from pychess.Utils.lutils.lmove import toLAN
def do_perft(board, depth, root):
    """Count the leaf nodes of the legal-move tree to ``depth`` plies.

    While ``root`` is positive a per-move subtotal line is printed,
    mirroring the "divide" output of other perft tools.
    """
    if depth == 0:
        return 1
    total = 0
    for move in genAllMoves(board):
        board.applyMove(move)
        # Discard pseudo-legal moves that leave our own king in check.
        if board.opIsChecked():
            board.popMove()
            continue
        subtotal = do_perft(board, depth - 1, root - 1)
        total += subtotal
        board.popMove()
        if root > 0:
            print("%8s %10d %10d" % (toLAN(board, move), subtotal, total))
    return total
def perft(board, depth, root):
    """Run ``do_perft`` for every depth from 1 to ``depth``, printing the
    node count, elapsed time and nodes-per-second for each."""
    for ply in range(1, depth + 1):
        started = time()
        total = do_perft(board, ply, root)
        elapsed = time() - started
        # Avoid dividing by zero when a shallow search finishes instantly.
        rate = total / elapsed if elapsed > 0 else total
        print("%2d %10d %5.2f %12.2fnps" % (ply, total, elapsed, rate))
|
#!/usr/bin/python
"""Quick benchmark: time how long weather.hourly.load("ottawa") takes."""
from __future__ import print_function

import time

import weather

# BUG FIX: the original used a Python 2 print *statement* after importing
# print_function, which makes the whole file a SyntaxError; print must be
# called as a function here.
start = time.time()
weather.hourly.load("ottawa")
print(time.time() - start)
# Python 2 only: keep the console window open until the user presses Enter.
raw_input()
|
def test_expressions_requires_index_name(self):
msg = 'An index must be named to use expressions.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(Lower('field'))
def test_expressions_with_opclasses(self):
msg = (
'Index.opclasses cannot be used with expressions. Use '
'django.contrib.postgres.indexes.OpClass() instead.'
)
with self.assertRaisesMessage(ValueError, msg):
models.Index(
Lower('field'),
name='test_func_opclass',
opclasses=['jsonb_path_ops'],
)
def test_condition_must_be_q(self):
with self.assertRaisesMessage(ValueError, 'Index.condition must be a Q instance.'):
models.Index(condition='invalid', name='long_book_idx')
def test_include_requires_list_or_tuple(self):
msg = 'Index.include must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(name='test_include', fields=['field'], include='other')
def test_include_requires_index_name(self):
msg = 'A covering index must be named.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['field'], include=['other'])
    def test_name_auto_generation(self):
        """Index names are derived from the model, column name(s) and a
        short hash, with a 3-character suffix limit."""
        index = models.Index(fields=['author'])
        index.set_name_with_model(Book)
        self.assertEqual(index.name, 'model_index_author_0f5565_idx')
        # '-' for DESC columns should be accounted for in the index name.
        index = models.Index(fields=['-author'])
        index.set_name_with_model(Book)
        self.assertEqual(index.name, 'model_index_author_708765_idx')
        # fields may be truncated in the name. db_column is used for naming.
        long_field_index = models.Index(fields=['pages'])
        long_field_index.set_name_with_model(Book)
        self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx')
        # suffix can't be longer than 3 characters.
        long_field_index.suffix = 'suff'
        msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?'
        with self.assertRaisesMessage(AssertionError, msg):
            long_field_index.set_name_with_model(Book)
    @isolate_apps('model_indexes')
    def test_name_auto_generation_with_quoted_db_table(self):
        """Quotes around ``db_table`` don't leak into generated index names."""
        class QuotedDbTable(models.Model):
            name = models.CharField(max_length=50)

            class Meta:
                db_table = '"t_quoted"'

        index = models.Index(fields=['name'])
        index.set_name_with_model(QuotedDbTable)
        self.assertEqual(index.name, 't_quoted_name_e4ed1b_idx')
def test_deconstruction(sel | f):
index = models.Index(fields=['title'], db_tablespace='idx_tbls')
index.set_name_with_model(Book)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.db.models.Index')
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{'fields': ['title'], 'name': 'model_index_title_196f42_idx', 'db_tablespace': 'idx_tbls'}
)
    def test_deconstruct_with_condition(self):
        """A partial index's Q condition round-trips through deconstruct()."""
        index = models.Index(
            name='big_book_index',
            fields=['title'],
            condition=models.Q(pages__gt=400),
        )
        index.set_name_with_model(Book)
        path, args, kwargs = index.deconstruct()
        self.assertEqual(path, 'django.db.models.Index')
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs,
            {
                'fields': ['title'],
                'name': 'model_index_title_196f42_idx',
                'condition': models.Q(pages__gt=400),
            }
        )
    def test_deconstruct_with_include(self):
        """Covered (include) columns round-trip through deconstruct() as a
        tuple."""
        index = models.Index(
            name='book_include_idx',
            fields=['title'],
            include=['author'],
        )
        index.set_name_with_model(Book)
        path, args, kwargs = index.deconstruct()
        self.assertEqual(path, 'django.db.models.Index')
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs,
            {
                'fields': ['title'],
                'name': 'model_index_title_196f42_idx',
                'include': ('author',),
            },
        )
def test_deconstruct_with_expressions(self):
index = models.Index(Upper('title'), name='book_func_idx')
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.db.models.Index')
self.assertEqual(args, (Upper('title'),))
self.assertEqual(kwargs, {'name': 'book_func_idx'})
def test_clone(self):
index = models.Index(fields=['title'])
new_index = index.clone()
self.assertIsNot(index, new_index)
self.assertEqual(index.fields, new_index.fields)
def test_clone_with_expressions(self):
index = models.Index(Upper('title'), name='book_func_idx')
new_index = index.clone()
self.assertIsNot(index, new_index)
self.assertEqual(index.expressions, new_index.expressions)
    def test_name_set(self):
        """Both auto-generated and explicitly-set index names appear on the
        model's Meta.indexes."""
        index_names = [index.name for index in Book._meta.indexes]
        self.assertCountEqual(
            index_names,
            [
                'model_index_title_196f42_idx',
                'model_index_isbn_34f975_idx',
                'model_indexes_book_barcode_idx',
            ],
        )
    def test_abstract_children(self):
        """Each concrete child of an abstract model gets its own generated
        index names (hash differs per model)."""
        index_names = [index.name for index in ChildModel1._meta.indexes]
        self.assertEqual(
            index_names,
            ['model_index_name_440998_idx', 'model_indexes_childmodel1_idx'],
        )
        index_names = [index.name for index in ChildModel2._meta.indexes]
        self.assertEqual(
            index_names,
            ['model_index_name_b6c374_idx', 'model_indexes_childmodel2_idx'],
        )
class IndexesTests(TestCase):
@skipUnlessDBFeature('supports_tablespaces')
def test_db_tablespace(self):
editor = connection.schema_editor()
# Index with db_tablespace attribute.
for fields in [
# Field with db_tablespace specified on model.
['shortcut'],
# Field without db_tablespace specified on model.
['author'],
# Multi-column with db_tablespaces specified on model.
['shortcut', 'isbn'],
# Multi-column without db_tablespace specified on model.
['title', 'author'],
]:
with self.subTest(fields=fields):
index = models.Index(fields=fields, db_tablespace='idx_tbls2')
self.assertIn('"idx_tbls2"', str(index.create_sql(Book, editor)).lower())
# Indexes without db_tablespace attribute.
for fields in [['author'], ['shortcut', 'isbn'], ['title', 'author']]:
with self.subTest(fields=fields):
index = models.Index(fields=fields)
# The DEFAULT_INDEX_TABLESPACE setting can't be tested because
# it's evaluated when the model class is defined. As a
# consequence, @override_settings doesn't work.
if settings.DEFAULT_INDEX_TABLESPACE:
self.assertIn(
'"%s"' % settings.DEFAULT_INDEX_TABLESPACE,
str(index.create_sql(Book, editor)).lower()
)
else:
self.assertNotIn('TABLESPACE', str(index.create_sql(Book, editor)))
# Field with db_tablespace specified on the model and an index without
# db_tablespace.
index = models.Index(fields=['shortcut'])
self.assertIn('"idx_tbls"', str(index.create_sql(Book, editor)).lower())
@skipUnlessDBFeature('supports_tablespaces')
def test_func_with_tablespace(self):
# Functional index with db_tablespace attribute.
index = models.Index(
Lower('shortcut').desc(),
name='functional_tbls',
db_tablespace='idx_tbls2',
)
with connection.schema_editor() as editor:
|
# -*- coding: utf-8 -*-
#
# pynest_api_template.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General P | ublic License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""[[ This template demonstrates how to create | a docstring for the PyNEST API.
If you have modified an API, please ensure you update the docstring!
The format is based on `NumPy style docstring
<https://numpydoc.readthedocs.io/en/latest/format.html>`_ and uses
reStructuredText markup. Please review the syntax rules if you are
unfamiliar with either reStructuredText or NumPy style docstrings.
Copy this file and replace the sample text with a description of the API.
The double bracketed sections [[ ]], which provide explanations, should be
completely removed from your final version - Including this entire
docstring!
]]
"""
def GetConnections(source=None, target=None, synape_model=None, synapse_label=None):
    """Return a `SynapseCollection` representing the connection identifiers.
    [[ In a single 'summary line', state what the function does ]]
    [[ All functions should have a docstring with at least a summary line ]]
    [[ Below summary line (separated by new line), there should be an extended
    summary section that should be used to clarify functionality.]]
    Any combination of `source`, `target`, `synapse_model` and
    `synapse_label` parameters is permitted.
    [[ Deprecation warnings should appear directly after the extended summary.
    It should state in what version the object was deprecated, when it will
    be removed and what recommended way obtains the same functionality]]
    .. deprecated:: 1.6.0
        `ndobj_old` will be removed in NumPy 2.0.0, it is replaced by
        `ndobj_new` because the latter works also with array subclasses.
    [[ For all headings ensure the underline --- is at least the length of the
    heading ]]
    Parameters
    ----------
    source : NodeCollection, optional
        Source node IDs, only connections from these
        pre-synaptic neurons are returned
    target : NodeCollection, optional
        Target node IDs, only connections to these
        postsynaptic neurons are returned
    synapse_model : str, optional
        Only connections with this synapse type are returned
    synapse_label : int, optional
        (non-negative) only connections with this synapse label are returned
    Returns
    -------
    SynapseCollection:
        Object representing the source-node_id, target-node_id, target-thread, synapse-id, port of connections, see
        :py:class:`.SynapseCollection` for more.
    Raises
    -------
    TypeError
    Notes
    -------
    Details on the connectivity. [[ Here details regarding the code or further
    explanations can be included. This section may include mathematical
    equations, written in LaTeX format. You can include references to relevant
    papers using the reStructuredText syntax. Do not include model formulas ]]
    The discrete-time Fourier time-convolution [1]_ property states that
    .. math::
        x(n) * y(n) \Leftrightarrow X(e^{j\omega } )Y(e^{j\omega } )
    The value of :math:`\omega` is larger than 5.
    [[ The See Also section should include 2 or 3 related functions. ]]
    See Also
    ---------
    func_a : Function a with its description.
    func_b, func_c
    References
    -----------
    [[ Note the format of the reference. No bold nor italics is used. Last name
    of author(s) followed by year, title in sentence case and full name of
    journal followed by volume and page range. Include the doi if
    applicable.]]
    .. [1] Bonewald LF. (2011). The amazing osteocyte. Journal of Bone and
           Mineral Research 26(2):229–238. DOI: 10.1002/jbmr.320.
    """
    # NOTE(review): the parameter is spelled `synape_model` while the docstring
    # documents `synapse_model` — confirm the intended spelling; renaming would
    # change the keyword-argument interface for callers.
    # [[ in line comments should be used to explain why this code is here]]
    # This code was included because of bug Y when running X
    # Temporary, I HOPE HOPE HOPE
    # NOTE(review): `model`, `syn_spec` and `kernel` are not defined in this
    # template snippet — they are illustrative placeholders only.
    if model is not None and syn_spec is not None:
        raise kernel.NESTerror(
            "'model' is an alias for 'syn_spec' and cannot"
            " be used together with 'syn_spec'.")
|
# Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF A | NY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.evaluator.template_functions import function_resolver
from vitrage.evaluator.template_functions import GET_PARAM
from vitrage.evaluator.template_functions.v2.functions import get_param
from vitrage.evaluator.template_validation.base import get | _custom_fault_result
from vitrage.evaluator.template_validation.base import ValidationError
from vitrage.evaluator.template_validation.content.base import \
get_content_correct_result
class GetParamValidator(object):
    """Validates every get_param() usage in a template against the
    actual parameters supplied for it."""

    @classmethod
    def validate(cls, template, actual_params):
        """Resolve all get_param calls in `template`.

        Returns a correct-content result on success, or a fault result
        built from the ValidationError raised by the resolver.
        """
        func_info = function_resolver.FuncInfo(
            name=GET_PARAM, func=get_param, error_code=0)
        try:
            function_resolver.validate_function(
                func_info, template, actual_params=actual_params)
        except ValidationError as err:
            return get_custom_fault_result(err.code, err.details)
        return get_content_correct_result()
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific l | anguage governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_ | exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lldpparam(base_resource) :
    """ Configuration for lldp params resource.

    Idiom cleanup: the generated ``try: ... except Exception as e: raise e``
    wrappers were removed throughout — they re-raised the same exception
    unchanged and only obscured the logic. Behavior is otherwise unchanged.
    """
    def __init__(self) :
        # Backing fields for the three configurable LLDP parameters.
        self._holdtimetxmult = 0
        self._timer = 0
        self._mode = ""

    @property
    def holdtimetxmult(self) :
        """A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20.
        """
        return self._holdtimetxmult

    @holdtimetxmult.setter
    def holdtimetxmult(self, holdtimetxmult) :
        """A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20
        """
        self._holdtimetxmult = holdtimetxmult

    @property
    def timer(self) :
        """Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000.
        """
        return self._timer

    @timer.setter
    def timer(self, timer) :
        """Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000
        """
        self._timer = timer

    @property
    def mode(self) :
        """Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, TRANSMITTER, RECEIVER, TRANSCEIVER.
        """
        return self._mode

    @mode.setter
    def mode(self, mode) :
        """Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, TRANSMITTER, RECEIVER, TRANSCEIVER
        """
        self._mode = mode

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        result = service.payload_formatter.string_to_resource(lldpparam_response, response, self.__class__.__name__)
        if result.errorcode != 0 :
            # Error code 444 means the session expired: drop it before raising.
            if result.errorcode == 444 :
                service.clear_session(self)
            if result.severity :
                if result.severity == "ERROR" :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.lldpparam

    def _get_object_name(self) :
        """ Returns the value of object identifier argument.
        This resource carries no name identifier, hence None.
        """
        return None

    @classmethod
    def update(cls, client, resource) :
        """ Use this API to update lldpparam.
        """
        if type(resource) is not list :
            updateresource = lldpparam()
            updateresource.holdtimetxmult = resource.holdtimetxmult
            updateresource.timer = resource.timer
            updateresource.mode = resource.mode
            return updateresource.update_resource(client)
        # NOTE(review): a list argument falls through and returns None,
        # matching the original generated behavior.

    @classmethod
    def unset(cls, client, resource, args) :
        """ Use this API to unset the properties of lldpparam resource.
        Properties that need to be unset are specified in args array.
        """
        if type(resource) is not list :
            unsetresource = lldpparam()
            return unsetresource.unset_resource(client, args)

    @classmethod
    def get(cls, client, name="", option_="") :
        """ Use this API to fetch all the lldpparam resources that are configured on netscaler.
        """
        if not name :
            obj = lldpparam()
            response = obj.get_resources(client, option_)
            return response
        # NOTE(review): a non-empty `name` silently returns None — confirm
        # whether a by-name fetch was ever intended here.

    class Mode:
        # Allowed values for the `mode` property.
        NONE = "NONE"
        TRANSMITTER = "TRANSMITTER"
        RECEIVER = "RECEIVER"
        TRANSCEIVER = "TRANSCEIVER"
class lldpparam_response(base_response) :
    """ Nitro response wrapper for lldpparam: holds the decoded resource
    list plus the status fields of the reply. """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the result list. (The original assigned `self.lldpparam = []`
        # first and immediately overwrote it — dead store removed.)
        self.lldpparam = [lldpparam() for _ in range(length)]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from flask_script import Manager, Shell, Server
from flask_migrate import MigrateCommand
from gakkgakk.app import create_app
from gakkgakk.models impo | rt User
from gakkgakk.settings import DevConfig, ProdConfig
from gakkgakk.database import db
# Python 2-only hack: re-expose sys.setdefaultencoding (hidden by site.py)
# so implicit str<->unicode conversions use UTF-8. Not portable to Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

# This entry point always boots with the production config. DevConfig is
# imported but unused here — presumably selected elsewhere; TODO confirm.
app = create_app(ProdConfig)

# Absolute paths so the commands work regardless of the current directory.
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')

manager = Manager(app)
def _make_context():
    """Build the default namespace for `manage.py shell` sessions.

    Exposes the Flask app, the database handle and the User model so they
    are available in the shell without manual imports.
    """
    context = dict(app=app, db=db, User=User)
    return context
@manager.command
def test():
    """Run the test suite under pytest and return its exit code."""
    import pytest
    return pytest.main([TEST_PATH, '--verbose'])
# Register CLI commands: a threaded dev server reachable from other hosts
# (binds 0.0.0.0), a shell pre-loaded with app/db/User, and the Flask-Migrate
# database commands.
manager.add_command('server', Server(host='0.0.0.0', threaded=True))
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
|
from fnmatch import fnmatchcase
from trac.config import Option
from trac.core import *
from trac.perm import IPermissionPolicy
# SVN keyword-expanded metadata recording this sample plugin's origin.
revision = "$Rev: 11490 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/permissions/public_wiki_policy.py $"
class PublicWikiPolicy(Component):
    """Allow public access to some wiki pages.

    Sample permission policy plugin illustrating how to check permission
    on realms. Remember to insert the plugin at the appropriate place in
    the list of permission policies:
    {{{
    [trac]
    permission_policies = PublicWikiPolicy, DefaultPermissionPolicy
    }}}
    Then configure which pages should be public:
    {{{
    [public_wiki]
    view = Public*
    modify = PublicSandbox/*
    }}}
    """

    implements(IPermissionPolicy)

    view = Option('public_wiki', 'view', 'Public*',
        """Case-sensitive glob pattern used for granting view permission on
        all Wiki pages matching it.""")

    modify = Option('public_wiki', 'modify', 'Public*',
        """Case-sensitive glob pattern used for granting modify permissions
        on all Wiki pages matching it.""")

    def check_permission(self, action, username, resource, perm):
        """Grant WIKI_* actions on pages matching the configured globs.

        Returns True to grant, None to defer to the next policy.
        """
        if not resource:
            # Legacy coarse-grained check: no resource supplied, the realm
            # is encoded in the action name itself.
            if action.startswith('WIKI_'):
                # this policy ''may'' grant permissions on some wiki pages
                return True
            return None
        if resource.realm != 'wiki':
            return None  # not our realm; let other policies decide
        if not resource.id:
            # Realm-level check: we may grant something within the realm.
            return True
        # Resource-level check: pick the glob matching the action kind
        # ('WIKI_VIEW' uses `view`, everything else uses `modify`).
        pattern = self.view if action == 'WIKI_VIEW' else self.modify
        if fnmatchcase(resource.id, pattern):
            return True
        return None
|
#!/usr/bin/env python
from settings import Settings
from scan import Scanner
from logger import Logg | er
def main():
    """Read settings, wire up logging, and run the scanner until interrupted.

    Ctrl-C stops the scanner cleanly; if the interrupt arrives before the
    scanner has been constructed there is nothing to stop.
    """
    scanner = None
    try:
        # Read config file
        settings = Settings()
        # Set up logger
        logger = Logger(settings)
        # Create scanner
        scanner = Scanner(settings, logger)
        # Begin scanning (blocks until interrupted)
        scanner.StartScanning()
    except KeyboardInterrupt:
        # Bug fix: `scanner` was previously unbound here when the interrupt
        # fired before Scanner() was created, raising NameError.
        if scanner is not None:
            scanner.StopScanning()


if __name__ == "__main__":
    main()
|
aise a
"""
return (expectation, variance)
def _ftl_jump(self, y, K, **kwargs):
    r"""
    Jump to a totally new mixture of K gaussians.

    Runs 30 k-means++ restarts, fits each candidate mixture by E-M, and
    keeps the one with the shortest message length. The chosen mixture is
    recorded in ``self._proposed_mixtures`` together with its predictors.

    Bug fixes relative to the original:
    - a stray debug ``raise UnsureError`` aborted the loop on its first
      iteration;
    - an unreachable ``raise a`` after the return was removed;
    - the returned (R, meta) now belong to the *best* candidate instead of
      whichever restart happened to run last;
    - the debug ``print`` is now a logger call.

    Returns (mixture, responsibility, meta) for the best candidate.
    """
    logger.debug("Re-initializing with K-means++ at K = {}".format(K))
    N, D = y.shape
    candidates = []
    for _ in range(30):
        # Initialize new centroids by k-means++.
        mean = kmeans._k_init(y, K, kmeans.row_norms(y, squared=True),
            kmeans.check_random_state(None))
        # Hard-assign each point to its closest centroid (L2 distance).
        distance = np.sum((y[:, :, None] - mean.T)**2, axis=1).T
        responsibility = np.zeros((K, N))
        responsibility[np.argmin(distance, axis=0), np.arange(N)] = 1.0
        weight = responsibility.sum(axis=1)/N
        covariance = _estimate_covariance_matrix(y, responsibility, mean,
            self.covariance_type, self.covariance_regularization)
        mixture = self.__class__(
            threshold=self.threshold,
            covariance_type=self.covariance_type,
            max_em_iterations=self.max_em_iterations,
            covariance_regularization=self.covariance_regularization)
        # Initialize it.
        mixture.set_parameters(mean=mean, weight=weight, covariance=covariance)
        # Run E-M on the partial mixture.
        R, meta = mixture._expectation_maximization(
            y, parent_responsibility=responsibility)
        candidates.append((meta["message_length"], mixture, R, meta))
    mls = [entry[0] for entry in candidates]
    logger.debug("Message-length spread over restarts: {}".format(np.std(mls)))
    # Keep the candidate with the shortest message length.
    _, mixture, R, meta = candidates[np.argmin(mls)]
    slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
    self._proposed_mixtures.append(mixture)
    self._mixture_predictors.append([
        mixture.weight.size,
        np.sum(np.log(mixture.weight)),
        meta["log_likelihood"],
        slogdet,
        -meta["log_likelihood"] + (D+2)/2.0 * slogdet
    ])
    # TODO: Remove predictors that we don't use.
    return mixture, R, meta
def _merge_component_with_closest_component(self, y, responsibility, index, **kwargs):
    """Merge component `index` with its most similar component.

    Bug fix: `index_b` was referenced but never assigned (NameError at
    runtime). It is now computed the same way sibling method
    `_optimize_merge_mixture` computes its partner component.

    Returns (mixture, responsibility, meta) from the merge.
    """
    index_b = _index_of_most_similar_component(
        y, self.mean, self.covariance, index)
    R, meta, mixture = _merge_components(y, self.mean, self.covariance, self.weight,
        responsibility, index, index_b, **kwargs)
    return mixture, R, meta
def _component_kl_distances(self):
    r"""
    Calculate pairwise Kullback-Leibler divergences between all current
    components and return (i, j) index pairs ordered from most to least
    similar, excluding self-pairs.

    Bug fix: an unreachable ``return foo`` after the final return was removed.
    """
    K = self.weight.size
    if K == 1:
        # Nothing to compare. (NOTE(review): this branch returns a list
        # while the main path returns a tuple — kept for compatibility.)
        return ([])
    kl = np.inf * np.ones((K, K))
    for i in range(K):
        for j in range(i + 1, K):
            # KL divergence is asymmetric: fill both directions explicitly.
            kl[i, j] = kullback_leibler_for_multivariate_normals(
                self.mean[i], self.covariance[i],
                self.mean[j], self.covariance[j])
            kl[j, i] = kullback_leibler_for_multivariate_normals(
                self.mean[j], self.covariance[j],
                self.mean[i], self.covariance[i])
    # For each component, pair it with its nearest neighbour (smallest KL
    # in that row), then sort the pairs by their KL values.
    indices = list(zip(*(np.arange(K), np.argsort(kl, axis=1).T[0])))
    _ = np.array(indices).T
    sorted_indices = np.argsort(kl[_[0], _[1]])
    return tuple([indices[_] for _ in sorted_indices if indices[_][0] != indices[_][1]])
def _optimize_merge_mixture(self, y, responsibility, a_index):
    """Merge component `a_index` with its most similar component and
    re-optimize the reduced mixture by E-M.

    The resulting mixture and its predictors are appended to
    ``self._proposed_mixtures`` / ``self._mixture_predictors``.

    Bug fix: an unreachable ``raise a`` after the return was removed.

    Returns (proposal_index, responsibility, meta), where proposal_index
    points into ``self._proposed_mixtures``.
    """
    b_index = _index_of_most_similar_component(y,
        self.mean, self.covariance, a_index)
    # Initialize the merged component from the two originals.
    weight_k = np.sum(self.weight[[a_index, b_index]])
    responsibility_k = np.sum(responsibility[[a_index, b_index]], axis=0)
    effective_membership_k = np.sum(responsibility_k)
    mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
    covariance_k = _estimate_covariance_matrix(
        y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
        self.covariance_type, self.covariance_regularization)
    # Delete the higher-indexed component and overwrite the lower-indexed
    # one with the merged parameters.
    del_index = np.max([a_index, b_index])
    keep_index = np.min([a_index, b_index])
    new_mean = np.delete(self.mean, del_index, axis=0)
    new_covariance = np.delete(self.covariance, del_index, axis=0)
    new_weight = np.delete(self.weight, del_index, axis=0)
    new_responsibility = np.delete(responsibility, del_index, axis=0)
    new_mean[keep_index] = mean_k
    new_covariance[keep_index] = covariance_k
    new_weight[keep_index] = weight_k
    new_responsibility[keep_index] = responsibility_k
    mixture = self.__class__(
        threshold=1e-3,  # MAGIC: hard-coded instead of self.threshold
        covariance_type=self.covariance_type,
        max_em_iterations=self.max_em_iterations,
        covariance_regularization=self.covariance_regularization)
    mixture.set_parameters(mean=new_mean, weight=new_weight,
        covariance=new_covariance)
    R, meta = mixture._expectation_maximization(
        y, responsibility=new_responsibility)
    N, D = y.shape
    # Record the proposal and its predictors for later model selection.
    slogdet = np.sum(_slogdet(mixture.covariance, mixture.covariance_type))
    self._proposed_mixtures.append(mixture)
    self._mixture_predictors.append([
        mixture.weight.size,
        np.sum(np.log(mixture.weight)),
        meta["log_likelihood"],
        slogdet,
        -meta["log_likelihood"] + (D+2)/2.0 * slogdet
    ])
    # TODO: Remove predictors that we don't use.
    return (len(self._proposed_mixtures) - 1, R, meta)
def _consider_merging_components(self, y, responsibility, current_I):
for i, j in self._component_kl_distances():
# Initialize the merge.
weight_k = np.sum(self.weight[[i, j]])
responsibility_k = np.sum(responsibility[[i, j]], axis=0)
effective_membership_k = np.sum(responsibility_k)
mean_k = np.sum(responsibility_k * y.T, axis=1) / effective_membership_k
covariance_k = _estimate_covariance_matrix(
y, np.atleast_2d(responsibility_k), np.atleast_2d(mean_k),
self.covariance_type, self.covariance_regularization)
del_index = np.max([i, j])
keep_index = np.min([i, j])
new_mean = np.delete(self.mean, del_index, axis=0)
new_covariance = np.delete(self.covariance, del_index, axis=0)
new_weight = np.delete(self.weight, del_index, axis=0)
new_responsibility = np.delete(responsibility, del_index, axis=0)
new_mean[keep_index] = mean_k
new_covariance[keep_index] = covariance_k
new_weight[keep_index] = weight_k
new_responsibility[keep_index] = responsibility_k
mixture = self.__class__(
threshold=1e-3, # MAGICself.threshold,
covariance_type=self.covariance_type,
max_em_iterations=self.max_em_iterations,
covariance_regularization=self.covariance_regularization)
mixture.set_parameters(mean=new_mean, weight=new_weight,
covariance=new_covariance)
# Calculate message length.
R, ll, I = mixture._expectation(y)
logger.debug("Considered merging {} {} --> {}".format(i, j, I))
if I < current_I:
logger.debug("omg this is better! ({} < {})".format(
|
from constants import constants, callback_name_list
from controller import plan_controller, navigable_list_controller, navigable_inline_keyboard_controller, settings_controller
from telepot.namedtuple import ReplyKeyboardRemove
from bot import bot
from decorators.callback import callback_dict as callback_list
"""
callback_list = {
callback_name_list["setting"]: settings_controller.set_settings,
}
"""
def handle_callback_data(msg, action_prefix):
    """Dispatch a Telegram callback query to the first matching handler.

    Looks up `callback_list` for a key the callback data starts with,
    invokes the handler, and records the handler's answer (or " ") as the
    chat's action prefix. Unknown callbacks get an error message. The
    callback query is always answered so the client stops its spinner.
    """
    callback_data = msg['data']
    # Captioned messages (photos etc.) carry their text in 'caption'.
    # NOTE(review): `message` is computed but never used — kept because the
    # dict lookups would raise KeyError on malformed updates.
    message = msg['message']['text'] if 'text' in msg['message'] else msg['message']['caption']
    chat_id = msg['message']['chat']['id']
    callback_query_id = msg['id']
    inline_message_id = msg['message']["from"]["id"]
    message_id = msg['message']['message_id']
    for callback in callback_list:
        if callback_data.startswith(callback):
            answer = callback_list[callback](callback_query_id, callback_data, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id)
            # Idiom fix: compare to None with `is`, not `==`.
            if answer is None:
                action_prefix[chat_id] = " "
            else:
                action_prefix[chat_id] = answer
            break
    else:
        # No handler matched the callback data.
        bot.sendMessage(chat_id, constants["callbackNotFound"], reply_markup=ReplyKeyboardRemove())
        action_prefix[chat_id] = " "
    bot.answerCallbackQuery(callback_query_id)
f _GetSelectedLineNumbers(self):
# used for the comment/uncomment machinery from ActiveGrid
selStart, selEnd = self._GetPositionsBoundingSelectedLines()
start = self.LineFromPosition(selStart)
end = self.LineFromPosition(selEnd)
if selEnd == self.GetTextLength():
end += 1
return list(range(start, end))
def _GetPositionsBoundingSelectedLines(self):
    """Return (selStart, selEnd) positions spanning whole selected lines.

    Used by the comment/uncomment machinery from ActiveGrid.
    """
    posA = self.GetCurrentPos()
    posB = self.GetAnchor()
    startPos, endPos = min(posA, posB), max(posA, posB)
    if endPos == self.PositionFromLine(self.LineFromPosition(endPos)):
        # The selection ends exactly at the start of a line: treat the
        # line above as the last selected line.
        endPos = endPos - 1
    selStart = self.PositionFromLine(self.LineFromPosition(startPos))
    selEnd = self.PositionFromLine(self.LineFromPosition(endPos) + 1)
    return selStart, selEnd
def _ReplaceSelectedLines(self, text):
    """Replace the currently selected lines with `text`.

    Used by the comment/uncomment machinery from ActiveGrid. For
    multi-line replacements the new text stays selected; for a single
    line the rest of the current line is selected instead.
    """
    if not text:
        return
    selStart, selEnd = self._GetPositionsBoundingSelectedLines()
    self.SetSelection(selStart, selEnd)
    self.ReplaceSelection(text)
    multiline = len(text.splitlines()) > 1
    if multiline:
        self.SetSelection(selStart, selStart + len(text))
    else:
        self.SetSelection(
            self.GetCurrentPos(),
            self.GetLineEndPosition(self.GetCurrentLine()))
def smartIdentThisLine(self):
    """Auto-indent the current line based on the previous line.

    Copies the previous line's indentation, then adds one level if the
    previous logical line ends with ':' (Python) or '{' (JavaScript),
    or removes one after a '}' in JavaScript.
    (NOTE(review): "Ident" is a long-standing typo for "Indent"; renaming
    would break callers, so it is kept.)
    """
    codeType = "Py"
    if hasattr(self, "codeType"):
        codeType = self.codeType
    startLineNum = self.LineFromPosition(self.GetSelectionStart())
    endLineNum = self.LineFromPosition(self.GetSelectionEnd())  # NOTE(review): unused
    prevLine = self.GetLine(startLineNum - 1)
    prevIndent = self.GetLineIndentation(startLineNum - 1)
    # Character that signals an indent increase per language.
    signal = {'Py': ':', 'JS': '{'}
    # set the indent to match the previous line, then go to line start
    self.SetLineIndentation(startLineNum, prevIndent)
    self.VCHome()
    # Strip a trailing comment, then inspect the last significant character.
    prevLogical = prevLine.split(self._commentType[codeType])[0]
    prevLogical = prevLogical.strip()
    if len(prevLogical) > 0 and prevLogical[-1] == signal[codeType]:
        self.CmdKeyExecute(wx.stc.STC_CMD_TAB)
    elif len(prevLogical) > 0 and prevLogical[-1] == '}' and codeType == 'JS':
        # NOTE(review): SCMOD_SHIFT + CMD_TAB looks intended as a back-tab;
        # confirm this arithmetic maps to the shift-tab (dedent) command.
        self.CmdKeyExecute(wx.stc.STC_SCMOD_SHIFT + wx.stc.STC_CMD_TAB)
def smartIndent(self):
    """Re-indent every selected line relative to the line above the selection.

    All lines shift by the same delta so their relative indentation is
    preserved; a previous logical line ending in ':' adds one extra level.
    The whole change is one undo action.
    """
    firstLine = self.LineFromPosition(self.GetSelectionStart())
    lastLine = self.LineFromPosition(self.GetSelectionEnd())
    lineAbove = self.GetLine(firstLine - 1)
    indentAbove = self.GetLineIndentation(firstLine - 1)
    currentIndent = self.GetLineIndentation(firstLine)
    # Delta that lines the first selected line up with the one above it.
    delta = indentAbove - currentIndent
    # A ':' at the end of the previous logical line opens a block.
    logical = lineAbove.split('#')[0].strip()
    if logical and logical[-1] == ':':
        delta += 4
    self.BeginUndoAction()
    for lineNum in range(firstLine, lastLine + 1):
        self.SetLineIndentation(lineNum, self.GetLineIndentation(lineNum) + delta)
    self.EndUndoAction()
def shouldTrySmartIndent(self):
    """Decide how a Tab keypress should behave.

    Returns True when smart indentation should run (there is an active
    selection, or only whitespace precedes the caret); False when a
    literal tab character should be inserted instead.
    """
    # Any active selection gets indented rather than overwritten.
    if self.GetSelectedText():
        return True
    # Smart-indent only when nothing but whitespace precedes the caret.
    lineText, posOnLine = self.GetCurLine()
    return lineText[:posOnLine].split() == []
def indentSelection(self, howFar=4):
    """Indent (positive `howFar`) or outdent (negative) the selected lines.

    Each line shifts by `howFar` spaces, clamped at column zero. The whole
    change is wrapped in a single undo action.
    """
    firstLine = self.LineFromPosition(self.GetSelectionStart())
    lastLine = self.LineFromPosition(self.GetSelectionEnd())
    self.BeginUndoAction()
    for lineN in range(firstLine, lastLine + 1):
        self.SetLineIndentation(
            lineN, max(0, self.GetLineIndentation(lineN) + howFar))
    self.EndUndoAction()
def Paste(self, event=None):
    """Paste clipboard text at the caret, normalising line endings to LF,
    then re-run script analysis.
    """
    dataObj = wx.TextDataObject()
    clip = wx.Clipboard().Get()
    clip.Open()
    success = clip.GetData(dataObj)
    clip.Close()
    if success:
        txt = dataObj.GetText()
        # dealing with unicode error in wx3 for Mac (Python 2 only)
        if parse_version(wx.__version__) >= parse_version('3') and sys.platform == 'darwin' and not PY3:
            try:
                # if we can decode from utf-8 then all is good
                txt.decode('utf-8')
            except Exception as e:
                logging.error(str(e))
                # if not then wx conversion broke so get raw data instead
                txt = dataObj.GetDataHere()
        # Normalise Windows/old-Mac line endings before inserting.
        self.ReplaceSelection(txt.replace("\r\n", "\n").replace("\r", "\n"))
        self.analyseScript()
def analyseScript(self):
    """Analyse the script.

    Hook for subclasses; this base implementation does nothing. Called
    after operations that modify the buffer (e.g. Paste).
    """
    pass
@property
def edgeGuideVisible(self):
    """Whether the long-line edge guide is drawn (edge mode is not NONE)."""
    return self.GetEdgeMode() != wx.stc.STC_EDGE_NONE

@edgeGuideVisible.setter
def edgeGuideVisible(self, value):
    # Only the exact value True enables the guide, mirroring the original
    # identity comparison (`value is True`).
    mode = wx.stc.STC_EDGE_LINE if value is True else wx.stc.STC_EDGE_NONE
    self.SetEdgeMode(mode)
@property
def edgeGuideColumn(self):
    """Column index at which the edge guide is drawn (delegates to the STC)."""
    return self.GetEdgeColumn()

@edgeGuideColumn.setter
def edgeGuideColumn(self, value):
    """Move the edge guide to column `value`."""
    self.SetEdgeColumn(value)
# def _applyAppTheme(self, target=None):
# """Overrides theme change from ThemeMixin.
# Don't call - this is called at the end of theme.setter"""
# # ThemeMixin._applyAppTheme() # only needed for children
# spec = ThemeMixin.codeColors
# base = spec['base']
#
# # Check for language specific spec
# if self.GetLexer() in self.lexers:
# lexer = self.lexers[self.GetLexer()]
# else:
# lexer = 'invlex'
# if lexer in spec:
# # If there is lang specific spec, delete subkey...
# lang = spec[lexer]
# del spec[lexer]
# #...and append spec to root, overriding any generic spec
# spec.update({key: lang[key] for key in lang})
# else:
# lang = {}
#
# # Override base font with user spec if present
# key = 'outputFont' if isinstance(self, wx.py.shell.Shell) else 'codeFont'
# if prefs.coder[key] != "From theme...":
# base['font'] = prefs.coder[key]
#
# # Pythonise the universal data (hex -> rgb, tag -> wx int)
# invalid = []
# for key in spec:
# # Check that key is in tag list and full spec is defined, discard if not
# if key in self.tags \
# and all(subkey in spec[key] for subkey in ['bg', 'fg', 'font']):
# spec[key]['bg'] = self.hex2rgb(spec[key]['bg'], base['bg'])
# spec[key]['fg'] = self.hex2rgb(spec[key]['fg'], base['fg'])
# if not spec[key]['font']:
# spec[key]['font'] = base['font']
# spec[key]['size'] = int(self.prefs['codeFontSize'])
# else:
# invalid += [key]
# for key in invalid |
"""
Created on 05/12/13
@author: zw606
simple example
assumes images and labels files are named the same but in different folders
(one folder for images, one folder for labels)
"""
import glob
from os.path import join, basename
from spatch.image import spatialcontext
from spatch.image. | mask import get_boundary_mask
from spatch.segmentation.patchbased import SAPS
from spatch.utilities.io import open_image, get_affine, save_3d_labels_data
from spatch.image.spatialcontext import COORDINATES, GDT
INITIAL_SPATIAL_INFO = COORDINATES
REFINEMENT_SPATIAL_INFO = GDT
def get_subject_id(fileName):
    """Extract the subject identifier from a file name.

    The id is the text before the first '_' once any extension
    (everything from the first '.') has been removed.
    """
    stem = fileName.split('.')[0]
    return stem.split('_')[0]
def initial_saps_segment(trainingSet, targetFile, imagesPath, labelsPath, patchSize, k, spatialWeight,
                         spatialInfoType=INITIAL_SPATIAL_INFO, maskData=None, numProcessors=21):
    """Run the initial SAPS segmentation for one target image.

    Atlases belonging to the target's subject are excluded (leave-one-out).
    Returns the label image produced by the segmenter.
    """
    targetImage = open_image(join(imagesPath, targetFile))
    # Ensure target subject is not included in atlases.
    targetId = get_subject_id(targetFile)
    atlases = [name for name in trainingSet if get_subject_id(name) != targetId]
    # Initialise the spatial-pbs segmenter.
    segmenter = SAPS(imagesPath, labelsPath, patchSize, boundaryDilation=None,
                     spatialWeight=spatialWeight, minValue=None, maxValue=None,
                     spatialInfoType=spatialInfoType)
    return segmenter.label_image(targetImage, k, atlases,
                                 queryMaskDict=maskData,
                                 numProcessors=numProcessors)
def refinement_saps_segment(trainingSet, targetFile, imagesPath, labelsPath, patchSize, k, spatialWeight,
                            prevResultsPath, dtLabels, boundaryRefinementSize=2, preDtErosion=None,
                            spatialInfoType=REFINEMENT_SPATIAL_INFO, numProcessors=21):
    """Refine a previous segmentation of one target image.

    Restricts the query to a band around the previous result's boundary and
    derives spatial context from that result. Atlases of the target's
    subject are excluded (leave-one-out). Returns the refined label image.
    """
    targetImage = open_image(join(imagesPath, targetFile))
    # Ensure target subject is not included in atlases.
    targetId = get_subject_id(targetFile)
    atlases = [name for name in trainingSet if get_subject_id(name) != targetId]
    # Initialise the spatial-pbs segmenter with boundary dilation enabled.
    segmenter = SAPS(imagesPath, labelsPath, patchSize,
                     boundaryDilation=boundaryRefinementSize,
                     spatialWeight=spatialWeight, minValue=None, maxValue=None,
                     spatialInfoType=spatialInfoType)
    # Only re-label voxels near the previous result's boundary.
    prevResults = open_image(join(prevResultsPath, targetFile))
    queryMaskDict = {1: get_boundary_mask(prevResults, boundaryRefinementSize)}
    # Erode labels before computing spatial context; default to the band size.
    if preDtErosion is None:
        preDtErosion = boundaryRefinementSize
    # Spatial context derived from the previous results.
    spatialInfo = spatialcontext.get_dt_spatial_context_dict(
        prevResults, spatialInfoType=spatialInfoType, spatialLabels=dtLabels,
        labelErosion=preDtErosion, imageData=targetImage).values()
    return segmenter.label_image(targetImage, k, atlases,
                                 queryMaskDict=queryMaskDict,
                                 spatialInfo=spatialInfo, dtLabels=dtLabels,
                                 preDtErosion=preDtErosion,
                                 numProcessors=numProcessors)
def run_leave_one_out(imagesPath, labelsPath, savePath, patchSize=7, k=15, spatialWeight=400,
                      prevResultsPath=None, dtLabels=None, preDtErosion=None, refinementSize=2,
                      numProcessors=8, fileName="*.nii.gz"):
    """
    Run a leave-one-out segmentation over every image matching fileName.

    Assumes images are in common template space,
    otherwise registration (not performed here) will be required for each target image.

    If prevResultsPath is given, earlier results are refined; otherwise the
    initial segmentation is performed. One result file per target is saved
    into savePath under the target's own file name.
    """
    files = glob.glob(join(imagesPath, fileName))
    print "Number of files found:", len(files)  # Python 2 print statement
    dataset = [basename(x) for x in files]
    if prevResultsPath is not None:
        # Refinement pass: each target is refined using all other subjects.
        for targetFile in dataset:
            trainingSet = [x for x in dataset if x != targetFile]
            results = refinement_saps_segment(trainingSet, targetFile, imagesPath, labelsPath,
                                              patchSize, k, spatialWeight,
                                              prevResultsPath, dtLabels, preDtErosion=preDtErosion,
                                              boundaryRefinementSize=refinementSize,
                                              numProcessors=numProcessors)
            # Save with the target's affine so results align with the input.
            save_3d_labels_data(results, get_affine(join(imagesPath, targetFile)),
                                join(savePath, targetFile))
    else:
        # Initial segmentation pass.
        for targetFile in dataset:
            trainingSet = [x for x in dataset if x != targetFile]
            results = initial_saps_segment(trainingSet, targetFile, imagesPath, labelsPath,
                                           patchSize, k, spatialWeight, numProcessors=numProcessors)
            save_3d_labels_data(results, get_affine(join(imagesPath, targetFile)),
                                join(savePath, targetFile))
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("--imagesPath", default=None,
                        help="Set path to images (specify folder)")
    parser.add_argument("--labelsPath", default=None,
                        help="Set path to labels (specify folder) ")
    parser.add_argument("--savePath", default=None,
                        help="Set path to save results (specify folder)")
    parser.add_argument("--prevResultsPath", default=None,
                        help="Set path to initial results for refinement (specify folder)")
    parser.add_argument("--fileName", default="*.nii.gz",
                        help="Specify which files to work on (takes regex)")
    # NOTE(review): nargs="+" makes the parsed value a list of ints when the
    # flag is given, but the default is a plain int — confirm SAPS accepts both.
    parser.add_argument("--patchSize", type=int, default=7, nargs="+",
                        help="Set the patch size to use")
    parser.add_argument("-k", type=int, default=15,
                        help="Set number of nearest neighbours to use")
    # NOTE(review): help text looks copy-pasted from --prevResultsPath, and the
    # default (10) differs from run_leave_one_out's default of 400 — confirm.
    parser.add_argument("--spatialWeight", type=float, default=10,
                        help="Set path to initial results")
    parser.add_argument("--dtLabels", type=int, default=None, nargs="+",
                        help="Set the labels (structures) to use to provide adaptive spatial context")
    parser.add_argument("--preDtErosion", type=int, default=None,
                        help="Set the erosion of labels data to apply prior to any distance transforms")
    parser.add_argument("--refinementSize", type=int, default=2,
                        help="Set boundary size for refinement (number of dilations-erosions used)")
    parser.add_argument("--numProcessors", type=int, default=10,
                        help="Set number of processors to use")
    options = parser.parse_args()
    run_leave_one_out(options.imagesPath, options.labelsPath, options.savePath, patchSize=options.patchSize,
                      k=options.k, prevResultsPath=options.prevResultsPath,
                      dtLabels=options.dtLabels, preDtErosion=options.preDtErosion,
                      spatialWeight=options.spatialWeight, numProcessors=options.numProcessors,
                      fileName=options.fileName, refinementSize=options.refinementSize)
    print "Done!"  # Python 2 print statement
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from random import random
from math import floor
from .common import InfoExtractor
from ..utils import (
ExtractorError,
remove_end,
sanitized_Request,
)
class IPrimaIE(InfoExtractor):
    """Extractor for play.iprima.cz videos (RTMP/flv streams)."""

    # Marked broken: youtube-dl warns and skips this extractor by default.
    _WORKING = False
    _VALID_URL = r'https?://play\.iprima\.cz/(?:[^/]+/)*(?P<id>[^?#]+)'

    _TESTS = [{
        'url': 'http://play.iprima.cz/particka/particka-92',
        'info_dict': {
            'id': '39152',
            'ext': 'flv',
            'title': 'Partička (92)',
            'description': 'md5:74e9617e51bca67c3ecfb2c6f9766f45',
            'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
        },
        'params': {
            # requires rtmpdump
            'skip_download': True,
        },
    }, {
        'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
        'info_dict': {
            'id': '9718337',
            'ext': 'flv',
            'title': 'Tchibo Partička - Jarní móda',
            'thumbnail': 're:^http:.*\.jpg$',
        },
        'params': {
            # requires rtmpdump
            'skip_download': True,
        },
    }, {
        'url': 'http://play.iprima.cz/zpravy-ftv-prima-2752015',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # Czech message meaning "You do not have permission to access this page."
        if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
            raise ExtractorError(
                '%s said: You do not have permission to access this page' % self.IE_NAME, expected=True)
        # Player JS URL with two random tokens appended (presumably cache
        # busting — TODO confirm against the site).
        player_url = (
            'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
            (floor(random() * 1073741824), floor(random() * 1073741824))
        )
        req = sanitized_Request(player_url)
        req.add_header('Referer', url)
        playerpage = self._download_webpage(req, video_id)
        # Rebuild the stream base URL by joining the three captured fragments
        # of the SECOND match ([1]) in the player page.
        base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])
        zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
        if zoneGEO != '0':
            # Non-zero zone: switch to the zone-specific token endpoint.
            base_url = base_url.replace('token', 'token_' + zoneGEO)
        formats = []
        # NOTE(review): real_id is only assigned inside this loop; if every
        # quality's filename is 'null', the return below would raise on an
        # unbound real_id — confirm whether that can happen in practice.
        for format_id in ['lq', 'hq', 'hd']:
            filename = self._html_search_regex(
                r'"%s_id":(.+?),' % format_id, webpage, 'filename')
            if filename == 'null':
                continue
            real_id = self._search_regex(
                r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
                filename, 'real video id')
            if format_id == 'lq':
                quality = 0
            elif format_id == 'hq':
                quality = 1
            elif format_id == 'hd':
                quality = 2
                # HD files live under the hq/ prefix on the server.
                filename = 'hq/' + filename
            formats.append({
                'format_id': format_id,
                'url': base_url,
                'quality': quality,
                # Strip quotes and the extension to form the RTMP play path.
                'play_path': 'mp4:' + filename.replace('"', '')[:-4],
                'rtmp_live': True,
                'ext': 'flv',
            })
        self._sort_formats(formats)
        return {
            'id': real_id,
            'title': remove_end(self._og_search_title(webpage), ' | Prima PLAY'),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
            'description': self._search_regex(
                r'<p[^>]+itemprop="description"[^>]*>([^<]+)',
                webpage, 'description', default=None),
        }
|
# -*- coding: utf-8 -*-
#
# This file is covered by the GNU Public Licence v3 licence. See http://www.gnu.org/licenses/gpl.txt
#
'''
List of controllers, with indirections to object loaded by Spring
'''
import springpython.context
from django.http import HttpResponse
from django.template import loader, Context
from os import listdir
from os.path import isdir, isfile, sep
from settings import APPLICATION_CONTEXTS, TEMPLATE_DIRS, DEBUG
import logging
LOGGER = logging.getLogger('app')
class CommonController(object):
    """Base class for controllers: URL mappings plus optional pre/post handlers.

    prehandler/posthandler may each be None, a single handler, or a list of
    handlers.
    """

    def __init__(self):
        self.prehandler = None
        self.posthandler = None
        self.urls = []

    def _geturls(self):
        raise Exception("No URL defined")

    def prehandle(self, request):
        """Run pre-handlers; return the first non-None template, else None."""
        tpl = None
        if isinstance(self.prehandler, list):
            for ph in self.prehandler:
                if isinstance(ph, PreHandler):
                    tpl = ph.handle(request)
                    # BUG FIX: was `tpl != None`, which invokes __eq__ on
                    # arbitrary template/response objects; identity test is
                    # the correct (and idiomatic) check here.
                    if tpl is not None:
                        break
        elif isinstance(self.prehandler, PreHandler):
            tpl = self.prehandler.handle(request)
        return tpl

    def posthandle(self, request, tpl):
        """Run every post-handler after the controller method produced tpl."""
        if isinstance(self.posthandler, list):
            for ph in self.posthandler:
                if isinstance(ph, PostHandler):
                    ph.handle(request, tpl)
        elif isinstance(self.posthandler, PostHandler):
            self.posthandler.handle(request, tpl)
class PreHandler(object):
    """Hook run before a controller method; subclasses override handle()."""

    def handle(self, request):
        """Inspect the request; the default implementation does nothing (None)."""
        return None
class PostHandler(object):
    """Hook run after a controller method produced a template/response."""

    def handle(self, request, tpl):
        """Post-process the produced tpl; the default does nothing (None)."""
        return None
# Templates loading
class TemplatesContainer(object):
    """Loads Django templates from a directory tree, exposing them as attributes.

    Sub-directories become nested TemplatesContainer instances, so the
    template ``foo/bar.html`` is reachable as ``templates.foo.bar`` or via
    ``templates.render('foo.bar')``.
    """

    def __init__(self, tpldir=TEMPLATE_DIRS, prefix=''):
        self.__templates = {}
        self.__tpldir = tpldir
        self.__prefix = prefix
        self.__load()

    def after_properties_set(self):
        # Spring IoC lifecycle hook; nothing to do after injection.
        pass

    def set_app_context(self, context):
        # Spring IoC hook; the application context is not needed here.
        pass

    def __load(self):
        # Load all templates found. Replace directory by _
        for fileent in listdir(self.__tpldir):
            if isfile(self.__tpldir + sep + fileent):
                self.__templates[fileent.replace('.html', '')] = loader.get_template(self.__prefix + fileent)
            elif isdir(self.__tpldir + sep + fileent):
                self.__templates[fileent] = TemplatesContainer(self.__tpldir + sep + fileent, self.__prefix + fileent + sep)

    def __getattr__(self, name):
        # In DEBUG mode reload on each access so template edits show up live.
        if DEBUG:
            self.__load()
        if name not in self.__templates:
            LOGGER.error('Internal error: Template %s is missing' % (name))
            raise Exception('Internal error: Template %s is missing' % (name))
        return self.__templates[name]

    def render(self, name, context=None):
        """Render template ``name`` (dotted 'dir.template' path) with context."""
        # BUG FIX: context defaulted to a shared mutable {} — any mutation
        # leaked between calls.  Use None and build a fresh dict per call.
        if context is None:
            context = {}
        name_i = name.split('.', 2)
        tpl = self
        while type(tpl) == TemplatesContainer:
            try:
                tpl = tpl.__getattr__(name_i.pop(0))
            # BUG FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            except Exception:
                LOGGER.error('Internal error: Template %s is missing' % (name))
                raise Exception('Internal error: Template %s is missing' % (name))
        return tpl.render(Context(context))

    def content(self, content):
        return HttpResponse(content=content, mimetype="text/html", status=200)

    def response(self, name, context=None, status=200, mimetype="text/html"):
        # BUG FIX: same mutable-default repair as render().
        if context is None:
            context = {}
        return HttpResponse(content=self.render(name, context), mimetype=mimetype, status=status)

    def redirect(self, url):
        # Client-side redirect via meta-refresh (deliberately HTTP 200).
        return HttpResponse(content='<html><head><meta http-equiv="refresh" content="0; url=%s"/></head></html>' % url, mimetype="text/html", status=200)

    def forbidden(self):
        return self.response('forbidden')

    def empty(self):
        return self.content('')

    def error(self, msg):
        return self.response('message_return', { 'error':msg })

    def underConstruction(self):
        return self.response('under_construction')
# Controllers are entry point of the application, so this is the good place to load the application (lazy loading)
ApplicationContext = springpython.context.ApplicationContext(APPLICATION_CONTEXTS)
'''
Declare controller. This first layer has two purposes :
1/ Check security
2/ Call the IoC managed controller method
'''
# Controllers
# Shared module-level singletons: the Spring-managed template container and a
# per-process cache of controller instances keyed by Spring object name.
templates = ApplicationContext.get_object('templatesContainer')
controllersmap = {}
def run_controller(request, *kargs, **kwargs):
    """Dispatch a request to an IoC-managed controller method.

    Expected in kwargs (injected by the URL configuration):
      controller -- Spring object name of the controller (required)
      method     -- name of the controller method to invoke (required)
      right      -- session key required for access; omitted/None = public
    All remaining kwargs are forwarded to the controller method.  Any
    exception from the controller is rendered via templates.error().
    """
    # BUG FIX: the original only bound controller/method/right when present in
    # kwargs, so a route without 'right' raised NameError at the access-check
    # below.  pop() consumes the routing keys (right defaulting to None) and
    # leaves the remaining kwargs to forward.
    controller = kwargs.pop('controller')
    method = kwargs.pop('method')
    right = kwargs.pop('right', None)
    kwargsremain = kwargs
    # Lazily instantiate and cache controllers by Spring object name.
    if controller not in controllersmap:
        controllersmap[controller] = ApplicationContext.get_object(controller)
    controllerObj = controllersmap[controller]
    try:
        if right is not None and request.session.get(right, default=None) is None:
            tpl = templates.forbidden()
        else:
            tpl = controllerObj.prehandle(request)
            if tpl is None:
                tpl = getattr(controllerObj, method)(request, *kargs, **kwargsremain)
            controllerObj.posthandle(request, tpl)
    except Exception as exc:
        tpl = templates.error(exc)
    return tpl
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# from django.conf import settings

# URL routes for the sight app: the map page at the root, and a detail page
# keyed by a numeric sight_id (passed to the view as a keyword argument).
urlpatterns = patterns('www.sight.views',
    url(r'^$', 'sight_map'),
    url(r'^(?P<sight_id>\d+)$', 'sight_detail'),
)
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)

# Local customisation of the lino tickets plugin: pull in everything from the
# library models module, then tweak labels and layouts for this application.
from lino_xl.lib.tickets.models import *
from lino.api import _

# Hide the 'closed' element from Ticket (presumably from its layouts/filters
# — see lino's hide_elements for the exact semantics; TODO confirm).
Ticket.hide_elements('closed')
# class Ticket(Ticket):
# class Meta(Ticket.Meta):
# app_label = 'tickets'
# verbose_name = _("Plea")
# verbose_name_plural = _("Pleas")
# abstract = dd.is_abstract_model(__name__, 'Ticket')
# ActiveTickets._label = _("Active pleas")
# UnassignedTickets._label = _("Unassigned pleas")
# PublicTickets._label = _("Public pleas")
# TicketsToTriage._label = _("Pleas to triage")
# TicketsToTalk._label = _("Pleas to talk")
# # TicketsToDo._label = _("Pleas to to")
# AllTickets._label = _("All pleas")

# Relabel Ticket.upgrade_notes as "Solution" for this application.
dd.update_field(
    'tickets.Ticket', 'upgrade_notes', verbose_name=_("Solution"))
# dd.update_field(
#     'tickets.Ticket', 'state', default=TicketStates.todo.as_callable)
class T | icketDetail(TicketDetail):
main = "general history_tab more"
general = dd.Panel("""
general1:60 votes.VotesByVotable:20 uploads.UploadsByController
description:30 comments.CommentsByRFC:30 skills.DemandsByDemander #working.SessionsByTicket:20
""", label=_("General"))
general1 = """
summary:40 id:6 deadline
user:12 end_user:12 #faculty #topic
site workflow_buttons
"""
history_tab = dd.Panel("""
changes.ChangesByMaster:50 #stars.StarsByController:20
""", label=_("History"), required_roles=dd.login_required(Triager))
more = dd.Panel("""
more1:60 #skills.AssignableWorkersByTicket:20
upgrade_notes LinksByTicket skills.OffersByDemander
""", label=_("More"), required_roles=dd.login_required(Triager))
more1 = """
created modified ticket_type:10
state priority project
# standby feedback closed
"""
Tickets.detail_layout = TicketDetail()
|
#Key, dictionary[key, int], int --> dictionary[key, int]
#Given a key, dictionary and increment, set the dictionary value at
#key to dictionary[key] + inc. If there is no old value, set to inc
def incrementDict(dictKey, dictionary, inc=1):
    """Add inc to dictionary[dictKey] in place; missing keys start from 0.

    Returns the same (mutated) dictionary for convenience.
    """
    dictionary[dictKey] = dictionary.get(dictKey, 0) + inc
    return dictionary
#dictionary[key, int] -> boolean
#Given a dictionary of counts return true if at least one is non zero
#and false otherwise
def nonZeroCount(dictionary):
    """Return True if any count in dictionary is positive, else False.

    Counts must be non-negative; a negative value trips the assertion
    (only under non-optimized runs).
    """
    # BUG FIX: the original used dictionary.iteritems(), which does not exist
    # on Python 3 dicts; only the values were used, and .values() works on
    # both Python 2 and 3.
    for count in dictionary.values():
        assert(count >= 0)
        if count > 0:
            return True
    return False
_helper
import os
import stat
import sys
import uu
import io
# Fixture pair used throughout the tests: a sample payload and its uuencoded
# form (the raw encoded body, without the begin/end framing that
# encodedtextwrapped() adds).
plaintext = b"The symbols on top of your keyboard are !@#$%^&*()_+|~\n"
encodedtext = b"""\
M5&AE('-Y;6)O;',@;VX@=&]P(&]F('EO=7(@:V5Y8F]A<F0@87)E("% (R0E
*7B8J*"E?*WQ^"@ """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
    """Text I/O implementation using an in-memory buffer.

    Can be a used as a drop-in replacement for sys.stdin and sys.stdout.
    """

    # XXX This is really slow, but fully functional
    def __init__(self, initial_value="", encoding="utf-8",
                 errors="strict", newline="\n"):
        super(FakeIO, self).__init__(io.BytesIO(), encoding=encoding,
                                     errors=errors, newline=newline)
        self._encoding = encoding
        self._errors = errors
        if not initial_value:
            return
        # Pre-load the buffer and rewind so the value can be read back.
        text = initial_value if isinstance(initial_value, str) else str(initial_value)
        self.write(text)
        self.seek(0)

    def getvalue(self):
        """Return everything written so far, decoded from the byte buffer."""
        self.flush()
        raw = self.buffer.getvalue()
        return raw.decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename, backtick=False):
    """Wrap the module's encoded payload in uu begin/end framing.

    With backtick=True, spaces in the payload become backticks and the
    trailing pad line uses a backtick, matching uu.encode(backtick=True).
    """
    header = bytes("begin %03o %s\n" % (mode, filename), "ascii")
    if backtick:
        body = encodedtext.replace(b' ', b'`') + b"\n`\nend\n"
    else:
        body = encodedtext + b"\n \nend\n"
    return header + body
class UUTest(unittest.TestCase):
    """Round-trip tests for uu.encode()/uu.decode() on in-memory streams."""

    def test_encode(self):
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        uu.encode(inp, out, "t1")
        self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1"))
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        uu.encode(inp, out, "t1", 0o644)
        self.assertEqual(out.getvalue(), encodedtextwrapped(0o644, "t1"))
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        uu.encode(inp, out, "t1", backtick=True)
        self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1", True))
        # backtick must be passed by keyword; positional use raises TypeError.
        with self.assertRaises(TypeError):
            uu.encode(inp, out, "t1", 0o644, True)

    def test_decode(self):
        for backtick in True, False:
            inp = io.BytesIO(encodedtextwrapped(0o666, "t1", backtick=backtick))
            out = io.BytesIO()
            uu.decode(inp, out)
            self.assertEqual(out.getvalue(), plaintext)
            # Leading junk before the 'begin' line must be skipped.
            inp = io.BytesIO(
                b"UUencoded files may contain many lines,\n" +
                b"even some that have 'begin' in them.\n" +
                encodedtextwrapped(0o666, "t1", backtick=backtick)
            )
            out = io.BytesIO()
            uu.decode(inp, out)
            self.assertEqual(out.getvalue(), plaintext)

    def test_truncatedinput(self):
        # Payload without the trailing end line must raise uu.Error.
        inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
        out = io.BytesIO()
        try:
            uu.decode(inp, out)
            self.fail("No exception raised")
        except uu.Error as e:
            self.assertEqual(str(e), "Truncated input file")

    def test_missingbegin(self):
        # Empty input has no begin line at all.
        inp = io.BytesIO(b"")
        out = io.BytesIO()
        try:
            uu.decode(inp, out)
            self.fail("No exception raised")
        except uu.Error as e:
            self.assertEqual(str(e), "No valid begin line found in input file")

    def test_garbage_padding(self):
        # Issue #22406
        encodedtext1 = (
            b"begin 644 file\n"
            # length 1; bits 001100 111111 111111 111111
            b"\x21\x2C\x5F\x5F\x5F\n"
            b"\x20\n"
            b"end\n"
        )
        encodedtext2 = (
            b"begin 644 file\n"
            # length 1; bits 001100 111111 111111 111111
            b"\x21\x2C\x5F\x5F\x5F\n"
            b"\x60\n"
            b"end\n"
        )
        plaintext = b"\x33" # 00110011
        # Both padding variants (space and backtick) must decode identically,
        # through uu.decode and through the uu_codec.
        for encodedtext in encodedtext1, encodedtext2:
            with self.subTest("uu.decode()"):
                inp = io.BytesIO(encodedtext)
                out = io.BytesIO()
                uu.decode(inp, out, quiet=True)
                self.assertEqual(out.getvalue(), plaintext)
            with self.subTest("uu_codec"):
                import codecs
                decoded = codecs.decode(encodedtext, "uu_codec")
                self.assertEqual(decoded, plaintext)

    def test_newlines_escaped(self):
        # Test newlines are escaped with uu.encode
        inp = io.BytesIO(plaintext)
        out = io.BytesIO()
        filename = "test.txt\n\roverflow.txt"
        safefilename = b"test.txt\\n\\roverflow.txt"
        uu.encode(inp, out, filename)
        self.assertIn(safefilename, out.getvalue())
class UUStdIOTest(unittest.TestCase):
    """Exercise uu's '-' filename convention, which uses sys.stdin/sys.stdout."""

    def setUp(self):
        # Save the real streams so tearDown can restore them.
        self.stdin = sys.stdin
        self.stdout = sys.stdout

    def tearDown(self):
        sys.stdin = self.stdin
        sys.stdout = self.stdout

    def test_encode(self):
        sys.stdin = FakeIO(plaintext.decode("ascii"))
        sys.stdout = FakeIO()
        uu.encode("-", "-", "t1", 0o666)
        self.assertEqual(sys.stdout.getvalue(),
                         encodedtextwrapped(0o666, "t1").decode("ascii"))

    def test_decode(self):
        sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
        sys.stdout = FakeIO()
        uu.decode("-", "-")
        stdout = sys.stdout
        # Restore the real streams BEFORE asserting so a failure report
        # prints to the actual stdout.
        sys.stdout = self.stdout
        sys.stdin = self.stdin
        self.assertEqual(stdout.getvalue(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
    """Tests for uu encode/decode operating on real files on disk."""

    def setUp(self):
        # uu.encode() supports only ASCII file names
        self.tmpin = os_helper.TESTFN_ASCII + "i"
        self.tmpout = os_helper.TESTFN_ASCII + "o"
        self.addCleanup(os_helper.unlink, self.tmpin)
        self.addCleanup(os_helper.unlink, self.tmpout)

    def test_encode(self):
        with open(self.tmpin, 'wb') as fin:
            fin.write(plaintext)
        # First via open file objects...
        with open(self.tmpin, 'rb') as fin:
            with open(self.tmpout, 'wb') as fout:
                uu.encode(fin, fout, self.tmpin, mode=0o644)
        with open(self.tmpout, 'rb') as fout:
            s = fout.read()
        self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
        # in_file and out_file as filenames
        uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
        with open(self.tmpout, 'rb') as fout:
            s = fout.read()
        self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))

    def test_decode(self):
        # The out_file name is taken from the begin line (self.tmpout here).
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(0o644, self.tmpout))
        with open(self.tmpin, 'rb') as f:
            uu.decode(f)
        with open(self.tmpout, 'rb') as f:
            s = f.read()
        self.assertEqual(s, plaintext)
        # XXX is there an xp way to verify the mode?

    def test_decode_filename(self):
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(0o644, self.tmpout))
        uu.decode(self.tmpin)
        with open(self.tmpout, 'rb') as f:
            s = f.read()
        self.assertEqual(s, plaintext)

    def test_decodetwice(self):
        # Verify that decode() will refuse to overwrite an existing file
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(0o644, self.tmpout))
        with open(self.tmpin, 'rb') as f:
            uu.decode(f)
        with open(self.tmpin, 'rb') as f:
            self.assertRaises(uu.Error, uu.decode, f)

    def test_decode_mode(self):
        # Verify that decode() will set the given mode for the out_file
        expected_mode = 0o444
        with open(self.tmpin, 'wb') as f:
            f.write(encodedtextwrapped(expected_mode, self.tmpout))
        # make file writable again, so it can be removed (Windows only)
        self.addCleanup(os.chmod, self.tmpout, expected_mode | stat.S_IWRITE)
        with open(self.tmpin, 'rb') as f:
            uu.decode(f)
        self.assertEqual(
            stat.S_IMODE(os.stat(self.tmpout).st_mode),
            expected_mode
        )
if __name__=="__mai |
""" Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .halo_attn import HaloAttn
from .lambda_layer import LambdaLayer
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .selective_kernel import SelectiveKernel
from .split_attn import SplitAttn
from .squeeze_excite import SEModule, EffectiveSEModule
def get_attn(attn_type):
    """Resolve an attention spec to a module class/factory (or pass through).

    attn_type may be:
      * an ``nn.Module`` instance -- returned unchanged
      * a string key naming one of the known attention modules
      * a bool -- True selects SEModule, False/None selects nothing
      * any other class/callable -- returned as-is

    Returns the module class (or partial), or None when no attention is
    requested.  Raises AssertionError on an unknown string key.
    """
    if isinstance(attn_type, torch.nn.Module):
        return attn_type
    module_cls = None
    if attn_type is not None:
        if isinstance(attn_type, str):
            attn_type = attn_type.lower()
            # Lightweight attention modules (channel and/or coarse spatial).
            # Typically added to existing network architecture blocks in addition to existing convolutions.
            if attn_type == 'se':
                module_cls = SEModule
            elif attn_type == 'ese':
                module_cls = EffectiveSEModule
            elif attn_type == 'eca':
                module_cls = EcaModule
            elif attn_type == 'ecam':
                module_cls = partial(EcaModule, use_mlp=True)
            elif attn_type == 'ceca':
                module_cls = CecaModule
            elif attn_type == 'ge':
                module_cls = GatherExcite
            elif attn_type == 'gc':
                module_cls = GlobalContext
            elif attn_type == 'gca':
                module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
            elif attn_type == 'cbam':
                module_cls = CbamModule
            elif attn_type == 'lcbam':
                module_cls = LightCbamModule
            # Attention / attention-like modules w/ significant params
            # Typically replace some of the existing workhorse convs in a network architecture.
            # All of these accept a stride argument and can spatially downsample the input.
            elif attn_type == 'sk':
                module_cls = SelectiveKernel
            elif attn_type == 'splat':
                module_cls = SplitAttn
            # Self-attention / attention-like modules w/ significant compute and/or params
            # Typically replace some of the existing workhorse convs in a network architecture.
            # All of these accept a stride argument and can spatially downsample the input.
            # CONSISTENCY FIX: 'lambda'/'bottleneck'/'halo' previously returned
            # the class directly, bypassing the common module_cls return path;
            # behavior is unchanged, the function now has a single exit point
            # for all string keys.
            elif attn_type == 'lambda':
                module_cls = LambdaLayer
            elif attn_type == 'bottleneck':
                module_cls = BottleneckAttn
            elif attn_type == 'halo':
                module_cls = HaloAttn
            elif attn_type == 'nl':
                module_cls = NonLocalAttn
            elif attn_type == 'bat':
                module_cls = BatNonLocalAttn
            # Woops!
            else:
                assert False, "Invalid attn module (%s)" % attn_type
        elif isinstance(attn_type, bool):
            if attn_type:
                module_cls = SEModule
        else:
            # Assume a user-supplied class/callable; hand it back untouched.
            module_cls = attn_type
    return module_cls
def create_attn(attn_type, channels, **kwargs):
    """Instantiate the attention module selected by attn_type for `channels`.

    Returns None when attn_type resolves to no module.
    """
    module_cls = get_attn(attn_type)
    if module_cls is None:
        return None
    # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
    return module_cls(channels, **kwargs)
|
from django.co | nf.urls import patterns, include, url
from cover.views import CoverView
urlpatterns = patterns('cover.views',
url( | r'^$', CoverView.as_view(), name='cover'),
)
|
SID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class CommentSearch:
    """The search interface for governing comment searches."""
    # NOTE(review): '__metaclass__' is the Python 2 spelling; under Python 3
    # it has no effect and the class is not actually abstract — confirm the
    # target interpreter version.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def search_among_comments(self, comment_ids):
        """Execute this search among the given list of comments.
        :param comment_ids: list of comments
        :type comment_ids: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``comment_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def order_comment_results(self, comment_search_order):
        """Specify an ordering to the search results.
        :param comment_search_order: comment search order
        :type comment_search_order: ``osid.commenting.CommentSearchOrder``
        :raise: ``NullArgument`` -- ``comment_search_order`` is ``null``
        :raise: ``Unsupported`` -- ``comment_search_order`` is not of this service
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def get_comment_search_record(self, comment_search_record_type):
        """Gets the comment search record corresponding to the given comment search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        :param comment_search_record_type: a comment search record type
        :type comment_search_record_type: ``osid.type.Type``
        :return: the comment search record
        :rtype: ``osid.commenting.records.CommentSearchRecord``
        :raise: ``NullArgument`` -- ``comment_search_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(comment_search_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.commenting.records.CommentSearchRecord
class CommentSearchResults:
    """This interface provides a means to capture results of a search."""
    # NOTE(review): Python 2 metaclass spelling; no effect under Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_comments(self):
        """Gets the comment list resulting from a search.
        :return: the comment list
        :rtype: ``osid.commenting.CommentList``
        :raise: ``IllegalState`` -- list has already been retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.commenting.CommentList

    comments = property(fget=get_comments)

    @abc.abstractmethod
    def get_comment_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        :return: the query inspector
        :rtype: ``osid.commenting.CommentQueryInspector``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.commenting.CommentQueryInspector

    comment_query_inspector = property(fget=get_comment_query_inspector)

    @abc.abstractmethod
    def get_comment_search_results_record(self, comment_search_record_type):
        """Gets the comment search results record corresponding to the given comment search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        :param comment_search_record_type: a comment search record type
        :type comment_search_record_type: ``osid.type.Type``
        :return: the comment search results record
        :rtype: ``osid.commenting.records.CommentSearchResultsRecord``
        :raise: ``NullArgument`` -- ``comment_search_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(comment_search_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.commenting.records.CommentSearchResultsRecord
class BookSearch:
    """The search interface for governing book searches."""
    # NOTE(review): Python 2 metaclass spelling; no effect under Python 3.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def search_among_books(self, book_ids):
        """Execute this search among the given list of books.
        :param book_ids: list of books
        :type book_ids: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``book_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def order_book_results(self, book_search_order):
        """Specify an ordering to the search results.
        :param book_search_order: book search order
        :type book_search_order: ``osid.commenting.BookSearchOrder``
        :raise: ``NullArgument`` -- ``book_search_order`` is ``null``
        :raise: ``Unsupported`` -- ``book_search_order`` is not of this service
        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def get_book_search_record(self, book_search_record_type):
        """Gets the book search record corresponding to the given book search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        :param book_search_record_type: a book search record type
        :type book_search_record_type: ``osid.type.Type``
        :return: the book search record
        :rtype: ``osid.commenting.records.BookSearchRecord``
        :raise: ``NullArgument`` -- ``book_search_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(book_search_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.commenting.records.BookSearchRecord
class BookSearchResults:
"""This interface provides a means to capture results of a search."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_books(self):
"""Gets the book list resulting from a search.
:return: the book list
:rtype: ``osid.commenting.BookList``
:raise: ``IllegalState`` -- list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.BookList
books = property(fget=get_books)
@abc.abstractmethod
def get_book_query_inspector(self):
"""Gets the inspector for the query to examine the terns used in the search.
:return: the query inspector
:rtype: ``osid.commenting.BookQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.BookQueryInspector
book_query_inspector = property(fget=get_book_query_inspector)
@abc.abstractmethod
def get_book_search_results_record(self, book_search_record_type):
"""Gets the book search results record corresponding to the given book search record Type.
This method is used to retrieve an object implementing the
requested record.
:param book_search_record_type: a book search record type
:type book_search_record_type: ``osid.type.Type``
:return: the book search results record
:rtype: ``osid.commenting.records.BookSearchResultsRecord``
:raise: ``NullArgument`` -- ``BookSearchRecordType`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(book_search_record_type)`` is ``false``
*compliance: mandatory -- This method must be i |
on']
}
elif location in self.location_not_found:
# Don't call the API.
pass
else:
url = 'https://maps.googleapis.com/maps/api/geocode/json'
params = {'sensor': 'false', 'address': location}
r = self.requests.get(url, params=params)
try:
logging.debug("Using Maps API to find %s" % (location))
r_json = r.json()
geo_code = r_json['results'][0]['geometry']['location']
except:
if location not in self.location_not_found:
logging.debug("Can't find geocode for " + location)
self.location_not_found.append(location)
if geo_code:
geo_point = {
"lat": geo_code['lat'],
"lon": geo_code['lng']
}
self.geolocations[location] = geo_point
return geo_point
def get_github_cache(self, kind, _key):
""" Get cache data for items of _type using _key as the cache dict key """
cache = {}
res_size = 100 # best size?
_from = 0
index_github = "github/" + kind
url = self.elastic.url + "/"+index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logging.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
| item = hit['_source']
cache[item[_key]] = item
_from += res_size
r = self.requests.get(url+"&from=%i" % _from)
type_items = | r.json()
if 'hits' not in type_items:
break
return cache
    def geo_locations_from_es(self):
        """Load the geolocation cache from the github/geolocations ES index,
        keyed by the free-form 'location' string."""
        return self.get_github_cache("geolocations", "location")
    def geo_locations_to_es(self):
        """Bulk-upload the in-memory geolocation cache to the
        github/geolocations index, in packs of max_items_bulk documents."""
        max_items = self.elastic.max_items_bulk
        current = 0
        bulk_json = ""
        url = self.elastic.url + "/github/geolocations/_bulk"
        logging.debug("Adding geoloc to %s (in %i packs)" % (url, max_items))
        for loc in self.geolocations:
            if current >= max_items:
                # Flush the current pack before starting the next one.
                self.requests.put(url, data=bulk_json)
                bulk_json = ""
                current = 0
            geopoint = self.geolocations[loc]
            # Copy so the cached geopoint dict is not mutated.
            location = geopoint.copy()
            location["location"] = loc
            # First upload the raw issue data to ES
            data_json = json.dumps(location)
            # Don't include in URL non ascii codes
            safe_loc = str(loc.encode('ascii', 'ignore'),'ascii')
            geo_id = str("%s-%s-%s" % (location["lat"], location["lon"],
                                       safe_loc))
            bulk_json += '{"index" : {"_id" : "%s" } }\n' % (geo_id)
            bulk_json += data_json +"\n" # Bulk document
            current += 1
        # Upload the final (possibly partial) pack.
        self.requests.put(url, data = bulk_json)
        logging.debug("Adding geoloc to ES Done")
    def get_elastic_mappings(self):
        """ geopoints type is not created in dynamic mapping """
        # Explicit mapping: geo_point types for the two geolocation fields and
        # an analyzed copy of the title for full-text search.
        mapping = """
        {
            "properties": {
               "assignee_geolocation": {
                   "type": "geo_point"
               },
               "user_geolocation": {
                   "type": "geo_point"
               },
               "title_analyzed": {
                 "type": "string",
                 "index":"analyzed"
               }
            }
        }
        """
        return {"items":mapping}
def get_field_unique_id(self):
return "ocean-unique-id"
def get_project_repository(self, eitem):
repo = eitem['origin']
return repo
@metadata
def get_rich_item(self, item):
rich_issue = {}
# metadata fields to copy
copy_fields = ["metadata__updated_on","metadata__timestamp","ocean-unique-id","origin"]
for f in copy_fields:
if f in item:
rich_issue[f] = item[f]
else:
rich_issue[f] = None
# The real data
issue = item['data']
rich_issue['time_to_close_days'] = \
get_time_diff_days(issue['created_at'], issue['closed_at'])
if issue['state'] != 'closed':
rich_issue['time_open_days'] = \
get_time_diff_days(issue['created_at'], datetime.utcnow())
else:
rich_issue['time_open_days'] = rich_issue['time_to_close_days']
rich_issue['user_login'] = issue['user']['login']
user = issue['user_data']
if user is not None:
rich_issue['user_name'] = user['name']
rich_issue['author_name'] = user['name']
rich_issue['user_email'] = user['email']
if rich_issue['user_email']:
rich_issue["user_domain"] = self.get_email_domain(rich_issue['user_email'])
rich_issue['user_org'] = user['company']
rich_issue['user_location'] = user['location']
rich_issue['user_geolocation'] = self.get_geo_point(user['location'])
else:
rich_issue['user_name'] = None
rich_issue['user_email'] = None
rich_issue["user_domain"] = None
rich_issue['user_org'] = None
rich_issue['user_location'] = None
rich_issue['user_geolocation'] = None
rich_issue['author_name'] = None
assignee = None
if issue['assignee'] is not None:
assignee = issue['assignee_data']
rich_issue['assignee_login'] = issue['assignee']['login']
rich_issue['assignee_name'] = assignee['name']
rich_issue['assignee_email'] = assignee['email']
if rich_issue['assignee_email']:
rich_issue["assignee_domain"] = self.get_email_domain(rich_issue['assignee_email'])
rich_issue['assignee_org'] = assignee['company']
rich_issue['assignee_location'] = assignee['location']
rich_issue['assignee_geolocation'] = \
self.get_geo_point(assignee['location'])
else:
rich_issue['assignee_name'] = None
rich_issue['assignee_login'] = None
rich_issue['assignee_email'] = None
rich_issue["assignee_domain"] = None
rich_issue['assignee_org'] = None
rich_issue['assignee_location'] = None
rich_issue['assignee_geolocation'] = None
rich_issue['id'] = issue['id']
rich_issue['id_in_repo'] = issue['html_url'].split("/")[-1]
rich_issue['title'] = issue['title']
rich_issue['title_analyzed'] = issue['title']
rich_issue['state'] = issue['state']
rich_issue['created_at'] = issue['created_at']
rich_issue['updated_at'] = issue['updated_at']
rich_issue['closed_at'] = issue['closed_at']
rich_issue['url'] = issue['html_url']
labels = ''
if 'labels' in issue:
for label in issue['labels']:
labels += label['name']+";;"
if labels != '':
labels[:-2]
rich_issue['labels'] = labels
rich_issue['repository'] = rich_issue['origin']
rich_issue['pull_request'] = True
rich_issue['item_type'] = 'pull request'
if not 'head' in issue.keys() and not 'pull_request' in issue.keys():
rich_issue['pull_request'] = False
rich_issue['item_type'] = 'issue'
rich_issue['github_repo'] = item['origin'].replace(GITHUB,'')
rich_issue['github_repo'] = re.sub('.git$', '', rich_issue['github_repo'])
rich_issue["url_id"] = rich_issue['github_repo']+"/issues/"+rich_issue['id_in_repo']
if self.prjs_map:
rich_issue.update(self.get_item_project(rich_issue))
if self.sortinghat:
rich_issue.update(self.get_item_sh(item, self.roles))
rich_issue.update(self.get_grimoire_fields(issue['cre |
from django.core.management.base import BaseCommand
from candidates.models import OrganizationExtra
class Command(BaseCommand):
def handle(self, *args, **options):
for party_extra in OrganizationExtra.objects \
.filter(base__classification='Party') \
| .select_related('base') \
.prefetch_related('images'):
images = list(party_extra.images.all())
if len(images) < 2:
continue
print "====================================================="
party = party_extra.base
print len(images), party_extra.slug, party.name.encode('utf-8')
for image in images:
print ' --'
print | ' ' + image.source.encode('utf-8')
print ' ' + image.image.url
|
'''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class MessagePump(object):
    """Stand-in for sys.stdout; subclasses redirect written text elsewhere."""

    def __init__(self):
        # self.queue = queue
        # Keep a handle on the real stdout so it remains reachable.
        self.stdout = sys.stdout

    def write(self, text):
        """Receive text destined for stdout; concrete pumps must override."""
        raise NotImplementedError
class RuntimeDisplay(wx.Panel):
    """Panel showing the running program's console output in a read-only textbox."""

    def __init__(self, parent, build_spec, **kwargs):
        wx.Panel.__init__(self, parent, **kwargs)
        self.build_spec = build_spec
        self._init_properties()
        self._init_components()
        self._do_layout()
        # self._HookStdout()

    def _init_properties(self):
        self.SetBackgroundColour('#F0F0F0')

    def _init_components(self):
        self.text = wx.StaticText(self, label=i18n._("status"))
        self.cmd_textbox = wx.TextCtrl(
            self, -1, "",
            style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
        if self.build_spec.get('monospace_display'):
            # Switch the output box to a fixed-width font when requested.
            pointsize = self.cmd_textbox.GetFont().GetPointSize()
            # BUGFIX: wx.Font's third argument is the font *style*;
            # wx.FONTWEIGHT_NORMAL was being passed where
            # wx.FONTSTYLE_NORMAL belongs.
            font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
                           wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
            self.cmd_textbox.SetFont(font)

    def _do_layout(self):
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(10)
        sizer.Add(self.text, 0, wx.LEFT, 30)
        sizer.AddSpacer(10)
        sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
        sizer.AddSpacer(20)
        self.SetSizer(sizer)
        self.Bind(EVT_MSG, self.OnMsg)

    def _HookStdout(self):
        # Redirect stdout writes into the display textbox (currently unused;
        # see the commented-out call in __init__).
        sys.stdout = MessagePump()
        sys.stdout.write = self.WriteToDisplayBox

    def AppendText(self, txt):
        self.cmd_textbox.AppendText(txt)

    def WriteToDisplayBox(self, txt):
        # BUGFIX: the original `txt is not ''` compared object identity,
        # not equality; rely on truthiness instead so any non-empty
        # string is appended.
        if txt:
            self.AppendText(txt)

    def OnMsg(self, evt):
        # Placeholder handler for EVT_MSG events.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.