""" mbed SDK Copyright (c) 2011-2016 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from exporters import Exporter from os.path import splitext, basename class CodeRed(Exporter): NAME = 'CodeRed' TOOLCHAIN = 'GCC_CR' MBED_CONFIG_HEADER_SUPPORTED = True TARGETS = [ 'LPC1768', 'LPC4088', 'LPC4088_DM', 'LPC4330_M4', 'LPC1114', 'LPC11U35_401', 'LPC11U35_501', 'UBLOX_C027', 'ARCH_PRO', 'LPC1549', 'LPC11U68', 'LPCCAPPUCCINO', 'LPC824', 'L
PC11U37H_401', ] def generate(self): libraries = [] for lib in self.resources.libraries: l, _ = splitext(basename(lib)) libraries.append(l[3:]) c
tx = { 'name': self.project_name, 'include_paths': self.resources.inc_dirs, 'linker_script': self.resources.linker_script, 'object_files': self.resources.objects, 'libraries': libraries, 'symbols': self.toolchain.get_symbols() } ctx.update(self.flags) self.gen_file('codered_%s_project.tmpl' % self.target.lower(), ctx, '.project') self.gen_file('codered_%s_cproject.tmpl' % self.target.lower(), ctx, '.cproject')
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This code example creates a click-to-download ad in a given ad group.

This type of ad is also known as an app promotion ad. To list ad groups, run
get_ad_groups.py.

The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.

Tags: AdGroupAdService.mutate
Api: AdWordsOnly
"""

__author__ = 'Joseph DiLallo'

from googleads import adwords


AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'


def main(client, ad_group_id):
  # Initialize appropriate service.
  ad_group_ad_service = client.GetService('AdGroupAdService', version='v201506')

  # Create the template elements for the ad. You can refer to
  # https://developers.google.com/adwords/api/docs/appendix/templateads
  # for the list of available template fields.
  ad_data = {
      'uniqueName': 'adData',
      'fields': [
          {
              'name': 'headline',
              'fieldText': 'Enjoy your drive in Mars',
              'type': 'TEXT'
          },
          {
              'name': 'description1',
              'fieldText': 'Realistic physics simulation',
              'type': 'TEXT'
          },
          {
              'name': 'description2',
              'fieldText': 'Race against players online',
              'type': 'TEXT'
          },
          {
              'name': 'appId',
              'fieldText': 'com.example.demogame',
              'type': 'TEXT'
          },
          {
              'name': 'appStore',
              'fieldText': '2',
              'type': 'ENUM'
          }
      ]
  }

  # Create click to download ad.
  click_to_download_app_ad = {
      'xsi_type': 'TemplateAd',
      'name': 'Ad for demo game',
      'templateId': '353',
      'finalUrls': [
          'http://play.google.com/store/apps/details?id=com.example.demogame'
      ],
      'displayUrl': 'play.google.com',
      'templateElements': [ad_data]
  }

  # Create ad group ad.
  ad_group_ad = {
      'adGroupId': ad_group_id,
      'ad': click_to_download_app_ad,
      # Optional.
      'status': 'PAUSED'
  }

  # Add ad.
  ads = ad_group_ad_service.mutate([
      {'operator': 'ADD', 'operand': ad_group_ad}
  ])

  # Display results.
  if 'value' in ads:
    for ad in ads['value']:
      print ('Added new click-to-download ad to ad group ID \'%s\' '
             'with URL \'%s\'.' % (ad['ad']['id'], ad['ad']['finalUrls'][0]))
  else:
    print 'No ads were added.'


if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, AD_GROUP_ID)
""" Launcher functionality for the Google Compute Engine (GCE) """ import json import logging import os from dcos_launch import onprem, util from dcos_launch.platforms import gcp from dcos_test_utils.helpers import Host from googleapiclient.errors import HttpError log = logging.getLogger(__name__) def get_credentials(env=None) -> tuple: path = None if env is None: env = os.environ.copy() if 'GCE_CREDENTIALS' in env: json_credentials = env['GCE_CREDENTIALS'] elif 'GOOGLE_APPLICATION_CREDENTIALS' in env: path = env['GOOGLE_APPLICATION_CREDENTIALS'] json_credentials = util.read_file(path) else: raise util.LauncherError( 'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env') return json_credentials, path class OnPremLauncher(onprem.AbstractOnpremLauncher): # Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS def __init__(self, config: dict, env=None): creds_string, _ = get_credentials(env) self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string)) self.config = config @property def deployment(self): """ Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the corresponding real deployment (active machines) exists and doesn't contain any errors. """ try: deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'], self.config['gce_zone']) info = deployment.get_info() errors = info['operation'].get('error') if errors: raise util.LauncherError('DeploymentContainsErrors', str(errors)) return deployment except HttpError as e: if e.resp.status == 404: raise util.LauncherError('DeploymentNotFound', "The deployment you are trying to access doesn't exist") from e raise e def create(self) -> dict: self.key_helper() node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents'] + self.config['num_private_agents']) gcp.BareClusterDeployment.create( self.gcp_wrapper, self.config['deployment_name'], self.config['gce_zone'], node_count, self.config['disk_size'], self.config['disk_type'], self.config['source_image'], self.config['machine_type'], self.config['image_project'], self.config['ssh_user'], self.config['ssh_public_key'], self.config['disable_updates'], self.config['use_preemptible_vms'], tags=self.config.get('tags')) return self.config def key_helper(self): """ Generates a public key and a private key and stores them in the config. The public key will be applied to all the instances in the deployment later on when wait() is called. """ if self.config['key_helper']: private_key, public_key = util.generate_rsa_keypair() self.config['ssh_priva
te_key'] = private_key.decode() self.config['ssh_public_key'] = public_key.decode() def get_cluster_hosts(self) -> [Host]: return list(self.deployment.hosts)[1:] def get_bootstrap_host(self) -> Host: return list(self.deployment.hosts)[0] def wait(self): """ Waits for the deploymen
t to complete: first, the network that will contain the cluster is deployed. Once the network is deployed, a firewall for the network and an instance template are deployed. Finally, once the instance template is deployed, an instance group manager and all its instances are deployed. """ self.deployment.wait_for_completion() def delete(self): """ Deletes all the resources associated with the deployment (instance template, network, firewall, instance group manager and all its instances. """ self.deployment.delete()
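
# A minimal, hypothetical usage sketch for the launcher above. The config keys
# mirror the accesses made in OnPremLauncher; every value below is a
# placeholder, not a tested configuration, and GCE_CREDENTIALS is assumed to
# be set in the environment.
if __name__ == '__main__':
    config = {
        'deployment_name': 'my-dcos-test',
        'gce_zone': 'us-central1-a',
        'num_masters': 1,
        'num_public_agents': 1,
        'num_private_agents': 2,
        'disk_size': 40,
        'disk_type': 'pd-ssd',
        'source_image': 'coreos-stable',
        'machine_type': 'n1-standard-4',
        'image_project': 'coreos-cloud',
        'ssh_user': 'core',
        'ssh_public_key': '',
        'disable_updates': True,
        'use_preemptible_vms': False,
        'key_helper': True,  # generates the keypair, filling ssh_public_key
    }
    launcher = OnPremLauncher(config)
    launcher.create()
    launcher.wait()
    print(launcher.get_bootstrap_host())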
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" In-Memory Object Server for Swift """

import os
from swift import gettext_ as _

from eventlet import Timeout

from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import is_success
from swift.obj.mem_diskfile import InMemoryFileSystem
from swift.obj import server


class ObjectController(server.ObjectController):
    """
    Implements the WSGI application for the Swift In-Memory Object Server.
    """

    def setup(self, conf):
        """
        Nothing specific to do for the in-memory version.

        :param conf: WSGI configuration parameter
        """
        self._filesystem = InMemoryFileSystem()

    def get_diskfile(self, device, partition, account, container, obj,
                     **kwargs):
        """
        Utility method for instantiating a DiskFile object supporting a given
        REST API.

        An implementation of the object server that wants to use a different
        DiskFile class would simply over-ride this method to provide that
        behavior.
        """
        return self._filesystem.get_diskfile(account, container, obj, **kwargs)

    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice, policy_idx):
        """
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param host: host that the container is on
        :param partition: partition that the container is on
        :param contdevice: device name that the container is on
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        :param policy_idx: the associated storage policy index
        """
        headers_out['user-agent'] = 'obj-server %s' % os.getpid()
        full_path = '/%s/%s/%s' % (account, container, obj)
        if all([host, partition, contdevice]):
            try:
                with ConnectionTimeout(self.conn_timeout):
                    ip, port = host.rsplit(':', 1)
                    conn = http_connect(ip, port, contdevice, partition, op,
                                        full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        return
                    else:
                        self.logger.error(_(
                            'ERROR Container update failed: %(status)d '
                            'response from %(ip)s:%(port)s/%(dev)s'),
                            {'status': response.status, 'ip': ip, 'port': port,
                             'dev': contdevice})
            except (Exception, Timeout):
                self.logger.exception(_(
                    'ERROR container update failed with '
                    '%(ip)s:%(port)s/%(dev)s'),
                    {'ip': ip, 'port': port, 'dev': contdevice})
        # FIXME: For now don't handle async updates

    def REPLICATE(self, request):
        """
        Handle REPLICATE requests for the Swift Object Server.  This is
        used by the object replicator to get hashes for directories.
        """
        pass


def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI object server apps"""
    conf = global_conf.copy()
    conf.update(local_conf)
    return ObjectController(conf)
#!/usr/bin/env python
# Copyright (c) 2008-14 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.

from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future_builtins import *
from PyQt4.QtCore import (QRegExp, Qt)
from PyQt4.QtCore import pyqtSignal as Signal
from PyQt4.QtGui import (QCheckBox, QDialog, QDialogButtonBox, QGridLayout,
        QLabel, QLineEdit, QMessageBox, QRegExpValidator, QSpinBox)


class NumberFormatDlg(QDialog):

    changed = Signal()

    def __init__(self, format, parent=None):
        super(NumberFormatDlg, self).__init__(parent)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.format = format

        self.create_widgets()
        self.layout_widgets()
        self.create_connections()

        self.setWindowTitle("Set Number Format (Modeless)")

    def create_widgets(self):
        punctuationRe = QRegExp(r"[ ,;:.]")
        self.thousandsLabel = QLabel("&Thousands separator")
        self.thousandsEdit = QLineEdit(self.format["thousandsseparator"])
        self.thousandsLabel.setBuddy(self.thousandsEdit)
        self.thousandsEdit.setMaxLength(1)
        self.thousandsEdit.setValidator(
                QRegExpValidator(punctuationRe, self))
        self.decimalMarkerLabel = QLabel("Decimal &marker")
        self.decimalMarkerEdit = QLineEdit(self.format["decimalmarker"])
        self.decimalMarkerLabel.setBuddy(self.decimalMarkerEdit)
        self.decimalMarkerEdit.setMaxLength(1)
        self.decimalMarkerEdit.setValidator(
                QRegExpValidator(punctuationRe, self))
        self.decimalMarkerEdit.setInputMask("X")
        self.decimalPlacesLabel = QLabel("&Decimal places")
        self.decimalPlacesSpinBox = QSpinBox()
        self.decimalPlacesLabel.setBuddy(self.decimalPlacesSpinBox)
        self.decimalPlacesSpinBox.setRange(0, 6)
        self.decimalPlacesSpinBox.setValue(self.format["decimalplaces"])
        self.redNegativesCheckBox = QCheckBox("&Red negative numbers")
        self.redNegativesCheckBox.setChecked(self.format["rednegatives"])
        self.buttonBox = QDialogButtonBox(QDialogButtonBox.Apply |
                                          QDialogButtonBox.Close)

    def layout_widgets(self):
        grid = QGridLayout()
        grid.addWidget(self.thousandsLabel, 0, 0)
        grid.addWidget(self.thousandsEdit, 0, 1)
        grid.addWidget(self.decimalMarkerLabel, 1, 0)
        grid.addWidget(self.decimalMarkerEdit, 1, 1)
        grid.addWidget(self.decimalPlacesLabel, 2, 0)
        grid.addWidget(self.decimalPlacesSpinBox, 2, 1)
        grid.addWidget(self.redNegativesCheckBox, 3, 0, 1, 2)
        grid.addWidget(self.buttonBox, 4, 0, 1, 2)
        self.setLayout(grid)

    def create_connections(self):
        self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(
                self.apply)
        self.buttonBox.rejected.connect(self.reject)

    def apply(self):
        thousands = unicode(self.thousandsEdit.text())
        decimal = unicode(self.decimalMarkerEdit.text())
        if thousands == decimal:
            QMessageBox.warning(self, "Format Error",
                    "The thousands separator and the decimal marker "
                    "must be different.")
            self.thousandsEdit.selectAll()
            self.thousandsEdit.setFocus()
            return
        if len(decimal) == 0:
            QMessageBox.warning(self, "Format Error",
                    "The decimal marker may not be empty.")
            self.decimalMarkerEdit.selectAll()
            self.decimalMarkerEdit.setFocus()
            return
        self.format["thousandsseparator"] = thousands
        self.format["decimalmarker"] = decimal
        self.format["decimalplaces"] = self.decimalPlacesSpinBox.value()
        self.format["rednegatives"] = self.redNegativesCheckBox.isChecked()
        self.changed.emit()
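
# A minimal, hypothetical usage sketch for the modeless dialog above. The
# "format" dict keys match the ones read in create_widgets()/apply(); the
# __main__ block is not part of the original module.
if __name__ == "__main__":
    import sys
    from PyQt4.QtGui import QApplication
    app = QApplication(sys.argv)
    fmt = {"thousandsseparator": ",", "decimalmarker": ".",
           "decimalplaces": 2, "rednegatives": False}
    dlg = NumberFormatDlg(fmt)
    # "changed" fires on every successful Apply; the dict is edited in place
    dlg.changed.connect(lambda: print(fmt))
    dlg.show()
    app.exec_()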
"""Test loading historical builds and jobs.""" from __future__ import absolute_import, unicode_literals from travis_log_fetch.config import _get_travispy from travis_log_fetch._target import Target from travis_log_fetch.get import ( get_travis_repo, get_historical_builds, get_historical_build, get_historical_job, ) import pytest class TestHistorical(object): def test_latest(self): _travis = _get_travispy() repo = get_travis_repo(_travis, 'travispy/on_pypy') builds = get_historical_builds(_travis, repo) build = next(builds) assert build.repository_id == 2598880 assert build.id == repo.last_build_id def test_after(self): _travis = _get_travispy() repo = get_travis_repo(_travis, 'travispy/on_pypy') builds = get_historical_builds(_travis, repo, _after=3, _load_jobs=False) build = next(builds) assert build.repository_id == 2598880 assert build.number == '2' build = next(builds) assert build.repository_id == 2598880 assert build.number == '1' def test_all_small(self): _travis = _get_travispy()
repo = get_travis_repo(_travis, 'travispy/on_pypy') builds
= get_historical_builds(_travis, repo) ids = [] for build in builds: assert build.repository_id == 2598880 ids.append(build.id) assert ids == [53686685, 37521698, 28881355] def test_multiple_batches_menegazzo(self): """Test using a repository that has greater than 2*25 builds.""" # Ideally each has one or two jobs, so that doesnt slow down the test, # and the logs are small in case the log is fetched with the job. _travis = _get_travispy() repo = get_travis_repo(_travis, 'menegazzo/travispy') builds = get_historical_builds(_travis, repo, _load_jobs=False) ids = [] prev_number = None for build in builds: assert build.repository_id == 2419489 if int(build.number) in [80]: # There are two '80' # See https://github.com/travis-ci/travis-ci/issues/2582 print('duplicate build number {0}: {1}'.format( build.number, build.id)) assert build.id in [45019395, 45019396] if build.id == 45019395: assert prev_number == int(build.number) else: assert prev_number == int(build.number) + 1 elif prev_number: # All other build numbers decrease rather orderly assert prev_number == int(build.number) + 1 prev_number = int(build.number) if ids: assert build.id < ids[-1] ids.append(build.id) if len(ids) > 100: break assert len(ids) == len(set(ids)) def test_multiple_batches_bootstrap(self): """Test using a repository that has lots of builds, esp. PRs.""" _travis = _get_travispy() repo = get_travis_repo(_travis, 'twbs/bootstrap') builds = get_historical_builds(_travis, repo, _after=12071, _load_jobs=False) ids = [] prev_number = None for build in builds: assert build.repository_id == 12962 if int(build.number) in [12069, 12062, 12061, 12054, 12049, 12048, 12041, 12038, 12037, 12033]: # Many duplicates # See https://github.com/travis-ci/travis-ci/issues/2582 print('duplicate build number {0}: {1}'.format( build.number, build.id)) if build.id in [53437234, 53350534, 53350026, 53263731, 53263730, # two extra 12054 53180440, 53179846, 53062896, 53019568, 53004896, 52960766]: assert prev_number == int(build.number) else: assert prev_number == int(build.number) + 1 elif prev_number: # All other build numbers decrease rather orderly assert prev_number == int(build.number) + 1 prev_number = int(build.number) if ids: assert build.id < ids[-1] ids.append(build.id) # There are many more duplicates, so we stop here. if int(build.number) == 12033: break assert len(ids) == len(set(ids)) def test_logical_single_job_build(self): target = Target.from_extended_slug('travispy/on_pypy#1.1') _travis = _get_travispy() job = get_historical_job(_travis, target) assert job.repository_id == 2598880 assert job.number == '1.1' assert job.id == 28881356 def test_logical_multiple_job_build(self): target = Target.from_extended_slug('menegazzo/travispy#101.3') _travis = _get_travispy() job = get_historical_job(_travis, target) assert job.repository_id == 2419489 assert job.number == '101.3' assert job.id == 82131391 def test_logical_duplicate_build(self): target = Target.from_extended_slug('menegazzo/travispy#80.3') _travis = _get_travispy() pytest.raises(AssertionError, get_historical_build, _travis, target)
e = None
ale_lib.act.argtypes = [c_void_p, c_int]
ale_lib.act.restype = c_int
ale_lib.game_over.argtypes = [c_void_p]
ale_lib.game_over.restype = c_bool
ale_lib.reset_game.argtypes = [c_void_p]
ale_lib.reset_game.restype = None
ale_lib.getLegalActionSet.argtypes = [c_void_p, c_void_p]
ale_lib.getLegalActionSet.restype = None
ale_lib.getLegalActionSize.argtypes = [c_void_p]
ale_lib.getLegalActionSize.restype = c_int
ale_lib.getMinimalActionSet.argtypes = [c_void_p, c_void_p]
ale_lib.getMinimalActionSet.restype = None
ale_lib.getMinimalActionSize.argtypes = [c_void_p]
ale_lib.getMinimalActionSize.restype = c_int
ale_lib.getFrameNumber.argtypes = [c_void_p]
ale_lib.getFrameNumber.restype = c_int
ale_lib.lives.argtypes = [c_void_p]
ale_lib.lives.restype = c_int
ale_lib.getEpisodeFrameNumber.argtypes = [c_void_p]
ale_lib.getEpisodeFrameNumber.restype = c_int
ale_lib.getScreen.argtypes = [c_void_p, c_void_p]
ale_lib.getScreen.restype = None
ale_lib.getRAM.argtypes = [c_void_p, c_void_p]
ale_lib.getRAM.restype = None
ale_lib.getRAMSize.argtypes = [c_void_p]
ale_lib.getRAMSize.restype = c_int
ale_lib.getScreenWidth.argtypes = [c_void_p]
ale_lib.getScreenWidth.restype = c_int
ale_lib.getScreenHeight.argtypes = [c_void_p]
ale_lib.getScreenHeight.restype = c_int
ale_lib.getScreenRGB.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenRGB.restype = None
ale_lib.getScreenGrayscale.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenGrayscale.restype = None
ale_lib.saveState.argtypes = [c_void_p]
ale_lib.saveState.restype = None
ale_lib.loadState.argtypes = [c_void_p]
ale_lib.loadState.restype = None
ale_lib.cloneState.argtypes = [c_void_p]
ale_lib.cloneState.restype = c_void_p
ale_lib.restoreState.argtypes = [c_void_p, c_void_p]
ale_lib.restoreState.restype = None
ale_lib.cloneSystemState.argtypes = [c_void_p]
ale_lib.cloneSystemState.restype = c_void_p
ale_lib.restoreSystemState.argtypes = [c_void_p, c_void_p]
ale_lib.restoreSystemState.restype = None
ale_lib.deleteState.argtypes = [c_void_p]
ale_lib.deleteState.restype = None
ale_lib.saveScreenPNG.argtypes = [c_void_p, c_char_p]
ale_lib.saveScreenPNG.restype = None
ale_lib.encodeState.argtypes = [c_void_p, c_void_p, c_int]
ale_lib.encodeState.restype = None
ale_lib.encodeStateLen.argtypes = [c_void_p]
ale_lib.encodeStateLen.restype = c_int
ale_lib.decodeState.argtypes = [c_void_p, c_int]
ale_lib.decodeState.restype = c_void_p


class ALEInterface(object):
    def __init__(self):
        self.obj = ale_lib.ALE_new()

    def getString(self, key):
        return ale_lib.getString(self.obj, key)

    def getInt(self, key):
        return ale_lib.getInt(self.obj, key)

    def getBool(self, key):
        return ale_lib.getBool(self.obj, key)

    def getFloat(self, key):
        return ale_lib.getFloat(self.obj, key)

    def setString(self, key, value):
        ale_lib.setString(self.obj, key, value)

    def setInt(self, key, value):
        ale_lib.setInt(self.obj, key, value)

    def setBool(self, key, value):
        ale_lib.setBool(self.obj, key, value)

    def setFloat(self, key, value):
        ale_lib.setFloat(self.obj, key, value)

    def loadROM(self, rom_file):
        ale_lib.loadROM(self.obj, rom_file)

    def act(self, action):
        return ale_lib.act(self.obj, int(action))

    def game_over(self):
        return ale_lib.game_over(self.obj)

    def reset_game(self):
        ale_lib.reset_game(self.obj)

    def getLegalActionSet(self):
        act_size = ale_lib.getLegalActionSize(self.obj)
        act = np.zeros((act_size), dtype=np.intc)
        ale_lib.getLegalActionSet(self.obj, as_ctypes(act))
        return act

    def getMinimalActionSet(self):
        act_size = ale_lib.getMinimalActionSize(self.obj)
        act = np.zeros((act_size), dtype=np.intc)
        ale_lib.getMinimalActionSet(self.obj, as_ctypes(act))
        return act

    def getFrameNumber(self):
        return ale_lib.getFrameNumber(self.obj)

    def lives(self):
        return ale_lib.lives(self.obj)

    def getEpisodeFrameNumber(self):
        return ale_lib.getEpisodeFrameNumber(self.obj)

    def getScreenDims(self):
        """returns a tuple that contains (screen_width, screen_height)
        """
        width = ale_lib.getScreenWidth(self.obj)
        height = ale_lib.getScreenHeight(self.obj)
        return (width, height)

    def getScreen(self, screen_data=None):
        """This function fills screen_data with the RAW Pixel data
        screen_data MUST be a numpy array of uint8/int8. This could be
        initialized like so:
        screen_data = np.empty(w*h, dtype=np.uint8)
        Notice, it must be width*height in size also
        If it is None, then this function will initialize it
        Note: This is the raw pixel values from the atari, before any RGB
        palette transformation takes place
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.zeros(width*height, dtype=np.uint8)
        ale_lib.getScreen(self.obj, as_ctypes(screen_data))
        return screen_data

    def getScreenRGB(self, screen_data=None):
        """This function fills screen_data with the data in RGB format
        screen_data MUST be a numpy array of uint8. This can be initialized
        like so:
        screen_data = np.empty((height,width,3), dtype=np.uint8)
        If it is None, then this function will initialize it.
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.empty((height, width, 3), dtype=np.uint8)
        ale_lib.getScreenRGB(self.obj, as_ctypes(screen_data[:]))
        return screen_data

    def getScreenGrayscale(self, screen_data=None):
        """This function fills screen_data with the data in grayscale
        screen_data MUST be a numpy array of uint8. This can be initialized
        like so:
        screen_data = np.empty((height,width,1), dtype=np.uint8)
        If it is None, then this function will initialize it.
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.empty((height, width, 1), dtype=np.uint8)
        ale_lib.getScreenGrayscale(self.obj, as_ctypes(screen_data[:]))
        return screen_data

    def getRAMSize(self):
        return ale_lib.getRAMSize(self.obj)

    def getRAM(self, ram=None):
        """This function grabs the atari RAM.
        ram MUST be a numpy array of uint8/int8. This can be initialized like so:
        ram = np.array(ram_size, dtype=uint8)
        Notice: It must be ram_size where ram_size can be retrieved via the
        getRAMSize function.
        If it is None, then this function will initialize it.
        """
        if(ram is None):
            ram_size = ale_lib.getRAMSize(self.obj)
            ram = np.zeros(ram_size, dtype=np.uint8)
        ale_lib.getRAM(self.obj, as_ctypes(ram))
        return ram

    def saveScreenPNG(self, filename):
        """Save the current screen as a png file"""
        return ale_lib.saveScreenPNG(self.obj, filename)

    def saveState(self):
        """Saves the state of the system"""
        return ale_lib.saveState(self.obj)

    def loadState(self):
        """Loads the state of the system"""
        return ale_lib.loadState(self.obj)

    def cloneState(self):
        """This makes a copy of the environment state. This copy does *not*
        include pseudorandomness, making it suitable for planning purposes.
        By contrast, see cloneSystemState.
        """
        return ale_lib.cloneState(self.obj)

    def restoreState(self, state):
        """Reverse operation of cloneState(). This does not restore
        pseudorandomness, so that repeated calls to restoreState() in the
        stochastic controls setting will not lead to the same outcomes. By
        contrast, see restoreSystemState.
        """
        ale_lib.restoreState(self.obj, state)

    def cloneSystemState(self):
        """This makes a copy of the system & environment state, suitable for
# This code will not run; it was written to understand the logic.

from gittigidiyor.applicationservice import *
from gittigidiyor.auth import *

if __name__ == "__main__":
    # HTTP Basic authentication credentials. It blows up for wrong credentials.
    auth = Auth("testuser", "testpassword", None, None)
    api = ApplicationService(auth)
    result = api.createApplication("testdeveloper", "Test Application",
                                   "This is the test application",
                                   "C", "W", "", "xml", "xml", "tr")
    print result
from django.db.models.expressions import F, Func
from rest_framework import serializers

from .models import PdfStorage


class PdfStorageListSerializer(serializers.ModelSerializer):
    author = serializers.SerializerMethodField("full_name")

    class Meta:
        model = PdfStorage
        fields = [
            "id",
            "name",
            "topic",
            "author",
            "created",
        ]

    def full_name(self, pdf):
        return pdf.author.person_name()


class PaidPdfDownloadLinkSerializer(serializers.ModelSerializer):
    download_url = serializers.SerializerMethodField()

    class Meta:
        model = PdfStorage
        fields = ["download_url"]

    def get_download_url(self, obj):
        return obj.pdf_file.url


class AllRelatedIdsSerializer(serializers.Serializer):
    ids = serializers.SerializerMethodField()

    class Meta:
        fields = ["ids"]

    def get_ids(self, obj):
        all_ids = (
            PdfStorage.objects.annotate(ids=Func(F("related_ids"), function="unnest"))
            .values_list("ids", flat=True)
            .distinct()
        )
        return all_ids
def setUp(self):
    self.setup_beets()

def tearDown(self):
    self.teardown_beets()

@patch('beetsplug.thumbnails.util')
def test_write_metadata_im(self, mock_util):
    metadata = {"a": u"A", "b": u"B"}
    write_metadata_im("foo", metadata)
    try:
        command = u"convert foo -set a A -set b B foo".split(' ')
        mock_util.command_output.assert_called_once_with(command)
    except AssertionError:
        command = u"convert foo -set b B -set a A foo".split(' ')
        mock_util.command_output.assert_called_once_with(command)

@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.os.stat')
def test_add_tags(self, mock_stat, _):
    plugin = ThumbnailsPlugin()
    plugin.write_metadata = Mock()
    plugin.get_uri = Mock(side_effect={b"/path/to/cover": "COVER_URI"}.__getitem__)
    album = Mock(artpath=b"/path/to/cover")
    mock_stat.return_value.st_mtime = 12345

    plugin.add_tags(album, b"/path/to/thumbnail")

    metadata = {"Thumb::URI": "COVER_URI",
                "Thumb::MTime": u"12345"}
    plugin.write_metadata.assert_called_once_with(b"/path/to/thumbnail",
                                                  metadata)
    mock_stat.assert_called_once_with(album.artpath)

@patch('beetsplug.thumbnails.os')
@patch('beetsplug.thumbnails.ArtResizer')
@patch('beetsplug.thumbnails.get_im_version')
@patch('beetsplug.thumbnails.get_pil_version')
@patch('beetsplug.thumbnails.GioURI')
def test_check_local_ok(self, mock_giouri, mock_pil, mock_im,
                        mock_artresizer, mock_os):
    # test local resizing capability
    mock_artresizer.shared.local = False
    plugin = ThumbnailsPlugin()
    self.assertFalse(plugin._check_local_ok())

    # test dirs creation
    mock_artresizer.shared.local = True

    def exists(path):
        if path == NORMAL_DIR:
            return False
        if path == LARGE_DIR:
            return True
        raise ValueError(u"unexpected path {0!r}".format(path))
    mock_os.path.exists = exists
    plugin = ThumbnailsPlugin()
    mock_os.makedirs.assert_called_once_with(NORMAL_DIR)
    self.assertTrue(plugin._check_local_ok())

    # test metadata writer function
    mock_os.path.exists = lambda _: True
    mock_pil.return_value = False
    mock_im.return_value = False
    with self.assertRaises(AssertionError):
        ThumbnailsPlugin()

    mock_pil.return_value = True
    self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_pil)

    mock_im.return_value = True
    self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im)

    mock_pil.return_value = False
    self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im)

    self.assertTrue(ThumbnailsPlugin()._check_local_ok())

    # test URI getter function
    giouri_inst = mock_giouri.return_value
    giouri_inst.available = True
    self.assertEqual(ThumbnailsPlugin().get_uri, giouri_inst.uri)

    giouri_inst.available = False
    self.assertEqual(ThumbnailsPlugin().get_uri.__self__.__class__,
                     PathlibURI)

@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.ArtResizer')
@patch('beetsplug.thumbnails.util')
@patch('beetsplug.thumbnails.os')
@patch('beetsplug.thumbnails.shutil')
def test_make_cover_thumbnail(self, mock_shutils, mock_os, mock_util,
                              mock_artresizer, _):
    thumbnail_dir = os.path.normpath(b"/thumbnail/dir")
    md5_file = os.path.join(thumbnail_dir, b"md5")
    path_to_art = os.path.normpath(b"/path/to/art")

    mock_os.path.join = os.path.join  # don't mock that function

    plugin = ThumbnailsPlugin()
    plugin.add_tags = Mock()

    album = Mock(artpath=path_to_art)
    mock_util.syspath.side_effect = lambda x: x
    plugin.thumbnail_file_name = Mock(return_value=b'md5')
    mock_os.path.exists.return_value = False

    def os_stat(target):
        if target == md5_file:
            return Mock(st_mtime=1)
        elif target == path_to_art:
            return Mock(st_mtime=2)
        else:
            raise ValueError(u"invalid target {0}".format(target))
    mock_os.stat.side_effect = os_stat

    plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)

    mock_os.path.exists.assert_called_once_with(md5_file)
    mock_os.stat.has_calls([call(md5_file), call(path_to_art)],
                           any_order=True)

    resize = mock_artresizer.shared.resize
    resize.assert_called_once_with(12345, path_to_art, md5_file)
    plugin.add_tags.assert_called_once_with(album, resize.return_value)
    mock_shutils.move.assert_called_once_with(resize.return_value, md5_file)

    # now test with recent thumbnail & with force
    mock_os.path.exists.return_value = True
    plugin.force = False
    resize.reset_mock()

    def os_stat(target):
        if target == md5_file:
            return Mock(st_mtime=3)
        elif target == path_to_art:
            return Mock(st_mtime=2)
        else:
            raise ValueError(u"invalid target {0}".format(target))
    mock_os.stat.side_effect = os_stat
    plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
    self.assertEqual(resize.call_count, 0)

    # and with force
    plugin.config['force'] = True
    plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
    resize.assert_called_once_with(12345, path_to_art, md5_file)

@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
def test_make_dolphin_cover_thumbnail(self, _):
    plugin = ThumbnailsPlugin()
    tmp = bytestring_path(mkdtemp())
    album = Mock(path=tmp,
                 artpath=os.path.join(tmp, b"cover.jpg"))
    plugin.make_dolphin_cover_thumbnail(album)
    with open(os.path.join(tmp, b".directory"), "rb") as f:
        self.assertEqual(
            f.read().splitlines(),
            [b"[Desktop Entry]", b"Icon=./cover.jpg"]
        )

    # not rewritten when it already exists (yup that's a big limitation)
    album.artpath = b"/my/awesome/art.tiff"
    plugin.make_dolphin_cover_thumbnail(album)
    with open(os.path.join(tmp, b".directory"), "rb") as f:
        self.assertEqual(
            f.read().splitlines(),
            [b"[Desktop Entry]", b"Icon=./cover.jpg"]
        )

    rmtree(tmp)

@patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
@patch('beetsplug.thumbnails.ArtResizer')
def test_process_album(self, mock_artresizer, _):
    get_size = mock_artresizer.shared.get_size

    plugin = ThumbnailsPlugin()
    make_cover = plugin.make_cover_thumbnail = Mock(return_value=True)
    make_dolphin = plugin.make_dolphin_cover_thumbnail = Mock()

    # no art
    album = Mock(artpath=None)
    plugin.process_album(album)
    self.assertEqual(get_size.call_count, 0)
    self.assertEqual(make_dolphin.call_count, 0)

    # cannot get art size
    album.artpath = b"/path/to/art"
    get_size.return_value = None
    plugin.process_album(album)
    get_size.assert_called_once_with(b"/path/to/art")
    self.assertEqual(make_cover.call_count, 0)

    # dolphin tests
    plugin.config['dolphin'] = False
    plugin.process_album(album)
    self.assertEqual(make_dolphin.call_count, 0)

    plugin.config['dolphin'] = True
    plugin.process_album(album)
    make_dolphin.assert_called_once_with(album)

    # small art
    get_size.return_value = 200, 200
    plugin.process_album(album)
    make_cover.assert_called_once_with(album, 128,
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from __future__ import unicode_literals

from flask import request

from indico.modules.admin.views import WPAdmin
from indico.modules.users import User
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin


class WPUser(WPJinjaMixin, WPDecorated):
    """Base WP for user profile pages.

    Whenever you use this, you MUST include `user` in the params passed to
    `render_template`.  Any RH using this should inherit from `RHUserBase`
    which already handles user/admin access.  In this case, simply add
    ``user=self.user`` to your `render_template` call.
    """

    template_prefix = 'users/'

    def __init__(self, rh, active_menu_item, **kwargs):
        kwargs['active_menu_item'] = active_menu_item
        WPDecorated.__init__(self, rh, **kwargs)

    def _get_breadcrumbs(self):
        if 'user_id' in request.view_args:
            user = User.get(request.view_args['user_id'])
            profile_breadcrumb = _('Profile of {name}').format(name=user.full_name)
        else:
            profile_breadcrumb = _('My Profile')
        return render_breadcrumbs(profile_breadcrumb)

    def _get_body(self, params):
        return self._get_page_content(params)


class WPUserDashboard(WPUser):
    bundles = ('module_users.dashboard.js',)


class WPUsersAdmin(WPAdmin):
    template_prefix = 'users/'
    bundles = ('module_users.js',)
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-02 07:44
from __future__ import unicode_literals

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('distances', '0010_auto_20170519_1604'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dates',
            name='startDate',
            field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7, 194576)),
        ),
    ]
import re
import shutil
import time
import os
import os.path

from pyquery.pyquery import PyQuery
import requests
import requests.utils

from WhatManager2.settings import MEDIA_ROOT
from bibliotik import manage_bibliotik
from bibliotik.models import BibliotikTorrent, BibliotikFulltext
from bibliotik.settings import BIBLIOTIK_UPLOAD_URL, BIBLIOTIK_DOWNLOAD_TORRENT_URL
from home.models import DownloadLocation


def extract_torrents(html):
    result = []
    pq = PyQuery(html)
    for row in pq('#torrents_table tbody tr.torrent').items():
        data = {
            'id': row.attr('id')[len('torrent-'):],
            'type': row('td:eq(0) img').attr('title'),
            'title': row('td:eq(1) span.title').text(),
            'publishers': [],
            'authors': [],
            'year': row('td:eq(1) span.torYear').text()[1:-1],
            'format': row('td:eq(1) span.torFormat').text()[1:-1],
            'retail': bool(row('td:eq(1) span.torRetail')),
            'tags': []
        }
        for dlink in row('td:eq(1) > a').items():
            href = dlink.attr('href')
            if '/creators/' in href:
                data['authors'].append({
                    'id': href[href.rfind('/') + 1:],
                    'name': dlink.text()
                })
            elif '/publishers/' in href:
                data['publishers'].append({
                    'id': href[href.rfind('/') + 1:],
                    'name': dlink.text()
                })
        for tag in row('td:eq(1) > span.taglist > a').items():
            href = tag.attr('href')
            data['tags'].append({
                'id': href[href.rfind('/') + 1:],
                'name': tag.text()
            })
        result.append(data)
    return result


class BibliotikClient(object):
    def __init__(self, session_id):
        self.session_id = session_id
        self.session = requests.Session()
        requests.utils.add_dict_to_cookiejar(self.session.cookies, {
            'id': session_id
        })
        self.auth_key = None

    def get_auth_key(self):
        if self.auth_key:
            return self.auth_key
        for i in xrange(3):
            try:
                response = self.session.get('https://bibliotik.me/upload/ebooks')
                response.raise_for_status()
                break
            except Exception:
                pass
        response.raise_for_status()
        pq = PyQuery(response.content)
        self.auth_key = pq('input[name="authkey"]').val()
        if not self.auth_key:
            raise Exception('Could not get the authkey')
        return self.auth_key

    def send_upload(self, payload, payload_files):
        return self.session.post(BIBLIOTIK_UPLOAD_URL, data=payload,
                                 files=payload_files, allow_redirects=False)

    def download_torrent(self, torrent_id):
        torrent_page = BIBLIOTIK_DOWNLOAD_TORRENT_URL.format(torrent_id)
        for i in xrange(3):
            try:
                r = self.session.get(torrent_page, allow_redirects=False)
                r.raise_for_status()
                if r.status_code == 200 and 'application/x-bittorrent' in r.headers['content-type']:
                    filename = re.search('filename="(.*)"',
                                         r.headers['content-disposition']).group(1)
                    return filename, r.content
                else:
                    raise Exception('Wrong status_code or content-type')
            except Exception as ex:
                print u'Error while downloading bibliotik torrent. Will retry: {0}'.format(ex)
                time.sleep(3)
                download_exception = ex
        raise download_exception

    def search(self, query):
        url = 'https://bibliotik.me/torrents/'
        response = self._search_request(url, query)
        if not response.url.startswith(url):
            raise Exception(u'Search redirected to {0}. Probably invalid id. Was {1}.'.format(
                response.url, self.session_id
            ))
        return {
            'results': extract_torrents(response.content),
        }

    def _search_request(self, url, query):
        for i in xrange(3):
            try:
                response = self.session.get(url, params={'search': query})
                response.raise_for_status()
                return response
            except Exception as ex:
                time.sleep(3)
                exception = ex
        raise exception


def upload_book_to_bibliotik(bibliotik_client, book_upload):
    print 'Sending request for upload to bibliotik.me'
    payload_files = dict()
    payload_files['TorrentFileField'] = ('torrent.torrent', book_upload.bibliotik_torrent_file)
    payload = dict()
    payload['upload'] = ''
    payload['authkey'] = bibliotik_client.get_auth_key()
    payload['AuthorsField'] = book_upload.author
    payload['TitleField'] = book_upload.title
    payload['IsbnField'] = book_upload.isbn or ''
    payload['PublishersField'] = book_upload.publisher
    payload['PagesField'] = book_upload.pages or ''
    payload['YearField'] = book_upload.year
    payload['FormatField'] = {
        'AZW3': '21',
        'EPUB': '15',
        'PDF': '2',
    }[book_upload.format]
    payload['LanguageField'] = '1'  # English
    if book_upload.retail:
        payload['RetailField'] = '1'
    payload['TagsField'] = ','.join(book_upload.tag_list)
    payload['ImageField'] = book_upload.cover_url
    payload['DescriptionField'] = book_upload.description
    response = bibliotik_client.send_upload(payload, payload_files)
    response.raise_for_status()
    if response.status_code == requests.codes.ok:
        with open(os.path.join(MEDIA_ROOT, 'bibliotik_upload.html'), 'wb') as f:
            f.write(response.content)
        raise Exception('Bibliotik does not want this. Written to media/')
    redirect_match = re.match('^https://bibliotik.me/torrents/(?P<id>\d+)$',
                              response.headers['location'])
    if not redirect_match:
        raise Exception('Could not get new torrent ID.')
    torrent_id = redirect_match.groupdict()['id']
    book_upload.bibliotik_torrent = BibliotikTorrent.get_or_create(bibliotik_client, torrent_id)
    book_upload.save()
    # Add the torrent to the client
    location = DownloadLocation.get_bibliotik_preferred()
    download_dir = os.path.join(location.path, unicode(book_upload.bibliotik_torrent.id))
    book_path = os.path.join(download_dir, book_upload.target_filename)
    if not os.path.exists(download_dir):
        os.mkdir(download_dir)
        os.chmod(download_dir, 0777)
    shutil.copyfile(
        book_upload.book_data.storage.path(book_upload.book_data),
        book_path
    )
    os.chmod(book_path, 0777)
    manage_bibliotik.add_bibliotik_torrent(
        book_upload.bibliotik_torrent.id,
        location=location,
        bibliotik_client=bibliotik_client
    )
    return book_upload


def search_torrents(query):
    b_fulltext = BibliotikFulltext.objects.only('id').all()
    b_fulltext = b_fulltext.extra(
        where=['MATCH(`info`, `more_info`) AGAINST (%s IN BOOLEAN MODE)'],
        params=[query])
    b_fulltext = b_fulltext.extra(
        select={'score': 'MATCH (`info`) AGAINST (%s)'},
        select_params=[query])
    b_fulltext = b_fulltext.extra(order_by=['-score'])
    b_torrents_dict = BibliotikTorrent.objects.in_bulk([b.id for b in b_fulltext])
    b_torrents = list()
    for i in b_fulltext:
        b_torrent = b_torrents_dict[i.id]
        coef = 1.0
        if b_torrent.retail:
            coef *= 1.2
        if b_torrent.format == 'EPUB':
            coef *= 1.1
        elif b_torrent.format == 'PDF':
            coef *= 0.9
        b_torrent.score = i.score * coef
        b_torrents.append(b_torrent)
    return b_torrents
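
# Hypothetical interactive use of the client above; the session id is a
# placeholder and would normally come from a logged-in browser cookie.
if __name__ == '__main__':
    client = BibliotikClient('0123456789abcdef')
    hits = client.search(u'dickens')
    for t in hits['results']:
        print u'{0} ({1}, {2})'.format(t['title'], t['format'], t['year'])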
# Copyright (C) 2007, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import sys

from bzrlib.builtins import cmd_cat
from bzrlib.tests import StringIOWrapper
from bzrlib.tests.transport_util import TestCaseWithConnectionHookedTransport


class TestCat(TestCaseWithConnectionHookedTransport):

    def setUp(self):
        super(TestCat, self).setUp()
        # Redirect sys.stdout as this is what cat uses
        self.outf = StringIOWrapper()
        self.overrideAttr(sys, 'stdout', self.outf)

    def test_cat(self):
        # FIXME: sftp raises ReadError instead of NoSuchFile when probing for
        # branch/foo/.bzr/branch-format when used with the paramiko test
        # server.
        from bzrlib.tests import TestSkipped
        raise TestSkipped('SFTPTransport raises incorrect exception'
                          ' when reading from paramiko server')

        wt1 = self.make_branch_and_tree('branch')
        self.build_tree_contents([('branch/foo', 'foo')])
        wt1.add('foo')
        wt1.commit('add foo')

        self.start_logging_connections()

        cmd = cmd_cat()
        cmd.run(self.get_url('branch/foo'))
        self.assertEquals(1, len(self.connections))
        self.assertEquals('foo', self.outf.getvalue())
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file

import os

all_modifs = {}


def fixdir(dir):
    global all_modifs
    for k in all_modifs:
        for v in all_modifs[k]:
            modif(os.path.join(dir, 'waflib'), k, v)


def modif(dir, name, fun):
    if name == '*':
        lst = []
        for y in '. Tools extras'.split():
            for x in os.listdir(os.path.join(dir, y)):
                if x.endswith('.py'):
                    lst.append(y + os.sep + x)
        for x in lst:
            modif(dir, x, fun)
        return
    filename = os.path.join(dir, name)
    f = open(filename, 'r')
    try:
        txt = f.read()
    finally:
        f.close()
    txt = fun(txt)
    f = open(filename, 'w')
    try:
        f.write(txt)
    finally:
        f.close()


def subst(*k):
    def do_subst(fun):
        global all_modifs
        for x in k:
            try:
                all_modifs[x].append(fun)
            except KeyError:
                all_modifs[x] = [fun]
        return fun
    return do_subst


@subst('*')
def r1(code):
    code = code.replace(',e:', ',e:')
    code = code.replace("", '')
    code = code.replace('', '')
    return code


@subst('Runner.py')
def r4(code):
    code = code.replace('next(self.biter)', 'self.biter.next()')
    return code
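
# Sketch of how the substitution registry above is meant to be used: a
# function decorated with @subst is recorded in all_modifs and later applied
# to the named file (or to every file, for '*') when fixdir() is called on an
# unpacked waf tree. The target filename here is only an example.
@subst('Options.py')
def r_example(code):
    # a no-op rewrite; the real modifiers above patch python3 syntax back
    return code

# fixdir('/path/to/extracted/waf')  # would rewrite files under .../waflib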
import time

from torba.server import util


def sessions_lines(data):
    """A generator returning lines for a list of sessions.

    data is the return value of rpc_sessions()."""
    fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} '
           '{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
    yield fmt.format('ID', 'Flags', 'Client', 'Proto',
                     'Reqs', 'Txs', 'Subs',
                     'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
    for (id_, flags, peer, client, proto, reqs, txs_sent, subs,
         recv_count, recv_size, send_count, send_size, time) in data:
        yield fmt.format(id_, flags, client, proto,
                         '{:,d}'.format(reqs),
                         '{:,d}'.format(txs_sent),
                         '{:,d}'.format(subs),
                         '{:,d}'.format(recv_count),
                         '{:,d}'.format(recv_size // 1024),
                         '{:,d}'.format(send_count),
                         '{:,d}'.format(send_size // 1024),
                         util.formatted_time(time, sep=''), peer)


def groups_lines(data):
    """A generator returning lines for a list of groups.

    data is the return value of rpc_groups()."""
    fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}'
           '{:>7} {:>9} {:>7} {:>9}')
    yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs',
                     'Recv', 'Recv KB', 'Sent', 'Sent KB')
    for (id_, session_count, bandwidth, reqs, txs_sent, subs,
         recv_count, recv_size, send_count, send_size) in data:
        yield fmt.format(id_,
                         '{:,d}'.format(session_count),
                         '{:,d}'.format(bandwidth // 1024),
                         '{:,d}'.format(reqs),
                         '{:,d}'.format(txs_sent),
                         '{:,d}'.format(subs),
                         '{:,d}'.format(recv_count),
                         '{:,d}'.format(recv_size // 1024),
                         '{:,d}'.format(send_count),
                         '{:,d}'.format(send_size // 1024))


def peers_lines(data):
    """A generator returning lines for a list of peers.

    data is the return value of rpc_peers()."""
    def time_fmt(t):
        if not t:
            return 'Never'
        return util.formatted_time(now - t)

    now = time.time()
    fmt = ('{:<30} {:<6} {:>5} {:>5} {:<17} {:>4} '
           '{:>4} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}')
    yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min',
                     'Max', 'Pruning', 'Last Good', 'Last Try',
                     'Tries', 'Source', 'IP Address')
    for item in data:
        features = item['features']
        hostname = item['host']
        host = features['hosts'][hostname]
        yield fmt.format(hostname[:30],
                         item['status'],
                         host.get('tcp_port') or '',
                         host.get('ssl_port') or '',
                         features['server_version'] or 'unknown',
                         features['protocol_min'],
                         features['protocol_max'],
                         features['pruning'] or '',
                         time_fmt(item['last_good']),
                         time_fmt(item['last_try']),
                         item['try_count'],
                         item['source'][:20],
                         item['ip_addr'] or '')
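
# Example rendering of one session row. The tuple below only mirrors the
# shape unpacked in sessions_lines() above; the values are made up.
if __name__ == '__main__':
    sample = [(1, 'S', '1.2.3.4:50000', 'electrum/3.3', '1.4',
               10, 2, 3, 40, 8192, 50, 16384, 12.5)]
    for line in sessions_lines(sample):
        print(line)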
list[ind]
        elif event.key == 'L':
            print(self.sources[self.sources.sourceid ==
                               self.sources['sourceid'][self.index]])
        elif event.key == 'T':
            sdiff = self.accelerator*(self.smax - self.smin)/255.0
            self.smax += sdiff
            self.smin += sdiff
            self.update_scale()
        elif event.key == 'B':
            sdiff = self.accelerator*(self.smax - self.smin)/255.0
            self.smax -= sdiff
            self.smin -= sdiff
            self.update_scale()
        elif event.key == 'N':
            sdiff = self.accelerator*(self.smax - self.smin)/255.0
            self.smax -= sdiff
            self.smin += sdiff
            self.update_scale()
        elif event.key == 'W':
            sdiff = self.accelerator*(self.smax - self.smin)/255.0
            self.smax += sdiff
            self.smin -= sdiff
            self.update_scale()
        elif event.key == 'U':
            print("Current stretch limits: %10.4g, %10.4g" % (self.smin, self.smax))
            self.smin = float(input("New lower value?"))
            self.smax = float(input("New upper value?"))
            self.update_scale()


def find_map(obsid, band, mapdir, template="{}{}_map.fits.zip"):
    """
    Walk the map directory and return the map data and marker size

    Parameters:
    -----------
    obsid (int): observation id (10-digit integer)
    band (string) : blue, green, red, PSW, PMW or PLW
    mapdir (string) : top-level of map directory
    template (string) : how to format obsid and filter into a map name

    Returns:
    --------
    img_data : numpy array of image data
    filter : 'blue', 'green', 'red' for PACS, 'PSW', 'PMW', 'PSW' for SPIRE
    mrkr_size : size of markers in pixels
    wcs : astropy.wcs object for the image
    """
    fname = template.format(obsid, band)
    fullname = fname
    for root, dir, files in os.walk(os.path.expanduser(mapdir), followlinks=True):
        for name in files:
            if name.endswith(fname):
                fullname = os.path.join(root, fname)
                break
            elif name.endswith(fname.replace('map', 'pmd')):
                fullname = os.path.join(root, fname.replace('map', 'pmd'))
                break
            elif name.endswith(fname.replace('L25', 'L3')):
                fullname = os.path.join(root, fname.replace('L25', 'L3'))
                break
            elif name.endswith(fname.replace('L25', 'L2').replace('JSMAP', 'PMAP')):
                fullname = os.path.join(root,
                                        fname.replace('L25', 'L2').replace('JSMAP', 'PMAP'))
                break
    # Get the data
    hdu = fits.open(fullname)
    img_data = hdu[1].data
    filter = band
    if (band == 'B'):
        if (hdu[0].header['WAVELNTH'] == 100.0):
            filter = 'green'
        else:
            filter = 'blue'
    elif (band == 'R'):
        filter = 'red'
    # Handle illegal CUNITn in PACS SPG12 and earlier maps
    for key in ['CUNIT1', 'CUNIT2']:
        if key in hdu[1].header.keys():
            del hdu[1].header[key]
    img_wcs = WCS(hdu[1].header)
    deg_per_pix = np.sqrt(np.abs(np.linalg.det(img_wcs.pixel_scale_matrix)))
    beams = {'blue': 5.5, 'green': 7.0, 'red': 11.5,
             'PSW': 17.0, 'PMW': 32.0, 'PLW': 42.0}
    beam_size = beams[filter]/3600.
    mrkr_size = beam_size/deg_per_pix
    return(img_data, filter, mrkr_size, img_wcs)


def sourcelist_pscdb(obsid, filter, sql_statement, dbname, username, hostname,
                     port=5432):
    """
    Return dataframe from source table

    Parameters:
    -----------
    obsid (int): observation id (10-digit integer)
    filter (string) : red, green, blue, PSW, PMW or PLW
    sql_statement (string) : Query to database
    dbname (string) : database name
    username (string) : user name
    hostname (string) : host name
    port (int) : port for connecting to server, defaults to 5432

    Returns:
    --------
    sources : Pandas dataframe of the sources
    """
    import psycopg2 as pg
    import pandas.io.sql as psql
    with pg.connect("dbname={} user={} host={} port={}".format(
            dbname, username, hostname, port)) as connection:
        sources = psql.read_sql(sql_statement.format(obsid, filter), connection)
    return(sources)


def display_sources(sources, img_data, mrkr_size, wcs, cmap='grays',
                    susscolor="blue", tmlcolor="green", tm2color="orange",
                    titlestring="SPIRE PSC"):
    """
    display sources overlaid on image

    Parameters:
    -----------
    sources : dataframe including ra and dec values
    img_data : numpy array of the image
    mrkr_size : diameter of the markers in pixel units
    wcs : astropy.wcs wcs object for the image (to convert ra,dec to pixels)
    cmap : vispy color map, defaults to 'grays'.  See vispy.colors.get_colormaps()

    Returns:
    --------
    None
    """
    nsrc = len(sources)
    pos = np.empty(shape=(0, 0))
    if (nsrc > 0):
        if (is_pacs == True):
            sworld = np.vstack([sources['susra'].values.astype(np.float64),
                                sources['susdec'].values.astype(np.float64)]).T
        else:
            sworld = np.vstack([sources['ra'].values, sources['dec'].values]).T
        pos = wcs.wcs_world2pix(sworld, 0) + 0.5
    else:
        print("No sources found")
        sys.exit(-1)

    keydict = dict(escape='close',
                   p=lambda x: max(0, i-1),
                   n=lambda x: min(nsrc, i+1))
    # canvas = scene.SceneCanvas(keys=keydict)
    canvas = scene.SceneCanvas(keys='interactive')
    canvas.size = img_data.shape
    canvas.title = titlestring
    canvas.show()

    # Set viewbox to display the image with interactive pan/zoom
    view = canvas.central_widget.add_view()

    # Create the image
    # image = scene.visuals.Image(bytescale(img_data, cmin=0.8*np.nanmin(img_data),
    #                                       cmax=1.05*np.nanmax(img_data)), parent=view.scene)
    # note that vispy.color.get_colormaps() returns all the ColorMaps
    image = scene.visuals.Image(bytescale(img_data,
                                          cmin=0.9*np.nanmin(img_data),
                                          cmax=1.02*np.nanmax(img_data)),
                                # clim=(0.8*np.nanmin(img_data), 1.05*np.nanmax(img_data)),
                                cmap=cmap,
                                parent=view.scene)

    # Set 2D camera (the camera will scale to the contents in the scene)
    view.camera = SourceInspectCamera(image, img_data, sources, pos,
                                      index=0, aspect=1)
    view.camera.set_range()

    # Add the markers
    if ((nsrc > 0) and (susscolor != None)):
        p1 = scene.visuals.Markers(parent=view.scene)
        p1.set_data(pos,
                    face_color=None, edge_color=susscolor,
                    scaling=True, edge_width=2.0, size=mrkr_size)
    if ((nsrc > 0) and (tmlcolor != None)):
        tmlworld = np.vstack([sources['ratml'].values,
                              sources['dectml'].values]).T
        postml = wcs.wcs_world2pix(tmlworld, 0) + 0.5
        p2 = scene.visuals.Markers(parent=view.scene)
        p2.set_data(postml,
                    face_color=None, edge_color=tmlcolor,
                    scaling=True, edge_width=1.5, size=mrkr_size)
    if ((nsrc > 0) and (tm2color != None)):
        tm2world = np.vstack([sources['ratm2'].values,
                              sources['dectm2'].values]).T
        postm2 = wcs.wcs_world2pix(tm2world, 0) + 0.5
        p3 = scene.visuals.Markers(parent=view.scene)
        p3.set_data(postm2,
                    face_color=None, edge_color=tm2color,
                    scaling=True, edge_width=1.5, size=mrkr_size)
    app.run()
    return


if __name__ == '__main__' and sys.flags.interactive == 0:
    if (sys.argv[0].endswith('ppscinspector')):
        is_pacs = True
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("obsid", help="observation id", type=int)
    if (is_pacs):
        parser.add_argument("band", help="PACS band, must be B or R")
    else:
        parser.add_argument("band", help="SPIRE band, must be PSW, PMW or PMW")
    parser.add_argument("mapdir", help="top-level map directory")
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

from lib389.plugins import Plugin, Plugins

import argparse

from lib389.cli_base import (
    _generic_list,
    _generic_get,
    _generic_get_dn,
    _generic_create,
    _generic_delete,
    _get_arg,
    _get_args,
    _get_attributes,
    _warn,
    )

SINGULAR = Plugin
MANY = Plugins
RDN = 'cn'


def plugin_list(inst, basedn, log, args):
    _generic_list(inst, basedn, log.getChild('plugin_list'), MANY)


def plugin_get(inst, basedn, log, args):
    rdn = _get_arg(args.selector, msg="Enter %s to retrieve" % RDN)
    _generic_get(inst, basedn, log.getChild('plugin_get'), MANY, rdn)


def plugin_get_dn(inst, basedn, log, args):
    dn = _get_arg(args.dn, msg="Enter dn to retrieve")
    _generic_get_dn(inst, basedn, log.getChild('plugin_get_dn'), MANY, dn)


# Plugin enable
def plugin_enable(inst, basedn, log, args):
    dn = _get_arg(args.dn, msg="Enter plugin dn to enable")
    mc = MANY(inst, basedn)
    o = mc.get(dn=dn)
    o.enable()
    o_str = o.display()
    log.info('Enabled %s', o_str)


# Plugin disable
def plugin_disable(inst, basedn, log, args, warn=True):
    dn = _get_arg(args.dn, msg="Enter plugin dn to disable")
    if warn:
        _warn(dn, msg="Disabling %s %s" % (SINGULAR.__name__, dn))
    mc = MANY(inst, basedn)
    o = mc.get(dn=dn)
    o.disable()
    o_str = o.display()
    log.info('Disabled %s', o_str)


# Plugin configure?
def plugin_configure(inst, basedn, log, args):
    pass


def generic_show(inst, basedn, log, args):
    """Display plugin configuration."""
    plugin = args.plugin_cls(inst)
    log.info(plugin.display())


def generic_enable(inst, basedn, log, args):
    plugin = args.plugin_cls(inst)
    plugin.enable()
    log.info("Enabled %s", plugin.rdn)


def generic_disable(inst, basedn, log, args):
    plugin = args.plugin_cls(inst)
    plugin.disable()
    log.info("Disabled %s", plugin.rdn)


def generic_status(inst, basedn, log, args):
    plugin = args.plugin_cls(inst)
    if plugin.status() == True:
        log.info("%s is enabled", plugin.rdn)
    else:
        log.info("%s is disabled", plugin.rdn)


def add_generic_plugin_parsers(subparser, plugin_cls):
    show_parser = subparser.add_parser('show', help='display plugin configuration')
    show_parser.set_defaults(func=generic_show, plugin_cls=plugin_cls)

    enable_parser = subparser.add_parser('enable', help='enable plugin')
    enable_parser.set_defaults(func=generic_enable, plugin_cls=plugin_cls)

    disable_parser = subparser.add_parser('disable', help='disable plugin')
    disable_parser.set_defaults(func=generic_disable, plugin_cls=plugin_cls)

    status_parser = subparser.add_parser('status', help='display plugin status')
    status_parser.set_defaults(func=generic_status, plugin_cls=plugin_cls)


def create_parser(subparsers):
    plugin_parser = subparsers.add_parser('plugin', help="Manage plugins available on the server")

    subcommands = plugin_parser.add_subparsers(help="action")

    list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
    list_parser.set_defaults(func=plugin_list)

    get_parser = subcommands.add_parser('get', help='get')
    get_parser.set_defaults(func=plugin_get)
    get_parser.add_argument('selector', nargs='?', help='The plugin to search for')

    get_dn_parser = subcommands.add_parser('get_dn', help='get_dn')
    get_dn_parser.set_defaults(func=plugin_get_dn)
    get_dn_parser.add_argument('dn', nargs='?', help='The plugin dn to get')

    enable_parser = subcommands.add_parser('enable', help='enable a plugin in the server')
    enable_parser.set_defaults(func=plugin_enable)
    enable_parser.add_argument('dn', nargs='?', help='The dn to enable')

    disable_parser = subcommands.add_parser('disable', help='disable the plugin configuration')
    disable_parser.set_defaults(func=plugin_disable)
    disable_parser.add_argument('dn', nargs='?', help='The dn to disable')
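
# Sketch: wiring create_parser() into a top-level CLI. The instance/basedn
# arguments are placeholders for whatever the host tool would pass to the
# selected handler; this block is illustrative, not part of the module.
if __name__ == '__main__':
    import logging
    root_parser = argparse.ArgumentParser()
    subparsers = root_parser.add_subparsers(help='resources to act upon')
    create_parser(subparsers)
    args = root_parser.parse_args()
    # a real tool would first connect to a directory instance, then call:
    # args.func(inst, basedn, logging.getLogger('plugin'), args)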
, or # at https://www.sourcefabric.org/superdesk/license import logging import superdesk from flask import current_app as app from settings import DAYS_TO_KEEP from datetime import timedelta from werkzeug.exceptions import HTTPException from superdesk.notification import push_notification from superdesk.io import providers from superdesk.celery_app import celery from superdesk.utc import utcnow from superdesk.workflow import set_default_state from superdesk.errors import ProviderError from superdesk.stats import stats from superdesk.upload import url_for_media from superdesk.media.media_operations import download_file_from_url, process_file from superdesk.media.renditions import generate_renditions UPDATE_SCHEDULE_DEFAULT = {'minutes': 5} LAST_UPDATED = 'last_updated' STATE_INGESTED = 'ingested' logger = logging.getLogger(__name__) superdesk.workflow_state(STATE_INGESTED) superdesk.workflow_action( name='ingest' ) def is_valid_type(provider, provider_type_filter=None): """Test if given provider has valid type and should be updated. :param provider: provider to be updated :param provider_type_filter: active provider type filter """ provider_type = provider.get('type') if provider_type not in providers: return False if provider_type_filter and provider_type != provider_type_filter: return False return True def is_scheduled(provider): """Test if given provider should be scheduled for update. :param provider: ingest provider """ now = utcnow() last_updated = provider.get(LAST_UPDATED, now - timedelta(days=100)) # if never updated run now update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT) return last_updated + timedelta(**update_schedule) < now def is_closed(provider): """Test if provider is closed. :param provider: ingest provider """ return provider.get('is_closed', False) def filter_expired_items(provider, items): try: days_to_keep_content = provider.get('days_to_keep', DAYS_TO_KEEP) expiration_date = utcnow() - timedelta(days=days_to_keep_content) return [item for item in items if item.get('versioncreated', utcnow()) > expiration_date] except Exception as ex: raise ProviderError.providerFilterExpiredContentError(ex, provider) def get_provider_rule_set(provider): if provider.get('rule_set'): return superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None) def get_task_ttl(provider): update_schedule = provider.get('update_schedule', UPDATE_SCHEDULE_DEFAULT) return update_schedule.get('minutes', 0) * 60 + update_schedule.get('hours', 0) * 3600 def get_task_id(provider): return 'update-ingest-{0}-{1}'.format(provider.get('name'), provider.get('_id')) class UpdateIngest(superdesk.Command): """Update ingest providers.""" option_list = ( superdesk.Option('--provider', '-p', dest='provider_type'), ) def run(self, provider_type=None): for provider in superdesk.get_resource_service('ingest_providers
').get(req=None, lookup={}): if is_valid_type(provider, provider_type) and is_scheduled(provider) and not is_closed(provider): kwargs = { 'provider': provider, 'rule_set': get_provider_rule_set(provider)
} update_provider.apply_async( task_id=get_task_id(provider), expires=get_task_ttl(provider), kwargs=kwargs) @celery.task def update_provider(provider, rule_set=None): """ Fetches items from ingest provider as per the configuration, ingests them into Superdesk and updates the provider. """ superdesk.get_resource_service('ingest_providers').update(provider['_id'], { LAST_UPDATED: utcnow(), # Providing the _etag as system updates to the documents shouldn't override _etag. app.config['ETAG']: provider.get(app.config['ETAG']) }) for items in providers[provider.get('type')].update(provider): ingest_items(items, provider, rule_set) stats.incr('ingest.ingested_items', len(items)) logger.info('Provider {0} updated'.format(provider['_id'])) push_notification('ingest:update') def process_anpa_category(item, provider): try: anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories') if anpa_categories: for anpa_category in anpa_categories['items']: if anpa_category['is_active'] is True \ and item['anpa-category']['qcode'].lower() == anpa_category['value'].lower(): item['anpa-category'] = {'qcode': item['anpa-category']['qcode'], 'name': anpa_category['name']} break except Exception as ex: raise ProviderError.anpaError(ex, provider) def apply_rule_set(item, provider, rule_set=None): """ Applies rules set on the item to be ingested into the system. If there's no rule set then the item will be returned without any change. :param item: Item to be ingested :param provider: provider object from whom the item was received :return: item """ try: if rule_set is None and provider.get('rule_set') is not None: rule_set = superdesk.get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None) if rule_set and 'body_html' in item: body = item['body_html'] for rule in rule_set['rules']: body = body.replace(rule['old'], rule['new']) item['body_html'] = body return item except Exception as ex: raise ProviderError.ruleError(ex, provider) def ingest_items(items, provider, rule_set=None): all_items = filter_expired_items(provider, items) items_dict = {doc['guid']: doc for doc in all_items} for item in [doc for doc in all_items if doc.get('type') != 'composite']: ingest_item(item, provider, rule_set) for item in [doc for doc in all_items if doc.get('type') == 'composite']: for ref in [ref for group in item.get('groups', []) for ref in group.get('refs', []) if 'residRef' in ref]: ref.setdefault('location', 'ingest') itemRendition = items_dict.get(ref['residRef'], {}).get('renditions') if itemRendition: ref.setdefault('renditions', itemRendition) ingest_item(item, provider, rule_set) def ingest_item(item, provider, rule_set=None): try: item.setdefault('_id', item['guid']) providers[provider.get('type')].provider = provider item['ingest_provider'] = str(provider['_id']) item.setdefault('source', provider.get('source', '')) set_default_state(item, STATE_INGESTED) if 'anpa-category' in item: process_anpa_category(item, provider) apply_rule_set(item, provider, rule_set) ingest_service = superdesk.get_resource_service('ingest') if item.get('ingest_provider_sequence') is None: ingest_service.set_ingest_provider_sequence(item, provider) rend = item.get('renditions', {}) if rend: baseImageRend = rend.get('baseImage') or next(iter(rend.values())) if baseImageRend: href = providers[provider.get('type')].prepare_href(baseImageRend['href']) update_renditions(item, href) old_item = ingest_service.find_one(_id=item['guid'], req=None) if old_item: ingest_service.put(item['guid'], 
item)
        else:
            try:
                ingest_service.post([item])
            except HTTPException as e:
                # Log the failure (as a lazy %-format argument) and fall back
                # to updating the existing item.
                logger.error("Exception while persisting item in ingest collection: %s", e)
                ingest_service.put(item['guid'], item)
    except ProviderError:
        raise
    except Exception as ex:
        raise ProviderError.ingestError(ex, provider)


def update_renditions(item, href):
    inserted = []
    try:
""" Contains a function to generate and upload a LaTeX-rendered math image. """ import subprocess import sys import typing def uploadLatex(math: typing.List[str], slackAPI: object, channel: object, users: list) -> str: """ Generates a LaTeX math image from the LaTeX source contained in `math`, and posts it to the api `slackapi` in channel `channel`. Returns a string describing any errors that occurred. """ toParse = "".join(math).replace("&amp;","&") # create a temporary directory response = subprocess.run(["mktemp", "-d"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # check for errors if response.returncode != 0 or response.stderr.decode() != '': return "EE: latex: couldn't make temp. dir: '"+response.stderr.decode()+"'" # Decode and store the temporary directory name latexdir = response.stdout.decode().splitlines()[0] # Generate the image using l2p response = subprocess.run(["l2p", "-i", toParse, "-o", latexdir+"/latex_output.png"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Check for errors, both posting to the channel (because it's probable that a user messed up) # as well as logging to the logfile if response.stderr.decode() != '': msg = "Unable to parse expression
: %s: %s" slackAPI.chat.post_message(channel['name'],
msg % ("`%s` because" % toParse, "`%s`" % response.stderr.decode())) return "EE: latex: " + msg % ("'%s'" % toParse, "'%s'" % response.stderr.decode()) # If all went well, upload then delete the file slackAPI.files.upload(latexdir+"/latex_output.png", channels=channel['id']) retstr = "II: latex: uploaded image to slack (input: '%s')" % toParse response = subprocess.run(["rm", "-r", "-f", latexdir], stderr=subprocess.PIPE) if response.returncode != 0 or response.stderr.decode() != "": return retstr+"\nEE: latex: error encountered during cleanup: '%s'" % response.stderr.decode() return retstr
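

# --- Usage sketch (illustrative; assumptions flagged) ---
# `slackAPI` is assumed to be a slacker-style client exposing
# chat.post_message(...) and files.upload(...), and `channel` a dict with
# 'name' and 'id' keys, matching how they are used above. The token and
# channel values below are placeholders:
#
#   from slacker import Slacker
#   api = Slacker('xoxb-your-token')
#   channel = {'name': 'general', 'id': 'C0123456'}
#   print(uploadLatex([r'\frac{a}{b}'], api, channel, users=[]))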
# -*- coding: utf-8 -*- from __future__ im
port unicode_literals from django.db import migrations class Migration(migrations.Migration)
: dependencies = [ ('ngw', '0009_config_eventdefaultperms'), ] operations = [ migrations.DeleteModel( name='ChoiceContactField', ), migrations.DeleteModel( name='DateContactField', ), migrations.DeleteModel( name='DateTimeContactField', ), migrations.DeleteModel( name='EmailContactField', ), migrations.DeleteModel( name='FileContactField', ), migrations.DeleteModel( name='ImageContactField', ), migrations.DeleteModel( name='LongTextContactField', ), migrations.DeleteModel( name='MultipleChoiceContactField', ), migrations.DeleteModel( name='MultipleDoubleChoiceContactField', ), migrations.DeleteModel( name='NumberContactField', ), migrations.DeleteModel( name='PasswordContactField', ), migrations.DeleteModel( name='PhoneContactField', ), migrations.DeleteModel( name='RibContactField', ), migrations.DeleteModel( name='TextContactField', ), ]
from temboo
.Library.Google.Drive.Changes.Get import Get, GetInputSet, GetResultSet, GetChoreographyExecution from temboo.Library.Google.Drive.Changes.List import List, ListInputSet, ListResultSet, ListChoreographyExecution
# -*- coding: utf-8 -*- # Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz> # Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com> # Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com> # Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. fr
om django.apps import AppConfig from django.apps import apps from django.db.models import signals def connect_issues_signals(): from taiga.projects.tagging import signals as tagging_handlers from . import signals as handlers # Finished date signals.pre_save.connect(handlers.set_finished_date_when_edit_issue, sender=apps.get_model("
issues", "Issue"), dispatch_uid="set_finished_date_when_edit_issue") # Tags signals.pre_save.connect(tagging_handlers.tags_normalization, sender=apps.get_model("issues", "Issue"), dispatch_uid="tags_normalization_issue") def connect_issues_custom_attributes_signals(): from taiga.projects.custom_attributes import signals as custom_attributes_handlers signals.post_save.connect(custom_attributes_handlers.create_custom_attribute_value_when_create_issue, sender=apps.get_model("issues", "Issue"), dispatch_uid="create_custom_attribute_value_when_create_issue") def connect_all_issues_signals(): connect_issues_signals() connect_issues_custom_attributes_signals() def disconnect_issues_signals(): signals.pre_save.disconnect(sender=apps.get_model("issues", "Issue"), dispatch_uid="set_finished_date_when_edit_issue") signals.pre_save.disconnect(sender=apps.get_model("issues", "Issue"), dispatch_uid="tags_normalization_issue") def disconnect_issues_custom_attributes_signals(): signals.post_save.disconnect(sender=apps.get_model("issues", "Issue"), dispatch_uid="create_custom_attribute_value_when_create_issue") def disconnect_all_issues_signals(): disconnect_issues_signals() disconnect_issues_custom_attributes_signals() class IssuesAppConfig(AppConfig): name = "taiga.projects.issues" verbose_name = "Issues" def ready(self): connect_all_issues_signals()
from biokbase.workspace.client import Workspace import requests import json import sys from time import time from fix_workspace_info import fix_all_workspace_info from pprint import pprint kb_port = 9999 mini_ws_url = f"http://localhost:{kb_port}/services/ws" mini_auth_url = f"http://localhost:{kb_port}/services/auth/testmode" mini_ws_admin = "wsadmin" narrative_spec_file = '../../../narrative_object.spec' old_narrative_spec_file = 'old_narrative_object.spec' test_narrative_data = 'narrative_test_data.json' test_user = "kbasetest" #### # BEFORE YOU RUN THIS: # 1. Spin up mini_kb with the workspace env pointed to my branch: # that is, the "-env" line in the ws command points to # "https://raw.githubusercontent.com/briehl/mini_kb/master/deployment/conf/workspace-minikb.ini" # # 2. When this starts up, the workspace will complain. Auth is in testmode, and there's no test user/token set up # for the Shock configuration. Do the following: # a. enter the mongo container # > docker exec -it mini_kb_ci
-mongo_1 /bin/bash # b. start mongo (just "mongo" at the prompt) # c. Run the following to use gridFS: # > use workspace # > db.settings.findAndModify({ query: {backend: "shock"}, update: { $set: {"backend": "gridFS"} } }) # d. Exit that container, and restart the workspace container # > docker-compose restart ws # # With the setup done, this s
cript should do the job of creating accounts, importing the Narrative type, # loading test data, etc. def create_user(user_id): """ Returns a token for that user. """ headers = { "Content-Type": "application/json" } r = requests.post(mini_auth_url + '/api/V2/testmodeonly/user', headers=headers, data=json.dumps({'user': user_id, 'display': "User {}".format(user_id)})) if r.status_code != 200 and r.status_code != 400: print("Can't create dummy user!") r.raise_for_status() r = requests.post(mini_auth_url + '/api/V2/testmodeonly/token', headers=headers, data=json.dumps({'user': user_id, 'type': 'Login'})) if r.status_code != 200: print("Can't make dummy token!") r.raise_for_status() token = json.loads(r.text) return token['token'] def load_narrative_type(ws): """ Loads the KBaseNarrative.Narrative type info into mini kb. ws = Workspace client configured for admin """ ws.request_module_ownership("KBaseNarrative") ws.administer({ 'command': 'approveModRequest', 'module': 'KBaseNarrative' }) with open(old_narrative_spec_file, "r") as f: old_spec = f.read() ws.register_typespec({ 'spec': old_spec, 'dryrun': 0, 'new_types': [ 'Narrative', 'Cell', 'Worksheet', 'Metadata' ] }) ws.release_module('KBaseNarrative') for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys(): if '.Narrative' in n: old_ver = n.split('-')[-1] with open(narrative_spec_file, "r") as f: spec = f.read() ws.register_typespec({ 'spec': spec, 'dryrun': 0, 'new_types': [] }) ws.release_module('KBaseNarrative') for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys(): if '.Narrative' in n: new_ver = n.split('-')[-1] return { 'old_ver': old_ver, 'new_ver': new_ver } def load_narrative_test_data(ws, vers): """ Loads the test data set into mini kb ws. Returns this structure: wsid: { narrative_id: int correct_ws_meta: {} correct_ws_perms: {} } there's more than 1 wsid (should be ~7-10), but that's it. """ with open(test_narrative_data, 'r') as f: test_data = json.loads(f.read().strip()) uploaded_data = list() for ws_data in test_data["old"]: uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['old_ver'])) for ws_data in test_data["new"]: uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['new_ver'])) return uploaded_data def _load_workspace_data(ws, ws_data, idx, narrative_ver): """ Loads up a single workspace with data and returns a dict about it. 
Dict contains: id = the workspace id perms = the workspace permissions correct_meta = the correct workspace metadata (for validation) """ print(ws_data.keys()) narratives = ws_data['narratives'] ws_meta = ws_data['ws_meta'] ws_info = ws.create_workspace({"workspace": "NarrativeWS-{}-{}".format(idx, int(time()*1000))}) ws_id = ws_info[0] info = { "ws_id": ws_id, "ws_info": ws_info, "nar_info": [], "perms": ws_data["perms"], "correct_meta": ws_data["correct_meta"], "loaded_meta": ws_meta } if len(narratives): for idx, nar in enumerate(narratives): objects = ws.save_objects({ 'id': ws_id, 'objects': [{ 'type': 'KBaseNarrative.Narrative-{}'.format(narrative_ver), 'data': nar, 'name': 'Narrative-{}'.format(idx) }] }) info['nar_info'].append(objects[0]) if len(ws_meta): ws.alter_workspace_metadata({ 'wsi': {'id': ws_id}, 'new': ws_meta }) perms = ws_data["perms"] if len(perms) > 1: admin_perm = perms['wsadmin'] ws.set_permissions({ 'id': ws_id, 'new_permission': admin_perm, 'users': ['wsadmin'] }) return info def main(): admin_token = create_user(mini_ws_admin) admin_ws = Workspace(url=mini_ws_url, token=admin_token) versions = load_narrative_type(admin_ws) versions = { 'old_ver': '1.0', 'new_ver': '2.0' } user_token = create_user(test_user) user_ws = Workspace(url=mini_ws_url, token=user_token) loaded_info = load_narrative_test_data(user_ws, versions) pprint(loaded_info) # fix_all_workspace_info(mini_ws_url, mini_auth_url, admin_token, 100) # for ws_data in loaded_info: # ws_id = ws_data['ws_id'] # ws_meta = user_ws.get_workspace_info({'id': ws_id})[8] # try: # assert(ws_meta == ws_data['correct_meta']) # except: # print("WS: {}".format(ws_id)) # pprint(ws_meta) # print("doesn't match") # pprint(ws_data['correct_meta']) if __name__ == '__main__': sys.exit(main())
from pythonforandroid.toolchain import Bootstrap, current_directory, info, info_main, shprint from pythonforandroid.util import ensure_dir from os.path import join import sh class WebViewBootstrap(Bootstrap): name = 'webview' recipe_depends = list( set(Bootstrap.recipe_depends).union({'genericndkbuild'}) ) def assemble_distribution(self): info_main('# Creating Android project from build and {} bootstrap'.format( self.name)) shprint(sh.rm, '-rf', self.dist_dir) shprint(sh.cp, '-r', self.build_dir, self.dist_dir) with current_directory(self.dist_dir): with open('local.properties', 'w') as fileh: fileh.write('sdk.dir={}'.format(self.ctx.sdk_dir)) arch = self.ctx.archs[0] if len(self.ctx.archs) > 1: raise ValueError('built for more than one arch, but bootstrap cannot handle that yet') info('Bootstrap running with arch {}'.format(arch)) with current_directory(self.dist_dir): info('Copying python distribution') self.distribute_libs(arch, [self.ctx.get_libs_dir(arch.arch)]) self.distribute_aars(arch) self.dis
tribute_javaclasses(self.ctx.javaclass_dir,
dest_dir=join("src", "main", "java")) python_bundle_dir = join('_python_bundle', '_python_bundle') ensure_dir(python_bundle_dir) site_packages_dir = self.ctx.python_recipe.create_python_bundle( join(self.dist_dir, python_bundle_dir), arch) if 'sqlite3' not in self.ctx.recipe_build_order: with open('blacklist.txt', 'a') as fileh: fileh.write('\nsqlite3/*\nlib-dynload/_sqlite3.so\n') if not self.ctx.with_debug_symbols: self.strip_libraries(arch) self.fry_eggs(site_packages_dir) super().assemble_distribution() bootstrap = WebViewBootstrap()
from uber.tests import * @pytest.fixture def attendee_id(): with Session() as session: return session.query(Attendee).filter_by(first_name='Regular', last_name='Attendee').one().id @pytest.fixture(autouse=True) def mock_apply(monkeypatch): monkeypatch.setattr(Attendee, 'apply', Mock()) return Attendee.apply def test_invalid_gets(): with Session() as session: pytest.raises(Exception, session.attendee) pytest.raises(Exc
eption, session.attendee, '') pytest.raises(Exception, session.attendee, []) pytest.raises(Exception, session.attendee, None) pytest.raises(Exception, session.attendee, str(uuid4())) pytest.raises(Exception, session.attendee, {'id': str(uuid4())}) def test_basic_get(attendee_id, mock_apply): with Session() as session: assert session.attendee(attendee_id)
.first_name == 'Regular' assert not mock_apply.called assert session.attendee(id=attendee_id).first_name == 'Regular' assert not mock_apply.called assert session.attendee({'id': attendee_id}).first_name == 'Regular' assert mock_apply.called def test_empty_get(mock_apply): with Session() as session: assert session.attendee({}).paid == NOT_PAID # basic sanity check assert mock_apply.called def test_ignore_csrf(request): with Session() as session: pytest.raises(Exception, session.attendee, {'paid': NEED_NOT_PAY}) session.attendee({'paid': NEED_NOT_PAY}, ignore_csrf=True) session.attendee({'paid': NEED_NOT_PAY}, allowed=['paid']) request.addfinalizer(lambda: setattr(cherrypy.request, 'method', 'GET')) cherrypy.request.method = 'POST' session.attendee({'paid': NEED_NOT_PAY})
onfig.v3r205;DecFiles.v27r37 #-- Visible : Y from Gaudi.Configuration import * from GaudiConf import IOHelper IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000001_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000002_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000003_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000004_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000005_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000006_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000007_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000008_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000009_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000010_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000011_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000012_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000013_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000014_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000015_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000016_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000017_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000018_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000019_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000020_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000021_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000022_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000023_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000024_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000025_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000026_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000027_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000032_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000033_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000034_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000045_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000057_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000058_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000062_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000073_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000074_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000075_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000076_1.allstreams.dst', 'LF
N:/lhcb
/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000077_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000078_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000079_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000080_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000081_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000082_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000083_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000084_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000085_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000086_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000087_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000088_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000089_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000090_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000091_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000092_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000093_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000094_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000095_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000096_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000097_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000098_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000099_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000100_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000101_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000102_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000103_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000104_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000105_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000106_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000107_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000108_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000109_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000110_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000111_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000112_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000113_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000114_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000115_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000116_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000117_1.allstreams.dst', 
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000118_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000119_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000120_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000121_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000122_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000123_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000124_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000125_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000126_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000127_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000128_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000129_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000130_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000131_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/0000/00043567_00000132_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00043567/000
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLE
S from swgpy.object import * def create(kernel): result = Mission() result.template = "object/mission/base/shared_base_mission.iff" result.attribute_
template_id = -1 result.stfName("string_id_table","") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
#!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 ''' modify_yaml ansible module ''' import yaml DOCUMENTATION = ''' --- module: modify_yaml short_description: Modify yaml key value pairs author: Andrew Butcher requirements: [ ] ''' EXAMPLES = ''' - modify_yaml: dest: /etc/origin/master/master-config.yaml yaml_key: 'kubernetesMasterConfig.masterCount' yaml_value: 2 ''' # pylint: disable=missing-docstring def set_key(yaml_data, yaml_key, yaml_value): changes = [] ptr = yaml_data for key in yaml_key.split('.'): if key not in ptr and key != yaml_key.split('.')[-1]: ptr[key] = {} ptr = ptr[key] elif key == yaml_key.split('.')[-1]: if (k
ey in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):
ptr[key] = yaml_value changes.append((yaml_key, yaml_value)) else: ptr = ptr[key] return changes def main(): ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting the key to the desired value. ''' # disabling pylint errors for global-variable-undefined and invalid-name # for 'global module' usage, since it is required to use ansible_facts # pylint: disable=global-variable-undefined, invalid-name, # redefined-outer-name global module module = AnsibleModule( argument_spec=dict( dest=dict(required=True), yaml_key=dict(required=True), yaml_value=dict(required=True), backup=dict(required=False, default=True, type='bool'), ), supports_check_mode=True, ) dest = module.params['dest'] yaml_key = module.params['yaml_key'] yaml_value = module.safe_eval(module.params['yaml_value']) backup = module.params['backup'] # Represent null values as an empty string. # pylint: disable=missing-docstring, unused-argument def none_representer(dumper, data): return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'') yaml.add_representer(type(None), none_representer) try: yaml_file = open(dest) yaml_data = yaml.safe_load(yaml_file.read()) yaml_file.close() changes = set_key(yaml_data, yaml_key, yaml_value) if len(changes) > 0: if backup: module.backup_local(dest) yaml_file = open(dest, 'w') yaml_string = yaml.dump(yaml_data, default_flow_style=False) yaml_string = yaml_string.replace('\'\'', '""') yaml_file.write(yaml_string) yaml_file.close() return module.exit_json(changed=(len(changes) > 0), changes=changes) # ignore broad-except error to avoid stack trace to ansible user # pylint: disable=broad-except except Exception, e: return module.fail_json(msg=str(e)) # ignore pylint errors related to the module_utils import # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
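

# --- Sketch (illustrative, not part of the original module) ---
# set_key() walks the dot-notation path, creating intermediate dicts as
# needed, and returns the list of (key, value) changes it made:
#
#   data = {'kubernetesMasterConfig': {}}
#   set_key(data, 'kubernetesMasterConfig.masterCount', 2)
#   # -> [('kubernetesMasterConfig.masterCount', 2)]
#   # data == {'kubernetesMasterConfig': {'masterCount': 2}}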
""" desispec.fiberflat ================== Utility functions to compute a fiber flat correction and apply it We try to keep all the (fits) io separated. """ from __future__ import absolute_import, division import numpy as np from desispec.io import read_frame from desispec.io import write_fiberflat from desispec.fiberflat import compute_fiberflat from desispec.log import get_logger from desispec.io.qa import load_qa_fram
e from desispec.io import write_qa_frame from desispec.qa import qa_plots import argparse def parse(options=None): parser = argparse.ArgumentParser(description="Compute the fiber flat field correction from a DESI continuum lamp frame") parser.add_argument('--infile', type = str, default = None, required=True,
help = 'path of DESI frame fits file corresponding to a continuum lamp exposure') parser.add_argument('--outfile', type = str, default = None, required=True, help = 'path of DESI fiberflat fits file') parser.add_argument('--qafile', type=str, default=None, required=False, help='path of QA file') parser.add_argument('--qafig', type = str, default = None, required=False, help = 'path of QA figure file') args = None if options is None: args = parser.parse_args() else: args = parser.parse_args(options) return args def main(args) : log=get_logger() log.info("starting") # Process frame = read_frame(args.infile) fiberflat = compute_fiberflat(frame) # QA if (args.qafile is not None): log.info("performing fiberflat QA") # Load qaframe = load_qa_frame(args.qafile, frame, flavor=frame.meta['FLAVOR']) # Run qaframe.run_qa('FIBERFLAT', (frame, fiberflat)) # Write if args.qafile is not None: write_qa_frame(args.qafile, qaframe) log.info("successfully wrote {:s}".format(args.qafile)) # Figure(s) if args.qafig is not None: qa_plots.frame_fiberflat(args.qafig, qaframe, frame, fiberflat) # Write write_fiberflat(args.outfile, fiberflat, frame.meta) log.info("successfully wrote %s"%args.outfile)
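

# --- Usage sketch (illustrative; file paths are placeholders) ---
# The parse()/main() split lets the pipeline drive this module
# programmatically as well as from the command line:
#
#   args = parse(['--infile', 'frame-b0-00000001.fits',
#                 '--outfile', 'fiberflat-b0-00000001.fits'])
#   main(args)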
class Solution(object): def isSelfCrossing(self, x): """ :type x: List[int] :rtype: bool """ inf = float('inf') n = len(x) if n < 3: return False ruld = [0, 0, 0, 0] # right, up, left, down next_max = inf current = [-x[1], x[0]] for i, elem in enumerate(x[2:], 2): i %= 4 if elem >= next_max: return True xy = 1 if i in {0, 2} else 0 pn = 1 if i in {0, 3} else -1 new = current[xy] + pn * elem
if pn * new > pn * ruld[i - 3]: next_max = inf else: if next_max is inf and pn * new >= pn * ruld[i - 1]: ruld[i - 2] = ruld[i] next_max = abs(ruld[i - 2] - current[xy ^ 1])
ruld[i - 1], current[xy] = current[xy], new return False assert Solution().isSelfCrossing([2, 1, 1, 2]) assert not Solution().isSelfCrossing([1, 2, 3, 4]) assert Solution().isSelfCrossing([1, 1, 1, 1]) assert not Solution().isSelfCrossing([3,3,4,2,2]) assert Solution().isSelfCrossing([1,1,2,1,1]) assert not Solution().isSelfCrossing([3,3,3,2,1,1])
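
# Note (a hedged sketch of the idea, not the author's wording): the path
# spirals outward until a step fails to clear the opposite boundary stored in
# `ruld` (the right/up/left/down extents). From that point the spiral can only
# shrink, so `next_max` records the largest step that still fits; any later
# step reaching `next_max` must cross an earlier edge.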
PROJECT_PATH = __path__[0] TI
EMPO_REGISTRY = {} REDIS_GROUP_NAMESPACE = 'tiempogroup' RECENT_KEY = 'tiempo:recent_tasks' RESULT_PREFIX = 'tiemp
o:task_result' __version__ = "1.2.3"
er): contents = input_api.ReadFile(f, 'rb') # Check that the file ends in one and only one newline character. if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'): eof_files.append(f.LocalPath()) if eof_files: return [output_api.PresubmitPromptWarning( 'These files should end in one (and only one) newline character:', items=eof_files)] return [] def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api, source_file_filter=None): """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass. It is faster because it is reading the file only once. """ cr_files = [] eof_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'rb') if '\r' in contents: cr_files.append(f.LocalPath()) # Check that the file ends in one and only one newline character. if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'): eof_files.append(f.LocalPath()) outputs = [] if cr_files: outputs.append(output_api.PresubmitPromptWarning( 'Found a CR character in these files:', items=cr_files)) if eof_files: outputs.append(output_api.PresubmitPromptWarning( 'These files should end in one (and only one) newline character:', items=eof_files)) return outputs def _ReportErrorFileAndLine(filename, line_num, dummy_line): """Default error formatter for _FindNewViolationsOfRule.""" return '%s:%s' % (filename, line_num) def _FindNewViolationsOfRule(callable_rule, input_api, source_file_filter=None, error_formatter=_ReportErrorFileAndLine): """Find all newly introduced violations of a per-line rule (a callable). Arguments: callable_rule: a callable taking a file extension and line of input and returning True if the rule is satisfied and False if there was a problem. input_api: object to enumerate the affected files. source_file_filter: a filter to be passed to the input api. error_formatter: a callable taking (filename, line_number, line) and returning a formatted error string. Returns: A list of the newly-introduced violations reported by the rule. """ errors = [] for f in input_api.AffectedFiles(include_deletes=False, file_filter=source
_file_filter): # For speed, we do two
passes, checking first the full file. Shelling out # to the SCM to determine the changed region can be quite expensive on # Win32. Assuming that most files will be kept problem-free, we can # skip the SCM operations most of the time. extension = str(f.LocalPath()).rsplit('.', 1)[-1] if all(callable_rule(extension, line) for line in f.NewContents()): continue # No violation found in full text: can skip considering diff. for line_num, line in f.ChangedContents(): if not callable_rule(extension, line): errors.append(error_formatter(f.LocalPath(), line_num, line)) return errors def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None): """Checks that there are no tab characters in any of the text files to be submitted. """ # In addition to the filter, make sure that makefiles are blacklisted. if not source_file_filter: # It's the default filter. source_file_filter = input_api.FilterSourceFile def filter_more(affected_file): basename = input_api.os_path.basename(affected_file.LocalPath()) return (not (basename in ('Makefile', 'makefile') or basename.endswith('.mk')) and source_file_filter(affected_file)) tabs = _FindNewViolationsOfRule(lambda _, line : '\t' not in line, input_api, filter_more) if tabs: return [output_api.PresubmitPromptWarning('Found a tab character in:', long_text='\n'.join(tabs))] return [] def CheckChangeTodoHasOwner(input_api, output_api, source_file_filter=None): """Checks that the user didn't add TODO(name) without an owner.""" unowned_todo = input_api.re.compile('TO''DO[^(]') errors = _FindNewViolationsOfRule(lambda _, x : not unowned_todo.search(x), input_api, source_file_filter) errors = ['Found TO''DO with no owner in ' + x for x in errors] if errors: return [output_api.PresubmitPromptWarning('\n'.join(errors))] return [] def CheckChangeHasNoStrayWhitespace(input_api, output_api, source_file_filter=None): """Checks that there is no stray whitespace at source lines end.""" errors = _FindNewViolationsOfRule(lambda _, line : line.rstrip() == line, input_api, source_file_filter) if errors: return [output_api.PresubmitPromptWarning( 'Found line ending with white spaces in:', long_text='\n'.join(errors))] return [] def CheckLongLines(input_api, output_api, maxlen, source_file_filter=None): """Checks that there aren't any lines longer than maxlen characters in any of the text files to be submitted. """ maxlens = { 'java': 100, # This is specifically for Android's handwritten makefiles (Android.mk). 'mk': 200, '': maxlen, } # Language specific exceptions to max line length. # '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a # superset of CPP_EXCEPTIONS. CPP_FILE_EXTS = ('c', 'cc') CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma') JAVA_FILE_EXTS = ('java',) JAVA_EXCEPTIONS = ('import ', 'package ') OBJC_FILE_EXTS = ('h', 'm', 'mm') OBJC_EXCEPTIONS = ('#define', '#endif', '#if', '#import', '#include', '#pragma') LANGUAGE_EXCEPTIONS = [ (CPP_FILE_EXTS, CPP_EXCEPTIONS), (JAVA_FILE_EXTS, JAVA_EXCEPTIONS), (OBJC_FILE_EXTS, OBJC_EXCEPTIONS), ] def no_long_lines(file_extension, line): # Check for language specific exceptions. if any(file_extension in exts and line.startswith(exceptions) for exts, exceptions in LANGUAGE_EXCEPTIONS): return True file_maxlen = maxlens.get(file_extension, maxlens['']) # Stupidly long symbols that needs to be worked around if takes 66% of line. long_symbol = file_maxlen * 2 / 3 # Hard line length limit at 50% more. 
extra_maxlen = file_maxlen * 3 / 2 line_len = len(line) if line_len <= file_maxlen: return True if line_len > extra_maxlen: return False if any((url in line) for url in ('file://', 'http://', 'https://')): return True if 'url(' in line and file_extension == 'css': return True return input_api.re.match( r'.*[A-Za-z][A-Za-z_0-9]{%d,}.*' % long_symbol, line) def format_error(filename, line_num, line): return '%s, line %s, %s chars' % (filename, line_num, len(line)) errors = _FindNewViolationsOfRule(no_long_lines, input_api, source_file_filter, error_formatter=format_error) if errors: msg = 'Found lines longer than %s characters (first 5 shown).' % maxlen return [output_api.PresubmitPromptWarning(msg, items=errors[:5])] else: return [] def CheckLicense(input_api, output_api, license_re, source_file_filter=None, accept_empty_files=True): """Verifies the license header. """ license_re = input_api.re.compile(license_re, input_api.re.MULTILINE) bad_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'rb') if accept_empty_files and not contents: continue if not license_re.search(contents): bad_files.append(f.LocalPath()) if bad_files: if input_api.is_committing: res_type = output_api.PresubmitPromptWarning else: res_type = output_api.PresubmitNotifyResult return [res_type( 'License must match:\n%s\n' % license_re.pattern + 'Found a bad license header in these files:', items=bad_files)] return [] def CheckChangeSvnEolStyle(input_api, output_api, so
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  encfs.py
#
#  Copyright 2013 Antergos
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#  MA 02110-1301, USA.

""" Configures Antergos to encrypt user's home with encFS """

#import logging
import glob
import os
import shutil
import subprocess


def setup(username, dest_dir):
    """ Encrypt user's home folder """
    # encfs and pam_mount packages are needed,
    # pam_encfs from AUR
    # https://wiki.debian.org/TransparentEncryptionForHomeFolder

    # Edit configuration files
    name = os.path.join(dest_dir, "etc/security/pam_encfs.conf")
    shutil.copy(name, name + ".cnchi")
    with open(name, "r") as pam_encfs:
        lines = pam_encfs.readlines()
    # Comment out the last line before appending our own entry
    i = len(lines) - 1
    lines[i] = "# " + lines[i]
    with open(name, "w") as pam_encfs:
        # Write back the modified lines, then append our entries
        pam_encfs.writelines(lines)
        pam_encfs.write("# Added by Cnchi - Antergos Installer\n")
        pam_encfs.write("-\t/home/.encfs\t-\t-v\t-\n")

    name = os.path.join(dest_dir, "etc/security/pam_env.conf")
    shutil.copy(name, name + ".cnchi")
    with open(name, "a") as pam_env:
        pam_env.write("# Added by Cnchi - Antergos Installer\n")
        pam_env.write("# Set the ICEAUTHORITY file location to allow GNOME to start on encfs $HOME\n")
        pam_env.write("ICEAUTHORITY DEFAULT=/tmp/.ICEauthority_@{PAM_USER}\n")

    name = os.path.join(dest_dir, "etc/fuse.conf")
    shutil.copy(name, name + ".cnchi")
    with open(name, "a") as fuse_conf:
        fuse_conf.write("# Added by Cnchi - Antergos Installer\n")
        fuse_conf.write("user_allow_other\n")

    name = os.path.join(dest_dir, "etc/pam.d/system-login")
    shutil.copy(name, name + ".cnchi")
    with open(name, "a") as system_login:
        system_login.write("# Added by Cnchi - Antergos Installer\n")
        system_login.write("session required\tpam_encfs.so\n")
        system_login.write("session optional\tpam_mount.so\n")

    name = os.path.join(dest_dir, "etc/pam.d/system-auth")
    shutil.copy(name, name + ".cnchi")
    with open(name, "a") as system_auth:
        system_auth.write("# Added by Cnchi - Antergos Installer\n")
        system_auth.write("auth sufficient\tpam_encfs.so\n")
        system_auth.write("auth optional\tpam_mount.so\n")

    # Setup finished

    # Move user home dir out of the way
    mounted_dir = os.path.join(dest_dir, "home/", username)
    backup_dir = os.path.join(dest_dir, "var/tmp/", username)
    subprocess.check_call(['mv', mounted_dir, backup_dir])

    # Create necessary dirs, encrypted and mounted (unencrypted)
    encrypted_dir = os.path.join(dest_dir, "home/.encfs/", username)
    subprocess.check_call(['mkdir', '-p', encrypted_dir, mounted_dir])

    # Set owner
    subprocess.check_call(['chown', '%s:users' % username, encrypted_dir, mounted_dir])

    # Create encrypted directory
    subprocess.check_call(['encfs', '-v', encrypted_dir, mounted_dir])

    # Restore user home files. subprocess does not expand shell globs, so
    # expand the patterns with the glob module before calling mv.
    sources = glob.glob(os.path.join(backup_dir, "*"))
    sources += glob.glob(os.path.join(backup_dir, ".[A-Za-z0-9]*"))
    if sources:
        subprocess.check_call(['mv'] + sources + [mounted_dir])

    # Delete home backup
    subprocess.check_call(['rmdir', backup_dir])
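

# --- Usage sketch (illustrative; paths are placeholders) ---
# Intended to run from the installer once the target system is populated,
# e.g. setup('john', '/install') encrypts /install/home/john in place.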
import jmri.jmrit.jython.Jynstrument as Jynstrument import jmri.jmrit.catalog.Named
Icon as NamedIcon import jmri.jmrit.symbolicprog.tabbedframe.PaneOpsProgAction as PaneOpsProgAction import javax.swing.JButton as JButton class DecoderPro(Jynstrument): def getExpectedContextClassName(self): return "javax.swing.JComponent" def init(self): jbNew = JButton( PaneOpsProgAction() ) jbNew.setIcon( NamedIcon("resources/decoderpro.gif","resources/decoderpro.gif") ) jbNew.addMouseListener(self.getMouseListeners()[0]) # In order to get
the popupmenu on the button too jbNew.setToolTipText( jbNew.getText() ) jbNew.setText( None ) self.add(jbNew) def quit(self): pass
"""Applic
ation base, containing global templates.""" default_app_config = 'pontoon.base.apps.BaseConfig' MOZILLA_REPOS = ( 'ssh://hg.mozilla.org/users/m_owca.info/firefox-aurora/', 'ssh://hg.mozilla.org/users/m_owca.info/firefox-for-android-aurora/', 'ssh://hg.mozilla.org/users/m_owca.info/thunderbird-aurora/', 'ssh://hg.mozilla.org/users/m_owca.info/lightning-aurora/', 'ssh://hg.mozilla.org/users/m_owca.info/seamonkey-aurora/', ) c
lass SyncError(RuntimeError): """Error class for errors relating to the project sync process."""
from Estructura import
espaceado class Arbol_Sintactico_Abstracto: def __init__(self,alcance,hijos): self.hijos = hijos self.alcance = alcance self.cont = 1 def imprimir(self,tabulacion): if (len(self.hijos) > 1): print tabulacion + "SECUENCIA" for hijo in self.hijos: hijo.nivel = 1 hijo.imprimir(
espaceado(tabulacion)) def ejecutar(self): for hijo in self.hijos: hijo.nivel = 1 hijo.ejecutar()
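
# --- Sketch (hypothetical node class; not part of the original module) ---
# Children stored in `hijos` are expected to expose `nivel`, `imprimir` and
# `ejecutar`; a minimal leaf node could look like:
#
#   class Nodo_Hoja:
#       def __init__(self, valor):
#           self.valor = valor
#           self.nivel = 0
#
#       def imprimir(self, tabulacion):
#           print tabulacion + str(self.valor)
#
#       def ejecutar(self):
#           pass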
import glob from subprocess import call test_failures = {} test_successes = {} files = [file for file in glob.glob('../**/build.gradle', recursive=True)] for f in files: if f.startswith('../test'): continue # clean all projects in the platform before executing build print("Cleaning all
projects first...") call(['../gradlew', '-p', '../', 'clean']) print("Executing " + f + "...") rc = call(['../gradlew', '-b', f, 'build']) if rc == 0: test
_successes[f] = rc else: test_failures[f] = rc print("Return code: " + str(rc)) print("FAILURES:") for key in test_failures: print(key + ": " + "FAILED(rc=" + str(test_failures[key]) + ")!") print("\n\n") print("SUCCESSES:") for key in test_successes: print(key + ": PASS")
(Filter): """ Highlight special code tags in comments and docstrings. Options accepted: `codetags` : list of strings A list of strings that are flagged as code tags. The default is to highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``. """ def __init__(self, **options): Filter.__init__(self, **options) tags = get_list_opt(options, 'codetags', ['XXX', 'TODO', 'BUG', 'NOTE']) self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ re.escape(tag) for tag in tags if tag ])) def filter(self, lexer, stream): regex = self.tag_re for ttype, value in stream: if ttype in String.Doc or \ ttype in Comment and \ ttype not in Comment.Preproc: for sttype, svalue in _replace_special(ttype, value, regex, Comment.Special): yield sttype, svalue else: yield ttype, value class KeywordCaseFilter(Filter): """ Convert keywords to lowercase or uppercase or capitalize them, which means first letter uppercase, rest lowercase. This can be useful e.g. if you highlight Pascal code and want to adapt the code to your styleguide. Options accepted: `case` : string The casing to convert keywords to. Must be one of ``'lower'``, ``'upper'`` or ``'capitalize'``. The default is ``'lower'``. """ def __init__(self, **options): Filter.__init__(self, **options) case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower') self.convert = getattr(unicode, case) def filter(self, lexer, stream): for ttype, value in stream: if ttype in Keyword: yield ttype, self.convert(value) else: yield ttype, value class NameHighlightFilter(Filter): """ Highlight a normal Name token with a different token type. Example:: filter = NameHighlightFilter( names=['foo', 'bar', 'baz'], tokentype=Name.Function, ) This would highlight the names "foo", "bar" and "baz" as fu
nctions. `Name.Function` is the default token type. Options accepted: `name
s` : list of strings A list of names that should be given the different token type. There is no default. `tokentype` : TokenType or string A token type or a string containing a token type name that is used for highlighting the strings in `names`. The default is `Name.Function`. """ def __init__(self, **options): Filter.__init__(self, **options) self.names = set(get_list_opt(options, 'names', [])) tokentype = options.get('tokentype') if tokentype: self.tokentype = string_to_tokentype(tokentype) else: self.tokentype = Name.Function def filter(self, lexer, stream): for ttype, value in stream: if ttype is Name and value in self.names: yield self.tokentype, value else: yield ttype, value class ErrorToken(Exception): pass class RaiseOnErrorTokenFilter(Filter): """ Raise an exception when the lexer generates an error token. Options accepted: `excclass` : Exception class The exception class to raise. The default is `pygments.filters.ErrorToken`. *New in Pygments 0.8.* """ def __init__(self, **options): Filter.__init__(self, **options) self.exception = options.get('excclass', ErrorToken) try: # issubclass() will raise TypeError if first argument is not a class if not issubclass(self.exception, Exception): raise TypeError except TypeError: raise OptionError('excclass option is not an exception class') def filter(self, lexer, stream): for ttype, value in stream: if ttype is Error: raise self.exception(value) yield ttype, value class VisibleWhitespaceFilter(Filter): """ Convert tabs, newlines and/or spaces to visible characters. Options accepted: `spaces` : string or bool If this is a one-character string, spaces will be replaces by this string. If it is another true value, spaces will be replaced by ``·`` (unicode MIDDLE DOT). If it is a false value, spaces will not be replaced. The default is ``False``. `tabs` : string or bool The same as for `spaces`, but the default replacement character is ``»`` (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value is ``False``. Note: this will not work if the `tabsize` option for the lexer is nonzero, as tabs will already have been expanded then. `tabsize` : int If tabs are to be replaced by this filter (see the `tabs` option), this is the total number of characters that a tab should be expanded to. The default is ``8``. `newlines` : string or bool The same as for `spaces`, but the default replacement character is ``¶`` (unicode PILCROW SIGN). The default value is ``False``. `wstokentype` : bool If true, give whitespace the special `Whitespace` token type. This allows styling the visible whitespace differently (e.g. greyed out), but it can disrupt background colors. The default is ``True``. 
*New in Pygments 0.8.* """ def __init__(self, **options): Filter.__init__(self, **options) for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items(): opt = options.get(name, False) if isinstance(opt, basestring) and len(opt) == 1: setattr(self, name, opt) else: setattr(self, name, (opt and default or '')) tabsize = get_int_opt(options, 'tabsize', 8) if self.tabs: self.tabs += ' '*(tabsize-1) if self.newlines: self.newlines += '\n' self.wstt = get_bool_opt(options, 'wstokentype', True) def filter(self, lexer, stream): if self.wstt: spaces = self.spaces or ' ' tabs = self.tabs or '\t' newlines = self.newlines or '\n' regex = re.compile(r'\s') def replacefunc(wschar): if wschar == ' ': return spaces elif wschar == '\t': return tabs elif wschar == '\n': return newlines return wschar for ttype, value in stream: for sttype, svalue in _replace_special(ttype, value, regex, Whitespace, replacefunc): yield sttype, svalue else: spaces, tabs, newlines = self.spaces, self.tabs, self.newlines # simpler processing for ttype, value in stream: if spaces: value = value.replace(' ', spaces) if tabs: value = value.replace('\t', tabs) if newlines: value = value.replace('\n', newlines) yield ttype, value class GobbleFilter(Filter): """ Gobbles source code lines (eats initial characters). This filter drops the first ``n`` characters off every line of code. This may be useful when the source code fed to the lexer is indented by a fixed amount of space that isn't desired in the output. Options accepted: `n` : int The number of characters to gobble. *New in Pygments 1.2.* """ def __init__(self, **options): Filter.__init__(self, **options) self.n = get_int_opt(options, 'n', 0) def gobble(self, value, left): if left < len(value): return value[left:], 0 else: return '', left - len(value) def filter(self, lexer, stream): n = self.n left = n # How many characters left to
class BasePlugin(object):
    """
    Extend this/copy its structure to create plugins.
    Your plugin class must be named `Plugin` to be loaded.

    Can include commands (command_*) and admin commands (admin_*).
    Additionally, yaib will look for functions for many of the
    connection events.

    Any commands with a docstring will be automatically added to the help
    command output, categorized by plugin name.

    Command docstrings can include {nick} and {command_prefix}, which will
    automatically be replaced in the help text with the current values.
    """
    name = 'BasePlugin'

    def __init__(self, yaib, configuration):
        self.yaib = yaib
        # save a shortcut to just this plugin's settings
        self.settings = self.yaib.getPluginSettings(self.name)
        # configure the plugin
        self.configure(configuration)
        # create any default settings
        self.createDefaultSettings()

    @property
    def command_prefix(self):
        # this is a property so it stays updated, even if the setting changes
        return self.yaib.command_prefix

    @property
    def nick(self):
        return self.yaib.nick

    def configure(self, configuration):
        """
        Override this to handle configuration.
        @param configuration: (object) the entire yaib config file.
        """
        pass

    def createDefaultSettings(self):
        """ Ca
lled during initialization. Use self.settings.setMulti({...}, initial=True) """ pass def getDbSession(self): return self.yaib.persistence.getDbSession() def formatDoc(self, message): """Formats the given message with the {nick} and {command_prefix}.""" return self.yaib.formatDoc(mess
age) def callLater(self, delay, func, *args, **kwargs): """ Wait for the delay (in seconds) then call the function with the given arguments.""" return self.yaib.callLater(delay, func, *args, **kwargs) def onShutdown(self): """Called when yaib is shutting down. Clean anything up and save all the settings necessary.""" pass def send(self, channel, message): """Send a message in the given channel.""" return self.yaib.sendMessage(channel, message) def reply(self, channel, nick, message): """ If the channel is the bot (ie, was a private message to the bot) sends a message back to the sender, otherwise sends to the channel. """ return self.send( channel if channel != self.nick else nick, message ) def action(self, channel, action): """Send an action in the given channel.""" return self.yaib.action(channel, action) def onPluginsLoaded(self): """Called when ALL the plugins are loaded.""" pass def onNickChange(self, nick, old_nick): """Called when {nick}'s nick changes.""" pass def onConnected(self): """Called when connected to a server.""" pass def onMessageOfTheDay(self, message): """Called with the server's message of the day.""" pass def onNotification(self, user, nick, channel, message): """Called when noticed""" pass def onUserAction(self, user, nick, channel, action): """Called when a user performs an action.""" pass def onPrivateMessage(self, user, nick, message): """Called when a user sends {nick} a private message""" pass def onMessage(self, user, nick, channel, message, highlight): """Called when something is said in a channel""" pass def onSend(self, channel, message): """Called when {nick} sends a message to a channel (can be PM).""" pass def onAction(self, channel, action): """Called when {nick} does an action in a channel""" pass def onCommand(self, user, nick, channel, command, more): """Called when {nick} runs a command on behalf of a user.""" pass def onAdminCommand(self, user, nick, channel, command, more): """Called when {nick} runs an admin command on behalf of a user.""" pass def onJoined(self, channel): """Called after joining a channel.""" pass def onLeave(self, channel): """Called after leaving a channel.""" pass def onKicked(self, kicker_user, kicker, channel, message): """Called when {nick} is kicked from a channel.""" pass def onUserJoined(self, user, nick, channel): """Called when a user joins a channel.""" pass def onUserLeave(self, user, nick, channel): """Called when a user leaves a channel.""" pass def onUserQuit(self, user, nick, quitMessage): """Called when a user disconnects from the server.""" pass def onUserKicked(self, kickee, channel, kicker_user, kicker, message): """Called when a user is kicked from a channel""" pass def onUserRenamed(self, user, old_nick, new_nick): """Called when a user changes their nick""" pass def onUserList(self, channel_type, channel_name, user_list): """ Called when user_list is given for a channel (ie, upon joining the channel). NOTE: this is a list of nicks, not user strings. """ pass
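

# --- Sketch (illustrative; the command signature is an assumption) ---
# A minimal plugin following the conventions documented above: the class must
# be named `Plugin`, and command_* methods become bot commands. The
# (user, nick, channel, more) signature is assumed for illustration:
#
#   class Plugin(BasePlugin):
#       name = 'Echo'
#
#       def command_echo(self, user, nick, channel, more):
#           """Repeats your message. Usage: {command_prefix}echo <text>"""
#           self.reply(channel, nick, more or 'nothing to echo')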
# -*- coding: utf-8 -*- # Generated by D
jango 1.9.7 on 2016-11-01 22:55
from __future__ import unicode_literals

from django.db import migrations, mod
els class Migration(migrations.Migration): dependencies = [ ('contentcuration', '0038_contentnode_author'), ] operations = [ migrations.AlterField( model_name='formatpreset', name='id', field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ( 'document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail')], max_length=150, primary_key=True, serialize=False), ), ]
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be> # Copyright (C
) 2014-2015 Jesús Espino <jespinog@gmail.com> # Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # Lice
nse, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# This code is partially taken from django-rest-framework:
# Copyright (c) 2011-2014, Tom Christie

from django.core.urlresolvers import RegexURLResolver
from django.conf.urls import url, include

from .settings import api_settings


def apply_suffix_patterns(urlpatterns, suffix_pattern, suffix_required):
    ret = []
    for urlpattern in urlpatterns:
        if isinstance(urlpattern, RegexURLResolver):
            # Set of included URL patterns
            regex = urlpattern.regex.pattern
            namespace = urlpattern.namespace
            app_name = urlpattern.app_name
            kwargs = urlpattern.default_kwargs
            # Add in the included patterns, after applying the suffixes
            # (named so it does not shadow django.conf.urls.patterns)
            nested_patterns = apply_suffix_patterns(urlpattern.url_patterns,
                                                    suffix_pattern,
                                                    suffix_required)
            ret.append(url(regex, include(nested_patterns, namespace, app_name), kwargs))
        else:
            # Regular URL pattern
            regex = urlpattern.regex.pattern.rstrip("$") + suffix_pattern
            view = urlpattern._callback or urlpattern._callback_str
            kwargs = urlpattern.default_args
            name = urlpattern.name
            # Add in both the existing and the new urlpattern
            if not suffix_required:
                ret.append(urlpattern)
            ret.append(url(regex, view, kwargs, name))
    return ret


def format_suffix_patterns(urlpatterns, suffix_required=False, allowed=None):
    """
    Supplement existing urlpatterns with corresponding patterns that
    also include a ".format" suffix. Retains urlpattern ordering.

    urlpatterns:
        A list of URL patterns.

    suffix_required:
        If `True`, only suffixed URLs will be generated, and non-suffixed
        URLs will not be used. Defaults to `False`.

    allowed:
        An optional tuple/list of allowed suffixes, e.g. ["json", "api"].
        Defaults to `None`, which allows any suffix.
    """
    suffix_kwarg = api_settings.FORMAT_SUFFIX_KWARG
    if allowed:
        if len(allowed) == 1:
            allowed_pattern = allowed[0]
        else:
            allowed_pattern = "(%s)" % "|".join(allowed)
        suffix_pattern = r"\.(?P<%s>%s)$" % (suffix_kwarg, allowed_pattern)
    else:
        suffix_pattern = r"\.(?P<%s>[a-z0-9]+)$" % suffix_kwarg

    return apply_suffix_patterns(urlpatterns, suffix_pattern, suffix_required)
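

# --- Illustrative usage (not part of the original module) -----------------
# A sketch of what format_suffix_patterns produces; the view and URL name
# below are hypothetical placeholders.

def _example_view(request):  # hypothetical stand-in for a real view
    pass

example_patterns = [
    url(r"^projects/(?P<pk>\d+)$", _example_view, name="project-detail"),
]

# Adds ^projects/(?P<pk>\d+)\.(?P<format>(json|api))$ alongside the original
# pattern, so /projects/3.json resolves to the same view with format="json".
example_patterns = format_suffix_patterns(example_patterns, allowed=["json", "api"])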
#!/usr/local/bin/python import sys import urllib import urllib2 import json import datetime YAHOO_URL = 'http://query.yahooapis.com/v1/public/yql?env=http%3A%2F%2Fdatatables.org%2Falltables.env&format=json&diagnostics=true&q=' def getJSON(fileName): f = open(fileName) jsonData = json.load(f) f.close() return jsonData def writeJSON(jsonData, fileName): f = open(fileName, 'w') json.dump(jsonData, f) f.close() def fixSymbol(symbol) : if len(symbol) > 1 and symbol[-2] == "/": symbol = symbol[:-2] + '-' + symbol[-1] if '/' in symbol : symbol = symbol.split('/')[0] return symbol.replace('^', '-P').rstrip() def getReturn(returns): if len(returns.keys()) == 0: return 0 firstDate = returns.keys()[0] lastDate = returns.keys()[0] for date in returns.keys(): if date < firstDate: firstDate = date if date > lastDate: lastDate = date openPrice = float(returns[firstDate][0]) closePrice = float(returns[lastDate][1]) return (closePrice - openPrice) / openPrice def getReturnForCompany(symbol, date, numOfDays): endDate = datetime.datetime.strptime(date, '%Y-%m-%d') + datetime.timedelta(days=numOfDays) sym = fixSymbol(symbol) query = 'select * from yahoo.finance.historicaldata where symbol = "'+sym+'" and startDate = "'+str(date)+'" and endDate = "'+str(endDate.date())+'"' encoded_query = urllib.quote(query) try: url = YAHOO_URL + encoded_query jsonRawData = urllib2.urlopen(url) jsonData = json.load(jsonRawData) if jsonData['query']['results'] == None: return 0.0 if type(jsonData['query']['results']['quote']) == type({}): quotes = [jsonData['query']['results']['quote']] else: quotes = jsonData['query']['results']['quote'] returns = {} for data in quotes: returns[data['Date']] = (data['Open'], data['Close']) return getReturn(returns) except: return 0.0 def returnsJSONSnippet(jsonData, days): returns = {} progress = 0 size = float(len(jsonData.keys())) for article in jsonData.keys(): date = jsonData[article]['date'] companies = jsonData[article]['company'] articleReturns = [] for company in companies: articleReturns.append(getReturnForCompany(company, date, days)) articleReturn = sum(articleReturns) / len(articleReturns) returns[article] = articleReturn if progress % 100 == 0: print progress / size, progress, 'out of', size progress += 1 return returns def returnsJSONFull(jsonData, days): returns = {} progress = 0 size = float(len(jsonData)) for article in jsonData: date = article['date'] companies = article['company'] articleReturns = [] for company in companies: articleReturns.append(getReturnForCompany(company, date, days)) articleReturn = sum(articleReturns) / len(articleReturns) key = article['title'][0] + '
' + article['text'] returns[key] = articleReturn if progress % 100 == 0: print progress / size, progress, 'out of', size progress += 1 return returns inputFile = sys.argv[2] outputFile = sys.argv[3] days = int(sys.argv[4]) jsonData = getJSON(inputFile) if sys.argv[1] == 'snippet': jsonToWrite = returnsJSONSnippet(jsonData, days) elif sys.argv[1] == 'full':
jsonToWrite = returnsJSONFull(jsonData, days) writeJSON(jsonToWrite, outputFile)
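

# --- Illustrative usage (not part of the original script) -----------------
# getReturn() maps {date: (open, close)} to the simple return from the
# earliest open to the latest close; the quotes below are made up.

def example_get_return():
    sample = {'2015-01-02': ('100.0', '101.0'),
              '2015-01-05': ('101.5', '110.0')}
    # (110.0 - 100.0) / 100.0 == 0.1
    return getReturn(sample)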
#!/usr/bin/
env python # coding=utf-8 import json import sys dat
a = { 'g1': { 'hosts': [ '172.17.0.2' ] } } with open('w.log', 'w') as f: f.write(str(sys.argv)) print json.dumps(data)
import random
import Image
import ImageFont
import ImageDraw
import ImageFilter
import hashlib

from random_words import RandomWords


def gen_captcha(text, fnt, fnt_sz, file_name, fmt='JPEG'):
    """Generate a captcha image"""
    # randomly select the foreground color
    fgcolor = random.randint(0, 0xffff00)
    # make the background color the opposite of fgcolor
    bgcolor = fgcolor ^ 0xffffff
    # create a font object
    font = ImageFont.truetype(fnt, fnt_sz)
    # determine dimensions of the text
    dim = font.getsize(text)
    # create a new image slightly larger than the text
    im = Image.new('RGB', (dim[0] + 5, dim[1] + 5), bgcolor)
    d = ImageDraw.Draw(im)
    x, y = im.size
    r = random.randint
    # draw 100 random colored boxes on the background
    for num in range(100):
        d.rectangle((r(0, x), r(0, y), r(0, x), r(0, y)), fill=r(0, 0xffffff))
    # add the text to the image
    d.text((3, 3), text, font=font, fill=fgcolor)
    im = im.filter(ImageFilter.EDGE_ENHANCE_MORE)
    # save the image to a file
    im.save
(file_name, format=fmt) def new_word(): rw = RandomWords() word = rw.random_word() return word, hashlib.sh
a224(word).hexdigest()


if __name__ == '__main__':
    """Example: This grabs a random word from the dictionary 'words'
    (one word per line) and generates a jpeg image named 'test.jpg'
    using the truetype font 'porkys.ttf' with a font size of 25.
    """
    words = open('static/words').readlines()
    # random.randint is inclusive on both ends, so use len(words) - 1 to
    # avoid an IndexError (and allow the first word to be chosen too)
    word = words[random.randint(0, len(words) - 1)]
    gen_captcha(word.strip(), 'static/porkys.ttf', 25, "captchas/test.jpg")
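

# --- Illustrative usage (not part of the original script) -----------------
# new_word() pairs a random word with its SHA-224 digest; a sketch of using
# it so the image filename never reveals the answer (the font and directory
# paths are the same assumptions as in the example above).

def example_new_word_captcha():
    word, digest = new_word()
    gen_captcha(word, 'static/porkys.ttf', 25, 'captchas/%s.jpg' % digest)
    return word, digest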
TION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_username(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_username() @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_optional_username(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_username() @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_username_email(self): """ Tests you canT log in without email verification for username & email auth. """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_optional_username_email(self): """ Tests you canT log in without email verification for username & email auth. """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_necessary_login_fail_username(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_necessary_login_fail_email(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_necessary_login_fail_username_email(self): """ Tests you can log in without email verification """ self.common_test_registration_basic({'username': 'admin_man', 'email': 'admin1@email.com', 'password1': 'password12', 'password2': 'password12'}) response = self.client.post(self.login_url, {'username': 'admin_man', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_registration_email_verification_neccessary_verified_login(self, login_data): mail_count = len(mail.outbox) 
reg_response = self.common_test_registration_basic(self.reusable_register_user_data1) self.assertEquals(len(m
ail.outbox), mail_count + 1)
new_user = get_user_model().objects.latest('id') login_response = self.client.post(self.login_url, login_data, format='json') self.assertEquals(login_response.status_code, status.HTTP_400_BAD_REQUEST) # verify email email_confirmation = new_user.emailaddress_set.get(email=self.reusable_register_user_data1['email']).emailconfirmation_set.order_by('-created')[0] verify_response = self.client.post(self.verify_url, {'key': email_confirmation.key}, format='json') self.assertEquals(verify_response.status_code, status.HTTP_200_OK) login_response = self.client.post(self.login_url, login_data, format='json') self.assertEquals(login_response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_verified_login_username(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'}) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_neccessary_verified_login_email(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'email': 'admin1@email.com', 'password': 'password12'}) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_verified_login_username_email(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'}) """ Password Reset Tests ==================== """ def test_password_reset(self): """ Test basic functionality of password reset. """ get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') payload = {'email': 'admin@email.com'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}') @override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=True) def test_password_reset_fail_no_user_with_email_no_notify_not_in_system(self): """ Test basic functionality of password reset fails when there is no email on record (notify email not in system). """ payload = {'email': 'admin@email.com'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"error":"User with email doesn\'t exist. Did not send reset email."}') @override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=False) def test_password_reset_no_user_with_email_no_notify_not_in_system(self): """ Test basic functionality of password reset fails when there is no email on record. """ payload = {'email': 'admin@email.com'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}') def test_password_reset_confirm_fail_invalid_token(self): """ Test password reset confirm fails if token is invalid. 
""" user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': '-wrong-token-'
import web

import base
import local


def orNone(a, b):
    if a is None:
        return b
    return a


class Field(object):

    def __init__(self, name, description, primary=False, validator=None):
        self.name = name
        self.primary = primary
        self.description = description
        if validator is None:
            validator = lambda *args: (True, '')
        self.validator = validator
        self.record = None # yet

    def full_name(self):
        return self.record.record_name + '_' + self.name

    def validate(self, value):
        return self.validator(value)

    def html_input(self, default_values):
        return "<input type='text' id='%s' name='%s' value='%s'>" % (
            self.full_name(),
            self.full_name(),
            default_values.get(self.full_name(), '')
        )


class PasswordField(Field):

    def __init__(self, *args, **kwargs):
        Field.__init__(self, *args, **kwargs)

    def validate(self, value):
        #if len(value) < 8:
        #    return False, 'pass_too_short'
        return True, ''

    def html_input(self, default_values):
        return "<input type='password' id='%s' name='%s'>" % (
            self.full_name(),
            self.full_name(),
        )


class Record(object):

    def __init__(self, record_name, table=None, fields=None, role=None,
                 add_title=None, admin_title=None,
                 add_action=None, remove_action=None):
        self.record_name = record_name
        self.table = table
        # avoid the shared-mutable-default-argument pitfall
        self.fields = fields if fields is not None else []
        self.add_action = add_action
        self.remove_action = remove_action
        self.primary_key = None
        self._check_primary_key()
        for field in self.fields:
            field.record = self
        self.role = role
        self.add_title = orNone(add_title, 'Dar de alta %s' % (self.record_name,))
        self.admin_title = orNone(admin_title, 'Administrar %s' % (self.record_name,))

    def all_elements(self):
        if self.table is None:
            return []
        return local.db.select(self.table)

    def _check_primary_key(self):
        nprimary = 0
        for field in self.fields:
            if field.primary:
                nprimary += 1
                self.primary_key = field
        if nprimary != 1:
            raise Exception('Warning: %s should have exactly one primary key' % (
                self.record_name)
            )

    def class_admin(self, parent_class=base.Content):
        metaself = self
        class C(parent_class):
            role_required = self.role
            def request(self):
                "Main administration page."
                return local.render.admin_list(
                    record=metaself,
                )
        return C

    def class_add_service(self, parent_class=base.Content):
        metaself = self
        class C(parent_class):
            role_required = self.role
            def request(self):
                "Render the form for creating instances of this record."
                cookies = web.cookies()
                input = web.input()

                # get the default value for each field
                # from the cookies
                default_values = {}
                for field in metaself.fields:
                    default = cookies.get('last_' + field.full_name(), None)
                    if default is None:
                        default = ''
                    default_values[field.full_name()] = default

                if input.get('errfield', False):
                    f
ocus_on = input.errfield else: focus_on = metaself.fields[0].full_name() return local.render.add_form( input=web.input(), action='/%s/add' % (metaself.record_name,), description=metaself.add_title,
fields=metaself.fields, default_values=default_values, focus=focus_on ) return C def class_add(self, parent_class=base.Action): metaself = self class C(parent_class): role_required = self.role def request(self): "Add an instance of this record." data = web.input() # Check that the values for each field are valid bad_fields = [] errmsg = False any_error = False dictionary = {} for field in metaself.fields: value = data.get(field.full_name()) dictionary[field.name] = value web.setcookie('last_' + field.full_name(), value) ok, message = field.validate(value) if not ok: any_error = True bad_fields.append('error_' + field.full_name()) if not errmsg: errmsg = message if any_error: raise web.seeother('/%s/add_service?errmsg=%s%s' % ( metaself.record_name, errmsg, ''.join(['&%s=1' % (f,) for f in bad_fields]) )) if metaself.table is not None: # Check that there are no repeated keys primary_value = dictionary[metaself.primary_key.name] it = local.db.query('select count(*) as total from ' + metaself.table + \ ' where ' + metaself.primary_key.name + '=$primary_value', vars=locals()) if it[0].total > 0: raise web.seeother('/%s/add_service?errmsg=already_exists&error_%s=1' % ( metaself.record_name, metaself.primary_key.full_name() )) if metaself.table is not None and metaself.add_action is None: local.db.insert(metaself.table, **dictionary) if metaself.add_action is not None: metaself.add_action(dictionary) else: raise web.seeother('/%s/admin' % (metaself.record_name,)) return C def class_remove(self, parent_class=base.Action): metaself = self class C(parent_class): role_required = self.role def request(self): dictionary = {} if metaself.remove_action is not None: metaself.remove_action(dictionary) return C
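

# --- Illustrative usage (not part of the original module) -----------------
# Sketch of wiring up a Record; the table name, field names and validator
# below are hypothetical.

def _nonempty(value):
    # validators return (ok, error_message_key)
    if not value:
        return False, 'field_required'
    return True, ''

user_record = Record(
    'user',
    table='users',
    fields=[
        Field('username', 'User name', primary=True, validator=_nonempty),
        PasswordField('password', 'Password'),
    ],
    role='admin',
)
# user_record.class_admin(), class_add_service() and class_add() then yield
# web.py handler classes for the admin list, the add form and the add action.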
# -*- coding: utf-8 -*-

# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php

# Copyright 2014, Hartmut Goebel <h.goebel@goebel-consult.de>

"""
Test cases for L{backends.ampache_storage}
"""

from lxml import etree

from twisted.trial import unittest

from coherence.backends import ampache_storage

SONG = '''
<!-- taken from https://github.com/ampache/ampache/wiki/XML-API
     but the original was not valid XML, so we cannot trust it -->
<root>
<song id="3180">
    <title>Hells Bells</title>
    <artist id="129348">AC/DC</artist>
    <album id="2910">Back in Black</album>
    <tag id="2481" count="3">Rock &amp; Roll</tag>
    <tag id="2482" count="1">Rock</tag>
    <tag id="2483" count="1">Roll</tag>
    <track>4</track>
    <time>234</time>
    <url>http://localhost/play/index.php?oid=123908...</url>
    <size>654321</size>
    <art>http://localhost/image.php?id=129348</art>
    <preciserating>3</preciserating>
    <rating>2.9</rating>
</song>
</root>
'''

SONG_370 = '''
<!-- real-world example from Ampache 3.7.0 -->
<root>
<song id="3440">
    <title><![CDATA[Achilles Last Stand]]></title>
    <artist id="141"><![CDATA[Led Zeppelin]]></artist>
    <album id="359"><![CDATA[Presence]]></album>
    <tag id="" count="0"><![CDATA[]]></tag>
    <filename><![CDATA[/mnt/Musique/Led Zeppelin/Presence/01 - Achilles Last Stand.mp3]]></filename>
    <track>1</track>
    <time>625</time>
    <year>1976</year>
    <bitrate>248916</bitrate>
    <mode>vbr</mode>
    <mime>audio/mpeg</mime>
    <url><![CDATA[http://songserver/ampache/play/index.php?ssid=1e11a4&type=song&oid=3440&uid=4&name=Led%20Zeppelin%20-%20Achilles%20Last%20Stand.mp3]]></url>
    <size>19485595</size>
    <mbid></mbid>
    <album_mbid></album_mbid>
    <artist_mbid></artist_mbid>
    <art><![CDATA[http://songserver/ampache/image.php?id=359&object_type=album&auth=1e11a40&name=art.]]></art>
    <preciserating>0</preciserating>
    <rating>0</rating>
    <averagerating></averagerating>
</song>
</root>
'''


class DummyStore:

    def __init__(self):
        pass

    proxy = False


class TestAmpache(unittest.TestCase):

    def setUp(self):
        pass

    def test_song(self):
        """Test songs with XML from the Ampache wiki example"""
        doc = etree.fromstring(SONG)
        song = doc.find('song')
        store = DummyStore()
        track = ampache_storage.Track(st
ore, song) self.assertEqual(track.get_id(), 'song.3180') self.assertEqual(track.parent_id, 'album.2910') self.assertEqual(track.duration, '0:03:54') self.assertEqual(track.get_url(), 'http://localhost/play/index.php?oid=123908...') self.assertEqual(track.get_name(), 'Hells Bells') self.assertEqual(track.title, 'Hells Bells') self.assertEqual(track.artist, 'AC/DC') self.assertEqual(tr
ack.album, 'Back in Black') self.assertEqual(track.genre, None) self.assertEqual(track.track_nr, '4') self.assertEqual(track.cover, 'http://localhost/image.php?id=129348') self.assertEqual(track.mimetype, 'audio/mpeg') # guessed self.assertEqual(track.size, 654321) self.assertIs(track.get_path(), None) self.assertEqual(track.get_children(), []) self.assertEqual(track.get_child_count(), 0) def test_song_370(self): """Test songs with XML from Ampache 3.7.0""" doc = etree.fromstring(SONG_370) song = doc.find('song') store = DummyStore() track = ampache_storage.Track(store, song) self.assertEqual(track.get_id(), 'song.3440') self.assertEqual(track.parent_id, 'album.359') self.assertEqual(track.duration, '0:10:25') self.assertEqual(track.get_url(), 'http://songserver/ampache/play/index.php?ssid=1e11a4&type=song&oid=3440&uid=4&name=Led%20Zeppelin%20-%20Achilles%20Last%20Stand.mp3') self.assertEqual(track.get_name(), 'Achilles Last Stand') self.assertEqual(track.title, 'Achilles Last Stand') self.assertEqual(track.artist, 'Led Zeppelin') self.assertEqual(track.album, 'Presence') self.assertEqual(track.genre, None) self.assertEqual(track.track_nr, '1') self.assertEqual(track.cover, 'http://songserver/ampache/image.php?id=359&object_type=album&auth=1e11a40&name=art.') self.assertEqual(track.mimetype, 'audio/mpeg') self.assertEqual(track.size, 19485595) self.assertIs(track.get_path(), None) self.assertEqual(track.get_children(), []) self.assertEqual(track.get_child_count(), 0)
from pygame import K_UP, K_DOWN, K_LEFT, K_RIGHT from Caracter import Caracter class CommandHandler(object): #0 1 2 3 4 5 6 7 8 9 10 11 12 13 _automata_transitions= [[11,11,0, 4, 0, 0, 11,11,0, 11,0, 11,13,0],#up [9, 2, 0, 0, 0, 0, 9, 9, 0, 0, 0, 12,0, 0],#down [0, 6, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],#left [1, 0, 0, 0, 5, 0, 7, 0, 0, 0, 0, 1, 0, 0]]#right
    # The final states
    final_list = [3, 7, 9, 11, 13]
    final_state = 0

    def __init__(self, caracter):
        self.caracter = caracter
        self.actual_state = 0

    def refresh_state(self, in_key):
        self.final_state = 0
        input_code = -1
        if in_key == K_UP:
            input_code = 0
        elif in_key == K_DOWN:
            input_code = 1
        elif in_key == K_LEFT:
            input_code = 2
        elif in_key == K_RIGHT:
            input_code = 3
        if input_code == -1:
            # unhandled key: do not index the table with -1, which would
            # silently behave like K_RIGHT
            return self.actual_state
        self.actual_state = self._automata_transitions[input_code][self.actual_state]
        if self.actual_state == 3:
            if not self.caracter.onGround:
                self.caracter.pendingRoll = True
        elif self.actual_state == 7:
            self.caracter.doSprint()
        elif self.actual_state == 9:
            if not self.caracter.onGround:
                self.caracter.pendingGetDown = True
            else:
                self.caracter.doGetDown()
        elif self.actual_state == 11:
            self.caracter.doJump()
        elif self.actual_state == 13:
            self.caracter.doClimb()
        #print "current state: " + str(self.actual_state)
        if self.actual_state in self.final_list:
            # remember the accepting state and reset the automaton
            # (the original compared self.final_state, which was always 0
            # here, so the reset never fired)
            self.final_state = self.actual_state
            self.actual_state = 0
            return self.final_state
        return self.actual_state
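

# --- Illustrative usage (not part of the original module) -----------------
# Driving the automaton: from state 0, K_UP moves to state 11 (a final
# state), which triggers doJump() and resets the automaton. The stub below
# is a hypothetical stand-in for the real Caracter class.

class _StubCaracter(object):
    onGround = True
    pendingRoll = False
    pendingGetDown = False

    def doSprint(self): pass
    def doGetDown(self): pass
    def doJump(self): print "jump!"
    def doClimb(self): pass

def example_jump():
    handler = CommandHandler(_StubCaracter())
    return handler.refresh_state(K_UP)   # prints "jump!" and returns 11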
""" CRISPR_db_parser Madeleine Bonsma March 7, 2015 Updated May 3, 2016 This script takes a list of spacers downloaded from the CRISPRdb website and splits them into individual files, one file per organism. Result files are saved in "data/spacers". """ import linecache import os # CRISPR db parser # MB Mar 07 2015 filename = "data/spacerdatabase.txt" # File from CRISPRdb to sort spacer_db = open(filename, "r") # check if directory for saving exists directory = "data/spacers" if not os.path.exists(directory): os.makedirs(directory) # places to dump accession numbers during execution refseq_list = [] refseq_dict = {} for num, line in enumerate(spacer_db, 1): check = True # awkward while loop if line[0] == ">": # use the headers, indicated by >, to sort # delete 1st character to make loop same each time around line = line[1:] counter = 0 while check: counter += 1 # this part of the header is the NCBI accession refseq = line[0:9] if refseq not in refseq_list: # open new file if it's a new bacteria refseq_dict[refseq] = open( "data/spacers/%s.fasta" % refseq, "w" ) if "|" in line:
# if more than one bacteria contain spacer i = line.index("|")
# include in header the locus identifier and spacer # position identifier writeline = line[10:i] writeline2 = writeline.replace('_', '.') else: # if it's only one bacteria writeline = line[10:] writeline2 = writeline.replace('_', '.') # write header and spacer to file refseq_dict[refseq].write(">" + writeline2 + "\n") refseq_dict[refseq].write( linecache.getline("%s" % filename, num + 1) ) # since the file is organized alphabetically by the # first bacteria in the header, if we see a different # first bacteria we can close the previous file to free # up space. This might be buggy. if counter == 1: try: refseq_prev = linecache.getline( "%s" % filename, num - 2 )[1:10] refseq_dict[refseq_prev].close() except: # throws exception on the first time through, # otherwise wouldn't pass refseq_list.append(refseq) if refseq in refseq_list: if "|" in line: i = line.index("|") # include in header the locus identifier and spacer # position identifier writeline = line[10:i] writeline2 = writeline.replace('_', '.') else: writeline = line[10:] writeline2 = writeline.replace('_', '.') refseq_dict[refseq].write(">" + writeline2 + "\n") refseq_dict[refseq].write( linecache.getline("%s" % filename, num + 1) ) try: i = line.index("|") # change the header so that the next bacteria is up for # the loop line = line[i + 1:] except: check = False for key in refseq_dict: if not refseq_dict[key].closed: refseq_dict[key].close() spacer_db.close()
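

# --- Illustrative helper (not part of the original script) ----------------
# A hypothetical helper mirroring the header-parsing loop above: given one
# header line (with the leading '>' already removed), yield one
# (accession, locus.position) pair per organism sharing the spacer.

def _split_header(line):
    while True:
        refseq = line[0:9]          # first 9 characters are the accession
        if "|" in line:
            i = line.index("|")
            yield refseq, line[10:i].replace('_', '.')
            line = line[i + 1:]     # next organism in the same header
        else:
            yield refseq, line[10:].replace('_', '.')
            return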
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the Licens
e. from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import text def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine consumers = Table("consumers", meta, autoload=True) if not hasattr(
consumers.c, "generation"): # This is adding a column to an existing table, so the server_default # bit will make existing rows 0 for that column. consumers.create_column(Column("generation", Integer, default=0, server_default=text("0"), nullable=False))
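

# --- Illustrative usage (not part of the original migration) --------------
# Sketch of applying the migration and checking the new column; the engine
# URL is hypothetical and assumes a database where the consumers table
# already exists.

from sqlalchemy import create_engine

def example_upgrade():
    engine = create_engine("sqlite:///example.db")  # hypothetical database
    upgrade(engine)
    meta = MetaData()
    meta.bind = engine
    consumers = Table("consumers", meta, autoload=True)
    assert hasattr(consumers.c, "generation")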
_rate:", x[2] #print "modulation:", x[3] #print "fec_inner:", x[4] #print "inversion:", 2 tlist.append(parm) def getInitialTerrestrialTransponderList(tlist, region): list = nimmanager.getTranspondersTerrestrial(region) #self.transponders[self.parsedTer].append((2,freq,bw,const,crh,crl,guard,transm,hierarchy,inv)) #def buildTerTransponder(frequency, inversion = 2, bandwidth = 3, fechigh = 6, feclow = 6, #modulation = 2, transmission = 2, guard = 4, hierarchy = 4): for x
in list: if x[0] == 2: #TERRESTRIAL parm = buildTerTransponder(x[1], x[9], x[2], x[4], x[5], x[3], x[7], x[6], x[8]) tlist.append(parm) cable_bands = { "DVBC_BAND_EU_VHF_I" : 1 << 0, "DVBC_BAND_EU_MID" : 1 << 1, "DVBC_BAND_EU_VHF_III" : 1 << 2, "DVBC_BAND_EU_SUPER" : 1 << 3, "DVBC_BAND_EU_HYPER" : 1 << 4, "DVBC_BAND_EU
_UHF_IV" : 1 << 5, "DVBC_BAND_EU_UHF_V" : 1 << 6, "DVBC_BAND_US_LO" : 1 << 7, "DVBC_BAND_US_MID" : 1 << 8, "DVBC_BAND_US_HI" : 1 << 9, "DVBC_BAND_US_SUPER" : 1 << 10, "DVBC_BAND_US_HYPER" : 1 << 11, } class CableTransponderSearchSupport: # def setCableTransponderSearchResult(self, tlist): # pass # def cableTransponderSearchFinished(self): # pass def tryGetRawFrontend(self, feid): res_mgr = eDVBResourceManager.getInstance() if res_mgr: raw_channel = res_mgr.allocateRawChannel(self.feid) if raw_channel: frontend = raw_channel.getFrontend() if frontend: frontend.closeFrontend() # immediate close... del frontend del raw_channel return True return False def cableTransponderSearchSessionClosed(self, *val): print "cableTransponderSearchSessionClosed, val", val self.cable_search_container.appClosed.remove(self.cableTransponderSearchClosed) self.cable_search_container.dataAvail.remove(self.getCableTransponderData) if val and len(val): if val[0]: self.setCableTransponderSearchResult(self.__tlist) else: self.cable_search_container.sendCtrlC() self.setCableTransponderSearchResult(None) self.cable_search_container = None self.cable_search_session = None self.__tlist = None self.cableTransponderSearchFinished() def cableTransponderSearchClosed(self, retval): print "cableTransponderSearch finished", retval self.cable_search_session.close(True) def getCableTransponderData(self, str): #prepend any remaining data from the previous call str = self.remainingdata + str #split in lines lines = str.split('\n') #'str' should end with '\n', so when splitting, the last line should be empty. If this is not the case, we received an incomplete line if len(lines[-1]): #remember this data for next time self.remainingdata = lines[-1] lines = lines[0:-1] else: self.remainingdata = "" for line in lines: data = line.split() if len(data): if data[0] == 'OK': print str parm = eDVBFrontendParametersCable() qam = { "QAM16" : parm.Modulation_QAM16, "QAM32" : parm.Modulation_QAM32, "QAM64" : parm.Modulation_QAM64, "QAM128" : parm.Modulation_QAM128, "QAM256" : parm.Modulation_QAM256 } inv = { "INVERSION_OFF" : parm.Inversion_Off, "INVERSION_ON" : parm.Inversion_On, "INVERSION_AUTO" : parm.Inversion_Unknown } fec = { "FEC_AUTO" : parm.FEC_Auto, "FEC_1_2" : parm.FEC_1_2, "FEC_2_3" : parm.FEC_2_3, "FEC_3_4" : parm.FEC_3_4, "FEC_5_6": parm.FEC_5_6, "FEC_7_8" : parm.FEC_7_8, "FEC_8_9" : parm.FEC_8_9, "FEC_NONE" : parm.FEC_None } parm.frequency = int(data[1]) parm.symbol_rate = int(data[2]) parm.fec_inner = fec[data[3]] parm.modulation = qam[data[4]] parm.inversion = inv[data[5]] self.__tlist.append(parm) tmpstr = _("Try to find used Transponders in cable network.. 
please wait...") tmpstr += "\n\n" tmpstr += data[1] tmpstr += " kHz " tmpstr += data[0] self.cable_search_session["text"].setText(tmpstr) def startCableTransponderSearch(self, nim_idx): if not self.tryGetRawFrontend(nim_idx): self.session.nav.stopService() if not self.tryGetRawFrontend(nim_idx): if self.session.pipshown: # try to disable pip self.session.pipshown = False del self.session.pip if not self.tryGetRawFrontend(nim_idx): self.cableTransponderSearchFinished() return self.__tlist = [ ] self.remainingdata = "" self.cable_search_container = eConsoleAppContainer() self.cable_search_container.appClosed.append(self.cableTransponderSearchClosed) self.cable_search_container.dataAvail.append(self.getCableTransponderData) cableConfig = config.Nims[nim_idx].cable tunername = nimmanager.getNimName(nim_idx) try: bus = nimmanager.getI2CDevice(nim_idx) if bus is None: print "ERROR: could not get I2C device for nim", nim_idx, "for cable transponder search" bus = 2 except: # older API if nim_idx < 2: if HardwareInfo().get_device_name() == "dm500hd": bus = 2 else: bus = nim_idx else: if nim_idx == 2: bus = 2 # DM8000 first nim is /dev/i2c/2 else: bus = 4 # DM8000 second num is /dev/i2c/4 if tunername == "CXD1981": cmd = "cxd1978 --init --scan --verbose --wakeup --inv 2 --bus %d" % bus else: cmd = "tda1002x --init --scan --verbose --wakeup --inv 2 --bus %d" % bus if cableConfig.scan_type.value == "bands": cmd += " --scan-bands " bands = 0 if cableConfig.scan_band_EU_VHF_I.value: bands |= cable_bands["DVBC_BAND_EU_VHF_I"] if cableConfig.scan_band_EU_MID.value: bands |= cable_bands["DVBC_BAND_EU_MID"] if cableConfig.scan_band_EU_VHF_III.value: bands |= cable_bands["DVBC_BAND_EU_VHF_III"] if cableConfig.scan_band_EU_UHF_IV.value: bands |= cable_bands["DVBC_BAND_EU_UHF_IV"] if cableConfig.scan_band_EU_UHF_V.value: bands |= cable_bands["DVBC_BAND_EU_UHF_V"] if cableConfig.scan_band_EU_SUPER.value: bands |= cable_bands["DVBC_BAND_EU_SUPER"] if cableConfig.scan_band_EU_HYPER.value: bands |= cable_bands["DVBC_BAND_EU_HYPER"] if cableConfig.scan_band_US_LOW.value: bands |= cable_bands["DVBC_BAND_US_LO"] if cableConfig.scan_band_US_MID.value: bands |= cable_bands["DVBC_BAND_US_MID"] if cableConfig.scan_band_US_HIGH.value: bands |= cable_bands["DVBC_BAND_US_HI"] if cableConfig.scan_band_US_SUPER.value: bands |= cable_bands["DVBC_BAND_US_SUPER"] if cableConfig.scan_band_US_HYPER.value: bands |= cable_bands["DVBC_BAND_US_HYPER"] cmd += str(bands) else: cmd += " --scan-stepsize " cmd += str(cableConfig.scan_frequency_steps.value) if cableConfig.scan_mod_qam16.value: cmd += " --mod 16" if cableConfig.scan_mod_qam32.value: cmd += " --mod 32" if cableConfig.scan_mod_qam64.value: cmd += " --mod 64" if cableConfig.scan_mod_qam128.value: cmd += " --mod 128" if cableConfig.scan_mod_qam256.value: cmd += " --mod 256" if cableConfig.scan_sr_6900.value: cmd += " --sr 6900000" if cableConfig.scan_sr_6875.value: cmd += " --sr 6875000" if cableConfig.scan_sr_ext1.value > 450: cmd += " --sr " cmd += str(cableConfig.scan_sr_ext1.value) cmd += "000" if cableConfig.scan_sr_ext2.value > 450: cmd += " --sr " cmd += str(cableConfig.scan_sr_ext2.value) cmd += "000" print "TDA1002x CMD is", cmd self.cable_search_container.execute(cmd) tmpstr = _("Try to find used transponders in cable network.. please wait...") tmpstr += "\n\n..." 
self.cable_search_session = self.session.openWithCallback(self.cableTransponderSearchSessionClosed, MessageBox, tmpstr, MessageBox.TYPE_INFO) class DefaultSatLists(DefaultWizard): def __init__(self, session, silent = True, showSteps = False): self.xmlfile = "defaultsatlists.xml" DefaultWizard.__init__(self, session, silent, showSteps, neededTag = "services") print "configuredSats:", nimmanager.getConfiguredSats() def setDirectory(self): self.directory = [] self.directory.append(resolveFilename(SCOPE_DEFAULTDIR)) import os os.system("mount %s %s" % (resolveFilename(SCOPE_DEFAULTPARTITION), resolveFilename(SCOPE_DEFAULTPARTITIONMOUNTDIR))) self.directory.append(resolveFilename(SCOPE_DEFAULTPARTITIONMOUNTDIR))
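

# --- Illustrative note (not part of the original module) ------------------
# getCableTransponderData() above uses a common incremental line-split:
# keep the unterminated tail of each chunk and prepend it to the next one.
# A minimal standalone sketch of the same pattern:

class _LineBuffer(object):
    def __init__(self):
        self.remaining = ""

    def feed(self, chunk):
        chunk = self.remaining + chunk
        lines = chunk.split('\n')
        # a trailing partial line (no '\n' yet) is kept for the next feed
        self.remaining = lines.pop()
        return lines

# _LineBuffer().feed("OK 346000 6900") returns [] and buffers the text
# until a terminating '\n' arrives in a later chunk.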
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compli
ance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations #
under the License.


def get_connector_properties(root_helper, my_ip, multipath, enforce_multipath,
                             host=None):
    """Fake os-brick."""
    props = {}
    props['ip'] = my_ip
    props['host'] = host
    iscsi = ISCSIConnector('')
    props['initiator'] = iscsi.get_initiator()
    props['wwpns'] = ['100010604b019419']
    props['wwnns'] = ['200010604b019419']
    props['multipath'] = multipath
    props['platform'] = 'x86_64'
    props['os_type'] = 'linux2'
    return props


class ISCSIConnector(object):
    """Mimic the iSCSI connector."""

    def __init__(self, root_helper, driver=None, execute=None,
                 use_multipath=False, device_scan_attempts=3,
                 *args, **kwargs):
        # the original assigned a tuple to a misspelled attribute
        # (self.root_herlp = root_helper,)
        self.root_helper = root_helper
        self.execute = execute

    def get_initiator(self):
        return "fake_iscsi.iqn"
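

# --- Illustrative usage (not part of the original module) -----------------
# Sketch of what the fake connector reports; the argument values below are
# placeholders.

def example_connector_properties():
    props = get_connector_properties(root_helper='', my_ip='10.0.0.5',
                                     multipath=False, enforce_multipath=False,
                                     host='compute-1')
    assert props['initiator'] == 'fake_iscsi.iqn'
    return props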
# -*- coding: utf-8 -*- # # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import google.api_core.grpc_helpers import google.api_core.operations_v1 from google.cloud.asset_v1p2beta1.proto import asset_service_pb2_grpc class AssetServiceGrpcTransport(object): """gRPC transport class providing stubs for google.cloud.asset.v1p2beta1 AssetService API. The transport provides access to the raw gRPC stubs, which can be used to take advantage of advanced
features of gRPC. """ # The scopes needed to make gRPC calls to all of the methods defined # in this service. _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) def __init__( self, channel=None, credentials=None, address="cloudasset.googleapis.com:443" ): """Instantiate the transport class. Args: channel (grpc.Channel): A ``Channel`` instance through which to
make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. address (str): The address where the service is hosted. """ # If both `channel` and `credentials` are specified, raise an # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( "The `channel` and `credentials` arguments are mutually " "exclusive." ) # Create the channel. if channel is None: channel = self.create_channel( address=address, credentials=credentials, options={ "grpc.max_send_message_length": -1, "grpc.max_receive_message_length": -1, }.items(), ) self._channel = channel # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. self._stubs = { "asset_service_stub": asset_service_pb2_grpc.AssetServiceStub(channel) } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), # instantiate an LRO client. self._operations_client = google.api_core.operations_v1.OperationsClient( channel ) @classmethod def create_channel( cls, address="cloudasset.googleapis.com:443", credentials=None, **kwargs ): """Create and return a gRPC channel object. Args: address (str): The host for the channel to use. credentials (~.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. kwargs (dict): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return google.api_core.grpc_helpers.create_channel( address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs ) @property def channel(self): """The gRPC channel used by the transport. Returns: grpc.Channel: A gRPC channel object. """ return self._channel @property def export_assets(self): """Return the gRPC stub for :meth:`AssetServiceClient.export_assets`. Exports assets with time and resource types to a given Cloud Storage location. The output format is newline-delimited JSON. This API implements the ``google.longrunning.Operation`` API allowing you to keep track of the export. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].ExportAssets @property def batch_get_assets_history(self): """Return the gRPC stub for :meth:`AssetServiceClient.batch_get_assets_history`. Batch gets the update history of assets that overlap a time window. For RESOURCE content, this API outputs history with asset in both non-delete or deleted status. For IAM\_POLICY content, this API outputs history when the asset and its attached IAM POLICY both exist. This can create gaps in the output history. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].BatchGetAssetsHistory @property def create_feed(self): """Return the gRPC stub for :meth:`AssetServiceClient.create_feed`. Creates a feed in a parent project/folder/organization to listen to its asset updates. 
Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].CreateFeed @property def get_feed(self): """Return the gRPC stub for :meth:`AssetServiceClient.get_feed`. Gets details about an asset feed. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].GetFeed @property def list_feeds(self): """Return the gRPC stub for :meth:`AssetServiceClient.list_feeds`. Lists all asset feeds in a parent project/folder/organization. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].ListFeeds @property def update_feed(self): """Return the gRPC stub for :meth:`AssetServiceClient.update_feed`. Updates an asset feed configuration. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].UpdateFeed @property def delete_feed(self): """Return the gRPC stub for :meth:`AssetServiceClient.delete_feed`. Deletes an asset feed. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ return self._stubs["asset_service_stub"].DeleteFeed
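

# --- Illustrative usage (not part of the original module) -----------------
# Normally AssetServiceClient builds this transport for you; a direct
# sketch, assuming application-default credentials are available in the
# environment.

def example_transport():
    transport = AssetServiceGrpcTransport()  # default address and credentials
    # each property returns the raw gRPC callable bound to the channel
    return transport.list_feeds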
domain="www.example.com", account_key=account_key), achallenges.KeyAuthorizationAnnotatedChallenge( challb=acme_util.chall_to_challb( challenges.HTTP01( token=b"\xba\xa9\xda?<m\xaewmx\xea\xad\xadv\xf4\x02\xc9y" b"\x80\xe2_X\t\xe7\xc7\xa4\t\xca\xf7&\x945" ), "pending"), domain="ipv6.com", account_key=account_key), achallenges.KeyAuthorizationAnnotatedChallenge( challb=acme_u
til.chall_to_challb( challenges.HTTP01( token=b"\x8c\x8a\xbf_-f\\cw\xee\xd6\xf8/\xa5\xe3\xfd" b"\xeb9\xf1\xf5\xb9\xefVM\xc9w\xa4u\x9c\xe1\x87\xb4" ), "pending"), domain="www.example.org", account_key=account_key), achallenges.KeyAuthorizationAnnotatedChallenge( challb=acme_util.chall_to_challb( challenges.HTTP01(token=b"kNdwjxO
eX0I_A8DXt9Msmg"), "pending"), domain="migration.com", account_key=account_key), achallenges.KeyAuthorizationAnnotatedChallenge( challb=acme_util.chall_to_challb( challenges.HTTP01(token=b"kNdwjxOeX0I_A8DXt9Msmg"), "pending"), domain="ipv6ssl.com", account_key=account_key), ] def setUp(self): super().setUp() config = self.get_nginx_configurator( self.config_path, self.config_dir, self.work_dir, self.logs_dir) from certbot_nginx._internal import http_01 self.http01 = http_01.NginxHttp01(config) def test_perform0(self): responses = self.http01.perform() self.assertEqual([], responses) @mock.patch("certbot_nginx._internal.configurator.NginxConfigurator.save") def test_perform1(self, mock_save): self.http01.add_chall(self.achalls[0]) response = self.achalls[0].response(self.account_key) responses = self.http01.perform() self.assertEqual([response], responses) self.assertEqual(mock_save.call_count, 1) def test_perform2(self): acme_responses = [] for achall in self.achalls: self.http01.add_chall(achall) acme_responses.append(achall.response(self.account_key)) http_responses = self.http01.perform() self.assertEqual(len(http_responses), 5) for i in range(5): self.assertEqual(http_responses[i], acme_responses[i]) def test_mod_config(self): self.http01.add_chall(self.achalls[0]) self.http01.add_chall(self.achalls[2]) self.http01._mod_config() # pylint: disable=protected-access self.http01.configurator.save() self.http01.configurator.parser.load() # vhosts = self.http01.configurator.parser.get_vhosts() # for vhost in vhosts: # pass # if the name matches # check that the location block is in there and is correct # if vhost.addrs == set(v_addr1): # response = self.achalls[0].response(self.account_key) # else: # response = self.achalls[2].response(self.account_key) # self.assertEqual(vhost.addrs, set(v_addr2_print)) # self.assertEqual(vhost.names, set([response.z_domain.decode('ascii')])) @mock.patch('certbot_nginx._internal.parser.NginxParser.add_server_directives') def test_mod_config_http_and_https(self, mock_add_server_directives): """A server_name with both HTTP and HTTPS vhosts should get modded in both vhosts""" self.configuration.https_port = 443 self.http01.add_chall(self.achalls[3]) # migration.com self.http01._mod_config() # pylint: disable=protected-access # Domain has an HTTP and HTTPS vhost # 2 * 'rewrite' + 2 * 'return 200 keyauthz' = 4 self.assertEqual(mock_add_server_directives.call_count, 4) @mock.patch('certbot_nginx._internal.parser.nginxparser.dump') @mock.patch('certbot_nginx._internal.parser.NginxParser.add_server_directives') def test_mod_config_only_https(self, mock_add_server_directives, mock_dump): """A server_name with only an HTTPS vhost should get modded""" self.http01.add_chall(self.achalls[4]) # ipv6ssl.com self.http01._mod_config() # pylint: disable=protected-access # It should modify the existing HTTPS vhost self.assertEqual(mock_add_server_directives.call_count, 2) # since there was no suitable HTTP vhost or default HTTP vhost, a non-empty one # should have been created and written to the challenge conf file self.assertNotEqual(mock_dump.call_args[0][0], []) @mock.patch('certbot_nginx._internal.parser.NginxParser.add_server_directives') def test_mod_config_deduplicate(self, mock_add_server_directives): """A vhost that appears in both HTTP and HTTPS vhosts only gets modded once""" achall = achallenges.KeyAuthorizationAnnotatedChallenge( challb=acme_util.chall_to_challb( challenges.HTTP01(token=b"kNdwjxOeX0I_A8DXt9Msmg"), "pending"), domain="ssl.both.com", 
account_key=AUTH_KEY) self.http01.add_chall(achall) self.http01._mod_config() # pylint: disable=protected-access # Should only get called 5 times, rather than 6, because two vhosts are the same self.assertEqual(mock_add_server_directives.call_count, 5*2) def test_mod_config_insert_bucket_directive(self): nginx_conf = self.http01.configurator.parser.abs_path('nginx.conf') expected = ['server_names_hash_bucket_size', '128'] original_conf = self.http01.configurator.parser.parsed[nginx_conf] self.assertFalse(util.contains_at_depth(original_conf, expected, 2)) self.http01.add_chall(self.achalls[0]) self.http01._mod_config() # pylint: disable=protected-access self.http01.configurator.save() self.http01.configurator.parser.load() generated_conf = self.http01.configurator.parser.parsed[nginx_conf] self.assertTrue(util.contains_at_depth(generated_conf, expected, 2)) def test_mod_config_update_bucket_directive_in_included_file(self): # save old example.com config example_com_loc = self.http01.configurator.parser.abs_path('sites-enabled/example.com') with open(example_com_loc) as f: original_example_com = f.read() # modify example.com config modified_example_com = 'server_names_hash_bucket_size 64;\n' + original_example_com with open(example_com_loc, 'w') as f: f.write(modified_example_com) self.http01.configurator.parser.load() # run change self.http01.add_chall(self.achalls[0]) self.http01._mod_config() # pylint: disable=protected-access self.http01.configurator.save() self.http01.configurator.parser.load() # not in nginx.conf expected = ['server_names_hash_bucket_size', '128'] nginx_conf_loc = self.http01.configurator.parser.abs_path('nginx.conf') nginx_conf = self.http01.configurator.parser.parsed[nginx_conf_loc] self.assertFalse(util.contains_at_depth(nginx_conf, expected, 2)) # is updated in example.com conf generated_conf = self.http01.configurator.parser.parsed[example_com_loc] self.assertTrue(util.contains_at_depth(generated_conf, expected, 0)) # put back example.com config with open(example_com_loc, 'w') as f: f.write(original_example_com) self.http01.configurator.parser.load() @mock.patch("certbot_nginx._internal.configurator.NginxConfigurator.ipv6_info") def test_default_listen_addresses_no_memoization(self, ipv6_info): # pylint: disable=protected-access ipv6_info.return_value = (True, True) self.http01._default_listen_addresses() self.assertEqual(ipv6_info.call_count, 1) ipv6_info.return_value = (False, False) self.http01._default_listen_addresses() self.assertEqual(ipv6_info.call_count, 2) @mock.patch("certbot_nginx._internal.configu
def grade(tid, answer):
    if "'twas_sum_EZ_programming,_am_I_rite?" in answer:
        return { "correct": True, "message": "Nice job!" }
    return { "correct": False, "message": "If you're confused, read some tutorials :)" }
####################################################################################################
# Copyright (C) 2016 by Ingo Keller, Katrin Lohan                                                  #
# <brutusthetschiepel@gmail.com>                                                                   #
#                                                                                                  #
# This file is part of pyJD (Python/Yarp Tools for the JD robot).                                  #
#                                                                                                  #
# pyJD is free software: you can redistribute it and/or modify it under the terms of the          #
# GNU Affero General Public License as published by the Free Software Foundation, either          #
# version 3 of the License, or (at your option) any later version.                                #
#                                                                                                  #
# pyJD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;               #
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.       #
# See the GNU Affero General Public License for more details.                                     #
#                                                                                                  #
# You should have received a copy of the GNU Affero General Public License                        #
# along with pyJD. If not, see <http://www.gnu.org/licenses/>.                                    #
####################################################################################################
import argparse
import socket
import time

import yarp


EMSG_YARP_NOT_FOUND = "Could not connect to the yarp server. Try running 'yarp detect'."
EMSG_ROBOT_NOT_FOUND = 'Could not connect to the robot at %s:%s'


class EZModule(yarp.RFModule):
    """ The EZModule class provides a base class for developing modules
    for the JD robot.
    """

    # Default IP Address and Port for the JD Humanoid Robot.
    TCP_IP = '192.168.1.1'
    TCP_PORT = 23

    # Existing motor IDs are D0-D9, D12-D14 and D16-D18; LIMITS below holds
    # more (min, max) entries than there are motors.
    LIMITS = [ (30, 180), (70, 170), (0, 170), (0, 170), (0, 60),  (0, 180),
               (0, 90),   (0, 60),   (0, 180), (0, 180), (0, 180), (0, 160),
               (0, 180),  (0, 130),  (0, 180), (0, 160), (0, 180), (50, 130),
               (0, 180),  (0, 180),  (0, 180) ]

    def __init__(self, ip, port, prefix):
        yarp.RFModule.__init__(self)
        self.ip = ip
        self.port = int(port)
        self.prefix = prefix
#        self.last_pos = [-1] * len(EZModule.LIMITS)

    def configure(self, rf):

        name = self.__class__.__name__
        if self.prefix:
            name = self.prefix + '/' + name

        self.setName(name)

        # RPC Port
        self.rpc_port = yarp.RpcServer()

        # name settings
        port_name = '/%s/%s' % (name, 'rpc')

        if not self.rpc_port.open(port_name):
            raise RuntimeError, EMSG_YARP_NOT_FOUND

        self.attach_rpc_server(self.rpc_port)

        return True

    def interruptModule(self):
        self.rpc_port.interrupt()
        for x in dir(self):
            if x.endswith('Port') and 'interrupt' in dir(getattr(self, x)):
                getattr(self, x).interrupt()
        return True

    def close(self):
        self.rpc_port.close()
        for x in dir(self):
            if x.endswith('Port') and 'close' in dir(getattr(self, x)):
                getattr(self, x).close()
        return True

    def getPeriod(self):
        return 0.1

    def updateModule(self):
        # XXX: I do not know why we need that, but if the method is empty
        #      the module gets stuck
        time.sleep(0.000001)
        return True

    def createInputPort(self, name, mode = 'unbuffered'):
        """ This method returns an input port.

        @param name - gets appended (with ':i') to the module's name to form
                      the port name
        @param mode - 'buffered', 'rpcclient', 'rpcserver' or
                      'unbuffered' (default)
        @result port
        """
        return self.__createPort(name + ':i', None, mode)

    def __createPort(self, name, target = None, mode = 'unbuffered'):
        """ This method returns a port object.

        @param name   - yarp name for the port
        @param target - optional port name to connect the new port to
        @param mode   - 'buffered', 'rpcclient', 'rpcserver' or
                        'unbuffered' (default)
        @result port
        """
        # create port
        if mode == 'buffered':
            port = yarp.BufferedPortBottle()
        elif mode == 'rpcclient':
            port = yarp.RpcClient()
        elif mode == 'rpcserver':
            port = yarp.RpcServer()
        else:
            port = yarp.Port()

        # build port name
        port_name = ['']

        # prefix handling
        if hasattr(self, 'prefix') and self.prefix:
            port_name.append(self.prefix)

        port_name.append(self.__class__.__name__)
        port_name.append(name)

        # open port
        if not port.open('/'.join(port_name)):
            raise RuntimeError, EMSG_YARP_NOT_FOUND

        # add output if given
        if target:
            port.addOutput(target)

        if hasattr(self, '_ports'):
            self._ports.append(port)

        return port

    def createOutputPort(self, name, target = None, mode = 'unbuffered'):
        """ This method returns an output port.

        @param name   - gets appended (with ':o') to the module's name to
                        form the port name
        @param target - optional port name to connect the output to
        @param mode   - 'buffered', 'rpcclient', 'rpcserver' or
                        'unbuffered' (default)
        @result port
        """
        return self.__createPort(name + ':o', target, mode)


####################################################################################################
#
# Default methods for running the modules standalone
#
####################################################################################################
def createArgParser():
    """ This method creates a base argument parser.

    @return Argument Parser object
    """
    parser = argparse.ArgumentParser(description='Create a JDModule to control the JD robot.')
    parser.add_argument( '-i', '--ip',
                         dest    = 'ip',
                         default = str(EZModule.TCP_IP),
                         help    = 'IP address for the JD robot.')
    parser.add_argument( '-p', '--port',
dest = 'port', default = str(EZModule.TCP_PORT), help = 'Port for the JD robot') parser.add_argument( '-n', '--name',
dest = 'name', default = '', help = 'Name prefix for Yarp port names') return parser.parse_args() def main(module_cls): """ This is a main method to run a module from command line. @param module_cls - an EZModule based class that can be started as a standalone module. """ args = createArgParser() yarp.Network.init() resource_finder = yarp.ResourceFinder() resource_finder.setVerbose(True) # resource_finder.configure(argc,argv); module = module_cls(args.ip, args.port, args.name) module.runModule(resource_finder) yarp.Network.fini()
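
# --- Hedged usage sketch (not part of pyJD): a minimal module built on the
# EZModule base above, using only what this file defines. The port name and
# forwarding behaviour are illustrative.
class EchoModule(EZModule):
    """ Toy module that forwards bottles from an input to an output port. """

    def configure(self, rf):
        EZModule.configure(self, rf)
        self.dataInPort = self.createInputPort('data', mode = 'buffered')
        self.dataOutPort = self.createOutputPort('data')
        return True

    def updateModule(self):
        bottle = self.dataInPort.read(False)    # non-blocking read
        if bottle is not None:
            self.dataOutPort.write(bottle)
        return EZModule.updateModule(self)

# Running it standalone would then mirror the main() helper above:
# if __name__ == '__main__':
#     main(EchoModule)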
from sys import argv

script, user_name = argv

# Declare the text of the prompt shown to the user
# for every request for input
prompt = '> '

print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
# The prompt '> ' is shown to the user when they are asked for input
likes = raw_input(prompt)

print "Where do you live %s?" % user_name
# The prompt '> ' is shown to the user when they are asked for input
lives = raw_input(prompt)

print "What kind of computer do you have?"
# The prompt '> ' is shown to the user when they are asked for input
computer = raw_input(prompt)

print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer)
ain'].vocab
        self._data_dict['valid'] = Text(self.timesteps, valid_path, vocab=vocab)
        return self._data_dict


class PTB(Dataset):
    """
    Penn Treebank data set from http://arxiv.org/pdf/1409.2329v5.pdf

    Arguments:
        timesteps (int): number of timesteps to embed the data
        onehot_input (bool): whether to use a one-hot representation for the input
        tokenizer (str): name of the tokenizer function within this class
                         to use on the data
    """
    def __init__(self, timesteps, path='.', onehot_input=True, tokenizer=None,
                 reverse_target=False, get_prev_target=False):
        url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data'
        self.filemap = {'train': 5101618,
                        'test': 449945,
                        'valid': 399782}
        keys = list(self.filemap.keys())
        filenames = [self.gen_filename(phase) for phase in keys]
        sizes = [self.filemap[phase] for phase in keys]
        super(PTB, self).__init__(filenames,
                                  url,
                                  sizes,
                                  path=path)
        self.timesteps = timesteps
        self.onehot_input = onehot_input
        self.tokenizer = tokenizer
        if tokenizer is not None:
            assert hasattr(self, self.tokenizer)
            self.tokenizer_func = getattr(self, self.tokenizer)
        else:
            self.tokenizer_func = None
        self.reverse_target = reverse_target
        self.get_prev_target = get_prev_target

    @staticmethod
    def newline_tokenizer(s):
        """
        Tokenizer which breaks on newlines.

        Arguments:
            s (str): String to tokenize.

        Returns:
            str: String with "<eos>" in place of newlines.
        """
        # replace newlines with '<eos>' so that
        # the newlines count as words
        return s.replace('\n', '<eos>').split()

    @staticmethod
    def gen_filename(phase):
        """
        Filename generator.

        Arguments:
            phase(str): Phase

        Returns:
            string: ptb.<phase>.txt
        """
        return 'ptb.%s.txt' % phase

    def load_data(self):
        self.file_paths = {}
        for phase in self.filemap:
            fn = self.gen_filename(phase)
            size = self.filemap[phase]
            self.file_paths[phase] = self.load_zip(fn, size)
        return self.file_paths

    def gen_iterators(self):
        self.load_data()

        self._data_dict = {}
        self.vocab = None
        for phase in ['train', 'test', 'valid']:
            file_path = self.file_paths[phase]
            # NOTE: compare strings with '==', not 'is'; identity of string
            # literals is an implementation detail.
            get_prev_target = self.get_prev_target if phase == 'train' else False
            self._data_dict[phase] = Text(self.timesteps,
                                          file_path,
                                          tokenizer=self.tokenizer_func,
                                          onehot_input=self.onehot_input,
                                          vocab=self.vocab,
                                          reverse_target=self.reverse_target,
                                          get_prev_target=get_prev_target)
            if self.vocab is None:
                self.vocab = self._data_dict['train'].vocab
        return self._data_dict


class HutterPrize(Dataset):
    """
    Hutter Prize data set from http://prize.hutter1.net/
    """
    def __init__(self, path='.'):
        super(HutterPrize, self).__init__('enwik8.zip',
                                          'http://mattmahoney.net/dc',
                                          35012219,
                                          path=path)

    def load_data(self):
        self.filepath = self.load_zip(self.filename, self.size)
        return self.filepath


class IMDB(Dataset):
    """
    IMDB data set from http://www.aclweb.org/anthology/P11-1015
    """
    def __init__(self, vocab_size, sentence_length, path='.'):
        url = 'https://s3.amazonaws.com/text-datasets'
        super(IMDB, self).__init__('imdb.pkl',
                                   url,
                                   33213513,
                                   path=path)
        self.vocab_size = vocab_size
        self.sentence_length = sentence_length
        self.filepath = None

    def load_data(self):
        self.filepath = self.load_zip(self.filename, self.size)
        return self.filepath

    def gen_iterators(self):
        if self.filepath is None:
            self.load_data()

        data = pad_data(self.filepath,
                        vocab_size=self.vocab_size,
                        sentence_length=self.sentence_length)
        (X_train, y_train), (X_test, y_test), nclass = data
        self._data_dict = {'nclass': nclass}
        self._data_dict['train'] = ArrayIterator(X_train, y_train, nclass=2)
        self._data_dict['test'] = ArrayIterator(X_test, y_test, nclass=2)
        return self._data_dict


class SICK(Dataset):
    """
    Semantic Similarity dataset from qcri.org (Semeval 2014).
    Arguments:
        path (str): path to SICK_data directory
    """
    def __init__(self, path='SICK_data/'):
        url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/'
        self.filemap = {'train': 87341,
                        'test_annotated': 93443,
                        'trial': 16446}
        keys = list(self.filemap.keys())
        self.zip_paths = None
        self.file_paths = [self.gen_filename(phase) for phase in keys]
        self.sizes = [self.filemap[phase] for phase in keys]
        super(SICK, self).__init__(filename=self.file_paths,
                                   url=url,
                                   size=self.sizes,
                                   path=path)

    @staticmethod
    def gen_zipname(phase):
        """
        Zip filename generator.

        Arguments:
            phase(str): Phase of training/evaluation

        Returns:
            string: sick_<phase>.zip
        """
        return "sick_{}.zip".format(phase)

    @staticmethod
    def gen_filename(phase):
        """
        Filename generator for the extracted zip files.

        Arguments:
            phase(str): Phase of training/evaluation

        Returns:
            string: SICK_<phase>.txt
        """
        return "SICK_{}.txt".format(phase)

    def load_data(self):
        """
        Conditional data loader will download and extract zip files if not found locally.
        """
        self.zip_paths = {}
        for phase in self.filemap:
            zn = self.gen_zipname(phase)
            size = self.filemap[phase]
            self.zip_paths[phase] = self.load_zip(zn, size)
        return self.zip_paths

    def load_eval_data(self):
        """
        Load the SICK semantic-relatedness dataset.

        Data is a tab-delimited txt file whose columns are:
        pair ID, Sentence1, Sentence2, Score (hence the text[1:4]
        indexing below).

        Data is downloaded and extracted from zip files if not found in the
        directory specified by self.path.

        Returns:
            tuple of tuples of np.array: three tuples containing A & B
            sentences for train, dev, and test, along with a fourth tuple
            containing the scores for each AB pair.
        """
        if self.zip_paths is None:
            self.load_data()

        trainA, trainB, devA, devB, testA, testB = [], [], [], [], [], []
        trainS, devS, testS = [], [], []

        with open(self.path + self.gen_filename('train'), 'rb') as f:
            for line in f:
                text = line.strip().split(b'\t')
                trainA.append(text[1])
                trainB.append(text[2])
                trainS.append(text[3])

        with open(self.path + self.gen_filename('trial'), 'rb') as f:
            for line in f:
                text = line.strip().split(b'\t')
                devA.append(text[1])
                devB.append(text[2])
                devS.append(text[3])

        with open(self.path + self.gen_filename('test_annotated'), 'rb') as f:
            for line in f:
                text = line.strip().split(b'\t')
                testA.append(text[1])
# Playlist.py
#
# reads all available playlists, adjusts song paths, removes songs that were
# not copied, writes the resulting playlist to the destination

import mlsSong as sng
import config

import glob
import os
import sys
import codecs


def Playlist():

    # get a list of all playlists
    playlists = glob.glob(config.SOURCE_PLAYLISTFOLDER + "\\*.m3u*")

    # keep only the file name
    for (i, playlist) in enumerate(playlists):
        (filepath, filename) = os.path.split(playlist)
        playlists[i] = filename

    # Winamp fail: playlists are saved with pretty random-looking names.
    # Look up the new names in a look-up file. Playlists that are not found
    # won't be copied.
    for oldPlaylist in playlists:
        newPlaylist = ""
        for lutPlaylist in config.PLAYLIST_LUT:
            print oldPlaylist
            print lutPlaylist[0]
            if lutPlaylist[0] == oldPlaylist:
                newPlaylist = lutPlaylist[1]
                print "Playlist name conversion: from", oldPlaylist, "to", newPlaylist
                break

        if newPlaylist == "":
            print "No playlist name conversion found for", oldPlaylist
            # skip this playlist but keep processing the remaining ones;
            # a 'break' here would abort the whole loop
            continue

        # "s" as in Source_playlist
        # -------------------------
        # open source playlist
        try:
            s = codecs.open(config.SOURCE_PLAYLISTFOLDER + "\\" + oldPlaylist, 'r', encoding='UTF-8')
##            s = open(config.SOURCE_PLAYLISTFOLDER + "\\" + oldPlaylist, 'r')
        except:
            print "Playlist", oldPlaylist, "could not be read!"
            continue

        # "d" as in Destination_playlist
        # ------------------------------
        # check if destination playlist file already exists
        try:
            d = open(config.DEST_PLAYLISTFOLDER + "\\" + newPlaylist, 'r')
        except:
            # file does not exist, create it
            d = open(config.DEST_PLAYLISTFOLDER + "\\" + newPlaylist, 'w')
        else:
            # file already exists, delete it and create a new one
            d.close()
            os.remove(config.DEST_PLAYLISTFOLDER + "\\" + newPlaylist)
            d = open(config.DEST_PLAYLISTFOLDER + "\\" + newPlaylist, 'w')

        # write header line
        d.write("#EXTM3U\n")

        # read first line, it should be '#EXTM3U'
        b = s.readline()
        print b
        if b == '#EXTM3U\r\n':
            print "EXTM3U playlist."
            extm3u = True
        else:
            extm3u = False
            # I'm pretty sure b is already the first song, so don't read another
            # line before properly processing it
        skipFirst = True

        for lines in s:
            if extm3u:
                a = s.readline()    # 'EXTINF:' song.trackLength,Artist - Title
                                    # This line can be left unchanged.
                if not skipFirst:
                    b = s.readline()
                    # file path: strip SOURCE_MUSICFOLDER, replace it with DEST_MUSICFOLDER
                    print b
                    b = b.replace(config.SOURCE_MUSICFOLDER, config.DEST_MUSICFOLDER)
                    print b
                else:
                    skipFirst = False

            # process b:
            # - if b is a relative path, convert it to absolute
            #   ... TO DO
            # - find song, where config.songList[x].fileNameOld = b
            #   ... TO DO
            # - if config.songList[x].added == 0: continue (song was not copied; don't add it to playlist)
            #   ... TO DO

            # write new path to b
            b = config.songList[x].fileNameNew + "\n"

            if not extm3u:
                # create line a
                a = "EXTINF:" + config.songList[x].trackLength + ","
                a = a + config.songList[x].trackArtist + " - "
                a = a + config.songList[x].trackTitle + "\n"

            d.write(a)
            d.write(b)

        s.close()
        d.close()
# -*- coding: utf-8 -*- """ celery.utils.mail ~~~~~~~~~~~~~~~~~ How task error emails are formatted and sent. """ from __future__ import absolute_import import sys import smtplib import socket import traceback import warnings from email.mime.text import MIMEText from .functional import maybe_list from .imports import symbol_by_name supports_timeout = sys.version_info >= (2, 6) _local_hostname = None def get_local_hostname(): global _local_hostname if _local_hostname is None: _local_hostname = socket.getfqdn() return _local_hostname class SendmailWarning(UserWarning): """Problem happened while sending the email message.""" class Message(object): def __init__(self, to=None, sender=None, subject=None, body=None, charset='us-ascii'): self.to = maybe_list(to) self.sender = sender self.subject = subject self.body = body self.charset = charset def __repr__(self): return '<Email: To:%r Subject:%r>' % (self.to, self.subject) def __str__(self): msg = MIMEText(self.body, 'plain', self.charset) msg['Subject'] = self.subject msg['From'] = self.sender msg['To'] = ', '.join(self.to) return msg.as_string() class Mailer(object): supports_timeout = supports_timeout def __init__(self, host='localhost', port=0, user=None, password=None, timeout=2, use_ssl=False, use_tls=False): self.host = host self.port = port self.user = user self.password = password self.timeout = timeout self.use_ssl = use_ssl self.use_tls = use_tls def send(self, message, fail_silently=False): try: if self.supports_timeout: self._se
nd(message, timeout=self.timeout) else:
                import socket
                old_timeout = socket.getdefaulttimeout()
                socket.setdefaulttimeout(self.timeout)
                try:
                    self._send(message)
                finally:
                    socket.setdefaulttimeout(old_timeout)
        except Exception, exc:
            if not fail_silently:
                raise
            warnings.warn(SendmailWarning(
                'Mail could not be sent: %r %r\n%r' % (
                    exc, {'To': ', '.join(message.to),
                          'Subject': message.subject},
                    traceback.format_stack())))

    def _send(self, message, **kwargs):
        Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
        client = Client(self.host, self.port,
                        local_hostname=get_local_hostname(), **kwargs)

        if self.use_tls:
            client.ehlo()
            client.starttls()
            client.ehlo()

        if self.user and self.password:
            client.login(self.user, self.password)

        client.sendmail(message.sender, message.to, str(message))
        try:
            client.quit()
        except socket.sslerror:
            client.close()


class ErrorMail(object):
    """Defines how and when task error e-mails should be sent.

    :param task: The task instance that raised the error.

    :attr:`subject` and :attr:`body` are format strings which
    are passed a context containing the following keys:

    * name
        Name of the task.

    * id
        UUID of the task.

    * exc
        String representation of the exception.

    * args
        Positional arguments.

    * kwargs
        Keyword arguments.

    * traceback
        String representation of the traceback.

    * hostname
        Worker hostname.

    """

    # pep8.py borks on an inline signature separator and
    # says "trailing whitespace" ;)
    EMAIL_SIGNATURE_SEP = '-- '

    #: Format string used to generate error email subjects.
    subject = """\
[celery@%(hostname)s] Error: Task %(name)s (%(id)s): %(exc)s
"""

    #: Format string used to generate error email content.
    body = """
Task %%(name)s with id %%(id)s raised exception:\n%%(exc)r

Task was called with args: %%(args)s kwargs: %%(kwargs)s.

The contents of the full traceback was:

%%(traceback)s

%(EMAIL_SIGNATURE_SEP)s
Just to let you know,
py-celery at %%(hostname)s.
""" % {'EMAIL_SIGNATURE_SEP': EMAIL_SIGNATURE_SEP}

    error_whitelist = None

    def __init__(self, task, **kwargs):
        self.task = task
        self.email_subject = kwargs.get('subject', self.subject)
        self.email_body = kwargs.get('body', self.body)
        self.error_whitelist = getattr(task, 'error_whitelist', None) or ()

    def should_send(self, context, exc):
        """Returns true or false depending on whether a task error mail
        should be sent for this type of error."""
        allow_classes = tuple(map(symbol_by_name, self.error_whitelist))
        return not self.error_whitelist or isinstance(exc, allow_classes)

    def format_subject(self, context):
        return self.subject.strip() % context

    def format_body(self, context):
        return self.body.strip() % context

    def send(self, context, exc, fail_silently=True):
        if self.should_send(context, exc):
            self.task.app.mail_admins(self.format_subject(context),
                                      self.format_body(context),
                                      fail_silently=fail_silently)
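
# --- Hedged usage sketch (not part of celery.utils.mail): wiring Message and
# Mailer together. Host, addresses and subject below are placeholders.
if __name__ == '__main__':
    msg = Message(to=['ops@example.com'],
                  sender='celery@example.com',
                  subject='test',
                  body='hello from Mailer')
    mailer = Mailer(host='localhost', port=25, timeout=2)
    # fail_silently=True turns SMTP errors into SendmailWarning warnings
    mailer.send(msg, fail_silently=True)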
from unidown.tools import unlink_dir_rec class TestD
eleteDirRec: def test_non_existence(self, tmp_path): no_folder = tmp_path.joinpath("./donotexist/")
assert not no_folder.exists() unlink_dir_rec(no_folder) assert not no_folder.exists() def test_recursive(self, tmp_path): for number in range(1, 4): with tmp_path.joinpath(str(number)).open('w'): pass sub_folder = tmp_path.joinpath("sub") sub_folder.mkdir(parents=True, exist_ok=True) for number in range(1, 4): with sub_folder.joinpath(str(number)).open('w'): pass tmp_path.joinpath("sub2").mkdir() unlink_dir_rec(tmp_path) assert not tmp_path.exists()
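
# --- Hedged sketch (an assumption, not the real unidown implementation): a
# pathlib-based unlink_dir_rec consistent with the behaviour the tests above
# expect - silently accept missing paths and remove the tree, root included.
from pathlib import Path

def unlink_dir_rec_sketch(path: Path) -> None:
    """Delete a directory tree; do nothing if it does not exist."""
    if not path.exists() or not path.is_dir():
        return
    for item in path.iterdir():
        if item.is_dir():
            unlink_dir_rec_sketch(item)
        else:
            item.unlink()
    path.rmdir()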
"""Validators class.""" # -*- coding: utf-8 -*- from wtforms import ValidationError class Uni
queValidator(object): """Validador para chequear variables unicas.""" def __init__(self, model, field, message=None): s
elf.model = model self.field = field if not message: message = u'Existe otro Elemento con el mismo valor.' self.message = message def __call__(self, form, field): _id = None params = {self.field: field.data, 'deleted': False} existing = self.model.objects.filter(**params).first() if 'id' in form.data: _id = str(form.id.data) if existing and (_id is None or _id != str(existing.id)): raise ValidationError(self.message)
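
# --- Hedged usage sketch (illustrative only): attaching UniqueValidator to a
# WTForms field. Account is a stand-in for any model exposing a MongoEngine
# style `objects` manager with filter(...).first(), which is what the
# validator above assumes.
from wtforms import Form, StringField

class _FakeQuerySet(object):
    def filter(self, **params):
        return self

    def first(self):
        return None          # pretend nothing matches, so the field validates

class Account(object):
    """Stand-in model with a MongoEngine-style `objects` manager."""
    objects = _FakeQuerySet()

class AccountForm(Form):
    email = StringField('email', validators=[
        UniqueValidator(model=Account, field='email',
                        message=u'That email is already registered.'),
    ])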
questPayer='requester') assert (response['Grants'][1]['Permission'] == 'READ') and ( response['Grants'][0]['Permission'] == 'FULL_CONTROL' ) def test_load_fileobj(self, s3_bucket): hook = S3Hook() with tempfile.TemporaryFile() as temp_file: temp_file.write(b"Content") temp_file.seek(0) hook.load_file_obj(temp_file, "my_key", s3_bucket) resource = boto3.resource('s3').Object(s3_bucket, 'my_key') # pylint: disable=no-member assert resource.get()['Body'].read() == b'Content' def test_load_fileobj_acl(self, s3_bucket): hook = S3Hook() with tempfile.TemporaryFile() as temp_file: temp_file.write(b"Content") temp_file.seek(0) hook.load_file_obj(temp_file, "my_key", s3_bucket, acl_policy='public-read') response = boto3.client('s3').get_object_acl( Bucket=s3_bucket, Key="my_key", RequestPayer='requester' ) # pylint: disable=no-member # noqa: E501 # pylint: disable=C0301 assert (response['Grants'][1]['Permission'] == 'READ') and ( response['Grants'][0]['Permission'] == 'FULL_CONTROL' ) def test_load_file_gzip(self, s3_bucket): hook = S3Hook() with tempfile.NamedTemporaryFile(delete=False) as temp_file: temp_file.write(b"Content") temp_file.seek(0) hook.load_file(temp_file.name, "my_key", s3_bucket, gzip=True) resource = boto3.resource('s3').Object(s3_bucket, 'my_key') # pylint: disable=no-member assert gz.decompress(resource.get()['Body'].read()) == b'Content' os.unlink(temp_file.name) def test_load_file_acl(self, s3_bucket): hook = S3Hook() with tempfile.NamedTemporaryFile(delete=False) as temp_file: temp_file.write(b"Content") temp_file.seek(0) hook.load_file(temp_file.name, "my_key", s3_bucket, gzip=True, acl_policy='public-read') response = boto3.client('s3').get_object_acl( Bucket=s3_bucket, Key="my_key", RequestPayer='requester' ) # pylint: disable=no-member # noqa: E501 # pylint: disable=C0301 assert (response['Grants'][1]['Permission'] == 'READ') and ( response['Grants'][0]['Permission'] == 'FULL_CONTROL' ) os.unlink(temp_file.name) def test_copy_object_acl(self, s3_bucket): hook = S3Hook() with tempfile.NamedTemporaryFile() as temp_file: temp_file.write(b"Content") temp_file.seek(0) hook.load_file_obj(temp_file, "my_key", s3_bucket) hook.copy_object("my_key", "my_key", s3_bucket, s3_bucket) response = boto3.client('s3').get_object_acl( Bucket=s3_bucket, Key="my_key", RequestPayer='requester' ) # pylint: disable=no-member # noqa: E501 # pylint: disable=C0301 assert (response['Grants'][0]['Permission'] == 'FULL_CONTROL') and (len(response['Grants']) == 1) @mock_s3 def test_delete_bucket_if_bucket_exist(self, s3_bucket): # assert if the bucket is created mock_hook = S3Hook() mock_hook.create_bucket(bucket_name=s3_bucket) assert mock_hook.check_for_bucket(bucket_name=s3_bucket) mock_hook.delete_bucket(bucket_name=s3_bucket, force_delete=True) assert not mock_hook.check_for_bucket(s3_bucket) @mock_s3 def test_delete_bucket_if_not_bucket_exist(self, s3_bucket): # assert if exception is raised if bucket not present mock_hook = S3Hook() with pytest.raises(ClientError) as ctx: assert mock_hook.dele
te_bucket(bucket_name=s3_bucket, force_delete=True) assert ctx.value.response['Error']['Code'] == 'NoSuchBucket' @mock.patch.object(S3Hook, 'get_connection', return_value=Connection(schema='test_bucket')) def test_provide_bucket_name(self, mock_get_co
nnection): class FakeS3Hook(S3Hook): @provide_bucket_name def test_function(self, bucket_name=None): return bucket_name fake_s3_hook = FakeS3Hook() test_bucket_name = fake_s3_hook.test_function() assert test_bucket_name == mock_get_connection.return_value.schema test_bucket_name = fake_s3_hook.test_function(bucket_name='bucket') assert test_bucket_name == 'bucket' def test_delete_objects_key_does_not_exist(self, s3_bucket): hook = S3Hook() with pytest.raises(AirflowException) as ctx: hook.delete_objects(bucket=s3_bucket, keys=['key-1']) assert isinstance(ctx.value, AirflowException) assert str(ctx.value) == "Errors when deleting: ['key-1']" def test_delete_objects_one_key(self, mocked_s3_res, s3_bucket): key = 'key-1' mocked_s3_res.Object(s3_bucket, key).put(Body=b'Data') hook = S3Hook() hook.delete_objects(bucket=s3_bucket, keys=[key]) assert [o.key for o in mocked_s3_res.Bucket(s3_bucket).objects.all()] == [] def test_delete_objects_many_keys(self, mocked_s3_res, s3_bucket): num_keys_to_remove = 1001 keys = [] for index in range(num_keys_to_remove): key = f'key-{index}' mocked_s3_res.Object(s3_bucket, key).put(Body=b'Data') keys.append(key) assert sum(1 for _ in mocked_s3_res.Bucket(s3_bucket).objects.all()) == num_keys_to_remove hook = S3Hook() hook.delete_objects(bucket=s3_bucket, keys=keys) assert [o.key for o in mocked_s3_res.Bucket(s3_bucket).objects.all()] == [] def test_unify_bucket_name_and_key(self): class FakeS3Hook(S3Hook): @unify_bucket_name_and_key def test_function_with_wildcard_key(self, wildcard_key, bucket_name=None): return bucket_name, wildcard_key @unify_bucket_name_and_key def test_function_with_key(self, key, bucket_name=None): return bucket_name, key @unify_bucket_name_and_key def test_function_with_test_key(self, test_key, bucket_name=None): return bucket_name, test_key fake_s3_hook = FakeS3Hook() test_bucket_name_with_wildcard_key = fake_s3_hook.test_function_with_wildcard_key('s3://foo/bar*.csv') assert ('foo', 'bar*.csv') == test_bucket_name_with_wildcard_key test_bucket_name_with_key = fake_s3_hook.test_function_with_key('s3://foo/bar.csv') assert ('foo', 'bar.csv') == test_bucket_name_with_key with pytest.raises(ValueError) as ctx: fake_s3_hook.test_function_with_test_key('s3://foo/bar.csv') assert isinstance(ctx.value, ValueError) @mock.patch('airflow.providers.amazon.aws.hooks.s3.NamedTemporaryFile') def test_download_file(self, mock_temp_file): mock_temp_file.return_value.__enter__ = Mock(return_value=mock_temp_file) s3_hook = S3Hook(aws_conn_id='s3_test') s3_hook.check_for_key = Mock(return_value=True) s3_obj = Mock() s3_obj.download_fileobj = Mock(return_value=None) s3_hook.get_key = Mock(return_value=s3_obj) key = 'test_key' bucket = 'test_bucket' s3_hook.download_file(key=key, bucket_name=bucket) s3_hook.check_for_key.assert_called_once_with(key, bucket) s3_hook.get_key.assert_called_once_with(key, bucket) s3_obj.download_fileobj.assert_called_once_with(mock_temp_file) def test_generate_presigned_url(self, s3_bucket): hook = S3Hook() presigned_url = hook.generate_presigned_url( client_method="get_object", params={'Bucket': s3_bucket, 'Key': "my_key"} ) url = presigned_url.split("?")[1] params = {x[0]: x[1] for x in [x.split("=") for x in url[0:].split("&")]} assert {"AWSAccessKeyId", "Signature", "Expires"}.issubset(set(params.keys())) def test_should_throw_error_if_extra_args_is_not_dict(self): with pytest.raises(ValueError): S3Hook(extra_args=1) def test_s
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-10-02 21:26 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("order", "0004_auto_20160111_1108"), ("wellsfargo", "0007_financingplan_advertising_enabled"), ] operations = [ migrations.CreateModel( name="FraudScreenResult", fields=[ ( "id", models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ),
), ( "screen_type", models.CharField(max_length=25, verbose_name="Fraud Screen Type"), ), ( "decision", models.CharField( choices=[ ("REJECT", "Transaction was rejected"), ("ACCEPT", "Transaction was accepted"), ("ERROR", "Error occurred while running fraud screen"), ], max_length=25, verbose_name="Decision", ), ), ("message", models.TextField(verbose_name="Message")), ("created_datetime", models.DateTimeField(auto_now_add=True)), ("modified_datetime", models.DateTimeField(auto_now=True)), ( "order", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="order.Order" ), ), ], options={ "ordering": ("-created_datetime", "-id"), }, ), ]
#-*- coding: utf-8 -*-
import os, re
from traceback import format_exc as fme
from exprParser import Parser, ParserContext


class Shell:
    echo = False
    echoStrs = {
        'on': True,
        'off': False,
        'true': True,
        'false': False,
    }
    commands = {}
    history = []
    values = []
    ops = []

    def processEchoCommand(self, args):
        if len(args) == 0:
            return
        echoStateStr = args[0].lower()
        try:
            self.echo = self.echoStrs[echoStateStr]
            print 'echo = %r' % (self.echo, )
        except Exception, e:
            self.error('invalid echo setting value %s' % (echoStateStr, ))

    def processExitCommand(self, args):
        self.msg('bye!')
        exit()

    def makeHistoryCommandArgs(self, args):
        h = self.history
        if len(args) > 0:
            arg = args[0]
            if arg.isdigit():
                return int(arg)
        return len(h)

    def processHistoryCommand(self, args):
        h = self.history
        historyLen = self.makeHistoryCommandArgs(args)
        for item, i in zip(h, reversed(range(historyLen))):
            self.msg('%d. %s' % (i + 1, item))

    def processOps(self, args):
        for op in self.ops:
            self.msg(op)

    def msg(self, txt):
        print txt

    def error(self, msg):
        print msg

    def installCommands(self):
        c = self.commands
        c[':echo'] = self.processEchoCommand
        c[':exit'] = self.processExitCommand
        c[':history'] = self.processHistoryCommand
        c[':ops'] = self.processOps

    def inputOperation(self, userInput):
        parser = Parser()
        context = ParserContext()
        context.unnamedVariables = self.values
        parser.context = context
        parser.parse(userInput)
        d = parser.ret
        self.values.append(d)
        self.msg('$%d=' % (len(self.values), ))
        self.msg(str(d))
        #self.printDeterminant(self.values[-1])
        return True

    def isValidDeterminant(self, d):
        rl = -1
        for r in d:
            if rl == -1:
                rl = len(r)
            elif len(r) != rl:
                self.msg('invalid determinant')
                return False
        return True

    def printDeterminant(self, d):
        msg = ''
        for r in d:
            msg += '|'
            for e in r:
                msg += str(e) + '\t'
            msg += '|\n'
        self.msg(msg)

    def processOperationInput(self, userInput):
        userInput = userInput.strip()
        return self.inputOperation(userInput)

    def runShell(self):
        self.installCommands()
        while 1:
            userInput = raw_input('>>')
            if len(userInput.strip()) == 0:
                continue
            if True == self.echo:
                self.msg(userInput)
            inputs = userInput.split(' ')
            if len(inputs) > 0:
                cmdName = inputs[0]
                if cmdName in self.commands:
                    try:
                        self.history.append(userInput)
                        self.commands[cmdName](inputs[1:])
                    except Exception, e:
                        print e
                        print fme()
                elif self.processOperationInput(userInput):
                    self.ops.append(userInput)
                else:
                    self.error('unknown command/operation "%s"' % (userInput))


if __name__ == '__main__':
    s = Shell()
    s.runShell()
##################################################################### # u1.py # # (c) Copyright 2021, Benjamin Parzella. All rights reserved. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Softwa
re Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. ##################################################################### """SECS 1 byte unsigned integer variable
type.""" from .base_number import BaseNumber class U1(BaseNumber): """ Secs type for 1 byte unsigned data. :param value: initial value :type value: list/integer :param count: number of items this value :type count: integer """ format_code = 0o51 text_code = "U1" _base_type = int _min = 0 _max = 255 _bytes = 1 _struct_code = "B" preferred_types = [int]
# -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from django.db import models from django.utils.translation import ugettext_lazy as _ from django.template import Library from treemenus.models import MenuItem register = Library() class MenuItemExtension(models.Model): menu_item = models.OneToOneField(MenuItem, related_name="extension") visivel = models.BooleanField(default
=False) css = models.CharField(_(u'CSS style'), null=True, blank=True, max_length=300)
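
# --- Hedged sketch (not part of the original module): one plausible use of
# the `register` Library created above - a template filter that keeps only
# menu items whose extension is flagged visible. Assumes every MenuItem has
# an extension row; a missing one raises MenuItemExtension.DoesNotExist.
@register.filter
def visible_items(menu_items):
    return [item for item in menu_items if item.extension.visivel]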
# $Id: icmp.py,v 1.1.1.1 2005/10/29 18:20:48 provos Exp $ from dpkt import Packet, in_cksum as _icmp_cksum import ip # Types (icmp_type) and codes (icmp_code) - # http://www.iana.org/assignments/icmp-parameters ICMP_CODE_NONE = 0 # for types without codes ICMP_ECHOREPLY = 0 # echo reply ICMP_UNREACH = 3 # dest unreachable, codes: ICMP_UNREACH_NET = 0 # bad net ICMP_UNREACH_HOST = 1 # bad host ICMP_UNREACH_PROTO = 2 # bad protocol ICMP_UNREACH_PORT = 3 # bad port ICMP_UNREACH_NEEDFRAG = 4 # IP_DF caused drop ICMP_UNREACH_SRCFAIL = 5 # src route failed ICMP_UNREACH_NET_UNKNOWN = 6 # unknown net ICMP_UNREACH_HOST_UNKNOWN = 7 # unknown host ICMP_UNREACH_ISOLATED = 8 # src host isolated ICMP_UNREACH_NET_PROHIB = 9 # for crypto devs ICMP_UNREACH_HOST_PROHIB = 10 # ditto ICMP_UNREACH_TOSNET = 11 # bad tos for net ICMP_UNREACH_TOSHOST = 12 # bad tos for host ICMP_UNREACH_FILTER_PROHIB = 13 # prohibited access ICMP_UNREACH_HOST_PRECEDENCE = 14 # precedence error ICMP_UNREACH_PRECEDENCE_CUTOFF = 15 # precedence cutoff ICMP_SRCQUENCH = 4 # packet lost, slow down ICMP_REDIRECT = 5 # shorter route, codes: ICMP_REDIRECT_NET = 0 # for network ICMP_REDIRECT_HOST = 1 # for host ICMP_REDIRECT_TOSNET = 2 # for tos and net ICMP_REDIRECT_TOSHOST = 3 # for tos and host ICMP_ALTHOSTADDR = 6 # alternate host address ICMP_ECHO = 8 # echo service ICMP_RTRADVERT = 9 # router advertise, codes: ICMP_RTRADVERT_NORMAL = 0 # normal ICMP_RTRADVERT_NOROUTE_COMMON = 16 # selective routing ICMP_RTRSOLICIT = 10 # router solicitation ICMP_TIMEXCEED = 11 # time exceeded, code: ICMP_TIMEXCEED_INTRANS = 0 # ttl==0 in transit ICMP_TIMEXCEE
D_REASS = 1  # ttl==0 in reass
ICMP_PARAMPROB = 12  # ip header bad
ICMP_PARAMPROB_ERRATPTR = 0  # error at param ptr
ICMP_PARAMPROB_OPTABSENT = 1  # req. opt. absent
ICMP_PARAMPROB_LENGTH = 2  # bad length
ICMP_TSTAMP = 13  # timestamp request
ICMP_TSTAMPREPLY = 14  # timestamp reply
ICMP_INFO = 15
# information request ICMP_INFOREPLY = 16 # information reply ICMP_MASK = 17 # address mask request ICMP_MASKREPLY = 18 # address mask reply ICMP_TRACEROUTE = 30 # traceroute ICMP_DATACONVERR = 31 # data conversion error ICMP_MOBILE_REDIRECT = 32 # mobile host redirect ICMP_IP6_WHEREAREYOU = 33 # IPv6 where-are-you ICMP_IP6_IAMHERE = 34 # IPv6 i-am-here ICMP_MOBILE_REG = 35 # mobile registration req ICMP_MOBILE_REGREPLY = 36 # mobile registration reply ICMP_DNS = 37 # domain name request ICMP_DNSREPLY = 38 # domain name reply ICMP_SKIP = 39 # SKIP ICMP_PHOTURIS = 40 # Photuris ICMP_PHOTURIS_UNKNOWN_INDEX = 0 # unknown sec index ICMP_PHOTURIS_AUTH_FAILED = 1 # auth failed ICMP_PHOTURIS_DECOMPRESS_FAILED = 2 # decompress failed ICMP_PHOTURIS_DECRYPT_FAILED = 3 # decrypt failed ICMP_PHOTURIS_NEED_AUTHN = 4 # no authentication ICMP_PHOTURIS_NEED_AUTHZ = 5 # no authorization ICMP_TYPE_MAX = 40 class ICMP(Packet): """Internet Control Message Protocol.""" __hdr__ = ( ('type', 'B', 8), ('code', 'B', 0), ('sum', 'H', 0) ) class Echo(Packet): __hdr__ = (('id', 'H', 0), ('seq', 'H', 0)) class Quote(Packet): __hdr__ = (('pad', 'I', 0),) def unpack(self, buf): Packet.unpack(self, buf) self.data = self.ip = ip.IP(self.data) class Unreach(Quote): __hdr__ = (('pad', 'H', 0), ('mtu', 'H', 0)) class Quench(Quote): pass class Redirect(Quote): __hdr__ = (('gw', 'I', 0),) class ParamProbe(Quote): __hdr__ = (('ptr', 'B', 0), ('pad1', 'B', 0), ('pad2', 'H', 0)) class TimeExceed(Quote): pass _typesw = { 0:Echo, 3:Unreach, 4:Quench, 5:Redirect, 8:Echo, 11:TimeExceed } def unpack(self, buf): Packet.unpack(self, buf) try: self.data = self._typesw[self.type](self.data) setattr(self, self.data.__class__.__name__.lower(), self.data) except: self.data = buf def __str__(self): if not self.sum: self.sum = _icmp_cksum(Packet.__str__(self)) return Packet.__str__(self)
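
# --- Hedged usage sketch (old dpkt-style, Python 2 era API as shown above):
# building an ICMP echo request and letting __str__ fill in the checksum.
# The id/seq/payload values are arbitrary.
echo = ICMP.Echo(id=1234, seq=1, data='hello')
pkt = ICMP(type=ICMP_ECHO, data=echo)
raw = str(pkt)   # serializing computes the checksum via in_cksum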
import sys ROBOT_LISTENER_API_VERSION = 2 def start_keyword(name, attrs): sys.stdout.wri
te('start keyword %s\n' % name) sys.stderr.write('start keyword %s\n' % name) def end_keyword(name, attrs): sys.stdout.write('
end keyword %s\n' % name) sys.stderr.write('end keyword %s\n' % name)
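
# --- Hedged usage note for the listener above: Robot Framework attaches a
# listener file on the command line, e.g.
#
#     robot --listener /path/to/this_listener.py tests.robot
#
# ROBOT_LISTENER_API_VERSION = 2 selects the positional (name, attrs)
# callback signature used by start_keyword/end_keyword.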
from elasticsearch import TransportError import olympi
a.core.logger from olympia.amo.utils import render log = olympia.core.logger.getLogger('z.es') class ElasticsearchExceptionMiddleware(object): def process_exception(self, request, exception): if issubclass(exception.__class__, TransportError): log.exception(u'Elasticsearch error') return render(r
equest, 'search/down.html', status=503)
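
# --- Hedged usage note: an old-style (pre-MIDDLEWARE) Django middleware like
# this takes effect once its dotted path is listed in settings; the exact
# path below is an assumption about where the module lives.
#
# MIDDLEWARE_CLASSES = (
#     'olympia.search.middleware.ElasticsearchExceptionMiddleware',
# )
#
# process_exception only fires for unhandled exceptions, and returning a
# response here short-circuits the remaining exception handlers.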
class Egg(object): def __init__(self, xpos, ypos, t, s): self.x = xpos # x-coordinate self.y = ypos # y-coordinate self.tilt = t # Left and right angle offset self.angle = 0 # Used to define the tilt self.scalar = s / 100.0 # Height of the egg def wobble(self): self.tilt = cos(self.angle) / 8 self.angle += 0.1 def display(self): noStroke() fill(255) w
ith pushMatrix(): translate(self.x, self.y) rotate(self.tilt) scale(self.scalar) with beginShape(): vertex(0, -100)
bezierVertex(25, -100, 40, -65, 40, -40) bezierVertex(40, -15, 25, 0, 0, 0) bezierVertex(-25, 0, -40, -15, -40, -40) bezierVertex(-40, -65, -25, -100, 0, -100)
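
# --- Hedged usage sketch (Processing Python mode): the class above matches
# the classic "wobbly egg" example, so a sketch would construct one Egg and
# let draw() animate it. The window size and egg parameters are assumptions.
egg = None

def setup():
    global egg
    size(640, 360)
    egg = Egg(width / 2, height / 2, 0, 120)

def draw():
    background(0)
    egg.wobble()
    egg.display()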
import numpy as np


def CG(A, X, B, maxiter=20, tolerance=1.0e-10, verbose=False):
    """Solve X*A=B using the conjugate gradient method.

    ``X`` and ``B`` are ``ndarray``s of shape ``(m, nx, ny, nz)``,
    corresponding to matrices of size ``m*n`` (``n=nx*ny*nz``), and ``A`` is
    a callable representing an ``n*n`` matrix::

      A(X, Y)

    will store ``X*A`` in the output array ``Y``.

    On return ``X`` will be the solution to ``X*A=B`` within ``tolerance``."""

    m = len(X)
    shape = (m, 1, 1, 1)
    R = np.empty(X.shape, X.dtype.char)
    Q = np.empty(X.shape, X.dtype.char)
    A(X, R)
    R -= B
    P = R.copy()
    c1 = A.sum(np.reshape([abs(np.vdot(r, r)) for r in R], shape))
    for i in range(maxiter):
        error = sum(c1.ravel())
        if verbose:
            print 'CG-%d: %e' % (i, error)
        if error < tolerance:
            return i, error
        A(P, Q)
        #alpha = c1 / reshape([vdot(p, q) for p, q in zip(P, Q)], shape)
        alpha = c1 / A.sum(np.reshape([np.vdot(q, p) for p, q in zip(P, Q)], shape))
        X -= alpha * P
        R -= alpha * Q
        c0 = c1
        c1 = A.sum(np.reshape([abs(np.vdot(r, r)) for r in R], shape))
        beta = c1 / c0
        P *= beta
        P += R
    raise ArithmeticError('Did not converge!')
""" Grade API v1 URL specification """ from django.conf.urls import url, patterns import views urlpatterns = patterns( '', url(r'^g
rades/courses/$', views.CourseGradeList.as_view()), url(r'^grades/courses/(?P<org>[A-Za-z0-9_.-]+)[+](?P<name>[A-Za-z0-9_.-]+)[+](?P<run
>[A-Za-z0-9_.-]+)/$', views.CourseGradeDetail.as_view()), url(r'^grades/students/$', views.StudentList.as_view()), url(r'^grades/students/(?P<student_id>[0-9]+)/$', views.StudentGradeDetail.as_view()), )
urlencoded_get_query_multivalued_param(self): r = requests.get(httpbin('get'), params=dict(test=['foo', 'baz'])) assert r.status_code == 200 assert r.url == httpbin('get?test=foo&test=baz') def test_different_encodings_dont_break_post(self): r = requests.post(httpbin('post'), data={'stuff': json.dumps({'a': 123})}, params={'blah': 'asdf1234'}, files={'file': ('test_requests.py', open(__file__, 'rb'))}) assert r.status_code == 200 def test_unicode_multipart_post(self): r = requests.post(httpbin('post'), data={'stuff': u'ëlïxr'}, files={'file': ('test_requests.py', open(__file__, 'rb'))}) assert r.status_code == 200 r = requests.post(httpbin('post'), data={'stuff': u'ëlïxr'.encode('utf-8')}, files={'file': ('test_requests.py', open(__file__, 'rb'))}) assert r.status_code == 200 r = requests.post(httpbin('post'), data={'stuff': 'elixr'}, files={'file': ('test_requests.py', open(__file__, 'rb'))}) assert r.status_code == 200 r = requests.post(httpbin('post'), data={'stuff': 'elixr'.encode('utf-8')}, files={'file': ('test_requests.py', open(__file__, 'rb'))}) assert r.status_code == 200 def test_unicode_multipart_post_fieldnames(self): filename = os.path.splitext(__file__)[0] + '.py' r = requests.Request(method='POST', url=httpbin('post'), data={'stuff'.encode('utf-8'): 'elixr'}, files={'file': ('test_requests.py', open(filename, 'rb'))}) prep = r.prepare() assert b'name="stuff"' in prep.body assert b'name="b\'stuff\'"' not in prep.body def test_unicode_method_name(self): files = {'file': open('test_requests.py', 'rb')} r = requests.request(method=u'POST', url=httpbin('post'), files=files) assert r.status_code == 200 def test_custom_content_type(self): r = requests.post(httpbin('post'), data={'stuff': json.dumps({'a': 123})}, files={'file1': ('test_requests.py', open(__file__, 'rb')), 'file2': ('test_requests', open(__file__, 'rb'), 'text/py-content-type')}) assert r.status_code == 200 assert b"text/py-content-type" in r.request.body def test_hook_receives_request_arguments(self): def hook(resp, **kwargs): assert resp is not None assert kwargs != {} requests.Request('GET', HTTPBIN, hooks={'response': hook}) def test_session_hooks_are_used_with_no_request_hooks(self): hook = lambda x, *args, **kwargs: x s = requests.Session() s.hooks['response'].append(hook) r = requests.Request('GET', HTTPBIN) prep = s.prepare_request(r) assert prep.hooks['response'] != [] assert prep.hooks['response'] == [hook] def test_session_hooks_are_overriden_by_request_hooks(self): hook1 = lambda x, *args, **kwargs: x hook2 = lambda x, *args, **kwargs: x assert hook1 is not hook2 s = requests.Session() s.hooks['response'].append(hook2) r = requests.Request('GET', HTTPBIN, hooks={'response': [hook1]}) prep = s.prepare_request(r) assert prep.hooks['response'] == [hook1] def test_prepared_request_hook(self): def hook(resp, **kwargs): resp.hook_working = True return resp req = requests.Request('GET', HTTPBIN, hooks={'response': hook}) prep = req.prepare() s = requests.Session() s.proxies = getproxies() resp = s.send(prep) assert hasattr(resp, 'hook_working') def test_prepared_from_session(self): class DummyAuth(requests.auth.AuthBase): def __call__(self, r): r.headers['Dummy-Auth-Test'] = 'dummy-auth-test-ok' return r req = requests.Request('GET', httpbin('headers')) assert not req.auth s = requests.Session() s.auth = DummyAuth() prep = s.prepare_request(req) resp = s.send(prep) assert resp.json()['headers']['Dummy-Auth-Test'] == 'dummy-auth-test-ok' def test_links(self): r = requests.Response() r.headers = { 
'cache-control': 'public, max-age=60, s-maxage=60', 'connection': 'keep-alive', 'content-encoding': 'gzip', 'content-type': 'application/json; charset=utf-8', 'date': 'Sat, 26 Jan 2013 16:47:56 GMT', 'etag': '"6ff6a73c0e446c1f61614769e3ceb778"', 'last-modified': 'Sat, 26 Jan 2013 16:22:39 GMT', 'link': ('<https://api.github.com/users/kennethreitz/repos?' 'page=2&per_page=10>; rel="next", <https://api.github.' 'com/users/kennethreitz/repos?page=7&per_page=10>; ' ' rel="last"'), 'server': 'GitHub.com', 'status': '200 OK', 'vary': 'Accept', 'x-content-type-options': 'nosniff', 'x-github-media-type': 'github.beta', 'x-ratelimit-limit': '60', 'x-ratelimit-remaining': '57' } assert r.links['next']['rel'] == 'next' def test_cookie_parameters(self): key = 'some_cookie' value = 'some_value' secure = True domain = 'test.com' rest = {'HttpOnly': True} jar = requests.cookies.RequestsCookieJar() jar.set(key, value, secure=secure, domain=domain, rest=rest) assert len(jar) == 1 assert 'some_cookie' in jar cookie = list(jar)[0] assert cooki
e.secure == secure assert cookie.domain == domain assert cookie._rest['HttpOnly'] == rest['HttpOnly'] def test_cookie_as_dict_keeps_len(self): key = 'some_cookie' value = 'some_value' key1 = 'some_cookie1' value1 = 'some_value1' jar = requests.cookies.RequestsCookieJar() jar.set(key, value) jar.set(key1, value1) d1 = dict(jar) d2 = dict(jar.iteritems()) d3 = dict
(jar.items()) assert len(jar) == 2 assert len(d1) == 2 assert len(d2) == 2 assert len(d3) == 2 def test_cookie_as_dict_keeps_items(self): key = 'some_cookie' value = 'some_value' key1 = 'some_cookie1' value1 = 'some_value1' jar = requests.cookies.RequestsCookieJar() jar.set(key, value) jar.set(key1, value1) d1 = dict(jar) d2 = dict(jar.iteritems()) d3 = dict(jar.items()) assert d1['some_cookie'] == 'some_value' assert d2['some_cookie'] == 'some_value' assert d3['some_cookie1'] == 'some_value1' def test_cookie_as_dict_keys(self): key = 'some_cookie' value = 'some_value' key1 = 'some_cookie1' value1 = 'some_value1' jar = requests.cookies.RequestsCookieJar() jar.set(key, value) jar.set(key1, value1) keys = jar.keys() assert keys == list(keys) # make sure one can use keys multiple times assert list(keys) == list(keys) def test_cookie_as_dict_values(self): key = 'some_cookie' value = 'some_value' key1 = 'some_cookie1' value1 = 'some_value1' jar = requests.cookies.RequestsCookieJar() jar.set(key, value) jar.set(key1, value1) values = jar.values() assert values == list(values) # make sure one can use values multiple times assert list(values) == list(values) def test_cookie_as_dict_items(self): key = 'some_cookie' value = 'some_value'
int), ('gyroscope', int), ('accelerometer', int), ('magnetometer', int), ], # ID_RECHARGE_POWERDOWN: [], ID_BATTERY_CHARGE_RATIO: [('charge', int)], # ID_GO_TO_SLEEP: [('duration', int)], # ID_SHUTDOWN: [], # ID_MOTOR_ACCEL: [('acceleration', float)], ID_STATUS_BUTTON: [('state', int)], ID_MOTOR_ENCODER: [('channel', int), ('count', int)], ID_MOTOR_ERROR: [('error', int)],# single byte ID_TWIST_DONE: [('error', int)], # 0=no error, 1=edge, 2=ultrasonic, 3=tilt, 4=accel, 5=encoder } BOTH_FORMATS_IN = { ID_ALL_STOP: [], ID_LED: [('index', int), ('state', int)], ID_LED_AUTO: [('state', int)], ID_GET_VALUE: [('id', str)], ID_FORCE_SENSORS: [('state', int)], } HEAD_FORMATS_IN = { ID_PAN_SPEED: [('speed', 'int32')], ID_GO_TO_CENTER: [('type', str)], ID_CALIBRATE: [('type', str)], ID_PAN_ANGLE: [('angle', int)], ID_TILT_ANGLE: [('angle', int)], ID_TILT_POWER: [('enabled', int)], ID_PAN_POWER: [('enabled', int)], # ID_MICROPHONE_ENABLE: [('state', int)], } TORSO_FORMATS_IN = { ID_SONAR_POWER: [('enabled', int)], ID_MOTOR_SPEED: [('left', int), ('right', int)], ID_MOTOR_ACCEL: [('acceleration', int)], ID_RECHARGE_POWERDOWN: [], ID_GO_TO_SLEEP: [('duration', int)], ID_SHUTDOWN: [], # Mimics Twist format. http://docs.ros.org/api/geometry_msgs/html/msg/Twist.html # Linear is linear.x, Angular is angular.z. ID_TWIST: [('linear', float), ('angular', float), ('seconds', float), ('force', int)], } # Packets using these IDs will require acknowledgement. ACK_IDS = set([ ID_LED, ID_LED_AUTO, ID_SONAR_POWER, ID_MOTOR_SPEED, ID_MOTOR_ACCEL, ID_GO_TO_CENTER, ID_TILT_ANGLE, ID_PAN_ANGLE, ]) MOTOR_FORWARD = 'forward' MOTOR_REVERSE = 'reverse' MOTOR_TURN_CW = 'turn_cw' MOTOR_TURN_CCW = 'turn_ccw' MOTOR_BREAK = 'break' MOTOR_PIVOT_LEFT_CW = 'pivot_left_cw' MOTOR_PIVOT_LEFT_CCW = 'pivot_left_ccw' MOTOR_PIVOT_RIGHT_CW = 'pivot_right_cw' MOTOR_PIVOT_RIGHT_CCW = 'pivot_right_ccw' # ComMotion Manual, Page 4 # The desired speed from -255 to +255. Positive values are forward, negative values are reverse. MOTOR_MAX_SPEED = 255 MOTOR_EIGTH_SPEED = int(round(MOTOR_MAX_SPEED * 0.125)) MOTOR_QUARTER_SPEED = int(round(MOTOR_MAX_SPEED * 0.25)) MOTOR_HALF_SPEED = int(round(MOTOR_MAX_SPEED * 0.5)) MOTOR_THREE_QUARTER_SPEED = int(round(MOTOR_MAX_SPEED * 0.75)) MOTOR_DEFAULT_SPEED = MOTOR_QUARTER_SPEED MOTOR_DEFAULT_ACCEL = 128 # velocity_units/sec MOTOR_MIN_ACCEL = 1 MOTOR_MAX_ACCEL = MOTOR_MAX_SPEED # Measured. MOTOR_MAX_SPEED_REAL = 745 * MM/SEC MOTOR_DEFAULT_ACCEL_REAL = float(MOTOR_DEFAULT_ACCEL) / MOTOR_MAX_SPEED * MOTOR_MAX_SPEED_REAL / SEC # Pololu 2282 Gear Motor => 464.64 counts per revolution of the gearbox's output shaft # Driver wheel radius = 14 mm # Tread length = 228 mm #(revolution_of_shaft/counts) * (wheel_diameter)/(revolution_of_shaft) #(revolution_of_shaft/464.6 counts) * (2*pi*14 mm)/(1 revolution_of_shaft) * (1m/1000mm) = meter/count #METERS_PER_COUNT = (3.14159265 * 0.1524) / 64000 * (1/1000.) #TODO:the 464.6 counts may mean for quadrature, but we're only using a single channel # Note, ROS distance assumes meters. METERS_PER_COUNT = (3.141592653589793 * 28) / 464.6 / 1000. # Convert the relative speed to absolute velocity in meters/second. SPEED_TO_VELOCITY = 0.35/MOTOR_MAX_SPEED VELOCITY_TO_SPEED = MOTOR_MAX_SPEED/0.35 TILT_CENTER = 90 TILT_MIN = 90-65 TILT_MAX = 90+65 PAN_MAX = 360 OK = 'OK' PYTHON_TO_ROS_TYPES = { bool: 'bool', int: 'int32', float: 'float32', str: 'string', } # The maximum width of the body. 
TORSO_DIAMETER_MM = 126 + 24
TORSO_DIAMETER = TORSO_DIAMETER_MM * MM

# The distance between the treads.
TORSO_TREAD_WIDTH = 100 * MM
TORSO_TREAD_WIDTH_METERS = TORSO_TREAD_WIDTH.to(METER).magnitude

# The distance from the ground to the center of the head.
HEIGHT_CENTER_HEIGHT_MM = 235
HEIGHT_CENTER_HEIGHT = HEIGHT_CENTER_HEIGHT_MM * MM

ARDUINO_PING_TIMEOUT = 5

MOTION_WANDER = 'wander'
MOTION_FORWARD_X_MM = 'forward_x_mm'
MOTION_TURN_X_DEGREES = 'turn_x_degrees'
MOTION_PVIOT_X_DEGREES = 'pivot_x_degrees'

MOTIONS = [
    (MOTION_WANDER, 'wander'),
    (MOTION_FORWARD_X_MM, 'forward'),
    (MOTION_TURN_X_DEGREES, 'turn'),
    (MOTION_PVIOT_X_DEGREES, 'pivot'),
]

SOUND_TTS = 'tts'
SOUND_TONE = 'tone'

# CPU temperature limits (in Celsius).
# The Pi starts to underclock itself at 85C and the components get damaged at 90C.
CPU_TEMP_ERROR = 85  # above this, report an error
CPU_TEMP_WARN = 82.5  # above this, report a warning; below, report ok

CPU_USAGE_PERCENT_ERROR = 99
CPU_USAGE_PERCENT_WARN = 90

# CPU clock speed limits.
CPU_CLOCK_SPEED_PERCENT_ERROR = 25
CPU_CLOCK_SPEED_PERCENT_WARN = 50

# Disk limits.
DISK_USAGE_PERCENT_ERROR = 95
DISK_USAGE_PERCENT_WARN = 90

# Memory limits.
MEMORY_USAGE_PERCENT_ERROR = 95
MEMORY_USAGE_PERCENT_WARN = 90

# Links
BASE_FOOTPRINT = 'base_footprint'
BASE_LINK = 'base_link'
NECK = 'neck'
HEAD = 'head'
ODOM = 'odom'

# Joints
FOOTPRINT_TO_TORSO_JOINT = 'footprint_to_base_link_joint'
TORSO_TO_NECK_JOINT = 'base_link_to_neck_joint'
NECK_TO_HEAD_JOINT = 'neck_to_head_joint'
HEAD_TO_CAMERA_JOINT = 'head_to_camera_joint'

# Battery limits.
BATTERY_CHARGE_RATIO_ERROR = 0.8
BATTERY_CHARGE_RATIO_WARN = 0.85

# Camera.
# http://elinux.org/Rpi_Camera_Module
CAMERA_ANGLE_OF_VIEW_H = 54
CAMERA_ANGLE_OF_VIEW_V = 41

EXPORT_TO_ARDUINO = [
    'METERS_PER_COUNT',
    'TORSO_TREAD_WIDTH_METERS',
    'VELOCITY_TO_SPEED',
    'SPEED_TO_VELOCITY',
    'MOVEMENT_ERROR_NONE',
    'MOVEMENT_ERROR_EDGE',
    'MOVEMENT_ERROR_ULTRASONIC',
    'MOVEMENT_ERROR_TILT',
    'MOVEMENT_ERROR_ACCEL',
    'MOVEMENT_ERROR_ENCODER',
    'MOVEMENT_ERROR_BUMPER',
    'MOVEMENT_ULTRASONIC_THRESHOLD_CM',
    'MOVEMENT_MAX_TILT',
]

# Diagnostic part names.

def write_ros_messages(d, prefix):
    msg_dir = '../../../ros_homebot_msgs/msg'
    for k, v in d.iteritems():
        name = re.sub(r'[^a-z]+', ' ', ALL_IDS[k])
        name = (''.join(map(str.title, name.split(' '))))
        if name != 'Pong':
            name = name + 'Change'
        #name = prefix.title() + name
        v = [('device', 'uint8')] + v
        print(name, v)
        with open(os.path.join(msg_dir, '%s.msg' % name), 'w') as fout:
            for _name, _type in v:
                _ros_type = PYTHON_TO_ROS_TYPES.get(_type, _type)
                print('%s %s' % (_ros_type, _name), file=fout)

def write_ros_services(d, prefix):
    msg_dir = '../../../ros_homebot_msgs/srv'
    for k, v in d.iteritems():
        name = re.sub(r'[^a-z]+', ' ', ALL_IDS[k])
        name = (''.join(map(str.title, name.split(' '))))
        #name = prefix.title() + name
        #v = [('device', 'uint8')] + v
        print(name, v)
        with open(os.path.join(msg_dir, '%s.srv' % name), 'w') as fout:
            for _name, _type in v:
                _ros_type = PYTHON_TO_ROS_TYPES.get(_type, _type)
                print('%s %s' % (_ros_type, _name), file=fout)
            print('---', file=fout)

def write_cpp_headers():
    # Output the IDs to a C/C++ header.
    with open('../../../ros_homebot_firmware/common/src/ID.h', 'w') as fout:
        print('// AUTO-GENERATED. DO NOT EDIT. SEE homebot/constants.py.', file=fout)
        items = [_ for _ in globals().items() if _[0].startswith('ID_')]
        for _name, _value in sorted(items, key=lambda o: o[1]):
            print("#define %s '%s'" % (_name.ljust(4*6), _value), file=fout)
        items = [_ for _ in globals().items() if _[0].startswith('NAME_') and not _[0].startswith('NAME_TO_')]
        for _name, _value in sorted(items, key=lambda o: o[0]):
            print('#define %s "%s"' % (_name.ljust(4*6), _value), file=fout)
        for _name in EXPORT_TO_ARDUINO:
            _value = globals()[_name]
            print('#define %s %s' % (_name.ljust(4*6), repr(_value)), file=fout)
    print('Wrote ID.h.')

if __name__ == '__main__':
    write_cpp_headers()
    print('''
Now run:

    cd /home/`user`/git/homebot/src/ros
    . ./setup.bash
    catkin_make --pkg ros_homebot_msgs
''')
t =
_key_id, aws_secret_access_key, region=None, ): """Query STS for a users' account_id""" client = get_client( "sts", profile_name, aws_access_key_id, aws_secret_access_key, region, ) return client.get_caller_identity().get("Account") def get_client( client, profile_name, aws_access_key_id, aws_secret_access_key, region=None, ): """Shortcut for getting an initialized instance of the boto3 client.""" boto3.setup_default_session( profile_name=profile_name, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=region, ) return boto3.client(client) def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None): """Register and upload a function to AWS Lambda.""" print("Creating your new Lambda function") byte_stream = read(path_to_zip_file, binary_file=True) profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") account_id = get_account_id( profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region",), ) role = get_role_name( cfg.get("region"), account_id, cfg.get("role", "lambda_basic_execution"), ) client = get_client( "lambda", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) # Do we prefer development variable over config? buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name") func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get( "function_name" ) print("Creating lambda function with name: {}".format(func_name)) if use_s3: kwargs = { "FunctionName": func_name, "Runtime": cfg.get("runtime", "python2.7"), "Role": role, "Handler": cfg.get("handler"), "Code": { "S3Bucket": "{}".format(buck_name), "S3Key": "{}".format(s3_file), }, "Description": cfg.get("description", ""), "Timeout": cfg.get("timeout", 15), "MemorySize": cfg.get("memory_size", 512), "VpcConfig": { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), }, "Publish": True, } else: kwargs = { "FunctionName": func_name, "Runtime": cfg.get("runtime", "python2.7"), "Role": role, "Handler": cfg.get("handler"), "Code": {"ZipFile": byte_stream}, "Description": cfg.get("description", ""), "Timeout": cfg.get("timeout", 15), "MemorySize": cfg.get("memory_size", 512), "VpcConfig": { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), }, "Publish": True, } if "tags" in cfg: kwargs.update( Tags={key: str(value) for key, value in cfg.get("tags").items()} ) if "environment_variables" in cfg: kwargs.update( Environment={ "Variables": { key: get_environment_variable_value(value) for key, value in cfg.get("environment_variables").items() }, }, ) client.create_function(**kwargs) concurrency = get_concurrency(cfg) if concurrency > 0: client.put_function_concurrency( FunctionName=func_name, ReservedConcurrentExecutions=concurrency ) def update_function( cfg, path_to_zip_file, existing_cfg, use_s3=False, s3_file=None, preserve_vpc=False, ): """Updates the code of an existing Lambda function""" print("Updating your Lambda function") byte_stream = read(path_to_zip_file, binary_file=True) profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") account_id = get_account_id( profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region",), ) role = get_role_name( cfg.get("region"), account_id, cfg.get("role", "lambda_basic_execution"), ) client = get_client( "lambda", profile_name, 
aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) # Do we prefer development variable over config? buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name") if use_s3: client.update_function_code( FunctionName=cfg.get("function_name"), S3Bucket="{}".format(buck_name), S3Key="{}".format(s3_file), Publish=True, ) else: client.update_function_code( FunctionName=cfg.get("function_name"), ZipFile=byte_stream, Publish=True, ) kwargs = { "FunctionName": cfg.get("function_name"), "Role": role, "Runtime": cfg.get("runtime"), "Hand
ler": cfg.get("handler"), "Description": cfg.get("description", ""), "Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512), } if preserve_vpc: kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get( "VpcConfig" ) if kwargs["VpcConfig"] is None: kwargs["VpcConfig"] = { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), } else: del kwargs["VpcConfig"]["VpcId"] else: kwargs["VpcConfig"] = { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), } if "environment_variables" in cfg: kwargs.update( Environment={ "Variables": { key: str(get_environment_variable_value(value)) for key, value in cfg.get("environment_variables").items() }, }, ) ret = client.update_function_configuration(**kwargs) concurrency = get_concurrency(cfg) if concurrency > 0: client.put_function_concurrency( FunctionName=cfg.get("function_name"), ReservedConcurrentExecutions=concurrency, ) elif "Concurrency" in existing_cfg: client.delete_function_concurrency( FunctionName=cfg.get("function_name") ) if "tags" in cfg: tags = {key: str(value) for key, value in cfg.get("tags").items()} if tags != existing_cfg.get("Tags"): if existing_cfg.get("Tags"): client.untag_resource( Resource=ret["FunctionArn"], TagKeys=list(existing_cfg["Tags"].keys()), ) client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) def upload_s3(cfg, path_to_zip_file, *use_s3): """Upload a function to AWS S3.""" print("Uploading your new Lambda function") profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") client = get_client( "s3", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) byte_stream = b"" with open(path_to_zip_file, mode="rb") as fh: byte_stream = fh.read() s3_key_prefix = cfg.get("s3_key_prefix", "/dist") checksum = hashlib.new("md5", byte_stream).hexdigest() timestamp = str(time.time()) filename = "{prefix}{checksum}-{ts}.zip".format( prefix=s3_key_prefix, checksum=checksum, ts=timestamp, ) # Do we prefer development variable over config? buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name") fu
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#


class XmlNamespace(object):

    def __init__(self, namespace):
        self._namespace = namespace

    def name(self, tag):
        return "{%s}%s" % (self._namespace, tag)


class XmlNamer(object):
    """Initialize me with a DOM node or a DOM document node (the
    toplevel node you get when parsing an XML file). Then use me
    to generate fully qualified XML names.

    >>> xml = '<office:document-styles xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0"></office:document-styles>'
    >>> from lxml import etree
    >>> namer = XmlNamer(etree.fromstring(xml))
    >>> namer.name('office', 'blah')
    {urn:oasis:names:tc:opendocument:xmlns:office:1.0}blah
    >>> namer.name('office:blah')
    {urn:oasis:names:tc:opendocument:xmlns:office:1.0}blah

    I can also give you XmlNamespace objects if you give me the abbreviated
    namespace name. These are useful if you need to reference a namespace
    continuously.

    >>> office_ns = namer.namespace('office')
    >>> office_ns.name('foo')
    {urn:oasis:names:tc:opendocument:xmlns:office:1.0}foo
    """

    def __init__(self, dom_node):
        # Allow the user to pass either a DOM node or the
        # XML document node.
        if hasattr(dom_node, 'nsmap'):
            self.nsmap = dom_node.nsmap
        else:
            self.nsmap = dom_node.getroot().nsmap

    def name(self, namespace_shortcut, tag=None):
        # If the user doesn't pass an argument into 'tag'
        # then namespace_shortcut contains a tag of the form
# 'short-namespace:tag'
if tag is None: try: namespace_shortcut, tag = namespace_shortcut.split(':') except ValueError: # If there is no namespace in namespace_shortcut. tag = namespace_shortcut.lstrip("{}") return tag return "{%s}%s" % (self.nsmap[namespace_shortcut], tag) def namespace(self, namespace_shortcut): return XmlNamespace(self.nsmap[namespace_shortcut])
# Django settings for temp project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': {'ENGINE': 'django.db.backends.sqlite3'} } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '1s^z*4c6clc@+)c8dstu#eh4bi5907+&h_$2_&=y!3=a_!))u6' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjackin
g protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'test_urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'temp.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget t
o use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'messages_extends', ) MESSAGE_STORAGE = 'messages_extends.storages.FallbackStorage' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
# -*- coding: utf-8 -*- """ Plugins related to folders and paths """ from hyde.plugin import Plugin from hyde.fs import Folder class FlattenerPlugin(Plugin): """ The plugin class for flattening nested folders. """ def __init__(self, site): super(FlattenerPlugin, self).__init__(site) def begin_site(self): """ Finds all the folders that need flattening and changes the relative deploy path of all resources in those folders. """ items = [] try: items = self.site.config.flattener.items except AttributeError: pass for item in items: node = None target = '' try:
node = self.site.content.node_from_relative_path(item.source) target = Folder(item.target)
except AttributeError: continue if node: for resource in node.walk_resources(): target_path = target.child(resource.name) self.logger.debug( 'Flattening resource path [%s] to [%s]' % (resource, target_path)) resource.relative_deploy_path = target_path
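
# A hedged configuration sketch (paths and the plugin's import path are
# illustrative assumptions, not taken from a real site): in site.yaml the
# plugin above looks for a `flattener` section whose items each carry a
# `source` and a `target`, e.g.
#
#   plugins:
#       - hyde.ext.plugins.folders.FlattenerPlugin
#   flattener:
#       items:
#           - source: blog/2012
#             target: blog
#
# Every resource found under `blog/2012` then gets its relative deploy path
# rewritten to land directly inside `blog/`.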
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-25 15:19 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('shipping', '0005_auto_20170616_1351'), ('teamstore', '0003_auto_20170624_1533'), ] operation
s = [ migrations.AddField( model_name='teamstore', name='shipping_method', field=models.Fore
ignKey(default=2, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='shipping.ShippingMethod', verbose_name='team shipping method'), ), ]
""" A sub-package for efficiently dealing with polynomials. Within the documentation for this sub-pac
kage, a "finite power series," i.e., a polynomial (also referred to simply as a "series") is represented by a 1-D numpy array of the polynomial's coefficients, ordered from lowest order term to highest. For example, array([1,2,3]) represents ``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial applicable to the specific module in question, e.g., `polynomial` (which "wraps" the "standard" basis) or `chebyshev`. For optimal performance, all operations o
n polynomials, including evaluation at an argument, are implemented as operations on the coefficients. Additional (module-specific) information can be found in the docstring for the module of interest. """ from polynomial import * from chebyshev import * from polyutils import * from numpy.testing import Tester test = Tester(__file__).test bench = Tester(__file__).bench
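
# A small illustration (ours, shown doctest-style; assumes this sub-package
# is importable as numpy.polynomial) of the coefficient convention described
# above: array([1, 2, 3]) means 1*P_0 + 2*P_1 + 3*P_2 in the module's basis.
#
#   >>> from numpy.polynomial import polynomial as P
#   >>> P.polyval(2.0, [1, 2, 3])       # 1 + 2*2 + 3*2**2
#   17.0
#   >>> from numpy.polynomial import chebyshev as C
#   >>> C.chebval(2.0, [1, 2, 3])       # 1*T_0(2) + 2*T_1(2) + 3*T_2(2)
#   26.0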
'''
This is a library for getting DotA2 insight information.

Problem: there was no way to get Dota's internal data about heroes, their
abilities and so on in a form suitable for further work.

Solution: this library gives you access to all the data in the in-game
files. Information about a single hero is of limited use on its own, so
there are also ways to get stats for a selection of heroes or information
about a certain match.
'''

from atod.meta import meta_info
from atod.models.interfaces import Member, Group
from atod.models.ability import Ability
from atod.models.abilities import Ab
ilities from atod.models.hero import Hero from atod.models.heroes import Heroes from atod.models.match import Match from atod.utils.pick import get_recommendations # from atod.utils import dota_api
from django.conf import settings from django.conf.urls import url from plans.views import CreateOrderView, OrderListView, InvoiceDetailView, AccountActivationView, \ OrderPaymentReturnView, CurrentPlanView, UpgradePlanView, OrderView, BillingInfoRedirectView, \ BillingInfoCreateView, BillingInfoUpdateView, BillingInfoDeleteView, CreateOrderPlanChangeView, ChangePlanView, \ PricingView, FakePaymentsView urlpatterns = [ url(r'^pricing/$', PricingView.as_view(), name='pricing'), url(r'^account/$', CurrentPlanView.as_view(), name='current_plan'), url(r'^account/activation/$', AccountActivationView.as_view(), name='account_activation'), url(r'^upgrade/$', UpgradePlanView.as_view(), name='upgrade_plan'), url(r'^order/extend/new/(?P<pk>\d+)/$', CreateOrderView.as_view(), name='create_order_plan'), url(r'^order/upgrade/new/(?P<pk>\d+)/$', CreateOrderPlanChangeView.as_view(), name='create_order_plan_change'), url(r'^change/(?P<pk>\d+)/$', ChangePlanView.as_view(), name='change_plan'), url(r'^order/$', OrderListView.as_view(), name='order_list'), url(r'^order/(?P<pk>\d+)/$', OrderView.as_view(), name='order'), url(r'^order/(?P<pk>\d+)/payment/success/$', OrderPaymentReturnView.as_view(status='success'), name='order_payment_success'), url(r'^order/(?P<pk>\d+)/payment/failure/$',
OrderPaymentReturnView.as_view(status='failure'), name='order_payment_failure'), url(r'^billing/$', BillingInfoRedirectView.as_view(), name='billing_info'), url(r'^billing/create/$', BillingInfoCreateView.as_view(
), name='billing_info_create'), url(r'^billing/update/$', BillingInfoUpdateView.as_view(), name='billing_info_update'), url(r'^billing/delete/$', BillingInfoDeleteView.as_view(), name='billing_info_delete'), url(r'^invoice/(?P<pk>\d+)/preview/html/$', InvoiceDetailView.as_view(), name='invoice_preview_html'), ] if getattr(settings, 'DEBUG', False): urlpatterns += [ url(r'^fakepayments/(?P<pk>\d+)/$', FakePaymentsView.as_view(), name='fake_payments'), ]
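
# A hedged usage note (URL strings below are just what the regexes above
# resolve to when this urlconf is mounted at the site root; an include()
# prefix would prepend to them): views and templates address these routes
# by name, e.g.
#
#   from django.urls import reverse
#   reverse('create_order_plan', kwargs={'pk': 1})
#   # -> '/order/extend/new/1/'
#   reverse('invoice_preview_html', kwargs={'pk': 7})
#   # -> '/invoice/7/preview/html/'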
""" Tests for foe.command.interpret._set
up_readline
""" from foe.command.interpret import _get_input def test_no_config(): assert not _get_input()
.async_register( DOMAIN, "{}_{}".format(p_type, SERVICE_SAY), async_say_handle, descriptions.get(SERVICE_SAY), schema=SCHEMA_SERVICE_SAY) setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config in config_per_platform(config, DOMAIN)] if setup_tasks: yield from asyncio.wait(setup_tasks, loop=hass.loop) @asyncio.coroutine def async_clear_cache_handle(service): """Handle clear cache service call.""" yield from tts.async_clear_cache() hass.services.async_register( DOMAIN, SERVICE_CLEAR_CACHE, async_clear_cache_handle, descriptions.get(SERVICE_CLEAR_CACHE), schema=SCHEMA_SERVICE_CLEAR_CACHE) return True class SpeechManager(object): """Representation of a speech store.""" def __init__(self, hass): """Initialize a speech store.""" self.hass = hass self.providers = {} self.use_cache = DEFAULT_CACHE self.cache_dir = DEFAULT_CACHE_DIR self.time_memory = DEFAULT_TIME_MEMORY self.file_cache = {} self.mem_cache = {} @asyncio.coroutine def async_init_cache(self, use_cache, cache_dir, time_memory): """Init config folder and load file cache.""" self.use_cache = use_cache self.time_memory = time_memory def init_tts_cache_dir(cache_dir): """Init cache folder.""" if not os.path.isabs(cache_dir): cache_dir = self.hass.config.path(cache_dir) if not os.path.isdir(cache_dir): _LOGGER.info("Create cache dir %s.", cache_dir) os.mkdir(cache_dir) return cache_dir try: self.cache_dir = yield from self.hass.loop.run_in_executor( None, init_tts_cache_dir, cache_dir) except OSError as err: raise HomeAssistantError("Can't init cache dir {}".format(err)) def get_cache_files(): """Return a dict of given engine files.""" cache = {} folder_data = os.listdir(self.cache_dir) for file_data in folder_data: record = _RE_VOICE_FILE.match(file_data) if record: key = KEY_PATTERN.format( record.group(1), record.group(2), record.group(3), record.group(4) ) cache[key.lower()] = file_data.lower() return cache try: cache_files = yield from self.hass.loop.run_in_executor( None, get_cache_files) except OSError as err: raise HomeAssistantError("Can't read cache dir {}".format(err)) if cache_files: self.file_cache.update(cache_files) @asyncio.coroutine def async_clear_cache(self): """Read file cache and delete files.""" self.mem_cache = {} def remove_files(): """Remove files from filesystem.""" for _, filename in self.file_cache.items(): try: os.remove(os.path.join(self.cache_dir, filename)) except OSError as err: _LOGGER.warning( "Can't remove cache file '%s': %s", filename, err) yield from self.hass.loop.run_in_executor(None, remove_files) self.file_cache = {} @callback def async_register_engine(self, engine, provider, config): """Register a TTS provider.""" provider.hass = self.hass if provider.name is None: provider.name = engine self.providers[engine] = provider @asyncio.coroutine def async_get_url(self, engine, message, cache=None, language=None, options=None): """Get URL for play message. This method is a coroutine. 
""" provider = self.providers[engine] msg_hash = hashlib.sha1(bytes(message, 'utf-8')).hexdigest() use_cache = cache if cache is not None else self.use_cache # Languages language = language or provider.default_language if language is None or \ language not in provider.supported_languages: raise HomeAssistantError("Not supported language {0}".format( language)) # Options if provider.default_options and options: options = provider.default_options.copy().update(options) options = options or provider.default_options if options is not None: invalid_opts = [opt_name for opt_name in options.keys() if opt_name not in provider.supported_options] if invalid_opts: raise HomeAssistantError( "Invalid options found: %s", invalid_opts) options_key = ctypes.c_size_t(hash(frozenset(options))).value else: options_key = '-' key = KEY_PATTERN.format( msg_hash, language, options_key, engine).lower() # Is speech already in memory if key in self.mem_cache: filename = self.mem_cache[key][MEM_CACHE_FILENAME] # Is file store in file cache elif use_cache and key in self.file_cache: filename = self.file_cache[key] self.hass.async_add_job(self.async_file_to_mem(key)) # Load speech from provider into memory else: filename = yield from self.async_get_tts_audio( engine, key, message, use_cache, language, options) return "{}/api/tts_proxy/{}".format( self.hass.config.api.base_url, filename) @asyncio.coroutine def async_get_tts_audio(self, engine, key, message, cache, language, options): """Receive TTS and store for view in cache. This method is a coroutine. """ provider = self.providers[engine] extension, data = yield from provider.async_get_tts_audio( message, language, options) if data is None or extension is None: raise HomeAssistantError( "No TTS from {} for '{}'".format(engine, message)) # Create file infos filename = ("{}.{}".format(key, extension)).lower() data = self.write_tags( filename, data, provider, message, language, options) # Save to memory self._async_store_to_memcache(key, filename, data) if cache: self.hass.async_add_job( self.async_save_tts_audio(key, filename, data)) return filename @asyncio.coroutine def async_save_tts_audio(self, key, filename, data): """Store voice data to file and file_cache. This method is a coroutine. """ voice_file = os.path.join(self.cache_dir, filename) def save_speech(): """Store speech to filesystem.""" with open(voice_file, 'wb') as speech: speech.write(data) try: yield from self.hass.loop.run_in_executor(None, save_speech) self.file_cache[key] = filename except OSError: _LOGGER.error("Can't write %s", filename) @asyncio.coroutine def async_file_to_mem(self, key): """Load voice from file cache into memory. This method is a coroutine. """ filename = self.file_cache.get(key) if not filename: raise HomeAssistantError("Key {} not in file cache!".format(key)) voice_file = os.path.join(self.cache_dir, filename) def load_speech():
"""Load a speech from filesystem.""" with open(voice_file, 'rb') as speech: return speech.read() try: data = yield from self.hass.loop.run_in_executor(None, load_speech) except OSError: del self.file_cache[key]
raise HomeAssistantError("Can't read {}".format(voice_file)) self._async_store_to_memcache(key, filename, data) @callback def _async_store_to_memcache(self, key, filename, data): """Store data to
terms=[['P(-1,1)','P(-1,2)','Metric(1,2003)','Metric(2,1003)'], # from C term ['P(-1,1)','P(-1,2)','Metric(1,1003)','Metric(2,2003)'], # from C term ['P(-1,1)','P(-1,2)','Metric(1,2)','Metric(1003,2003)'], # from C term ['P(1,2)','P(2,1)','Metric(1003,2003)'], # from D term (sym) ['P(1,2)','P(2003,1)','Metric(2,1003)'], # 1st term ['P(1,2)','P(1003,1)','Metric(2,2003)'], # 1st swap ['P(2,1)','P(2003,2)','Metric(1,1003)'], # 2nd term ['P(2,1)','P(1003,2)','Metric(1,2003)'], # 2nd swap ['P(1003,2)','P(2003,1)','Metric(1,2)'], # 3rd term ['P(1003,1)','P(2003,2)','Metric(1,2)'], # 3rd swap ['Metric(1,2003)','Metric(2,1003)'], # from mass term ['Metric(1,1003)','Metric(2,2003)'], # from mass term ['Metric(1,2)','Metric(1003,2003)'], # from mass term ['P(1,1)','P(2,1)','Metric(1003,2003)'], # gauge terms ['P(1,2)','P(2,2)','Metric(1003,2003)'], # gauge terms ['P(1,1)','P(2,2)','Metric(1003,2003)'], # gauge terms ['P(1003,1)','P(1,1)','Metric(2,2003)'], # gauge terms ['P(1003,2)','P(2,2)','Metric(1,2003)'], # gauge terms ['P(2003,1)','P(1,1)','Metric(2,1003)'], # gauge terms ['P(2003,2)','P(2,2)','Metric(1,1003)'], # gauge terms ] signs=[1.,1.,-1.,1.,-1.,-1.,-1.,-1.,1.,1.,1.,1.,-1.,1.,1.,0.25,-1.,-1.,-1.,-1.] new_couplings=[False]*len(terms) elif(lorentztag == 'FFVT' ) : terms = [['Gamma(2004,2,1)','Metric(3,1004)'], ['Gamma(1004,2,1)','Metric(3,2004)'], ['Gamma(3,2,1)','Metric(1004,2004)'], ['Gamma(2004,2,-1)','Metric(3,1004)'], ['Gamma(1004,2,-1)','Metric(3,2004)'], ['Gamma(3,2,-1)','Metric(1004,2004)']] signs=[1.,1.,-0.5,1.,1.,-0.5] new_couplings=[False]*3*len(terms) elif(lorentztag == 'VVVT' ) : # the F(mu nu,rho sigma lambda) terms first terms = [['P(2004,2)','Metric(1,1004)','Metric(2,3)'],['P(2004,3)','Metric(1,1004)','Metric(2,3)'], ['P(1004,2)','Metric(1,2004)','Metric(2,3)'],['P(1004,3)','Metric(1,2004)','Metric(2,3)'], ['P(2004,3)','Metric(1,3)','Metric(2,1004)'],['P(2004,1)','Metric(1,3)','Metric(2,1004)'], ['P(1004,3)','Metric(1,3)','Metric(2,2004)'],['P(1004,1)','Metric(1,3)','Metric(2,2004)'], ['P(2004,1)','Metric(1,2)','Metric(3,1004)'],['P(2004,2)','Metric(1,2)','Metric(3,1004)'], ['P(1004,1)','Metric(1,2)','Metric(3,2004)'],['P(1004,2)','Metric(1,2)','Metric(3,2004)'], ['P(3,1)','Metric(1,2004)','Metric(2,1004)'],['P(3,2)','Metric(1,2004)','Metric(2,1004)'], ['P(3,1)','Metric(1,1004)','Metric(2,2004)'],['P(3,2)','Metric(1,1004)','Metric(2,2004)'], ['P(3,1)','Metric(1,2)','Metric(1004,2004)'],['P(3,2)','Metric(1,2)','Metric(1004,2004)'], ['P(2,3)','Metric(1,2004)','Metric(3,1004)'],['P(2,1)','Metric(1,2004)','Metric(3,1004)'], ['P(2,3)','Metric(1,1004)','Metric(3,2004)'],['P(2,1)','Metric(1,1004)','Metric(3,2004)'], ['P(2,3)','Metric(1,3)','Metric(1004,2004)'],['P(2,1)','Metric(1,3)','Metric(1004,2004)'], ['P(1,2)','Metric(2,2004)','Metric(3,1004)'],['P(1,3)','Metric(2,2004)','Metric(3,1004)'], ['P(1,2)','Metric(2,1004)','Metric(3,2004)'],['P(1,3)','Metric(2,1004)','Metric(3,2004)'], ['P(1,2)','Metric(2,3)','Metric(1004,2004)'],['P(1,3)','Metric(2,3)','Metric(1004,2004)']] signs = [1.,-1.,1.,-1.,1.,-1.,1.,-1.,1.,-1.,1.,-1., 1.,-1.,1.,-1.,-1.,1.,1.,-1.,1.,-1.,-1.,1.,1.,-1.,1.,-1.,-1.,1.] new_couplings=[False]*len(terms) l = lambda c: len(pos[c]) if l(8)!=3 : ordering = VVVordering(vertex) # unknown else : raise Exception('Unknown data type "%s".' 
% lorentztag)
    iterm=0
    try :
        for term in terms:
            for perm in itertools.permutations(term):
                label = '*'.join(perm)
                for istruct in range(0,len(structures)) :
                    if label in structures[istruct] :
                        reminder = structures[istruct].replace(label,'1.',1)
                        loc=iterm
                        if(reminder.find("ProjM")>=0) :
                            reminder=re.sub("\*ProjM\(.*,.\)","",reminder)
                            loc+=len(terms)
                        elif(reminder.find("ProjP")>=0) :
                            reminder=re.sub("\*ProjP\(.*,.\)","",reminder)
                            loc+=2*len(terms)
                        structures[istruct] = "Done"
                        val = eval(reminder, {'cmath':cmath} )*signs[iterm]
                        if(new_couplings[loc]) :
                            new_couplings[loc] += val
                        else :
                            new_couplings[loc] = val
            iterm+=1
    except :
        # evaluation failed for this structure: the exception must actually
        # be raised (not just constructed) for the vertex to be skipped
        raise SkipThisVertex()
    # check we've handled all the terms
    for val in structures:
        if(val!="Done") :
            raise SkipThisVertex()
    # special for FFVT
    if(lorentztag=="FFVT") :
        t_couplings=new_couplings
        new_couplings=[False]*9
        for i in range(0,9) :
            j = i+3*(i//3)
            k = i+3+3*(i//3)
            if( not t_couplings[j]) :
                new_couplings[i] = t_couplings[k]
            else :
                new_couplings[i] = t_couplings[j]
    # set the couplings
    for icoup in range(0,len(new_couplings)) :
        if(new_couplings[icoup]) :
            new_couplings[icoup] = '(%s) * (%s) *(%s)' % (new_couplings[icoup],prefactors,value)
    if(len(all_couplings)==0) :
        all_couplings=new_couplings
    else :
        for icoup in range(0,len(new_couplings)) :
            if(new_couplings[icoup] and all_couplings[icoup]) :
                all_couplings[icoup] = '(%s) + (%s
) ' % (new_couplings[icoup],all_couplings[icoup]) elif(new_couplings[icoup]) : all_couplings[icoup] = new_couplings[icoup] # return the results return (ordering,all_couplings) def processTensorCouplings(lorentztag,vertex,model,parmsubs,all_couplings,order) : # check for fermion vertices (i.e. has L/R couplings) fermions = "FF" in lorentztag # test and process the values of the couplings tval = ["U
nknown"]*3 value = ["Unknown"]*3 # loop over the colours for icolor in range(0,len(all_couplings)) : lmax = len(all_couplings[icolor]) if(fermions) : lmax //=3 # loop over the different terms for ix in range(0,lmax) : test = [False]*3 imax=3 # normal case if( not fermions ) : test[0] = all_couplings[icolor][ix] imax=1 else : # first case vector but no L/R couplings if( not all_couplings[icolor][lmax+ix] and not all_couplings[icolor][2*lmax+ix] ) : test[0] = all_couplings[icolor][ix] imax=1 # special for mass terms and massless particles if(not all_couplings[icolor][ix]) : code = abs(vertex.particles[order[0]-1].pdg_code) if(ix==6 and (code ==12 or code ==14 or code==16) ) : continue else : raise SkipThisVertex() # second case L/R couplings elif( not all_couplings[icolor][ix] ) : # L=R, replace with vector if(all_couplings[icolor][lmax+ix] == all_couplings[icolor][2*lmax+ix]) : test[0] = all_couplings[icolor][lmax+ix] imax=1 else : test[1] = all_couplings[icolor][lmax+ix] test[2]
''' Copyright (C) 2012 mental
smash.org <contact@mentalsmash.org> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANT
Y; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. '''
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Custom managers for the Division model. """ from __future__ import unicode_literals from calaccess_processed.managers import BulkLoadSQLManager class OCDAssemblyDivisionManager(BulkLoadSQLManager): """ Custom manager for state as
sembly OCD Divisions. """ def get_queryset(self): """ Filters down to state assembly divisions. """ qs = super(OCDAssemblyDivisionManager, self).get_queryset()
return qs.filter(subid1='ca', subtype2='sldl') class OCDSenateDivisionManager(BulkLoadSQLManager): """ Custom manager for state senate OCD Divisions. """ def get_queryset(self): """ Filters down to state senate divisions. """ qs = super(OCDSenateDivisionManager, self).get_queryset() return qs.filter(subid1='ca', subtype2='sldu') class OCDCaliforniaDivisionManager(BulkLoadSQLManager): """ Custom manager for OCD Divisions in California. """ def get_queryset(self): """ Filters down to divisions in California. """ qs = super(OCDCaliforniaDivisionManager, self).get_queryset() return qs.filter(subid1='ca') def california(self): """ Returns state of California division. """ qs = super(OCDCaliforniaDivisionManager, self).get_queryset() return qs.get(id='ocd-division/country:us/state:ca')
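
# A hedged illustration (the ID grammar is the documented Open Civic Data
# format; the specific numbers are made up): the `subtype2`/`subid1` filters
# above key off segments of OCD division identifiers such as
#
#   ocd-division/country:us/state:ca            -> the state itself
#   ocd-division/country:us/state:ca/sldl:42    -> a state assembly (lower) seat
#   ocd-division/country:us/state:ca/sldu:13    -> a state senate (upper) seat
#
# so the assembly manager returns only `sldl` rows and the senate manager
# only `sldu` rows, while OCDCaliforniaDivisionManager keeps everything
# under state:ca.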
ture, osmTableData['polygons'], workspace, UPPER=None ) for cls in alOut: if cls not in mergeOut: mergeOut[cls] = [alOut[cls]] else: mergeOut[cls].append(alOut[cls]) # ************************************************************************ # # 5 - Get data from lines table (railway | waterway) # # ************************************************************************ # """ bfOut = { cls_code : [rst_name, ...], ... } """ bfOut = basic_buffer( osm_db, nomenclature, osmTableData['lines'], workspace ) for cls in bfOut: if cls not in mergeOut: mergeOut[cls] = bfOut[cls] else: mergeOut[cls] += bfOut[cls] # ************************************************************************ # # 7 - Assign untagged Buildings to tags # # ************************************************************************ # if nomenclature != "GLOBE_LAND_30": buildsOut = assign_points_tag_to_buildings( osm_db, nomenclature, osmTableData['points'], osmTableData['polygons'], workspace ) for cls in buildsOut: if cls not in mergeOut: mergeOut[cls] = [buildsOut[cls]] else: mergeOut[cls].append(buildsOut[cls]) # ************************************************************************ # # Produce LULC Map # # ************************************************************************ # """ Merge all results for one cls into one raster mergeOut = { cls_code : [rst_name, rst_name, ...], ... } into mergeOut = { cls_code : patched_raster, ... } """ for cls in mergeOut: if len(mergeOut[cls]) == 1: mergeOut[cls] = mergeOut[cls][0] else: mosaic_raster(mergeOut[cls], 'mosaic_{}'.format(str(cls))) mergeOut[cls] = 'mosaic_{}'.format(str(cls)) """ Merge all Class Raster using a priority rule """ __priorities = PRIORITIES[nomenclature] lst_rst = [] for cls in __priorities: if cls not in mergeOut: continue else: lst_rst.append(mergeOut[cls]) mosaic_raster(lst_rst, os.path.splitext(os.path.basename(lulcRst))[0]) grass_converter(os.path.splitext(os.path.basename(lulcRst))[0], lulcRst) return lulcRst # ---------------------------------------------------------------------------- # # ---------------------------------------------------------------------------- # # ---------------------------------------------------------------------------- # def vector_based(osmdata, nomenclature, refRaster, lulcShp, epsg=3857, overwrite=None): """ Convert OSM Data into Land Use/Land Cover Information An vector based approach. TODO: Add a detailed description. 
""" # ************************************************************************ # # Python Modules from Reference Packages # # ************************************************************************ # import os import pandas # ************************************************************************ # # GASP dependencies # # ************************************************************************ # from gasp.oss.ops import create_folder from gasp.grs import run_grass from gasp.gdal.manage.general import ogr_merge # Rules to be used from gasp.osm2lulc.rules.rule1 import vector_selection as selection from gasp.osm2lulc.rules.rule2 import vector_get_roads as get_roads from gasp.osm2lulc.rules.rule3_4 import vector_area as area from gasp.osm2lulc.rules.rule5 import vector_base_buffer as basic_buffer from gasp.osm2lulc.rules.rule7 import vector_assign_pntags_to_build # ************************************************************************ # # Global Settings # # ************************************************************************ # from .var import osmTableData, PRIORITIES workspace = os.path.join(os.path.dirname(lulcShp), 'osmtolulc') # Check if workspace exists if os.path.exists(workspace): if overwrite: create_folder(workspace) else: raise ValueError('Path {} already exists'.format(workspace)) else: create_folder(workspace) # ************************************************************************ # # Convert OSM file to SQLITE DB # # ************************************************************************ # osm_db = osm_to_sqdb(osmdata, os.path.join(workspace, 'osm.sqlite')) # ************************************************************************ # # Transform SRS of OSM Data # # ************************************************************************ # osmTableData = osm_project(osm_db, epsg) # ************************************************************************ # # Start a GRASS GIS Session # # ************************************************************************ # grass_base = run_grass(workspace, grassBIN='grass74', location='grloc', srs=epsg) import grass.script as grass import grass.script.setup as gsetup gsetup.init(grass_base, workspace, 'grloc', 'PERMANENT') # ************************************************************************ # # IMPORT SOME GASP MODULES FOR GRASS GIS # # ************************************************************************ # from gasp.grsconv import grass_converter from gasp.grs.g import raster_to_region from gasp.grs.v.overlay import union from gasp.grs.v.overlay import erase from gasp.grs.db import reset_table # ************************************************************************ # # SET GRASS GIS LOCATION EXTENT # # ************************************************************************ # extRst = grass_converter(refRaster, 'extent_raster') raster_to_region(extRst) # ************************************************************************ # # MapResults # # ************************************************************************ # mergeOut = {} # ************************************************************************ # # 1 - Selection Rule # # ************************************************************************ # """ selOut = { cls_code : rst_name, ... 
} """ selOut = selection( osm_db, nomenclature, osmTableData['polygons'], workspace ) for cls in selOut: mergeOut[cls] = [selOut[cls]] # ************************************************************************ # # 2 - Get Information About Roads Location # # ************************************************************************ # """ roads = { cls_code : rst_name, ... } """ roads = get_roads( osm_db, nomenclature,
osmTableData['lines'], osmTableData['polygons'], workspace ) for cls in roads: if cls not in mergeOut: mergeOut[cls] = [roads[cls]] else: mergeOut[cls].append(roads[cls]) # ***************************************************************
********* # # 3 - Area Upper than # # ************************************************************************ # """ auOut = { cls_code : rst_name, ... } """ auOut = area(osm_db, nomenclature, osmTableData['polygons'], workspace, UPPER=True) for cls in auOut: if cls not in mergeOut: mergeOut[cls] = [auOut[cls]] else: mergeOut[cls].append(auOut[cls]) # ************************************************************************ # # 4 - Area Lower than # # ************************************************************************ # """ alOut = { cls_code : rst_name, ...
# -*- coding: utf-8 -*- from django.db import models, migrations class Migration(migrations.Migra
tion): dependencies = [ ('entries', '0005_resultsmode_json'), ] operations = [ migrations.AlterField( model_name='resultsmode', name='json
', field=models.TextField(default='', blank=True), ), ]
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for boosted_trees losses."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import numpy as np

from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest


class LossesTest(test_util.TensorFlowTestCase):

  def test_per_example_exp_loss(self):

    def _logit(p):
      return np.log(p) - np.log(1 - p)

    labels_positive = array_ops.ones([10, 1], dtypes.float32)
    weights = array_ops.ones([10, 1], dtypes.float32)
    labels_negative = array_ops.zeros([10, 1], dtypes.float32)
    predictions_probs = np.array(
        [[0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9],
         [0.99]],
        dtype=np.float32)
    prediction_logits = _logit(predictions_probs)
    eps = 0.2
    with self.test_session():
      pred
ictions_tensor = constant_op.constant( prediction_logits, dtype=dtypes.float32) loss_for_positives, _ = losses.per_example_exp_loss( labels_positive, weights, predictions_tensor, eps=eps) loss_for_negatives, _ = losses.per_example_exp_loss( labels_negative, weights, predictions_tensor, eps=eps) pos_loss = loss_for_positives.eval() neg_los
s = loss_for_negatives.eval()

      # For positive labels, points <= 0.3 get max loss of e.
      # For negative labels, these points have minimum loss of 1/e.
      for i in range(2):
        self.assertEqual(math.exp(1), pos_loss[i])
        self.assertEqual(math.exp(-1), neg_loss[i])

      # For positive labels, points with predictions 0.7 and larger get
      # minimum loss value of 1/e. For negative labels, these points are
      # wrongly classified and get loss e.
      for i in range(6, 10):
        self.assertEqual(math.exp(-1), pos_loss[i])
        self.assertEqual(math.exp(1), neg_loss[i])

      # Points in between 0.5-eps and 0.5+eps get loss exp(-label_m*y), where
      # y = 1/eps * x - 1/(2*eps), x is the probability and label_m is either
      # 1 or -1 (for a label of 0).
      for i in range(2, 6):
        self.assertAlmostEqual(
            math.exp(-1.0 * (predictions_probs[i] * 1.0 / eps - 0.5 / eps)),
            pos_loss[i])
        self.assertAlmostEqual(
            math.exp(1.0 * (predictions_probs[i] * 1.0 / eps - 0.5 / eps)),
            neg_loss[i])

  def test_per_example_squared_loss(self):

    def _squared_loss(p, y):
      return np.mean(1.0 * (p - y) * (p - y))

    labels = np.array([[0.123], [224.2], [-3], [2], [.3]], dtype=np.float32)
    weights = array_ops.ones([5, 1], dtypes.float32)
    predictions = np.array(
        [[0.123], [23.2], [233], [52], [3]], dtype=np.float32)

    with self.test_session():
      loss_tensor, _ = losses.per_example_squared_loss(labels, weights,
                                                       predictions)
      loss = loss_tensor.eval()
      for i in range(5):
        self.assertAlmostEqual(
            _squared_loss(labels[i], predictions[i]), loss[i])


if __name__ == "__main__":
  googletest.main()
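
# A standalone numpy sketch (ours, not TensorFlow API) of the piecewise
# exponential loss the comments above describe: outside the band
# [0.5 - eps, 0.5 + eps] the loss saturates at e or 1/e, and inside it
# follows exp(-label_m * y) with y = x/eps - 0.5/eps and label_m in {+1, -1}.
#
#   def exp_loss_reference(probs, label_m, eps=0.2):
#     y = np.clip(probs / eps - 0.5 / eps, -1.0, 1.0)
#     return np.exp(-label_m * y)
#
#   exp_loss_reference(np.array([0.1, 0.5, 0.9]), label_m=1)
#   # -> [e, 1, 1/e], matching pos_loss at those probabilities.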
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import pytest from pants.backend.core.tasks.list_goals import ListGoals from pants.backend.core.tasks.task import Task from pants.goal.goal import Goal from pants.goal.task_registrar import TaskRegistrar from pants_test.tasks.task_test_base import ConsoleTaskTestBase class ListGoalsTest(ConsoleTaskTestBase): _INSTALLED_HEADER = 'Installed goals:' _UNDOCUMENTED_HEADER = 'Undocumented goals:' _LIST_GOALS_NAME = 'goals' _LIST_GOALS_DESC = 'List all documented goals.' _LLAMA_NAME = 'llama' _LLAMA_DESC = 'With such handsome fiber, no wonder everyone loves Llamas.' _ALPACA_NAME = 'alpaca' @classmethod def task_type(cls): return ListGoals class LlamaTask(Task): pass class AlpacaTask(Task): pass def test_list_goals(self): Goal.clear() self.assert_console_output(self._INSTALLED_HEADER) TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\ .install().with_description(self._LIST_GOALS_DESC) self.assert_console_output( self._INSTALLED_HEADER, ' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC), ) TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\ .install().with_description(self._LLAMA_DESC) self.assert_console_output( self._INSTALLED_HEADER, ' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC), ' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC), ) TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\ .install() self.assert_console_output( self._INSTALLED_HEADER, ' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC), ' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC), ) def test_list_goals_all(
self): Goal.clear() TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\ .install().with_description(self._LIST_GOALS_DESC) Ta
skRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\ .install().with_description(self._LLAMA_DESC) TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\ .install() self.assert_console_output( self._INSTALLED_HEADER, ' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC), ' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC), '', self._UNDOCUMENTED_HEADER, ' {0}'.format(self._ALPACA_NAME), options={ 'all': True } ) # TODO(John Sirois): Re-enable when fixing up ListGoals `--graph` in # https://github.com/pantsbuild/pants/issues/918 @pytest.mark.xfail def test_list_goals_graph(self): Goal.clear() TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\ .install().with_description(self._LIST_GOALS_DESC) TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\ .install().with_description(self._LLAMA_DESC) TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\ .install() self.assert_console_output( 'digraph G {\n rankdir=LR;\n graph [compound=true];', ' subgraph cluster_goals {\n node [style=filled];\n color = blue;\n label = "goals";', ' goals_goals [label="goals"];', ' }', ' subgraph cluster_llama {\n node [style=filled];\n color = blue;\n label = "llama";', ' llama_llama [label="llama"];', ' }', ' subgraph cluster_alpaca {\n node [style=filled];\n color = blue;\n label = "alpaca";', ' alpaca_alpaca [label="alpaca"];', ' }', ' alpaca_alpaca -> llama_llama [ltail=cluster_alpaca lhead=cluster_llama];', '}', options={ 'graph': True } )
import json import argparse import os from listener.config import MIN_TIME, MAX_TIME, PRESSURE_AV
ERAGE_AM
OUNT from listener.replayer import Replayer from listener.utilities import convert_time, average from listener.calculate import (calculate_temp_NTC, calculate_press, calculate_height, calculate_gyr) from collections import deque from io import StringIO parser = argparse.ArgumentParser(prog="Replayer", description="Replay a CanSat log file for " "listener.") parser.add_argument("input_file") args = parser.parse_args() input_file = os.path.abspath(args.input_file) input_handle = open(input_file, "r") out_file = "static.json" out_handle = open(out_file, "w") replayer = Replayer(MIN_TIME, MAX_TIME, input_handle, StringIO(), False, True) full_data = replayer.start() last_pressure_values = deque(maxlen=PRESSURE_AVERAGE_AMOUNT) data_temperature = [] data_pressure = [] data_height = [] data_gyro = [] for datapoint in full_data: if not MIN_TIME <= datapoint["Time"] <= MAX_TIME: continue # Skip pressure = calculate_press(datapoint["Press"]) last_pressure_values.append(pressure) time = convert_time(datapoint["Time"] - MIN_TIME) data_temperature.append([time, calculate_temp_NTC(datapoint["NTC"])]) pressure = average(last_pressure_values) data_pressure.append([time, pressure]) data_height.append([time, calculate_height(pressure)]) data_gyro.append([time, calculate_gyr(datapoint["GyrZ"])]) all_data = { "Temp": data_temperature, "Press": data_pressure, "Height": data_height, "Gyro": data_gyro } out_handle.write(json.dumps(all_data)) input_handle.close() out_handle.close()
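
# A tiny self-contained sketch (toy numbers, not real telemetry) of the
# smoothing trick used above: deque(maxlen=N) silently discards the oldest
# reading, so averaging over it gives a rolling mean of the last N samples.
#
#   >>> from collections import deque
#   >>> window = deque(maxlen=3)
#   >>> for raw in [1000, 1002, 998, 1004]:
#   ...     window.append(raw)
#   >>> sum(window) / len(window)    # only 1002, 998, 1004 remain
#   1001.3333333333334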
## Automatically adapted for numpy.oldnumeric Apr 14, 2008 by -c

from builtins import range


def writeMeshMatlabFormat(mesh, meshFileBase):
    """
    build array data structures for a matlab finite element mesh
    representation and write them to a file to view and play with in matlab

    in matlab you can then plot the mesh with

    pdemesh(p,e,t)

    where

      p is the vertex or point matrix
      e is the edge matrix, and
      t is the element matrix

    points matrix is [2 x num vertices]
      format:
        row 1 = x coord,
        row 2 = y coord for nodes in mesh

    edge matrix is [7 x num edges]
      format:
        row 1 = start vertex number
        row 2 = end vertex number
        row 3 = start value in edge parameterization, should be 0
        row 4 = end value in edge parameterization, should be 1
        row 5 = global edge id, base 1
        row 6 = subdomain on left, always 0 for now
        row 7 = subdomain on right, always 1 for now

    element matrix is [4 x num elements]
      format:
        row 1 = vertex 1 global number
        row 2 = vertex 2 global number
        row 3 = vertex 3 global number
        row 4 = triangle subdomain number
      where 1,2,3 is a local counter clockwise numbering of vertices in
      triangle
    """
    import numpy as numpy
    matlabBase = 1
    p = numpy.zeros((2,mesh['nNodes_global']),numpy.float_)
    e = numpy.zeros((7,mesh['nElementBoundaries_global']),numpy.float_)
    t = numpy.zeros((4,mesh['nElements_global']),numpy.float_)

    #load p,e,t and write file
    mfile = open(meshFileBase+'.m','w')

    mfile.write('p = [ ... \n')
    for nN in range(mesh['nNodes_global']):
        p[0,nN]=mesh['nodeArray'][nN,0]
        p[1,nN]=mesh['nodeArray'][nN,1]
        mfile.write('%g %g \n' % tuple(p[:,nN]))
    mfile.write(']; \n')
    mfile.write("p = p\';\n")  #need transpose for matlab

    mfile.write('e = [ ... \n')
    for ebN in range(mesh['nElementBoundaries_global']):
        e[0,ebN]=mesh['elementBoundaryNodesArray'][ebN,0] + matlabBase #global node number of start node base 1
        e[1,ebN]=mesh['elementBoundaryNodesArray'][ebN,1] + matlabBase #global node number of end node base 1
        e[2,ebN]=0.0 #edge param. is 0 to 1
        e[3,ebN]=1.0
        e[4,ebN]=ebN + matlabBase #global edge number base 1
        e[5,ebN]=0 #subdomain to left
        e[6,ebN]=1 #subdomain to right
        mfile.write('%g %g %g %g %g %g %g \n' % tuple(e[:,ebN]))
    mfile.write(']; \n')
    mfile.write("e = e\';\n")  #need transpose for matlab

    #write triangles last
    mfile.write('t = [ ... \n')
    for eN in range(mesh['nElements_global']):
        t[0,eN]=mesh['elementNodesArray'][eN,0]+matlabBase #global node number for vertex 0
        t[1,eN]=mesh['elementNodesArray'][eN,1]+matlabBase #global node number for vertex 1
        t[2,eN]=mesh['elementNodesArray'][eN,2]+matlabBase #global node number for vertex 2
        t[3,eN]=1 #subdomain id
        mfile.write('%g %g %g %g \n' % tuple(t[:,eN]))
    mfile.write(']; \n')
    mfile.write("t = t\';\n")  #need transpose for matlab

    mfile.close()

    return p,e,t

########################################################################
if __name__ == '__main__':
    import os,shelve
    import ppmatlab,numpy.oldnumeric as numpy

    os.listdir('./results')

    filename = './results/re_forsyth2_ss_2d_pre_forsyth2_ss_2d_c0p1_n_mesh_results.dat'

    res = shelve.open(filename)

    mesh = res['mesh']

    mmfile = 'forsyth2MeshMatlab'
    p,e,t = ppmatlab.writeMeshMatlabFormat(mesh,mmfile)
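
# A hedged sketch (toy one-triangle mesh, not from a real proteus run) of the
# dict layout writeMeshMatlabFormat() consumes:
#
#   toy_mesh = {
#       'nNodes_global': 3,
#       'nElementBoundaries_global': 3,
#       'nElements_global': 1,
#       # x, y (z unused) coordinates of the three vertices
#       'nodeArray': numpy.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]),
#       # start/end node of each boundary edge
#       'elementBoundaryNodesArray': numpy.array([[0, 1], [1, 2], [2, 0]]),
#       # counter-clockwise vertex numbering of the single triangle
#       'elementNodesArray': numpy.array([[0, 1, 2]]),
#   }
#   p, e, t = writeMeshMatlabFormat(toy_mesh, 'triMeshMatlab')
#
# which writes 'triMeshMatlab.m'; running it in MATLAB/Octave defines p, e, t
# ready for pdemesh(p, e, t).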
import sys import vtk from vtk.test import Testing class TestGhostPoints(Testing.vtkTest): def testLinear(self): pts = vtk.vtkPoints() pts.SetNumberOfPoints(4) pts.InsertPoint(0, (0, 0, 0)) pts.InsertPoint(1, (1, 0, 0)) pts.InsertPoint(2, (0.5, 1, 0)) pts.InsertPoint(3, (0.5, 0.5, 1)) te = vtk.vtkTetra() ptIds = te.GetPointIds() for i in range(4): ptIds.SetId(i, i) ghosts = vtk.vtkUnsignedCharArray() ghosts.SetName(vtk.vtkDataSetAttributes.GhostArrayName()) ghosts.SetNumberOfTuples(4) ghosts.SetValue(0, 1) ghosts.SetValue(1, 1) ghosts.SetValue(2, 1) ghosts.SetValue(3, 0) grid = vtk.vtkUnstructuredGrid() grid.Allocate(1, 1) gr
id.InsertNextCell(te.GetCellType(), te.GetPointIds()) grid.SetPoints(pts) grid.GetPointData().AddArray(ghosts) dss = vtk.vtkDataSetSurfaceFilter() dss.SetInputData(grid) dss.
Update() self.assertEqual(dss.GetOutput().GetNumberOfCells(), 3) def testNonLinear(self): pts = vtk.vtkPoints() pts.SetNumberOfPoints(10) pts.InsertPoint(0, (0, 0, 0)) pts.InsertPoint(1, (1, 0, 0)) pts.InsertPoint(2, (0.5, 1, 0)) pts.InsertPoint(3, (0.5, 0.5, 1)) pts.InsertPoint(4, (0.5, 0, 0)) pts.InsertPoint(5, (1.25, 0.5, 0)) pts.InsertPoint(6, (0.25, 0.5, 0)) pts.InsertPoint(7, (0.25, 0.25, 0.5)) pts.InsertPoint(8, (0.75, 0.25, 0.5)) pts.InsertPoint(9, (0.5, 0.75, 0.5)) te = vtk.vtkQuadraticTetra() ptIds = te.GetPointIds() for i in range(10): ptIds.SetId(i, i) ghosts = vtk.vtkUnsignedCharArray() ghosts.SetName(vtk.vtkDataSetAttributes.GhostArrayName()) ghosts.SetNumberOfTuples(10) ghosts.SetValue(0, 1) ghosts.SetValue(1, 1) ghosts.SetValue(2, 1) ghosts.SetValue(3, 0) ghosts.SetValue(4, 1) ghosts.SetValue(5, 1) ghosts.SetValue(6, 1) ghosts.SetValue(7, 0) ghosts.SetValue(8, 0) grid = vtk.vtkUnstructuredGrid() grid.Allocate(1, 1) grid.InsertNextCell(te.GetCellType(), te.GetPointIds()) grid.SetPoints(pts) grid.GetPointData().AddArray(ghosts) ugg = vtk.vtkUnstructuredGridGeometryFilter() ugg.SetInputData(grid) dss = vtk.vtkDataSetSurfaceFilter() dss.SetNonlinearSubdivisionLevel(2) dss.SetInputConnection(ugg.GetOutputPort()) dss.Update() self.assertEqual(dss.GetOutput().GetNumberOfCells(), 48) if __name__ == "__main__": Testing.main([(TestGhostPoints, 'test')])
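
# A hedged note (relies on the standard VTK ghost-type constants): the
# literal ghost value 1 written above is vtkDataSetAttributes.DUPLICATEPOINT,
# so the arrays could be filled more readably as, e.g.
#
#   dup = vtk.vtkDataSetAttributes.DUPLICATEPOINT   # == 1
#   for i in (0, 1, 2):
#       ghosts.SetValue(i, dup)
#   ghosts.SetValue(3, 0)                           # point 3 is locally owned
#
# vtkDataSetSurfaceFilter then drops the tetra face whose corners are all
# marked duplicate, which is why only 3 of the 4 faces survive in testLinear.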
signature unknown pass def connect_after(self, *args, **kwargs): # real signature unknown pass def connect_object(self, *args, **kwargs): # real signature unknown pass def connect_object_after(self, *args, **kwargs): # real signature unknown pass def disconnect(self, *args, **kwargs): # real signature unknown pass def disconnect_by_func(self, *args, **kwargs): # real signature unknown pass def emit(self, *args, **kwargs): # real signature unknown pass def emit_stop_by_name(self, *args, **kwargs): # real signature unknown pass def freeze_notify(self, *args, **kwargs): # real signature unknown pass def get_data(self, *args, **kwargs): # real signature unknown pass def get_properties(self, *args, **kwargs): # real signature unknown pass def get_property(self, *args, **kwargs): # real signature unknown pass def handler_block(self, *args, **kwargs): # real signature unknown pass def handler_block_by_func(self, *args, **kwargs): # real signature unknown pass def handler_disconnect(self, *args, **kwargs): # real signature unknown pass def handler_is_connected(self, *args, **kwargs): # real signature unknown pass def handler_unblock(self, *args, **kwargs): # real signature unknown pass def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown pass def notify(self, *args, **kwargs): # real signature unknown pass def set_data(self, *args, **kwargs): # real signature unknown pass def set_properties(self, *args, **kwargs): # real signature unknown pass def set_property(self, *args, **kwargs): # real signature unknown pass def stop_emission(self, *args, **kwargs): # real signature unknown pass def thaw_notify(self, *args, **kwargs): # real signature unknown pass def weak_ref(self, *args, **kwargs): # real signature unknown pass def __copy__(self, *args, **kwargs): # real signature unknown pass def __deepcopy__(self, *args, **kwargs): # real signature unknown pass def __delattr__(self, name): # real signature unknown; restored from __doc__ """ x.__delattr__('name') <==> del x.name """ pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass def __setattr__(self, name, value): # real signature unknown; restored from __doc__ """ x.__setattr__('name', value) <==> x.name = value """ pass __grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default props = None # (!) real value is '' __dict__ = None # (!) real value is '' __gdoc__ = 'Object GObject\n\nSignals from GObject:\n notify (GParam)\n\n' __gtype__ = None # (!) 
real value is '' class GObjectWeakRef(object): """ A GObject weak reference """ def unref(self, *args, **kwargs): # real signature unknown pass def __call__(self, *more): # real signature unknown; restored from __doc__ """ x.__call__(...) <==> x(...) """ pass def __init__(self, *args, **kwargs): # real signature unknown pass class GParamSpec(object): # no doc def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass def __ne__(self, y): # real signature unknown; restored from __doc__ """ x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass class GPointer(object): # no doc def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __ge__(self, y): # real signature unknown; restored from __doc__ """ x.__ge__(y) <==> x>=y """ pass def __gt__(self, y): # real signature unknown; restored from __doc__ """ x.__gt__(y) <==> x>y """ pass def __hash__(self): # real signature unknown; restored from __doc__ """ x.__hash__() <==> hash(x) """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, y): # real signature unknown; restored from __doc__ """ x.__le__(y) <==> x<=y """ pass def __lt__(self, y): # real signature unknown; restored from __doc__ """ x.__lt__(y) <==> x<y """ pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """ pass def __repr__(self): # real signature unknown; restored from __doc__ """ x.__repr__() <==> repr(x) """ pass __gtype__ = None # (!) real value is '' class GType(object): # no doc def from_name(self, *args, **kwargs): # real signature unknown pass def has_value_table(self, *args, **kwargs): # real signature unknown pass def is_a(self, *args, **kwargs): # re
al signature unknown pass def is_abstract(self, *args, **kwargs): # real signature unknown pass def is_classed(self, *args, **kwargs): # real signature unknown pass def is_deep_derivable(self, *args, **kwargs): # real signature unknown pass def is_derivable(self, *args, **kwargs): # real signature unknown pass def is_instantiatable(self, *args, **kwargs): # real signature unknown pass def is_interface(self, *args, **kwargs): # real signature unknown pass def is_value_abstract(self, *args, **kwargs): # real signature unknown pass def is_value_type(self, *args, **kwargs): # real signature unknown pass def __eq__(self, y): # real signature unknown; restored from __doc__ """ x.__eq__(y) <==> x==y """ pass def __ge__(self, y): # real signature unknown; resto
in range(self.n_restarts_optimizer): theta_initial = \ self._rng.uniform(bounds[:, 0], bounds[:, 1]) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.kernel_._check_bounds_params() self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta, clone_kernel=False) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: self.L_ = cholesky(K, lower=True) # Line 2 except np.linalg.LinAlgError as exc: exc.args = ("The kernel, %s, is not returning a " "positive definite matrix. Try gradually " "increasing the 'alpha' parameter of your " "GaussianProcessRegressor estimator." % self.kernel_,) + exc.args raise self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3 return self def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, optionally also returns its standard deviation (`return_std=True`) or covariance (`return_cov=True`). Note that at most one of the two can be requested. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated. return_std : bool, default=False If True, the standard-de
viation of the predictive distribution at the
query points is returned along with the mean. return_cov : bool, default=False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean. Returns ------- y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets) Mean of predictive distribution a query points. y_std : ndarray of shape (n_samples,), optional Standard deviation of predictive distribution at query points. Only returned when `return_std` is True. y_cov : ndarray of shape (n_samples, n_samples), optional Covariance of joint predictive distribution a query points. Only returned when `return_cov` is True. """ if return_std and return_cov: raise RuntimeError( "At most one of return_std or return_cov can be requested.") if self.kernel is None or self.kernel.requires_vector_input: X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False) else: X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False) if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior if self.kernel is None: kernel = (C(1.0, constant_value_bounds="fixed") * RBF(1.0, length_scale_bounds="fixed")) else: kernel = self.kernel y_mean = np.zeros(X.shape[0]) if return_cov: y_cov = kernel(X) return y_mean, y_cov elif return_std: y_var = kernel.diag(X) return y_mean, np.sqrt(y_var) else: return y_mean else: # Predict based on GP posterior K_trans = self.kernel_(X, self.X_train_) y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star) # undo normalisation y_mean = self._y_train_std * y_mean + self._y_train_mean if return_cov: # Solve K @ V = K_trans.T V = cho_solve((self.L_, True), K_trans.T) # Line 5 y_cov = self.kernel_(X) - K_trans.dot(V) # Line 6 # undo normalisation y_cov = y_cov * self._y_train_std**2 return y_mean, y_cov elif return_std: # Solve K @ V = K_trans.T V = cho_solve((self.L_, True), K_trans.T) # Line 5 # Compute variance of predictive distribution # Use einsum to avoid explicitly forming the large matrix # K_trans @ V just to extract its diagonal afterward. y_var = self.kernel_.diag(X) y_var -= np.einsum("ij,ji->i", K_trans, V) # Check if any of the variances is negative because of # numerical issues. If yes: set the variance to 0. y_var_negative = y_var < 0 if np.any(y_var_negative): warnings.warn("Predicted variances smaller than 0. " "Setting those variances to 0.") y_var[y_var_negative] = 0.0 # undo normalisation y_var = y_var * self._y_train_std**2 return y_mean, np.sqrt(y_var) else: return y_mean def sample_y(self, X, n_samples=1, random_state=0): """Draw samples from Gaussian process and evaluate at X. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Query points where the GP is evaluated. n_samples : int, default=1 Number of samples drawn from the Gaussian process per query point random_state : int, RandomState instance or None, default=0 Determines random number generation to randomly draw samples. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- y_samples : ndarray of shape (n_samples_X, n_samples), or \ (n_samples_X, n_targets, n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points. 
""" rng = check_random_state(random_state) y_mean, y_cov = self.predict(X, return_cov=True) if y_mean.ndim == 1: y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T else: y_samples = \ [rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis] for i in range(y_mean.shape[1])] y_samples = np.hstack(y_samples) return y_samples def log_marginal_likelihood(self, theta=None, eval_gradient=False, clone_kernel=True): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like of shape (n_kernel_params,) default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r''' --- module: debconf short_description: Configure a .deb package description: - Configure a .deb package using debconf-set-selections. - Or just query existing selections. version_added: "1.6" notes: - This module requires the command line debconf tools. - A number of questions have to be answered (depending on the package). Use 'debconf-show <package>' on any Debian or derivative with the package installed to see questions/settings available. - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords. requirements: - debconf - debconf-utils options: name: description: - Name of package to configure. type: str required: true aliases: [ pkg ] question: description: - A debconf configuration setting. type: str aliases: [ selection, setting ] vtype: description: - The type of the value supplied. - C(seen) was added in Ansible 2.2. type: str choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ] value: description: - Value to set the configuration to. type: str aliases: [ answer ] unseen: description: - Do not set 'seen' flag when pre-seeding. type: bool default: no author: - Brian Coca (@bcoca) ''' EXAMPLES = r''' - name: Set default locale to fr_FR.UTF-8 debconf: name: locales question: locales/default_environment_locale value: fr_FR.UTF-8 vtype: select - name: Set to generate locales debconf: name: locales question: locales/locales_to_be_generated value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8 vtype: multiselect - name: Accept oracle license debconf: name: oracle-java7-installer question: shared/accepted-oracle-license-v1-1 value: 'true' vtype: select - name: Specifying package you can register/return the list of questions and current values debconf: name: tzdata ''' from ansible.module_utils._text import to_text from ansible.module_utils.basic import AnsibleModule def get_selections(module, pkg): cmd = [module.get_bin_path('debconf-show', True), pkg] rc, out, err = module.run_command(' '.join(cmd)) if rc != 0: module.fail_json(msg=err) selections = {} for line in out.splitlines(): (key, value) = line.split(':', 1) selections[key.strip('*').strip()] = value.strip() return selections def set_selection(module, pkg, question, vtype, value, unseen): setsel = module.get_bin_path('debconf-set-selections', True) cmd = [setsel] if unseen: cmd.append('-u') if vtype == 'boolean': if value == 'True': value = 'true' elif value == 'False': value = 'false' data = ' '.join([pkg, question, vtype, value]) return module.run_command(cmd, data=data) def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True, aliases=['pkg']), question=dict(type='str', aliases=['selection', 'setting']), vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']), value=dict(type='str', aliases=['answer']), unseen=dict(type='bool'), ), required_together=(['question', 'vtype', 'value'],), supports_check_mode=True, ) # TODO: enable passing array of options and/or debconf file from get-selections dump pkg = module.params["name"] question = module.params["question"] vtype = module.params["vtype"] value = 
module.params["value"] unseen = module.params["unseen"] prev = get_selections(module, pkg) changed = False msg = "" if question is not None: if vtype is None or value is None: module.fail_json(msg="when supplying a question you must supply a valid vtype and value") # if question doesn't exist, value cannot match if question not in prev: changed = True else: existing = prev[question] # ensure we compare booleans supplied to the way debconf sees them (true/false strings) if vtype == 'boolean': value = to_text(value).lower() existing = to_text(prev[question]).lower() if value != existing: changed = True if changed: if not module.check_mode: rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) if rc: module.fail_json(msg=e) curr = {question: value} if question in prev: prev = {question: prev
[question]} else: prev[question] = '' if module._diff: after = prev.copy() after.update(curr) diff_
dict = {'before': prev, 'after': after} else: diff_dict = {} module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict) module.exit_json(changed=changed, msg=msg, current=prev) if __name__ == '__main__': main()
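

# --- Illustrative sketch (not part of the module above) ---
# set_selection() feeds debconf-set-selections a single stdin line of the
# form "<package> <question> <vtype> <value>". Using the locales example
# from EXAMPLES, the composed line looks like this:
pkg = 'locales'
question = 'locales/default_environment_locale'
vtype = 'select'
value = 'fr_FR.UTF-8'
data = ' '.join([pkg, question, vtype, value])
print(data)  # -> locales locales/default_environment_locale select fr_FR.UTF-8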
"endorsed_at": None, "abuse_flagged": False, "voted": True, "vote_count": 4, "children": [], "editable_fields": ["abuse_flagged", "voted"], }] self.register_get_thread_response({ "id": self.thread_id, "course_id": unicode(self.course.id), "thread_type": "discussion", "children": source_comments, "resp_total": 100, }) response = self.client.get(self.url, {"thread_id": self.thread_id}) self.assert_response_correct( response, 200, { "results": expected_comments, "next": "http://testserver/api/discussion/v1/comments/?page=2&thread_id={}".format( self.thread_id ), "previous": None, } ) self.assert_query_params_equal( httpretty.httpretty.latest_requests[-2], { "recursive": ["True"], "resp_skip": ["0"], "resp_limit": ["10"], "user_id": [str(self.user.id)], "mark_as_read": ["False"], } ) def test_pagination(self): """ Test that pagination parameters are correctly plumbed through to the comments service and that a 404 is correctly returned if a page past the end is requested """ self.register_get_user_response(self.user) self.register_get_thread_response(make_minimal_cs_thread({ "id": self.thread_id, "course_id": unicode(self.course.id), "thread_type": "discussion", "children": [], "resp_total": 10, })) response = self.client.get( self.url, {"thread_id": self.thread_id, "page": "18", "page_size": "4"} ) self.assert_response_correct( response, 404, {"developer_message": "Not found."} ) self.assert_query_params_equal( httpretty.httpretty.latest_requests[-2], { "recursive": ["True"], "resp_skip": ["68"], "resp_limit": ["4"], "user_id": [str(self.user.id)], "mark_as_read": ["False"], } ) @httpretty.activate @disable_signal(api, 'comment_deleted') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetDeleteTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for ThreadViewSet delete""" def setUp(self): super(CommentViewSetDeleteTest, self).setUp() self.url = reverse("comment-detail", kwargs={"comment_id": "test_comm
ent"}) self.comment_id = "test_comment" def test_basic(self): self.register_get_user_response(self.user) cs_thread = make_minimal_cs_thread({ "id": "test_thread", "course_id": unicode(self.course.id), }) self.register_get_thread_response(cs_thread) cs_comment = make_minimal_cs_comment({ "id": self.comment_id, "course_id": cs_thread["course_id"], "thread_id": cs_t
hread["id"], "username": self.user.username, "user_id": str(self.user.id), }) self.register_get_comment_response(cs_comment) self.register_delete_comment_response(self.comment_id) response = self.client.delete(self.url) self.assertEqual(response.status_code, 204) self.assertEqual(response.content, "") self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/comments/{}".format(self.comment_id) ) self.assertEqual(httpretty.last_request().method, "DELETE") def test_delete_nonexistent_comment(self): self.register_get_comment_error_response(self.comment_id, 404) response = self.client.delete(self.url) self.assertEqual(response.status_code, 404) @httpretty.activate @disable_signal(api, 'comment_created') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetCreateTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CommentViewSet create""" def setUp(self): super(CommentViewSetCreateTest, self).setUp() self.url = reverse("comment-list") def test_basic(self): self.register_get_user_response(self.user) self.register_get_thread_response( make_minimal_cs_thread({ "id": "test_thread", "course_id": unicode(self.course.id), "commentable_id": "test_topic", }) ) self.register_post_comment_response( { "id": "test_comment", "username": self.user.username, "created_at": "2015-05-27T00:00:00Z", "updated_at": "2015-05-27T00:00:00Z", }, thread_id="test_thread" ) request_data = { "thread_id": "test_thread", "raw_body": "Test body", } expected_response_data = { "id": "test_comment", "thread_id": "test_thread", "parent_id": None, "author": self.user.username, "author_label": None, "created_at": "2015-05-27T00:00:00Z", "updated_at": "2015-05-27T00:00:00Z", "raw_body": "Test body", "rendered_body": "<p>Test body</p>", "endorsed": False, "endorsed_by": None, "endorsed_by_label": None, "endorsed_at": None, "abuse_flagged": False, "voted": False, "vote_count": 0, "children": [], "editable_fields": ["abuse_flagged", "raw_body", "voted"], } response = self.client.post( self.url, json.dumps(request_data), content_type="application/json" ) self.assertEqual(response.status_code, 200) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) self.assertEqual( urlparse(httpretty.last_request().path).path, "/api/v1/threads/test_thread/comments" ) self.assertEqual( httpretty.last_request().parsed_body, { "course_id": [unicode(self.course.id)], "body": ["Test body"], "user_id": [str(self.user.id)], } ) def test_error(self): response = self.client.post( self.url, json.dumps({}), content_type="application/json" ) expected_response_data = { "field_errors": {"thread_id": {"developer_message": "This field is required."}} } self.assertEqual(response.status_code, 400) response_data = json.loads(response.content) self.assertEqual(response_data, expected_response_data) @disable_signal(api, 'comment_edited') @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True}) class CommentViewSetPartialUpdateTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase): """Tests for CommentViewSet partial_update""" def setUp(self): super(CommentViewSetPartialUpdateTest, self).setUp() httpretty.reset() httpretty.enable() self.addCleanup(httpretty.disable) self.register_get_user_response(self.user) self.url = reverse("comment-detail", kwargs={"comment_id": "test_comment"}) cs_thread = make_minimal_cs_thread({ "id": "test_thread", "course_id": unicode(self.course.id), }) 
self.register_get_thread_response(cs_thread) cs_comment = make_minimal_cs_comment({ "id": "test_comment", "course_id": cs_thread["course_id"], "thread_id": cs_thread["id"], "username": self.user.username,