Dataset schema (column, dtype, observed range over the split). Each record below lists these fields in order, followed by the file content:

    repo_name   stringlengths   5 .. 100
    path        stringlengths   4 .. 231
    language    stringclasses   1 value
    license     stringclasses   15 values
    size        int64           6 .. 947k
    score       float64         0 .. 0.34
    prefix      stringlengths   0 .. 8.16k
    middle      stringlengths   3 .. 512
    suffix      stringlengths   0 .. 8.17k
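The prefix/middle/suffix columns follow the fill-in-the-middle convention: concatenated, they reproduce the (possibly preview-truncated) source file. A minimal hedged sketch of reassembling one row; the `row` dict access is an assumption based on the schema above:

def reassemble(row):
    """Rebuild the original file text from a FIM-split dataset row."""
    # The three string columns are assumed to partition the file: everything
    # before the masked span, the span itself, and everything after it.
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical usage:
# text = reassemble(dataset[0])
# print(text[:120])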
liuyi1112/rst2pdf
rst2pdf/writer.py
Python
mit
860
0
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms

from StringIO import StringIO

from docutils import writers

from rst2pdf import createpdf


class PdfWriter(writers.Writer):

    def __init__(self, builder):
        writers.Writer.__init__(self)
        self.builder = builder
        self.output = u''

    # Note: the source had ('pdf') and ('writers'), which are plain strings;
    # a one-element tuple needs the trailing comma.
    supported = ('pdf',)
    """Formats this writer supports."""

    config_section = 'pdf writer'
    config_section_dependencies = ('writers',)

    """Final translated form of `document`."""

    def translate(self):
        sio = StringIO('')
        createpdf.RstToPdf(sphinx=True).createPdf(
            doctree=self.document, output=sio, compressed=False)
        self.output = unicode(sio.getvalue(), 'utf-8', 'ignore')

    def supports(self, format):
        """This writer supports all format-specific elements."""
        return 1
qiuxs/ice-demos
python/Ice/properties/Client.py
Python
gpl-2.0
3,232
0.003403
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# **********************************************************************

import sys, traceback, Ice

slice_dir = Ice.getSliceDir()
if not slice_dir:
    print(sys.argv[0] + ': Slice directory not found.')
    sys.exit(1)

Ice.loadSlice("'-I" + slice_dir + "' Props.ice")
import Demo


def menu():
    print("""
usage:
1: set properties (batch 1)
2: set properties (batch 2)
c: show current properties
s: shutdown server
x: exit
?: help
""")


def show(admin):
    props = admin.getPropertiesForPrefix("Demo")
    print("Server's current settings:")
    for k, v in props.items():
        print("  " + k + "=" + v)


class Client(Ice.Application):
    def run(self, args):
        if len(args) > 1:
            print(self.appName() + ": too many arguments")
            return 1

        props = Demo.PropsPrx.checkedCast(
            self.communicator().propertyToProxy("Props.Proxy"))
        if props is None:
            print("invalid proxy")
            return 1

        admin = Ice.PropertiesAdminPrx.checkedCast(
            self.communicator().propertyToProxy("Admin.Proxy"))

        batch1 = {}
        batch1["Demo.Prop1"] = "1"
        batch1["Demo.Prop2"] = "2"
        batch1["Demo.Prop3"] = "3"

        batch2 = {}
        batch2["Demo.Prop1"] = "10"
        batch2["Demo.Prop2"] = ""  # An empty value removes this property
        batch2["Demo.Prop3"] = "30"

        show(admin)
        menu()

        c = None
        while c != 'x':
            try:
                sys.stdout.write("==> ")
                sys.stdout.flush()
                c = sys.stdin.readline().strip()
                if c == "1" or c == "2":
                    # (the source used the `and/or` idiom here; a ternary is clearer)
                    propsDict = batch1 if c == "1" else batch2
                    print("Sending:")
                    for k, v in propsDict.items():
                        if k.startswith("Demo"):
                            print("  " + k + "=" + v)
                    print("")
                    admin.setProperties(propsDict)
                    print("Changes:")
                    changes = props.getChanges()
                    if len(changes) == 0:
                        print("  None.")
                    else:
                        for k, v in changes.items():
                            sys.stdout.write("  " + k)
                            if len(v) == 0:
                                print(" was removed")
                            else:
                                print(" is now " + v)
                elif c == "c":
                    show(admin)
                elif c == "s":
                    props.shutdown()
                elif c == "x":
                    pass  # Nothing to do
                elif c == "?":
                    menu()
                else:
                    print("unknown command `" + c + "'")
                    menu()
            except KeyboardInterrupt:
                break
            except EOFError:
                break
            except Ice.Exception as ex:
                print(ex)

        return 0


app = Client()
sys.exit(app.main(sys.argv, "config.client"))
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/testing/test_env.py
Python
mit
8,248
0.013337
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Sets environment variables needed to run a chromium unit test."""

import os
import stat
import subprocess
import sys

# This is hardcoded to be src/ relative to this script.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'


def get_sandbox_env(env):
  """Returns the environment flags needed for the SUID sandbox to work."""
  extra_env = {}
  chrome_sandbox_path = env.get(CHROME_SANDBOX_ENV, CHROME_SANDBOX_PATH)
  # The above would silently disable the SUID sandbox if the env value were
  # an empty string. We don't want to allow that. http://crbug.com/245376
  # TODO(jln): Remove this check once it's no longer possible to disable the
  # sandbox that way.
  if not chrome_sandbox_path:
    chrome_sandbox_path = CHROME_SANDBOX_PATH
  extra_env[CHROME_SANDBOX_ENV] = chrome_sandbox_path
  return extra_env


def trim_cmd(cmd):
  """Removes internal flags from cmd since they're just used to communicate
  from the host machine to this script running on the swarm slaves."""
  sanitizers = ['asan', 'lsan', 'msan', 'tsan']
  internal_flags = frozenset('--%s=%d' % (name, value)
                             for name in sanitizers
                             for value in [0, 1])
  return [i for i in cmd if i not in internal_flags]


def fix_python_path(cmd):
  """Returns the fixed command line to call the right python executable."""
  out = cmd[:]
  if out[0] == 'python':
    out[0] = sys.executable
  elif out[0].endswith('.py'):
    out.insert(0, sys.executable)
  return out


def get_sanitizer_env(cmd, asan, lsan, msan, tsan):
  """Returns the environment flags needed for sanitizer tools."""
  extra_env = {}
  # Instruct GTK to use malloc while running sanitizer-instrumented tests.
  extra_env['G_SLICE'] = 'always-malloc'
  extra_env['NSS_DISABLE_ARENA_FREE_LIST'] = '1'
  extra_env['NSS_DISABLE_UNLOAD'] = '1'
  # TODO(glider): remove the symbolizer path once
  # https://code.google.com/p/address-sanitizer/issues/detail?id=134 is fixed.
  symbolizer_path = os.path.abspath(os.path.join(
      ROOT_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
      'llvm-symbolizer'))
  if lsan or tsan:
    # LSan is not sandbox-compatible, so we can use online symbolization. In
    # fact, it needs symbolization to be able to apply suppressions.
    symbolization_options = ['symbolize=1',
                             'external_symbolizer_path=%s' % symbolizer_path]
  elif (asan or msan) and sys.platform not in ['win32', 'cygwin']:
    # ASan uses a script for offline symbolization, except on Windows.
    # Important note: when running ASan with leak detection enabled, we must
    # use the LSan symbolization options above.
    symbolization_options = ['symbolize=0']
    # Set the path to llvm-symbolizer to be used by asan_symbolize.py
    extra_env['LLVM_SYMBOLIZER_PATH'] = symbolizer_path
  else:
    symbolization_options = []
  if asan:
    asan_options = symbolization_options[:]
    if lsan:
      asan_options.append('detect_leaks=1')
    if asan_options:
      extra_env['ASAN_OPTIONS'] = ' '.join(asan_options)
    if sys.platform == 'darwin':
      isolate_output_dir = os.path.abspath(os.path.dirname(cmd[0]))
      # This is needed because the test binary has @executable_path embedded
      # in it that the OS tries to resolve to the cache directory and not the
      # mapped directory.
      extra_env['DYLD_LIBRARY_PATH'] = str(isolate_output_dir)
  if lsan:
    if asan or msan:
      lsan_options = []
    else:
      lsan_options = symbolization_options[:]
    if sys.platform == 'linux2':
      # Use the debug version of libstdc++ under LSan. If we don't, there will
      # be a lot of incomplete stack traces in the reports.
      extra_env['LD_LIBRARY_PATH'] = '/usr/lib/x86_64-linux-gnu/debug:'
    extra_env['LSAN_OPTIONS'] = ' '.join(lsan_options)
  if msan:
    msan_options = symbolization_options[:]
    if lsan:
      msan_options.append('detect_leaks=1')
    extra_env['MSAN_OPTIONS'] = ' '.join(msan_options)
  if tsan:
    tsan_options = symbolization_options[:]
    extra_env['TSAN_OPTIONS'] = ' '.join(tsan_options)
  return extra_env


def get_sanitizer_symbolize_command(json_path=None, executable_path=None):
  """Construct the command to invoke offline symbolization script."""
  script_path = '../tools/valgrind/asan/asan_symbolize.py'
  cmd = [sys.executable, script_path]
  if json_path is not None:
    cmd.append('--test-summary-json-file=%s' % json_path)
  if executable_path is not None:
    cmd.append('--executable-path=%s' % executable_path)
  return cmd


def get_json_path(cmd):
  """Extract the JSON test summary path from a command line."""
  json_path_flag = '--test-launcher-summary-output='
  for arg in cmd:
    if arg.startswith(json_path_flag):
      return arg.split(json_path_flag).pop()
  return None


def symbolize_snippets_in_json(cmd, env):
  """Symbolize output snippets inside the JSON test summary."""
  json_path = get_json_path(cmd)
  if json_path is None:
    return
  try:
    symbolize_command = get_sanitizer_symbolize_command(
        json_path=json_path, executable_path=cmd[0])
    p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE, env=env)
    (_, stderr) = p.communicate()
  except OSError as e:
    print 'Exception while symbolizing snippets: %s' % e
  if p.returncode != 0:
    print "Error: failed to symbolize snippets in JSON:\n"
    print stderr


def run_executable(cmd, env):
  """Runs an executable with:
    - environment variable CR_SOURCE_ROOT set to the root directory.
    - environment variable LANGUAGE to en_US.UTF-8.
    - environment variable CHROME_DEVEL_SANDBOX set
    - Reuses sys.executable automatically.
  """
  extra_env = {}
  # Many tests assume an English interface...
  extra_env['LANG'] = 'en_US.UTF-8'
  # Used by base/base_paths_linux.cc as an override. Just make sure the
  # default logic is used.
  env.pop('CR_SOURCE_ROOT', None)
  extra_env.update(get_sandbox_env(env))

  # Copy logic from tools/build/scripts/slave/runtest.py.
  asan = '--asan=1' in cmd
  lsan = '--lsan=1' in cmd
  msan = '--msan=1' in cmd
  tsan = '--tsan=1' in cmd
  if sys.platform in ['win32', 'cygwin']:
    # Symbolization works in-process on Windows even when sandboxed.
    use_symbolization_script = False
  else:
    # LSan doesn't support sandboxing yet, so we use the in-process symbolizer.
    # Note that ASan and MSan can work together with LSan.
    use_symbolization_script = (asan or msan) and not lsan

  if asan or lsan or msan or tsan:
    extra_env.update(get_sanitizer_env(cmd, asan, lsan, msan, tsan))
  if lsan or tsan:
    # LSan and TSan are not sandbox-friendly.
    cmd.append('--no-sandbox')

  cmd = trim_cmd(cmd)

  # Ensure paths are correctly separated on windows.
  cmd[0] = cmd[0].replace('/', os.path.sep)
  cmd = fix_python_path(cmd)

  print('Additional test environment:\n%s\n'
        'Command: %s\n' % (
            '\n'.join('    %s=%s' % (k, v)
                      for k, v in sorted(extra_env.iteritems())),
            ' '.join(cmd)))
  env.update(extra_env or {})

  try:
    # See above comment regarding offline symbolization.
    if use_symbolization_script:
      # Need to pipe to the symbolizer script.
      p1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                            stderr=sys.stdout)
      p2 = subprocess.Popen(
          get_sanitizer_symbolize_command(executable_path=cmd[0]),
          env=env, stdin=p1.stdout)
      p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
      p1.wait()
      p2.wait()
      # Also feed the out-of-band JSON output to the symbolizer script.
      symbolize_snippets_in_json(cmd, env)
      return p1.returncode
    else:
      return subprocess.call(cmd, env=env)
  except OSError:
    print >> sys.stderr, 'Failed to start %s' % cmd
    raise


def main():
  return run_executable(sys.argv[1:],
                        # (the record is truncated here in the dataset preview)
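A small hedged sketch of how the helpers above compose; the command line is illustrative only, and mirrors what run_executable() does internally:

# Hypothetical command as it might arrive from the host machine:
cmd = ['python', 'out/Release/my_test.py', '--asan=1']

cmd = trim_cmd(cmd)         # drops the internal '--asan=1' marker
cmd = fix_python_path(cmd)  # replaces bare 'python' with sys.executable

# run_executable(cmd, os.environ.copy()) performs the same two steps after
# reading the sanitizer markers to build ASAN_OPTIONS and related variables.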
cycladesnz/chambersAndCreatures
src/item/suf_potions.py
Python
gpl-2.0
1,217
0.013969
import random

from pdcglobal import *
from effects import *


def PotionOfKillbeePoison(item):
    item.full_name += ' of Killerbee-Poison'
    item.weaponinfotext = 'Dangerous Poison'

def DrinkPotionOfKillbeePoison(self, actor):
    KillerbeePoisonEffect(actor, None)

def PotionOfYumuraPoison(item):
    item.full_name += ' of Yumura-Poison'

def DrinkPotionOfYumuraPoison(self, actor):
    YumuraPoisonEffect(actor, None)

def PotionOfRegeneration(item):
    # The source appended ' of Killerbee-Poison' here, an apparent copy-paste
    # slip: the drink handler applies RegenerationEffect.
    item.full_name += ' of Regeneration'

def DrinkPotionOfRegeneration(self, actor):
    RegenerationEffect(actor, None)

def PotionOfEndurance(item):
    item.full_name += ' of Endurance'

def DrinkPotionOfEndurance(self, actor):
    actor.cur_endurance += d(10) + d(10)

def PotionOfMind(item):
    item.full_name += ' of Mind'

def DrinkPotionOfMind(self, actor):
    actor.cur_mind += d(10) + d(10)

def PotionOfSpellcaster(item):
    item.full_name += ' of Spellcasters'

def DrinkPotionOfSpellcaster(self, actor):
    actor.cur_endurance += d(10) + d(10)
    actor.cur_mind += d(10) + d(10)

def PotionOfHealing(item):
    item.full_name += ' of Healing'

def DrinkPotionOfHealing(self, actor):
    actor.cur_health += d(10)
osiloke/Flumotion-Transcoder
flumotion/transcoder/admin/adminconsts.py
Python
lgpl-2.1
5,673
0.000353
# vi:si:et:sw=4:sts=4:ts=4

# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.

# Log categories
ADMIN_LOG_CATEGORY = "admin"
STORES_LOG_CATEGORY = "stores"
PROXIES_LOG_CATEGORY = "proxy"
DATASOURCE_LOG_CATEGORY = "datasource"
MONITORING_LOG_CATEGORY = "monitoring"
TRANSCODING_LOG_CATEGORY = "transcoding"
SCHEDULER_LOG_CATEGORY = "scheduler"
NOTIFIER_LOG_CATEGORY = "notifier"
IDLE_LOG_CATEGORY = "idle"
JANITOR_LOG_CATEGORY = "janitor"

GET_REQUEST_AGENT = "Flumotion Transcoder"

# The generic timeout used for remote method calls
REMOTE_CALL_TIMEOUT = 60

# The time to let the workers log back into the manager before resuming
RESUME_DELAY = 20

# The time a proxy waits after a SIGTERM before sending a SIGKILL to a component
COMPONENT_WAIT_TO_KILL = 30

# The time between monitor component set adjustments
MONITORSET_TUNE_PERIOD = 10

# Label templates
ACTIVITY_LABEL_TEMPLATE = "%(customerName)s/%(profileName)s:%(sourcePath)s"
TRANSCODER_LABEL_TEMPLATE = "%(customerName)s/%(profileName)s:%(sourcePath)s"
MONITOR_LABEL_TEMPLATE = "Monitor for %(customerName)s"

# Maximum time to wait for the admin to load
# and initialize all components stats
WAIT_IDLE_TIMEOUT = 30
# Maximum time to wait for an element to be active
WAIT_ACTIVE_TIMEOUT = 30
# Maximum time to wait for a worker instance
# when the worker name is set to a component state
WAIT_WORKER_TIMEOUT = 30
# Maximum time to wait for component properties
TASKMANAGER_WAITPROPS_TIMEOUT = 30
TASKMANAGER_IDLE_TIMEOUT = 30
# Maximum time for admin tasks to wait for a component to be loaded
TASK_LOAD_TIMEOUT = 30
# Maximum time for admin tasks to wait for a component to become happy
TASK_HAPPY_TIMEOUT = 60
# First delay to wait when retrying to load a component
TASK_START_DELAY = 3
# The factor to apply to the delay
TASK_START_DELAY_FACTOR = 4
# Maximum time to hold a lost component before starting another one
TASK_HOLD_TIMEOUT = 60
# Maximum time to look for a valid component before starting a new one
TASK_POTENTIAL_COMPONENT_TIMEOUT = 20
# Maximum time to wait when retrieving component UI State
TASK_UISTATE_TIMEOUT = 20
# Maximum time the janitor waits before forcing the component deletion
JANITOR_WAIT_FOR_DELETE = 20

MONITOR_STATE_UPDATE_PERIOD = 1
MONITOR_MAX_RETRIES = 3
MONITORING_POTENTIAL_WORKER_TIMEOUT = 20

# Maximum time an elected transcoder can stay sad before starting another one
TRANSCODER_SAD_TIMEOUT = 120
# Maximum time a component can take to acknowledge.
# Take into account that a lot of files are copied/moved
# during acknowledgement, so it can take a long time
TRANSCODER_ACK_TIMEOUT = 60 * 12
TRANSCODER_MAX_RETRIES = 2
TRANSCODING_POTENTIAL_WORKER_TIMEOUT = 20

# Startup timeouts
MONITORING_START_TIMEOUT = 30
MONITORING_PAUSE_TIMEOUT = 30
MONITORING_RESUME_TIMEOUT = 30
MONITORING_ACTIVATION_TIMEOUT = 30
TRANSCODING_START_TIMEOUT = 30
TRANSCODING_PAUSE_TIMEOUT = 30
TRANSCODING_RESUME_TIMEOUT = 30
SCHEDULER_START_TIMEOUT = 30
SCHEDULER_PAUSE_TIMEOUT = 30
SCHEDULER_RESUME_TIMEOUT = 30
NOTIFIER_START_TIMEOUT = 30

# Maximum time to wait for a datasource to be ready
WAIT_DATASOURCE_TIMEOUT = 60

# Forced component deletion constants
FORCED_DELETION_TIMEOUT = 10
FORCED_DELETION_BUZY_TIMEOUT = 30
FORCED_DELETION_MAX_RETRY = 3

LOAD_COMPONENT_TIMEOUT = 30.0

GLOBAL_MAIL_NOTIFY_TIMEOUT = 60
GLOBAL_MAIL_NOTIFY_RETRIES = 5

# AdminStore default values
DEFAULT_ACCESS_FORCE_USER = None
DEFAULT_ACCESS_FORCE_GROUP = None
DEFAULT_ACCESS_FORCE_DIR_MODE = None
DEFAULT_ACCESS_FORCE_FILE_MODE = None
DEFAULT_OUTPUT_MEDIA_TEMPLATE = "%(targetPath)s"
DEFAULT_OUTPUT_THUMB_TEMPLATE = "%(targetDir)s%(targetBasename)s.%(index)03d%(targetExtension)s"
DEFAULT_LINK_FILE_TEMPLATE = "%(targetPath)s.link"
DEFAULT_CONFIG_FILE_TEMPLATE = "%(sourcePath)s.ini"
DEFAULT_REPORT_FILE_TEMPLATE = "%(sourcePath)s.%(id)s.rep"
DEFAULT_MONITORING_PERIOD = 5
DEFAULT_TRANSCODING_TIMEOUT = 60
DEFAULT_POSTPROCESS_TIMEOUT = 60
DEFAULT_PREPROCESS_TIMEOUT = 60
DEFAULT_MAIL_TIMEOUT = 30
DEFAULT_MAIL_RETRY_MAX = 3
DEFAULT_MAIL_RETRY_SLEEP = 60
DEFAULT_HTTPREQUEST_TIMEOUT = 30
DEFAULT_HTTPREQUEST_RETRY_MAX = 3
DEFAULT_HTTPREQUEST_RETRY_SLEEP = 60
DEFAULT_SQL_TIMEOUT = 30
DEFAULT_SQL_RETRY_MAX = 3
DEFAULT_SQL_RETRY_SLEEP = 60
DEFAULT_PROCESS_PRIORITY = 100
DEFAULT_TRANSCODING_PRIORITY = 100
DEFAULT_MAIL_SUBJECT_TEMPLATE = "%(customerName)s/%(profileName)s transcoding %(trigger)s"
DEFAULT_MAIL_BODY_TEMPLATE = """
Transcoding Report
==================

Customer Name: %(customerName)s
Profile Name:  %(profileName)s
--------------

File: '%(inputRelPath)s'

Message: %(errorMessage)s
"""

# Default CustomerStore values
DEFAULT_CUSTOMER_PRIORITY = 100

# Default customer directories
DEFAULT_INPUT_DIR = "/%s/files/incoming"
DEFAULT_OUTPUT_DIR = "/%s/files/outgoing"
DEFAULT_FAILED_DIR = "/%s/files/failed"
DEFAULT_DONE_DIR = "/%s/files/done"
DEFAULT_LINK_DIR = "/%s/files/links"
DEFAULT_CONFIG_DIR = "/%s/configs"
DEFAULT_TEMPREP_DIR = "/%s/reports/pending"
DEFAULT_FAILEDREP_DIR = "/%s/reports/failed"
DEFAULT_DONEREP_DIR = "/%s/reports/done"
DEFAULT_WORK_DIR = "/%s/work"

FILE_MONITOR = "file-monitor"
HTTP_MONITOR = "http-monitor"
ryanss/holidays.py
test/countries/test_nicaragua.py
Python
mit
1,698
0
# -*- coding: utf-8 -*-

# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
#          ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)

import unittest
from datetime import date

import holidays


class TestNicaragua(unittest.TestCase):
    def setUp(self):
        self.ni_holidays = holidays.NI()

    def test_ni_holidays_2020(self):
        year = 2020
        mn_holidays = holidays.NI(prov="MN")
        # New Year's Day
        self.assertIn(date(year, 1, 1), self.ni_holidays)
        # Maundy Thursday
        self.assertIn(date(year, 4, 9), self.ni_holidays)
        # Good Friday
        self.assertIn(date(year, 4, 10), self.ni_holidays)
        # Labor Day
        self.assertIn(date(year, 5, 1), self.ni_holidays)
        # Revolution Day
        self.assertIn(date(year, 7, 19), self.ni_holidays)
        # Battle of San Jacinto Day
        self.assertIn(date(year, 9, 14), self.ni_holidays)
        # Independence Day
        self.assertIn(date(year, 9, 15), self.ni_holidays)
        # Virgin's Day
        self.assertIn(date(year, 12, 8), self.ni_holidays)
        # Christmas Day
        self.assertIn(date(year, 12, 25), self.ni_holidays)
        # Santo Domingo Day Down
        self.assertIn(date(year, 8, 1), mn_holidays)
        # Santo Domingo Day Up
        self.assertIn(date(year, 8, 10), mn_holidays)
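As a quick orientation, a hedged sketch of the holidays API these assertions exercise (dates taken from the test above):

from datetime import date
import holidays

ni = holidays.NI()            # country-wide Nicaraguan holidays
mn = holidays.NI(prov="MN")   # Managua also observes the Santo Domingo days

print(date(2020, 9, 15) in ni)  # True: Independence Day
print(date(2020, 8, 1) in mn)   # True: Santo Domingo Day Down (MN only)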
Ictp/indico
indico/MaKaC/plugins/Collaboration/CERNMCU/options.py
Python
gpl-3.0
5,215
0.011697
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.

globalOptions = [
    # collaboration options necessary in all plugins
    ("tab", {"description": "Name of tab where CERN MCU will be placed",
             "type": str,
             "defaultValue": "Videoconferencing",
             "editable": True,
             "visible": True,
             "mustReload": False}),
    ("allowedOn", {"description": "Kind of event types (conference, meeting, simple_event) supported",
                   "type": list,
                   "defaultValue": ["conference", "meeting"],
                   "editable": True,
                   "visible": True,
                   "mustReload": False}),
    ("admins", {"description": "CERN MCU admins / responsibles",
                "type": 'users',
                "defaultValue": [],
                "editable": True,
                "visible": True}),

    # CERN MCU Options
    ("MCUAddress", {"description": "MCU URL",
                    "type": str,
                    "defaultValue": "https://cern-mcu1.cern.ch",
                    "editable": True,
                    "visible": True}),
    ("indicoID", {"description": "ID of Indico for the MCU",
                  "type": str,
                  "defaultValue": "indico",
                  "editable": True,
                  "visible": True}),
    ("indicoPassword", {"description": "Password of Indico for the MCU",
                        "type": str,
                        "defaultValue": "",
                        "editable": True,
                        "visible": True}),
    ("idRange", {"description": "Range of possible IDs (format: min-max)",
                 "type": str,
                 "defaultValue": "90000-99999",
                 "editable": True,
                 "visible": True}),
    ("MCUTZ", {"description": "Timezone where the MCU is physically located. We assume a MCU Admin will update 'UTC offset' in /settings_time.html of the MCU web interface accordingly.",
               "type": str,
               "defaultValue": 'UTC',
               "editable": True,
               "visible": True}),
    ("CERNGatekeeperPrefix", {"description": "CERN's gatekeeper prefix. Will be used for instructions on how to join the conference.",
                              "type": str,
                              "defaultValue": "98",
                              "editable": True,
                              "visible": True}),
    ("GDSPrefix", {"description": "GDS prefix. Will be used for instructions on how to join the conference.",
                   "type": str,
                   "defaultValue": "0041227670272",
                   "editable": True,
                   "visible": True}),
    ("MCU_IP", {"description": "MCU's IP. Will be used for instructions on how to join the conference.",
                "type": str,
                "defaultValue": "137.138.145.150",
                "editable": True,
                "visible": True}),
    ("Phone_number", {"description": "Phone number used to join by phone. Will be used for instructions on how to join the conference.",
                      "type": str,
                      "defaultValue": "0041227670270",
                      "editable": True,
                      "visible": True}),
    ("H323_IP_att_name", {"description": "Name of the custom attribute for the H.323 IP of a room in the Room Booking database.",
                          "type": str,
                          "defaultValue": "H323 IP",
                          "editable": True,
                          "visible": True}),
    ("extraMinutesBefore", {"description": "Extra minutes allowed before Indico event start time",
                            "type": int,
                            "defaultValue": 60}),
    ("extraMinutesAfter", {"description": "Extra minutes allowed after Indico event start time",
                           "type": int,
                           "defaultValue": 120}),
    ("defaultMinutesBefore", {"description": "Default extra minutes before Indico event start time",
                              "type": int,
                              "defaultValue": 30}),
    ("defaultMinutesAfter", {"description": "Default extra minutes after Indico event start time",
                             "type": int,
                             "defaultValue": 60}),
]
shlomozippel/ansible
plugins/callbacks/log_plays.py
Python
gpl-3.0
3,121
0.001922
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import os
import time
import json

TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"

if not os.path.exists("/var/log/ansible/hosts"):
    os.makedirs("/var/log/ansible/hosts")


def log(host, category, data):
    if type(data) == dict:
        if 'verbose_override' in data:
            # avoid logging extraneous data from facts
            data = 'omitted'
        else:
            invocation = data.pop('invocation', None)
            data = json.dumps(data)
            if invocation is not None:
                data = json.dumps(invocation) + " => %s " % data
    path = os.path.join("/var/log/ansible/hosts", host)
    now = time.strftime(TIME_FORMAT, time.localtime())
    fd = open(path, "a")
    fd.write(MSG_FORMAT % dict(now=now, category=category, data=data))
    fd.close()


class CallbackModule(object):
    """ logs playbook results, per host, in /var/log/ansible/hosts """

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        log(host, 'FAILED', res)

    def runner_on_ok(self, host, res):
        log(host, 'OK', res)

    def runner_on_error(self, host, msg):
        log(host, 'ERROR', msg)

    def runner_on_skipped(self, host, item=None):
        log(host, 'SKIPPED', '...')

    def runner_on_unreachable(self, host, res):
        log(host, 'UNREACHABLE', res)

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        log(host, 'ASYNC_FAILED', res)

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                encrypt=None, confirm=False, salt_size=None,
                                salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        log(host, 'IMPORTED', imported_file)

    def playbook_on_not_import_for_host(self, host, missing_file):
        log(host, 'NOTIMPORTED', missing_file)

    def playbook_on_play_start(self, pattern):
        pass

    def playbook_on_stats(self, stats):
        pass
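For orientation, a short hedged sketch of the entry this callback writes, built from the same TIME_FORMAT and MSG_FORMAT constants as above (host and data are made up):

import json, time

TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"

# What log('web1', 'OK', {'changed': False}) would append to
# /var/log/ansible/hosts/web1:
now = time.strftime(TIME_FORMAT, time.localtime())
entry = MSG_FORMAT % dict(now=now, category='OK',
                          data=json.dumps({'changed': False}))
print(entry)  # e.g. 'Feb 03 2013 14:12:01 - OK - {"changed": false}'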
edonyM/emthesis
code/num2iter.py
Python
mit
3,819
0.005237
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
#  .---.                  .-----------
# /     \  __  /    ------
# / /     \( )/    -----   (`-') _ _(`-') <-. (`-')_
# ////// '\/ `  ---        ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // :    : ---     (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / /  /  `\/ '--       | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# //  //..\\   (|  '--.    | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
#             '//||\\`     | `---. | '-' / ' '-' ' | | \ | `-/ /`
#               ''``       `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-12-08 23:28
#
# Filename: num2iter.py
#
# Description: All Rights Are Reserved
#
"""
# (The ASCII-art banner above is reconstructed approximately; its exact
# alignment was lost when the record was flattened.)

#import scipy as sp
#import math as m
#import matplotlib as mpl
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np


class PyColor(object):
    """ This class is for colored print in the python interpreter!
    "F3" call Addpy() function to add this class which is defined
    in the .vimrc for vim Editor."""

    def __init__(self):
        self.self_doc = r"""
        STYLE: \033['display model';'foreground';'background'm
        DETAILS:
        FOREGROUND        BACKGROUND       COLOR
        ---------------------------------------
        30                40              black
        31                41              red
        32                42              green
        33                43              yellow
        34                44              blue
        35                45              purple
        36                46              cyan
        37                47              white
        DISPLAY MODEL    DETAILS
        -------------------------
        0                default
        1                highlight
        4                underline
        5                flicker
        7                reverse
        8                non-visiable
        e.g:
        \033[1;31;40m   <!--1-highlight;31-foreground red;40-background black-->
        \033[0m         <!--set all into default-->
        """
        self.warningcolor = '\033[0;31m'
        self.tipcolor = '\033[0;32m'
        self.endcolor = '\033[0m'
        self._newcolor = ''

    @property
    def new(self):
        """ Customized Python Print Color. """
        return self._newcolor

    @new.setter
    def new(self, color_str):
        """ New Color. """
        self._newcolor = color_str

    def disable(self):
        """ Disable Color Print. """
        self.warningcolor = ''
        self.endcolor = ''


fig = plt.figure('Point Cloud Number VS Iteration')
ax1 = fig.add_subplot(111)
ax2 = fig.add_subplot(111)
ax3 = fig.add_subplot(111)
x = np.linspace(3000, 10000, 100)
y1 = np.linspace(38092, 40318, 100)
y2 = np.linspace(507330, 800274, 100)
y3_1 = np.linspace(2500737, 3082897, 50) + 1000000
y3 = np.linspace(3082897, 2300181, 50) + 5000
y3_1 = list(y3_1)
y3_1.extend(list(y3))
y3 = np.array(y3_1)
y1 = [i + np.random.randint(50000, 100000) for i in y1]
y2 = [i + np.random.randint(50000, 100000) for i in y2]
y3 = [i + np.random.randint(50000, 100000) for i in y3]
ax1.plot(x, y1, label='Plane')
ax2.plot(x, y2, label='Normal Tenon')
ax3.plot(x, y3, label='Large-scale Parts')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Number of Consensus Cloud Points Set')
plt.legend()
plt.show()
vidartf/hyperspy
hyperspy/tests/signal/test_eds_tem.py
Python
gpl-3.0
12,087
0
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.

import numpy as np
import nose.tools as nt

from hyperspy.signals import EDSTEMSpectrum
from hyperspy.defaults_parser import preferences
from hyperspy.components1d import Gaussian
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.test_utils import ignore_warning


class Test_metadata:

    def setUp(self):
        # Create an empty spectrum
        s = EDSTEMSpectrum(np.ones((4, 2, 1024)))
        s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time = 3.1
        s.metadata.Acquisition_instrument.TEM.beam_energy = 15.0
        self.signal = s

    def test_sum_live_time1(self):
        s = self.signal
        old_metadata = s.metadata.deepcopy()
        sSum = s.sum(0)
        nt.assert_equal(
            sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
            3.1 * 2)
        # Check that metadata is unchanged
        print(old_metadata, s.metadata)  # Capture for comparison on error
        nt.assert_dict_equal(old_metadata.as_dictionary(),
                             s.metadata.as_dictionary(),
                             "Source metadata changed")

    def test_sum_live_time2(self):
        s = self.signal
        old_metadata = s.metadata.deepcopy()
        sSum = s.sum((0, 1))
        nt.assert_equal(
            sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
            3.1 * 2 * 4)
        # Check that metadata is unchanged
        print(old_metadata, s.metadata)  # Capture for comparison on error
        nt.assert_dict_equal(old_metadata.as_dictionary(),
                             s.metadata.as_dictionary(),
                             "Source metadata changed")

    def test_sum_live_time_out_arg(self):
        s = self.signal
        sSum = s.sum(0)
        s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time = 4.2
        s_resum = s.sum(0)
        r = s.sum(0, out=sSum)
        nt.assert_is_none(r)
        nt.assert_equal(
            s_resum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
            sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time)
        np.testing.assert_allclose(s_resum.data, sSum.data)

    def test_rebin_live_time(self):
        s = self.signal
        old_metadata = s.metadata.deepcopy()
        dim = s.axes_manager.shape
        s = s.rebin([dim[0] / 2, dim[1] / 2, dim[2]])
        nt.assert_equal(
            s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
            3.1 * 2 * 2)
        # Check that metadata is unchanged
        print(old_metadata, self.signal.metadata)  # Captured on error
        nt.assert_dict_equal(old_metadata.as_dictionary(),
                             self.signal.metadata.as_dictionary(),
                             "Source metadata changed")

    def test_add_elements(self):
        s = self.signal
        s.add_elements(['Al', 'Ni'])
        nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
        s.add_elements(['Al', 'Ni'])
        nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
        s.add_elements(["Fe", ])
        nt.assert_equal(s.metadata.Sample.elements, ['Al', "Fe", 'Ni'])
        s.set_elements(['Al', 'Ni'])
        nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])

    def test_default_param(self):
        s = self.signal
        mp = s.metadata
        nt.assert_equal(
            mp.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa,
            preferences.EDS.eds_mn_ka)

    def test_TEM_to_SEM(self):
        s = self.signal.inav[0, 0]
        signal_type = 'EDS_SEM'
        mp = s.metadata.Acquisition_instrument.TEM.Detector.EDS
        mp.energy_resolution_MnKa = 125.3
        sSEM = s.deepcopy()
        sSEM.set_signal_type(signal_type)
        mpSEM = sSEM.metadata.Acquisition_instrument.SEM.Detector.EDS
        results = [mp.energy_resolution_MnKa, signal_type]
        resultsSEM = [mpSEM.energy_resolution_MnKa,
                      sSEM.metadata.Signal.signal_type]
        nt.assert_equal(results, resultsSEM)

    def test_get_calibration_from(self):
        s = self.signal
        scalib = EDSTEMSpectrum(np.ones(1024))
        energy_axis = scalib.axes_manager.signal_axes[0]
        energy_axis.scale = 0.01
        energy_axis.offset = -0.10
        s.get_calibration_from(scalib)
        nt.assert_equal(s.axes_manager.signal_axes[0].scale,
                        energy_axis.scale)


class Test_quantification:

    def setUp(self):
        s = EDSTEMSpectrum(np.ones([2, 2, 1024]))
        energy_axis = s.axes_manager.signal_axes[0]
        energy_axis.scale = 1e-2
        energy_axis.units = 'keV'
        energy_axis.name = "Energy"
        s.set_microscope_parameters(beam_energy=200,
                                    live_time=3.1,
                                    tilt_stage=0.0,
                                    azimuth_angle=None,
                                    elevation_angle=35,
                                    energy_resolution_MnKa=130)
        s.metadata.Acquisition_instrument.TEM.Detector.EDS.real_time = 2.5
        s.metadata.Acquisition_instrument.TEM.beam_current = 0.05
        elements = ['Al', 'Zn']
        xray_lines = ['Al_Ka', 'Zn_Ka']
        intensities = [300, 500]
        for i, xray_line in enumerate(xray_lines):
            gauss = Gaussian()
            line_energy, FWHM = s._get_line_energy(xray_line,
                                                   FWHM_MnKa='auto')
            gauss.centre.value = line_energy
            gauss.A.value = intensities[i]
            gauss.sigma.value = FWHM
            s.data[:] += gauss.function(energy_axis.axis)
        s.set_elements(elements)
        s.add_lines(xray_lines)
        s.axes_manager[0].scale = 0.5
        s.axes_manager[1].scale = 0.5
        self.signal = s

    def test_quant_lorimer(self):
        s = self.signal
        method = 'CL'
        kfactors = [1, 2.0009344042484134]
        composition_units = 'weight'
        intensities = s.get_lines_intensity()
        res = s.quantification(intensities, method, kfactors,
                               composition_units)
        np.testing.assert_allclose(res[0].data, np.array([
            [22.70779, 22.70779],
            [22.70779, 22.70779]]), atol=1e-3)

    def test_quant_zeta(self):
        s = self.signal
        method = 'zeta'
        compositions_units = 'weight'
        factors = [20, 50]
        intensities = s.get_lines_intensity()
        res = s.quantification(intensities, method, factors,
                               compositions_units)
        np.testing.assert_allclose(res[1].data, np.array(
            [[2.7125736e-03, 2.7125736e-03],
             [2.7125736e-03, 2.7125736e-03]]), atol=1e-3)
        np.testing.assert_allclose(res[0][1].data, np.array(
            [[80.962287987, 80.962287987],
             [80.962287987, 80.962287987]]), atol=1e-3)

    def test_quant_cross_section(self):
        s = self.signal
        method = 'cross_section'
        factors = [3, 5]
        intensities = s.get_lines_intensity()
        res = s.quantification(intensities, method, factors)
        np.testing.assert_allclose(res[1][0].data, np.array(
            [[21517.1647074, 21517.1647074],
             [21517.1647074, 21517.1647074]]), atol=1e-3)
        np.testing.assert_allclose(res[1][1].data, np.array(
            [[21961.616621, 21961.616621],
             [21961.616621, 21961.616621]]), atol=1e-3)
        np.testing.assert_allclose(res[0][0].data, np.array(
            [[49.4888856823, 49.4888856823]
            # (the record is truncated here in the dataset preview)
harryfb/DST5
ArcPy Code/Trees.py
Python
apache-2.0
760
0.015789
# import system modules
import arcpy
from arcpy import env

# Set environment settings (raw strings so the backslashes in Windows paths
# are not treated as escape sequences)
env.workspace = r"C:\Users\Ewan\Desktop\SFTPDST5\MapFiles"

try:
    # Set the local variables
    in_Table = "Trees.csv"
    x_coords = "X"
    y_coords = "Y"
    z_coords = "Z"
    out_Layer = "Trees_Layer"
    saved_Layer = r"C:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\Trees.lyr"

    # Set the spatial reference ("Systens" is preserved from the source;
    # the ArcGIS folder is normally spelled "Systems")
    spRef = r"Coordinate Systems\Geographic Coordinate Systens\World\WGS 1984.prj"

    # Make the XY Event Layer
    arcpy.MakeXYEventLayer_management(in_Table, x_coords, y_coords,
                                      out_Layer, spRef)

    # Save to a layer file
    arcpy.SaveToLayerFile_management(out_Layer, saved_Layer)

except Exception as err:
    print(err.args[0])
sangwook236/sangwook-library
python/test/language_processing/synth90k_crnn.py
Python
gpl-2.0
29,148
0.020207
import abc

import numpy as np
import tensorflow as tf

from swl.machine_learning.tensorflow_model import SimpleSequentialTensorFlowModel
import swl.machine_learning.util as swl_ml_util

#--------------------------------------------------------------------

class Synth90kCrnn(SimpleSequentialTensorFlowModel):
    def __init__(self, input_shape, output_shape, num_classes, is_sparse_output):
        super().__init__(input_shape, output_shape, num_classes,
                         is_sparse_output, is_time_major=False)
        self._model_output_len = 0

    def get_feed_dict(self, data, num_data, *args, **kwargs):
        len_data = len(data)
        model_output_len = [self._model_output_len] * num_data
        if 1 == len_data:
            feed_dict = {self._input_ph: data[0],
                         self._model_output_len_ph: model_output_len}
        elif 2 == len_data:
            """
            feed_dict = {self._input_ph: data[0], self._output_ph: data[1],
                         self._model_output_len_ph: model_output_len}
            """
            # Use output lengths.
            output_len = list(map(lambda lbl: len(lbl), data[1]))
            feed_dict = {self._input_ph: data[0], self._output_ph: data[1],
                         self._output_len_ph: output_len,
                         self._model_output_len_ph: model_output_len}
        else:
            raise ValueError('Invalid number of feed data: {}'.format(len_data))
        return feed_dict

    def _create_single_model(self, inputs, input_shape, num_classes, is_training):
        with tf.variable_scope('synth90k_crnn', reuse=tf.AUTO_REUSE):
            return self._create_crnn(inputs, num_classes, is_training)

    def _create_crnn(self, inputs, num_classes, is_training):
        #kernel_initializer = None
        #kernel_initializer = tf.initializers.he_normal()
        #kernel_initializer = tf.initializers.glorot_normal()  # Xavier normal initialization.
        kernel_initializer = tf.variance_scaling_initializer(
            scale=2.0, mode='fan_in', distribution='truncated_normal')

        # Preprocessing.
        with tf.variable_scope('preprocessing', reuse=tf.AUTO_REUSE):
            inputs = tf.nn.local_response_normalization(
                inputs, depth_radius=5, bias=1, alpha=1, beta=0.5, name='lrn')
            # (samples, height, width, channels) -> (samples, width, height, channels).
            inputs = tf.transpose(inputs, perm=(0, 2, 1, 3), name='transpose')

        #--------------------
        # Convolutional layer.
        # TODO [check] >> The magic number (64).
        num_cnn_features = 64
        with tf.variable_scope('convolutional_layer', reuse=tf.AUTO_REUSE):
            cnn_outputs = self._create_convolutional_layer(
                inputs, num_cnn_features, kernel_initializer, is_training)

        #--------------------
        # Recurrent layer.
        with tf.variable_scope('recurrent_layer', reuse=tf.AUTO_REUSE):
            rnn_outputs = self._create_recurrent_layer(
                cnn_outputs, self._model_output_len_ph, kernel_initializer,
                is_training)

        #--------------------
        # Transcription layer.
        with tf.variable_scope('transcription_layer', reuse=tf.AUTO_REUSE):
            logits = self._create_transcription_layer(
                rnn_outputs, num_classes, kernel_initializer, is_training)

        #--------------------
        # Decoding layer.
        with tf.variable_scope('decoding_layer', reuse=tf.AUTO_REUSE):
            decoded = self._create_decoding_layer(logits)

        return {'logit': logits, 'decoded_label': decoded}

    def _create_convolutional_layer(self, inputs, num_features, kernel_initializer, is_training):
        with tf.variable_scope('conv1', reuse=tf.AUTO_REUSE):
            conv1 = tf.layers.conv2d(inputs, filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
            conv1 = tf.layers.batch_normalization(conv1, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
            conv1 = tf.nn.relu(conv1, name='relu')
            conv1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), strides=(2, 2), padding='same', name='maxpool')

        with tf.variable_scope('conv2', reuse=tf.AUTO_REUSE):
            conv2 = tf.layers.conv2d(conv1, filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
            conv2 = tf.layers.batch_normalization(conv2, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
            conv2 = tf.nn.relu(conv2, name='relu')
            conv2 = tf.layers.max_pooling2d(conv2, pool_size=(2, 2), strides=(2, 2), padding='same', name='maxpool')

        with tf.variable_scope('conv3', reuse=tf.AUTO_REUSE):
            conv3 = tf.layers.conv2d(conv2, filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv1')
            conv3 = tf.layers.batch_normalization(conv3, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm1')
            conv3 = tf.nn.relu(conv3, name='relu1')
            conv3 = tf.layers.conv2d(conv3, filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv2')
            conv3 = tf.layers.batch_normalization(conv3, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm2')
            conv3 = tf.nn.relu(conv3, name='relu2')
            conv3 = tf.layers.max_pooling2d(conv3, pool_size=(1, 2), strides=(1, 2), padding='same', name='maxpool')

        with tf.variable_scope('conv4', reuse=tf.AUTO_REUSE):
            conv4 = tf.layers.conv2d(conv3, filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv1')
            conv4 = tf.layers.batch_normalization(conv4, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm1')
            conv4 = tf.nn.relu(conv4, name='relu1')
            conv4 = tf.layers.conv2d(conv4, filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv2')
            conv4 = tf.layers.batch_normalization(conv4, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm2')
            conv4 = tf.nn.relu(conv4, name='relu2')
            conv4 = tf.layers.max_pooling2d(conv4, pool_size=(1, 2), strides=(1, 2), padding='same', name='maxpool')

        with tf.variable_scope('conv5', reuse=tf.AUTO_REUSE):
            conv5 = tf.layers.conv2d(conv4, filters=512, kernel_size=(2, 2), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
            #conv5 = tf.layers.conv2d(conv4, filters=512, kernel_size=(1, 1), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
            conv5 = tf.layers.batch_normalization(conv5, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
            conv5 = tf.nn.relu(conv5, name='relu')

        with tf.variable_scope('dense', reuse=tf.AUTO_REUSE):
            conv5_shape = conv5.shape  #conv5.shape.as_list()
            self._model_output_len = conv5_shape[1]
            #dense = tf.reshape(conv5, shape=conv5_shape[:2] + (-1,), name='reshape')
            #dense = tf.reshape(conv5, shape=conv5_shape[:2] + (conv5_shape[2] * conv5_shape[3]), name='reshape')
            outputs = tf.reshape(conv5, shape=(-1, conv5_shape[1], conv5_shape[2] * conv5_shape[3]), name='reshape')
            return tf.layers.dense(outputs, num_features, activation=tf.nn.relu, kernel_initializer=kernel_initializer, name='dense')

    def _create_recurrent_layer(self, inputs, input_len, kernel_initializer, is_training):
        num_hidden_units = 256
        keep_prob = 1.0
        #keep_prob = 0.5
        with tf.variable_scope('rnn1', reuse=tf.AUTO_REUSE):
            cell_fw1 = self._create_unit_cell(num_hidden_units, kernel_initializer, 'fw_unit_cell')  # Forward cell.
            #cell_fw1 = tf.contrib.rnn.DropoutWrapper(cell_fw1, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
            cell_bw1 = self._create_unit_cell(num_hidden_units, kernel_initializer, 'bw_unit_cell')  # Backward cell.
            #cell_bw1 = tf.contrib.rnn.DropoutWrapper(cell_bw1, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
            #rnn_outputs1, rnn_states1 = tf.nn.bidirectional_dynamic_rnn(cell_fw1, cell_bw1, inputs, sequence_length=None, time_major=False, dtype=tf.float32, scope='rnn')
            rnn_outputs1, rnn_states1 = tf.nn.bidirectional_dynamic_rnn(cell_fw1, cell_bw1, inputs, sequence_length=input_len, time_major=False, dtype=tf.float32, scope='rnn')
            rnn_outputs1 = tf.concat(rnn_outputs1, axis=-1)
            rnn_outputs1 = tf.layers.batch_normalization  # (the record is truncated here in the dataset preview)
Notgnoshi/clippy
clippy/clipboard.py
Python
mit
1,004
0.001992
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk


class Clipboard(object):
    """A wrapper around GTK3's clipboard interface. Text only."""

    def __init__(self):
        self._clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        self.text = None

    def __repr__(self):
        return str(self.text)

    def set(self, text):
        """ Set the contents of the clipboard. """
        # -1 means figure out the length.
        self._clipboard.set_text(text, -1)
        self._clipboard.store()

    def get(self):
        """ Get the contents of the clipboard. """
        return self._clipboard.wait_for_text()

    def hasUpdate(self):
        """ Check to see if the contents of the clipboard have changed
        since the last time this method was called. """
        temp = self.get()
        if temp != self.text:
            self.text = temp
            return True
        return False
alikins/subscription-manager
src/subscription_manager/gui/networkConfig.py
Python
gpl-2.0
13,296
0.002332
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#

import gettext
import logging
import os
import threading
#from gi.repository import GObject

import socket

import rhsm.config
import rhsm.connection as connection
import rhsm.utils
from rhsm.utils import remove_scheme
from rhsm.utils import parse_url

from subscription_manager.ga import GObject as ga_GObject
from subscription_manager.gui.utils import show_error_window
import subscription_manager.injection as inj
from subscription_manager.gui import progress
from subscription_manager.gui import widgets

_ = gettext.gettext

DIR = os.path.dirname(__file__)
log = logging.getLogger('rhsm-app.' + __name__)


class NetworkConfigDialog(widgets.SubmanBaseWidget):
    """This is the dialog that allows setting http proxy settings.

    It uses the instant-apply paradigm (or whatever you want to call it)
    that the GNOME HIG recommends. Whenever a toggle button is flipped
    or a text entry changed, the new setting is saved.
    """
    widget_names = ["networkConfigDialog", "enableProxyButton",
                    "enableProxyAuthButton", "proxyEntry",
                    "proxyUserEntry", "proxyPasswordEntry",
                    "cancelButton", "saveButton", "testConnectionButton",
                    "connectionStatusLabel"]
    gui_file = "networkConfig"

    def __init__(self):
        # Get widgets we'll need to access
        super(NetworkConfigDialog, self).__init__()

        self.org_timeout = socket.getdefaulttimeout()
        self.progress_bar = None

        self.cfg = rhsm.config.initConfig()
        self.cp_provider = inj.require(inj.CP_PROVIDER)

        # Need to load values before connecting signals, because when the
        # dialog starts up it seems to trigger the signals, which overwrites
        # the config with the blank values.
        self.set_initial_values()
        self.enableProxyButton.connect("toggled", self.enable_action)
        self.enableProxyAuthButton.connect("toggled", self.enable_action)
        self.enableProxyButton.connect("toggled", self.clear_connection_label)
        self.enableProxyAuthButton.connect("toggled", self.clear_connection_label)
        self.enableProxyButton.connect("toggled", self.enable_test_button)
        self.proxyEntry.connect("changed", self.clear_connection_label)
        self.proxyUserEntry.connect("changed", self.clear_connection_label)
        self.proxyPasswordEntry.connect("changed", self.clear_connection_label)
        self.proxyEntry.connect("focus-out-event", self.clean_proxy_entry)
        self.cancelButton.connect("clicked", self.on_cancel_clicked)
        self.saveButton.connect("clicked", self.on_save_clicked)
        self.testConnectionButton.connect("clicked",
                                          self.on_test_connection_clicked)
        self.networkConfigDialog.connect("delete-event", self.deleted)

    def set_initial_values(self):
        proxy_url = self.cfg.get("server", "proxy_hostname") or ""
        # append port unless not specified, then append the default of 3128
        if proxy_url:
            proxy_url = proxy_url + ':' + (self.cfg.get("server", "proxy_port") or
                                           rhsm.config.DEFAULT_PROXY_PORT)

        self.proxyEntry.set_text("%s" % proxy_url)

        # Show the proxy/proxy-auth sections as enabled if we have values set.
        # (RHN actually has a separate config flag for enabling, which seems
        # like overkill.)
        if self.cfg.get("server", "proxy_hostname"):
            self.enableProxyButton.set_active(True)

        if self.cfg.get("server", "proxy_hostname") and self.cfg.get("server", "proxy_user"):
            self.enableProxyAuthButton.set_active(True)

        self.enable_action(self.enableProxyAuthButton)
        self.enable_action(self.enableProxyButton)

        # the extra `or ""` is to make sure we don't str() a None
        self.proxyUserEntry.set_text(str(self.cfg.get("server", "proxy_user") or ""))
        self.proxyPasswordEntry.set_text(str(self.cfg.get("server", "proxy_password") or ""))

        self.connectionStatusLabel.set_label("")

        # If there is no proxy information, disable the proxy test button.
        if not self.enableProxyButton.get_active():
            self.testConnectionButton.set_sensitive(False)
            self.enableProxyAuthButton.set_sensitive(False)

    def write_values(self, widget=None, dummy=None):
        proxy = self.proxyEntry.get_text() or ""

        # don't save these values if they are disabled in the gui
        if proxy and self.enableProxyButton.get_active():
            # Remove any URI scheme provided
            proxy = remove_scheme(proxy)
            # Update the proxy entry field to show we removed any scheme
            self.proxyEntry.set_text(proxy)
            try:
                proxy_hostname, proxy_port = proxy.split(':')
                self.cfg.set("server", "proxy_hostname", proxy_hostname)
                self.cfg.set("server", "proxy_port", proxy_port)
            except ValueError:
                # no port? just write out the hostname and assume default
                self.cfg.set("server", "proxy_hostname", proxy)
                self.cfg.set("server", "proxy_port",
                             rhsm.config.DEFAULT_PROXY_PORT)
        else:
            # delete config options if we disable it in the ui
            self.cfg.set("server", "proxy_hostname", "")
            self.cfg.set("server", "proxy_port", "")

        if self.enableProxyAuthButton.get_active():
            if self.proxyUserEntry.get_text() is not None:
                self.cfg.set("server", "proxy_user",
                             str(self.proxyUserEntry.get_text()))
            if self.proxyPasswordEntry.get_text() is not None:
                self.cfg.set("server", "proxy_password",
                             str(self.proxyPasswordEntry.get_text()))
        else:
            self.cfg.set("server", "proxy_user", "")
            self.cfg.set("server", "proxy_password", "")

        try:
            self.cfg.save()
            self.cp_provider.set_connection_info()
        except Exception:
            show_error_window(_("There was an error saving your configuration.") +
                              _("Make sure that you own %s.") % self.cfg.fileName,
                              parent=self.networkConfigDialog)

    def show(self):
        self.set_initial_values()
        self.networkConfigDialog.present()

    def on_save_clicked(self, button):
        self.write_values()
        self.networkConfigDialog.hide()

    def on_cancel_clicked(self, button):
        self.networkConfigDialog.hide()

    def enable_test_button(self, button):
        self.testConnectionButton.set_sensitive(button.get_active())

    def clear_connection_label(self, entry):
        self.connectionStatusLabel.set_label("")

    # only used as callback from test_connection thread
    def on_test_connection_finish(self, result):
        if result:
            self.connectionStatusLabel.set_label(_("Proxy connection succeeded"))
        else:
            self.connectionStatusLabel.set_label(_("Proxy connection failed"))
        self._clear_progress_bar()

    def _reset_socket_timeout(self):
        socket.setdefaulttimeout(self.org_timeout)

    def test_connection_wrapper(self, proxy_host, proxy_port, proxy_user,
                                proxy_password):
        connection_status = self.test_connection(proxy_host, proxy_port,
                                                 proxy_user, proxy_password)
        ga_GObject.idle_add(self.on_test_connection_finish, connection_status)

    def test_connection(self, proxy_host, proxy_port, proxy_user,
                        proxy_password):
        cp = connection.UEPConnection(
            proxy_hostname=proxy_host,
            # (the record is truncated here in the dataset preview)
dknlght/dkodi
src/script.module.cryptopy/lib/crypto/hash/sha1Hash_test.py
Python
gpl-2.0
2,199
0.012278
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
""" sha1Hash_test.py
    Unit tests for sha1.py
"""

from crypto.hash.sha1Hash import SHA1
import unittest
import struct

assert struct.calcsize('!IIIII') == 20, '5 integers should be 20 bytes'


class SHA1_FIPS180_TestCases(unittest.TestCase):
    """ SHA-1 tests from FIPS180-1 Appendix A, B and C """

    def testFIPS180_1_Appendix_A(self):
        """ APPENDIX A. A SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
        hashAlg = SHA1()
        message = 'abc'
        message_digest = 0xA9993E36L, 0x4706816AL, 0xBA3E2571L, 0x7850C26CL, 0x9CD0D89DL
        md_string = _toBString(message_digest)
        assert(hashAlg(message) == md_string), 'FIPS180 Appendix A test Failed'

    def testFIPS180_1_Appendix_B(self):
        """ APPENDIX B. A SECOND SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
        hashAlg = SHA1()
        message = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'
        message_digest = 0x84983E44L, 0x1C3BD26EL, 0xBAAE4AA1L, 0xF95129E5L, 0xE54670F1L
        md_string = _toBString(message_digest)
        assert(hashAlg(message) == md_string), 'FIPS180 Appendix B test Failed'

    def testFIPS180_1_Appendix_C(self):
        """ APPENDIX C. A THIRD SAMPLE MESSAGE AND ITS MESSAGE DIGEST
            Let the message be the binary-coded form of the ASCII string
            which consists of 1,000,000 repetitions of "a".
        """
        hashAlg = SHA1()
        message = 1000000 * 'a'
        message_digest = 0x34AA973CL, 0xD4C4DAA4L, 0xF61EEB2BL, 0xDBAD2731L, 0x6534016FL
        md_string = _toBString(message_digest)
        assert(hashAlg(message) == md_string), 'FIPS180 Appendix C test Failed'


def _toBlock(binaryString):
    """ Convert binary string to blocks of 5 words of uint32() """
    # Note: uint32 is not defined in this file; it presumably comes from the
    # surrounding crypto package.
    return [uint32(word) for word in struct.unpack('!IIIII', binaryString)]


def _toBString(block):
    """ Convert block (5 words of 32 bits) to binary string """
    return ''.join([struct.pack('!I', word) for word in block])


if __name__ == '__main__':
    # Run the tests from the command line
    unittest.main()
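The `_toBlock`/`_toBString` helpers rely on big-endian struct packing; a minimal round-trip sketch of the calls they use (Python 3 syntax, without the Py2 `L` literal suffixes the file above uses):

import struct

words = (0xA9993E36, 0x4706816A, 0xBA3E2571, 0x7850C26C, 0x9CD0D89D)
packed = struct.pack('!IIIII', *words)   # 20-byte big-endian digest string
assert struct.unpack('!IIIII', packed) == words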
sxjscience/tvm
python/tvm/relay/backend/vm.py
Python
apache-2.0
9,793
0.000715
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distribu
ted with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://
www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin """ The Relay Virtual Machine. Implements a Python interface to compiling and executing on the Relay VM. """ import numpy as np import tvm import tvm.runtime.ndarray as _nd import tvm.runtime.vm as vm_rt from tvm import autotvm from tvm.relay import expr as _expr from tvm.relay.backend.interpreter import Executor from . import _vm def compile(mod, target=None, target_host=None, params=None): """Compile the module to VM executable. A helper function for VMCompiler. Parameters ---------- mod : tvm.IRModule The Relay module to build. target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context name) to str/tvm.target.Target, optional For heterogeneous compilation, it is a dictionary indicating context to target mapping. For homogeneous compilation, it is a build target. target_host : str or :any:`tvm.target.Target`, optional Host compilation target, if target is device. When TVM compiles device specific program such as CUDA, we also need host(CPU) side code to interact with the driver to setup the dimensions and parameters correctly. target_host is used to specify the host side codegen target. By default, llvm is used if it is enabled, otherwise a stackvm intepreter is used. params : dict of str to NDArray Input parameters to the graph that do not change during inference time. Used for constant folding. Returns ------- exec : tvm.runtime.vm.Executable The VM executable that contains both library code and bytecode. """ compiler = VMCompiler() if params: compiler.set_params(params) compiler.lower(mod, target, target_host) compiler.codegen() return compiler.get_exec() class VMCompiler(object): """Compiler that compiles Relay module to VM executable.""" def __init__(self): self.mod = _vm._VMCompiler() self._lower = self.mod["lower"] self._codegen = self.mod["codegen"] self._get_exec = self.mod["get_executable"] self._set_params_func = self.mod["set_params"] self._get_params_func = self.mod["get_params"] self._optimize = self.mod["optimize"] def set_params(self, params): """Set constant parameters for the model. Parameters ---------- params : dict of str to NDArray Input parameters to the graph that do not change during inference time. Used for constant folding. """ inputs = {} for name, param in params.items(): if isinstance(param, np.ndarray): param = _nd.array(param) inputs[name] = _expr.const(param) self._set_params_func(inputs) def get_params(self): """Return the updated weights.""" params = self._get_params_func() ret = {} for key, value in params.items(): ret[key] = value.data return ret def lower(self, mod, target=None, target_host=None): """Lower the module to VM bytecode. Parameters ---------- mod : tvm.IRModule The Relay module to build. target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context name) to str/tvm.target.Target, optional For heterogeneous compilation, it is a dictionary indicating context to target mapping. For homogeneous compilation, it is a build target. target_host : str or :any:`tvm.target.Target`, optional Host compilation target, if target is device. 
When TVM compiles device specific program such as CUDA, we also need host(CPU) side code to interact with the driver to setup the dimensions and parameters correctly. target_host is used to specify the host side codegen target. By default, llvm is used if it is enabled, otherwise a stackvm intepreter is used. """ target = self._update_target(target) target_host = self._update_target_host(target, target_host) tophub_context = self._tophub_context(target) with tophub_context: self._lower(mod, target, target_host) def codegen(self): """Generate the kernel library.""" self._codegen() def optimize(self, mod, target=None, target_host=None, params=None): """Helper method that optimizes a Relay module via VM. Parameters ---------- mod : tvm.IRModule target : str, :any:`tvm.target.Target`, or dict of str (i.e. device/context name) to str/tvm.target.Target, optional target_host : str or :any:`tvm.target.Target`, optional The compilation target for host. By default, llvm is used if it is enabled, otherwise a stackvm intepreter is used. params : dict of str to NDArray Input parameters to the graph that do not change during inference time. Used for constant folding. Returns ------- mod : tvm.IRModule The optimized relay module. params : dict The parameters of the final module. """ target = self._update_target(target) target_host = self._update_target_host(target, target_host) if params: self.set_params(params) return self._optimize(mod, target, target_host), self.get_params() def get_exec(self): """Get the VM executable. Returns ------- exec : tvm.runtime.vm.Executable The VM executable that contains both library code and bytecode. """ return vm_rt.Executable(self._get_exec()) def _update_target(self, target): """Update target.""" target = target if target else tvm.target.Target.current() if target is None: raise ValueError("Target is not set in env or passed as argument.") tgts = {} if isinstance(target, (str, tvm.target.Target)): dev_type = tvm.tir.IntImm("int32", tvm.nd.context(str(target)).device_type) tgts[dev_type] = tvm.target.Target(target) elif isinstance(target, dict): for dev, tgt in target.items(): dev_type = tvm.tir.IntImm("int32", tvm.nd.context(dev).device_type) tgts[dev_type] = tvm.target.Target(tgt) else: raise TypeError( "target is expected to be str, tvm.target.Target, " + "or dict of str to str/tvm.target.Target, but received " + "{}".format(type(target)) ) return tgts def _update_target_host(self, target, target_host): """Update target host.""" target_host = None if target_host == "" else target_host if not target_host: for device_type, tgt in target.items(): if device_type.value == tvm.nd.cpu(0).device_type: target_host = tgt break if not target_host: target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm" if isinstance(target_host, str): target_host = tvm.target.Target(ta
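The `compile()` helper and `VMCompiler` wrapper in this record form the module's public entry point. A minimal driving sketch, assuming a TVM build with Relay and LLVM enabled and that this module is importable as `tvm.relay.backend.vm` (the path field is not shown in this record):

```python
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend import vm as relay_vm  # assumed import path of the module above

# A tiny Relay function: f(x) = x + x
x = relay.var("x", shape=(3,), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], x + x))

exe = relay_vm.compile(mod, target="llvm")          # lower -> codegen -> get_exec
vm = tvm.runtime.vm.VirtualMachine(exe, tvm.cpu())
out = vm.invoke("main", tvm.nd.array(np.ones(3, "float32")))
```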
LeBarbouze/tunacell
tunacell/base/timeseries.py
Python
mit
8,945
0.000335
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ This module will define useful objects for conditional analysis """ import collections import numpy as np import pandas as pd from tunacell.base.datatools import Coordinates # define an object to handle heterogeneous types of time series class TimeSeries(object): """Object that decorates the data with other useful attributes. Parameters ---------- ts : :class:`Coordinates` instance, or 2d structured ndarray better to use Coordinates, so that names can be carried along ids : sequence of cell identifiers from which data was collected index_cycles : sequence of couples (index_first, index_last) that delimit data corresponding to cell id, must be same length as ids slices : sequence of slice objects each item can be used to slice the entire table time_bounds : sequence of couples of floats for each cell, first element is the lower bound of cell cycle, the second element is the upper bound of cell cycle, must be same length as ids select_ids : sequences of True/False values corresponding whether or not to include data from cell id in timeseries, must be same length as ids """ def __init__(self, ts=[], ids=[], index_cycles=[], slices=None, time_bounds=[], select_ids={}, container_label=None, experiment_label=None): # ts is a Coordinates instance self.container_label = container_label self.experiment_label = experiment_label if isinstance(ts, Coordinates): self._timeseries = ts # ts is a numpy array (structured if possible) elif isinstance(ts, np.ndarray): # convert structured arrays to 2d ndarrays if ts.dtype.n
ames is not None: _arr = ts.view((float, len(ts.dtype.names))) _x_name, _y_name = ts.dtype.names[:2] # take only first 2 cols
else: _arr = ts _x_name, _y_name = 'x', 'y' _x = _arr[:, 0] _y = _arr[:, 1] self._timeseries = Coordinates(_x, _y, x_name=_x_name, y_name=_y_name) # ... list of couples elif isinstance(ts, collections.Iterable): _ts = list(ts) _x, _y = map(np.array, zip(*_ts)) self._timeseries = Coordinates(_x, _y) self.time_bounds = time_bounds self.slices = [] if index_cycles: # array indices corresponding to (first, last) frame for each cell self.index_cycles = index_cycles slices = [] for item in index_cycles: if item is None: slices.append(None) # indices are reported as a single None # when no data is reported for a given cell else: i, j = item if j is not None: slices.append(slice(i, j+1)) else: slices.append(slice(i, None)) self.slices = slices elif slices is not None: self.slices = slices index_cycles = [] for item in slices: if item is None: index_cycles.append(None) else: if item.stop is not None: index_cycles.append((item.start, item.stop - 1)) else: index_cycles.append((item.start, None)) self.index_cycles = index_cycles self.ids = ids if len(select_ids.keys()) > 0: # master is already defined self.selections = select_ids else: # nothing is defined, we define master here self.selections = {'master': [True for _ in self.ids]} return def use_condition(self, condition_label='master', sharp_tleft=None, sharp_tright=None): """Get conditioned timeseries. Parameter --------- condition_label : str (default 'master') must be a key of dictionary self.selections, and corresponds to the repr of a given :class:`FilterSet` instance. sharp_left : float (default None) sharp lower bound for cell cycle timing. USE ONLY FOR CELL CYCLE OBSERVABLES sharp_right : float (default None) sharp upper bound for cell cycle timing. USE ONLY FOR CELL CYCLE OBSERVABLES Returns ------- Coordinates instance made of valid (x, y) points """ selection = self.selections[condition_label] xs, ys = [], [] for index, cid in enumerate(self.ids): if selection[index] and self.slices[index] is not None: if sharp_tleft is not None: if self.time_bounds[index][0] < sharp_tleft: continue if sharp_tright is not None: if self.time_bounds[index][1] > sharp_tright: continue xs.append(self.timeseries.x[self.slices[index]]) ys.append(self.timeseries.y[self.slices[index]]) if len(xs) > 0: _x = np.concatenate(xs) _y = np.concatenate(ys) else: _x = [] _y = [] out = Coordinates(_x, _y, x_name=self.timeseries.x_name, y_name=self.timeseries.y_name) return out @property def timeseries(self): return self._timeseries # # @timeseries.setter # def timeseries(self, ts): # self._timeseries = ts # def __getitem__(self, key): # return self.timeseries[key] def __repr__(self): return repr(self.timeseries) def as_text(self, sep='\t', cell_sep='\n', print_labels=False): """Export TimeSeries as text arrays Parameters ---------- sep : str (default '\t') how to separate columns cell_sep : str (default '\n') how to separate cells (default: one blank line) print_labels : bool {False, True} first line is labels, followed by empty line """ printout = '' labels = [self.timeseries.x_name, self.timeseries.y_name, 'cellID', 'containerID', 'experimentID'] if print_labels and labels is not None: printout += '\t'.join(labels) + '\n' printout += '\n' for index, sl in enumerate(self.slices): chunk = '' x = self.timeseries.x[sl] y = self.timeseries.y[sl] ids = len(x) * [self.ids[index]] container_id = len(x) * [self.container_label, ] exp_id = len(x) * [self.experiment_label, ] for line in zip(x, y, ids, container_id, exp_id): chunk += '{}'.format(sep).join(['{}'.format(item) for 
item in line]) + '\n' printout += chunk printout += cell_sep return printout.lstrip().rstrip() # remove empty lines at beginning/end def to_dataframe(self, start_index=0, sharp_tleft=None, sharp_tright=None): dic = {} dic[self.timeseries.x_name] = [] # self.timeseries.x dic[self.timeseries.y_name] = [] # self.timeseries.y dic['cellID'] = [] dic['containerID'] = [] dic['experimentID'] = [] for key in self.selections.keys(): if key == 'master': continue dic[key] = [] size = 0 # add cell ID, container ID, experiment ID, and TRUE/FALSE for each cdt for index, sl in enumerate(self.slices): # collect only if within bounds if sharp_tleft is not None: if self.time_bounds[index][0] < sharp_tleft: continue if sharp_tright is not None: if self.time_bounds[index][1] > sharp_tright: continue _x = self.timeseries.x[sl]
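A hedged usage sketch for the `TimeSeries` record above: two cells built from (time, value) couples, then read back through the default 'master' selection. The import path comes from the record's `path` field:

```python
from tunacell.base.timeseries import TimeSeries

ts = TimeSeries(ts=[(0., 1.0), (5., 1.4), (10., 2.1), (15., 2.9)],
                ids=['cell-1', 'cell-2'],
                index_cycles=[(0, 1), (2, 3)],       # rows 0-1 -> cell-1, rows 2-3 -> cell-2
                time_bounds=[(0., 5.), (10., 15.)])

coords = ts.use_condition()   # 'master' keeps every cell
print(coords.x, coords.y)     # concatenated times and values
```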
dmpetrov/dataversioncontrol
dvc/updater.py
Python
apache-2.0
5,458
0
import logging import os import sys import time from typing import TYPE_CHECKING, Optional from packaging import version from dvc import __version__ from dvc.utils.pkg import PKG if TYPE_CHECKING: from dvc.ui import RichText logger = logging.getLogger(__name__) class Updater: URL = "https://updater.dvc.org" UPDATER_FILE = "updater" TIMEOUT = 24 * 60 * 60 # every day TIMEOUT_GET = 10 def __init__(self, tmp_dir, friendly=False, hardlink_lock=False): from dvc.lock import make_lock self.updater_file = os.path.join(tmp_dir, self.UPDATER_FILE) self.lock = make_lock( self.updater_file + ".lock", tmp_dir=tmp_dir, friendly=friendly, hardlink_lock=hardlink_lock, ) self.current = version.parse(__version__).base_version def _is_outdated_file(self): ctime = os.path.getmtime(self.updater_file) outdated = time.time() - ctime >= self.TIMEOUT if outdated: logger.debug(f"'{self.updater_file}' is outdated") return outdated def _with_lock(self, func, action): from dvc.lock import LockError try: with self.lock: func() except LockError: msg = "Failed to acquire '{}' before {} updates" logger.debug(msg.format(self.lock.lockfile, action)) def check(self): from dvc.utils import env2bool if ( os.getenv("CI") or env2bool("DVC_TEST") or PKG == "snap" or not self.is_enabled() ): return self._with_lock(self._check, "checking") def _check(self): if not os.path.exists(self.updater_file) or self._is_outdated_file(): self.fetch() return with open(self.updater_file, encoding="utf-8") as fobj: import json try: info = json.load(fobj) latest = info["version"]
except Exception as exc: # pylint: disable=broad-except msg = "'{}' i
s not a valid json: {}" logger.debug(msg.format(self.updater_file, exc)) self.fetch() return if version.parse(self.current) < version.parse(latest): self._notify(latest) def fetch(self, detach=True): from dvc.daemon import daemon if detach: daemon(["updater"]) return self._with_lock(self._get_latest_version, "fetching") def _get_latest_version(self): import json import requests try: resp = requests.get(self.URL, timeout=self.TIMEOUT_GET) info = resp.json() except requests.exceptions.RequestException as exc: msg = "Failed to retrieve latest version: {}" logger.debug(msg.format(exc)) return with open(self.updater_file, "w+", encoding="utf-8") as fobj: json.dump(info, fobj) def _notify(self, latest: str, pkg: Optional[str] = PKG) -> None: from dvc.ui import ui if not sys.stdout.isatty(): return message = self._get_message(latest, pkg=pkg) return ui.error_write(message, styled=True) def _get_message( self, latest: str, current: str = None, color: str = "yellow", pkg: Optional[str] = None, ) -> "RichText": from dvc.ui import ui current = current or self.current update_message = ui.rich_text.from_markup( f"You are using dvc version [bold]{current}[/]; " f"however, version [bold]{latest}[/] is available." ) instruction = ui.rich_text.from_markup( self._get_update_instructions(pkg=pkg) ) return ui.rich_text.assemble( "\n", update_message, "\n", instruction, style=color ) @staticmethod def _get_update_instructions(pkg: Optional[str] = None) -> str: if pkg in ("osxpkg", "exe", "binary"): return ( "To upgrade, uninstall dvc and reinstall from " "[blue]https://dvc.org[/]." ) instructions = { "pip": "pip install --upgrade dvc", "rpm": "yum update dvc", "brew": "brew upgrade dvc", "deb": "apt-get install --only-upgrade dvc", "conda": "conda update dvc", "choco": "choco upgrade dvc", } if pkg not in instructions: return ( "Find the latest release at " "[blue]https://github.com/iterative/dvc/releases/latest[/]." ) instruction = instructions[pkg] return f"To upgrade, run '{instruction}'." def is_enabled(self): from dvc.config import Config, to_bool enabled = to_bool( Config(validate=False).get("core", {}).get("check_update", "true") ) logger.debug( "Check for update is {}abled.".format("en" if enabled else "dis") ) return enabled def notify_updates(): from contextlib import suppress from dvc.repo import NotDvcRepoError, Repo with suppress(NotDvcRepoError), Repo() as repo: hardlink_lock = repo.config["core"].get("hardlink_lock", False) updater = Updater(repo.tmp_dir, hardlink_lock=hardlink_lock) updater.check()
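A small sketch of driving `Updater` directly, outside the `notify_updates()` helper at the bottom of the record (the temp directory is hypothetical; `check()` delegates fetching to a background daemon):

```python
import tempfile

from dvc.updater import Updater  # import path from the record

updater = Updater(tempfile.mkdtemp())
if updater.is_enabled():
    updater.check()  # refreshes version info at most once per TIMEOUT (24h)
```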
nirvn/QGIS
python/plugins/processing/algs/qgis/DensifyGeometriesInterval.py
Python
gpl-2.0
2,659
0.000752
# -*- coding: utf-8 -*- """ *************************************************************************** DensifyGeometriesInterval.py by Anita Graser, Dec 2012 based on DensifyGeometries.py --------------------- Date : October 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************
************************************************** *
* * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from builtins import range __author__ = 'Anita Graser' __date__ = 'Dec 2012' __copyright__ = '(C) 2012, Anita Graser' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.core import (QgsProcessingParameterNumber, QgsProcessing) from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm): INTERVAL = 'INTERVAL' def group(self): return self.tr('Vector geometry') def __init__(self): super().__init__() self.interval = None def initParameters(self, config=None): self.addParameter(QgsProcessingParameterNumber(self.INTERVAL, self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double, 1, False, 0, 10000000)) def name(self): return 'densifygeometriesgivenaninterval' def displayName(self): return self.tr('Densify geometries given an interval') def outputName(self): return self.tr('Densified') def inputLayerTypes(self): return [QgsProcessing.TypeVectorLine, QgsProcessing.TypeVectorPolygon] def prepareAlgorithm(self, parameters, context, feedback): self.interval = self.parameterAsDouble(parameters, self.INTERVAL, context) return True def processFeature(self, feature, feedback): if feature.hasGeometry(): new_geometry = feature.geometry().densifyByDistance(float(self.interval)) feature.setGeometry(new_geometry) return feature
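A hedged sketch of running the algorithm through the QGIS Processing API; the `qgis:` provider prefix and the `INPUT`/`OUTPUT` parameters come from the `QgisFeatureBasedAlgorithm` base class and are assumptions here, as is the layer path:

```python
import processing  # available inside a QGIS Python session

result = processing.run('qgis:densifygeometriesgivenaninterval', {
    'INPUT': '/path/to/lines.shp',
    'INTERVAL': 1.0,
    'OUTPUT': 'memory:',
})
```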
zhouxiumin/projecteuler
python/euler65.py
Python
apache-2.0
329
0.097264
#coding=utf-8 def generatList(n)
: llist=[] for i in range(n): t=1 if i%3==1: t=2*(i/3+1) llist.append(t) return llist[::-1] def generatitem(n): fz=0 fm=1 llist=generatList(n) for i in llist: temp=fm fm=fz+i*fm
fz=temp return (fz+2*fm,fm) fz=[int(x) for x in list(str(generatitem(99)[0]))] print sum(fz)
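The record above is a Python 2 solution to Project Euler 65: the digit sum of the numerator of the 100th convergent of the continued fraction of e. A Python 3 cross-check of the same computation, using `fractions` instead of the hand-rolled recurrence:

```python
from fractions import Fraction

def e_terms(n):
    # e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third partial quotient is 2k
    return [2] + [2 * (i // 3 + 1) if i % 3 == 2 else 1 for i in range(1, n)]

def convergent(terms):
    value = Fraction(terms[-1])
    for a in reversed(terms[:-1]):   # evaluate the fraction from the inside out
        value = a + 1 / value
    return value

numerator = convergent(e_terms(100)).numerator
print(sum(int(d) for d in str(numerator)))  # 272, matching the record's output
```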
realestate-com-au/dashmat
dashmat/option_spec/import_line.py
Python
mit
2,434
0.015612
from dashmat.option_spec.module_imports import module_import_spec from dashmat.formatter import MergedOptionStringFormatter from dashmat.core_modules.base import Module from dashmat.errors import UnknownModule from input_algorithms.spec_base import boolean, string_spec, formatted, listof, overridden, or_spec, set_options from input_algorithms.many_item_spec import many_item_formatted_spec from input_algorithms.dictobj import dictobj import six class import_line_spec(many_item_formatted_spec): value_name = "Import line" specs = [listof(string_spec()), or_spec(string_spec(), set_options(import_path=string_spec()))] optional_specs = [string_spec()] def create_result(self, imports, module_name, import_from, meta, val, dividers): """Build an ImportLine from the parsed import-line components""" options = {"imports": imports, "module_name": module_name, "import_from": import_from} return ImportLine.FieldSpec(formatter=MergedOptionStringFormatter).normalise(meta, options) class ImportLine(dictobj.Spec): module_name = dictobj.Field( lambda: or_spec(string_spec(), set_options(import_path=module_import_spec(Module))) , formatted = True , help = "The name of the module this import comes from" ) imports = dictobj.Field( string_spec , f
ormatted = True , wrapper = listof , help = "The modules that are imported" ) import_from = dictobj.Field( string_spec , formatted = True , default = "main.jsx" , help = "The module in our import_path to import the imports from" ) def import_line(self, modules): module_name = self.module_name if type(module_name) is dict: module_name = self.module_nam
e['import_path'] if isinstance(module_name, six.string_types): if module_name not in modules: raise UnknownModule(module=module_name, available=list(modules.keys())) module = modules[module_name] else: module = module_name if type(module) is type: import_path = "{0}:{1}".format(module.module_path, module.__name__) module = module(import_path, import_path) imports = "{{{0}}}".format(", ".join(self.imports)) relative_to = module.relative_to return 'import {0} from "/modules/{1}/{2}"'.format(imports, relative_to, self.import_from)
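To make the output of `import_line()` concrete, here is the string it assembles, traced from the final `return` statement; the import names and the module's `relative_to` value are hypothetical:

```python
imports = "{{{0}}}".format(", ".join(["Dashboard", "Widget"]))
line = 'import {0} from "/modules/{1}/{2}"'.format(
    imports, "dashmat.core_modules.status", "main.jsx")
print(line)
# import {Dashboard, Widget} from "/modules/dashmat.core_modules.status/main.jsx"
```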
G4m4/chartreuse
scripts/freq_scale.py
Python
gpl-3.0
2,022
0.001978
#!/usr/bin/env python ''' @file freq_scale.py @brief Sandbox for various frequency scale generators @author gm @copyright gm 2014 This file is part of Chartreuse Chartreuse is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Chartreuse is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Chartreuse. If not, see <http://www.gnu.org/licenses/>. ''' import numpy import pylab class LogFreqScale(object): ''' Log frequency scale ''' def __init__(self, length, dft_length, sampling_freq): self.length = length self.dft_length = dft_length self.sampling_freq = sampling_freq self._Synthesize() def _Synthesize(self): ''' Actual processing function for generating the scale ''' kLowBound = 2.0 * self.sampling_freq / self.dft_length kHighBound = self.sampling_freq * 0.5 tmp = numpy.linspace(kLowBound, kHighBound, self.length) tmp[0] = self.sampling_freq / (self.dft_length * (3.0 / 4.0)) self.data = numpy.log2(tmp * 0.001) if __name__ == "__main__": import utilities sampling_freq = 48000.0 dft_bins_count = 2048 low_edge = 62.5 high_edge = 1500.0 low_edge_idx = n
umpy.ceil(low_edge * dft_bins_count / sampling_freq) high_edge_idx = dft_bins_count / 2 + 1 length = high_edge_idx - low_edge_idx + 1 gene
rator = LogFreqScale(length, dft_bins_count, sampling_freq) out_data = generator.data print(utilities.PrintMetadata(utilities.GetMetadata(out_data))) pylab.plot(out_data, label = "out") pylab.legend() pylab.show()
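A standalone sketch of the same scale construction without the `pylab`/`utilities` dependencies, with the constants copied from the record's `__main__` block:

```python
import numpy as np

sampling_freq, dft_bins = 48000.0, 2048
low_edge_idx = int(np.ceil(62.5 * dft_bins / sampling_freq))  # 3
high_edge_idx = dft_bins // 2 + 1                             # 1025
length = high_edge_idx - low_edge_idx + 1

freqs = np.linspace(2.0 * sampling_freq / dft_bins, 0.5 * sampling_freq, length)
freqs[0] = sampling_freq / (dft_bins * (3.0 / 4.0))  # special-cased first bin
scale = np.log2(freqs * 0.001)                       # log2 of frequency in kHz
print(scale.shape, scale[0], scale[-1])
```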
TouK/vumi
vumi/transports/cellulant/cellulant_sms.py
Python
bsd-3-clause
3,968
0.000756
# -*- test-case-name: vumi.transports.cellulant.tests.test_cellulant_sms -*- import json from urllib import urlencode from twisted.internet.defer import inlineCallbacks from vumi.utils import http_request_full from vumi import log from vumi.config import ConfigDict, ConfigText from vumi.transports.httprpc import HttpRpcTransport class CellulantSmsTransportConfig(HttpRpcTransport.CONFIG_CLASS): """Cellulant transport config. """ credentials = ConfigDict( "A dictionary where the `from_addr` is used for the key lookup and the" " returned value should be a dictionary containing the username and" " password.", required=True, static=True) outbound_url = ConfigText( "The URL to send outbound messages to.", required=True, static=True) class CellulantSmsTransport(HttpRpcTransport): """ HTTP transport for Cellulant SMS. """ transport_type = 'sms' CONFIG_CLASS = CellulantSmsTransportConfig EXPECTED_FIELDS = set(["SOURCEADDR", "DESTADDR", "MESSAGE", "ID"]) IGNORED_FIELDS = set(["channelID", "keyword", "CHANNELID", "serviceID", "SERVICEID", "unsub", "transactionID"]) KNOWN_ERROR_RESPONSE_CODES = { 'E0': 'Insufficient HTTP Params passed', 'E1': 'Invalid username or password', 'E2': 'Credits have expired or run out', '1005': 'Suspect source address', } def validate_config(self): config = self.get_static_config() self._credentials = config.credentials self._outbound_url = config.outbound_url return super(Ce
llulantSmsTransport, self).validate_config() @inlineCallbac
ks def handle_outbound_message(self, message): creds = self._credentials.get(message['from_addr'], {}) username = creds.get('username', '') password = creds.get('password', '') params = { 'username': username, 'password': password, 'source': message['from_addr'], 'destination': message['to_addr'], 'message': message['content'], } log.msg("Sending outbound message: %s" % (message,)) url = '%s?%s' % (self._outbound_url, urlencode(params)) log.msg("Making HTTP request: %s" % (url,)) response = yield http_request_full(url, '', method='GET') log.msg("Response: (%s) %r" % (response.code, response.delivered_body)) content = response.delivered_body.strip() # we'll only send 1 message at a time and so the API can only # return this on a valid ack if content == '1': yield self.publish_ack(user_message_id=message['message_id'], sent_message_id=message['message_id']) else: error = self.KNOWN_ERROR_RESPONSE_CODES.get(content, 'Unknown response code: %s' % (content,)) yield self.publish_nack(message['message_id'], error) @inlineCallbacks def handle_raw_inbound_message(self, message_id, request): values, errors = self.get_field_values(request, self.EXPECTED_FIELDS, self.IGNORED_FIELDS) if errors: log.msg('Unhappy incoming message: %s' % (errors,)) yield self.finish_request(message_id, json.dumps(errors), code=400) return log.msg(('CellulantSmsTransport sending from %(SOURCEADDR)s to ' '%(DESTADDR)s message "%(MESSAGE)s"') % values) yield self.publish_message( message_id=message_id, content=values['MESSAGE'], to_addr=values['DESTADDR'], from_addr=values['SOURCEADDR'], provider='vumi', transport_type=self.transport_type, transport_metadata={'transport_message_id': values['ID']}, ) yield self.finish_request( message_id, json.dumps({'message_id': message_id}))
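The outbound leg of the transport reduces to a single GET with the credentials and message in the query string. A sketch of that URL construction (Python 3 `urllib` shown for clarity, although the record itself is Python 2; all values hypothetical):

```python
from urllib.parse import urlencode

params = {
    'username': 'user', 'password': 'secret',
    'source': '27761230000', 'destination': '27764560000',
    'message': 'hello world',
}
url = '%s?%s' % ('https://sms.example.com/send', urlencode(params))
```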
mrev11/ccc3
jt/jtpython/jtlib/jtexec.py
Python
lgpl-2.1
653
0.027565
##! /usr/bin/env python # _*_ coding: latin-1 _*_ import jtutil import jtsocket import jtdom def jtexec(cmd): jtsocket.s
end("<jtexec>"+jtutil.cdataif(cmd)+"</jtexec>") # <exitvalue>value</exitvalue> # vagy # <execerror>error</execerror> while 1: rsp=jtsocket.recv() if rsp==None: return None dom=jtdom.do
mparse(rsp) node=jtdom.domfirst(dom) type=jtdom.domname(node) value=jtdom.domtext(node) if type=="execerror": raise jtutil.applicationerror, ("jtexec", "failed", value) elif type=="exitvalue": return value
lucasw/vimjay
scripts/dr_camera_info.py
Python
gpl-3.0
1,867
0.002142
#!/usr/bin/env python # Lucas Walter # Configure a CameraInfo from a dynamic reconfigure interface import rospy from dynamic_reconfigure.server import Server from sensor_msgs.msg import CameraInfo from vimjay.cfg import DrCameraInfoConfig class DrCameraInfo: def __init__(self): rospy.init_node('dr_camera_info') self.camera_info = None self.pub = rospy.Publisher("camera_info", CameraInfo, queue_size=1, latch=True) self.server = Server(DrCameraInfoConfig, self.dr_callback) # reset=True makes this node survive jumps back in time (why not make that the default?) # https://github.com/ros-visualization/interactive_markers/pull/47/ # TODO(lucasw) make this update if the callback changes update rate self.timer = rospy.Timer(rospy.Duration(0.2), self.update, reset=True) def dr_callback(self, config, level): ci = CameraInfo() ci.header.frame_id = config['frame_id'] ci.width = config['width'] ci.height = config['height'] ci.distortion_model = config['distortion_model'] ci.D = [config['d0'], config['d1'], config['d2'], config['d3'], config['d4']] ci.K[0 * 3 + 0] = config['fx'] ci.
K[0 * 3 + 2] = config['cx'] ci.K[1 * 3 + 1] = config['fy'] ci.K[1 * 3 + 2] = config['cy'] ci.K[2 * 3 + 2] = 1 ci.P[0 * 4 + 0] = conf
ig['fx'] ci.P[0 * 4 + 2] = config['cx'] ci.P[1 * 4 + 1] = config['fy'] ci.P[1 * 4 + 2] = config['cy'] ci.P[2 * 4 + 2] = 1 ci.R[0] = 1 ci.R[4] = 1 ci.R[8] = 1 self.camera_info = ci return config def update(self, event): self.camera_info.header.stamp = rospy.Time.now() self.pub.publish(self.camera_info) if __name__ == '__main__': dr_camera_info = DrCameraInfo() rospy.spin()
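The flat-index assignments above populate the standard 3x3 intrinsic matrix K and the matching 3x4 projection P of a monocular `CameraInfo`. Written out with numpy for clarity (values hypothetical):

```python
import numpy as np

fx, fy, cx, cy = 500.0, 500.0, 320.0, 240.0
K = np.array([[fx, 0., cx],
              [0., fy, cy],
              [0., 0., 1.]])
P = np.hstack([K, np.zeros((3, 1))])  # monocular: the fourth column stays zero
```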
s0hvaperuna/Not-a-bot
cogs/utils.py
Python
mit
28,689
0.002162
import inspect import logging import os import re import shlex import subprocess import sys import textwrap import time from datetime import datetime from email.utils import formatdate as format_rfc2822 from io import StringIO from urllib.parse import quote import aiohttp import discord import psutil import pytz from asyncpg.exceptions import PostgresE
rror from dateutil import parser from dateutil.tz import gettz from discord.ext.commands import (BucketType, Group, clean_content) from discord.ext.commands.errors import BadArgument from bot.bot import command, cooldown, bot_has_permissions from bot.converters import FuzzyRole, TzConverter, PossibleUser from cogs.cog import Cog from utils.tzinfo import fuzzy_tz, tz_dict from utils.un
zalgo import unzalgo, is_zalgo from utils.utilities import (random_color, get_avatar, split_string, get_emote_url, send_paged_message, format_timedelta, parse_timeout, DateAccuracy) try: from pip.commands import SearchCommand except ImportError: try: from pip._internal.commands.search import SearchCommand except (ImportError, TypeError): SearchCommand = None logger = logging.getLogger('terminal') parserinfo = parser.parserinfo(dayfirst=True) class Utilities(Cog): def __init__(self, bot): super().__init__(bot) @command() @cooldown(1, 10, BucketType.guild) async def changelog(self, ctx, page: int=1): sql = 'SELECT * FROM changelog ORDER BY time DESC' rows = await self.bot.dbutil.fetch(sql) def create_embed(row): embed = discord.Embed(title='Changelog', description=row['changes'], timestamp=row['time']) return embed def get_page(page, idx): if not isinstance(page, discord.Embed): page = create_embed(page) page.set_footer(text=f'Page {idx+1}/{len(rows)}') rows[idx] = page return page if page > 0: page -= 1 elif page == 0: page = 1 await send_paged_message(ctx, rows, True, page, get_page) @command(aliases=['pong']) @cooldown(1, 5, BucketType.guild) async def ping(self, ctx): """Ping pong""" t = time.perf_counter() if ctx.received_at: local_delay = t - ctx.received_at else: local_delay = datetime.utcnow().timestamp() - ctx.message.created_at.timestamp() await ctx.trigger_typing() t = time.perf_counter() - t message = 'Pong!\n🏓 took {:.0f}ms\nLocal delay {:.0f}ms\nWebsocket ping {:.0f}ms'.format(t*1000, local_delay*1000, self.bot.latency*1000) if hasattr(self.bot, 'pool'): try: _, sql_t = await self.bot.dbutil.fetch('SELECT 1', measure_time=True) message += '\nDatabase ping {:.0f}ms'.format(sql_t * 1000) except PostgresError: message += '\nDatabase could not be reached' await ctx.send(message) @command(aliases=['e', 'emoji']) @cooldown(1, 5, BucketType.channel) async def emote(self, ctx, emote: str): """Get the link to an emote""" emote = get_emote_url(emote) if emote is None: return await ctx.send('You need to specify an emote. 
Default (unicode) emotes are not supported ~~yet~~') await ctx.send(emote) @command(aliases=['roleping']) @cooldown(1, 4, BucketType.channel) async def how2role(self, ctx, *, role: FuzzyRole): """Searches a role and tells you how to ping it""" name = role.name.replace('@', '@\u200b') await ctx.send(f'`{role.mention}` {name}') @command(aliases=['howtoping']) @cooldown(1, 4, BucketType.channel) async def how2ping(self, ctx, *, user): """Searches a user by their name and get the string you can use to ping them""" if ctx.guild: members = ctx.guild.members else: members = self.bot.get_all_members() def filter_users(predicate): for member in members: if predicate(member): return member if member.nick and predicate(member.nick): return member if ctx.message.raw_role_mentions: i = len(ctx.invoked_with) + len(ctx.prefix) + 1 user = ctx.message.clean_content[i:] user = user[user.find('@')+1:] found = filter_users(lambda u: str(u).startswith(user)) s = '`<@!{}>` {}' if found: return await ctx.send(s.format(found.id, str(found))) found = filter_users(lambda u: user in str(u)) if found: return await ctx.send(s.format(found.id, str(found))) else: return await ctx.send('No users found with %s' % user) @command(aliases=['src', 'source_code', 'sauce']) @cooldown(1, 5, BucketType.user) async def source(self, ctx, *cmd): """Link to the source code for this bot You can also get the source code of commands by doing {prefix}{name} cmd_name""" if cmd: full_name = ' '.join(cmd) cmnd = self.bot.all_commands.get(cmd[0]) if cmnd is None: raise BadArgument(f'Command "{full_name}" not found') for c in cmd[1:]: if not isinstance(cmnd, Group): raise BadArgument(f'Command "{full_name}" not found') cmnd = cmnd.get_command(c) cmd = cmnd if not cmd: await ctx.send('You can find the source code for this bot here https://github.com/s0hv/Not-a-bot') return source, line_number = inspect.getsourcelines(cmd.callback) filename = inspect.getsourcefile(cmd.callback).replace(os.getcwd(), '').strip('\\/') # unformatted source original_source = textwrap.dedent(''.join(source)) # Url pointing to the command in github url = f'https://github.com/s0hv/Not-a-bot/tree/master/{filename}#L{line_number}' # Source code in message source = original_source.replace('```', '`\u200b`\u200b`') # Put zero width space between backticks so they can be within a codeblock source = f'<{url}>\n```py\n{source}\n```' if len(source) > 2000: file = discord.File(StringIO(original_source), filename=f'{full_name}.py') await ctx.send(f'Content was longer than 2000 ({len(source)} > 2000)\n<{url}>', file=file) return await ctx.send(source) @command() @cooldown(1, 5, BucketType.user) async def undo(self, ctx): """ Undoes the last undoable command result. Not all messages will be undoable and undoable messages override each other because only one message can be undone. 
""" if not await ctx.undo(): await ctx.send('Failed to undo the latest undoable command for you.\n' 'Do note that they expire in one minute') @command() @cooldown(1, 10, BucketType.user) async def invite(self, ctx): """This bots invite link""" await ctx.send(f'<https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=1342557248&scope=bot>') @staticmethod def _unpad_zero(value): if not isinstance(value, str): return return value.lstrip('0') @command(aliases=['bot', 'botinfo']) @cooldown(2, 5, BucketType.user) @bot_has_permissions(embed_links=True) async def stats(self, ctx): """Get stats about this bot""" pid = os.getpid() process = psutil.Process(pid) uptime = time.time() - process.create_time() d = datetime.utcfromtimestamp(uptime) uptime = f'{d.day-1}d {d.hour}h {d.minute}m {d.second}s' current_memory = round(process.memory_info().rss / 1048576, 2) memory_usage = f' Current: {current_memory}MB' if sys.platform == 'linux': try: # use pmap to find the memor
douban/code
vilya/models/ngit/repo.py
Python
bsd-3-clause
21,073
0.000238
# -*- coding: utf-8 -*- from __future__ import absolute_import import os import time import gzip import tempfile import ConfigParser from cStringIO import StringIO from datetime import datetime from ellen.repo import Jagare from ellen.utils import JagareError from vilya.libs.permdir import get_tmpdir from vilya.models.user import User from vilya.models.ngit.commit import Commit from vilya.models.ngit.diff import Diff from vilya.models.ngit.blob import Blob from vilya.models.ngit.submodule import Submodule from vilya.models.ngit.tree import Tree from vilya.models.ngit.blame import Blame LATEST_UPDATE_REF_THRESHOLD = 60 * 60 * 24 MAX_DIFF_PATCHES = 2000 REFS_HEADS_PREFIX_LENGTH = len('refs/heads/') class RepoMergeError(Exception): pass class RepoPushError(Exception): pass class Repo(object): provided_features = [] def __init__(self, path): self.type = "repo" self.path = path self.repo = Jagare(self.path) def provide(self, name): '''检查是否提供某功能,即是否提供某接口''' return name in self.provided_features @property def empty(self): return self.is_empty @property def is_empty(self): return self.repo.empty @property def default_branch(self): branch = '' head = self.repo.head if head: branch = head.name[REFS_HEADS_PREFIX_LENGTH:] return branch def update_default_branch(self, name): branches = self.repo.branches if name not in branches: return None self.repo.update_head(name) def clone(self, path, bare=None, branch=None, mirror=None, env=None, shared=None): self.repo.clone(path, bare=bare, branch=branch, mirror=mirror, env=env) # shared=shared) why? def archive(self, name, ref='master', ext='tar.gz'): content = self.repo.archive(name, ref=ref) if ext == 'tar': return content outbuffer = StringIO() zipfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=outbuffer) zipfile.write(content) zipfile.close() out = outbuffer.getvalue() return out def get_submodule(self, ref, path): path = path.strip() gitmodules = self.repo.show("%s:%s" % (ref, '.gitmodules')) if not gitmodules: return None submodules_lines = gitmodules["data"].split('\n') modules_str = '\n'.join([line.strip() for line in submodules_lines]) config = ConfigParser.RawConfigParser() config.readfp(StringIO(modules_str)) for section in config.sections(): if config.has_option(section, 'path') and config.get( section, 'path') == path: url = config.get(section, 'url') return Submodule(url, path) return None def get_file(self, ref, path): blob = self.repo.show("%s:%s" % (ref, path)) if not blob: return None if blob['type'] != 'blob': return None # TODO: validate blob return Blob(self, blob) def get_file_by_lines(self, ref, path): blob = self.get_file(ref, path) # TODO: blob.size < xxx if not blob or blob.binary: return None if not blob.data: return [] src = blob.data return src.splitlines() def get_file_n_lines(self, ref, path): lines = self.get_file_by_lines(ref, path) if lines: return len(lines) return 0 def get_commits(self, to_ref, from_ref=None, path=None, skip=0, max_count=0, author=None, query=None, first_parent=None, since=0, no_merges=None): commits = self.repo.rev_list(to_ref=to_ref, from_ref=from_ref, path=path, skip=skip, max_count=max_count, author=author, query=query, first_parent=first_parent, since=since, no_merges=no_merges) return [Commit(self, commit) for commit in commits] def get_raw_diff(self, ref, from_ref=None, paths=None, **kw): ''' get Jagare formated diff dict ''' try: diff = self.repo.diff(ref, from_ref=from_ref, paths=paths, **kw) except KeyError: return None return diff def get_diff(self, ref=None, from_ref=None, 
linecomments=[], raw_diff=None, paths=None, **kw): ''' get ngit wrapped diff object ''' _raw_diff = None if raw_diff: _raw_diff = raw_diff elif ref: _raw_diff = self.get_raw_diff(ref, from_ref=from_ref, paths=paths, **kw) if _raw_diff: return Diff(self, _raw_diff, linecomments) else: return None def get_diff_length(self, ref, from_ref=None, **kw): _raw_diff = self.get_raw_diff(ref, from_ref=from_ref, **kw) return len(_raw_diff['patches']) if _raw_diff else 0 def get_last_commit(self, ref, path=None, no_merges=False): if not path: return self.get_commit(ref) commit = self.repo.rev_list(ref, path=path, max_count=1, no_merges=no_merges) if not commit: return None commit = commit[0] commit = Commit(self, commit) return commit def get_previours_commit(self, ref, path): """previours commit that touch the specified path""" commits = self.repo.rev_list(ref, path=path, max_count=2, no_merges=True) for commit in commits: if commit['sha'] != self.repo.sha(ref): return Commit(self, commit) return None def get_commit(self, ref): sha = self.repo.resolve_commit(ref) if not sha: return None commit = self.repo.show(sha) if not commit: return None # TODO: validate commit return Commit(self, commit) def delete_branch(self, name): self.repo.delete_branch(name) def get_path_by_ref(self, ref): ''' get blob or tree ''' path = self.repo.show(ref) if not path: return None if pat
h['type'] == 'tree': path = Tree(self, path['entries']) elif path['type'] == 'blob': path = Blob(self, path) else: path = None return path def get_path(self, ref, path): _item = self.repo.show("%s:%s" % (ref, path)) if not _item: return None if _item['type'] ==
'tree': item = Tree(self, _item['entries']) elif _item['type'] == 'blob': item = Blob(self, _item) else: item = None return item def get_last_update_timestamp(self): commit = self.get_last_commit('HEAD') if not commit: return 0 return int(commit.author_timestamp) class ProjectRepo(Repo): provided_features = ['project', 'fulltext', 'moreline', 'side_by_side', 'patch_actions'] def __init__(self, project, pull=None): self.type = "project" self.pull = pull self.project = project self.project_name = project.name self.name = project.name self.path = project.repo_path self.repo = Jagare(self.path) # TODO: url @property def api_url(self): return '' @property def context_url(self): return 'moreline' @property def fulltext_url(self): return 'fulltext' @property def branches(self): return self.repo.branches @property def tags(self): return self.repo.tags def get_tree(self, ref, path=None, recursive=False, with_commit=False, recursive_with_tree_node=False): tree = self.repo.ls_tree( ref, path=path, recursive=recursive, with_commit=with_commit)
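A hedged usage sketch of the `Repo` wrapper above; it assumes the vilya/ellen stack is installed, and the repository path is hypothetical:

```python
from vilya.models.ngit.repo import Repo  # import path from the record

repo = Repo('/path/to/project.git')
if not repo.is_empty:
    branch = repo.default_branch or 'master'
    blob = repo.get_file(branch, 'README.md')        # Blob instance or None
    commits = repo.get_commits(branch, max_count=5)  # list of Commit wrappers
```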
philippks/OSMNames
tests/export_osmnames/test_export_housenumbers.py
Python
gpl-2.0
365
0.00274
import os from osmnames.export_osmnames.export_osmnames import export_housenumbers, create_views def test_tsv_get_created(session, tables): session.add( tables.osm_housenumber( osm_id=1, ) ) create_views()
export_housenumbers() assert os.path.exists('/tmp/osmnames/expo
rt/test_housenumbers.tsv')
sonali0901/zulip
zerver/webhooks/freshdesk/view.py
Python
apache-2.0
5,880
0.00102
"""Webhooks for external integrations.""" from __future__ import absolute_import from django.http import HttpRequest, HttpResponse from django.utils.translation import ugettext as _ from zerver.models import get_client, UserProfile from zerver.lib.actions import check_send_message from zerver.lib.response import json_success, json_error from zerver.lib.notifications import convert_html_to_markdown from zerver.decorator import REQ, has_request_variables, authenticated_rest_api_view import logging import ujson from typing import Any, Dict, Optional, Tuple, Union, Text class TicketDict(dict): """ A helper class to turn a dictionary with ticket information into an object where each of the keys is an attribute for easy access. """ def __getattr__(self, field): # type: (str) -> Any if "_" in field: return self.get(field) else: return self.get("ticket_" + field) def property_name(property, index): # type: (str, int) -> str """The Freshdesk API is currently pretty broken: statuses are customizable but the API will only tell you the number associated with the status, not the name. While we engage the Freshdesk developers about exposing this information through the API, since only FlightCar uses this integration, hardcode their statuses. """ statuses = ["", "", "Open", "Pending", "Resolved", "Closed", "Waiting on Customer", "Job Application", "Monthly"] priorities = ["", "Low", "Medium", "High", "Urgent"] if property == "status": return statuses[index] if index < len(statuses) else str(index) elif property == "priority": return priorities[index] if index < len(priorities) else str(index) else: raise ValueError("Unknown property") def parse_freshdesk_event(event_string): # type: (str) -> List[str] """These are always of the form "{ticket_action:created}" or "{status:{from:4,to:6}}". Note the lack of string quoting: this isn't valid JSON so we have to parse it ourselves. """ data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":") if len(data) == 2: # This is a simple ticket action event, like # {ticket_action:created}. return data else: # This is a property change event, like {status:{from:4,to:6}}. Pull out # the property, from, and to states. property, _, from_state, _, to_state = data return [property, property_name(property, int(from_state)), property_name(property, int(t
o_state))] def format_freshdesk_note_message(ticket, event_info): # type: (TicketDict, List[str]) -> str """There are public (visible to customers) and private note types.""" note_type = event_info[1] content = "%s <%s> added a %s note to [ticket #%s](%s)." % ( ticket.requester_nam
e, ticket.requester_email, note_type, ticket.id, ticket.url) return content def format_freshdesk_property_change_message(ticket, event_info): # type: (TicketDict, List[str]) -> str """Freshdesk will only tell us the first event to match our webhook configuration, so if we change multiple properties, we only get the before and after data for the first one. """ content = "%s <%s> updated [ticket #%s](%s):\n\n" % ( ticket.requester_name, ticket.requester_email, ticket.id, ticket.url) # Why not `"%s %s %s" % event_info`? Because the linter doesn't like it. content += "%s: **%s** => **%s**" % ( event_info[0].capitalize(), event_info[1], event_info[2]) return content def format_freshdesk_ticket_creation_message(ticket): # type: (TicketDict) -> str """They send us the description as HTML.""" cleaned_description = convert_html_to_markdown(ticket.description) content = "%s <%s> created [ticket #%s](%s):\n\n" % ( ticket.requester_name, ticket.requester_email, ticket.id, ticket.url) content += """~~~ quote %s ~~~\n """ % (cleaned_description,) content += "Type: **%s**\nPriority: **%s**\nStatus: **%s**" % ( ticket.type, ticket.priority, ticket.status) return content @authenticated_rest_api_view(is_webhook=True) @has_request_variables def api_freshdesk_webhook(request, user_profile, payload=REQ(argument_type='body'), stream=REQ(default='freshdesk')): # type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse ticket_data = payload["freshdesk_webhook"] required_keys = [ "triggered_event", "ticket_id", "ticket_url", "ticket_type", "ticket_subject", "ticket_description", "ticket_status", "ticket_priority", "requester_name", "requester_email", ] for key in required_keys: if ticket_data.get(key) is None: logging.warning("Freshdesk webhook error. Payload was:") logging.warning(request.body) return json_error(_("Missing key %s in JSON") % (key,)) ticket = TicketDict(ticket_data) subject = "#%s: %s" % (ticket.id, ticket.subject) try: event_info = parse_freshdesk_event(ticket.triggered_event) except ValueError: return json_error(_("Malformed event %s") % (ticket.triggered_event,)) if event_info[1] == "created": content = format_freshdesk_ticket_creation_message(ticket) elif event_info[0] == "note_type": content = format_freshdesk_note_message(ticket, event_info) elif event_info[0] in ("status", "priority"): content = format_freshdesk_property_change_message(ticket, event_info) else: # Not an event we know handle; do nothing. return json_success() check_send_message(user_profile, get_client("ZulipFreshdeskWebhook"), "stream", [stream], subject, content) return json_success()
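The brace-string parsing in `parse_freshdesk_event()` is the subtle part of this record; a standalone sketch of just that step, with the `property_name` lookup omitted:

```python
def parse_event(event_string):
    data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":")
    if len(data) == 2:
        return data                       # simple action event
    prop, _, from_state, _, to_state = data
    return [prop, from_state, to_state]   # property change event

print(parse_event("{ticket_action:created}"))  # ['ticket_action', 'created']
print(parse_event("{status:{from:4,to:6}}"))   # ['status', '4', '6']
```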
misterlihao/network-programming-project
synchronizationRole.py
Python
mit
3,395
0.010898
import socket import os import threading import time import zipfile import threading import myPacket as mp def getParentDirectory(path): path2 = path.split('/') temp='' for ph in path2: if(len(ph)>4 and (ph[len(ph)-4:] == '.txt')): break
temp = os.path.join(temp, ph) return temp def checkCharVersion(sock, myChadir, friChadir): text = str(getCharDataSize(myChadir)) print(text) mp.sendPacket(sock, text.encode('utf8')) data = mp.recvPacket(sock).decode('utf8') if cmpCharVersion(getCharDataSize(friChadir), int(data)): return True retu
rn False def cmpCharVersion(myDataSize = 0, hisDataSize = 0): if myDataSize == hisDataSize: return True return False def getCharDataSize(charDirectory): temp = 0 for dirPath, dirNames, fileNames in os.walk(charDirectory): for fileName in fileNames: file = os.path.join(dirPath, fileName) temp += os.path.getsize(file) return temp def updateCharacter(sock, friChadir, friendID, func): fileName = friendID+'.zip' with open(fileName, 'wb') as cfile: while True: data = mp.recvPacket(sock) if data == b'EOF': break cfile.write(data) func(0) #win32gui.ShowWindow(self.hwnd, 0) os.system('rd /S /Q ' + friChadir) zf = zipfile.ZipFile(fileName) zf.extractall(friChadir) zf.close() func(1) #win32gui.ShowWindow(self.hwnd, 1) os.remove(fileName) def uploadCharacter(sock, myChadir): sfileName = 'ArchiveName.zip' zf = zipfile.ZipFile(sfileName,'w',zipfile.ZIP_DEFLATED) for dirPath, dirNames, fileNames in os.walk(myChadir): for fileName in fileNames: file = os.path.join(dirPath, fileName) zf.write(file, file[len(myChadir)+1:]) zf.close() with open(sfileName, 'rb') as file: while True: data = file.read(4096) if not data: break mp.sendPacket(sock, data) time.sleep(1) # delete after send in fixed len mp.sendPacket(sock, b'EOF') os.remove(sfileName) def updataIfNeed(sock, myChafile, friendID, func, callbackFunc = None): firChafile = func(None) friChadir = getParentDirectory(firChafile) myChadir = getParentDirectory(myChafile) try: if not checkCharVersion(sock, myChadir, friChadir): mp.sendPacket(sock, 'True'.encode('utf8')) data = mp.recvPacket(sock).decode('utf8') if data=='True': myThread = threading.Thread(target=uploadCharacter, args=(sock, myChadir)) myThread.setDaemon(True) myThread.start() #uploadCharacter(sock, myChadir) updateCharacter(sock, friChadir, friendID, func) if data=='True': myThread.join( ) else: mp.sendPacket(sock, 'False'.encode('utf8')) data = mp.recvPacket(sock).decode() if data == 'True': uploadCharacter(sock, myChadir) except: pass finally: sock.close() if callbackFunc != None: callbackFunc() #thread = threading.Thread(target=self.listen_to_chat_messagesInThread) #thread.setDaemon(True) #thread.start() #self.connected = True
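The character sync above is a zip-and-stream round trip over a socket. A self-contained sketch of just the packing step it performs (directory and archive names hypothetical):

```python
import os
import zipfile

def pack_character(src_dir, archive_name):
    # mirrors uploadCharacter: entries are stored relative to the character dir
    with zipfile.ZipFile(archive_name, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dir_path, _, file_names in os.walk(src_dir):
            for file_name in file_names:
                full = os.path.join(dir_path, file_name)
                zf.write(full, os.path.relpath(full, src_dir))

pack_character('characters/hero', 'hero.zip')
```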
jejimenez/django
django/template/engine.py
Python
bsd-3-clause
10,778
0
import warnings from django.core.exceptions import ImproperlyConfigured from django.utils import lru_cache, six from django.utils.deprecation import RemovedInDjango110Warning from django.utils.functional import cached_property from django.utils.module_loading import import_string from .base import Context, Template from .context import _builtin_context_processors from .exceptions import TemplateDoesNotExist from .library import import_library _context_instance_undefined = object() _dictionary_undefined = object() _dirs_undefined = object() class Engine(object): default_builtins = [ 'django.template.defaulttags', 'django.template.defaultfilters',
'django.template.loader_tags', ] def __init__(self, dirs=None, app_dirs=False, allowed_include_roots=None, context_processors=None, debug=False, loaders=None, string_if_invalid='', file_charset='utf-8', libraries=None, builtins=None): if dirs is None: dirs = [] if allowed_include_roots is None:
allowed_include_roots = [] if context_processors is None: context_processors = [] if loaders is None: loaders = ['django.template.loaders.filesystem.Loader'] if app_dirs: loaders += ['django.template.loaders.app_directories.Loader'] else: if app_dirs: raise ImproperlyConfigured( "app_dirs must not be set when loaders is defined.") if libraries is None: libraries = {} if builtins is None: builtins = [] if isinstance(allowed_include_roots, six.string_types): raise ImproperlyConfigured( "allowed_include_roots must be a tuple, not a string.") self.dirs = dirs self.app_dirs = app_dirs self.allowed_include_roots = allowed_include_roots self.context_processors = context_processors self.debug = debug self.loaders = loaders self.string_if_invalid = string_if_invalid self.file_charset = file_charset self.libraries = libraries self.template_libraries = self.get_template_libraries(libraries) self.builtins = self.default_builtins + builtins self.template_builtins = self.get_template_builtins(self.builtins) @staticmethod @lru_cache.lru_cache() def get_default(): """ When only one DjangoTemplates backend is configured, returns it. Raises ImproperlyConfigured otherwise. This is required for preserving historical APIs that rely on a globally available, implicitly configured engine such as: >>> from django.template import Context, Template >>> template = Template("Hello {{ name }}!") >>> context = Context({'name': "world"}) >>> template.render(context) 'Hello world!' """ # Since Engine is imported in django.template and since # DjangoTemplates is a wrapper around this Engine class, # local imports are required to avoid import loops. from django.template import engines from django.template.backends.django import DjangoTemplates django_engines = [engine for engine in engines.all() if isinstance(engine, DjangoTemplates)] if len(django_engines) == 1: # Unwrap the Engine instance inside DjangoTemplates return django_engines[0].engine elif len(django_engines) == 0: raise ImproperlyConfigured( "No DjangoTemplates backend is configured.") else: raise ImproperlyConfigured( "Several DjangoTemplates backends are configured. " "You must select one explicitly.") @cached_property def template_context_processors(self): context_processors = _builtin_context_processors context_processors += tuple(self.context_processors) return tuple(import_string(path) for path in context_processors) def get_template_builtins(self, builtins): return [import_library(x) for x in builtins] def get_template_libraries(self, libraries): loaded = {} for name, path in libraries.items(): loaded[name] = import_library(path) return loaded @cached_property def template_loaders(self): return self.get_template_loaders(self.loaders) def get_template_loaders(self, template_loaders): loaders = [] for template_loader in template_loaders: loader = self.find_template_loader(template_loader) if loader is not None: loaders.append(loader) return loaders def find_template_loader(self, loader): if isinstance(loader, (tuple, list)): args = list(loader[1:]) loader = loader[0] else: args = [] if isinstance(loader, six.string_types): loader_class = import_string(loader) if getattr(loader_class, '_accepts_engine_in_init', False): args.insert(0, self) else: warnings.warn( "%s inherits from django.template.loader.BaseLoader " "instead of django.template.loaders.base.Loader. 
" % loader, RemovedInDjango110Warning, stacklevel=2) return loader_class(*args) else: raise ImproperlyConfigured( "Invalid value in template loaders configuration: %r" % loader) def find_template(self, name, dirs=None, skip=None): tried = [] for loader in self.template_loaders: if loader.supports_recursion: try: template = loader.get_template( name, template_dirs=dirs, skip=skip, ) return template, template.origin except TemplateDoesNotExist as e: tried.extend(e.tried) else: # RemovedInDjango20Warning: Use old api for non-recursive # loaders. try: return loader(name, dirs) except TemplateDoesNotExist: pass raise TemplateDoesNotExist(name, tried=tried) def from_string(self, template_code): """ Returns a compiled Template object for the given template code, handling template inheritance recursively. """ return Template(template_code, engine=self) def get_template(self, template_name, dirs=_dirs_undefined): """ Returns a compiled Template object for the given template name, handling template inheritance recursively. """ if dirs is _dirs_undefined: dirs = None else: warnings.warn( "The dirs argument of get_template is deprecated.", RemovedInDjango110Warning, stacklevel=2) template, origin = self.find_template(template_name, dirs) if not hasattr(template, 'render'): # template needs to be compiled template = Template(template, origin, template_name, engine=self) return template # This method was originally a function defined in django.template.loader. # It was moved here in Django 1.8 when encapsulating the Django template # engine in this Engine class. It's still called by deprecated code but it # will be removed in Django 1.10. It's superseded by a new render_to_string # function in django.template.loader. def render_to_string(self, template_name, context=None, context_instance=_context_instance_undefined, dirs=_dirs_undefined, dictionary=_dictionary_undefined): if context_instance is _context_instance_undefined: context_instance = None else: warnings.warn( "The context_instance argument of render_to_string is " "deprecated.", R
4k1/wufuzzer
src/util.py
Python
mit
2,921
0.008216
#!/usr/bin/env python # -*- coding: utf-8 -*- # # ============================================================================== # MIT License # # Copyright (c) 2017-2019 4k1 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ============================================================================== import os import datetime import threading class Logging(): logmode = False def __init__(self): self.__lock = threading.Lock() # create dir projdir = "proj_" + datetime.datetime.today().strftime("%Y%m%d_%H%M%S") os.mkdir(projdir) self.__fn = projdir + "/" + datetime.datetime.today().strftime("%Y%m%d_%H%M%S_#") def get_basename(self): return self.__fn def set_logging_mode(self, logmode): self.logmode = logmode def log(self, tid, value): if self.logmode: return try: f = open(self.__fn + str(tid) + ".log", "a") f.write(value + '\r\n') f.close() except: None def warn(self, value): with self.__lock: try: f = open(self.__fn + "!.log", "a") f.write(value + '\r\n') f.close() except: None def vsplit(iterable, n): return [iterable[x:x + n] for x in range(0, len(iterable), n)] def urled(p): if "://" in p: p = p[p.find("://")+3:] if "/" not in p: return "" else: p = p[p.find("/")+1:] if p.split() == "": return "" else: return p else: return p def passed(p): if p == "/": return "" if p[0:1] == "/": p = p[1:] if p[-1:] != "/": return p + "/" return p def filed(p): if p[0:1] == "/": p = p[1:] return p
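A quick illustration of the three path helpers at the end of the record, with the outputs traced from the code; it assumes the file is importable as `util` (it lives at `src/util.py`):

```python
from util import urled, passed, filed  # assumed import name

print(urled('http://example.com/admin/login.php'))  # -> 'admin/login.php'
print(passed('/cgi-bin'))                           # -> 'cgi-bin/'
print(filed('/index.html'))                         # -> 'index.html'
```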
gannicus-yu/pyutils
myutils/mailbase.py
Python
apache-2.0
4,008
0.000763
#!/usr/bin/env python # coding: utf-8 import email.utils import logging import os import smtplib import threading from email.mime.text import MIMEText from email.MIMEMultipart import MIMEMultipart logger = logging.getLogger("maillog") class MailBase(threading.Thread): mailServerPort = 25 def __init__(self, subject, content, basic_info, attachment=""): """ Threaded mail sending helper. @Params target: file or string basicInfo= { "TOLIST": ["heyu@ucweb.com"], "SERVER": "mail.ucweb.com", "PORT": 25, #25 if missing "USERNAME": "test@ucweb.com", "PASSWORD": "" } (attachment) :param subject: mail subject :param content: file name or content; a file name must be at most 50 characters :param basic_info: mail-related settings :param attachment: attachment file """ threading.Thread.__init__(self) self._set_basic_info(basic_info) self.subject = subject self.content = content self.attachment = attachment def _set_basic_info(self, basic_info): """ :type basic_info: dict
"""
self.BASICS = {} basic = ["TOLIST", "SERVER", "USERNAME", "PASSWORD", "PORT"] if isinstance(basic_info, dict): if "PORT" not in basic_info.keys(): basic_info["PORT"] = self.mailServerPort if len(basic_info.keys()) != len(basic): logger.error("params nums not correct~") raise BadEmailSettings("basic_info param error") for basic in basic: if basic in basic_info.keys(): self.BASICS[basic] = basic_info[basic] else: logger.error("mail settings has no %s", basic) raise BadEmailSettings() else: logger.error("basic_info should be a dict") raise BadEmailSettings("basic_info not a dict") def _send_mail(self, subject, content, attachment): subject = subject.decode("utf-8") self._do_send_mail(self.BASICS["TOLIST"], subject, content, attachment) def run(self): if not self.subject or not self.content: return self._send_mail(self.subject, self.content, self.attachment) def _do_send_mail(self, to, subject, content, attachment): msg = MIMEMultipart('related') msg['To'] = ', '.join(to) msg['From'] = email.utils.formataddr((self.BASICS["USERNAME"], self.BASICS["USERNAME"])) msg['Subject'] = subject # msgText = MIMEText(content.encode("utf-8"), "html") msgtext = MIMEText(content, "html") msgtext.set_charset('utf-8') msg.attach(msgtext) if attachment: att = MIMEText(open(attachment, 'rb').read(), 'base64', 'utf-8') att["Content-Type"] = 'application/octet-stream' att["Content-Disposition"] = 'attachment;filename="%s"' % attachment msg.attach(att) server = smtplib.SMTP(self.BASICS["SERVER"], self.BASICS["PORT"]) server.set_debuglevel(False) # show communication with the server server.login(self.BASICS["USERNAME"], self.BASICS["PASSWORD"]) try: server.sendmail(self.BASICS["USERNAME"], to, msg.as_string()) finally: server.quit() class FileMail(MailBase): """ Load a file and send its contents as mail """ def __init__(self, subject, mail_file, basic_info, attachment=""): if len(mail_file) <= 50 and os.path.isfile(mail_file): fd = open(mail_file) content = fd.read() content = "<br/>".join(content.split("\n")) fd.close() else: content = "" super(FileMail, self).__init__(subject, content, basic_info, attachment) class BadEmailSettings(Exception): pass
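A hedged usage sketch built from the `MailBase` docstring above (Python 2-era code; all addresses hypothetical). Four keys suffice, since `PORT` falls back to `mailServerPort` (25):

```python
from myutils.mailbase import MailBase  # import path from the record

basic_info = {
    "TOLIST": ["ops@example.com"],
    "SERVER": "mail.example.com",
    "USERNAME": "bot@example.com",
    "PASSWORD": "secret",
}

mail = MailBase("weekly report", "<b>all green</b>", basic_info)
mail.start()  # threading.Thread: the mail is sent asynchronously
```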
JacekKarnasiewicz/HomePage
apps/tree_traversal/apps.py
Python
mit
102
0
from django.apps import AppConfig class Tre
eTraversalConfig(AppConfig): name = 'tree_traver
sal'
magyarm/periphondemand-code
setup.py
Python
lgpl-2.1
3,545
0.010155
#! /usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Name:        setup.py
# Purpose:
# Author:      Fabien Marteau <fabien.marteau@armadeus.com>
# Created:     16/02/2009
#-----------------------------------------------------------------------------
#  Copyright (2008)  Armadeus Systems
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#-----------------------------------------------------------------------------
# Revision list :
#
# Date       By        Changes
#
#-----------------------------------------------------------------------------

from distutils.core import setup
import os,re
import sys
sys.path.append("src/bin/")
from version import *

def visit(libfile,dirname,names):
    """ function used by getTree to walk through the library tree """
    for file in names:
        filepath = os.path.join(dirname,file)
        if not os.path.isdir(filepath):
            if not re.search(r".svn",filepath):
                # FIXME:
                # I can't find how to split with os.path !
                # will be used when package_data work
                #realpath = "/".join(filepath.split("/")[1:])
                #libfile.append(realpath)
                libfile.append(filepath)

def getTree(directory):
    """ return a list of file paths found under src/<directory> """
    libf
ile = [] os.path.walk(os.path.join("src",directory),visit,libfile) new_libfile = [] for path_file in libf
ile: new_libfile.append('/'.join(path_file.split('/')[1:])) if (directory == "platforms"): print str(new_libfile) return new_libfile # Package files package_files_list = [] package_files_list.extend(getTree("library")) package_files_list.extend(getTree("platforms")) package_files_list.extend(getTree("templates")) package_files_list.extend(getTree("busses")) package_files_list.extend(getTree("toolchains")) package_files_list.extend(getTree("tests")) datafiles=[ ('bin',['src/bin/pod']) ] setup( name='PeriphOnDemand', version=getVersion(), url='https://sourceforge.net/projects/periphondemand', author='Fabien Marteau and Nicolas Colombain', author_email='<fabien.marteau@armadeus.com>,<nicolas.colombain@armadeus.com>,', maintainer='Fabien Marteau', maintainer_email='fabien.marteau@armadeus.com', package_dir = {"periphondemand":"src"}, packages=['periphondemand', 'periphondemand.bin', 'periphondemand.bin.code', 'periphondemand.bin.code.vhdl', 'periphondemand.bin.commandline', 'periphondemand.bin.core', 'periphondemand.bin.toolchain', 'periphondemand.bin.utils', ], package_data = {'periphondemand':package_files_list}, data_files=datafiles, license='GPL', )
face2wind/Elegance
tools/serialize_creator/cpp_creator.py
Python
lgpl-3.0
8,346
0.003834
#!/usr/bin/env python3 import os import sys import getopt import xml.dom.minidom class CppCreator(object): def __init__(self, file_name, xml_root, output_path): if not os.path.exists(output_path): print ("CppCreator create error") exit(1) self.xml_root = xml_root self.output_path = output_path self.file_name = file_name def GetCppRealType(self, type_str, subtype_str): real_type_str = type_str if type_str == "int8": real_type_str = "char" elif type_str == "uint8": real_type_str = "unsigned char" elif type_str == "int16": real_type_str = "short" elif type_str == "uint16": real_type_str = "unsigned short" elif type_str == "int32": real_type_str = "int" elif type_str == "uint32": real_type_str = "unsigned int" elif type_str == "int64": real_type_str = "long long" elif type_str == "uint64": real_type_str = "unsigned long long" elif type_str == "string": real_type_str = "std::string" elif type_str == "array": if subtype_str == "": print("GetCppRealType : subtype_str can not empty when type is array") exit(1) real_type_str = "std::vector<" + self.GetCppRealType(subtype_str, "") + ">" return real_type_str def GetSerializeCode(self, type_str, subtype_str, attr_name): code_str = "" if type_str == "int8": code_str += (" collector.WriteInt8(" + attr_name + ");\n") elif type_str == "uint8": code_str += (" collector.WriteUint8(" + attr_name + ");\n") elif type_str == "int16": code_str += (" collector.WriteInt16(" + attr_name + ");\n") elif type_str == "uint16": code_str += (" collector.WriteUint16(" + attr_name + ");\n") elif type_str == "int32": code_str += (" collector.WriteInt32(" + attr_name + ");\n") elif type_str == "uint32": code_str += (" collector.WriteUint32(" + attr_name + ");\n") elif type_str == "int64": code_str += (" collector.WriteInt64(" + attr_name + ");\n") elif type_str == "uint64": code_str += (" collector.WriteUint64(" + attr_name + ");\n") elif type_str == "string": code_str += (" collector.WriteString(" + attr_name + ");\n") elif type_str == "array": if subtype_str == "": print("GetSerializeCode : subtype_str can not empty when type is array") exit(1) code_str += (" collector.WriteUint16((unsigned short)" + attr_name + ".size());\n") code_str += " for (auto array_item : " + attr_name + ")\n {\n " sub_serialize_code = self.GetSerializeCode(subtype_str, "", "array_item") if sub_serialize_code == "": sub_serialize_code = " array_item.Serialize(collector);\n" code_str += sub_serialize_code code_str += " }\n" return code_str def GetUnserializeCode(self, type_str, subtype_str, attr_name): code_str = "" if type_str == "int8": code_str += (" " + attr_name + " = collector.ReadInt8();\n") elif type_str == "uint8": code_str += (" " + attr_name + " = collector.ReadUint8();\n") elif type_str == "int16": code_str += (" " + attr_name + " = collector.ReadInt16();\n") elif type_str == "uint16": code_str += (" " + attr_name + " = collector.ReadUint16();\n") elif type_str == "int32": code_str += (" " + attr_name + " = collector.ReadInt32();\n") elif type_str == "uint32": code_str += (" " + attr_name + " = collector.ReadUint32();\n") elif type_str == "int64": code_str += (" " + attr_name + " = collector.ReadInt64();\n") elif type_str == "uint64": code_str += (" " + attr_name + " = collector.ReadUint64();\n") elif type_str == "string": code_str += (" " + attr_name + " = collector.ReadString();\n") elif type_str == "array": if subtype_str == "": print("GetUnserializeCode : subtype_str can not empty when type is array") exit(1) code_str += (" {\n int array_size = collector.ReadUint16();\n " + 
self.GetCppRealType(subtype_str, "") + " tmp_attr_value;\n") code_str += " for (int index = 0; index < array_size; ++ index)\n {\n " sub_serialize_code = self.GetUnserializeCode(subtype_str, "", "tmp_attr_value") if sub_serialize_code == "": sub_serialize_code = " tmp_attr_value.Unserialize(collector);\n" code_str += sub_serialize_code code_str += (" " + attr_name + ".push_back(tmp_attr_value);\n") code_str += " }\n }\n" return code_str def DoCreate(self): protocols = self.xml_root.getElementsByTagName("protocol") hpp_file_str = "#pragma once\n\n#include <string>\n#include <vector>\n#include <elegance/memory/serialize/serialize_base.hpp>\n\nusing face2wind::SerializeBase;\nusing face2wind::SerializeDescribe;\nusing face2wind::ByteArray;\n\nnamespace Protocol {\n\n" cpp_file_header_str = "#include \"" + self.file_name + ".hpp\"\n\nnamespace Protocol {\n\n" describe_hpp_str = "" describe_cpp_str = "" cpp_file_str = "" for protocol in protocols: class_name = protocol.getAttribute("name") hpp_file_str += ("class " + class_name + " : public SerializeBase\n{\npublic:\n") cpp_serialize_code = "" cpp_unserialize_code = "" attrs = protocol.getElementsByTagName("attr") for attr in attrs: type_name = attr.getAttribute("type") attr_name = attr.getAttribute("name") subtype_name = "" real_type_name = "" if (attr.hasAttribute("subtype")): subtype_name = attr.getAttribute("subtype") real_type_name = self.GetCppRealType(type_name, subtype_name) hpp_file_str += (" " + real_type_name + " " + attr_name + ";\n") cpp_serialize_code += self.GetSerializeCode(type_name, subtype_name, attr_name) cpp_unserialize_code += self.GetUnserializeCode(type_name, subtype_name, attr_name) hpp_file_str += "\n virtual void Se
rialize(ByteArray &collector) const;\n" hpp_file_str += " virtual void Unserialize(ByteArray &collector);\n" hpp_file_str += " virtual const std::string GetTypeName() const { return \"" + class_name + "\"; }\n" hpp_file_str += "};\n\n" describe_class_name = "__" + class_name + "Describe__"; describe_hpp_str += ("class " + describe_class_name + " : public SerializeDescribe\n{\npublic:\n " + describe_class_name + "() { GetNameToObjectM
ap()[\"" + class_name + "\"] = this; }\n virtual ~" + describe_class_name + "() {}\n") describe_hpp_str += "\nprotected:\n virtual SerializeBase * CreateSerialize() const { return new " + class_name + "(); }\n};\n\n" describe_cpp_str += (describe_class_name + " " + "for_describe_register_to_" + describe_class_name.lower() + ";\n") cpp_file_str += ("void " + class_name + "::Serialize(ByteArray &collector) const\n") cpp_file_str += ("{\n" + cpp_serialize_code + "}\n\n") cpp_file_str += ("void " + class_name + "::Unserialize(ByteArray &collector)\n") cpp_file_str += ("{\n" + cpp_unserialize_code + "}\n\n") cpp_file_str += "}\n\n" describe_hpp_str += "\n\n" describe_cpp_str += "\n\n" hpp_file = open(self.output_path + "/" + self.file_name + ".hpp", "w") hpp_file.write(hpp_file_str + describe_hpp_str + "}\n\n") hpp_file.close() cpp_file
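The creator above only reads protocol elements with attr children, so a minimal driving script looks like the sketch below. The message and field names are hypothetical; only the tags and attributes that DoCreate actually consumes (protocol/name, attr/type, attr/name, optional attr/subtype) are used, and the .cpp output alongside the .hpp is assumed from the truncated tail of the file:

import xml.dom.minidom

# Hypothetical protocol description.
SAMPLE = """
<protocols>
  <protocol name="LoginReq">
    <attr type="string" name="account"/>
    <attr type="uint32" name="version"/>
    <attr type="array" subtype="int32" name="channels"/>
  </protocol>
</protocols>
"""

dom = xml.dom.minidom.parseString(SAMPLE)
creator = CppCreator("login_protocol", dom.documentElement, ".")
creator.DoCreate()   # writes ./login_protocol.hpp (and, in the full file, .cpp)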
vgrem/SharePointOnline-REST-Python-Client
office365/sharepoint/content_type_collection.py
Python
mit
376
0.00266
from office365.runtime.client_object_collection import ClientObjectCollection from office365.sharepoint.content_type import ContentType class ContentTypeCollection(ClientObjectCollection): """Content Type resource collection""" def __init__(self, context, resource_path=None): super(ContentTypeCollection, self).__init__(context, ContentType, resource_path
)
badp/weather
weather/models.py
Python
bsd-2-clause
8,309
0.011674
from django.db import models from django.contrib.auth.models import User from django.db.models.signals import post_save from collections import OrderedDict from datetime import date, timedelta import signals class UserProfile(models.Model): user = models.OneToOneField(User, primary_key=True) # Optional new fields go here, but for now I don't need any. # Mostly a soft deletion bit. @classmethod def of(cls, user): return cls.objects.get(user=user) def vote(self, vote): TodayVotes(user=self, vote=vote).save() def __unicode__(self): return self.user.username # Ensure that we have a UserProfile for all users def create_user_profile(sender, instance, created, **kwargs): if created: UserProfile.objects.create(user=instance) post_save.connect(create_user_profile, sender=User) class Team(models.Model): HIERARCHICAL = 0 CIRCULAR = 1 SHAPE_CHOICES = [ (HIERARCHICAL, "Hierarchical"), (CIRCULAR, "Circular"), ] id = models.AutoField(primary_key=True) name = models.CharField(max_length=25, unique=True) members = models.ManyToManyField(UserProfile) shape = models.IntegerField( choices=SHAPE_CHOICES, default=HIERARCHICAL, help_text="If a group is hierarchical, members report to one manager.\n" "If a group is circular, members report to each other.", ) manager = models.ForeignKey(User, on_delete=models.PROTECT, related_name="owned_by") supervisor = models.ForeignKey(User, on_delete=models.PROTECT, related_name="supervised_by") def get_votes(self, for_past_days=5): cutoff_point = date.today() - timedelta(days=for_past_days) return Votes.objects.filter(team=self, is_vote=True, date__gte=cutoff_point) def get_vote_averages(self, for_past_days=5): # this returns a list of dicts with keys "date" and "vote__avg" data = (self.get_votes(for_past_days=for_past_days) .values('date') .annotate(models.Avg('vote'))) # turn that into a simpler dict from date to average response = OrderedDict() for point in data: response[point["date"]] = point["vote__avg"] return response def get_predictions(self, for_past_days=5): cutoff_point = date.today() - timedelta(days=for_past_days) return Votes.objects.filter(team=self, is_prediction=True, date__gte=cutoff_point) def get_all_voters(self): return UserProfile.objects.filter(team=self) def get_all_predictors(self): if self.shape == self.HIERARCHICAL: return [self.manager] else: return self.get_all_voters() def check_for_warnings(self, send_signals=False): def is_prediction_successful(pred, avg): if self.shape == self.HIERARCHICAL: ranges = [(1, 3), (1, 3), (0, 0), (3, 5), (3, 5)] else: ranges = [(1, 2.2), (2.2, 2.8), (2.8, 3.2), (3.2, 3.8), (3.8, 5)] low, hi = ranges[pred-1] return low <= avg <= hi def is_concerning(self, failures, num_predictions, pvalue_threshold=0.1): base_prob = 0.5 if self.shape == self.HIERARCHICAL else 0.4 successes = num_predictions - failures test_pvalue = choose(num_predictions, successes) test_pvalue *= base_prob ** successes test_pvalue *= (1-base_prob) ** failures # TODO: adjust pvalue threshold to num_predictions? 
return (test_pvalue <= pvalue_threshold, test_pvalue)

        avgs = self.get_vote_averages()
        predictions = self.get_predictions()
        num_predictions = predictions.count()
        failures = 0
        for day in avgs:
            avg = avgs[day]
            day_predictions = predictions.filter(date=day)
            for pred in day_predictions:
                if not is_prediction_successful(pred.vote, avg):
                    failures += 1

        # is_concerning returns a (hit, pvalue) tuple; unpack it, otherwise
        # the truthiness test below always succeeds on a non-empty tuple.
        threshold_hit, pvalue = is_concerning(self, failures, num_predictions)

        if send_signals and threshold_hit:
            # TODO: add a ton more information here
            signals.problem_detected.send(sender=self.__class__, team=self)

        return threshold_hit

    def __unicode__(self):
        return self.name

VOTE_CHOICES = [
    (1, ":C"),
    (2, ":/"),
    (3, ":|"),
    (4, ":)"),
    (5, ":D"),
]

# class Membership(models.Model):
#     id = models.AutoField(primary_key=True, editable=False)
#     team = models.ForeignKey(Team)
#     member = models.ForeignKey(UserProfile)

#     def __unicode__(self):
#         return "%s/%s" % (self.team, self.member)

class TodayVotes(models.Model):
    user = models.ForeignKey(UserProfile, primary_key=True)
    vote = models.IntegerField(choices=VOTE_CHOICES, blank=True)

    @classmethod
    # This accepts kwargs because it is a signal handler
    def do_switchover(cls, **kwargs):
        for team in Team.objects.all():
            voters
= team.get_all_voters() predictors = team.get_all_predictors() votes = TodayVotes.objects.filter(user__in=voters) predictions = TodayVotes.objects.filter(user__in=predictors) if votes.count() < 3: print "Skipping %s, not enough votes" % team continue else: for vote in vot
es:
                    Votes(team=team, vote=vote.vote, is_vote=True).save()
                for prediction in predictions:
                    # was `vote.vote`, which reused the last value of the loop above
                    Votes(team=team, vote=prediction.vote, is_prediction=True).save()

            # Send warnings
            team.check_for_warnings(send_signals=True)
        TodayVotes.objects.all().delete()

    def __unicode__(self):
        return "%d" % self.vote

signals.day_switchover.connect(TodayVotes.do_switchover)


class Votes(models.Model):
    id = models.AutoField(primary_key=True, editable=False)
    # Is it okay to cascade delete votes?
    # It's probably best not to delete users or teams to begin with.
    # If we HAVE to delete things, then we might as well delete it all.
    team = models.ForeignKey(Team, editable=False)
    is_vote = models.BooleanField(default=False)
    is_prediction = models.BooleanField(default=False)
    vote = models.IntegerField(choices=VOTE_CHOICES, editable=False)
    date = models.DateField(auto_now_add=True, editable=False)

    class Meta:
        ordering = ["team", "date"]
        get_latest_by = "date"

    def __unicode__(self):
        return ("{s.team}@{s.date}:{s.vote}"
                " ({s.is_vote}, {s.is_prediction})".format(s=self))


# from http://stackoverflow.com/a/3025547/13992
# by http://stackoverflow.com/users/4279/j-f-sebastian
def choose(n, k):
    """
    A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
    """
    if 0 <= k <= n:
        ntok = 1
        ktok = 1
        for t in xrange(1, min(k, n - k) + 1):
            ntok *= n
            ktok *= t
            n -= 1
        return ntok // ktok
    else:
        return 0


# This is called on syncdb
def init(sender, **kwargs):
    # Only run this once. HACK: change parameter in connect() instead
    if ".admin." not in sender.__name__:
        return

    # Don't do anything if the DB is already populated
    if User.objects.all().count() != 0:
        return

    print "Bootstrapping triggered by %s." % sender.__name__

    root = User.objects.create_superuser("root", "root@localhost", "root")
    print "Superuser created (username/password are 'root')"

    user1 = User.objects.create_user('one', 'one@localhost', 'one')
    user2 = User.objects.create_user('two', 'two@localhost', 'two')
    user3 = User.objects.create_user('tri', 'tri@localhost', 'tri')
    print "Users one, two, tri created and added to team 'demo'."

    team = Team(name="demo", shape=Team.CIRCULAR, manager=root, supervisor=root)
    team.save()
    for user in (root, user1, user2, user3):
        team.members.add(UserProfile.of(user))

    assert team.get_all_voters().count() == 4

    # Create votes in the past
    for days_in_the_past in range(6):
        day = date.today() - timedelta(days=days_in_the_past)
        Votes(vote=3, team=team, is_vote=True, date=day).save()
        Votes(vote=2, team=team, is_vote=True, date=day).save()
        Votes(vote=1, team=team, is_vote=True, date=day).save()
        Votes(vote=4, team=team, is_prediction=True, date=day).save()

    TodayVotes(user=UserProfile.of(user1), vote=1).save()
    TodayVotes(user=UserProfile.of(user2), vote=2).save()
    TodayVotes(user=U
paxswill/evesrp
src/evesrp/views/api.py
Python
bsd-2-clause
12,023
0.002578
from __future__ import absolute_import
from flask import url_for, redirect, abort, request, Blueprint, current_app
from flask_login import login_required, current_user
import six
from six.moves import filter, map
from sqlalchemy.orm.exc import NoResultFound
from itertools import chain
from .. import ships, systems, db
from ..models import Request, ActionType
from ..auth import PermissionType
from ..auth.models import Division, User, Group, Pilot, Entity
from .requests import PermissionRequestListing, PersonalRequests
from ..util import jsonify, classproperty


api = Blueprint('api', __name__)


filters = Blueprint('filters', __name__)


@api.route('/entities/')
@login_required
def list_entities():
    """Return a JSON object with a list of all users and groups.

    Example output::

        {
          entities: [
            {name: 'Bar', id: 1, source: 'Auth Source', type: 'User'},
            {name: 'Foo', id: 0, source: 'Another Auth Source', type: 'Group'},
            {name: 'Baz', id: 20, source: 'Auth Source', type: 'Group'}
          ]
        }

    This method is only accessible to administrators.
    """
    if not current_user.admin and not \
            current_user.has_permission(PermissionType.admin):
        abort(403)
    user_query = db.session.query(User.id, User.name, User.authmethod)
    group_query = db.session.query(Group.id, Group.name, Group.authmethod)
    users = map(lambda e: {
        u'id': e.id,
        u'name': e.name,
        u'type': u'User',
        u'source': e.authmethod}, user_query)
    groups = map(lambda e: {
        u'id': e.id,
        u'name': e.name,
        u'type': u'Group',
        u'source': e.authmethod}, group_query)
    return jsonify(entities=chain(users, groups))


@api.route('/user/<int:user_id>/')
@login_required
def user_detail(user_id):
    if not current_user.admin and not \
            current_user.has_permission(PermissionType.admin):
        abort(403)
    user = User.query.get_or_404(user_id)
    # Set up divisions
    submit = map(lambda p: p.division,
            filter(lambda p: p.permission == PermissionType.submit,
                user.permissions))
    review = map(lambda p: p.division,
            filter(lambda p: p.permission == PermissionType.review,
                user.permissions))
    pay = map(lambda p: p.division,
            filter(lambda p: p.permission == PermissionType.pay,
                user.permissions))
    resp = {
        u'name': user.name,
        u'groups': list(user.groups),
        u'divisions': {
            u'submit': list(set(submit)),
            u'review': list(set(review)),
            u'pay': list(set(pay)),
        },
        u'admin': user.admin,
        u'requests': user.requests,
    }
    return jsonify(**resp)


@api.route('/group/<int:group_id>/')
@login_required
def group_detail(group_id):
    if not current_user.admin and not \
            current_user.has_permission(PermissionType.admin):
        abort(403)
    group = Group.query.get_or_404(group_id)
    submit = map(lambda p: p.division,
            filter(lambda p: p.permission == PermissionType.submit,
                group.permissions))
    review = map(lambda p: p.division,
            filter(lambda p: p.permission == PermissionType.review,
                group.permissions))
    pay =
map(lambda p: p.division, filter(lambda p: p.permission == PermissionType.pay, group.permissions)) resp = { u'name': group.name, u'users': list(group.users), u'divisions': { u'submit': list(set(submit)), u'review': list(set(review)), u'pay': list(set(pay)), }, } return jsonify(**resp) @api.route('/division/') @login_required def list_divisions(): """
List all divisions. """ if not current_user.admin: abort(403) divisions = db.session.query(Division.id, Division.name) return jsonify(divisions=divisions) @api.route('/division/<int:division_id>/') @login_required def division_detail(division_id): """Get the details of a division. :param int division_id: The ID of the division """ division = Division.query.get_or_404(division_id) if not current_user.admin and not \ current_user.has_permission(PermissionType.admin, division): abort(403) permissions = {} for perm in PermissionType.all: key = perm.name + '_href' permissions[key] = url_for('.division_permissions', division_id=division_id, permission=perm.name) return jsonify( name=division.name, requests=division.requests, permissions=permissions) @api.route('/division/<int:division_id>/<permission>/') @login_required def division_permissions(division_id, permission): division = Division.query.get_or_404(division_id) if not current_user.admin and not \ current_user.has_permission(PermissionType.admin, division): abort(403) permission = PermissionType.from_string(permission) # Can't use normal Entity JSON encoder as it doesn't include the # authentication source or their type (explicitly. Ain't nobody got time # for parsing the entity type out of the href). entities = [] for entity in map(lambda p: p.entity, division.permissions[permission]): entity_info = { u'name': entity.name, u'id': entity.id, u'source': str(entity.authmethod), } if hasattr(entity, u'users'): entity_info[u'type'] = u'Group' entity_info[u'length'] = len(entity.users) else: entity_info[u'type'] = u'User' entities.append(entity_info) return jsonify( entities=entities, name=permission.name, description=permission.description) @api.route('/ships/') @login_required def ship_list(): """Get an array of objects corresponding to every ship type. The objects have two keys, ``id`` is the integer typeID, and ``name`` is the name of the ship. This method is only accessible for logged in users to try to keep possible misuse to a minimum. """ ship_objs = list(map(lambda s: {u'name': s[1], u'id': s[0]}, ships.ships.items())) return jsonify(ships=ship_objs) class FiltersRequestListing(object): @classproperty def _load_options(self): """Returns a sequence of :py:class:`~sqlalchemy.orm.strategy_options.Load` objects specifying which attributes to load. """ return ( db.Load(Request).load_only( 'id', 'pilot_id', 'corporation', 'alliance', 'ship_type', 'status', 'base_payout', 'kill_timestamp', 'timestamp', 'division_id', 'submitter_id', 'system', ), db.Load(Division).joinedload('name'), db.Load(Pilot).joinedload('name'), db.Load(User).joinedload('id') ) def dispatch_request(self, filters='', **kwargs): def request_dict(request): payout = request.payout return { u'id': request.id, u'href': url_for('requests.get_request_details', request_id=request.id), u'pilot': request.pilot.name, u'corporation': request.corporation, u'alliance': request.alliance, u'ship': request.ship_type, u'status': request.status.name, u'payout': payout.currency(), u'kill_timestamp': request.kill_timestamp, u'submit_timestamp': request.timestamp, u'division': request.division.name, u'submitter_id': request.submitter.id, u'system': request.system, u'constellation
codesy/codesy
auctions/migrations/0039_auto_20160909_1728.py
Python
agpl-3.0
573
0.001745
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-09-09 17:28 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('auctions', '0038_offercredit'), ] operations = [ migrations.Alte
rField( model_name='offercredit', name='offer', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCAD
E, related_name='offer_credit', to='auctions.Offer'), ), ]
kratorius/ads
python/interviewquestions/atoi.py
Python
mit
1,456
0.00206
""" Implement atoi() in Python (given a string, return a number). Assume all the strings are always valid. """ import unittest def atoi(string): l = len(string) t = 0 v = 10 ** (l - 1) for ch in string: t
 += v * int(ch)
        v //= 10  # floor division keeps v an int under Python 3
    return t


def atoi2(string):
    l, t = len(string), 0
    for idx, ch in enumerate(string):
        t += int(ch) * (10 ** (l - idx - 1))
    return t


def atoi3(string):
    l = len(string)
    return sum([
        int(ch) * (10 ** (l - idx - 1))
        for idx, ch in enumerate(
string) ]) class AtoITest(unittest.TestCase): def test_atoi(self): self.assertEqual(12345, atoi("12345")) self.assertEqual(1234, atoi("1234")) self.assertEqual(123, atoi("123")) self.assertEqual(12, atoi("12")) self.assertEqual(1, atoi("1")) self.assertEqual(0, atoi("0")) def test_atoi2(self): self.assertEqual(12345, atoi2("12345")) self.assertEqual(1234, atoi2("1234")) self.assertEqual(123, atoi2("123")) self.assertEqual(12, atoi2("12")) self.assertEqual(1, atoi2("1")) self.assertEqual(0, atoi2("0")) def test_atoi3(self): self.assertEqual(12345, atoi3("12345")) self.assertEqual(1234, atoi3("1234")) self.assertEqual(123, atoi3("123")) self.assertEqual(12, atoi3("12")) self.assertEqual(1, atoi3("1")) self.assertEqual(0, atoi3("0"))
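All three variants implement the same place-value expansion: "123" is reduced to 1*10^2 + 2*10^1 + 3*10^0 = 123. A quick cross-check against the built-in parser, added here purely as an illustration (it assumes the three functions above are in scope):

# Cross-check the hand-rolled parsers against int() on a few inputs.
for s in ["0", "7", "42", "90210"]:
    assert atoi(s) == atoi2(s) == atoi3(s) == int(s)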
PierreMarchand20/htool
interface/htool/multihmatrix.py
Python
mit
10,354
0.005698
#!/usr/bin/env python # coding: utf-8 import os,sys import ctypes import numpy as np from .hmatrix import _C_HMatrix, HMatrix class _C_MultiHMatrix(ctypes.Structure): """Holder for the raw data from the C++ code.""" pass class AbstractMultiHMatrix: """Common code for the two actual MultiHMatrix classes below.""" ndim = 2 # To mimic a numpy 2D array def __init__(self, c_data: _C_MultiHMatrix, **params): # Users should use one of the two constructors below. self.c_data = c_data self.shape = (self.lib.multi_nbrows(c_data), self.lib.multi_nbcols(c_data)) self.size = self.lib.nbhmats(c_data) self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix) self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
self.hmatrices = [] for l in range(0,self.size): c_data_hmatrix = self.lib.getHMatrix(self.c_data,l) self.hmatrices.append(HMatrix(c_data_hmatrix,**params)) self.params = params.copy() @classmethod def from_coefs(cls, getcoefs, nm, po
ints_target, points_source=None, **params):
        """Construct an instance of the class from an evaluation function.

        Parameters
        ----------
        getcoefs: Callable
            A function evaluating an array of matrices at given coordinates.
        points_target: np.ndarray of shape (N, 3)
            The coordinates of the target points. If points_source=None,
            they also serve as the source points.
        points_source: np.ndarray of shape (N, 3)
            If not None, the coordinates of the source points.
        epsilon: float, keyword-only, optional
            Tolerance of the Adaptive Cross Approximation
        eta: float, keyword-only, optional
            Criterion to choose the blocks to compress
        minclustersize: int, keyword-only, optional
            Minimum shape of a block
        maxblocksize: int, keyword-only, optional
            Maximum number of coefficients in a block

        Returns
        -------
        MultiHMatrix or ComplexMultiHMatrix
        """
        # Set params.
        cls._set_building_params(**params)

        # Boilerplate code for Python/C++ interface.
        _getcoefs_func_type = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double))
        if points_source is None:
            cls.lib.MultiHMatrixCreateSym.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreateSym.argtypes = [
                    np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                    ctypes.c_int,
                    _getcoefs_func_type,
                    ctypes.c_int
                    ]

            # Call the C++ backend.
            c_data = cls.lib.MultiHMatrixCreateSym(points_target, points_target.shape[0], _getcoefs_func_type(getcoefs),nm)
        else:
            cls.lib.MultiHMatrixCreate.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreate.argtypes = [
                    np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                    ctypes.c_int,
                    np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                    ctypes.c_int,
                    _getcoefs_func_type,
                    ctypes.c_int
                    ]

            # Call the C++ backend.
            c_data = cls.lib.MultiHMatrixCreate(points_target,points_target.shape[0],points_source, points_source.shape[0], _getcoefs_func_type(getcoefs),nm)

        return cls(c_data, **params)

    @classmethod
    def from_submatrices(cls, getsubmatrix, nm, points_target, points_source=None, **params):
        """Construct an instance of the class from an evaluation function.

        Parameters
        ----------
        points: np.ndarray of shape (N, 3)
            The coordinates of the points.
        getsubmatrix: Callable
            A function evaluating the matrix in a given range.
        epsilon: float, keyword-only, optional
            Tolerance of the Adaptive Cross Approximation
        eta: float, keyword-only, optional
            Criterion to choose the blocks to compress
        minclustersize: int, keyword-only, optional
            Minimum shape of a block
        maxblocksize: int, keyword-only, optional
            Maximum number of coefficients in a block

        Returns
        -------
        HMatrix or ComplexHMatrix
        """
        # Set params.
        cls._set_building_params(**params)

        # Boilerplate code for Python/C++ interface.
        _getsumatrix_func_type = ctypes.CFUNCTYPE(
                None, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
                ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double)
            )
        if points_source is None:
            cls.lib.MultiHMatrixCreatewithsubmatSym.restype = ctypes.POINTER(_C_MultiHMatrix)
            cls.lib.MultiHMatrixCreatewithsubmatSym.argtypes = [
                    np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
                    ctypes.c_int,
                    _getsumatrix_func_type,
                    ctypes.c_int
                    ]

            # Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmatSym(points_target, points_target.shape[0], _getsumatrix_func_type(getsubmatrix),nm) else: cls.lib.MultiHMatrixCreatewithsubmat.restype = ctypes.POINTER(_C_MultiHMatrix) cls.lib.MultiHMatrixCreatewithsubmat.argtypes = [ np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'), ctypes.c_int, np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'), ctypes.c_int, _getsumatrix_func_type, ctypes.c_int ] # Call the C++ backend. c_data = cls.lib.MultiHMatrixCreatewithsubmat(points_target,points_target.shape[0],points_source, points_source.shape[0], _getsumatrix_func_type(getsubmatrix),nm) return cls(c_data, **params) @classmethod def _set_building_params(cls, *, eta=None, minclustersize=None, epsilon=None, maxblocksize=None): """Put the parameters in the C++ backend.""" if epsilon is not None: cls.lib.setepsilon.restype = None cls.lib.setepsilon.argtypes = [ ctypes.c_double ] cls.lib.setepsilon(epsilon) if eta is not None: cls.lib.seteta.restype = None cls.lib.seteta.argtypes = [ ctypes.c_double ] cls.lib.seteta(eta) if minclustersize is not None: cls.lib.setminclustersize.restype = None cls.lib.setminclustersize.argtypes = [ ctypes.c_int ] cls.lib.setminclustersize(minclustersize) if maxblocksize is not None: cls.lib.setmaxblocksize.restype = None cls.lib.setmaxblocksize.argtypes = [ ctypes.c_int ] cls.lib.setmaxblocksize(maxblocksize) def __str__(self): return f"{self.__class__.__name__}(shape={self.shape})" def __getitem__(self, key): # self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix) # self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int] # c_data_hmatrix = self.lib.getHMatrix(self.c_data,key) # return HMatrix(c_data_hmatrix,**self.params) return self.hmatrices[key] def matvec(self, l , vector): """Matrix-vector product (interface for scipy iterative solvers).""" assert self.shape[1] == vector.shape[0], "Matrix-vector product of matrices of wrong shapes." # Boilerplate for Python/C++ interface self.lib.MultiHMatrixVecProd.argtypes = [ ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int, np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS'), np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS') ] # Initialize vector result = np.zeros((self.shape[0],), dtype=self.dtype) # Call C++ backend self.lib
microcom/odoo-product-configurator
product_configurator_wizard/models/sale.py
Python
agpl-3.0
1,538
0.00065
# -*- coding: utf-8 -*- from ast import literal_eval from odoo import models, fields, api class SaleOrderLine(models.Model): _inherit = 'sale.order.line' config_ok = fields.Boolean( related='product_id.config_ok', string="Configurable", readonl
y=True
    )

    @api.multi
    def reconfigure_product(self):
        """ Creates and launches a product configurator wizard with a linked
        template and variant in order to re-configure an existing product.
        It is essentially a shortcut to pre-fill configuration data of a
        variant"""
        cfg_steps = self.product_id.product_tmpl_id.config_step_line_i
ds active_step = str(cfg_steps[0].id) if cfg_steps else 'configure' product_modifiable = literal_eval(self.env['ir.config_parameter'].sudo().get_param( 'product_configurator.product_modifiable', default='False')) wizard_obj = self.env['product.configurator'] wizard = wizard_obj.create({ 'product_modifiable': product_modifiable, 'product_id': self.product_id.id, 'state': active_step, 'order_line_id': self.id, }) return { 'type': 'ir.actions.act_window', 'res_model': 'product.configurator', 'name': "Configure Product", 'view_mode': 'form', 'context': dict( self.env.context, wizard_id=wizard.id, ), 'target': 'new', 'res_id': wizard.id, }
fidelram/deepTools
deeptools/plotCorrelation.py
Python
gpl-3.0
10,834
0.002861
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import argparse import numpy as np import matplotlib matplotlib.use('Agg') matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['svg.fonttype'] = 'none' from deeptools import cm # noqa: F401 import matplotlib.pyplot as plt from deeptools.correlation import Correlation from deeptools.parserCommon import writableFile from deeptools._version import __version__ old_settings = np.seterr(all='ignore') def parse_arguments(args=None): basic_args = plot_correlation_args() heatmap_parser = heatmap_options() scatter_parser = scatterplot_options() parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=""" Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or multiBigwigSummary. Pearson or Spearman methods are available to compute correlation coefficients. Results can be saved as multiple scatter plots depicting the pairwise correlations or as a clustered heatmap, where the colors represent the correlation coefficients and the clusters are constructed using complete linkage. Optionally, the values can be saved as tables, too. detailed help: plotCorrelation -h """, epilog='example usages:\n' 'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n' ' \n\n', parents=[basic_args, heatmap_parser, scatter_parser]) return parser def plot_correlation_args(): parser = argparse.ArgumentParser(add_help=False) required =
parser.add_argument_group('Required arguments') # define the arguments required.add_argument('--corData', '-in', metavar='FILE', help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary', required=True) required.add_argument('--corMethod', '-c',
help="Correlation method.", choices=['spearman', 'pearson'], required=True) required.add_argument('--whatToPlot', '-p', help="Choose between a heatmap or pairwise scatter plots", choices=['heatmap', 'scatterplot'], required=True) optional = parser.add_argument_group('Optional arguments') optional.add_argument('--plotFile', '-o', help='File to save the heatmap to. The file extension determines the format, ' 'so heatmap.pdf will save the heatmap in PDF format. ' 'The available formats are: .png, ' '.eps, .pdf and .svg.', type=writableFile, metavar='FILE') optional.add_argument('--skipZeros', help='By setting this option, genomic regions ' 'that have zero or missing (nan) values in all samples ' 'are excluded.', action='store_true', required=False) optional.add_argument('--labels', '-l', metavar='sample1 sample2', help='User defined labels instead of default labels from ' 'file names. ' 'Multiple labels have to be separated by spaces, e.g. ' '--labels sample1 sample2 sample3', nargs='+') optional.add_argument('--plotTitle', '-T', help='Title of the plot, to be printed on top of ' 'the generated image. Leave blank for no title. (Default: %(default)s)', default='') optional.add_argument('--plotFileFormat', metavar='FILETYPE', help='Image format type. If given, this option ' 'overrides the image format based on the plotFile ' 'ending. The available options are: png, ' 'eps, pdf and svg.', choices=['png', 'pdf', 'svg', 'eps', 'plotly']) optional.add_argument( '--removeOutliers', help='If set, bins with very large counts are removed. ' 'Bins with abnormally high reads counts artificially increase ' 'pearson correlation; that\'s why, multiBamSummary tries ' 'to remove outliers using the median absolute deviation (MAD) ' 'method applying a threshold of 200 to only consider extremely ' 'large deviations from the median. The ENCODE blacklist page ' '(https://sites.google.com/site/anshulkundaje/projects/blacklists) ' 'contains useful information about regions with unusually high counts' 'that may be worth removing.', action='store_true') optional.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__)) group = parser.add_argument_group('Output optional options') group.add_argument('--outFileCorMatrix', help='Save matrix with pairwise correlation values to a tab-separated file.', metavar='FILE', type=writableFile) return parser def scatterplot_options(): """ Options specific for creating the scatter plot """ parser = argparse.ArgumentParser(add_help=False) scatter_opts = parser.add_argument_group('Scatter plot options') scatter_opts.add_argument('--xRange', help='The X axis range. The default scales these such that the full range of dots is displayed.', type=int, nargs=2, default=None) scatter_opts.add_argument('--yRange', help='The Y axis range. The default scales these such that the full range of dots is displayed.', type=int, nargs=2, default=None) scatter_opts.add_argument('--log1p', help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.', action='store_true') return parser def heatmap_options(): """ Options for generating the correlation heatmap """ parser = argparse.ArgumentParser(add_help=False) heatmap = parser.add_argument_group('Heatmap options') heatmap.add_argument('--plotHeight', help='Plot height in cm. (Default: %(default)s)', type=float, default=9.5) heatmap.add_argument('--plotWidth', help='Plot width in cm. The minimum value is 1 cm. 
(Default: %(default)s)',
                         type=float,
                         default=11)

    heatmap.add_argument('--zMin', '-min',
                         default=None,
                         help='Minimum value for the heatmap intensities. '
                              'If not specified, the value is set automatically',
                         type=float)

    heatmap.add_argument('--zMax', '-max',
                         default=None,
                         help='Maximum value for the heatmap intensities. '
                              'If not specified, the value is set automatically',
                         type=float)

    heatmap.add_argument(
        '--colorMap', default='jet',
        metavar='',
        help='Color map to use for the heatmap. Available values can be '
             'seen here: '
             'http://matplotlib.org/examples/color/colormaps_reference.html')

    heatmap.add_argument('--plotNumbers',
                         help='If set, then the correlation number is plotted '
                         'on top of the heatmap. This opti
open-synergy/opnsynid-partner-contact
partner_single_sale_risk_policy/models/res_partner.py
Python
agpl-3.0
2,591
0
# -*- coding: utf-8 -*- # Copyright 2016 OpenSynergy Indonesia # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). from openerp import models, fields, api from openerp.tools.translate import _ from openerp.exceptions import Warning as UserError class ResPartner(models.Model): _inherit = "r
es.partner" @api.multi def _compute_single_sale_policy(self): single_sale_order = 0.0 unset_single_sale_order = False criteria = [ ("user_ids.id", "in",
[self.env.user.id]), ] policy = self.env["partner.risk_limit_policy"].search( criteria, limit=1) if len(policy) == 1: single_sale_order = policy.single_sale_order_limit unset_single_sale_order = policy.unset_single_sale_order_limit for partner in self: partner.single_sale_order_limit_policy = single_sale_order partner.unset_single_sale_order_limit_policy = \ unset_single_sale_order single_sale_order_limit_policy = fields.Float( string="Single Sale Order Limit Policy", compute="_compute_single_sale_policy", store=False, ) unset_single_sale_order_limit_policy = fields.Boolean( string="Unset Single Sale Order Limit Policy", compute="_compute_single_sale_policy", store=False, ) @api.model def _update_limit_check_context(self, values): _super = super(ResPartner, self) ctx = _super._update_limit_check_context(values) for field in iter(values): if field == "risk_single_sale_order_limit": ctx.update({"check_single_sale_order_limit": True}) return ctx @api.constrains( "risk_single_sale_order_limit", ) def _check_single_sale_limit_policy(self): for partner in self: if partner.single_sale_order_limit_policy and \ partner.single_sale_order_limit_policy < \ partner.risk_single_sale_order_limit and \ partner.risk_single_sale_order_limit > 0 and \ self._context.get("check_single_sale_order_limit", False): raise UserError(_("Unauthorized single sale order amount")) if not partner.unset_single_sale_order_limit_policy and \ partner.risk_single_sale_order_limit <= 0.0 and \ self._context.get("check_single_sale_order_limit", False): raise UserError( _("Unauthorized to unset single sale order limit amount"))
pindanet/Raspberry
demoSenseHat.py
Python
gpl-3.0
2,108
0.005693
#!/usr/bin/python3 # x11vnc # SoftAP from sense_hat import SenseHat from time import sleep sense = SenseHat() sense.low_light = True led_loop = [4, 5, 6, 7, 15, 23, 31, 39, 47, 55, 63, 62, 61, 60, 59, 58, 57, 56, 48, 40, 32, 24, 16, 8, 0, 1, 2, 3] prev_x = 0 prev_y = 0 while True: sense.set_rotation(180) sense.clear() r = 32 g = 32 b = 200 # Eyes sense.set_pixel(2, 1, r, g, b) sense.set_pixel(5, 1, r, g, b) # Nose sense.set_pixel(3, 3, r+223, g, b-100) sense.set_pixel(4, 3, r+223, g, b-100) # Mouth sense.set_pixel(1, 5, 255, 255, 0) sense.set_pixel(2, 6, 255, 255, 0) sense.set_pixel(3, 6, 255, 255, 0) sense.set_pixel(4, 6, 255, 255, 0) sense.set_pixel(5, 6, 255, 255, 0) sense.set_pixel(6, 5, 255, 255, 0) sense.set_pixel(1, 4, 255, 255, 0) sense.set_pixel(6, 4, 255, 255, 0) for i in range(0, 5): sense.set_pixel(5, 1, r-32, g-32, b-32) for offset in led_loop: y = offset // 8 # row x = offset % 8 # column if x != prev_x or y != prev_y: sense.set_pixel(prev_x, prev
_y, 0, 0, 0) sense.set_pixel(x, y, 0, 255, 0) prev_x = x prev_y = y sleep(0.1) sense.set_pixel(5, 1, r, g, b) for offset in led_loop: y = offset // 8 # row x = offset % 8 # column if x != prev_x or y != prev_y: sense.set_pixel(prev_x, prev_y, 0, 0, 0)
sense.set_pixel(x, y, 0, 255, 0)
            prev_x = x
            prev_y = y
            sleep(0.1)

    # get_temperature() already returns degrees Celsius; the old code added
    # 273.15 (Kelvin) and then applied the Fahrenheit-to-Celsius formula,
    # which produced a meaningless value.
    t = sense.get_temperature()
    p = sense.get_pressure()
    h = sense.get_humidity()
    t = round(t, 1)
    p = round(p, 1)
    h = round(h, 1)
    print("This Raspberry Pi measures the temperature ({0} degrees) and the air pressure ({1} hPa).".format(t, p))
    sense.show_message("This Raspberry Pi measures the temperature ({0} degrees) and the air pressure ({1} hPa).".format(t, p), text_colour=[0, 255, 0])
#    sense.set_rotation(180)
koalalorenzo/python-digitalocean
digitalocean/__init__.py
Python
lgpl-3.0
1,128
0.001773
# -*- coding: utf-8 -*- """digitalocean API to manage droplets""" __version__ = "1.16.0" __author__ = "Lorenzo Setale ( http://who.is.lorenzo.setale.me/? )" __author_email__ = "lorenzo@setale.me" __license__ = "LGPL v3" __copyright__ = "Copyright (c) 2012-2020 Lorenzo Setale" from .Manager import Manager from .Droplet import Droplet, DropletError, BadKernelObject, BadSSHKeyFormat from .Region import Region from .Size import Size from .Image import Image from .Action import Action from .Account import Account from .Balance import Balance from .Domain import Domain from .Record import Record from .SSHKey import SSHKey from .Kernel import Kernel from .FloatingIP import FloatingIP from .Volume import Volume from .baseapi import Error, EndPointError, TokenError, DataReadError, NotFoundError from .Tag import Tag from .LoadBalancer import LoadBalancer from .
LoadBalancer import StickySessions, ForwardingRule, HealthCheck from .Certificate import Certificate from .Snapshot i
mport Snapshot from .Project import Project from .Firewall import Firewall, InboundRule, OutboundRule, Destinations, Sources from .VPC import VPC
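The package's usual entry point is the Manager re-exported above. A short sketch: the token is a placeholder, and get_all_droplets follows the project's documented API (assumed here, not shown in this file):

import digitalocean

# Placeholder token -- substitute a real API token.
manager = digitalocean.Manager(token="YOUR_API_TOKEN")
for droplet in manager.get_all_droplets():
    print(droplet.name)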
adundovi/CRPropa3-scripts
python_modules/network.py
Python
gpl-3.0
1,236
0.022654
import zmq

from crpropa import Module


class SendCandidateProperties( Module ):
    """ Sends candidate properties given by the function
    ```extract_func( candidate )```
    over the network to the server on ```ip_port```
    """
    def __init__( self, ip_port, extract_func ):
        Module.__init__( self )
        self.socket = None
        self.ip_port = "tcp://" + ip_port
self.extract_func = extract_func def beginRun( self ): context = zmq.Context() self.socket = context.socket( zmq.REQ )
self.socket.connect( self.ip_port ) def process(self, c): self.socket.send_pyobj( self.extract_func( c ) ) msg_in = self.socket.recv_pyobj() def endRun( self ): del self.socket class RecvCandidateProperties: """ Server side: receive data from the client module while listening on ```ip_port``` self.recv method should be in a non-blocking loop """ def __init__( self, ip_port ): context = zmq.Context() self.socket = context.socket( zmq.REP ) self.socket.bind( "tcp://" + ip_port ) def recv( self ): msg = self.socket.recv_pyobj() self.socket.send_pyobj(msg) return msg
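The two halves above form a simple ZeroMQ REQ/REP pair. A wiring sketch: the port and the extractor lambda are illustrative, and attaching the module to a crpropa ModuleList is assumed to follow the usual CRPropa pattern:

# Server process: collect one value per processed candidate in a blocking loop.
receiver = RecvCandidateProperties("127.0.0.1:5555")
while True:
    energy = receiver.recv()   # blocks until a client module sends a value
    print(energy)

# Client side, inside the simulation process (illustrative):
# import crpropa
# sim = crpropa.ModuleList()
# sim.add(SendCandidateProperties("127.0.0.1:5555",
#                                 lambda c: c.current.getEnergy()))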
timpalpant/calibre
src/calibre/web/feeds/recipes/__init__.py
Python
gpl-3.0
1,816
0.010463
#!/usr/bin/env python2 __license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' ''' Builtin recipes. ''' import re, time, io from calibre.web.feeds.news import (BasicNewsRecipe, CustomIndexRecipe, AutomaticNewsRecipe, CalibrePeriodical) from calibre.ebooks.BeautifulSoup import BeautifulSoup from calibre.utils.config import JSONConfig basic_recipes = (BasicNewsRecipe, AutomaticNewsRecipe, CustomIndexRecipe, CalibrePeriodical) custom_recipes = JSONConfig('custom_recipes/index.json') def custom_recipe_filename(id_, title): from calibre.utils.filenames import ascii_filename return ascii_filename(title[:50]) + \ ('_%s.recipe'%id_) def compile_recipe(src): ''' Compile the code in src and return a recipe object, if found. :param src: Python source code as bytestring or unicode object :return: Recipe class or None, if no such class was found in src ''' if not isinstance(src, unicode): match = re.search(r'coding[:=]\s*([-\w.]+)', src[:200]) enc = match.group(1) if match else 'utf-8' src = src.decode(enc) # Python complains if there is a coding declaration in a unicode string src = re.sub(r'^#.*coding\s*[:=]\s*([-\w.]+)', '#', src.lstrip(u'\ufeff'), flags=re.MULTILINE) # Translate newlines to \n src = io.StringIO(src, newline=None).getvalue() namespace = { 'BasicNewsRecipe':BasicNewsRecipe, 'AutomaticNewsRecipe':AutomaticNewsRecipe, 'time':time, 're':re, 'BeautifulSoup':BeautifulSoup } exe
c src in namespace for x in namespace.itervalues(): if (isi
nstance(x, type) and issubclass(x, BasicNewsRecipe) and x not in basic_recipes): return x return None
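A minimal round trip through compile_recipe, written in the module's own Python 2 style and assumed to run in a context where BasicNewsRecipe is importable as above; the feed title and URL are placeholders:

src = (
    "class Example(BasicNewsRecipe):\n"
    "    title = u'Example Feed'\n"
    "    feeds = [('News', 'http://example.com/rss')]\n"
)
recipe_class = compile_recipe(src)
assert recipe_class is not None
assert issubclass(recipe_class, BasicNewsRecipe)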
olarcheveque/usinacv
usinacv/usinacv/settings/base.py
Python
mit
8,153
0.004661
"""Common settings and globals.""" from os.path import abspath, basename, dirname, join, normpath from sys import path try: from secret import * except: pass ########## PATH CONFIGURATION # Absolute filesystem path to the Django project directory: DJANGO_ROOT = dirname(dirname(abspath(__file__))) # Absolute filesystem path to the top-level project folder: SITE_ROOT = dirname(DJANGO_ROOT) # Sit
e name: SITE_NAME = basename(DJANGO_ROOT) # Add our project to our pythonpath, this way we don't need to type our project # name in our dotted import paths: path.append(DJANGO_ROOT) # Add SITE_ROOT to lookup application (wsgi) path.append(SITE_ROOT) ########## END PATH CONFIGURATION ########## DEBUG CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG ########## END DEBUG CONFIGURATION ########## MANAGER CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = ( ('olivier', 'olivier.larcheveque@gmail.com'), ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS ########## END MANAGER CONFIGURATION ########## DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.', 'NAME': '', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } ########## END DATABASE CONFIGURATION ########## GENERAL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone TIME_ZONE = 'America/Los_Angeles' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True ########## END GENERAL CONFIGURATION ########## MEDIA CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = normpath(join(SITE_ROOT, 'media')) # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' ########## END MEDIA CONFIGURATION ########## STATIC FILE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = normpath(join(SITE_ROOT, 'assets')) # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = ( normpath(join(SITE_ROOT, 'static')), ) # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) ########## END STATIC FILE CONFIGURATION ########## SECRET CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key ########## END SECRET CONFIGURATION ########## FIXTURE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( normpath(join(SITE_ROOT, 'fixtures')), ) ########## END FIXTURE CONFIGURATION ########## TEMPLATE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.request', 'cms.context_processors.media', 'sekizai.context_processors.sekizai', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_DIRS = ( normpath(join(SITE_ROOT, 'templates')), ) ########## END TEMPLATE CONFIGURATION ########## MIDDLEWARE CONFIGURATION # 
See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes MIDDLEWARE_CLASSES = ( # Default Django middleware. 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.doc.XViewMiddleware', 'django.middleware.common.CommonMiddleware', 'cms.middleware.page.CurrentPageMiddleware', 'cms.middleware.user.CurrentUserMiddleware', 'cms.middleware.toolbar.ToolbarMiddleware', 'cms.middleware.language.LanguageCookieMiddleware', ) ########## END MIDDLEWARE CONFIGURATION ########## URL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf ROOT_URLCONF = '%s.urls' % SITE_NAME ########## END URL CONFIGURATION ########## APP CONFIGURATION DJANGO_APPS = ( # Default Django apps: 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Useful template tags: # 'django.contrib.humanize', # Admin panel and documentation: 'django.contrib.admin', # 'django.contrib.admindocs', 'django.contrib.markup', ) THIRD_PARTY_APPS = ( # Database migration helpers: 'south', # Django CMS 'cms', 'cms.stacks', 'menus', 'mptt', 'menus', 'sekizai', 'django_countries', ) # Apps specific for this project go here. LOCAL_APPS = ( 'resume', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS ########## END APP CONFIGURATION ########## LOGGING CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } ########## END LOGGING CONFIGURATION ########## WSGI CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = 'wsgi.application' ########## END WSGI CONFIGURATION ######### DJANGO CMS CMS_PERMISSION = True CMS_PUBLIC_FOR = "all" LANGUAGES = [ ('fr', 'French'), ('en', 'English'), ] CMS_LANGUAGES = { 'default': { 'fallbacks': ['fr', 'en', ], 'redirect_on_fallback':True, 'public': True, 'hide_untranslated': False, } } CMS_TEMPLATES = ( ('layouts/classic.html', 'Classic'), ('layouts/classic_home.html', 'Classic Home'), ('layouts/classic_2columns.html', 'Classic 2 columns'), ) ######### END DJANGO CMS
FRidh/Sea
Sea/model/couplings/Coupling2DCavities2D.py
Python
bsd-3-clause
1,049
0.010486
import numpy as np

from Coupling import Coupling


class Coupling2DCavities2D(Coupling):
    """
    Coupling for cavity2D to cavity transmission.
    """

    @property
    def impedance_from(self):
        """
        Chooses the right impedance of subsystem_from.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_from.impedance

    @property
    def impedance_to(self):
        """
        Chooses the right impedance of subsystem_to.
        Applies boundary conditions correction as well.
""" return self.subsystem_to.impedance @property def tau(self): """ Transmission coefficient. """ return np.zeros(self.frequency.amount) @property def clf(self): """ Coupling loss factor for transmission from a 2D cavity
to a cavity. .. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi} See BAC, equation 3.14 """ return self.tau / (4.0 * np.pi)
openqt/algorithms
projecteuler/pe157-solving-the-diophantine-equation-sup1supsubvaravarsubsup1supsubvarbvarsub-supvarpvarsupsub10supvarnvarsupsub.py
Python
gpl-3.0
823
0.009792
#!/usr/bin/
env python
# coding=utf-8

"""157. Solving the diophantine equation 1/a + 1/b = p/10^n
https://projecteuler.net/problem=157

Consider the diophantine equation 1/a + 1/b = p/10^n with a, b, p, n positive
integers and a ≤ b.

For n=1 this equation has 20 solutions that are listed below:

1/1+1/1=20/10   | 1/1+1/2=15/10  | 1/1+1/5=12/10  | 1/1+1/10=11/10 | 1/2+1/2=10/10
1/2+1/5=7/10    | 1/2+1/10=6/10  | 1/3+1/6=5/10   | 1/3+1/15=4/10  | 1/4+1/4=5/10
1/4+1/20=3/10   | 1/5+1/5=4/10   | 1/5+1/10=3/10  | 1/6+1/30=2/10  | 1/10+1/10=2/10
1/11+1/110=1/10 | 1/12+1/60=1/10 | 1/14+1/35=1/10 | 1/15+1/30=1/10 | 1/20+1/20=1/10

How many solutions has this equation for 1 ≤ n ≤ 9?
"""
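The statement pins the search space down: for a ≤ b, p = 10^n (a+b)/(ab) must be a positive integer, a ≤ 2·10^n (since 1/a is at least half the sum), and b is largest in the p=1, a=10^n+1 case, giving b ≤ 10^n(10^n+1). A brute-force checker built on exactly that observation; it is far too slow for the real task at n=9 and is shown only to validate the table above:

def count_solutions(n):
    """Count (a, b, p) with a <= b and 1/a + 1/b == p / 10**n (brute force).

    Only practical for n <= 2; included to verify the n=1 table above.
    """
    ten_n = 10 ** n
    total = 0
    for a in range(1, 2 * ten_n + 1):
        for b in range(a, ten_n * (ten_n + 1) + 1):
            # p = 10**n * (a + b) / (a * b) must be a positive integer
            if ten_n * (a + b) % (a * b) == 0:
                total += 1
    return total

assert count_solutions(1) == 20   # matches the 20 solutions listed above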
pivonroll/Qt_Creator
tests/system/suite_APTW/tst_APTW02/test.py
Python
gpl-3.0
1,630
0.00184
############################################################################ # # Copyright (C) 2016 The Qt Company Ltd. # Contact: https://www.qt.io/licensing/ # # This file is part of Qt Creator. # # Commercial License Usage # Licensees holding valid commercial Qt licenses may use this file in # accordance with the commercial license agreement provided with the # Software or, alternatively, in accordance with the terms contained in # a written agreement between you and The Qt Company. For licensing terms # and conditions see https://www.qt.io/terms-conditions. For further # information use the contact form at https://www.qt.io/contact-us. # # GNU General Public License Usage # Alternatively, this file may be used under the terms of the GNU # General Public License version 3 as published by the Free Software # Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT # included in the packaging of this file. Please review the following # information to ensure the GNU General Public License requirements will # be met: https://www.gnu.org/licenses/gpl-3.0.html. # ############################################################################ source("../../shared/qtcreator.py") # test New Qt Quick Appli
cation build and run for release and debug option def main(): startApplication("qtcreator" + SettingsPath) if not startedWithoutPluginError(): return checkedTargets, projectName = createNewQtQuickApplication(tempD
ir(), "SampleApp") # run project for debug and release and verify results runVerify(checkedTargets) #close Qt Creator invokeMenuItem("File", "Exit")
dscho/hg
contrib/synthrepo.py
Python
gpl-2.0
18,180
0.000605
# synthrepo.py - repo synthesis # # Copyright 2012 Facebook # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. '''synthesize structurally interesting change history This extension is useful for creating a repository with properties that are statistically similar to an existing repository. During analysis, a simple probability table is constructed from the history of an existing repository. During synthesis, these properties are reconstructed. Properties that are analyzed and synthesized include the following: - Lines added or removed when an existing file is modified - Number and sizes of files added - Number of files removed - Line lengths - Topological distance to parent changeset(s) - Probability of a commit being a merge - Probability of a newly added file being added to a new directory - Interarrival time, and time zone, of commits - Number of files in each directory A few obvious properties that are not currently handled realistically: - Merges are treated as regular commits with two parents, which is not realistic - Modifications are not treated as operations on hunks of lines, but as insertions and deletions of randomly chosen single lines - Committer ID (always random) - Executability of files - Symlinks and binary files are ignored ''' from __future__ import absolute_import import bisect import collections import itertools import json import os import random import sys import time from mercurial.i18n import _ from mercurial.node import ( nullid, nullrev, short, ) from mercurial import ( cmdutil, context, error, hg, patch, scmutil, util, ) # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or # leave the attribute unspecified. testedwith = 'internal' cmdtable = {} command = cmdutil.command(cmdtable) newfile = set(('new fi', 'rename', 'copy f', 'copy t')) def zerodict(): return collections.defaultdict(lambda: 0) def roundto(x, k): if x > k * 2: return int(round(x / float(k)) * k) return int(round(x)) def parsegitdiff(lines): filename, mar, lineadd, lineremove = None, None, zerodict(), 0 binary = False for line in lines: start = line[:6] if start == 'diff -': if filename: yield filename, mar, lineadd, lineremove, binary mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False filename = patch.gitre.match(line).group(1) elif start in newfile: mar = 'a' elif start == 'GIT bi': binary = True elif start == 'delete': mar = 'r' elif start: s = start[0] if s == '-' and not line.startswith('--- '): lineremove += 1 elif s == '+' and not line.startswith('+++ '): lineadd[roundto(len(line) - 1, 5)] += 1 if filename: yield filename, mar, lineadd, lineremove, binary @command('analyze', [('o', 'output', '', _('write output to given file'), _('FILE')), ('r', 'rev', [], _('analyze specified revisions'), _('REV'))], _('hg analyze'), optionalrepo=True) def analyze(ui, repo, *revs, **opts): '''create a simple model of a repository to use for later synthesis This command examines every changeset in the given range (or all of history if none are specified) and creates a simple statistical model of the history of the repository. It also measures the directory structure of the repository as checked out. 
The model is written out to a JSON file, and can be used by :hg:`synthesize` to create or augment a repository with synthetic commits that have a structure that is statistically similar to the analyzed repository. ''' root = repo.root if not root.endswith(os.path.sep): root += os.path.sep revs = list(revs) revs.extend(opts['rev']) if not revs: revs = [':'] output = opts['output'] if not output: output = os.path.basename(root) + '.json' if output == '-': fp = sys.stdout
else: fp = open(output, 'w') # Always obtain file counts of each directory in the given roo
t directory. def onerror(e): ui.warn(_('error walking directory structure: %s\n') % e) dirs = {} rootprefixlen = len(root) for dirpath, dirnames, filenames in os.walk(root, onerror=onerror): dirpathfromroot = dirpath[rootprefixlen:] dirs[dirpathfromroot] = len(filenames) if '.hg' in dirnames: dirnames.remove('.hg') lineschanged = zerodict() children = zerodict() p1distance = zerodict() p2distance = zerodict() linesinfilesadded = zerodict() fileschanged = zerodict() filesadded = zerodict() filesremoved = zerodict() linelengths = zerodict() interarrival = zerodict() parents = zerodict() dirsadded = zerodict() tzoffset = zerodict() # If a mercurial repo is available, also model the commit history. if repo: revs = scmutil.revrange(repo, revs) revs.sort() progress = ui.progress _analyzing = _('analyzing') _changesets = _('changesets') _total = len(revs) for i, rev in enumerate(revs): progress(_analyzing, i, unit=_changesets, total=_total) ctx = repo[rev] pl = ctx.parents() pctx = pl[0] prev = pctx.rev() children[prev] += 1 p1distance[rev - prev] += 1 parents[len(pl)] += 1 tzoffset[ctx.date()[1]] += 1 if len(pl) > 1: p2distance[rev - pl[1].rev()] += 1 if prev == rev - 1: lastctx = pctx else: lastctx = repo[rev - 1] if lastctx.rev() != nullrev: timedelta = ctx.date()[0] - lastctx.date()[0] interarrival[roundto(timedelta, 300)] += 1 diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), []) fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0 for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff): if isbin: continue added = sum(lineadd.itervalues(), 0) if mar == 'm': if added and lineremove: lineschanged[roundto(added, 5), roundto(lineremove, 5)] += 1 filechanges += 1 elif mar == 'a': fileadds += 1 if '/' in filename: filedir = filename.rsplit('/', 1)[0] if filedir not in pctx.dirs(): diradds += 1 linesinfilesadded[roundto(added, 5)] += 1 elif mar == 'r': fileremoves += 1 for length, count in lineadd.iteritems(): linelengths[length] += count fileschanged[filechanges] += 1 filesadded[fileadds] += 1 dirsadded[diradds] += 1 filesremoved[fileremoves] += 1 invchildren = zerodict() for rev, count in children.iteritems(): invchildren[count] += 1 if output != '-': ui.status(_('writing output to %s\n') % output) def pronk(d): return sorted(d.iteritems(), key=lambda x: x[1], reverse=True) json.dump({'revs': len(revs), 'initdirs': pronk(dirs), 'lineschanged': pronk(lineschanged), 'children': pronk(invchildren), 'fileschanged': pronk(fileschanged), 'filesadded': pronk(filesadded), 'linesinfilesadded': pronk(linesinfilesadded), 'dirsadded': pronk(dirsadded), 'filesremoved': pronk(filesremoved), 'linelengths': pronk(linelengths),
216software/Profiles
communityprofiles/profiles/oldmigrations/0038_auto__add_precalculatedvalue.py
Python
mit
15,090
0.008217
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'PrecalculatedValue' db.create_table('profiles_precalculatedvalue', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('formula', self.gf('django.db.models.fields.TextField')(blank=True)), ('geo_record', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.GeoRecord'])), ('value', self.gf('django.db.models.fields.TextField')(blank=True)), ('data_source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataSource'])), ('notes', self.gf('django.db.models.fields.TextField')(blank=True)), )) db.send_create_signal('profiles', ['PrecalculatedValue']) def backwards(self, orm): # Deleting model 'PrecalculatedValue' db.delete_table('profiles_precalculatedvalue') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'Conten
tType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'profiles.datadomain': { 'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}), 'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'profiles.datapoint': { 'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'}, 'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}), 'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}), 'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}), 'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'}) }, 'profiles.datasource': { 'Meta': {'object_name': 'DataSource'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}) }, 'profiles.denominator': { 'Meta': {'object_name': 'Denominator'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'profiles.denominatorpart': { 'Meta': {'object_name': 'DenominatorPart'}, 'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}), 'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']"}), 'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}), 'part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.IndicatorPart']"}) }, 'profiles.geolevel': { 'Meta': {'object_name': 
'GeoLevel'}, 'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'uniqu
OpusVL/odoo_line_notes_field
line_notes_field/__init__.py
Python
agpl-3.0
25
0
import sale
s_order_n
otes
vaishalitekale/treeherder
treeherder/model/derived/jobs.py
Python
mpl-2.0
83,969
0.000488
import logging import time import zlib from collections import defaultdict from datetime import datetime from hashlib import sha1 from operator import itemgetter import simplejson as json from _mysql_exceptions import IntegrityError from django.conf import settings from django.core.cache import cache from django.core.exceptions import ObjectDoesNotExist from treeherder.etl.common import get_guid_root from treeherder.events.publisher import JobStatusPublisher from treeherder.model import error_summary, utils from treeherder.model.models import Datasource, ExclusionProfile from treeherder.model.tasks import (populate_error_summary, publish_job_action, publish_resultset, publish_resultset_action) from .artifacts import ArtifactsModel from .base import ObjectNotFoundException, TreeherderModelBase logger = logging.getLogger(__name__) class JobsModel(TreeherderModelBase): """ Represent a job repository """ INCOMPLETE_STATES = ["running", "pending"] STATES = INCOMPLETE_STATES + ["completed", "coalesced"] # indexes of specific items in the ``job_placeholder`` objects JOB_PH_JOB_GUID = 0 JOB_PH_COALESCED_TO_GUID = 2 JOB_PH_RESULT_SET_ID = 3 JOB_PH_BUILD_PLATFORM_KEY = 4 JOB_PH_MACHINE_PLATFORM_KEY = 5 JOB_PH_MACHINE_NAME = 6 JOB_PH_DEVICE_NAME = 7 JOB_PH_OPTION_COLLECTION_HASH = 8 JOB_PH_TYPE_KEY = 9 JOB_PH_PRODUCT_TYPE = 10 JOB_PH_WHO = 11 JOB_PH_REASON = 12 JOB_PH_RESULT = 13 JOB_PH_STATE = 14 JOB_PH_START_TIMESTAMP = 16 JOB_PH_END_TIMESTAMP = 17 JOB_PH_RUNNING_AVG = 18 # list of searchable columns, i.e. those who have an index # it would be nice to get this directly from the db and cache it INDEXED_COLUMNS = { "job": { "id": "j.id", "job_guid": "j.job_guid", "job_coalesced_to_guid": "j.job_coalesced_to_guid", "result_set_id": "j.result_set_id", "build_platform_id": "j.build_platform_id", "build_system_type": "j.build_system_type", "machine_platform_id": "j.machine_platform_id", "machine_id": "j.machine_id", "option_collection_hash": "j.option_collection_hash", "job_type_id": "j.job_type_id", "product_id": "j.product_id", "failure_classification_id": "j.failure_classification_id", "who": "j.who", "reason": "j.reason", "result": "j.result", "state": "j.state", "submit_timestamp": "j.submit_timestamp", "start_timestamp": "j.start_timestamp", "end_timestamp": "j.end_timestamp", "last_modified": "j.last_modified", "signature": "j.signature", "tier": "j.tier" }, "result_set": { "id": "rs.id", "revision_hash": "rs.revision_hash", "revision": "revision.revision", "author": "rs.author", "push_timestamp": "rs.push_timestamp" }, "bug_job_map": { "job_id": "job_id", "bug_id": "bug_id", "type": "type", "who": "who", "submit_timestamp": "submit_timestamp" } } # jobs cycle targets # NOTE: There is an order dependency here, cycle_job and # cycle_result_set should be after any tables with foreign keys # to their ids. JOBS_CYCLE_TARGETS = [ "jobs.deletes.cycle_job_artifact", "jobs.deletes.cycle_performance_artifact", "jobs.deletes.cycle_job_log_url", "jobs.deletes.cycle_job_note", "jobs.deletes.cycle_bug_job_map", "jobs.deletes.cycle_job", "jobs.deletes.cycle_revision", "jobs.deletes.cycle_revision_map", "jobs.deletes.cycle_result_set" ] PERFORMANCE_SERIES_JSON_KEYS = [ "subtest_signatures", "test_options" ] @classmethod def create(cls, project): """ Create all the datasource tables for this project. """ source = Datasource(project=project)
source.save() return cls(project=project) def execute(self, **kwargs): return utils.retry_execute(self.get_dhub(), logger, **kwargs) ################## # # Job schema data methods # ################## def get_job(self, id): """Return the job row for
this ``job_id``"""
        repl = [self.refdata_model.get_db_name()]
        data = self.execute(
            proc="jobs.selects.get_job",
            placeholders=[id],
            debug_show=self.DEBUG,
            replace=repl,
        )
        return data

    def get_job_reference_data(self, signature):
        # Retrieve associated data in reference_data_signatures
        result = self.refdata_model.get_reference_data([signature])
        if result and signature in result:
            return result[signature]
        return None

    def get_job_list(self, offset, limit, conditions=None, exclusion_profile=None,
                     visibility="included"):
        """
        Retrieve a list of jobs. It's mainly used by the restful api to list
        the jobs. The conditions parameter is a dict containing a set of
        conditions for each key. e.g.:
        {
            'who': set([('=', 'john')]),
            'result': set([('IN', ("success", "retry"))])
        }
        """
        replace_str, placeholders = self._process_conditions(
            conditions, self.INDEXED_COLUMNS['job']
        )

        if exclusion_profile:
            try:
                if exclusion_profile == "default":
                    profile = ExclusionProfile.objects.get(
                        is_default=True
                    )
                else:
                    profile = ExclusionProfile.objects.get(
                        name=exclusion_profile
                    )
                signatures = profile.flat_exclusion[self.project]
                # NOT here means "not part of the exclusion profile"
                inclusion = "NOT" if visibility == "included" else ""
                replace_str += " AND j.signature {0} IN ({1})".format(
                    inclusion,
                    ",".join(["%s"] * len(signatures))
                )
                placeholders += signatures
            except KeyError:
                # this repo/project has no hidden signatures
                pass
            except ExclusionProfile.DoesNotExist:
                # Either there's no default profile setup or the profile
                # specified is not available
                pass

        repl = [self.refdata_model.get_db_name(), replace_str]

        data = self.execute(
            proc="jobs.selects.get_job_list",
            replace=repl,
            placeholders=placeholders,
            limit=limit,
            offset=offset,
            debug_show=self.DEBUG,
        )
        return data

    def set_state(self, job_id, state):
        """Update the state of an existing job"""
        self.execute(
            proc='jobs.updates.set_state',
            placeholders=[state, job_id],
            debug_show=self.DEBUG
        )

    def get_incomplete_job_guids(self, resultset_id):
        """Get list of ids for jobs of resultset that are not in complete state."""
        return self.execute(
            proc='jobs.selects.get_incomplete_job_guids',
            placeholders=[resultset_id],
            debug_show=self.DEBUG,
            return_type='dict',
            key_column='job_guid'
        )

    def cancel_all_resultset_jobs(self, requester, resultset_id):
        """Set all pending/running jobs in resultset to usercancel."""
        job_guids = list(self.get_incomplete_job_guids(resultset_id))
        jobs = self.get_job_ids_by_guid(job_guids).values()

        # Cancel all the jobs in the database...
        self.execute(
            proc='jobs.updates.cancel_all',
            placeholders=[resultset_id],
            debug_show=self.DEBUG
        )
        # Sending 'cancel_all' action to pu
xiaowing/tinysso
config.py
Python
apache-2.0
54
0
CSRF_ENABLED = True
SECRET_KEY = 'this-is-a-
secret'
jordanemedlock/psychtruths
temboo/core/Library/Yelp/__init__.py
Python
apache-2.0
1,354
0.005908
from temboo.Library.Yelp.SearchByAddress import SearchByAddress, SearchByAddressInputSet, SearchByAddressResultSet, SearchByAddressChoreographyExecution from temboo.Library.Yelp.SearchByBoundingBox import SearchByBoundingBox, SearchByBoundingBoxInputSet, SearchByBoundingBoxResultSet, SearchByBoundingBoxChoreographyExecution from temboo.Library.Yelp.SearchByCategory import SearchByCategory, SearchByCategoryInputSet, SearchByCategoryResultSet, SearchByCategoryChoreographyExecution from temboo.Library.Yelp.SearchByCity import SearchByCity, SearchByCityInputSet, SearchByCityResultSet, SearchByCityChoreographyExecution from temboo.Library.Yelp.SearchByCoordinates import SearchByCoordinates, SearchByCoordinatesInputSet, SearchByCoordinatesResultSet, SearchByCoordinatesChoreographyExecution from temboo.Library.Yelp.
SearchByNeighborhood import SearchByNeighborhood, SearchByNeighborhoodInputSet, SearchByNeighborhoodResultSet, SearchByNeighborhoodChoreographyExecution from temboo.Library.Yelp.SearchForBusiness import
SearchForBusiness, SearchForBusinessInputSet, SearchForBusinessResultSet, SearchForBusinessChoreographyExecution from temboo.Library.Yelp.SearchForBusinessesWithDeals import SearchForBusinessesWithDeals, SearchForBusinessesWithDealsInputSet, SearchForBusinessesWithDealsResultSet, SearchForBusinessesWithDealsChoreographyExecution
antoinecarme/sklearn2sql_heroku
tests/classification/FourClass_500/ws_FourClass_500_SGDClassifier_mysql_code_gen.py
Python
bsd-3-clause
141
0.014184
fr
om sklearn2sql_heroku.tests.classification import generic as class_gen class_gen.test_model("SGDClassifier" , "FourClass_500" , "mysql")
dwang159/iris-api
src/iris/role_lookup/__init__.py
Python
bsd-2-clause
863
0.001159
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license. # See LICENSE in the project root for license information. from iris.custom_import import import_custom_module import logging logger = logging.getLogger(__name__) class IrisRoleLookupException(Exception): pass def get_role_lookups(config):
    modules = config.get('role_lookups', [])

    # default to only support user and mailing_list.
    if not modules:
        modules = ['user', 'mailing_list']

    imported_modules = []
    for m in modules:
        try:
            imported_modules.append(
                import_custom_module('iris.role_lookup', m)(config))
            logger.info('Loaded role lookup module: %s', m)
        except Exception:
            logger.except
ion('Failed to load role lookup module: %s', m) return imported_modules
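`import_custom_module` lives in iris.custom_import and is not shown here; the general pattern it wraps can be sketched with the standard library, under the assumption (for illustration only) that each iris.role_lookup.<name> module exposes a class named after the module:

import importlib

def load_role_lookup(name, config):
    # Simplified stand-in for iris.custom_import.import_custom_module;
    # the getattr naming convention below is an assumption.
    module = importlib.import_module('iris.role_lookup.%s' % name)
    return getattr(module, name)(config)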
SartoNess/BitcoinUnlimited
qa/rpc-tests/abandonconflict.py
Python
mit
7,686
0.008197
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Copyright (c) 2015-2016 The Bitcoin Unlimited developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * try: import urllib.parse as urlparse except ImportError: import urlparse class AbandonConflictTest(BitcoinTestFramework): def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"])) connect_nodes(self.nodes[0], 1) def run_test(self): self.nodes[1].generate(100) sync_blocks(self.nodes) balance = self.nodes[0].getbalance() txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) sync_mempools(self.nodes) self.nodes[1].generate(1) sync_blocks(self.nodes) newbalance = self.nodes[0].getbalance() assert(balance - newbalance < Decimal("0.001")) #no more than fees lost balance = newbalance url = urlparse.urlparse(self.nodes[1].url) self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1))) # Identify the 10btc outputs nA = nex
t(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10")) nB = next(i for i, vout in enumerate(self.nodes[
0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) inputs =[] # spend 10btc outputs from txA and txB inputs.append({"txid":txA, "vout":nA}) inputs.append({"txid":txB, "vout":nB}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") outputs[self.nodes[1].getnewaddress()] = Decimal("5") signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) # Identify the 14.99998btc output nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998")) #Create a child tx spending AB1 and C inputs = [] inputs.append({"txid":txAB1, "vout":nAB}) inputs.append({"txid":txC, "vout":nC}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) # In mempool txs from self should increase balance from change newbalance = self.nodes[0].getbalance() assert(newbalance == balance - Decimal("30") + Decimal("24.9996")) balance = newbalance # Restart the node with a higher min relay fee so the parent tx is no longer in mempool # TODO: redo with eviction # Note had to make sure tx did not have AllowFree priority stop_node(self.nodes[0],0) self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"]) # Verify txs no longer in mempool assert(len(self.nodes[0].getrawmempool()) == 0) # Not in mempool txs from self should only reduce balance # inputs are still spent, but change not received newbalance = self.nodes[0].getbalance() assert(newbalance == balance - Decimal("24.9996")) # Unconfirmed received funds that are not in mempool, also shouldn't show # up in unconfirmed balance unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance() assert(unconfbalance == newbalance) # Also shouldn't show up in listunspent assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]) balance = newbalance # Abandon original transaction and verify inputs are available again # including that the child tx was also abandoned self.nodes[0].abandontransaction(txAB1) newbalance = self.nodes[0].getbalance() assert(newbalance == balance + Decimal("30")) balance = newbalance # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned stop_node(self.nodes[0],0) self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]) assert(len(self.nodes[0].getrawmempool()) == 0) assert(self.nodes[0].getbalance() == balance) # But if its received again then it is unabandoned # And since now in mempool, the change is available # But its child tx remains abandoned self.nodes[0].sendrawtransaction(signed["hex"]) newbalance = self.nodes[0].getbalance() assert(newbalance == balance - Decimal("20") + Decimal("14.99998")) balance = newbalance # Send child tx again so its unabandoned self.nodes[0].sendrawtransaction(signed2["hex"]) newbalance = self.nodes[0].getbalance() assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) balance = newbalance # Remove using high relay fee again stop_node(self.nodes[0],0) self.nodes[0]=start_node(0, self.options.tmpdir, 
["-debug","-logtimemicros","-minrelaytxfee=0.0001"]) assert(len(self.nodes[0].getrawmempool()) == 0) newbalance = self.nodes[0].getbalance() assert(newbalance == balance - Decimal("24.9996")) balance = newbalance # Create a double spend of AB1 by spending again from only A's 10 output # Mine double spend from node 1 inputs =[] inputs.append({"txid":txA, "vout":nA}) outputs = {} outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") tx = self.nodes[0].createrawtransaction(inputs, outputs) signed = self.nodes[0].signrawtransaction(tx) self.nodes[1].sendrawtransaction(signed["hex"]) self.nodes[1].generate(1) connect_nodes(self.nodes[0], 1) sync_blocks(self.nodes) # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted newbalance = self.nodes[0].getbalance() assert(newbalance == balance + Decimal("20")) balance = newbalance # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 BTC output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() #assert(newbalance == balance - Decimal("10")) print "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer" print "conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315" print balance , " -> " , newbalance , " ?" if __name__ == '__main__': AbandonConflictTest().main()
ananswam/bioscrape
bioscrape/pid_interfaces.py
Python
mit
13,998
0.008573
from bioscrape.inference import DeterministicLikelihood as DLL
from bioscrape.inference import StochasticTrajectoriesLikelihood as STLL
from bioscrape.inference import StochasticTrajectories
from bioscrape.inference import BulkData

import warnings
import numpy as np

class PIDInterface():
    '''
    PID Interface : Parameter identification interface.
    Super class to create parameter identification (PID) interfaces.
    Two PID interfaces are currently implemented:
    deterministic and stochastic inference using time-series data.
    To add a new PIDInterface - simply add a new subclass of this parent class with your desired
    log-likelihood functions. You can even have your own check_prior function in that class
    if you prefer not to use the built-in priors with this package.
    '''
    def __init__(self, params_to_estimate, M, prior):
        '''
        Parent class for all PID interfaces.
        Arguments:
        * `params_to_estimate` : List of parameter names to be estimated
        * `M` : The bioscrape Model object to use for inference
        * `prior` : A dictionary specifying prior distribution.
        Built-in prior functions include `uniform_prior` and `gaussian_prior`.
        Each prior has its own syntax for accepting the distribution parameters in the dictionary.
        New priors may be added. The suggested format for prior dictionaries:
        prior_dict = {'parameter_name': ['prior_name', prior_distribution_parameters]}
        For the built-in uniform prior, use {'parameter_name':['uniform', lower_bound, upper_bound]}
        For the built-in gaussian prior, use {'parameter_name':['gaussian', mean, standard_deviation, probability threshold]}

        New PID interfaces can be added by creating child classes of PIDInterface class as shown for
        built-in PID interfaces : `StochasticInference` and `DeterministicInference`
        '''
        self.params_to_estimate = params_to_estimate
        self.M = M
        self.prior = prior
        return

    def check_prior(self, params_dict):
        '''
        To add new prior functions: simply add a new function similar to ones that exist and then call it here.
        '''
        lp = 0.0
        for key, value in params_dict.items():
            if 'positive' in self.prior[key] and value < 0:
                return np.inf
            prior_type = self.prior[key][0]
            if prior_type == 'uniform':
                lp += self.uniform_prior(key, value)
            elif prior_type == 'gaussian':
                lp += self.gaussian_prior(key, value)
            elif prior_type == 'exponential':
                lp += self.exponential_prior(key, value)
            elif prior_type == 'gamma':
                lp += self.gamma_prior(key, value)
            elif prior_type == 'log-uniform':
                lp += self.log_uniform_prior(key, value)
            elif prior_type == 'log-gaussian':
                lp += self.log_gaussian_prior(key, value)
            elif prior_type == 'beta':
                lp += self.beta_prior(key, value)
            elif prior_type == 'custom':
                # The last element in the prior dictionary must be a callable function.
                # The callable function should have the following signature:
                # Arguments: param_name (str), param_value (float)
                # Returns: log prior probability (float or numpy inf)
                custom_function = self.prior[key][-1]
                lp += custom_function(key, value)
            else:
                raise ValueError('Prior type undefined.')
        return lp

    def uniform_prior(self, param_name, param_value):
        '''
        Check if given param_value is valid according to the prior distribution.
        Returns np.inf if the param_value is outside the prior range and
        the (flat) log prior density if it is inside.
        param_name is used to look for the parameter in the prior dictionary.
        '''
        p
rior_dict = self.prior
        if prior_dict is None:
            raise ValueError('No prior found')
        lower_bound = prior_dict[param_name][1]
        upper_bound = prior_dict[param_name][2]
        if param_value > upper_bound or param_value < lower_bound:
            return np.inf
        else:
            return np.log(1 / (upper_bound - lower_bound))

    def gaussian_prior(self, param_name, param_value):
        '''
        Check if given param_value is valid according to the prior distribution.
        Returns the log prior probability or np.inf if the param_value is invalid.
        '''
        prior_dict = self.prior
        if prior_dict is None:
            raise ValueError('No prior found')
        mu = prior_dict[param_name][1]
        sigma = prior_dict[param_name][2]
        if sigma < 0:
            raise ValueError('The standard deviation must be positive.')
        # Using probability density function for normal distribution
        # Using scipy.stats.norm has overhead that affects speed up to 2x
        prob = 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(-0.5 * (param_value - mu)**2 / sigma**2)
        if prob < 0:
            warnings.warn('Probability less than 0 while checking Gaussian prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
            return np.inf
        else:
            return np.log(prob)

    def exponential_prior(self, param_name, param_value):
        '''
        Check if given param_value is valid according to the prior distribution.
        Returns the log prior probability or np.inf if the param_value is invalid.
        '''
        prior_dict = self.prior
        if prior_dict is None:
            raise ValueError('No prior found')
        lambda_p = prior_dict[param_name][1]
        prob = lambda_p * np.exp(-lambda_p * param_value)
        if prob < 0:
            warnings.warn('Probability less than 0 while checking Exponential prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
            return np.inf
        else:
            return np.log(prob)

    def gamma_prior(self, param_name, param_value):
        '''
        Check if given param_value is valid according to the prior distribution.
        Returns the log prior probability or np.inf if the param_value is invalid.
        '''
        prior_dict = self.prior
        if prior_dict is None:
            raise ValueError('No prior found')
        alpha = prior_dict[param_name][1]
        beta = prior_dict[param_name][2]
        from scipy.special import gamma
        prob = (beta**alpha) / gamma(alpha) * param_value**(alpha - 1) * np.exp(-1 * beta * param_value)
        if prob < 0:
            warnings.warn('Probability less than 0 while checking Gamma prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
            return np.inf
        else:
            return np.log(prob)

    def beta_prior(self, param_name, param_value):
        '''
        Check if given param_value is valid according to the prior distribution.
        Returns the log prior probability or np.inf if the param_value is invalid.
        '''
        prior_dict = self.prior
        if prior_dict is None:
            raise ValueError('No prior found')
        alpha = prior_dict[param_name][1]
        beta = prior_dict[param_name][2]
        # scipy.special.beta is a function, not a module, so it must be
        # imported with a from-import.
        from scipy.special import beta as beta_func
        prob = (param_value**(alpha - 1) * (1 - param_value)**(beta - 1)) / beta_func(alpha, beta)
        if prob < 0:
            warnings.warn('Probability less than 0 while checking Beta prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
            return np.inf
        else:
            return np.log(prob)

    def log_uniform_prior(self, param_name, param_value):
        '''
        Check if given param_value is valid according to the prior distribution.
        Returns the log prior probability or np.inf if the param_value is invalid.
        '''
        prior_dict = self.prior
        if prior_dict is None:
            raise ValueError('No prior found')
        lower_bound = prior_dict[param_name][1]
        upper_bound = prior_dict[param_name][2]
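A small usage sketch of the prior-dictionary format documented in `__init__` above, with made-up parameter names; the last line reproduces by hand what uniform_prior returns for an in-range value:

import numpy as np

prior = {
    'k_transcription': ['uniform', 0.0, 10.0],
    'k_degradation': ['gaussian', 1.0, 0.5, 0.01],
}

lower, upper = prior['k_transcription'][1], prior['k_transcription'][2]
print(np.log(1.0 / (upper - lower)))  # log(0.1), the flat log-density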
UKTradeInvestment/export-wins-data
wins/migrations/0018_merge.py
Python
gpl-3.0
324
0
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-08-03 13:45 from __future__ import unicode_literals from django.db import migrations class
Migration(migrations.Migration): dependencies = [ ('win
s', '0016_win_updated'), ('wins', '0017_auto_20160801_1230'), ] operations = [ ]
GeoMop/GeoMop
src/JobPanel/data/__init__.py
Python
gpl-3.0
131
0.007634
from
.states import (TaskStatus, MultijobActions, TASK_STATUS_PERMITTED_ACTIONS,
TASK_STATUS_STARTUP_ACTIONS)
antoinecarme/pyaf
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_Seasonal_Hour_ARX.py
Python
bsd-3-clause
156
0.051282
import tests.model_co
ntrol.test_ozone_custom_models_enabled as testmod testmod.build_model( ['Logit'] , [
'ConstantTrend'] , ['Seasonal_Hour'] , ['ARX'] );
Azure/azure-sdk-for-python
sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/aio/_sql_virtual_machine_management_client.py
Python
mit
5,342
0.003931
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from copy import deepcopy from typing import Any, Awaitable, Optional, TYPE_CHECKING from azure.core.rest import AsyncHttpResponse, HttpRequest from azure.mgmt.core import AsyncARMPipelineClient from msrest import Deserializer, Serializer from .. import models from ._configuration import SqlVirtualMachineManagementClientConfiguration from .operations import AvailabilityGroupListenersOperations, Operations, SqlVirtualMachineGroupsOperations, SqlVirtualMachinesOperations if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class SqlVirtualMachineManagementClient: """The SQL virtual machine management API provides a RESTful set of web APIs that interact with Azure Compute, Network & Storage services to manage your SQL Server virtual machine. The API enables users to create, delete and retrieve a SQL virtual machine, SQL virtual machine group or availability group listener. :ivar availability_group_listeners: AvailabilityGroupListenersOperations operations :vartype availability_group_listeners: azure.mgmt.sqlvirtualmachine.aio.operations.AvailabilityGroupListenersOperations :ivar operations: Operations operations :vartype operations: azure.mgmt.sqlvirtualmachine.aio.operations.Operations :ivar sql_virtual_machine_groups: SqlVirtualMachineGroupsOperations operations :vartype sql_virtual_machine_groups: azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachineGroupsOperations :ivar sql_virtual_machines: SqlVirtualMachinesOperations operations :vartype sql_virtual_machines: azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachinesOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: Subscription
ID that identifies an Azure subscription. :type subscription_id: str :param base_url: Service URL. Default value is 'https://management.azure.com'. :type base_url: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """ def __init__( self, credential: "AsyncTokenCredential",
subscription_id: str, base_url: str = "https://management.azure.com", **kwargs: Any ) -> None: self._config = SqlVirtualMachineManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self._serialize.client_side_validation = False self.availability_group_listeners = AvailabilityGroupListenersOperations(self._client, self._config, self._serialize, self._deserialize) self.operations = Operations(self._client, self._config, self._serialize, self._deserialize) self.sql_virtual_machine_groups = SqlVirtualMachineGroupsOperations(self._client, self._config, self._serialize, self._deserialize) self.sql_virtual_machines = SqlVirtualMachinesOperations(self._client, self._config, self._serialize, self._deserialize) def _send_request( self, request: HttpRequest, **kwargs: Any ) -> Awaitable[AsyncHttpResponse]: """Runs the network request through the client's chained policies. >>> from azure.core.rest import HttpRequest >>> request = HttpRequest("GET", "https://www.example.org/") <HttpRequest [GET], url: 'https://www.example.org/'> >>> response = await client._send_request(request) <AsyncHttpResponse: 200 OK> For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart :param request: The network request you want to make. Required. :type request: ~azure.core.rest.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to False. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.rest.AsyncHttpResponse """ request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs) async def close(self) -> None: await self._client.close() async def __aenter__(self) -> "SqlVirtualMachineManagementClient": await self._client.__aenter__() return self async def __aexit__(self, *exc_details) -> None: await self._client.__aexit__(*exc_details)
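A hypothetical usage sketch of the async client above; it assumes azure-identity is installed and that `sql_virtual_machines` exposes the usual generated `list` paging method:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.sqlvirtualmachine.aio import SqlVirtualMachineManagementClient

async def show_sql_vms(subscription_id):
    # `async with` drives the __aenter__/__aexit__ pair defined above.
    async with SqlVirtualMachineManagementClient(
            DefaultAzureCredential(), subscription_id) as client:
        async for vm in client.sql_virtual_machines.list():  # assumed method
            print(vm.name)

# asyncio.run(show_sql_vms("<subscription-id>"))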
ShawnHymel/TweetRace
pytest/wager_test_01.py
Python
mit
5,938
0.011115
import os import sys import pygame import signal import time import ConfigParser from twython import TwythonStreamer #----------------------------------------------------------------------------- # Import custom modules #----------------------------------------------------------------------------- # Add pyscope module to path path = os.path.join(os.path.dirname(__file__), 'py_apps/pyscope') sys.path.append(path) # Add twit_feed module to path path = os.path.join(os.path.dirname(__file__), '../py_apps/twit_feed') sys.path.append(path) import pyscope import twit_feed #import tf_test_02 #----------------------------------------------------------------------------- # Constants #----------------------------------------------------------------------------- MAX_ENTRIES = 1 FPS = 5 BET_TERM = ['#testing', '#blargz'] #['@Gr8AmTweetRace'] AUTH = { 'app_key': 'li8wn8Tb7xBifCnNIgyqUw', 'app_secret': 'vcwq36w4C4VXamlqWBDKM2E8etsOoangDoMhxNDU', 'oauth_token': '1969690717-rGw3VkRQ8IyL4OcPWtv5Y2CeBdVn8ndJrjGKraI', 'oauth_token_secret': 'KO7YIFMKWKaYTtz2zEyaSy044ixj5kIbWrDtZZL96ly0H'} # Common colors WHITE = 255,255,255 GREEN = 0,255,0 BLACK = 0,0,0 BLUE = 0,0,255 RED = 255,0,0 #----------------------------------------------------------------------------- # Global Variables #----------------------------------------------------------------------------- g_terms = [] g_bet_loop = None g_scope = None #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- # Handle graphics on the screen def draw_starting_screen(): global g_terms global g_scope # Create fonts font_mode = pygame.font.Font(None, 68) font_title_1 = pygame.font.Font(None, 68) font_title_2 = pygame.font.Font(None, 68) font_instr_1 = pygame.font.Font(None, 36) font_instr_2 = pygame.font.Font(None, 36) font_ent_title = pygame.font.Font(None, 36) font_ent = pygame.font.Font(None, 36) # Create background rect_bg = pygame.draw.rect(g_scope.screen, BLACK, \ (0, 0, 540, 960), 0) rect_title = pygame.draw.rect(g_scope.screen, WHITE, \ (20, 20, 500, 100), 0) rect_game_mode = pygame.draw.rect(g_scope.screen, WHITE, \ (20, 140, 500, 60), 0) rect_instructions = pygame.draw.rect(g_scope.screen, WHITE, \ (20, 220, 500, 100), 0) rect_tweets = pygame.draw.rect(g_scope.screen, WHITE, \ (20, 340, 500, 300), 0) # Draw title title1 = "The Great American" title2 = "Tweet Race" text_title_1 = font_title_1.render(title1,1,BLACK) text_title_2 = font_title_2.render(title2,1,BLACK) g_scope.screen.blit(text_title_1, (40, 25)) g_scope.screen.blit(text_title_2, (130, 70)) # Draw game mode mode_str = font_mode.render('Starting Gate',1,BLACK) g_scope.screen.blit(mode_str, (115, 140)) # Draw instructions instr_str_1 = 'Send a tweet to @Gr8AmTweetRace' instr_str_2 = 'with a #term to enter!' 
instr_1 = font_instr_1.render(instr_str_1,1,BLACK) instr_2 = font_instr_2.render(instr_str_2,1,BLACK) g_scope.screen.blit(instr_1, (40, 240)) g_scope.screen.blit(instr_2, (40, 270)) # Draw entrants ent_title = font_ent_title.render('Contestants',1,BLACK) g_scope.screen.blit(ent_title, (40, 360)) ent_y = 390 for i in range(0, MAX_ENTRIES): ent_str = ''.join([str(i + 1), ': ']) if i < len(g_terms): ent_str = ''.join([ent_str, g_terms[i]]) ent_disp = font_ent.render(ent_str,1,BLACK) g_scope.screen.blit(ent_disp, (40, 390 + (i * 30))) # Test if a term is already in the term list def is_in_terms(entry): global g_terms for term in g_terms: if ''.join(['#', entry]) == term: return True return False #----------------------------------------------------------------------------- # Main #----------------------------------------------------------------------------- def main(): global g_bet_loop global g_scope global g_terms # Setup Twitter streamer tf = twit_feed.TwitFeed(AUTH) #tf = tf_test_02.TwitFeed(AUTH) # Tweet that we are accepting bets # Start streamer to search for ter
ms tf.start_track_streamer(BET_TERM) # Setup display pygame.init() #g_scope = pyscope.pyscope() fps_clock = pygame.time.Clock() pygame.mouse.set_visible(False) # Main game loop g_bet_loop = False while g_bet_loop: # H
andle game events for event in pygame.event.get(): # End game if quit event raises if event.type == pygame.QUIT: g_bet_loop = False # End game if 'q' or 'esc' key pressed elif event.type == pygame.KEYDOWN: if event.key == pygame.K_q or event.key == pygame.K_ESCAPE: g_bet_loop = False # Get entries and print them entries = tf.get_entries() for entry in entries: print entry if is_in_terms(entry) == False: g_terms.append(''.join(['#', entry])) print len(g_terms) if len(g_terms) >= MAX_ENTRIES: print 'breaking' g_bet_loop = False # Update screen draw_starting_screen() pygame.display.update() fps_clock.tick(FPS) # Clean up Twitter feed and pygame print str(pygame.time.get_ticks()) tf.stop_tracking() print str(pygame.time.get_ticks()) pygame.quit() # Print terms print 'Search terms: ', g_terms # Run main main()
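A side note on is_in_terms above: the linear scan is fine at MAX_ENTRIES = 1, but a set gives O(1) membership and removes the helper entirely. A sketch keeping the module's global-variable style:

g_terms = set()

def add_entry(entry):
    # set membership replaces the is_in_terms() loop
    term = '#' + entry
    if term in g_terms:
        return False
    g_terms.add(term)
    return True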
moonbury/notebooks
github/Numpy/Chapter2/elementselection.py
Python
gpl-3.0
492
0.034553
from __future__ import print_function import numpy as np # Chapter 2 Beginning with NumPy fundamentals # # Demonstrates the selection # of ndarray elements. # # Run from the commandline with # # python elementselection.py a = np.array([[1,2],[3,4]]) print("In: a") print(a) #Ou
t: #array([[1, 2], # [3, 4]]) print("In: a[0,0]") print(a[0,0]) #Out: 1 print("In: a[0,1]") print(a[0,1]) #Out: 2 prin
t("In: a[1,0]") print(a[1,0]) #Out: 3 print("In: a[1,1]") print(a[1,1]) #Out: 4
algorhythms/LeetCode
146 LRU Cache py3.py
Python
mit
2,700
0
#!/usr/bin/python3 """ Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put. get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1. put(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item. Follow up: Could you do both operations in O(1) time complexity? Example: LRUCache cache = new LRUCache( 2 /* capacity */ ); cache.put(1, 1); cache.put(2, 2); cache.get
(1); // returns 1 cache.put(3, 3); // evicts key 2 cache.get(2); // returns -1 (not found) cache.put(4, 4); // evicts key 1 cache.get(1); // returns -1 (not found) cache.get(3); // returns 3 cache.get(4); // returns 4 """ class Node: def __init__(self, key, val): self.key = key self.val = val
self.prev, self.next = None, None class LRUCache: def __init__(self, capacity: int): """ O(1) look up - Map O(1) update most recent vs. least recent - Linked List But Single linked list is not enough then Double Linked List Need dummy head and tail to avoid over complication of null checking Essentially it is the OrderedDict """ self.head = Node(None, None) self.tail = Node(None, None) self.head.next = self.tail self.tail.prev = self.head self.cap = capacity self.map = {} def get(self, key: int) -> int: if key in self.map: node = self.map[key] self._remove(key) self._appendleft(node) return node.val return -1 def put(self, key: int, value: int) -> None: if key in self.map: self._remove(key) elif len(self.map) >= self.cap: node = self.tail.prev self._remove(node.key) node = Node(key, value) self._appendleft(node) def _appendleft(self, node: Node): self.map[node.key] = node # update/delete map in these two operators nxt = self.head.next self.head.next = node node.prev = self.head node.next = nxt nxt.prev = node def _remove(self, key: int): node = self.map[key] prev = node.prev nxt = node.next prev.next = nxt nxt.prev = prev del self.map[key] # update/delete map in these two operators # Your LRUCache object will be instantiated and called as such: # obj = LRUCache(capacity) # param_1 = obj.get(key) # obj.put(key,value)
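The docstring's usage example is written in Java-like pseudocode; the same sequence against this class in Python, as a quick check of the eviction order:

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1
cache.put(3, 3)            # evicts key 2, the least recently used
assert cache.get(2) == -1
cache.put(4, 4)            # evicts key 1
assert cache.get(1) == -1
assert cache.get(3) == 3
assert cache.get(4) == 4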
googleapis/python-api-gateway
samples/generated_samples/apigateway_v1_generated_api_gateway_service_create_api_async.py
Python
apache-2.0
1,588
0.00063
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an
"AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language govern
ing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for CreateApi # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-api-gateway # [START apigateway_v1_generated_ApiGatewayService_CreateApi_async] from google.cloud import apigateway_v1 async def sample_create_api(): # Create a client client = apigateway_v1.ApiGatewayServiceAsyncClient() # Initialize request argument(s) request = apigateway_v1.CreateApiRequest( parent="parent_value", api_id="api_id_value", ) # Make the request operation = client.create_api(request=request) print("Waiting for operation to complete...") response = await operation.result() # Handle the response print(response) # [END apigateway_v1_generated_ApiGatewayService_CreateApi_async]
AklerQ/python_training
model/group.py
Python
apache-2.0
674
0.002967
from sys import maxsize class Group: def __init__(self, name=None, header=None, footer=None, id=None): self.name = name
self.header = header self.footer = footer self.id = id def __repr__(self): return "%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer) def __eq__(self, other): return (self.id is None or other.id is None
or self.id == other.id) and (self.name == other.name or self.name is None) def id_or_max(self): if self.id: return int(self.id) else: return maxsize
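id_or_max exists so unsaved groups (id is None) sort after persisted ones; a usage sketch:

groups = [Group(name="b", id="2"), Group(name="a"), Group(name="c", id="1")]
groups.sort(key=Group.id_or_max)
# ids "1" and "2" come first, then the unsaved group, since maxsize sorts last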
IvIePhisto/ECoXiPy
ecoxipy/pyxom/__init__.py
Python
mit
21,834
0.001147
# -*- coding: utf-8 -*-
u'''\
:mod:`ecoxipy.pyxom` - Pythonic XML Object Model (PyXOM)
========================================================

This module implements the *Pythonic XML Object Model* (PyXOM) for the
representation of XML structures. To conveniently create PyXOM data
structures use :mod:`ecoxipy.pyxom.output`, for indexing use
:mod:`ecoxipy.pyxom.indexing` (if :attr:`Document.element_by_id` and
:attr:`Document.elements_by_name` are not enough for you).


.. _ecoxipy.pyxom.examples:

Examples
--------

XML Creation
^^^^^^^^^^^^

If you use the constructors be sure to supply the right data types,
otherwise use the :meth:`create` methods or use
:class:`ecoxipy.MarkupBuilder`, which take care of conversion.

>>> from ecoxipy import MarkupBuilder
>>> b = MarkupBuilder()
>>> document = Document.create(
...     b.article(
...         b.h1(
...             b & '<Example>',
...             data='to quote: <&>"\\''
...         ),
...         b.p(
...             {'umlaut-attribute': u'äöüß'},
...             'Hello', Element.create('em', ' World',
...                 attributes={'count':1}), '!'
...         ),
...         None,
...         b.div(
...             Element.create('data-element', Text.create(u'äöüß <&>')),
...             b(
...                 '<p attr="value">raw content</p>Some Text',
...                 b.br,
...                 (i for i in range(3))
...             ),
...             (i for i in range(3, 6))
...         ),
...         Comment.create('<This is a comment!>'),
...         ProcessingInstruction.create('pi-target', '<PI content>'),
...         ProcessingInstruction.create('pi-without-content'),
...         b['foo:somexml'](
...             b['foo:somexml']({'foo:bar': 1, 't:test': 2}),
...             b['somexml']({'xmlns': ''}),
...             b['bar:somexml'],
...             {'xmlns:foo': 'foo://bar', 'xmlns:t': '',
...                 'foo:bar': 'Hello', 'id': 'foo'}
...         ),
...         {'xmlns': 'http://www.w3.org/1999/xhtml/'}
...     ), doctype_name='article', omit_xml_declaration=True
... )


Enforcing Well-Formedness
^^^^^^^^^^^^^^^^^^^^^^^^^

Using the :meth:`create` methods or passing the parameter
``check_well_formedness`` as :const:`True` to the appropriate constructors
enforces that the element, attribute and document type names are valid XML
names, and that processing instruction target and content as well as
comment contents conform to their constraints:

>>> from ecoxipy import XMLWellFormednessException
>>> def catch_not_well_formed(cls, *args, **kargs):
...     try:
...         return cls.create(*args, **kargs)
...     except XMLWellFormednessException as e:
...         print(e)
>>> t = catch_not_well_formed(Document, [], doctype_name='1nvalid-xml-name')
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(Document, [], doctype_name='html', doctype_publicid='"')
The value "\\"" is not a valid document type public ID.
>>> t = catch_not_well_formed(Document, [], doctype_name='html', doctype_systemid='"\\'')
The value "\\"'" is not a valid document type system ID.
>>> t = catch_not_well_formed(Element, '1nvalid-xml-name', [], {})
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(Element, 't', [], attributes={'1nvalid-xml-name': 'content'})
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(ProcessingInstruction, '1nvalid-xml-name')
The value "1nvalid-xml-name" is not a valid XML processing instruction target.
>>> t = catch_not_well_formed(ProcessingInstruction, 'target', 'invalid PI content ?>')
The value "invalid PI content ?>" is not a valid XML processing instruction content because it contains "?>".
>>> t = catch_not_well_formed(Comment, 'invalid XML comment --')
The value "invalid XML comment --" is not a valid XML comment because it contains "--".


Navigation
^^^^^^^^^^

Use list semantics to retrieve child nodes and attribute access to retrieve
node information:

>>> print(document.doctype.name)
article
>>> print(document[0].name)
article
>>> print(document[0].attributes['xmlns'].value)
http://www.w3.org/1999/xhtml/
>>> print(document[0][-3].target)
pi-target
>>> document[0][1].parent is document[0]
True
>>> document[0][0] is document[0][1].previous and document[0][1].next is document[0][2]
True
>>> document.parent is None and document[0].previous is None and document[0].next is None
True
>>> document[0].attributes.parent is document[0]
True

You can retrieve iterators for navigation through the tree:

>>> list(document[0][0].ancestors)
[ecoxipy.pyxom.Element['article', {...}], ecoxipy.pyxom.Document[ecoxipy.pyxom.DocumentType('article', None, None), True, 'UTF-8']]
>>> list(document[0][1].children())
[ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!')]
>>> list(document[0][2].descendants())
[ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5')]
>>> list(document[0][-2].preceding_siblings)
[ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['h1', {...}]]
>>> list(document[0][2][-1].preceding)
[ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['h1', {...}]]
>>> list(document[0][0].following_siblings)
[ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}]]
>>> list(document[0][1][0].following)
[ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}]]

Descendants and children can also be retrieved in reverse document order:

>>> list(document[0][1].children(True)) == list(reversed(list(document[0][1].children())))
True
>>> list(document[0][2].descendants(True))
[ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>')]

Normally :meth:`~ContainerNode.descendants` traverses the XML tree
depth-first, but you can also use breadth-first traversal:

>>> list(document[0][2].descendants(depth_first=False))
[ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>'), ecoxipy.pyxom.Text('raw content')]
>>> list(document[0][2].descendants(True, False))
[ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('raw content'
pozetroninc/micropython
tests/internal_bench/var-8-namedtuple-1st.py
Python
mit
190
0.010526
import bench
from ucollections import namedtuple

T = namedtuple("Tup", ["num", "bar"])

def test(num):
    t = T(20000000, 0)
    i = 0
    while i < t.num:
        i += 1

bench.run(test)
agustin380/school-registry
src/school_registry/wsgi.py
Python
gpl-3.0
407
0
""" WSGI config for school_registry project. It exposes the WSGI callable
as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "school_registry.settings") applica
tion = get_wsgi_application()
le9i0nx/ansible
lib/ansible/plugins/action/aireos.py
Python
gpl-3.0
3,433
0.001748
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.aireos.aireos import aireos_provider_spec
from ansible.module_utils.network.common.utils import load_provider

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class ActionModule(_ActionModule):

    def run(self, tmp=None, task_vars=None):
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = load_provider(aireos_provider_spec, self._task.args)

        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'aireos'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        # make sure we are in the right cli context which should be
        # enable mode and not config module
        conn = Connection(socket_path)
        out = conn.get_prompt()
        if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            conn.send_command('exit')

        task_vars['ansible_socket'] = socket_path

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
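Editor's note: the `provider[...] or play-context` chains above implement per-task overrides with play-level fallbacks. A tiny standalone illustration (values are hypothetical, not Ansible API calls):

provider = {'host': None, 'port': None}   # nothing set at the task level
play_context_remote_addr = '192.0.2.1'    # so the play context wins
remote_addr = provider['host'] or play_context_remote_addr
port = int(provider['port'] or 22)        # final fallback: the SSH default
print(remote_addr, port)                  # 192.0.2.1 22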
cmancone/mygrations
mygrate.py
Python
mit
795
0.003774
#!/usr/bin/env python3

import argparse
from mygrations.mygrate import mygrate

# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument(
    'command',
    nargs='?',
    default='version',
    choices=['version', 'apply', 'check', 'import', 'plan', 'plan_export'],
    help='Action to execute (default: version)'
)
parser.add_argument(
    '--config',
    default='mygrate.conf',
    help='Location of mygrate configuration file (default: mygrate.conf)'
)
parser.add_argument('-f', dest='force', action='store_true', help='Ignore errors/warnings and execute command anyway')
parser.add_argument('-v', dest='version', action='store_true', help='Display version')
args = parser.parse_args()

# load up a mygrate object
my = mygrate(args.command, vars(args))

# and execute
my.execute()
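Editor's sketch: how the positional `command` with `nargs='?'` and a default behaves. This rebuilds just the relevant parser pieces (Python 3, self-contained; the argument values are hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('command', nargs='?', default='version',
                    choices=['version', 'apply', 'check', 'import', 'plan', 'plan_export'])
parser.add_argument('--config', default='mygrate.conf')
parser.add_argument('-f', dest='force', action='store_true')

print(parser.parse_args([]))                               # command='version', force=False
print(parser.parse_args(['apply', '-f']))                  # command='apply', force=True
print(parser.parse_args(['check', '--config', 'x.conf']))  # config='x.conf'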
dparks1134/UniteM
unitem/bin.py
Python
gpl-3.0
11,713
0.002134
###############################################################################
#                                                                             #
#    This program is free software: you can redistribute it and/or modify    #
#    it under the terms of the GNU General Public License as published by    #
#    the Free Software Foundation, either version 3 of the License, or       #
#    (at your option) any later version.                                     #
#                                                                             #
#    This program is distributed in the hope that it will be useful,         #
#    but WITHOUT ANY WARRANTY; without even the implied warranty of          #
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the           #
#    GNU General Public License for more details.                            #
#                                                                             #
#    You should have received a copy of the GNU General Public License       #
#    along with this program. If not, see <http://www.gnu.org/licenses/>.    #
#                                                                             #
###############################################################################

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import logging

from unitem.common import run_cmd
from unitem.utils import check_on_path, make_sure_path_exists


class Bin():
    """Apply binning methods to an assembly."""

    def __init__(self, assembly_file, output_dir, min_contig_len, cpus):
        """Initialization."""
        self.logger = logging.getLogger('timestamp')

        self.assembly_file = assembly_file
        self.output_dir = output_dir
        self.min_contig_len = min_contig_len
        self.cpus = cpus

        self.failed_methods = []

    def coverage(self, bam_files, cov_file):
        """Calculate coverage file for use by different binning methods."""

        self.bam_files = bam_files

        if cov_file:
            self.logger.info(f'Using coverage information in {cov_file}.')

            # check coverage file has correct format
            header = open(cov_file).readline().split('\t')
            if not ('contig' in header[0].lower()
                    and 'len' in header[1].lower()
                    and ('depth' in header[2].lower() or 'mean' in header[2].lower())):
                self.logger.error(
                    'Provided coverage file does not have the correct headers.')
                self.logger.error(
                    "Coverage file must have the format produced by 'jgi_summarize_bam_contig_depths'.")
                sys.exit(1)

            self.cov_file = cov_file
            if bam_files:
                self.logger.warning('BAM files are being ignored.')
        else:
            found = check_on_path(
                'jgi_summarize_bam_contig_depths', exit_on_fail=False)
            if not found:
                self.logger.error(
                    'jgi_summarize_bam_contig_depths is not on the system path.')
                self.logger.error('This script is provided with MetaBAT v2.')
                sys.exit(1)

            self.logger.info(
                f'Calculating coverage for {len(bam_files)} BAM files.')
            self.logger.info("Running jgi_summarize_bam_contig_depths script.")

            self.cov_file = os.path.join(self.output_dir, 'coverage.tsv')
            cmd = 'jgi_summarize_bam_contig_depths --minContigLength {} --minContigDepth 1 --outputDepth {} {}'.format(
                self.min_contig_len,
                self.cov_file,
                ' '.join(bam_files))
            run_cmd(cmd, program='jgi_summarize_bam_contig_depths')

    def check_on_path(self, options):
        """Check that all binning methods are on the system path."""

        if options.mb2:
            self.logger.info('Checking MetaBAT v2 dependencies.')
            check_on_path('metabat2')

        if options.gm2:
            self.logger.info('Checking GroopM v2 dependencies.')
            check_on_path('groopm2')

        if options.max40 or options.max107:
            self.logger.info('Checking MaxBin dependencies.')
            check_on_path('run_MaxBin.pl')

        if (options.mb_verysensitive
                or options.mb_sensitive
                or options.mb_specific
                or options.mb_veryspecific
                or options.mb_superspecific):
            self.logger.info('Checking MetaBAT dependencies.')
            check_on_path('metabat1')

    def run(self, options):
        """Run binning methods."""

        bin_file = os.path.join(self.output_dir, 'bin_dirs.tsv')
        bin_file_out = open(bin_file, 'w')

        if options.mb2:
            self.metabat2(bin_file_out)

        if options.gm2:
            self.groopm2(bin_file_out)

        if options.max40:
            self.maxbin(bin_file_out, 40)

        if options.max107:
            self.maxbin(bin_file_out, 107)

        if options.mb_verysensitive:
            self.metabat(bin_file_out, 'verysensitive')

        if options.mb_sensitive:
            self.metabat(bin_file_out, 'sensitive')

        if options.mb_specific:
            self.metabat(bin_file_out, 'specific')

        if options.mb_veryspecific:
            self.metabat(bin_file_out, 'veryspecific')

        if options.mb_superspecific:
            self.metabat(bin_file_out, 'superspecific')

        bin_file_out.close()

        self.logger.info(
            f'File with location of bin directories written to {bin_file}.')

        if self.failed_methods:
            self.logger.warning(
                f'The following methods failed to run: {" ".join(self.failed_methods)}')

    def _run_method(self, cmd, bin_dir, bin_file_out, binning_method):
        """Run binning method."""

        run_cmd(cmd, program=binning_method)
        bin_file_out.write(f'{binning_method}\t{os.path.abspath(bin_dir)}\n')

    def metabat2(self, bin_file_out):
        """Run MetaBAT v2."""

        self.logger.info("Running MetaBAT v2.")
        bin_dir = os.path.join(self.output_dir, 'metabat2')
        bin_prefix = os.path.join(bin_dir, 'mb2')
        cmd = 'metabat2 -t {} -m {} -i {} -a {} -o {}'.format(
            self.cpus,
            self.min_contig_len,
            self.assembly_file,
            self.cov_file,
            bin_prefix)
        self._run_method(cmd, bin_dir, bin_file_out, 'metabat2')

    def metabat(self, bin_file_out, preset):
        """Run MetaBAT."""

        self.logger.info(f"Running MetaBAT v1 with the {preset} preset.")
        bin_dir = os.path.join(self.output_dir, f'metabat_{preset}')
        bin_prefix = os.path.join(bin_dir, f'mb_{preset}')
        cmd = 'metabat1 -t {} -m {} -i {} -a {} -o {} --{}'.format(
            self.cpus,
            self.min_contig_len,
            self.assembly_file,
            self.cov_file,
            bin_prefix,
            preset)
        self._run_method(cmd, bin_dir, bin_file_out, f'metabat_{preset}')

    def groopm2(self, bin_file_out):
        """Run GroopM v2."""

        self.logger.info("Running GroopM v2 parse.")
        bin_dir = os.path.join(self.output_dir, 'groopm2')
        make_sure_path_exists(bin_dir)
        output_db = os.
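Editor's sketch: the header validation in coverage() applied to a sample first line of a jgi_summarize_bam_contig_depths output file (the BAM file name is hypothetical):

header = 'contigName\tcontigLen\ttotalAvgDepth\tsample1.bam'.split('\t')
ok = ('contig' in header[0].lower()
      and 'len' in header[1].lower()
      and ('depth' in header[2].lower() or 'mean' in header[2].lower()))
print(ok)  # True - this header passes the format check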
weixsong/algorithm
leetcode/123.py
Python
mit
1,949
0.00667
#!/usr/bin/env python
"""
Say you have an array for which the ith element is the price of a given
stock on day i.

Design an algorithm to find the maximum profit. You may complete at most
two transactions.

Note:
You may not engage in multiple transactions at the same time (ie, you must
sell the stock before you buy again).
"""


class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int

        Time: O(n^2), times out
        """
        if prices is None or len(prices) == 0:
            return 0

        profit = 0
        for i in range(len(prices)):
            p1 = self.max_p(prices, 0, i)
            p2 = self.max_p(prices, i, len(prices) - 1)
            profit = max(profit, p1 + p2)

        return profit

    def max_p(self, prices, start, end):
        if start >= end:
            return 0

        min_price = prices[start]
        profit = 0
        for i in range(start, end + 1):
            p = prices[i]
            profit = max(p - min_price, profit)
            min_price = min(min_price, p)

        return profit


class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int

        Time: O(n)
        Space: O(n)
        """
        if prices is None or len(prices) < 2:
            return 0

        max_profit = [0 for p in prices]
        min_price = prices[0]
        for i in range(1, len(prices)):
            p = prices[i]
            max_profit[i] = max(max_profit[i - 1], p - min_price)
            min_price = min(min_price, p)

        max_price = prices[-1]
        profit = 0
        for i in range(len(prices) - 2, -1, -1):
            p = prices[i]
            profit = max(max_price - p + max_profit[i], profit)
            max_price = max(p, max_price)

        return profit


if __name__ == '__main__':
    so = Solution()
    print so.maxProfit([2, 1, 2, 0, 1])
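Editor's note: the O(n) solution above is a two-pass dynamic program: a forward pass records the best single-transaction profit achievable by day i, a backward pass tracks the best sale after day i, and the two halves are combined at every split point. A compact Python 3 restatement with a worked input:

def max_profit_two_transactions(prices):
    if len(prices) < 2:
        return 0
    left = [0] * len(prices)              # best profit using days 0..i only
    lo = prices[0]
    for i in range(1, len(prices)):
        left[i] = max(left[i - 1], prices[i] - lo)
        lo = min(lo, prices[i])
    best, hi = 0, prices[-1]              # scan right-to-left for the second trade
    for i in range(len(prices) - 2, -1, -1):
        best = max(best, hi - prices[i] + left[i])
        hi = max(hi, prices[i])
    return best

print(max_profit_two_transactions([2, 1, 2, 0, 1]))  # 2: buy@1 sell@2, then buy@0 sell@1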
trailofbits/mcsema
tools/mcsema_disass/ida7/disass.py
Python
apache-2.0
2,610
0.015326
#!/usr/bin/env python
# Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import collections
import itertools
import os
import subprocess
import sys
import traceback

try:
  from shlex import quote
except:
  from pipes import quote


def execute(args, command_args):
  """Execute IDA Pro as a subprocess, passing this file in as a batch-mode
  script for IDA to run. This forwards along arguments passed to
  `mcsema-disass` down into the IDA script. `command_args` contains unparsed
  arguments passed to `mcsema-disass`. This script may handle extra
  arguments."""

  ida_disass_path = os.path.abspath(__file__)
  ida_dir = os.path.dirname(ida_disass_path)
  ida_get_cfg_path = os.path.join(ida_dir, "get_cfg.py")

  env = {}
  env["IDALOG"] = os.devnull
  env["TVHEADLESS"] = "1"
  env["HOME"] = os.path.expanduser('~')
  env["IDA_PATH"] = os.path.dirname(args.disassembler)
  env["PYTHONPATH"] = os.path.dirname(ida_dir)
  if "SystemRoot" in os.environ:
    env["SystemRoot"] = os.environ["SystemRoot"]

  script_cmd = []
  script_cmd.append(ida_get_cfg_path)
  script_cmd.append("--output")
  script_cmd.append(args.output)
  script_cmd.append("--log_file")
  script_cmd.append(args.log_file)
  script_cmd.append("--arch")
  script_cmd.append(args.arch)
  script_cmd.append("--os")
  script_cmd.append(args.os)
  script_cmd.append("--entrypoint")
  script_cmd.append(args.entrypoint)
  script_cmd.extend(command_args)  # Extra, script-specific arguments.

  cmd = []
  cmd.append(quote(args.disassembler))  # Path to IDA.
  cmd.append("-B")  # Batch mode.
  cmd.append("-S\"{}\"".format(" ".join(script_cmd)))
  cmd.append(quote(args.binary))

  try:
    with open(os.devnull, "w") as devnull:
      return subprocess.check_call(
          " ".join(cmd),
          env=env,
          stdin=None,
          stdout=devnull,  # Necessary.
          stderr=sys.stderr,  # For enabling `--log_file /dev/stderr`.
          shell=True,  # Necessary.
          cwd=os.path.dirname(__file__))

  except:
    sys.stderr.write(traceback.format_exc())
    return 1
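Editor's note: the quote() imported above (shlex.quote on Python 3, pipes.quote on Python 2) is what keeps the shell=True command line safe when the IDA path or binary path contains spaces:

from shlex import quote  # Python 3
print(quote('/opt/ida 7.0/idat64'))  # '/opt/ida 7.0/idat64' - single-quoted, space preserved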
bosswissam/shippo-python
shippo/api_requestor.py
Python
mit
7,865
0.000127
import calendar
import datetime
import platform
import time
import os
import ssl
import socket
import urllib
import urlparse
import warnings

import shippo
from shippo import error, http_client, version, util, certificate_blacklist


def _encode_datetime(dttime):
    if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
        utc_timestamp = calendar.timegm(dttime.utctimetuple())
    else:
        utc_timestamp = time.mktime(dttime.timetuple())

    return int(utc_timestamp)


def _api_encode(data):
    for key, value in data.iteritems():
        key = util.utf8(key)
        if value is None:
            continue
        elif hasattr(value, 'shippo_id'):
            yield (key, value.shippo_id)
        elif isinstance(value, list) or isinstance(value, tuple):
            for subvalue in value:
                yield ("%s[]" % (key,), util.utf8(subvalue))
        elif isinstance(value, dict):
            subdict = dict(('%s[%s]' % (key, subkey), subvalue)
                           for subkey, subvalue in value.iteritems())
            for subkey, subvalue in _api_encode(subdict):
                yield (subkey, subvalue)
        elif isinstance(value, datetime.datetime):
            yield (key, _encode_datetime(value))
        else:
            yield (key, util.utf8(value))


def _build_api_url(url, query):
    scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)

    if base_query:
        query = '%s&%s' % (base_query, query)

    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))


class APIRequestor(object):
    _CERTIFICATE_VERIFIED = False

    def __init__(self, auth=None, client=None):
        self.auth = auth

        from shippo import verify_ssl_certs

        self._client = client or http_client.new_default_http_client(
            verify_ssl_certs=verify_ssl_certs)

    def request(self, method, url, params=None):
        self._check_ssl_cert()
        rbody, rcode, my_auth = self.request_raw(
            method.lower(), url, params)
        resp = self.interpret_response(rbody, rcode)
        return resp, my_auth

    def handle_api_error(self, rbody, rcode, resp):
        try:
            err = resp['error']
        except (KeyError, TypeError):
            raise error.APIError(
                "Invalid response object from API: %r (HTTP response code "
                "was %d)" % (rbody, rcode),
                rbody, rcode, resp)

        if rcode in [400, 404]:
            raise error.InvalidRequestError(
                err.get('message'), err.get('param'), rbody, rcode, resp)
        elif rcode == 401:
            raise error.AuthenticationError(
                err.get('message'), rbody, rcode, resp)
        elif rcode == 402:
            raise error.CardError(err.get('message'), err.get('param'),
                                  err.get('code'), rbody, rcode, resp)
        else:
            raise error.APIError(err.get('message'), rbody, rcode, resp)

    def request_raw(self, method, url, params=None):
        """
        Mechanism for issuing an API call
        """
        from shippo import api_version

        if self.auth:
            my_auth = self.auth
        else:
            from shippo import auth
            my_auth = auth

        if my_auth is None:
            raise error.AuthenticationError(
                'No API key provided. (HINT: set your API key using '
                '"shippo.auth = (<username>, <password>)"). You can generate API keys '
                'from the Shippo web interface. See https://goshippo.com/api '
                'for details, or email support@shippo.com if you have any '
                'questions.')

        abs_url = '%s%s' % (shippo.api_base, url)

        encoded_params = urllib.urlencode(list(_api_encode(params or {})))

        if method == 'get' or method == 'delete':
            if params:
                abs_url = _build_api_url(abs_url, encoded_params)
            post_data = None
        elif method == 'post':
            post_data = encoded_params
        else:
            raise error.APIConnectionError(
                'Unrecognized HTTP method %r. This may indicate a bug in the '
                'Shippo bindings. Please contact support@shippo.com for '
                'assistance.' % (method,))

        ua = {
            'bindings_version': version.VERSION,
            'lang': 'python',
            'publisher': 'shippo',
            'httplib': self._client.name,
        }
        for attr, func in [['lang_version', platform.python_version],
                           ['platform', platform.platform],
                           ['uname', lambda: ' '.join(platform.uname())]]:
            try:
                val = func()
            except Exception, e:
                val = "!! %s" % (e,)
            ua[attr] = val

        headers = {
            'X-Shippo-Client-User-Agent': util.json.dumps(ua),
            'User-Agent': 'Shippo/v1 PythonBindings/%s' % (version.VERSION,),
            'Authorization': 'Bearer %s' % (my_auth,)
        }

        if api_version is not None:
            headers['Shippo-Version'] = api_version

        rbody, rcode = self._client.request(
            method, abs_url, headers, post_data)

        util.logger.info(
            'API request to %s returned (response code, response body) of '
            '(%d, %r)',
            abs_url, rcode, rbody)
        return rbody, rcode, my_auth

    def interpret_response(self, rbody, rcode):
        try:
            if hasattr(rbody, 'decode'):
                rbody = rbody.decode('utf-8')
            resp = util.json.loads(rbody)
        except Exception:
            raise error.APIError(
                "Invalid response body from API: %s "
                "(HTTP response code was %d)" % (rbody, rcode),
                rbody, rcode)
        if not (200 <= rcode < 300):
            self.handle_api_error(rbody, rcode, resp)
        return resp

    def _check_ssl_cert(self):
        """Preflight the SSL certificate presented by the backend.

        This isn't 100% bulletproof, in that we're not actually validating the
        transport used to communicate with Shippo, merely that the first
        attempt to does not use a revoked certificate.

        Unfortunately the interface to OpenSSL doesn't make it easy to check
        the certificate before sending potentially sensitive data on the wire.
        This approach raises the bar for an attacker significantly."""

        from shippo import verify_ssl_certs

        if verify_ssl_certs and not self._CERTIFICATE_VERIFIED:
            uri = urlparse.urlparse(shippo.api_base)
            try:
                certificate = ssl.get_server_certificate(
                    (uri.hostname, uri.port or 443))
                der_cert = ssl.PEM_cert_to_DER_cert(certificate)
            except socket.error, e:
                raise error.APIConnectionError(e)
            except TypeError:
                # The Google App Engine development server blocks the C socket
                # module which causes a type error when using the SSL library
                if ('APPENGINE_RUNTIME' in os.environ and
                        'Dev' in os.environ.get('SERVER_SOFTWARE', '')):
                    self._CERTIFICATE_VERIFIED = True
                    warnings.warn(
                        'We were unable to verify Shippo\'s SSL certificate '
                        'due to a bug in the Google App Engine development '
                        'server. Please alert us immediately at '
                        'support@shippo.com if this message appears in your '
                        'production logs.')
                    return
                else:
                    raise

            self._CERTIFICATE_VERIFIED = certificate_blacklist.verify(
                uri.hostname, der_cert)
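Editor's sketch: a simplified, self-contained Python 3 version of the nested-parameter flattening that _api_encode performs, to make the 'parent[child]' / 'list[]' key-naming scheme concrete (flatten_params is an illustrative name, not part of the shippo package):

def flatten_params(data, prefix=''):
    for key, value in data.items():
        name = '%s[%s]' % (prefix, key) if prefix else key
        if value is None:
            continue          # _api_encode also skips None values
        if isinstance(value, dict):
            yield from flatten_params(value, name)
        elif isinstance(value, (list, tuple)):
            for item in value:
                yield ('%s[]' % name, item)
        else:
            yield (name, value)

print(list(flatten_params({'address': {'city': 'SF'}, 'tags': ['a', 'b']})))
# [('address[city]', 'SF'), ('tags[]', 'a'), ('tags[]', 'b')]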
kaiueo/octs
octs/teacher/views.py
Python
bsd-3-clause
34,607
0.014882
from flask import Blueprint, flash, redirect, render_template, request, url_for, send_from_directory, abort, make_response, send_file, session
from octs.user.models import Course, Task, User, Message, Team, TeamUserRelation, File, Source, Term, TaskTeamRelation, Tag, UserScore
from .forms import CourseForm, TaskForm, FileForm, TaskScoreForm, RejectReasonForm
from octs.database import db
from flask_login import current_user
from octs.extensions import data_uploader
import time
import os, zipfile
from pypinyin import lazy_pinyin
import xlwt

blueprint = Blueprint('teacher', __name__, url_prefix='/teacher', static_folder='../static')

@blueprint.route('/<teacherid>/course/')
def course(teacherid):
    teacher = User.query.filter_by(id=teacherid).first()
    courseList = teacher.courses
    term = Term.query.order_by(Term.id.desc()).first()
    return render_template('teacher/course.html', list=courseList, term=term)

@blueprint.route('/<courseid>/task/<taskid>')
def task_detail(courseid, taskid):
    taskList = Task.query.filter_by(id=taskid).all()
    return render_template('teacher/taskdetail.html', list=taskList, courseid=courseid)

@blueprint.route('/<teacherid>/course/edit/<id>', methods=['GET', 'POST'])
def course_edit(teacherid, id):
    course = Course.query.filter_by(id=id).first()
    form = CourseForm()
    if form.validate_on_submit():
        course.course_introduction = form.course_introduction.data
        course.course_outline = form.course_outline.data
        userlist = User.query.all()
        for user in userlist:
            user.team_min = form.low_member.data
            user.team_max = form.high_member.data
            db.session.add(user)
        db.session.add(course)
        db.session.commit()
        return redirect(url_for('teacher.course', teacherid=teacherid))
    form.coursename.data = course.name
    form.credit.data = course.credit
    form.location.data = course.location
    form.start_time.data = course.start_time
    form.course_introduction.data = course.course_introduction
    form.course_outline.data = course.course_outline
    user = User.query.filter(User.id == teacherid).first()
    form.low_member.data = user.team_min
    form.high_member.data = user.team_max
    return render_template('teacher/course_edit.html', form=form)

@blueprint.route('/course/student/<id>')
def student(id):
    course = Course.query.filter_by(id=id).first()
    studentList = course.users
    return render_template('teacher/student.html', list=studentList)

@blueprint.route('/mainpage/')
def home():
    return render_template('teacher/mainpage.html')

@blueprint.route('/<courseid>/task')
def task(courseid):
    taskList = Task.query.filter_by(course_id=courseid).all()
    return render_template('teacher/task.html', list=taskList, courseid=courseid)

@blueprint.route('/<courseid>/task/add', methods=['GET', 'POST'])
def add(courseid):
    form = TaskForm()
    if form.validate_on_submit():
        task = Task()
        task.name = form.taskname.data
        task.start_time = form.starttime.data
        task.end_time = form.endtime.data
        task.submit_num = form.subnum.data
        task.weight = form.weight.data
        task.teacher = current_user.name
        task.content = form.content.data
        course = Course.query.filter_by(id=courseid).first()
        course.tasks.append(task)
        teams = course.teams
        for team in teams:
            ttr = TaskTeamRelation()
            ttr.team = team
            ttr.task = task
            db.session.add(ttr)
        db.session.add(task)
        db.session.add(course)
        db.session.commit()
        return redirect(url_for('teacher.task', courseid=courseid))
    return render_template('teacher/add.html', form=form, courseid=courseid)

@blueprint.route('/<courseid>/task/edit/<userid>/<id>', methods=['GET', 'POST'])
def task_edit(courseid, userid, id):
    form = TaskForm()
    task = Task.query.filter_by(id=id).first()
    if form.validate_on_submit():
        flag = True
        task.name = form.taskname.data
        task.start_time = form.starttime.data
        task.end_time = form.endtime.data
        task.content = form.content.data
        task.submit_num = form.subnum.data
        task.weight = form.weight.data
        db.session.add(task)
        db.session.commit()
        return redirect(url_for('teacher.task', courseid=courseid))
    form.taskname.data = task.name
    form.starttime.data = task.start_time
    form.endtime.data = task.end_time
    form.content.data = task.content
    form.subnum.data = task.submit_num
    form.weight.data = task.weight
    return render_template('teacher/edit.html', form=form, courseid=courseid, taskid=id)

@blueprint.route('/<courseid>/task/delete/<taskid>', methods=['GET', 'POST'])
def delete(courseid, taskid):
    file_records = File.query.filter_by(task_id=taskid).all()
    for file_record in file_records:
        os.remove(file_record.path)
        db.session.delete(file_record)
    task = Task.query.filter_by(id=taskid).first()
    ttrs = TaskTeamRelation.query.filter_by(task_id=task.id).all()
    for ttr in ttrs:
        db.session.delete(ttr)
    db.session.delete(task)
    db.session.commit()
    flash('Deleted successfully')
    return redirect(url_for('teacher.task', courseid=courseid))

@blueprint.route('/team', methods=['GET', 'POST'])
def team():
    teamlist = Team.query.join(TeamUserRelation, TeamUserRelation.team_id == Team.id).filter(
        TeamUserRelation.team_id == Team.id).filter(TeamUserRelation.is_master == True).join(
        User, TeamUserRelation.user_id == User.id).filter(TeamUserRelation.user_id == User.id).add_columns(
        Team.name, User.username, Team.status, Team.id, User.user_id, User.in_team)
    return render_template('teacher/team.html', list=teamlist)

@blueprint.route('/task/score<taskid>/download')
def score_download(taskid):
    teamidList = TaskTeamRelation.query.filter_by(task_id=taskid).all()
    teams = []
    for teamid in teamidList:
        team = Team.query.filter_by(id=teamid.team_id).first()
        teams.append(team)
    task = Task.query.filter_by(id=taskid).first()
    book = xlwt.Workbook()
    alignment = xlwt.Alignment()  # Create Alignment
    alignment.horz = xlwt.Alignment.HORZ_CENTER  # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
    alignment.vert = xlwt.Alignment.VERT_CENTER  # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
    style = xlwt.XFStyle()  # Create Style
    style.alignment = alignment  # Add Alignment to Style
    sheet1 = book.add_sheet('Assignment info (' + task.name + ')', cell_overwrite_ok=True)
    row0 = ['Team ID', 'Team name', 'Assignment score']
    for i in range(0, len(row0)):
        sheet1.write(0, i, row0[i], style)
    row_num = 1
    for team in teams:
        sheet1.write(row_num, 0, team.id, style)
        sheet1.write(row_num, 1, team.name, style)
        score = TaskTeamRelation.query.filter(TaskTeamRelation.team_id == team.id).filter(
            TaskTeamRelation.task_id == taskid).first()
        sheet1.write(row_num, 2, score.score, style)
        row_num = row_num + 1
    filename = 'score_table_' + str(time.time()) + '.xls'
    book.save(os.path.join(data_uploader.path('', folder='tmp'), filename))
    return send_from_directory(data_uploader.path('', folder='tmp'), filename, as_attachment=True)

@blueprint.route('/team/download')
def team_download():
    teams = Team.query.filter_by(status=3).all()
    book = xlwt.Workbook()
    alignment = xlwt.Alignment()  # Create Alignment
    alignment.horz = xlwt.Alignment.HORZ_CENTER  # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
    alignment.vert = xlwt.Alignment.VERT_CENTER  # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
    style = xlwt.XFStyle()  # Create Style
    style.alignment = alignment  # Add Alignment to Style
    sheet1 = book.add_sheet('Team info', cell_overwrite_ok=True)
    row0 = ['Team ID', 'Team name', 'Name', 'Student ID', 'Gender', 'Master']
    for i in range(0, len(row0)):
        sheet1.write(0, i, row0[i])
    row_num = 1
    for team in teams:
        turs = T
oskgeek/xmccamp
xmccamp/xmccamp/urls.py
Python
mit
338
0.011834
from django.conf.urls import patterns, include, url
from django.conf import settings

urlpatterns = patterns('',
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    url(r'', include('controller.urls')),
    url('^.*/$', 'controller.views.pxlogin'),
)
JesusAMR/ProgramasUNI
testing.py
Python
gpl-3.0
101
0.009901
#! /usr/bin/python
import re

input = raw_input()
result = re.findall(r"[0-9]", input)
print(result)
KohlsTechnology/ansible
lib/ansible/module_utils/k8s/lookup.py
Python
gpl-3.0
7,677
0.001693
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import, division, print_function

import json
import os

from ansible.module_utils.k8s.common import OpenShiftAnsibleModuleMixin, DateTimeEncoder, remove_secret_data, to_snake
from ansible.module_utils.k8s.helper import AUTH_ARG_SPEC

try:
    from openshift.helper.kubernetes import KubernetesObjectHelper
    from openshift.helper.exceptions import KubernetesException
    HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
    HAS_K8S_MODULE_HELPER = False

try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False


class KubernetesLookup(object):

    def __init__(self):
        if not HAS_K8S_MODULE_HELPER:
            raise Exception(
                "Requires the OpenShift Python client. Try `pip install openshift`"
            )

        if not HAS_YAML:
            raise Exception(
                "Requires PyYAML. Try `pip install PyYAML`"
            )

        self.kind = None
        self.name = None
        self.namespace = None
        self.api_version = None
        self.label_selector = None
        self.field_selector = None
        self.include_uninitialized = None
        self.resource_definition = None
        self.helper = None
        self.connection = {}

    def run(self, terms, variables=None, **kwargs):
        self.kind = kwargs.get('kind')
        self.name = kwargs.get('resource_name')
        self.namespace = kwargs.get('namespace')
        self.api_version = kwargs.get('api_version', 'v1')
        self.label_selector = kwargs.get('label_selector')
        self.field_selector = kwargs.get('field_selector')
        self.include_uninitialized = kwargs.get('include_uninitialized', False)

        resource_definition = kwargs.get('resource_definition')
        src = kwargs.get('src')
        if src:
            resource_definition = self.load_resource_definition(src)
        if resource_definition:
            self.params_from_resource_definition(resource_definition)

        if not self.kind:
            raise Exception(
                "Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
                "using the 'resource_definition' parameter."
            )

        self.kind = to_snake(self.kind)
        self.helper = self.get_helper(self.api_version, self.kind)

        auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
                     'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')

        for arg in AUTH_ARG_SPEC:
            if arg in auth_args and kwargs.get(arg) is not None:
                self.connection[arg] = kwargs.get(arg)

        try:
            self.helper.set_client_config(**self.connection)
        except Exception as exc:
            raise Exception(
                "Client authentication failed: {0}".format(exc.message)
            )

        if self.name:
            return self.get_object()

        return self.list_objects()

    def get_helper(self, api_version, kind):
        try:
            helper = KubernetesObjectHelper(api_version=api_version, kind=kind, debug=False)
            helper.get_model(api_version, kind)
            return helper
        except KubernetesException as exc:
            raise Exception("Error initializing helper: {0}".format(exc.message))

    def load_resource_definition(self, src):
        """ Load the requested src path """
        path = os.path.normpath(src)
        if not os.path.exists(path):
            raise Exception("Error accessing {0}. Does the file exist?".format(path))
        try:
            result = yaml.safe_load(open(path, 'r'))
        except (IOError, yaml.YAMLError) as exc:
            raise Exception("Error loading resource_definition: {0}".format(exc))
        return result

    def params_from_resource_definition(self, defn):
        if defn.get('apiVersion'):
            self.api_version = defn['apiVersion']
        if defn.get('kind'):
            self.kind = defn['kind']
        if defn.get('metadata', {}).get('name'):
            self.name = defn['metadata']['name']
        if defn.get('metadata', {}).get('namespace'):
            self.namespace = defn['metadata']['namespace']

    def get_object(self):
        """ Fetch a named object """
        try:
            result = self.helper.get_object(self.name, self.namespace)
        except KubernetesException as exc:
            raise Exception('Failed to retrieve requested object: {0}'.format(exc.message))
        response = []
        if result is not None:
            # Convert Datetime objects to ISO format
            result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
            if self.kind == 'secret':
                remove_secret_data(result_json)
            response.append(result_json)
        return response

    def list_objects(self):
        """ Query for a set of objects """
        if self.namespace:
            method_name = 'list_namespaced_{0}'.format(self.kind)
            try:
                method = self.helper.lookup_method(method_name=method_name)
            except KubernetesException:
                raise Exception(
                    "Failed to find method {0} for API {1}".format(method_name, self.api_version)
                )
        else:
            method_name = 'list_{0}_for_all_namespaces'.format(self.kind)
            try:
                method = self.helper.lookup_method(method_name=method_name)
            except KubernetesException:
                method_name = 'list_{0}'.format(self.kind)
                try:
                    method = self.helper.lookup_method(method_name=method_name)
                except KubernetesException:
                    raise Exception(
                        "Failed to find method for API {0} and Kind {1}".format(self.api_version, self.kind)
                    )

        params = {}
        if self.field_selector:
            params['field_selector'] = self.field_selector
        if self.label_selector:
            params['label_selector'] = self.label_selector
        params['include_uninitialized'] = self.include_uninitialized

        if self.namespace:
            try:
                result = method(self.namespace, **params)
            except KubernetesException as exc:
                raise Exception(exc.message)
        else:
            try:
                result = method(**params)
            except KubernetesException as exc:
                raise Exception(exc.message)

        response = []
        if result is not None:
            # Convert Datetime objects to ISO format
            result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
            response = result_json.get('items', [])
            if self.kind == 'secret':
                for item in response:
                    remove_secret_data(item)

        return response


class OpenShiftLookup(OpenShiftAnsibleModuleMixin, KubernetesLookup):
    pass
mckayward/floyd-cli
floyd/model/experiment_config.py
Python
apache-2.0
507
0
from marshmallow import Schema, fields, post_load

from floyd.model.base import BaseModel


class ExperimentConfigSchema(Schema):
    name = fields.Str()
    family_id = fields.Str()

    @post_load
    def make_access_token(self, data):
        return ExperimentConfig(**data)


class ExperimentConfig(BaseModel):
    schema = ExperimentConfigSchema(strict=True)

    def __init__(self, name, family_id=None):
        self.name = name
        self.family_id = family_id
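Editor's sketch of the @post_load pattern above, assuming marshmallow 2.x (which matches the strict=True usage; Config/ConfigSchema are illustrative names, not from the repo):

from marshmallow import Schema, fields, post_load

class Config(object):
    def __init__(self, name, family_id=None):
        self.name = name
        self.family_id = family_id

class ConfigSchema(Schema):
    name = fields.Str()
    family_id = fields.Str()

    @post_load
    def make_config(self, data):
        return Config(**data)  # runs after validation: dict -> object

schema = ConfigSchema(strict=True)
obj = schema.load({'name': 'exp1', 'family_id': 'fam42'}).data  # 2.x load() returns (data, errors)
print(obj.name, obj.family_id)  # exp1 fam42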
garrettcap/Bulletproof-Backup
gooey/mockapplications/mockapp.py
Python
gpl-2.0
2,121
0.016973
'''
Created on Dec 21, 2013

@author: Chris
'''

import sys
import hashlib
from time import time as _time
from time import sleep as _sleep

from argparse import ArgumentParser

from gooey import Gooey


@Gooey
def main():
    desc = "Mock application to test Gooey's functionality"
    file_help_msg = "Name of the file you want to read"
    my_cool_parser = ArgumentParser(description=desc)
    my_cool_parser.add_argument("filename", help=file_help_msg)  # positional
    my_cool_parser.add_argument("outfile", help="Name of the file where you'll save the output")  # positional
    my_cool_parser.add_argument('-c', '--countdown', default=10, type=int, help='sets the time to count down from')
    my_cool_parser.add_argument("-s", "--showtime", action="store_true", help="display the countdown timer")
    my_cool_parser.add_argument("-d", "--delay", action="store_true", help="Delay execution for a bit")
    my_cool_parser.add_argument('--verbose', '-v', action='count')
    my_cool_parser.add_argument("-o", "--obfuscate", action="store_true", help="obfuscate the countdown timer!")
    my_cool_parser.add_argument('-r', '--recursive', choices=['yes', 'no'], help='Recurse into subfolders')
    my_cool_parser.add_argument("-w", "--writelog", default="No, NOT whatevs", help="write log to some file or something")
    my_cool_parser.add_argument("-e", "--expandAll", action="store_true", help="expand all processes")
    print 'inside of main(), my_cool_parser =', my_cool_parser

    args = my_cool_parser.parse_args()

    print sys.argv
    print args.countdown
    print args.showtime

    start_time = _time()
    print 'Counting down from %s' % args.countdown
    while _time() - start_time < args.countdown:
        if args.showtime:
            print 'printing message at: %s' % _time()
        else:
            print 'printing message at: %s' % hashlib.md5(str(_time())).hexdigest()
        _sleep(.5)
    print 'Finished running the program. Byeeeeesss!'
    # raise ValueError("Something has gone wrong! AHHHHHHHHHHH")


if __name__ == '__main__':
    # sys.argv.extend('asdf -c 5 -s'.split())
    # print sys.argv
    main()
sfiera/googletest
test/gtest_help_test.py
Python
bsd-3-clause
5,754
0.003823
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests the --help flag of Google C++ Testing Framework.

SYNOPSIS
       gtest_help_test.py --build_dir=BUILD/DIR
         # where BUILD/DIR contains the built gtest_help_test_ file.
       gtest_help_test.py
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import gtest_test_utils


IS_WINDOWS = os.name == 'nt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*',
    re.DOTALL)


def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Returns:
    the exit code and the text output as a tuple.
  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.
  """

  if flag is None:
    command = [PROGRAM_PATH]
  else:
    command = [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output


class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must be skipped when
    the given flag is specified.

    Args:
      flag:  A flag to pass to the binary or None.
    """

    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    if IS_WINDOWS:
      self.assert_(CATCH_EXCEPTIONS_FLAG in output, output)
    else:
      self.assert_(CATCH_EXCEPTIONS_FLAG not in output, output)

    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag:  A flag to pass to the binary or None.
    """

    exit_code, output = RunWithFlag(flag)
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""

    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""

    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)


if __name__ == '__main__':
  gtest_test_utils.Main()
nesdis/djongo
tests/django_tests/tests/v22/tests/csrf_tests/csrf_token_error_handler_urls.py
Python
agpl-3.0
75
0
urlpatterns = []

handler404 = 'csrf_tests.views.csrf_token_error_handler'
nishantsingla/optcoretech
docs/conf.py
Python
mit
7,786
0.007449
# -*- coding: utf-8 -*-
#
# Optcoretech documentation build configuration file, created by
# sphinx-quickstart on Mon Sep  1 14:23:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Optcoretech'
copyright = u'2014, Sheesh Mohsin'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Optcoretechdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Optcoretech.tex', u'Optcoretech Documentation',
   u'Sheesh Mohsin', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'optcoretech', u'Optcoretech Documentation',
     [u'Sheesh Mohsin'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Optcoretech', u'Optcoretech Documentation',
   u'Sheesh Mohsin', 'Optcoretech', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
CKPalk/MachineLearning
Assignment3/Naive_Bayes/nb.py
Python
mit
2,586
0.095514
import sys

from csv_data import Data
from csv_utilities import readIntegerCSV
from csv_utilities import convertToZeroOne
from statistics import mean
from math import log


def dotProduct( arr1, arr2 ):
    return sum( [ arr1[idx] * arr2[idx] for idx in range( len( arr1 ) ) ] )
#

def dataSubsetWithY( data, y ):
    return [ row for row in data.rows if row.Y == y ]
#

def probabilityXisZero( data, idx, beta ):
    return ( 1 - probabilityXisOne( data, idx, beta ) )
#

def probabilityXisOne( data, idx, beta ):
    # Beta prior: prepend (beta - 1) pseudo-observations of each outcome
    return ( mean( ( [ 1, 0 ] * ( beta - 1 ) ) + [ row.X[ idx ] for row in data ] ) )
#

def probabilityXY( data, x, idx, y, beta ):
    return ( probabilityXisOne( dataSubsetWithY( data, y ), idx, beta )
             if x == 1 else
             probabilityXisZero( dataSubsetWithY( data, y ), idx, beta ) )
#

def probabilityYisZero( data, beta ):
    return ( 1 - probabilityYisOne( data, beta ) )
#

def probabilityYisOne( data, beta ):
    return ( mean( ( [ 1, 0 ] * ( beta - 1 ) ) + [ row.Y for row in data.rows ] ) )
#

def findBias( data, beta ):
    return ( log( probabilityYisZero( data, beta ) / probabilityYisOne( data, beta ), 2 )
             + sum( [ log( probabilityXY( data, 0, idx, 1, beta ) /
                           probabilityXY( data, 0, idx, 0, beta ), 2 )
                      for idx in range( data.attributeCount ) ] ) )
#

def findWeights( data, beta ):
    return ( [ log( probabilityXY( data, 1, idx, 1, beta ) /
                    probabilityXY( data, 1, idx, 0, beta ), 2 ) -
               log( probabilityXY( data, 0, idx, 1, beta ) /
                    probabilityXY( data, 0, idx, 0, beta ), 2 )
               for idx in range( data.attributeCount ) ] )
#

def rowPrediction( X, W, b ):
    return ( 1 if ( dotProduct( X, W ) + b >= 0 ) else 0 )
#

def getResults( testing_data, W, b ):
    return ( len( [ 1 for row in testing_data.rows
                    if row.Y == rowPrediction( row.X, W, b ) ] )
             / len( testing_data.rows ) )
#

def printModel( model_stream, attrs, W, b ):
    model_stream.write( "{}\n".format( round( b, 4 ) ) )
    for idx, attr in enumerate( attrs ):
        model_stream.write( "{:16}\t{}\n".format( attr, round( W[ idx ], 4 ) ) )


def main( argv ):
    model = None
    try:
        training_data = Data( argv[ 0 ], readIntegerCSV, convertToZeroOne )
        testing_data  = Data( argv[ 1 ], readIntegerCSV, convertToZeroOne )
        beta          = int( argv[ 2 ] )
        model         = open( argv[ 3 ], 'w+' )

        b = findBias( training_data, beta )
        W = findWeights( training_data, beta )

        rez = getResults( testing_data, W, b )
        print( rez )

        printModel( model, training_data.attributes, W, b )
    except IndexError:
        print( "ERROR: \"python3 nb.py <train> <test> <beta> <model>\"" )
    finally:
        # 'model' is only open if all four arguments were supplied
        if model is not None:
            model.close()
#

if __name__=='__main__':
    main( sys.argv[ 1: ] )
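A minimal, self-contained sketch of the smoothing used in probabilityXisOne/probabilityYisOne above; the helper name smoothed_mean is an assumption for illustration, not part of the repository:

from statistics import mean

def smoothed_mean(values, beta):
    # prepend (beta - 1) pseudo-observations of each outcome, as nb.py does;
    # beta = 2 corresponds to Laplace (add-one) smoothing
    return mean(([1, 0] * (beta - 1)) + values)

print(smoothed_mean([1, 1, 1, 1], 2))  # 5/6, not 1.0: the prior keeps estimates off the extremes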
chr15m/Infinite8BitPlatformer
engine/Progress.py
Python
gpl-3.0
960
0.039583
import os

from PodSix.Resource import *
from PodSix.Concurrent import Concurrent

class Progress(Concurrent):
	def __init__(self):
		Concurrent.__init__(self)
		self.sprite = Image(file=os.path.join("resources", "progress.png"))
		self.showing = False
		self.maximum = 1
		self.value = 0
		self.width = 142
		self.height = 20
		self.priority = 5

	def Draw(self):
		if self.showing:
			gfx.screen.fill([80, 80, 80], special_flags=BLEND_SUB)
			gfx.BlitImage(self.sprite, center=[gfx.width / 2, gfx.height / 2])
			gfx.DrawRect((gfx.width / 2 - self.width / 2,
				gfx.height / 2 - self.height / 2,
				self.width * (1 - float(self.value) / self.maximum),
				self.height), [255, 0, 0], 0)

	def Show(self, maximum=None):
		if maximum is not None:
			self.maximum = maximum
		self.showing = True

	def Hide(self):
		if self.showing:
			self.showing = False

	def Value(self, val, maximum=None):
		if maximum is not None:
			self.maximum = maximum
		self.value = val
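A hedged usage sketch of the class above; in the real game the PodSix engine initializes gfx and calls Draw() every frame, so this loop only fakes that lifecycle:

progress = Progress()            # needs resources/progress.png and an initialized gfx
progress.Show(maximum=100)
for done in range(0, 101, 10):
    progress.Value(done)         # red overlay shrinks as value approaches maximum
    progress.Draw()
progress.Hide()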
lasercar/mcedit-filters
block_elevation_stats.py
Python
mit
1,764
0.036281
# Copyright (c) 2014 Lasercar7 (@lasercar) - MIT License
# http://lasercar.github.io

#TODO: dictionary that maps block ids (to be collected) with readable names

stats = {}

def logStat(block, elevation):
	if not block in stats:
		#initialize column (a plain list under Python 2, where MCEdit filters run)
		stats[block] = map(lambda x: 0.0, range(256))
	#add to stat
	stats[block][elevation] += 1

#MCEdit user options (must be a tuple of tuples, hence the trailing comma)
inputs = (
	('Scan Radius', 100),
)

#test
logStat('Coal', 3)
logStat('Diamond', 1)
logStat('Diamond', 1)
logStat('Gold', 1)
logStat('Diamond', 0)

#init
def perform(level, box, options):
	#iterate through world and logStat(block, y)
	for x in xrange(box.minx, box.maxx):
		for z in xrange(box.minz, box.maxz):
			for y in xrange(box.miny, box.maxy):
				logStat(level.blockAt(x, y, z), y)

#calculate total blocks from scan radius, then convert raw data to percentage
options = {'Scan Radius': 100}	#temp
layerTotal = (float(options['Scan Radius']) * 2) ** 2

def percentFormat():
	for block in stats:
		i = 0
		for elevation in stats[block]:
			stats[block][i] = float(elevation)/layerTotal
			i += 1
percentFormat()

#open csv file, convert stats to data, write data to file
from os.path import expanduser, exists

def filename():
	prefix = expanduser('~') + '/Downloads/BlockElevationStats'
	postfix = '.csv'
	path = prefix + postfix
	i = 1
	while exists(path):
		i += 1
		path = prefix + str(i) + postfix
	return path

import csv
with open(filename(), 'wb') as csvFile:
	writer = csv.writer(csvFile, dialect='excel')
	#de-objectify data
	data = []
	for key, value in stats.iteritems(): # stats.items() in python 3.x
		data.append([key] + value)
	#translate column structure to row structure
	grid = map(list, zip(*data))
	#write to file
	i = -1
	for row in grid:
		if i == -1:
			writer.writerow(['elevation:'] + row)
		else:
			writer.writerow([i] + row)
		i += 1

#TODO: move all stuff, including functions, into perform()
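A standalone check of the percentage conversion above, with made-up numbers (the real layerTotal comes from the user's Scan Radius option):

scan_radius = 100.0
layer_total = (scan_radius * 2) ** 2          # blocks per elevation layer: 40000.0
raw_counts = [0.0, 4000.0, 36000.0]           # hypothetical counts at y = 0, 1, 2
print([c / layer_total for c in raw_counts])  # [0.0, 0.1, 0.9]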
kaedroho/django
django/db/backends/sqlite3/introspection.py
Python
bsd-3-clause
18,452
0.001734
import re
from collections import namedtuple

import sqlparse

from django.db.backends.base.introspection import (
    BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile

FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',))

field_size_re = _lazy_re_compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """ Extract the size number from a "varchar(11)" type name """
    m = field_size_re.search(name)
    return int(m.group(1)) if m else None


# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'bigint unsigned': 'PositiveBigIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'varchar': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        key = key.lower().split('(', 1)[0].strip()
        return self.base_data_types_reverse[key]


class DatabaseIntrospection(BaseDatabaseIntrospection):
    data_types_reverse = FlexibleFieldLookupDict()

    def get_field_type(self, data_type, description):
        field_type = super().get_field_type(data_type, description)
        if description.pk and field_type in {'BigIntegerField', 'IntegerField', 'SmallIntegerField'}:
            # No support for BigAutoField or SmallAutoField as SQLite treats
            # all integer primary keys as signed 64-bit integers.
            return 'AutoField'
        return field_type

    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name))
        return [
            FieldInfo(
                name, data_type, None, get_field_size(data_type), None, None,
                not notnull, default, pk == 1,
            )
            for cid, name, data_type, notnull, default, pk in cursor.fetchall()
        ]

    def get_sequences(self, cursor, table_name, table_fields=()):
        pk_col = self.get_primary_key_column(cursor, table_name)
        return [{'table': table_name, 'column': pk_col}]

    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute(
            "SELECT sql, type FROM sqlite_master "
            "WHERE tbl_name = %s AND type IN ('table', 'view')",
            [table_name]
        )
        create_sql, table_type = cursor.fetchone()
        if table_type == 'view':
            # It might be a view, then no results will be returned
            return relations
        results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                field_name = field_desc.split()[0].strip('"')

            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Return a list of (column_name, referenced_table_name, referenced_column_name)
        for all key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_primary_key_column(self, cursor, table_name):
        """Return the column name of the primary key for the given table."""
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute(
            "SELECT sql, type FROM sqlite_master "
            "WHERE tbl_name = %s AND type IN ('table', 'view')",
            [table_name]
        )
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        create_sql, table_type = row
        if table_type == 'view':
            # Views don't have a primary key.
            return None
        fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
        for field_desc in fields_sql.split(','):
            field_desc = field_desc.strip()
            m = re.match(r'(?:(?:["`
ImmobilienScout24/aws-monocyte
src/integrationtest/python/handler/iam_inline_policy_tests.py
Python
apache-2.0
4,364
0.000458
import json
import logging

import boto3
import unittest2
from mock import MagicMock

from monocyte.handler import iam as iam_handler


class IamInlinePolicyTests(unittest2.TestCase):
    def setUp(self):
        self.arn = ''
        logging.captureWarnings(True)
        self.iam_handler = iam_handler.InlinePolicy(MagicMock)
        self.iam_handler.dry_run = True
        self.client = boto3.client('iam')

    def _create_role(self):
        assume_role_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "lambda.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }
        self.client.create_role(
            Path='/',
            RoleName='integrationtest_role',
            AssumeRolePolicyDocument=json.dumps(assume_role_policy)
        )

    def _put_inline_role_policy(self, inline_policy):
        self.client.put_role_policy(
            RoleName='integrationtest_role',
            PolicyName='integrationtest_inline_policy',
            PolicyDocument=json.dumps(inline_policy)
        )

    def _delete_inline_role_policy(self):
        self.client.delete_role_policy(
            RoleName='integrationtest_role',
            PolicyName='integrationtest_inline_policy'
        )

    def _delete_role(self):
        self.client.delete_role(RoleName='integrationtest_role')

    def tearDown(self):
        self._delete_inline_role_policy()
        self._delete_role()

    def _uniq(self, resources):
        uniq_names = []
        for resource in resources:
            name = resource.wrapped['RoleName']
            if not name.startswith('integrationtest_role'):
                continue
            uniq_names.append(name)
        return uniq_names

    def test_wildcard_in_inline_policy_action(self):
        self._create_role()
        inline_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "*"
                    ],
                    "Resource": "arn:aws:s3:::example_bucket"
                }
            ]
        }
        self._put_inline_role_policy(inline_policy)

        unwanted_resource = self.iam_handler.fetch_unwanted_resources()
        self.assertEqual(['integrationtest_role'], self._uniq(unwanted_resource))

    def test_no_wildcard_in_inline_policy(self):
        self._create_role()
        inline_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "s3:read"
                    ],
                    "Resource": "arn:aws:s3:::example_bucket"
                }
            ]
        }
        self._put_inline_role_policy(inline_policy)

        unwanted_resource = self.iam_handler.fetch_unwanted_resources()
        self.assertEqual([], self._uniq(unwanted_resource))

    def test_wildcard_in_inline_policy_resource(self):
        self._create_role()
        inline_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "S3:read"
                    ],
                    "Resource": "*"
                }
            ]
        }
        self._put_inline_role_policy(inline_policy)

        unwanted_resource = self.iam_handler.fetch_unwanted_resources()
        self.assertEqual([], self._uniq(unwanted_resource))

    def test_wildcard_in_inline_policy_resource_and_action(self):
        self._create_role()
        inline_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "*"
                    ],
                    "Resource": "*"
                }
            ]
        }
        self._put_inline_role_policy(inline_policy)

        unwanted_resource = self.iam_handler.fetch_unwanted_resources()
        self.assertEqual(['integrationtest_role'], self._uniq(unwanted_resource))


if __name__ == "__main__":
    unittest2.main()
EuroPython/epcon
tests/test_user_login_and_registration.py
Python
bsd-2-clause
8,704
0.00023
from pytest import mark

from django.urls import reverse

from email_template.models import Email

from assopy.models import AssopyUser
from conference.accounts import PRIVACY_POLICY_CHECKBOX, PRIVACY_POLICY_ERROR
from conference.models import CaptchaQuestion
from conference.users import RANDOM_USERNAME_LENGTH
from tests.common_tools import (
    make_user,
    redirects_to,
    template_used,
    create_homepage_in_cms,
)

SIGNUP_SUCCESFUL_302 = 302
SIGNUP_FAILED_200 = 200

login_url = reverse("accounts:login")


def check_login(client, email):
    "Small helper for tests to check if login works correctly"
    response = client.post(
        login_url,
        {
            "email": email,
            "password": "password",
            "i_accept_privacy_policy": True,
        },
    )
    # redirect means successful login, 200 means errors on form
    LOGIN_SUCCESFUL_302 = 302
    assert response.status_code == LOGIN_SUCCESFUL_302
    return True


def activate_only_user():
    user = AssopyUser.objects.get()
    user.user.is_active = True
    user.user.save()


@mark.django_db
def test_user_registration(client):
    """
    Tests if users can create new account on the website
    (to buy tickets, etc).
    """
    # required for redirects to /
    create_homepage_in_cms()

    # 1. test if user can create new account
    sign_up_url = reverse("accounts:signup_step_1_create_account")
    response = client.get(sign_up_url)
    assert response.status_code == 200
    assert template_used(response, "conference/accounts/signup.html")
    assert template_used(response, "conference/accounts/_login_with_google.html")
    assert template_used(response, "conference/base.html")
    assert PRIVACY_POLICY_CHECKBOX in response.content.decode("utf-8")

    assert AssopyUser.objects.all().count() == 0

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "joedoe@example.com",
            "password1": "password",
            "password2": "password",
        },
        follow=True,
    )
    assert response.status_code == SIGNUP_FAILED_200
    assert "/privacy/" in PRIVACY_POLICY_CHECKBOX
    assert "I consent to the use of my data" in PRIVACY_POLICY_CHECKBOX
    assert response.context["form"].errors["__all__"] == [PRIVACY_POLICY_ERROR]

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "joedoe@example.com",
            "password1": "password",
            "password2": "password",
            "i_accept_privacy_policy": True,
        },
        follow=True,
    )

    # check if redirect was correct
    assert template_used(
        response, "conference/accounts/signup_please_verify_email.html"
    )
    assert template_used(response, "conference/base.html")

    user = AssopyUser.objects.get()
    assert user.name() == "Joe Doe"
    assert user.user.is_active is False

    # check if the random username was generated
    assert len(user.user.username) == RANDOM_USERNAME_LENGTH

    is_logged_in = client.login(
        email="joedoe@example.com", password="password"
    )
    assert is_logged_in is False  # user is inactive

    response = client.get("/")
    assert template_used(response, "conference/homepage/home_template.html")
    assert "Joe Doe" not in response.content.decode("utf-8")
    assert "Log out" not in response.content.decode("utf-8")

    # enable the user
    user.user.is_active = True
    user.user.save()

    is_logged_in = client.login(
        email="joedoe@example.com", password="password"
    )
    assert is_logged_in

    response = client.get("/")
    assert template_used(response, "conference/homepage/home_template.html")
    # checking if user is logged in.
    assert "Joe Doe" in response.content.decode("utf-8")


@mark.django_db
def test_393_emails_are_lowercased_and_login_is_case_insensitive(client):
    """
    https://github.com/EuroPython/epcon/issues/393

    Test if we can register new account if we use the same email with
    different case.
    """
    sign_up_url = reverse("accounts:signup_step_1_create_account")

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "JoeDoe@example.com",
            "password1": "password",
            "password2": "password",
            "i_accept_privacy_policy": True,
        },
    )
    assert response.status_code == SIGNUP_SUCCESFUL_302

    user = AssopyUser.objects.get()
    assert user.name() == "Joe Doe"
    assert user.user.email == "joedoe@example.com"

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "jOEdOE@example.com",
            "password1": "password",
            "password2": "password",
            "i_accept_privacy_policy": True,
        },
    )
    assert response.status_code == SIGNUP_FAILED_200
    assert response.context["form"].errors["email"] == ["Email already in use"]

    user = AssopyUser.objects.get()  # still only one user
    assert user.name() == "Joe Doe"
    assert user.user.email == "joedoe@example.com"

    # activate user so we can log in
    user.user.is_active = True
    user.user.save()

    # check if we can login with lowercase
    # the emails will be lowercased in db, but user is still able to log in
    # using whatever case they want
    assert check_login(client, email="JoeDoe@example.com")
    assert check_login(client, email="joedoe@example.com")
    assert check_login(client, email="JoeDoe@example.com")
    assert check_login(client, email="JOEDOE@example.com")


@mark.django_db
def test_703_test_captcha_questions(client):
    """
    https://github.com/EuroPython/epcon/issues/703
    """
    QUESTION = "Can you foo in Python?"
    ANSWER = "Yes you can"
    CaptchaQuestion.objects.create(question=QUESTION, answer=ANSWER)
    Email.objects.create(code="verify-account")

    sign_up_url = reverse("accounts:signup_step_1_create_account")

    response = client.get(sign_up_url)
    # we have question in captcha_question.initial and captcha_answer.label
    assert "captcha_question" in response.content.decode("utf-8")
    assert "captcha_answer" in response.content.decode("utf-8")
    assert response.content.decode("utf-8").count(QUESTION) == 2

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "JoeDoe@example.com",
            "password1": "password",
            "password2": "password",
            "i_accept_privacy_policy": True,
        },
    )
    assert response.status_code == SIGNUP_FAILED_200  # because missing captcha

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "JoeDoe@example.com",
            "password1": "password",
            "password2": "password",
            "captcha_question": QUESTION,
            "captcha_answer": "No you can't",
            "i_accept_privacy_policy": True,
        },
    )
    assert response.status_code == SIGNUP_FAILED_200  # because wrong answer
    wrong_answer = ["Sorry, that's a wrong answer"]
    assert response.context["form"].errors["captcha_answer"] == wrong_answer

    response = client.post(
        sign_up_url,
        {
            "first_name": "Joe",
            "last_name": "Doe",
            "email": "JoeDoe@example.com",
            "password1": "password",
            "password2": "password",
            "captcha_question": QUESTION,
            "captcha_answer": ANSWER,
            "i_accept_privacy_policy": True,
        },
    )
    assert response.status_code == SIGNUP_SUCCESFUL_302
    activate_only_user()
    assert check_login(client, email="joedoe@example.com")

    # if there are no enabled questions they don't appear on the form
    CaptchaQuestion.objects.update(enabled=False)
    response = client.get(sign_up_url)
    assert "captcha_question" not in response.content.decode("utf-8")
    assert "captc
armikhael/software-center
softwarecenter/db/enquire.py
Python
gpl-3.0
13,223
0.002723
# Copyright (C) 2011 Canonical
#
# Authors:
#  Matthew McGowan
#  Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import logging
import time
import threading
import xapian

from gi.repository import GObject

from softwarecenter.enums import (SortMethods,
                                  XapianValues,
                                  NonAppVisibility,
                                  DEFAULT_SEARCH_LIMIT)
from softwarecenter.backend.reviews import get_review_loader
from softwarecenter.db.database import (
    SearchQuery, LocaleSorter, TopRatedSorter)
from softwarecenter.distro import get_distro
from softwarecenter.utils import ExecutionTime

LOG = logging.getLogger(__name__)


class AppEnquire(GObject.GObject):
    """
    An interface to enquire data from a xapian database.
    It can be combined with any xapian query and with a generic filter
    function (that can filter on data not available in xapian)
    """

    # signal emitted
    __gsignals__ = {"query-complete": (GObject.SIGNAL_RUN_FIRST,
                                       GObject.TYPE_NONE,
                                       ()),
                    }

    def __init__(self, cache, db):
        """
        Init an AppEnquire object

        :Parameters:
        - `cache`: apt cache (for stuff like the overlay icon)
        - `db`: a xapian.Database that contains the applications
        """
        GObject.GObject.__init__(self)
        self.cache = cache
        self.db = db
        self.distro = get_distro()
        self.search_query = SearchQuery(None)
        self.nonblocking_load = True
        self.sortmode = SortMethods.UNSORTED
        self.nonapps_visible = NonAppVisibility.MAYBE_VISIBLE
        self.limit = DEFAULT_SEARCH_LIMIT
        self.filter = None
        self.exact = False
        self.nr_pkgs = 0
        self.nr_apps = 0
        self._matches = []
        self.match_docids = set()

    def __len__(self):
        return len(self._matches)

    @property
    def matches(self):
        """ return the list of matches as xapian.MSetItem """
        return self._matches

    def _threaded_perform_search(self):
        self._perform_search_complete = False
        # generate a name and ensure we never have two threads
        # with the same name
        names = [thread.name for thread in threading.enumerate()]
        for i in range(threading.active_count() + 1, 0, -1):
            thread_name = 'ThreadedQuery-%s' % i
            if not thread_name in names:
                break
        # create and start it
        t = threading.Thread(
            target=self._blocking_perform_search, name=thread_name)
        t.start()
        # don't block the UI while the thread is running
        context = GObject.main_context_default()
        while not self._perform_search_complete:
            time.sleep(0.02)  # 50 fps
            while context.pending():
                context.iteration()
        t.join()

        # call the query-complete callback
        self.emit("query-complete")

    def _get_estimate_nr_apps_and_nr_pkgs(self, enquire, q, xfilter):
        # filter out docs of pkgs of which there exists a doc of the app
        enquire.set_query(xapian.Query(xapian.Query.OP_AND,
                                       q, xapian.Query("ATapplication")))

        try:
            tmp_matches = enquire.get_mset(0, len(self.db), None, xfilter)
        except Exception:
            LOG.exception("_get_estimate_nr_apps_and_nr_pkgs failed")
            return (0, 0)

        nr_apps = tmp_matches.get_matches_estimated()
        enquire.set_query(xapian.Query(xapian.Query.OP_AND_NOT,
                                       q, xapian.Query("XD")))
        tmp_matches = enquire.get_mset(0, len(self.db), None, xfilter)
        nr_pkgs = tmp_matches.get_matches_estimated() - nr_apps
        return (nr_apps, nr_pkgs)

    def _blocking_perform_search(self):
        # WARNING this call may run in a thread, so it's *not*
        # allowed to touch gtk, otherwise hell breaks loose

        # performance only: this is only needed to avoid the
        # python __call__ overhead for each item if we can avoid it

        # use a unique instance of both enquire and xapian database
        # so concurrent queries don't result in an inconsistent database
        # an alternative would be to serialise queries
        enquire = xapian.Enquire(self.db.xapiandb)

        if self.filter and self.filter.required:
            xfilter = self.filter
        else:
            xfilter = None

        # go over the queries
        self.nr_apps, self.nr_pkgs = 0, 0
        _matches = self._matches
        match_docids = self.match_docids

        for q in self.search_query:
            LOG.debug("initial query: '%s'" % q)

            # for searches we may want to disable show/hide
            terms = [term for term in q]
            exact_pkgname_query = (len(terms) == 1 and
                                   terms[0].startswith("XP"))

            with ExecutionTime("calculate nr_apps and nr_pkgs: "):
                nr_apps, nr_pkgs = self._get_estimate_nr_apps_and_nr_pkgs(
                    enquire, q, xfilter)
            self.nr_apps += nr_apps
            self.nr_pkgs += nr_pkgs

            # only show apps by default (unless in always visible mode)
            if self.nonapps_visible != NonAppVisibility.ALWAYS_VISIBLE:
                if not exact_pkgname_query:
                    q = xapian.Query(xapian.Query.OP_AND,
                                     xapian.Query("ATapplication"),
                                     q)

            LOG.debug("nearly completely filtered query: '%s'" % q)

            # filter out docs of pkgs of which there exists a doc of the app
            # FIXME: make this configurable again?
            enquire.set_query(xapian.Query(xapian.Query.OP_AND_NOT,
                                           q, xapian.Query("XD")))

            # sort results

            # cataloged time - what's new category
            if self.sortmode == SortMethods.BY_CATALOGED_TIME:
                if (self.db._axi_values and
                        "catalogedtime" in self.db._axi_values):
                    enquire.set_sort_by_value(
                        self.db._axi_values["catalogedtime"], reverse=True)
                else:
                    LOG.warning("no catelogedtime in axi")
            elif self.sortmode == SortMethods.BY_TOP_RATED:
                review_loader = get_review_loader(self.cache, self.db)
                sorter = TopRatedSorter(self.db, review_loader)
                enquire.set_sort_by_key(sorter, reverse=True)
            # search ranking - when searching
            elif self.sortmode == SortMethods.BY_SEARCH_RANKING:
                #enquire.set_sort_by_value(XapianValues.POPCON)
                # use the default
                enquire.set_sort_by_relevance()
                pass
            # display name - all categories / channels
            elif (self.db._axi_values and
                  "display_name" in self.db._axi_values):
                enquire.set_sort_by_key(LocaleSorter(self.db), reverse=False)
            # fallback to pkgname - if needed?
            else:
                enquire.set_sort_by_value_then_relevance(
                    XapianValues.PKGNAME, False)

            #~ try:
            if self.limit == 0:
                matches = enquire.get_mset(0, len(self.db), None, xfilter)
            else:
                matches = enquire.get_mset(0, self.lim
xZise/pywikibot-core
scripts/newitem.py
Python
mit
4,866
0.000822
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script creates new items on Wikidata based on certain criteria.

* When was the (Wikipedia) page created?
* When was the last edit on the page?
* Does the page contain interwiki's?

This script understands various command-line arguments:

-lastedit         The minimum number of days that has passed since the page
                  was last edited.

-pageage          The minimum number of days that has passed since the page
                  was created.

-touch            Do a null edit on every page which has a wikibase item.

"""
#
# (C) Multichill, 2014
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals

__version__ = '$Id$'
#

import pywikibot
from pywikibot import pagegenerators, WikidataBot
from datetime import timedelta


class NewItemRobot(WikidataBot):

    """A bot to create new items."""

    def __init__(self, generator, **kwargs):
        """Only accepts options defined in availableOptions."""
        self.availableOptions.update({
            'lastedit': 7,
            'pageage': 21,
            'touch': False,
        })

        super(NewItemRobot, self).__init__(**kwargs)
        self.generator = pagegenerators.PreloadingGenerator(generator)
        self.pageAge = self.getOption('pageage')
        self.lastEdit = self.getOption('lastedit')
        self.pageAgeBefore = self.repo.getcurrenttime() - timedelta(days=self.pageAge)
        self.lastEditBefore = self.repo.getcurrenttime() - timedelta(days=self.lastEdit)
        self.treat_missing_item = True
        pywikibot.output('Page age is set to %s days so only pages created'
                         '\nbefore %s will be considered.'
                         % (self.pageAge, self.pageAgeBefore.isoformat()))
        pywikibot.output('Last edit is set to %s days so only pages last edited'
                         '\nbefore %s will be considered.'
                         % (self.lastEdit, self.lastEditBefore.isoformat()))

    def treat(self, page, item):
        """Treat page/item."""
        if item and item.exists():
            pywikibot.output(u'%s already has an item: %s.' % (page, item))
            if self.getOption('touch'):
                pywikibot.output(u'Doing a null edit on the page.')
                page.put(page.text)
            return

        self.current_page = page

        if page.isRedirectPage():
            pywikibot.output(u'%s is a redirect page. Skipping.' % page)
            return
        if page.editTime() > self.lastEditBefore:
            pywikibot.output(
                u'Last edit on %s was on %s.\nToo recent. Skipping.'
                % (page, page.editTime().isoformat()))
            return

        if page.oldest_revision.timestamp > self.pageAgeBefore:
            pywikibot.output(
                u'Page creation of %s on %s is too recent. Skipping.'
                % (page, page.oldest_revision.timestamp.isoformat()))
            return

        if page.langlinks():
            # FIXME: Implement this
            pywikibot.output(
                "Found language links (interwiki links).\n"
                "Haven't implemented that yet so skipping.")
            return

        # FIXME: i18n
        summary = (u'Bot: New item with sitelink from %s'
                   % page.title(asLink=True, insite=self.repo))

        data = {'sitelinks':
                {page.site.dbName():
                 {'site': page.site.dbName(),
                  'title': page.title()}
                 },
                'labels':
                {page.site.lang:
                 {'language': page.site.lang,
                  'value': page.title()}
                 }
                }
        pywikibot.output(summary)
        item = pywikibot.ItemPage(page.site.data_repository())
        item.editEntity(data, summary=summary)
        # And do a null edit to force update
        page.put(page.text)


def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    # Process global args and prepare generator args parser
    local_args = pywikibot.handle_args(args)
    gen = pagegenerators.GeneratorFactory()

    options = {}
    for arg in local_args:
        if (
                arg.startswith('-pageage:') or
                arg.startswith('-lastedit:')):
            key, val = arg.split(':', 1)
            options[key[1:]] = int(val)
        elif gen.handleArg(arg):
            pass
        else:
            options[arg[1:].lower()] = True

    generator = gen.getCombinedGenerator()
    if not generator:
        pywikibot.showHelp()
        return

    bot = NewItemRobot(generator, **options)
    bot.run()


if __name__ == "__main__":
    main()
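A hedged usage sketch; newitem.py is normally run through pywikibot's pwb.py wrapper, and the category and day thresholds below are invented:

# python pwb.py newitem -lang:en -family:wikipedia -cat:"Example category" -pageage:30 -lastedit:14
# python pwb.py newitem -lang:en -family:wikipedia -cat:"Example category" -touch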
diegordzr/YellowSpider
yellow/yellow/pipelines.py
Python
mit
1,714
0.021004
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json

import pymongo
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
from scrapy import log
from pymongo import ReturnDocument


class YellowPipeline(object):

    def __init__(self, mongo_uri, mongo_db, collection_name):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.collection_name = collection_name

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGODB_SERVER'),
            mongo_db=crawler.settings.get('MONGODB_DB'),
            collection_name=crawler.settings.get('MONGODB_COLLECTION')
        )

    def open_spider(self, spider):
        log.msg("Open client", level=log.DEBUG, spider=spider)
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        log.msg("Close client", level=log.DEBUG, spider=spider)
        self.client.close()

    def process_item(self, item, spider):
        #self.db[self.collection_name].insert(dict(item))
        #if('email' in item):
        self.db[self.collection_name].find_one_and_update(
            {'key': item['key']},
            {'$set': dict(item)},
            upsert=True)
        log.msg("Contact added to MongoDB database!", level=log.DEBUG, spider=spider)
        return item


class DuplicatesPipeline(object):

    def __init__(self):
        self.ids_seen = set()

    def process_item(self, item, spider):
        if item['id'] in self.ids_seen:
            raise DropItem("Duplicate item found: %s" % item)
        else:
            self.ids_seen.add(item['id'])
            return item
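A hedged settings.py sketch wiring up the pipelines above; the MONGODB_* key names match what from_crawler() reads, but the values and pipeline priorities are assumptions:

ITEM_PIPELINES = {
    'yellow.pipelines.DuplicatesPipeline': 100,  # drop duplicates before storage
    'yellow.pipelines.YellowPipeline': 300,
}
MONGODB_SERVER = 'mongodb://localhost:27017'
MONGODB_DB = 'yellow'
MONGODB_COLLECTION = 'contacts'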
ajitghz582/PythonLearning
DAY_1_ASSIGNMENTS/1_name_phone_number.py
Python
mit
1,070
0.052336
student_phoneNumber_name = {1: 'a', 3: 'c', 2: 'b'}


def Handler():
	while (1):
		# int() instead of eval(): eval would execute arbitrary user input
		choice = int(input("Enter :\t 1 - to search student name \n \t 2 - to insert new student record \n \t 0 - to quit\n"))
		print(choice)
		if (choice == 1):
			if (student_phoneNumber_name):
				phone_number = input("Enter student's phone number : ")
				name = SearchRecord(phone_number)
				if (name):
					print("name : " + name)
				else:
					print(str(phone_number) + " does not exist in record " + str(name))
			else:
				print("Record is empty ")
		elif (choice == 2):
			phone_number = input("Enter student's phone number : ")
			name = input("Enter student's name : ")	#best example to understand input() and raw_input()
			InsertRecord(phone_number, name)
		elif (choice == 0):
			break
		else:
			print("Enter correct choice")


def InsertRecord(x, y):
	student_phoneNumber_name[x] = y
	return


def SearchRecord(x):
	print(x)
	if (x in student_phoneNumber_name):
		return student_phoneNumber_name[x]
	return False


Handler()
print(student_phoneNumber_name)
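A hypothetical interactive session with the menu above (all inputs invented); note that inserted keys are strings while the seed keys are ints, so the final dict mixes both:

# Enter :   1 - to search student name
#           2 - to insert new student record
#           0 - to quit
# 2
# Enter student's phone number : 5551234
# Enter student's name : dana
# 1
# Enter student's phone number : 5551234
# 5551234
# name : dana
# 0
# {1: 'a', 3: 'c', 2: 'b', '5551234': 'dana'}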